Compare commits

...

4 Commits

Author SHA1 Message Date
l5y
c1898037c0 web: add secondary seed node jmrp.io (#568) 2025-12-16 21:38:41 +01:00
l5y
efc5f64279 data: implement whitelist for ingestor (#567)
* data: implement whitelist for ingestor

* data: run black

* data: cover missing unit test vectors
2025-12-16 21:11:53 +01:00
l5y
636a203254 web: add ?since= parameter to all apis (#566) 2025-12-16 20:24:31 +01:00
l5y
2e78fa7a3a matrix: fix docker build 2025-12-16 19:26:31 +01:00
16 changed files with 501 additions and 39 deletions

View File

@@ -53,6 +53,7 @@ Additional environment variables are optional:
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom (disables the auto-fit checkbox when set). |
| `MAX_DISTANCE` | `42` | Maximum relationship distance (km) before edges are hidden. |
| `DEBUG` | `0` | Enables verbose logging across services when set to `1`. |
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; when set, channels not listed are skipped before hidden-channel filtering is applied. |
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor skips when forwarding packets. |
| `FEDERATION` | `1` | Controls whether the instance announces itself and crawls peers (`1`) or stays isolated (`0`). |
| `PRIVATE` | `0` | Restricts public visibility and disables chat/message endpoints when set to `1`. |

View File

@@ -92,6 +92,7 @@ The web app can be configured with environment variables (defaults shown):
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom applied on first load; disables auto-fit when provided. |
| `MAX_DISTANCE` | `42` | Maximum distance (km) before node relationships are hidden on the map. |
| `DEBUG` | `0` | Set to `1` for verbose logging in the web and ingestor services. |
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; when set, all other channels are skipped before hidden filters. |
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor will ignore when forwarding packets. |
| `FEDERATION` | `1` | Set to `1` to announce your instance and crawl peers, or `0` to disable federation. Private mode overrides this. |
| `PRIVATE` | `0` | Set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients from public listings. |
@@ -203,9 +204,10 @@ specify the connection target with `CONNECTION` (default `/dev/ttyACM0`) or set
an IP address (for example `192.168.1.20:4403`) to use the Meshtastic TCP
interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.,
`ED:4D:9E:95:CF:60`) and the script attempts a BLE connection if available. To keep
private channels out of the web UI, set `HIDDEN_CHANNELS` to a comma-separated
list of channel names (for example `HIDDEN_CHANNELS="Secret,Ops"`); packets on
those channels are discarded instead of being sent to `/api/messages`.
To keep ingestion limited, set `ALLOWED_CHANNELS` to a comma-separated whitelist (for
example `ALLOWED_CHANNELS="Chat,Ops"`); packets on other channels are discarded.
Use `HIDDEN_CHANNELS` to block specific channels from the web UI even when they
appear in the allowlist.
## Docker

View File

@@ -77,6 +77,7 @@ FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' ||
FEDERATION=$(grep "^FEDERATION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "1")
PRIVATE=$(grep "^PRIVATE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
HIDDEN_CHANNELS=$(grep "^HIDDEN_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
ALLOWED_CHANNELS=$(grep "^ALLOWED_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
MAP_ZOOM=$(grep "^MAP_ZOOM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
@@ -127,6 +128,9 @@ echo "-------------------"
echo "Private mode hides public mesh messages from unauthenticated visitors."
echo "Set to 1 to hide public feeds or 0 to keep them visible."
read_with_default "Enable private mode (1=yes, 0=no)" "$PRIVATE" PRIVATE
echo "Provide a comma-separated whitelist of channel names to ingest (optional)."
echo "When set, only listed channels are ingested; channels hidden below are still excluded."
read_with_default "Allowed channels" "$ALLOWED_CHANNELS" ALLOWED_CHANNELS
echo "Provide a comma-separated list of channel names to hide from the web UI (optional)."
read_with_default "Hidden channels" "$HIDDEN_CHANNELS" HIDDEN_CHANNELS
@@ -199,6 +203,11 @@ update_env "POTATOMESH_IMAGE_TAG" "$POTATOMESH_IMAGE_TAG"
update_env "FEDERATION" "$FEDERATION"
update_env "PRIVATE" "$PRIVATE"
update_env "CONNECTION" "$CONNECTION"
if [ -n "$ALLOWED_CHANNELS" ]; then
update_env "ALLOWED_CHANNELS" "\"$ALLOWED_CHANNELS\""
else
sed -i.bak '/^ALLOWED_CHANNELS=.*/d' .env
fi
if [ -n "$HIDDEN_CHANNELS" ]; then
update_env "HIDDEN_CHANNELS" "\"$HIDDEN_CHANNELS\""
else
@@ -252,6 +261,7 @@ echo " API Token: ${API_TOKEN:0:8}..."
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
echo " Docker Image Tag: $POTATOMESH_IMAGE_TAG"
echo " Private Mode: ${PRIVATE}"
echo " Allowed Channels: ${ALLOWED_CHANNELS:-'All'}"
echo " Hidden Channels: ${HIDDEN_CHANNELS:-'None'}"
echo " Instance Domain: ${INSTANCE_DOMAIN:-'Auto-detected'}"
if [ "${FEDERATION:-1}" = "0" ]; then

View File

@@ -50,6 +50,8 @@ USER potatomesh
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
ALLOWED_CHANNELS="" \
HIDDEN_CHANNELS="" \
INSTANCE_DOMAIN="" \
API_TOKEN=""
@@ -75,6 +77,8 @@ USER ContainerUser
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
ALLOWED_CHANNELS="" \
HIDDEN_CHANNELS="" \
INSTANCE_DOMAIN="" \
API_TOKEN=""

View File

@@ -70,6 +70,7 @@ _CONFIG_ATTRS = {
"DEBUG",
"INSTANCE",
"API_TOKEN",
"ALLOWED_CHANNELS",
"HIDDEN_CHANNELS",
"LORA_FREQ",
"MODEM_PRESET",

View File

@@ -228,6 +228,33 @@ def hidden_channel_names() -> tuple[str, ...]:
return tuple(getattr(config, "HIDDEN_CHANNELS", ()))
def allowed_channel_names() -> tuple[str, ...]:
    """Expose the allowlist currently configured for the ingestor.

    Returns:
        Tuple of channel names read from ``config.ALLOWED_CHANNELS``;
        empty when no allowlist is configured.
    """
    configured = getattr(config, "ALLOWED_CHANNELS", ())
    return tuple(configured)
def is_allowed_channel(channel_name_value: str | None) -> bool:
    """Check ``channel_name_value`` against the configured allowlist.

    An empty or unset allowlist permits every channel. Otherwise the name
    must match one allowlist entry case-insensitively after trimming
    surrounding whitespace.

    Parameters:
        channel_name_value: Channel name to test; ``None`` or blank names
            are rejected whenever an allowlist is active.

    Returns:
        ``True`` when the channel may be ingested, ``False`` otherwise.
    """
    allowlist = getattr(config, "ALLOWED_CHANNELS", ())
    if not allowlist:
        # No allowlist configured: policy permits everything.
        return True
    if channel_name_value is None:
        return False
    candidate = channel_name_value.strip().casefold()
    if not candidate:
        # Blank names cannot match any configured entry.
        return False
    return any(candidate == entry.casefold() for entry in allowlist)
def is_hidden_channel(channel_name_value: str | None) -> bool:
"""Return ``True`` when ``channel_name_value`` is configured as hidden."""
@@ -255,7 +282,9 @@ __all__ = [
"capture_from_interface",
"channel_mappings",
"channel_name",
"allowed_channel_names",
"hidden_channel_names",
"is_allowed_channel",
"is_hidden_channel",
"_reset_channel_cache",
]

View File

@@ -66,8 +66,8 @@ CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
DEBUG = os.environ.get("DEBUG") == "1"
def _parse_hidden_channels(raw_value: str | None) -> tuple[str, ...]:
"""Normalise a comma-separated list of hidden channel names.
def _parse_channel_names(raw_value: str | None) -> tuple[str, ...]:
"""Normalise a comma-separated list of channel names.
Parameters:
raw_value: Raw environment string containing channel names separated by
@@ -96,9 +96,18 @@ def _parse_hidden_channels(raw_value: str | None) -> tuple[str, ...]:
return tuple(normalized_entries)
def _parse_hidden_channels(raw_value: str | None) -> tuple[str, ...]:
"""Compatibility wrapper that parses hidden channel names."""
return _parse_channel_names(raw_value)
HIDDEN_CHANNELS = _parse_hidden_channels(os.environ.get("HIDDEN_CHANNELS"))
"""Channel names configured to be ignored by the ingestor."""
ALLOWED_CHANNELS = _parse_channel_names(os.environ.get("ALLOWED_CHANNELS"))
"""Explicitly permitted channel names; when set, other channels are ignored."""
def _resolve_instance_domain() -> str:
"""Resolve the configured instance domain from the environment.
@@ -183,6 +192,7 @@ __all__ = [
"CHANNEL_INDEX",
"DEBUG",
"HIDDEN_CHANNELS",
"ALLOWED_CHANNELS",
"INSTANCE",
"API_TOKEN",
"ENERGY_SAVING",

View File

@@ -1461,6 +1461,18 @@ def store_packet_dict(packet: Mapping) -> None:
_record_ignored_packet(packet, reason="skipped-direct-message")
return
if not channels.is_allowed_channel(channel_name_value):
_record_ignored_packet(packet, reason="disallowed-channel")
if config.DEBUG:
config._debug_log(
"Ignored packet on disallowed channel",
context="handlers.store_packet_dict",
channel=channel,
channel_name=channel_name_value,
allowed_channels=channels.allowed_channel_names(),
)
return
if channels.is_hidden_channel(channel_name_value):
_record_ignored_packet(packet, reason="hidden-channel")
if config.DEBUG:

View File

@@ -49,6 +49,7 @@ x-ingestor-base: &ingestor-base
environment:
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
ALLOWED_CHANNELS: ${ALLOWED_CHANNELS:-""}
HIDDEN_CHANNELS: ${HIDDEN_CHANNELS:-""}
API_TOKEN: ${API_TOKEN}
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM rust:1.79-bookworm AS builder
FROM rust:1.91-bookworm AS builder
WORKDIR /app

View File

@@ -285,6 +285,40 @@ def test_instance_domain_infers_scheme_for_hostnames(mesh_module, monkeypatch):
mesh_module.INSTANCE = mesh_module.config.INSTANCE
def test_parse_channel_names_applies_allowlist(mesh_module):
    """Ensure allowlists reuse the shared channel parser.

    Verifies that ``_parse_channel_names`` trims whitespace and drops
    case-insensitive duplicates, and that the resulting tuple drives
    ``channels.is_allowed_channel`` / ``allowed_channel_names``.
    """
    mesh = mesh_module
    # Preserve the module-level allowlist so other tests see the original value.
    previous_allowed = mesh.ALLOWED_CHANNELS
    try:
        # "primary" duplicates "Primary" case-insensitively and must be dropped.
        parsed = mesh.config._parse_channel_names(" Primary ,Chat ,primary , Ops ")
        # NOTE(review): mesh.ALLOWED_CHANNELS appears to alias
        # config.ALLOWED_CHANNELS via the fixture — confirm in conftest.
        mesh.ALLOWED_CHANNELS = parsed
        assert parsed == ("Primary", "Chat", "Ops")
        assert mesh.channels.allowed_channel_names() == ("Primary", "Chat", "Ops")
        # Matching is case-insensitive and ignores surrounding whitespace.
        assert mesh.channels.is_allowed_channel("chat")
        assert mesh.channels.is_allowed_channel(" ops ")
        assert not mesh.channels.is_allowed_channel("unknown")
        assert not mesh.channels.is_allowed_channel(None)
        # An empty string parses to an empty tuple (i.e. no allowlist).
        assert mesh.config._parse_channel_names("") == ()
    finally:
        mesh.ALLOWED_CHANNELS = previous_allowed
def test_allowed_channel_defaults_allow_all(mesh_module):
    """Ensure unset allowlists do not block any channels."""
    mesh = mesh_module
    # Save and restore the allowlist so this test stays isolated.
    previous_allowed = mesh.ALLOWED_CHANNELS
    try:
        # An empty tuple means "no allowlist": every channel is permitted.
        mesh.ALLOWED_CHANNELS = ()
        assert mesh.channels.is_allowed_channel("Any")
    finally:
        mesh.ALLOWED_CHANNELS = previous_allowed
def test_parse_hidden_channels_deduplicates_names(mesh_module):
"""Ensure hidden channel parsing strips blanks and deduplicates."""
@@ -1997,8 +2031,10 @@ def test_store_packet_dict_skips_hidden_channel(mesh_module, monkeypatch, capsys
previous_debug = mesh.config.DEBUG
previous_hidden = mesh.HIDDEN_CHANNELS
previous_allowed = mesh.ALLOWED_CHANNELS
mesh.config.DEBUG = True
mesh.DEBUG = True
mesh.ALLOWED_CHANNELS = ("Chat",)
mesh.HIDDEN_CHANNELS = ("Chat",)
try:
@@ -2017,6 +2053,77 @@ def test_store_packet_dict_skips_hidden_channel(mesh_module, monkeypatch, capsys
assert ignored == ["hidden-channel"]
assert "Ignored packet on hidden channel" in capsys.readouterr().out
finally:
mesh.HIDDEN_CHANNELS = previous_hidden
mesh.ALLOWED_CHANNELS = previous_allowed
mesh.config.DEBUG = previous_debug
mesh.DEBUG = previous_debug
def test_store_packet_dict_skips_disallowed_channel(mesh_module, monkeypatch, capsys):
    """Packets on channels outside the allowlist are ignored, not forwarded.

    Captures a channel table from a dummy interface, restricts the allowlist
    to "Primary", then stores a packet on channel index 5 ("Chat") and checks
    it is recorded as ignored with reason ``disallowed-channel`` and that the
    debug log line is emitted.
    """
    mesh = mesh_module
    mesh.channels._reset_channel_cache()
    mesh.config.MODEM_PRESET = None

    # Minimal stand-in for a Meshtastic interface exposing two channels:
    # "Primary" (role=1) and "Chat" (role=2, index=5).
    class DummyInterface:
        def __init__(self) -> None:
            self.localNode = SimpleNamespace(
                channels=[
                    SimpleNamespace(
                        role=1,
                        settings=SimpleNamespace(name="Primary"),
                    ),
                    SimpleNamespace(
                        role=2,
                        index=5,
                        settings=SimpleNamespace(name="Chat"),
                    ),
                ]
            )

        def waitForConfig(self):
            return None

    mesh.channels.capture_from_interface(DummyInterface())
    # Drain any output produced during channel capture before asserting.
    capsys.readouterr()
    captured: list[tuple[str, dict, int]] = []
    ignored: list[str] = []
    # Record outbound posts instead of queueing them for delivery.
    monkeypatch.setattr(
        mesh,
        "_queue_post_json",
        lambda path, payload, *, priority: captured.append((path, payload, priority)),
    )
    # Record ignore reasons instead of writing them to storage.
    monkeypatch.setattr(
        mesh.handlers,
        "_record_ignored_packet",
        lambda packet, *, reason: ignored.append(reason),
    )
    # Snapshot global state that the test mutates so teardown can restore it.
    previous_debug = mesh.config.DEBUG
    previous_allowed = mesh.ALLOWED_CHANNELS
    previous_hidden = mesh.HIDDEN_CHANNELS
    mesh.config.DEBUG = True
    mesh.DEBUG = True
    # Only "Primary" is allowed; channel 5 maps to "Chat" and must be dropped.
    mesh.ALLOWED_CHANNELS = ("Primary",)
    mesh.HIDDEN_CHANNELS = ()
    try:
        packet = {
            "id": "1001",
            "rxTime": 25_680,
            "from": "!sender",
            "to": "^all",
            "channel": 5,
            "decoded": {"text": "disallowed msg", "portnum": 1},
        }
        mesh.store_packet_dict(packet)
        # Nothing forwarded; the packet was recorded as disallowed and logged.
        assert captured == []
        assert ignored == ["disallowed-channel"]
        assert "Ignored packet on disallowed channel" in capsys.readouterr().out
    finally:
        mesh.ALLOWED_CHANNELS = previous_allowed
        mesh.HIDDEN_CHANNELS = previous_hidden
        mesh.config.DEBUG = previous_debug
        mesh.DEBUG = previous_debug

View File

@@ -116,6 +116,17 @@ module PotatoMesh
coerced
end
# Clamp a caller-supplied pagination timestamp into a safe range.
#
# @param since [Object] requested lower bound in seconds since the epoch.
# @param floor [Integer] minimum timestamp the caller will accept.
# @return [Integer] non-negative timestamp, never below +floor+.
def normalize_since_threshold(since, floor: 0)
  requested = coerce_integer(since)
  # Treat unparseable or negative input as "no lower bound requested".
  requested = 0 unless requested && !requested.negative?
  requested < floor ? floor : requested
end
def node_reference_tokens(node_ref)
parts = canonical_node_parts(node_ref)
canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
@@ -198,12 +209,19 @@ module PotatoMesh
["(#{clauses.join(" OR ")})", params]
end
def query_nodes(limit, node_ref: nil)
# Fetch node state optionally scoped by identifier and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to narrow results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted node rows suitable for API responses.
def query_nodes(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
now = Time.now.to_i
min_last_heard = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_last_heard)
params = []
where_clauses = []
@@ -214,7 +232,7 @@ module PotatoMesh
params.concat(clause.last)
else
where_clauses << "last_heard >= ?"
params << min_last_heard
params << since_threshold
end
if private_mode?
@@ -242,7 +260,7 @@ module PotatoMesh
.map { |value| coerce_integer(value) }
.compact
.max
last_candidate && last_candidate >= min_last_heard
last_candidate && last_candidate >= since_threshold
end
rows.each do |r|
r["role"] ||= "CLIENT"
@@ -262,12 +280,18 @@ module PotatoMesh
db&.close
end
def query_ingestors(limit)
# Fetch ingestor heartbeats with optional freshness filtering.
#
# @param limit [Integer] maximum number of ingestors to return.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted ingestor rows suitable for API responses.
def query_ingestors(limit, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
now = Time.now.to_i
cutoff = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: cutoff)
sql = <<~SQL
SELECT node_id, start_time, last_seen_time, version, lora_freq, modem_preset
FROM ingestors
@@ -276,7 +300,7 @@ module PotatoMesh
LIMIT ?
SQL
rows = db.execute(sql, [cutoff, limit])
rows = db.execute(sql, [since_threshold, limit])
rows.each do |row|
row.delete_if { |key, _| key.is_a?(Integer) }
start_time = coerce_integer(row["start_time"])
@@ -306,8 +330,7 @@ module PotatoMesh
# @return [Array<Hash>] compacted message rows safe for API responses.
def query_messages(limit, node_ref: nil, include_encrypted: false, since: 0)
limit = coerce_query_limit(limit)
since_threshold = coerce_integer(since)
since_threshold = 0 if since_threshold.nil? || since_threshold.negative?
since_threshold = normalize_since_threshold(since, floor: 0)
db = open_database(readonly: true)
db.results_as_hash = true
params = []
@@ -385,7 +408,13 @@ module PotatoMesh
db&.close
end
def query_positions(limit, node_ref: nil)
# Fetch positions optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted position rows suitable for API responses.
def query_positions(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -393,8 +422,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, position_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
@@ -436,7 +466,13 @@ module PotatoMesh
db&.close
end
def query_neighbors(limit, node_ref: nil)
# Fetch neighbor relationships optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted neighbor rows suitable for API responses.
def query_neighbors(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -444,8 +480,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id", "neighbor_id"])
@@ -476,7 +513,13 @@ module PotatoMesh
db&.close
end
def query_telemetry(limit, node_ref: nil)
# Fetch telemetry packets optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted telemetry rows suitable for API responses.
def query_telemetry(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -484,8 +527,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, telemetry_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
@@ -555,7 +599,13 @@ module PotatoMesh
db&.close
end
def query_telemetry_buckets(window_seconds:, bucket_seconds:)
# Aggregate telemetry metrics into time buckets.
#
# @param window_seconds [Integer] duration expressed in seconds to include in the query.
# @param bucket_seconds [Integer] size of each aggregation bucket in seconds.
# @param since [Integer] unix timestamp threshold applied in addition to the requested window.
# @return [Array<Hash>] aggregated telemetry metrics grouped by bucket start time.
def query_telemetry_buckets(window_seconds:, bucket_seconds:, since: 0)
window = coerce_integer(window_seconds) || DEFAULT_TELEMETRY_WINDOW_SECONDS
window = DEFAULT_TELEMETRY_WINDOW_SECONDS if window <= 0
bucket = coerce_integer(bucket_seconds) || DEFAULT_TELEMETRY_BUCKET_SECONDS
@@ -565,6 +615,7 @@ module PotatoMesh
db.results_as_hash = true
now = Time.now.to_i
min_timestamp = now - window
since_threshold = normalize_since_threshold(since, floor: min_timestamp)
bucket_expression = "((COALESCE(rx_time, telemetry_time) / ?) * ?)"
select_clauses = [
"#{bucket_expression} AS bucket_start",
@@ -590,7 +641,7 @@ module PotatoMesh
ORDER BY bucket_start ASC
LIMIT ?
SQL
params = [bucket, bucket, min_timestamp, MAX_QUERY_LIMIT]
params = [bucket, bucket, since_threshold, MAX_QUERY_LIMIT]
rows = db.execute(sql, params)
rows.map do |row|
bucket_start = coerce_integer(row["bucket_start"])
@@ -670,7 +721,13 @@ module PotatoMesh
column
end
def query_traces(limit, node_ref: nil)
# Fetch trace records optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted trace rows suitable for API responses.
def query_traces(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -678,8 +735,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
tokens = node_reference_tokens(node_ref)

View File

@@ -64,7 +64,7 @@ module PotatoMesh
app.get "/api/nodes" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_nodes(limit).to_json
query_nodes(limit, since: params["since"]).to_json
end
app.get "/api/nodes/:id" do
@@ -72,7 +72,7 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
rows = query_nodes(limit, node_ref: node_ref)
rows = query_nodes(limit, node_ref: node_ref, since: params["since"])
halt 404, { error: "not found" }.to_json if rows.empty?
rows.first.to_json
end
@@ -80,7 +80,7 @@ module PotatoMesh
app.get "/api/ingestors" do
content_type :json
limit = coerce_query_limit(params["limit"])
query_ingestors(limit).to_json
query_ingestors(limit, since: params["since"]).to_json
end
app.get "/api/messages" do
@@ -111,7 +111,7 @@ module PotatoMesh
app.get "/api/positions" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit).to_json
query_positions(limit, since: params["since"]).to_json
end
app.get "/api/positions/:id" do
@@ -119,13 +119,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit, node_ref: node_ref).to_json
query_positions(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/neighbors" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit).to_json
query_neighbors(limit, since: params["since"]).to_json
end
app.get "/api/neighbors/:id" do
@@ -133,13 +133,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit, node_ref: node_ref).to_json
query_neighbors(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/telemetry" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit).to_json
query_telemetry(limit, since: params["since"]).to_json
end
app.get "/api/telemetry/aggregated" do
@@ -170,7 +170,11 @@ module PotatoMesh
halt 400, { error: "bucketSeconds too small for requested window" }.to_json
end
query_telemetry_buckets(window_seconds: window_seconds, bucket_seconds: bucket_seconds).to_json
query_telemetry_buckets(
window_seconds: window_seconds,
bucket_seconds: bucket_seconds,
since: params["since"],
).to_json
end
app.get "/api/telemetry/:id" do
@@ -178,13 +182,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit, node_ref: node_ref).to_json
query_telemetry(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/traces" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit).to_json
query_traces(limit, since: params["since"]).to_json
end
app.get "/api/traces/:id" do
@@ -192,7 +196,7 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit, node_ref: node_ref).to_json
query_traces(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/instances" do

View File

@@ -42,6 +42,7 @@ module PotatoMesh
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY = 128
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS = 120
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2
DEFAULT_FEDERATION_SEED_DOMAINS = %w[potatomesh.net potatomesh.jmrp.io].freeze
# Retrieve the configured API token used for authenticated requests.
#
@@ -409,7 +410,7 @@ module PotatoMesh
#
# @return [Array<String>] list of default seed domains.
def federation_seed_domains
["potatomesh.net"].freeze
DEFAULT_FEDERATION_SEED_DOMAINS
end
# Determine how often we broadcast federation announcements.

View File

@@ -1080,7 +1080,8 @@ RSpec.describe "Potato Mesh Sinatra app" do
targets = application_class.federation_target_domains("self.mesh")
expect(targets.first).to eq("potatomesh.net")
seed_domains = PotatoMesh::Config.federation_seed_domains.map(&:downcase)
expect(targets.first(seed_domains.length)).to eq(seed_domains)
expect(targets).to include("remote.mesh")
expect(targets).not_to include("self.mesh")
end
@@ -1090,7 +1091,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
targets = application_class.federation_target_domains("self.mesh")
expect(targets).to eq(["potatomesh.net"])
expect(targets).to eq(PotatoMesh::Config.federation_seed_domains.map(&:downcase))
end
it "ignores remote instances that have not updated within a week" do
@@ -1118,7 +1119,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
targets = application_class.federation_target_domains("self.mesh")
expect(targets).to eq(["potatomesh.net"])
expect(targets).to eq(PotatoMesh::Config.federation_seed_domains.map(&:downcase))
end
end
@@ -4106,6 +4107,39 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(payload["node_id"]).to eq("!fresh-node")
end
# Seeds one stale and one fresh node, then checks that ?since= filters both
# the collection endpoint and the single-node lookup (404 when filtered out).
it "filters node results using the since parameter for collections and single lookups" do
  clear_database
  allow(Time).to receive(:now).and_return(reference_time)
  now = reference_time.to_i
  older_last_heard = now - 120
  recent_last_heard = now - 30
  with_db do |db|
    db.execute(
      "INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
      ["!older-node", "old", "Older", "TBEAM", "CLIENT", 0.0, older_last_heard, older_last_heard],
    )
    db.execute(
      "INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
      ["!recent-node", "new", "Recent", "TBEAM", "CLIENT", 0.0, recent_last_heard, recent_last_heard],
    )
  end
  # Collection: only the node heard at/after the threshold survives.
  get "/api/nodes?since=#{recent_last_heard}"
  expect(last_response).to be_ok
  payload = JSON.parse(last_response.body)
  expect(payload.map { |row| row["node_id"] }).to eq(["!recent-node"])
  # Single lookup: a filtered-out node behaves like a missing node.
  get "/api/nodes/!older-node?since=#{recent_last_heard}"
  expect(last_response.status).to eq(404)
  get "/api/nodes/!recent-node?since=#{recent_last_heard}"
  expect(last_response).to be_ok
  detail = JSON.parse(last_response.body)
  expect(detail["node_id"]).to eq("!recent-node")
end
it "omits blank values from node responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4467,6 +4501,37 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(filtered.map { |row| row["id"] }).to eq([2])
end
# Two position rows for the same node, one stale and one fresh; ?since=
# must filter both the global feed and the node-scoped endpoint.
it "filters positions using the since parameter for both global and node queries" do
  clear_database
  allow(Time).to receive(:now).and_return(reference_time)
  now = reference_time.to_i
  older_rx = now - 180
  recent_rx = now - 15
  with_db do |db|
    db.execute(
      "INSERT INTO positions(id, node_id, node_num, rx_time, rx_iso, position_time, latitude, longitude) VALUES(?,?,?,?,?,?,?,?)",
      [10, "!pos-since", 101, older_rx, Time.at(older_rx).utc.iso8601, older_rx - 5, 52.0, 13.0],
    )
    db.execute(
      "INSERT INTO positions(id, node_id, node_num, rx_time, rx_iso, position_time, latitude, longitude) VALUES(?,?,?,?,?,?,?,?)",
      [11, "!pos-since", 101, recent_rx, Time.at(recent_rx).utc.iso8601, recent_rx - 5, 53.0, 14.0],
    )
  end
  get "/api/positions?since=#{recent_rx}"
  expect(last_response).to be_ok
  payload = JSON.parse(last_response.body)
  expect(payload.map { |row| row["id"] }).to eq([11])
  get "/api/positions/!pos-since?since=#{recent_rx}"
  expect(last_response).to be_ok
  filtered = JSON.parse(last_response.body)
  expect(filtered.map { |row| row["id"] }).to eq([11])
end
it "omits blank values from position responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4565,6 +4630,49 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(filtered.first["rx_time"]).to eq(fresh_rx)
end
# One origin node with two neighbor edges (stale and fresh); ?since= must
# drop the stale edge on both the global and node-scoped neighbor endpoints.
it "honours the since parameter for neighbor queries" do
  clear_database
  allow(Time).to receive(:now).and_return(reference_time)
  now = reference_time.to_i
  older_rx = now - 300
  recent_rx = now - 30
  with_db do |db|
    # All three node rows are "fresh" so only the edge rx_time is filtered.
    db.execute(
      "INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
      ["!origin-since", "orig", "Origin", "TBEAM", "CLIENT", 0.0, now, now],
    )
    db.execute(
      "INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
      ["!neighbor-old", "oldn", "Neighbor Old", "TBEAM", "CLIENT", 0.0, now, now],
    )
    db.execute(
      "INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
      ["!neighbor-new", "newn", "Neighbor New", "TBEAM", "CLIENT", 0.0, now, now],
    )
    db.execute(
      "INSERT INTO neighbors(node_id, neighbor_id, snr, rx_time) VALUES(?,?,?,?)",
      ["!origin-since", "!neighbor-old", 1.5, older_rx],
    )
    db.execute(
      "INSERT INTO neighbors(node_id, neighbor_id, snr, rx_time) VALUES(?,?,?,?)",
      ["!origin-since", "!neighbor-new", 7.5, recent_rx],
    )
  end
  get "/api/neighbors?since=#{recent_rx}"
  expect(last_response).to be_ok
  payload = JSON.parse(last_response.body)
  expect(payload.map { |row| row["neighbor_id"] }).to eq(["!neighbor-new"])
  get "/api/neighbors/!origin-since?since=#{recent_rx}"
  expect(last_response).to be_ok
  filtered = JSON.parse(last_response.body)
  expect(filtered.map { |row| row["neighbor_id"] }).to eq(["!neighbor-new"])
end
it "omits blank values from neighbor responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4695,6 +4803,37 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(filtered.map { |row| row["id"] }).to eq([2])
end
# Two telemetry rows for the same node, one stale and one fresh; ?since=
# must filter both the global feed and the node-scoped endpoint.
it "filters telemetry rows using the since parameter for both global and node-scoped queries" do
  clear_database
  allow(Time).to receive(:now).and_return(reference_time)
  now = reference_time.to_i
  older_rx = now - 300
  recent_rx = now - 60
  with_db do |db|
    db.execute(
      "INSERT INTO telemetry(id, node_id, node_num, rx_time, rx_iso, telemetry_time, battery_level, voltage) VALUES(?,?,?,?,?,?,?,?)",
      [10, "!tele-since", 21, older_rx, Time.at(older_rx).utc.iso8601, older_rx - 5, 20.0, 3.9],
    )
    db.execute(
      "INSERT INTO telemetry(id, node_id, node_num, rx_time, rx_iso, telemetry_time, battery_level, voltage) VALUES(?,?,?,?,?,?,?,?)",
      [11, "!tele-since", 21, recent_rx, Time.at(recent_rx).utc.iso8601, recent_rx - 5, 80.0, 4.1],
    )
  end
  get "/api/telemetry?since=#{recent_rx}"
  expect(last_response).to be_ok
  payload = JSON.parse(last_response.body)
  expect(payload.map { |row| row["id"] }).to eq([11])
  get "/api/telemetry/!tele-since?since=#{recent_rx}"
  expect(last_response).to be_ok
  filtered = JSON.parse(last_response.body)
  expect(filtered.map { |row| row["id"] }).to eq([11])
end
it "omits blank values from telemetry responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4858,6 +4997,34 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(buckets.first["bucket_seconds"]).to eq(PotatoMesh::App::Queries::DEFAULT_TELEMETRY_BUCKET_SECONDS)
end
# With a 1h window, ?since= narrows aggregation to rows at/after the
# threshold, so only the fresh reading contributes to the single bucket.
it "filters aggregated telemetry buckets using the since parameter" do
  clear_database
  allow(Time).to receive(:now).and_return(reference_time)
  now = reference_time.to_i
  older_rx = now - 1800
  recent_rx = now - 120
  with_db do |db|
    db.execute(
      "INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level) VALUES(?,?,?,?,?,?)",
      [801, "!agg-since", older_rx, Time.at(older_rx).utc.iso8601, older_rx - 30, 30.0],
    )
    db.execute(
      "INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level) VALUES(?,?,?,?,?,?)",
      [802, "!agg-since", recent_rx, Time.at(recent_rx).utc.iso8601, recent_rx - 30, 80.0],
    )
  end
  get "/api/telemetry/aggregated?windowSeconds=3600&bucketSeconds=300&since=#{recent_rx}"
  expect(last_response).to be_ok
  buckets = JSON.parse(last_response.body)
  expect(buckets.length).to eq(1)
  aggregates = buckets.first.fetch("aggregates")
  expect(aggregates).to have_key("battery_level")
  # Average reflects only the fresh 80.0 reading; the stale 30.0 is excluded.
  expect_same_value(aggregates.dig("battery_level", "avg"), 80.0)
end
it "omits zero-valued battery and voltage aggregates" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -5025,6 +5192,37 @@ RSpec.describe "Potato Mesh Sinatra app" do
ids = JSON.parse(last_response.body).map { |row| row["id"] }
expect(ids).to eq([50_001])
end
# Two traces between the same src/dest pair; ?since= must drop the stale
# one on both the collection endpoint and the node-scoped endpoint.
it "filters traces using the since parameter for collection and scoped requests" do
  clear_database
  allow(Time).to receive(:now).and_return(reference_time)
  now = reference_time.to_i
  older_rx = now - 300
  recent_rx = now - 25
  with_db do |db|
    db.execute(
      "INSERT INTO traces(id, src, dest, rx_time, rx_iso) VALUES(?,?,?,?,?)",
      [60_001, 123, 456, older_rx, Time.at(older_rx).utc.iso8601],
    )
    db.execute(
      "INSERT INTO traces(id, src, dest, rx_time, rx_iso) VALUES(?,?,?,?,?)",
      [60_002, 123, 456, recent_rx, Time.at(recent_rx).utc.iso8601],
    )
  end
  get "/api/traces?since=#{recent_rx}"
  expect(last_response).to be_ok
  payload = JSON.parse(last_response.body)
  expect(payload.map { |row| row["id"] }).to eq([60_002])
  # Scoped by numeric src reference (123).
  get "/api/traces/123?since=#{recent_rx}"
  expect(last_response).to be_ok
  scoped = JSON.parse(last_response.body)
  expect(scoped.map { |row| row["id"] }).to eq([60_002])
end
end
describe "GET /nodes/:id" do

View File

@@ -159,6 +159,30 @@ RSpec.describe "Ingestor endpoints" do
expect(rich["start_time_iso"]).to be_a(String)
expect(rich["last_seen_iso"]).to be_a(String)
end
# Freezes the clock, seeds a stale and a fresh ingestor heartbeat, and
# checks ?since= keeps only ingestors seen at/after the threshold.
it "filters ingestors using the since parameter" do
  frozen_time = Time.at(1_700_000_000)
  allow(Time).to receive(:now).and_return(frozen_time)
  now = frozen_time.to_i
  recent_cutoff = now - 120
  with_db do |db|
    # last_seen_time 300s ago: older than the cutoff, must be filtered out.
    db.execute(
      "INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
      ["!old-ingestor", now - 600, now - 300, "0.5.5"],
    )
    # last_seen_time 30s ago: inside the cutoff, must be returned.
    db.execute(
      "INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
      ["!new-ingestor", now - 60, now - 30, "0.5.8"],
    )
  end
  get "/api/ingestors?since=#{recent_cutoff}"
  expect(last_response).to be_ok
  payload = JSON.parse(last_response.body)
  expect(payload.map { |entry| entry["node_id"] }).to eq(["!new-ingestor"])
end
end
describe "schema migrations" do