Don't use ghost shape of telemetry, and fix ceiling behavior

This commit is contained in:
Jack Kingsman
2026-05-13 17:47:24 -07:00
parent 896267ff7e
commit 2778e8bd4f
4 changed files with 70 additions and 53 deletions
+17 -4
View File
@@ -154,6 +154,18 @@ def _repeater_telemetry_payload(data: dict[str, Any]) -> dict[str, Any]:
return payload
def _contact_telemetry_payload(data: dict[str, Any]) -> dict[str, Any]:
"""Build the flat HA state payload for a contact LPP telemetry snapshot.
Unlike repeaters, contacts only have LPP sensor data — no battery_volts,
noise_floor_dbm, packets_received, etc.
"""
payload: dict[str, Any] = {}
for sensor, key, _ in _assign_lpp_keys(data.get("lpp_sensors", []) or []):
payload[key] = sensor.get("value")
return payload
def _lpp_discovery_configs(
prefix: str,
pub_key: str,
@@ -596,7 +608,7 @@ class MqttHaModule(FanoutModule):
)
)
if latest_ct_data:
ct_payload = _repeater_telemetry_payload(latest_ct_data)
ct_payload = _contact_telemetry_payload(latest_ct_data)
cached_repeater_states.append(
(f"{self._prefix}/{_node_id(pub_key)}/telemetry", ct_payload)
)
@@ -782,9 +794,10 @@ class MqttHaModule(FanoutModule):
return
nid = _node_id(pub_key)
# Publish the full telemetry dict — HA sensors use value_template
# to extract individual fields
payload = _repeater_telemetry_payload(data)
is_repeater = pub_key in self._tracked_repeaters
payload = (
_repeater_telemetry_payload(data) if is_repeater else _contact_telemetry_payload(data)
)
lpp_sensors: list[dict] = data.get("lpp_sensors", [])
rediscover = False
for _, key, _ in _assign_lpp_keys(lpp_sensors):
+38 -17
View File
@@ -1968,20 +1968,28 @@ async def _collect_contact_telemetry(mc: MeshCore, contact: Contact) -> bool:
return False
async def _run_telemetry_cycle(*, routed_only: bool = False) -> None:
"""Collect one telemetry sample from tracked repeaters and contacts.
async def _run_telemetry_cycle(
*,
routed_only: bool = False,
collect_repeaters: bool = True,
collect_contacts: bool = True,
) -> None:
"""Collect one telemetry sample from tracked repeaters and/or contacts.
When *routed_only* is True, only targets whose effective route is
``"direct"`` or ``"override"`` (i.e. not ``"flood"``) are collected.
This is used by the hourly routed-path fast-poll feature.
*collect_repeaters* and *collect_contacts* allow the scheduler to
selectively skip one list when its interval hasn't elapsed yet.
"""
if not radio_manager.is_connected:
logger.debug("Telemetry collect: radio not connected, skipping cycle")
return
app_settings = await AppSettingsRepository.get()
tracked_repeaters = app_settings.tracked_telemetry_repeaters
tracked_contacts = app_settings.tracked_telemetry_contacts
tracked_repeaters = app_settings.tracked_telemetry_repeaters if collect_repeaters else []
tracked_contacts = app_settings.tracked_telemetry_contacts if collect_contacts else []
if not tracked_repeaters and not tracked_contacts:
return
@@ -2072,22 +2080,35 @@ async def _maybe_run_scheduled_cycle(now: datetime) -> None:
telemetry).
"""
app_settings = await AppSettingsRepository.get()
tracked_count = len(app_settings.tracked_telemetry_repeaters) + len(
app_settings.tracked_telemetry_contacts
)
if tracked_count == 0:
return
effective_hours = clamp_telemetry_interval(app_settings.telemetry_interval_hours, tracked_count)
if effective_hours <= 0:
n_repeaters = len(app_settings.tracked_telemetry_repeaters)
n_contacts = len(app_settings.tracked_telemetry_contacts)
if n_repeaters == 0 and n_contacts == 0:
return
is_normal_cycle = now.hour % effective_hours == 0
pref = app_settings.telemetry_interval_hours
routed_hourly = app_settings.telemetry_routed_hourly
if is_normal_cycle:
# Normal scheduled boundary: collect ALL tracked targets.
await _run_telemetry_cycle()
elif app_settings.telemetry_routed_hourly:
# Hourly routed-path fast-poll: only targets with a non-flood route.
# Each list has its own 24-samples/day ceiling. Check eligibility
# independently so that, e.g., 8 repeaters clamped to an 8h interval
# don't drag a single contact's schedule out to 8h as well.
repeaters_due = False
contacts_due = False
if n_repeaters > 0:
eff_rep = clamp_telemetry_interval(pref, n_repeaters)
if now.hour % eff_rep == 0:
repeaters_due = True
if n_contacts > 0:
eff_ct = clamp_telemetry_interval(pref, n_contacts)
if now.hour % eff_ct == 0:
contacts_due = True
if repeaters_due or contacts_due:
await _run_telemetry_cycle(
collect_repeaters=repeaters_due,
collect_contacts=contacts_due,
)
elif routed_hourly:
await _run_telemetry_cycle(routed_only=True)
+7 -24
View File
@@ -351,8 +351,6 @@ async def toggle_tracked_telemetry(request: TrackedTelemetryRequest) -> TrackedT
names[k] = contact.name if contact and contact.name else k[:12]
return names
n_contacts = len(settings.tracked_telemetry_contacts)
if key in current:
# Remove
new_list = [k for k in current if k != key]
@@ -362,7 +360,7 @@ async def toggle_tracked_telemetry(request: TrackedTelemetryRequest) -> TrackedT
tracked_telemetry_repeaters=new_list,
names=await _resolve_names(new_list),
schedule=_build_schedule(
len(new_list) + n_contacts,
len(new_list),
settings.telemetry_interval_hours,
settings.telemetry_routed_hourly,
),
@@ -393,7 +391,7 @@ async def toggle_tracked_telemetry(request: TrackedTelemetryRequest) -> TrackedT
tracked_telemetry_repeaters=new_list,
names=await _resolve_names(new_list),
schedule=_build_schedule(
len(new_list) + n_contacts,
len(new_list),
settings.telemetry_interval_hours,
settings.telemetry_routed_hourly,
),
@@ -408,15 +406,10 @@ async def get_telemetry_schedule() -> TelemetrySchedule:
surface saved-vs-effective when they differ, and show the next-run-at
timestamp so users know when the next cycle will fire.
The tracked count includes both repeaters and contacts for ceiling
enforcement.
"""
app_settings = await AppSettingsRepository.get()
combined_count = len(app_settings.tracked_telemetry_repeaters) + len(
app_settings.tracked_telemetry_contacts
)
return _build_schedule(
combined_count,
len(app_settings.tracked_telemetry_repeaters),
app_settings.telemetry_interval_hours,
app_settings.telemetry_routed_hourly,
)
@@ -457,9 +450,6 @@ async def toggle_tracked_telemetry_contact(
names[k] = contact.name if contact and contact.name else k[:12]
return names
def combined_count(lst: list[str]) -> int:
return len(settings.tracked_telemetry_repeaters) + len(lst)
if key in current:
# Remove
new_list = [k for k in current if k != key]
@@ -469,7 +459,7 @@ async def toggle_tracked_telemetry_contact(
tracked_telemetry_contacts=new_list,
names=await _resolve_names(new_list),
schedule=_build_schedule(
combined_count(new_list),
len(new_list),
settings.telemetry_interval_hours,
settings.telemetry_routed_hourly,
),
@@ -503,7 +493,7 @@ async def toggle_tracked_telemetry_contact(
tracked_telemetry_contacts=new_list,
names=await _resolve_names(new_list),
schedule=_build_schedule(
combined_count(new_list),
len(new_list),
settings.telemetry_interval_hours,
settings.telemetry_routed_hourly,
),
@@ -512,17 +502,10 @@ async def toggle_tracked_telemetry_contact(
@router.get("/tracked-telemetry-contacts/schedule", response_model=TelemetrySchedule)
async def get_contact_telemetry_schedule() -> TelemetrySchedule:
"""Return the current telemetry scheduling derivation for contacts.
Uses the combined tracked count (repeaters + contacts) for ceiling
enforcement since they share one collection loop.
"""
"""Return the current telemetry scheduling derivation for contacts."""
app_settings = await AppSettingsRepository.get()
combined_count = len(app_settings.tracked_telemetry_repeaters) + len(
app_settings.tracked_telemetry_contacts
)
return _build_schedule(
combined_count,
len(app_settings.tracked_telemetry_contacts),
app_settings.telemetry_interval_hours,
app_settings.telemetry_routed_hourly,
)
+8 -8
View File
@@ -2504,7 +2504,7 @@ class TestTelemetryCollectSchedulerDecision:
)
ran = False
async def fake_cycle():
async def fake_cycle(**_kwargs):
nonlocal ran
ran = True
@@ -2560,7 +2560,7 @@ class TestTelemetryCollectSchedulerDecision:
)
ran = False
async def fake_cycle():
async def fake_cycle(**_kwargs):
nonlocal ran
ran = True
@@ -2609,7 +2609,7 @@ class TestTelemetryCollectSchedulerDecision:
settings = AppSettings(tracked_telemetry_repeaters=[], telemetry_interval_hours=8)
ran = False
async def fake_cycle():
async def fake_cycle(**_kwargs):
nonlocal ran
ran = True
@@ -2670,7 +2670,7 @@ class TestTelemetryCollectSchedulerDecision:
)
ran = False
async def fake_cycle():
async def fake_cycle(**_kwargs):
nonlocal ran
ran = True
@@ -2733,7 +2733,7 @@ class TestTelemetryCollectSchedulerDecision:
)
ran = False
async def fake_cycle():
async def fake_cycle(**_kwargs):
nonlocal ran
ran = True
@@ -2794,7 +2794,7 @@ class TestRoutedHourlySchedulerDecision:
)
calls = []
async def fake_cycle(*, routed_only=False):
async def fake_cycle(*, routed_only=False, **_kwargs):
calls.append({"routed_only": routed_only})
now = real_datetime.datetime(2026, 4, 16, 9, 0, 0, tzinfo=real_datetime.UTC)
@@ -2828,7 +2828,7 @@ class TestRoutedHourlySchedulerDecision:
)
calls = []
async def fake_cycle(*, routed_only=False):
async def fake_cycle(*, routed_only=False, **_kwargs):
calls.append({"routed_only": routed_only})
now = real_datetime.datetime(2026, 4, 16, 9, 0, 0, tzinfo=real_datetime.UTC)
@@ -2862,7 +2862,7 @@ class TestRoutedHourlySchedulerDecision:
)
calls = []
async def fake_cycle(*, routed_only=False):
async def fake_cycle(*, routed_only=False, **_kwargs):
calls.append({"routed_only": routed_only})
now = real_datetime.datetime(2026, 4, 16, 16, 0, 0, tzinfo=real_datetime.UTC)