fix(charts): skip short counter intervals (#73)

This commit is contained in:
Jorijn Schrijvershof
2026-01-13 17:09:01 +01:00
committed by GitHub
parent 8372fc5ef0
commit 97ebba4f2d
2 changed files with 59 additions and 6 deletions

View File

@@ -36,6 +36,7 @@ ThemeName = Literal["light", "dark"]
BIN_30_MINUTES = 1800 # 30 minutes in seconds
BIN_2_HOURS = 7200 # 2 hours in seconds
BIN_1_DAY = 86400 # 1 day in seconds
MIN_COUNTER_INTERVAL_RATIO = 0.9 # Allow small scheduling jitter
@dataclass(frozen=True)
@@ -223,25 +224,38 @@ def load_timeseries_from_db(
# For counter metrics, calculate rate of change
if is_counter:
rate_points: list[tuple[datetime, float]] = []
cfg = get_config()
min_interval = max(
1.0,
(cfg.companion_step if role == "companion" else cfg.repeater_step)
* MIN_COUNTER_INTERVAL_RATIO,
)
for i in range(1, len(raw_points)):
prev_ts, prev_val = raw_points[i - 1]
curr_ts, curr_val = raw_points[i]
delta_val = curr_val - prev_val
prev_ts, prev_val = raw_points[0]
for curr_ts, curr_val in raw_points[1:]:
delta_secs = (curr_ts - prev_ts).total_seconds()
if delta_secs <= 0:
continue
if delta_secs < min_interval:
log.debug(
f"Skipping counter sample for {metric} at {curr_ts} "
f"({delta_secs:.1f}s < {min_interval:.1f}s)"
)
continue
delta_val = curr_val - prev_val
# Skip negative deltas (device reboot)
if delta_val < 0:
log.debug(f"Counter reset detected for {metric} at {curr_ts}")
prev_ts, prev_val = curr_ts, curr_val
continue
# Calculate per-second rate, then apply scaling (typically x60 for per-minute)
rate = (delta_val / delta_secs) * scale
rate_points.append((curr_ts, rate))
prev_ts, prev_val = curr_ts, curr_val
raw_points = rate_points
else:

View File

@@ -67,10 +67,49 @@ class TestCounterToRateConversion:
assert ts.points[0].value == pytest.approx(expected_rate)
assert ts.points[1].value == pytest.approx(expected_rate)
def test_applies_scale_factor(self, initialized_db, configured_env):
def test_counter_rate_short_interval_under_step_is_skipped(
    self,
    initialized_db,
    configured_env,
    monkeypatch,
):
    """Samples arriving well under the configured step are dropped so they
    cannot produce artificial rate spikes."""
    t0 = 1704067200
    # Force a 900s repeater step and clear the cached config so the
    # override is actually picked up by the loader.
    monkeypatch.setenv("REPEATER_STEP", "900")
    import meshmon.env

    meshmon.env._config = None
    # (offset, counter value) — the +904 sample lands only 4s after the
    # previous one, well under 0.9 * step, and must be ignored.
    samples = [(0, 0.0), (900, 100.0), (904, 110.0), (1800, 200.0)]
    for offset, value in samples:
        insert_metrics(t0 + offset, "repeater", {"nb_recv": value}, initialized_db)
    ts = load_timeseries_from_db(
        role="repeater",
        metric="nb_recv",
        end_time=datetime.fromtimestamp(t0 + 1800),
        lookback=timedelta(hours=2),
        period="day",
    )
    # Both surviving intervals span a full 900s with a counter delta of 100,
    # so each yields the same per-minute rate.
    per_minute = (100.0 / 900.0) * 60.0
    assert len(ts.points) == 2
    expected_stamps = [
        datetime.fromtimestamp(t0 + 900),
        datetime.fromtimestamp(t0 + 1800),
    ]
    for point, stamp in zip(ts.points, expected_stamps):
        assert point.timestamp == stamp
        assert point.value == pytest.approx(per_minute)
def test_applies_scale_factor(self, initialized_db, configured_env, monkeypatch):
"""Counter rate is scaled (typically x60 for per-minute)."""
base_ts = 1704067200
monkeypatch.setenv("REPEATER_STEP", "60")
import meshmon.env
meshmon.env._config = None
# Insert values 60 seconds apart for easy math
insert_metrics(base_ts, "repeater", {"nb_recv": 0.0}, initialized_db)
insert_metrics(base_ts + 60, "repeater", {"nb_recv": 60.0}, initialized_db)