mirror of https://github.com/jkingsman/Remote-Terminal-for-MeshCore.git (synced 2026-04-30 18:42:51 +02:00)
Make migrations more better
@@ -7,7 +7,7 @@ from app.config import settings
 
 logger = logging.getLogger(__name__)
 
-SCHEMA = """
+SCHEMA_TABLES = """
 CREATE TABLE IF NOT EXISTS contacts (
     public_key TEXT PRIMARY KEY,
     name TEXT,
@@ -130,7 +130,12 @@ CREATE TABLE IF NOT EXISTS repeater_telemetry_history (
     data TEXT NOT NULL,
     FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
 );
+"""
 
+# Indexes are created after migrations so that legacy databases have all
+# required columns (e.g. sender_key, added by migration 25) before index
+# creation runs.
+SCHEMA_INDEXES = """
 CREATE INDEX IF NOT EXISTS idx_messages_received ON messages(received_at);
 CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_dedup_null_safe
     ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
@@ -185,15 +190,20 @@ class Database:
         # constraints, then re-enabled for all subsequent application queries.
         await self._connection.execute("PRAGMA foreign_keys = OFF")
 
-        await self._connection.executescript(SCHEMA)
+        await self._connection.executescript(SCHEMA_TABLES)
         await self._connection.commit()
-        logger.debug("Database schema initialized")
+        logger.debug("Database tables initialized")
 
-        # Run any pending migrations
+        # Run any pending migrations before creating indexes, so that
+        # legacy databases have all required columns first.
         from app.migrations import run_migrations
 
         await run_migrations(self._connection)
 
+        await self._connection.executescript(SCHEMA_INDEXES)
+        await self._connection.commit()
+        logger.debug("Database indexes initialized")
+
         # Enable FK enforcement for all application queries from this point on.
         await self._connection.execute("PRAGMA foreign_keys = ON")
         logger.debug("Foreign key enforcement enabled")
app/migrations.py (3353 changed lines: diff suppressed because it is too large)
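Note: with the runner's diff suppressed, the sketch below shows one plausible shape for run_migrations, assuming numbered _NNN_*.py modules under app/migrations/ and version tracking via SQLite's user_version pragma; the repository's actual runner may differ.

import importlib
import pkgutil

import aiosqlite


async def run_migrations(conn: aiosqlite.Connection) -> None:
    import app.migrations as migrations_pkg  # assumed package holding the _NNN_*.py modules

    cursor = await conn.execute("PRAGMA user_version")
    row = await cursor.fetchone()
    applied = row[0] if row else 0

    # Zero-padded prefixes make lexicographic order match numeric order.
    names = sorted(
        m.name for m in pkgutil.iter_modules(migrations_pkg.__path__)
        if m.name.startswith("_")
    )
    for name in names:
        number = int(name.split("_")[1])  # "_001_add_last_read_at" -> 1
        if number <= applied:
            continue
        module = importlib.import_module(f"app.migrations.{name}")
        await module.migrate(conn)
        await conn.execute(f"PRAGMA user_version = {number}")
        await conn.commit()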
app/migrations/_001_add_last_read_at.py (new file, 38 lines)
@@ -0,0 +1,38 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add last_read_at column to contacts and channels tables.

    This enables server-side read state tracking, replacing the localStorage
    approach for consistent read state across devices.

    ALTER TABLE ADD COLUMN is safe - it preserves existing data and handles
    the "column already exists" case gracefully.
    """
    # Add to contacts table
    try:
        await conn.execute("ALTER TABLE contacts ADD COLUMN last_read_at INTEGER")
        logger.debug("Added last_read_at to contacts table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("contacts.last_read_at already exists, skipping")
        else:
            raise

    # Add to channels table
    try:
        await conn.execute("ALTER TABLE channels ADD COLUMN last_read_at INTEGER")
        logger.debug("Added last_read_at to channels table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("channels.last_read_at already exists, skipping")
        else:
            raise

    await conn.commit()
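Note: the try/except-OperationalError pattern in this file recurs in most of the migrations below. A hedged helper (not present in the source) that captures the same idempotent "add column" behavior:

import aiosqlite


async def add_column_if_missing(conn: aiosqlite.Connection, table: str, column_def: str) -> None:
    try:
        await conn.execute(f"ALTER TABLE {table} ADD COLUMN {column_def}")
    except aiosqlite.OperationalError as e:
        # Swallow only the "column already exists" case; re-raise anything else.
        if "duplicate column name" not in str(e).lower():
            raise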
app/migrations/_002_drop_decrypt_attempt_columns.py (new file, 32 lines)
@@ -0,0 +1,32 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Drop unused decrypt_attempts and last_attempt columns from raw_packets.

    These columns were added for a retry-limiting feature that was never implemented.
    They are written to but never read, so we can safely remove them.

    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    we silently skip (the columns will remain but are harmless).
    """
    for column in ["decrypt_attempts", "last_attempt"]:
        try:
            await conn.execute(f"ALTER TABLE raw_packets DROP COLUMN {column}")
            logger.debug("Dropped %s from raw_packets table", column)
        except aiosqlite.OperationalError as e:
            error_msg = str(e).lower()
            if "no such column" in error_msg:
                logger.debug("raw_packets.%s already dropped, skipping", column)
            elif "syntax error" in error_msg or "drop column" in error_msg:
                # SQLite version doesn't support DROP COLUMN - harmless, column stays
                logger.debug("SQLite doesn't support DROP COLUMN, %s column will remain", column)
            else:
                raise

    await conn.commit()
app/migrations/_003_drop_decrypted_column.py (new file, 49 lines)
@@ -0,0 +1,49 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Drop the decrypted column and update indexes.

    The decrypted column is redundant with message_id - a packet is decrypted
    iff message_id IS NOT NULL. We replace the decrypted index with a message_id index.

    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    we silently skip the column drop but still update the index.
    """
    # First, drop the old index on decrypted (safe even if it doesn't exist)
    try:
        await conn.execute("DROP INDEX IF EXISTS idx_raw_packets_decrypted")
        logger.debug("Dropped idx_raw_packets_decrypted index")
    except aiosqlite.OperationalError:
        pass  # Index didn't exist

    # Create new index on message_id for efficient undecrypted packet queries
    try:
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_raw_packets_message_id ON raw_packets(message_id)"
        )
        logger.debug("Created idx_raw_packets_message_id index")
    except aiosqlite.OperationalError as e:
        if "already exists" not in str(e).lower():
            raise

    # Try to drop the decrypted column
    try:
        await conn.execute("ALTER TABLE raw_packets DROP COLUMN decrypted")
        logger.debug("Dropped decrypted from raw_packets table")
    except aiosqlite.OperationalError as e:
        error_msg = str(e).lower()
        if "no such column" in error_msg:
            logger.debug("raw_packets.decrypted already dropped, skipping")
        elif "syntax error" in error_msg or "drop column" in error_msg:
            # SQLite version doesn't support DROP COLUMN - harmless, column stays
            logger.debug("SQLite doesn't support DROP COLUMN, decrypted column will remain")
        else:
            raise

    await conn.commit()
app/migrations/_004_add_payload_hash_column.py (new file, 24 lines)
@@ -0,0 +1,24 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add payload_hash column to raw_packets for deduplication.

    This column stores the SHA-256 hash of the packet payload (excluding routing/path info).
    It will be used with a unique index to prevent duplicate packets from being stored.
    """
    try:
        await conn.execute("ALTER TABLE raw_packets ADD COLUMN payload_hash TEXT")
        logger.debug("Added payload_hash column to raw_packets table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("raw_packets.payload_hash already exists, skipping")
        else:
            raise

    await conn.commit()
app/migrations/_005_backfill_payload_hashes.py (new file, 126 lines)
@@ -0,0 +1,126 @@
from hashlib import sha256
import logging

import aiosqlite

logger = logging.getLogger(__name__)


def _extract_payload_for_hash(raw_packet: bytes) -> bytes | None:
    """
    Extract payload from a raw packet for hashing using canonical framing validation.

    Returns the payload bytes, or None if packet is malformed.
    """
    from app.path_utils import parse_packet_envelope

    envelope = parse_packet_envelope(raw_packet)
    return envelope.payload if envelope is not None else None


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Backfill payload_hash for existing packets and remove duplicates.

    This may take a while for large databases. Progress is logged.
    After backfilling, a unique index is created to prevent future duplicates.
    """
    # Get count first
    cursor = await conn.execute("SELECT COUNT(*) FROM raw_packets WHERE payload_hash IS NULL")
    row = await cursor.fetchone()
    total = row[0] if row else 0

    if total == 0:
        logger.debug("No packets need hash backfill")
    else:
        logger.info("Backfilling payload hashes for %d packets. This may take a while...", total)

        # Process in batches to avoid memory issues
        batch_size = 1000
        processed = 0
        duplicates_deleted = 0

        # Track seen hashes to identify duplicates (keep oldest = lowest ID)
        seen_hashes: dict[str, int] = {}  # hash -> oldest packet ID

        # First pass: compute hashes and identify duplicates
        cursor = await conn.execute("SELECT id, data FROM raw_packets ORDER BY id ASC")

        packets_to_update: list[tuple[str, int]] = []  # (hash, id)
        ids_to_delete: list[int] = []

        while True:
            rows = await cursor.fetchmany(batch_size)
            if not rows:
                break

            for row in rows:
                packet_id = row[0]
                packet_data = bytes(row[1])

                # Extract payload and compute hash
                payload = _extract_payload_for_hash(packet_data)
                if payload:
                    payload_hash = sha256(payload).hexdigest()
                else:
                    # For malformed packets, hash the full data
                    payload_hash = sha256(packet_data).hexdigest()

                if payload_hash in seen_hashes:
                    # Duplicate - mark for deletion (we keep the older one)
                    ids_to_delete.append(packet_id)
                    duplicates_deleted += 1
                else:
                    # New hash - keep this packet
                    seen_hashes[payload_hash] = packet_id
                    packets_to_update.append((payload_hash, packet_id))

                processed += 1

                if processed % 10000 == 0:
                    logger.info("Processed %d/%d packets...", processed, total)

        # Second pass: update hashes for packets we're keeping
        total_updates = len(packets_to_update)
        logger.info("Updating %d packets with hashes...", total_updates)
        for idx, (payload_hash, packet_id) in enumerate(packets_to_update, 1):
            await conn.execute(
                "UPDATE raw_packets SET payload_hash = ? WHERE id = ?",
                (payload_hash, packet_id),
            )
            if idx % 10000 == 0:
                logger.info("Updated %d/%d packets...", idx, total_updates)

        # Third pass: delete duplicates
        if ids_to_delete:
            total_deletes = len(ids_to_delete)
            logger.info("Removing %d duplicate packets...", total_deletes)
            deleted_count = 0
            # Delete in batches to avoid "too many SQL variables" error
            for i in range(0, len(ids_to_delete), 500):
                batch = ids_to_delete[i : i + 500]
                placeholders = ",".join("?" * len(batch))
                await conn.execute(f"DELETE FROM raw_packets WHERE id IN ({placeholders})", batch)
                deleted_count += len(batch)
                if deleted_count % 10000 < 500:  # Log roughly every 10k
                    logger.info("Removed %d/%d duplicates...", deleted_count, total_deletes)

        await conn.commit()
        logger.info(
            "Hash backfill complete: %d packets updated, %d duplicates removed",
            len(packets_to_update),
            duplicates_deleted,
        )

    # Create unique index on payload_hash (this enforces uniqueness going forward)
    try:
        await conn.execute(
            "CREATE UNIQUE INDEX IF NOT EXISTS idx_raw_packets_payload_hash "
            "ON raw_packets(payload_hash)"
        )
        logger.debug("Created unique index on payload_hash")
    except aiosqlite.OperationalError as e:
        if "already exists" not in str(e).lower():
            raise

    await conn.commit()
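Note: for illustration, this is how a newly received packet would be deduplicated against the unique payload_hash index created above. parse_packet_envelope comes from app.path_utils as used in the migration; the store_packet function and INSERT column list are assumptions, not code from the repository.

from hashlib import sha256

from app.path_utils import parse_packet_envelope


async def store_packet(conn, timestamp: int, raw: bytes) -> None:
    envelope = parse_packet_envelope(raw)
    # Mirror the migration: hash the framed payload, or the full data if malformed.
    payload = envelope.payload if envelope is not None else raw
    payload_hash = sha256(payload).hexdigest()
    # INSERT OR IGNORE silently drops exact duplicates via idx_raw_packets_payload_hash.
    await conn.execute(
        "INSERT OR IGNORE INTO raw_packets (timestamp, data, payload_hash) VALUES (?, ?, ?)",
        (timestamp, raw, payload_hash),
    )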
app/migrations/_006_replace_path_len_with_path.py (new file, 42 lines)
@@ -0,0 +1,42 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Replace path_len INTEGER column with path TEXT column in messages table.

    The path column stores the hex-encoded routing path bytes. Path length can
    be derived from the hex string (2 chars per byte = 1 hop).

    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    we silently skip the drop (the column will remain but is unused).
    """
    # First, add the new path column
    try:
        await conn.execute("ALTER TABLE messages ADD COLUMN path TEXT")
        logger.debug("Added path column to messages table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("messages.path already exists, skipping")
        else:
            raise

    # Try to drop the old path_len column
    try:
        await conn.execute("ALTER TABLE messages DROP COLUMN path_len")
        logger.debug("Dropped path_len from messages table")
    except aiosqlite.OperationalError as e:
        error_msg = str(e).lower()
        if "no such column" in error_msg:
            logger.debug("messages.path_len already dropped, skipping")
        elif "syntax error" in error_msg or "drop column" in error_msg:
            # SQLite version doesn't support DROP COLUMN - harmless, column stays
            logger.debug("SQLite doesn't support DROP COLUMN, path_len column will remain")
        else:
            raise

    await conn.commit()
app/migrations/_007_backfill_message_paths.py (new file, 96 lines)
@@ -0,0 +1,96 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


def _extract_path_from_packet(raw_packet: bytes) -> str | None:
    """
    Extract path hex string from a raw packet using canonical framing validation.

    Returns the path as a hex string, or None if packet is malformed.
    """
    from app.path_utils import parse_packet_envelope

    envelope = parse_packet_envelope(raw_packet)
    return envelope.path.hex() if envelope is not None else None


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Backfill path column for messages that have linked raw_packets.

    For each message with a linked raw_packet (via message_id), extract the
    path from the raw packet and update the message.

    Only updates incoming messages (outgoing=0) since outgoing messages
    don't have meaningful path data.
    """
    # Get count of messages that need backfill
    cursor = await conn.execute(
        """
        SELECT COUNT(*)
        FROM messages m
        JOIN raw_packets rp ON rp.message_id = m.id
        WHERE m.path IS NULL AND m.outgoing = 0
        """
    )
    row = await cursor.fetchone()
    total = row[0] if row else 0

    if total == 0:
        logger.debug("No messages need path backfill")
        return

    logger.info("Backfilling path for %d messages. This may take a while...", total)

    # Process in batches
    batch_size = 1000
    processed = 0
    updated = 0

    cursor = await conn.execute(
        """
        SELECT m.id, rp.data
        FROM messages m
        JOIN raw_packets rp ON rp.message_id = m.id
        WHERE m.path IS NULL AND m.outgoing = 0
        ORDER BY m.id ASC
        """
    )

    updates: list[tuple[str, int]] = []  # (path, message_id)

    while True:
        rows = await cursor.fetchmany(batch_size)
        if not rows:
            break

        for row in rows:
            message_id = row[0]
            packet_data = bytes(row[1])

            path_hex = _extract_path_from_packet(packet_data)
            if path_hex is not None:
                updates.append((path_hex, message_id))

            processed += 1

            if processed % 10000 == 0:
                logger.info("Processed %d/%d messages...", processed, total)

    # Apply updates in batches
    if updates:
        logger.info("Updating %d messages with path data...", len(updates))
        for idx, (path_hex, message_id) in enumerate(updates, 1):
            await conn.execute(
                "UPDATE messages SET path = ? WHERE id = ?",
                (path_hex, message_id),
            )
            updated += 1
            if idx % 10000 == 0:
                logger.info("Updated %d/%d messages...", idx, len(updates))

    await conn.commit()
    logger.info("Path backfill complete: %d messages updated", updated)
app/migrations/_008_convert_path_to_paths_array.py (new file, 66 lines)
@@ -0,0 +1,66 @@
import json
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Convert path TEXT column to paths TEXT column storing JSON array.

    The new format stores multiple paths as a JSON array of objects:
    [{"path": "1A2B", "received_at": 1234567890}, ...]

    This enables tracking multiple delivery paths for the same message
    (e.g., when a message is received via different repeater routes).
    """

    # First, add the new paths column
    try:
        await conn.execute("ALTER TABLE messages ADD COLUMN paths TEXT")
        logger.debug("Added paths column to messages table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("messages.paths already exists, skipping column add")
        else:
            raise

    # Migrate existing path data to paths array format
    cursor = await conn.execute(
        "SELECT id, path, received_at FROM messages WHERE path IS NOT NULL AND paths IS NULL"
    )
    rows = list(await cursor.fetchall())

    if rows:
        logger.info("Converting %d messages from path to paths array format...", len(rows))
        for row in rows:
            message_id = row[0]
            old_path = row[1]
            received_at = row[2]

            # Convert single path to array format
            paths_json = json.dumps([{"path": old_path, "received_at": received_at}])
            await conn.execute(
                "UPDATE messages SET paths = ? WHERE id = ?",
                (paths_json, message_id),
            )

        logger.info("Converted %d messages to paths array format", len(rows))

    # Try to drop the old path column (SQLite 3.35.0+ only)
    try:
        await conn.execute("ALTER TABLE messages DROP COLUMN path")
        logger.debug("Dropped path column from messages table")
    except aiosqlite.OperationalError as e:
        error_msg = str(e).lower()
        if "no such column" in error_msg:
            logger.debug("messages.path already dropped, skipping")
        elif "syntax error" in error_msg or "drop column" in error_msg:
            # SQLite version doesn't support DROP COLUMN - harmless, column stays
            logger.debug("SQLite doesn't support DROP COLUMN, path column will remain")
        else:
            raise

    await conn.commit()
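Note: a sketch of how the paths array introduced above would grow when the same message arrives over a second route. Column names come from the migration; record_path itself is hypothetical.

import json


async def record_path(conn, message_id: int, path_hex: str, received_at: int) -> None:
    cursor = await conn.execute("SELECT paths FROM messages WHERE id = ?", (message_id,))
    row = await cursor.fetchone()
    paths = json.loads(row[0]) if row and row[0] else []
    # Append the new delivery path alongside any previously recorded ones.
    paths.append({"path": path_hex, "received_at": received_at})
    await conn.execute(
        "UPDATE messages SET paths = ? WHERE id = ?", (json.dumps(paths), message_id)
    )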
app/migrations/_009_create_app_settings_table.py (new file, 41 lines)
@@ -0,0 +1,41 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Create app_settings table for persistent application preferences.

    This table stores:
    - max_radio_contacts: Configured radio contact capacity baseline for maintenance thresholds
    - favorites: JSON array of favorite conversations [{type, id}, ...]
    - auto_decrypt_dm_on_advert: Whether to attempt historical DM decryption on new contact
    - sidebar_sort_order: 'recent' or 'alpha' for sidebar sorting
    - last_message_times: JSON object mapping conversation keys to timestamps
    - preferences_migrated: Flag to track if localStorage has been migrated

    The table uses a single-row pattern (id=1) for simplicity.
    """
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS app_settings (
            id INTEGER PRIMARY KEY CHECK (id = 1),
            max_radio_contacts INTEGER DEFAULT 200,
            favorites TEXT DEFAULT '[]',
            auto_decrypt_dm_on_advert INTEGER DEFAULT 1,
            sidebar_sort_order TEXT DEFAULT 'recent',
            last_message_times TEXT DEFAULT '{}',
            preferences_migrated INTEGER DEFAULT 0
        )
        """
    )

    # Initialize with default row (use only the id column so this works
    # regardless of which columns exist — defaults fill the rest).
    await conn.execute("INSERT OR IGNORE INTO app_settings (id) VALUES (1)")

    await conn.commit()
    logger.debug("Created app_settings table with default values")
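Note: usage sketch for the single-row pattern; because id is constrained to 1, every read and write targets WHERE id = 1. The helper names are illustrative, not from the repository.

async def get_sort_order(conn) -> str:
    cursor = await conn.execute("SELECT sidebar_sort_order FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()
    return row[0] if row else "recent"  # fall back to the column default


async def set_sort_order(conn, order: str) -> None:
    await conn.execute("UPDATE app_settings SET sidebar_sort_order = ? WHERE id = 1", (order,))
    await conn.commit()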
app/migrations/_010_add_advert_interval.py (new file, 23 lines)
@@ -0,0 +1,23 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add advert_interval column to app_settings table.

    This enables configurable periodic advertisement interval (default 0 = disabled).
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN advert_interval INTEGER DEFAULT 0")
        logger.debug("Added advert_interval column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("advert_interval column already exists, skipping")
        else:
            raise

    await conn.commit()
app/migrations/_011_add_last_advert_time.py (new file, 24 lines)
@@ -0,0 +1,24 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add last_advert_time column to app_settings table.

    This tracks when the last advertisement was sent, ensuring we never
    advertise faster than the configured advert_interval.
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN last_advert_time INTEGER DEFAULT 0")
        logger.debug("Added last_advert_time column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("last_advert_time column already exists, skipping")
        else:
            raise

    await conn.commit()
app/migrations/_012_add_bot_settings.py (new file, 33 lines)
@@ -0,0 +1,33 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add bot_enabled and bot_code columns to app_settings table.

    This enables user-defined Python code to be executed when messages are received,
    allowing for custom bot responses.
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN bot_enabled INTEGER DEFAULT 0")
        logger.debug("Added bot_enabled column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("bot_enabled column already exists, skipping")
        else:
            raise

    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN bot_code TEXT DEFAULT ''")
        logger.debug("Added bot_code column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("bot_code column already exists, skipping")
        else:
            raise

    await conn.commit()
app/migrations/_013_convert_to_multi_bot.py (new file, 76 lines)
@@ -0,0 +1,76 @@
import json
import uuid
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Convert single bot_enabled/bot_code to multi-bot format.

    Adds a 'bots' TEXT column storing a JSON array of bot configs:
    [{"id": "uuid", "name": "Bot 1", "enabled": true, "code": "..."}]

    If existing bot_code is non-empty OR bot_enabled is true, migrates
    to a single bot named "Bot 1". Otherwise, creates empty array.

    Attempts to drop the old bot_enabled and bot_code columns.
    """

    # Add new bots column
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN bots TEXT DEFAULT '[]'")
        logger.debug("Added bots column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("bots column already exists, skipping")
        else:
            raise

    # Migrate existing bot data
    cursor = await conn.execute("SELECT bot_enabled, bot_code FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()

    if row:
        bot_enabled = bool(row[0]) if row[0] is not None else False
        bot_code = row[1] or ""

        # If there's existing bot data, migrate it
        if bot_code.strip() or bot_enabled:
            bots = [
                {
                    "id": str(uuid.uuid4()),
                    "name": "Bot 1",
                    "enabled": bot_enabled,
                    "code": bot_code,
                }
            ]
            bots_json = json.dumps(bots)
            logger.info("Migrating existing bot to multi-bot format: enabled=%s", bot_enabled)
        else:
            bots_json = "[]"

        await conn.execute(
            "UPDATE app_settings SET bots = ? WHERE id = 1",
            (bots_json,),
        )

    # Try to drop old columns (SQLite 3.35.0+ only)
    for column in ["bot_enabled", "bot_code"]:
        try:
            await conn.execute(f"ALTER TABLE app_settings DROP COLUMN {column}")
            logger.debug("Dropped %s column from app_settings", column)
        except aiosqlite.OperationalError as e:
            error_msg = str(e).lower()
            if "no such column" in error_msg:
                logger.debug("app_settings.%s already dropped, skipping", column)
            elif "syntax error" in error_msg or "drop column" in error_msg:
                # SQLite version doesn't support DROP COLUMN - harmless, column stays
                logger.debug("SQLite doesn't support DROP COLUMN, %s column will remain", column)
            else:
                raise

    await conn.commit()
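Note: a sketch of how the bots JSON might be consumed at message time. Field names match the migration docstring; the enabled_bots helper is hypothetical.

import json


async def enabled_bots(conn) -> list[dict]:
    cursor = await conn.execute("SELECT bots FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()
    bots = json.loads(row[0]) if row and row[0] else []
    # Only bots explicitly flagged enabled get their code run.
    return [b for b in bots if b.get("enabled")]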
app/migrations/_014_lowercase_public_keys.py (new file, 152 lines)
@@ -0,0 +1,152 @@
import json
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Lowercase all contact public keys and related data for case-insensitive matching.

    Updates:
    - contacts.public_key (PRIMARY KEY) via temp table swap
    - messages.conversation_key for PRIV messages
    - app_settings.favorites (contact IDs)
    - app_settings.last_message_times (contact- prefixed keys)

    Handles case collisions by keeping the most-recently-seen contact.
    """

    # 1. Lowercase message conversation keys for private messages
    try:
        await conn.execute(
            "UPDATE messages SET conversation_key = lower(conversation_key) WHERE type = 'PRIV'"
        )
        logger.debug("Lowercased PRIV message conversation_keys")
    except aiosqlite.OperationalError as e:
        if "no such table" in str(e).lower():
            logger.debug("messages table does not exist yet, skipping conversation_key lowercase")
        else:
            raise

    # 2. Check if contacts table exists before proceeding
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if not await cursor.fetchone():
        logger.debug("contacts table does not exist yet, skipping key lowercase")
        await conn.commit()
        return

    # 3. Handle contacts table - check for case collisions first
    cursor = await conn.execute(
        "SELECT lower(public_key) as lk, COUNT(*) as cnt "
        "FROM contacts GROUP BY lower(public_key) HAVING COUNT(*) > 1"
    )
    collisions = list(await cursor.fetchall())

    if collisions:
        logger.warning(
            "Found %d case-colliding contact groups, keeping most-recently-seen",
            len(collisions),
        )
        for row in collisions:
            lower_key = row[0]
            # Delete all but the most recently seen
            await conn.execute(
                """DELETE FROM contacts WHERE public_key IN (
                    SELECT public_key FROM contacts
                    WHERE lower(public_key) = ?
                    ORDER BY COALESCE(last_seen, 0) DESC
                    LIMIT -1 OFFSET 1
                )""",
                (lower_key,),
            )

    # 4. Rebuild contacts with lowercased keys
    # Get the actual column names from the table (handles different schema versions)
    cursor = await conn.execute("PRAGMA table_info(contacts)")
    columns_info = await cursor.fetchall()
    all_columns = [col[1] for col in columns_info]  # col[1] is column name

    # Build column lists, lowering public_key
    select_cols = ", ".join(f"lower({c})" if c == "public_key" else c for c in all_columns)
    col_defs = []
    for col in columns_info:
        name, col_type, _notnull, default, pk = col[1], col[2], col[3], col[4], col[5]
        parts = [name, col_type or "TEXT"]
        if pk:
            parts.append("PRIMARY KEY")
        if default is not None:
            parts.append(f"DEFAULT {default}")
        col_defs.append(" ".join(parts))

    create_sql = f"CREATE TABLE contacts_new ({', '.join(col_defs)})"
    await conn.execute(create_sql)
    await conn.execute(f"INSERT INTO contacts_new SELECT {select_cols} FROM contacts")
    await conn.execute("DROP TABLE contacts")
    await conn.execute("ALTER TABLE contacts_new RENAME TO contacts")

    # Recreate the on_radio index (if column exists)
    if "on_radio" in all_columns:
        await conn.execute("CREATE INDEX IF NOT EXISTS idx_contacts_on_radio ON contacts(on_radio)")

    # 5. Lowercase contact IDs in favorites JSON (if app_settings exists)
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
    )
    if not await cursor.fetchone():
        await conn.commit()
        logger.info("Lowercased all contact public keys (no app_settings table)")
        return

    cursor = await conn.execute("SELECT favorites FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()
    if row and row[0]:
        try:
            favorites = json.loads(row[0])
            updated = False
            for fav in favorites:
                if fav.get("type") == "contact" and fav.get("id"):
                    new_id = fav["id"].lower()
                    if new_id != fav["id"]:
                        fav["id"] = new_id
                        updated = True
            if updated:
                await conn.execute(
                    "UPDATE app_settings SET favorites = ? WHERE id = 1",
                    (json.dumps(favorites),),
                )
                logger.debug("Lowercased contact IDs in favorites")
        except (json.JSONDecodeError, TypeError):
            pass

    # 6. Lowercase contact keys in last_message_times JSON
    cursor = await conn.execute("SELECT last_message_times FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()
    if row and row[0]:
        try:
            times = json.loads(row[0])
            new_times = {}
            updated = False
            for key, val in times.items():
                if key.startswith("contact-"):
                    new_key = "contact-" + key[8:].lower()
                    if new_key != key:
                        updated = True
                    new_times[new_key] = val
                else:
                    new_times[key] = val
            if updated:
                await conn.execute(
                    "UPDATE app_settings SET last_message_times = ? WHERE id = 1",
                    (json.dumps(new_times),),
                )
                logger.debug("Lowercased contact keys in last_message_times")
        except (json.JSONDecodeError, TypeError):
            pass

    await conn.commit()
    logger.info("Lowercased all contact public keys")
app/migrations/_015_fix_null_sender_timestamp.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Fix NULL sender_timestamp values and add null-safe dedup index.

    1. Set sender_timestamp = received_at for any messages with NULL sender_timestamp
    2. Create a null-safe unique index as belt-and-suspenders protection
    """
    # Check if messages table exists
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if not await cursor.fetchone():
        logger.debug("messages table does not exist yet, skipping NULL sender_timestamp fix")
        await conn.commit()
        return

    # Backfill NULL sender_timestamps with received_at
    cursor = await conn.execute(
        "UPDATE messages SET sender_timestamp = received_at WHERE sender_timestamp IS NULL"
    )
    if cursor.rowcount > 0:
        logger.info("Backfilled %d messages with NULL sender_timestamp", cursor.rowcount)

    # Try to create null-safe dedup index (may fail if existing duplicates exist)
    try:
        await conn.execute(
            """CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_dedup_null_safe
            ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
        )
        logger.debug("Created null-safe dedup index")
    except aiosqlite.IntegrityError:
        logger.warning(
            "Could not create null-safe dedup index due to existing duplicates - "
            "the application-level dedup will handle these"
        )

    await conn.commit()
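Note: why the COALESCE wrapper matters. SQLite treats NULLs as distinct for UNIQUE purposes, so a plain unique index admits unlimited rows with NULL sender_timestamp; coalescing to 0 makes such rows collide. A standalone demo with the stdlib sqlite3 module (separate from the app code):

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE t (ts INTEGER)")
db.execute("CREATE UNIQUE INDEX plain_unique ON t(ts)")
db.execute("INSERT INTO t VALUES (NULL)")
db.execute("INSERT INTO t VALUES (NULL)")  # accepted: NULL is distinct from NULL
try:
    db.execute("CREATE UNIQUE INDEX null_safe ON t(COALESCE(ts, 0))")
except sqlite3.IntegrityError:
    # Exactly the "existing duplicates" failure the migration logs a warning for.
    print("existing NULL duplicates block the null-safe index")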
app/migrations/_016_add_experimental_channel_double_send.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add experimental_channel_double_send column to app_settings table.

    When enabled, channel sends perform an immediate byte-perfect duplicate send
    using the same timestamp bytes.
    """
    try:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN experimental_channel_double_send INTEGER DEFAULT 0"
        )
        logger.debug("Added experimental_channel_double_send column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("experimental_channel_double_send column already exists, skipping")
        else:
            raise

    await conn.commit()
app/migrations/_017_drop_experimental_channel_double_send.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Drop experimental_channel_double_send column from app_settings.

    This feature is replaced by a user-triggered resend button.
    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    we silently skip (the column will remain but is unused).
    """
    try:
        await conn.execute("ALTER TABLE app_settings DROP COLUMN experimental_channel_double_send")
        logger.debug("Dropped experimental_channel_double_send from app_settings")
    except aiosqlite.OperationalError as e:
        error_msg = str(e).lower()
        if "no such column" in error_msg:
            logger.debug("app_settings.experimental_channel_double_send already dropped, skipping")
        elif "syntax error" in error_msg or "drop column" in error_msg:
            logger.debug(
                "SQLite doesn't support DROP COLUMN, "
                "experimental_channel_double_send column will remain"
            )
        else:
            raise

    await conn.commit()
app/migrations/_018_drop_raw_packets_data_unique.py (new file, 64 lines)
@@ -0,0 +1,64 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Drop the UNIQUE constraint on raw_packets.data via table rebuild.

    This constraint creates a large autoindex (~30 MB on a 340K-row database) that
    stores a complete copy of every raw packet BLOB in a B-tree. Deduplication is
    already handled by the unique index on payload_hash, making the data UNIQUE
    constraint pure storage overhead.

    Requires table recreation since SQLite doesn't support DROP CONSTRAINT.
    """
    # Check if the autoindex exists (indicates UNIQUE constraint on data)
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='index' "
        "AND name='sqlite_autoindex_raw_packets_1'"
    )
    if not await cursor.fetchone():
        logger.debug("raw_packets.data UNIQUE constraint already absent, skipping rebuild")
        await conn.commit()
        return

    logger.info("Rebuilding raw_packets table to remove UNIQUE(data) constraint...")

    # Get current columns from the existing table
    cursor = await conn.execute("PRAGMA table_info(raw_packets)")
    old_cols = {col[1] for col in await cursor.fetchall()}

    # Target schema without UNIQUE on data
    await conn.execute("""
        CREATE TABLE raw_packets_new (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            timestamp INTEGER NOT NULL,
            data BLOB NOT NULL,
            message_id INTEGER,
            payload_hash TEXT,
            FOREIGN KEY (message_id) REFERENCES messages(id)
        )
    """)

    # Copy only columns that exist in both old and new tables
    new_cols = {"id", "timestamp", "data", "message_id", "payload_hash"}
    copy_cols = ", ".join(sorted(c for c in new_cols if c in old_cols))

    await conn.execute(
        f"INSERT INTO raw_packets_new ({copy_cols}) SELECT {copy_cols} FROM raw_packets"
    )
    await conn.execute("DROP TABLE raw_packets")
    await conn.execute("ALTER TABLE raw_packets_new RENAME TO raw_packets")

    # Recreate indexes
    await conn.execute(
        "CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
    )
    await conn.execute("CREATE INDEX idx_raw_packets_message_id ON raw_packets(message_id)")

    await conn.commit()
    logger.info("raw_packets table rebuilt without UNIQUE(data) constraint")
app/migrations/_019_drop_messages_unique_constraint.py (new file, 83 lines)
@@ -0,0 +1,83 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Drop the UNIQUE(type, conversation_key, text, sender_timestamp) constraint on messages.

    This constraint creates a large autoindex (~13 MB on a 112K-row database) that
    stores the full message text in a B-tree. The idx_messages_dedup_null_safe unique
    index already provides identical dedup protection — no rows have NULL
    sender_timestamp since migration 15 backfilled them all.

    INSERT OR IGNORE still works correctly because it checks all unique constraints,
    including unique indexes like idx_messages_dedup_null_safe.

    Requires table recreation since SQLite doesn't support DROP CONSTRAINT.
    """
    # Check if the autoindex exists (indicates UNIQUE constraint)
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='index' AND name='sqlite_autoindex_messages_1'"
    )
    if not await cursor.fetchone():
        logger.debug("messages UNIQUE constraint already absent, skipping rebuild")
        await conn.commit()
        return

    logger.info("Rebuilding messages table to remove UNIQUE constraint...")

    # Get current columns from the existing table
    cursor = await conn.execute("PRAGMA table_info(messages)")
    old_cols = {col[1] for col in await cursor.fetchall()}

    # Target schema without the UNIQUE table constraint
    await conn.execute("""
        CREATE TABLE messages_new (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            type TEXT NOT NULL,
            conversation_key TEXT NOT NULL,
            text TEXT NOT NULL,
            sender_timestamp INTEGER,
            received_at INTEGER NOT NULL,
            txt_type INTEGER DEFAULT 0,
            signature TEXT,
            outgoing INTEGER DEFAULT 0,
            acked INTEGER DEFAULT 0,
            paths TEXT
        )
    """)

    # Copy only columns that exist in both old and new tables
    new_cols = {
        "id",
        "type",
        "conversation_key",
        "text",
        "sender_timestamp",
        "received_at",
        "txt_type",
        "signature",
        "outgoing",
        "acked",
        "paths",
    }
    copy_cols = ", ".join(sorted(c for c in new_cols if c in old_cols))

    await conn.execute(f"INSERT INTO messages_new ({copy_cols}) SELECT {copy_cols} FROM messages")
    await conn.execute("DROP TABLE messages")
    await conn.execute("ALTER TABLE messages_new RENAME TO messages")

    # Recreate indexes
    await conn.execute("CREATE INDEX idx_messages_conversation ON messages(type, conversation_key)")
    await conn.execute("CREATE INDEX idx_messages_received ON messages(received_at)")
    await conn.execute(
        """CREATE UNIQUE INDEX idx_messages_dedup_null_safe
        ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
    )

    await conn.commit()
    logger.info("messages table rebuilt without UNIQUE constraint")
app/migrations/_020_enable_wal_and_auto_vacuum.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Enable WAL journal mode and incremental auto-vacuum.

    WAL (Write-Ahead Logging):
    - Faster writes: appends to a WAL file instead of rewriting the main DB
    - Concurrent reads during writes (readers don't block writers)
    - No journal file create/delete churn on every commit

    Incremental auto-vacuum:
    - Pages freed by DELETE become reclaimable without a full VACUUM
    - Call PRAGMA incremental_vacuum to reclaim on demand
    - Less overhead than FULL auto-vacuum (which reorganizes on every commit)

    auto_vacuum mode change requires a VACUUM to restructure the file.
    The VACUUM is performed before switching to WAL so it runs under the
    current journal mode; WAL is then set as the final step.
    """
    # Check current auto_vacuum mode
    cursor = await conn.execute("PRAGMA auto_vacuum")
    row = await cursor.fetchone()
    current_auto_vacuum = row[0] if row else 0

    if current_auto_vacuum != 2:  # 2 = INCREMENTAL
        logger.info("Switching auto_vacuum to INCREMENTAL (requires VACUUM)...")
        await conn.execute("PRAGMA auto_vacuum = INCREMENTAL")
        await conn.execute("VACUUM")
        logger.info("VACUUM complete, auto_vacuum set to INCREMENTAL")
    else:
        logger.debug("auto_vacuum already INCREMENTAL, skipping VACUUM")

    # Enable WAL mode (idempotent — returns current mode)
    cursor = await conn.execute("PRAGMA journal_mode = WAL")
    row = await cursor.fetchone()
    mode = row[0] if row else "unknown"
    logger.info("Journal mode set to %s", mode)

    await conn.commit()
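Note: with auto_vacuum=INCREMENTAL, freed pages accumulate until explicitly reclaimed. A maintenance task could do so as below; the function name and the page count of 1000 are illustrative.

async def reclaim_free_pages(conn) -> None:
    # Reclaim up to 1000 free pages; a bare "PRAGMA incremental_vacuum" reclaims all.
    await conn.execute("PRAGMA incremental_vacuum(1000)")
    await conn.commit()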
app/migrations/_021_enforce_min_advert_interval.py (new file, 29 lines)
@@ -0,0 +1,29 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Enforce minimum 1-hour advert interval.

    Any advert_interval between 1 and 3599 is clamped up to 3600 (1 hour).
    Zero (disabled) is left unchanged.
    """
    # Guard: app_settings table may not exist if running against a very old schema
    # (it's created in migration 9). The UPDATE is harmless if the table exists
    # but has no rows, but will error if the table itself is missing.
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
    )
    if await cursor.fetchone() is None:
        logger.debug("app_settings table does not exist yet, skipping advert_interval clamp")
        return

    await conn.execute(
        "UPDATE app_settings SET advert_interval = 3600 WHERE advert_interval > 0 AND advert_interval < 3600"
    )
    await conn.commit()
    logger.debug("Clamped advert_interval to minimum 3600 seconds")
app/migrations/_022_add_repeater_advert_paths.py (new file, 33 lines)
@@ -0,0 +1,33 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Create table for recent unique advert paths per repeater.

    This keeps path diversity for repeater advertisements without changing the
    existing payload-hash raw packet dedup policy.
    """
    await conn.execute("""
        CREATE TABLE IF NOT EXISTS repeater_advert_paths (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            repeater_key TEXT NOT NULL,
            path_hex TEXT NOT NULL,
            path_len INTEGER NOT NULL,
            first_seen INTEGER NOT NULL,
            last_seen INTEGER NOT NULL,
            heard_count INTEGER NOT NULL DEFAULT 1,
            UNIQUE(repeater_key, path_hex),
            FOREIGN KEY (repeater_key) REFERENCES contacts(public_key)
        )
    """)
    await conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_repeater_advert_paths_recent "
        "ON repeater_advert_paths(repeater_key, last_seen DESC)"
    )
    await conn.commit()
    logger.debug("Ensured repeater_advert_paths table and indexes exist")
app/migrations/_023_add_first_seen.py (new file, 60 lines)
@@ -0,0 +1,60 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add first_seen column to contacts table.

    Backfill strategy:
    1. Set first_seen = last_seen for all contacts (baseline).
    2. For contacts with PRIV messages, set first_seen = MIN(messages.received_at)
       if that timestamp is earlier.
    """
    # Guard: skip if contacts table doesn't exist (e.g. partial test schemas)
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if not await cursor.fetchone():
        return

    try:
        await conn.execute("ALTER TABLE contacts ADD COLUMN first_seen INTEGER")
        logger.debug("Added first_seen to contacts table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("contacts.first_seen already exists, skipping")
        else:
            raise

    # Baseline: set first_seen = last_seen for all contacts
    # Check if last_seen column exists (should in production, may not in minimal test schemas)
    cursor = await conn.execute("PRAGMA table_info(contacts)")
    columns = {row[1] for row in await cursor.fetchall()}
    if "last_seen" in columns:
        await conn.execute("UPDATE contacts SET first_seen = last_seen WHERE first_seen IS NULL")

    # Refine: for contacts with PRIV messages, use earliest message timestamp if earlier
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if await cursor.fetchone():
        await conn.execute(
            """
            UPDATE contacts SET first_seen = (
                SELECT MIN(m.received_at) FROM messages m
                WHERE m.type = 'PRIV' AND m.conversation_key = contacts.public_key
            )
            WHERE EXISTS (
                SELECT 1 FROM messages m
                WHERE m.type = 'PRIV' AND m.conversation_key = contacts.public_key
                AND m.received_at < COALESCE(contacts.first_seen, 9999999999)
            )
            """
        )

    await conn.commit()
    logger.debug("Added and backfilled first_seen column")
app/migrations/_024_create_contact_name_history.py (new file, 53 lines)
@@ -0,0 +1,53 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Create contact_name_history table and seed with current contact names.
    """
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS contact_name_history (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            public_key TEXT NOT NULL,
            name TEXT NOT NULL,
            first_seen INTEGER NOT NULL,
            last_seen INTEGER NOT NULL,
            UNIQUE(public_key, name),
            FOREIGN KEY (public_key) REFERENCES contacts(public_key)
        )
        """
    )
    await conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_contact_name_history_key "
        "ON contact_name_history(public_key, last_seen DESC)"
    )

    # Seed: one row per contact from current data (skip if contacts table doesn't exist
    # or lacks needed columns)
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if await cursor.fetchone():
        cursor = await conn.execute("PRAGMA table_info(contacts)")
        cols = {row[1] for row in await cursor.fetchall()}
        if "name" in cols and "public_key" in cols:
            first_seen_expr = "first_seen" if "first_seen" in cols else "0"
            last_seen_expr = "last_seen" if "last_seen" in cols else "0"
            await conn.execute(
                f"""
                INSERT OR IGNORE INTO contact_name_history (public_key, name, first_seen, last_seen)
                SELECT public_key, name,
                       COALESCE({first_seen_expr}, {last_seen_expr}, 0),
                       COALESCE({last_seen_expr}, 0)
                FROM contacts
                WHERE name IS NOT NULL AND name != ''
                """
            )

    await conn.commit()
    logger.debug("Created contact_name_history table and seeded from contacts")
app/migrations/_025_add_sender_columns.py (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Add sender_name and sender_key columns to messages table.
|
||||
|
||||
Backfill:
|
||||
- sender_name for CHAN messages: extract from "Name: message" format
|
||||
- sender_key for CHAN messages: match name to contact (skip ambiguous)
|
||||
- sender_key for incoming PRIV messages: set to conversation_key
|
||||
"""
|
||||
# Guard: skip if messages table doesn't exist
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
return
|
||||
|
||||
for column in ["sender_name", "sender_key"]:
|
||||
try:
|
||||
await conn.execute(f"ALTER TABLE messages ADD COLUMN {column} TEXT")
|
||||
logger.debug("Added %s to messages table", column)
|
||||
except aiosqlite.OperationalError as e:
|
||||
if "duplicate column name" in str(e).lower():
|
||||
logger.debug("messages.%s already exists, skipping", column)
|
||||
else:
|
||||
raise
|
||||
|
||||
# Check which columns the messages table has (may be minimal in test environments)
|
||||
cursor = await conn.execute("PRAGMA table_info(messages)")
|
||||
msg_cols = {row[1] for row in await cursor.fetchall()}
|
||||
|
||||
# Only backfill if the required columns exist
|
||||
if "type" in msg_cols and "text" in msg_cols:
|
||||
# Count messages to backfill for progress reporting
|
||||
cursor = await conn.execute(
|
||||
"SELECT COUNT(*) FROM messages WHERE type = 'CHAN' AND sender_name IS NULL"
|
||||
)
|
||||
row = await cursor.fetchone()
|
||||
chan_count = row[0] if row else 0
|
||||
if chan_count > 0:
|
||||
logger.info("Backfilling sender_name for %d channel messages...", chan_count)
|
||||
|
||||
# Backfill sender_name for CHAN messages from "Name: message" format
|
||||
# Only extract if colon position is valid (> 1 and < 51, i.e. name is 1-50 chars)
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
UPDATE messages SET sender_name = SUBSTR(text, 1, INSTR(text, ': ') - 1)
|
||||
WHERE type = 'CHAN' AND sender_name IS NULL
|
||||
AND INSTR(text, ': ') > 1 AND INSTR(text, ': ') < 52
|
||||
"""
|
||||
)
|
||||
if cursor.rowcount > 0:
|
||||
logger.info("Backfilled sender_name for %d channel messages", cursor.rowcount)
|
||||
|
||||
# Backfill sender_key for incoming PRIV messages
|
||||
if "outgoing" in msg_cols and "conversation_key" in msg_cols:
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
UPDATE messages SET sender_key = conversation_key
|
||||
WHERE type = 'PRIV' AND outgoing = 0 AND sender_key IS NULL
|
||||
"""
|
||||
)
|
||||
if cursor.rowcount > 0:
|
||||
logger.info("Backfilled sender_key for %d DM messages", cursor.rowcount)
|
||||
|
||||
# Backfill sender_key for CHAN messages: match sender_name to contacts
|
||||
# Build name->key map, skip ambiguous names (multiple contacts with same name)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
|
||||
)
|
||||
if await cursor.fetchone():
|
||||
cursor = await conn.execute(
|
||||
"SELECT public_key, name FROM contacts WHERE name IS NOT NULL AND name != ''"
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
|
||||
name_to_keys: dict[str, list[str]] = {}
|
||||
for row in rows:
|
||||
name = row["name"]
|
||||
key = row["public_key"]
|
||||
if name not in name_to_keys:
|
||||
name_to_keys[name] = []
|
||||
name_to_keys[name].append(key)
|
||||
|
||||
# Only use unambiguous names (single contact per name)
|
||||
unambiguous = {n: ks[0] for n, ks in name_to_keys.items() if len(ks) == 1}
|
||||
if unambiguous:
|
||||
logger.info(
|
||||
"Matching sender_key for %d unique contact names...",
|
||||
len(unambiguous),
|
||||
)
|
||||
# Use a temp table for a single bulk UPDATE instead of N individual queries
|
||||
await conn.execute(
|
||||
"CREATE TEMP TABLE _name_key_map (name TEXT PRIMARY KEY, public_key TEXT NOT NULL)"
|
||||
)
|
||||
await conn.executemany(
|
||||
"INSERT INTO _name_key_map (name, public_key) VALUES (?, ?)",
|
||||
list(unambiguous.items()),
|
||||
)
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
UPDATE messages SET sender_key = (
|
||||
SELECT public_key FROM _name_key_map WHERE _name_key_map.name = messages.sender_name
|
||||
)
|
||||
WHERE type = 'CHAN' AND sender_key IS NULL
|
||||
AND sender_name IN (SELECT name FROM _name_key_map)
|
||||
"""
|
||||
)
|
||||
updated = cursor.rowcount
|
||||
await conn.execute("DROP TABLE _name_key_map")
|
||||
if updated > 0:
|
||||
logger.info("Backfilled sender_key for %d channel messages", updated)
|
||||
|
||||
# Create index on sender_key for per-contact channel message counts
|
||||
await conn.execute("CREATE INDEX IF NOT EXISTS idx_messages_sender_key ON messages(sender_key)")
|
||||
|
||||
await conn.commit()
|
||||
logger.debug("Added sender_name and sender_key columns with backfill")
|
||||
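For readers tracing the backfill, the SUBSTR/INSTR expression above corresponds to this Python logic (a minimal illustrative sketch, not part of the commit; the 1-50 character name cap is the same bound the SQL enforces):

def extract_sender_name(text: str) -> str | None:
    # Mirrors SUBSTR(text, 1, INSTR(text, ': ') - 1) with the 1-50 char bound:
    # str.find() is 0-based, so find() == INSTR() - 1 == the extracted name's length.
    sep = text.find(": ")
    if 1 <= sep <= 50:
        return text[:sep]
    return None

assert extract_sender_name("Alice: hello mesh") == "Alice"
assert extract_sender_name("no separator here") is None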
81
app/migrations/_026_rename_advert_paths_table.py
Normal file
@@ -0,0 +1,81 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Rename repeater_advert_paths to contact_advert_paths with column
    repeater_key -> public_key.

    Uses table rebuild since ALTER TABLE RENAME COLUMN may not be available
    in older SQLite versions.
    """
    # Check if old table exists
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='repeater_advert_paths'"
    )
    if not await cursor.fetchone():
        # Already renamed or doesn't exist — ensure new table exists
        await conn.execute(
            """
            CREATE TABLE IF NOT EXISTS contact_advert_paths (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                public_key TEXT NOT NULL,
                path_hex TEXT NOT NULL,
                path_len INTEGER NOT NULL,
                first_seen INTEGER NOT NULL,
                last_seen INTEGER NOT NULL,
                heard_count INTEGER NOT NULL DEFAULT 1,
                UNIQUE(public_key, path_hex, path_len),
                FOREIGN KEY (public_key) REFERENCES contacts(public_key)
            )
            """
        )
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
            "ON contact_advert_paths(public_key, last_seen DESC)"
        )
        await conn.commit()
        logger.debug("contact_advert_paths already exists or old table missing, skipping rename")
        return

    # Create new table (IF NOT EXISTS in case SCHEMA already created it)
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS contact_advert_paths (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            public_key TEXT NOT NULL,
            path_hex TEXT NOT NULL,
            path_len INTEGER NOT NULL,
            first_seen INTEGER NOT NULL,
            last_seen INTEGER NOT NULL,
            heard_count INTEGER NOT NULL DEFAULT 1,
            UNIQUE(public_key, path_hex, path_len),
            FOREIGN KEY (public_key) REFERENCES contacts(public_key)
        )
        """
    )

    # Copy data (INSERT OR IGNORE in case of duplicates)
    await conn.execute(
        """
        INSERT OR IGNORE INTO contact_advert_paths (public_key, path_hex, path_len, first_seen, last_seen, heard_count)
        SELECT repeater_key, path_hex, path_len, first_seen, last_seen, heard_count
        FROM repeater_advert_paths
        """
    )

    # Drop old table
    await conn.execute("DROP TABLE repeater_advert_paths")

    # Create index
    await conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
        "ON contact_advert_paths(public_key, last_seen DESC)"
    )

    await conn.commit()
    logger.info("Renamed repeater_advert_paths to contact_advert_paths")
36
app/migrations/_027_backfill_first_seen_from_advert_paths.py
Normal file
@@ -0,0 +1,36 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Backfill contacts.first_seen from contact_advert_paths where advert path
    first_seen is earlier than the contact's current first_seen.
    """
    # Guard: skip if either table doesn't exist
    for table in ("contacts", "contact_advert_paths"):
        cursor = await conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table,)
        )
        if not await cursor.fetchone():
            return

    await conn.execute(
        """
        UPDATE contacts SET first_seen = (
            SELECT MIN(cap.first_seen) FROM contact_advert_paths cap
            WHERE cap.public_key = contacts.public_key
        )
        WHERE EXISTS (
            SELECT 1 FROM contact_advert_paths cap
            WHERE cap.public_key = contacts.public_key
            AND cap.first_seen < COALESCE(contacts.first_seen, 9999999999)
        )
        """
    )

    await conn.commit()
    logger.debug("Backfilled first_seen from contact_advert_paths")
107
app/migrations/_028_payload_hash_text_to_blob.py
Normal file
@@ -0,0 +1,107 @@
import logging
from hashlib import sha256

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Convert payload_hash from 64-char hex TEXT to 32-byte BLOB.

    Halves storage for both the column data and its UNIQUE index.
    Uses Python bytes.fromhex() for the conversion since SQLite's unhex()
    requires 3.41.0+ which may not be available on all deployments.
    """
    # Guard: skip if raw_packets table doesn't exist
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='raw_packets'"
    )
    if not await cursor.fetchone():
        logger.debug("raw_packets table does not exist, skipping payload_hash conversion")
        await conn.commit()
        return

    # Check column types — skip if payload_hash doesn't exist or is already BLOB
    cursor = await conn.execute("PRAGMA table_info(raw_packets)")
    cols = {row[1]: row[2] for row in await cursor.fetchall()}
    if "payload_hash" not in cols:
        logger.debug("payload_hash column does not exist, skipping conversion")
        await conn.commit()
        return
    if cols["payload_hash"].upper() == "BLOB":
        logger.debug("payload_hash is already BLOB, skipping conversion")
        await conn.commit()
        return

    logger.info("Rebuilding raw_packets to convert payload_hash TEXT → BLOB...")

    # Create new table with BLOB type
    await conn.execute("""
        CREATE TABLE raw_packets_new (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            timestamp INTEGER NOT NULL,
            data BLOB NOT NULL,
            message_id INTEGER,
            payload_hash BLOB,
            FOREIGN KEY (message_id) REFERENCES messages(id)
        )
    """)

    # Batch-convert rows: read TEXT hashes, convert to bytes, insert into new table
    batch_size = 5000
    cursor = await conn.execute(
        "SELECT id, timestamp, data, message_id, payload_hash FROM raw_packets ORDER BY id"
    )

    total = 0
    while True:
        rows = await cursor.fetchmany(batch_size)
        if not rows:
            break

        batch: list[tuple[int, int, bytes, int | None, bytes | None]] = []
        for row in rows:
            rid, ts, data, mid, ph = row[0], row[1], row[2], row[3], row[4]
            if ph is not None and isinstance(ph, str):
                try:
                    ph = bytes.fromhex(ph)
                except ValueError:
                    # Not a valid hex string — hash the value to produce a valid BLOB
                    ph = sha256(ph.encode()).digest()
            batch.append((rid, ts, data, mid, ph))

        await conn.executemany(
            "INSERT INTO raw_packets_new (id, timestamp, data, message_id, payload_hash) "
            "VALUES (?, ?, ?, ?, ?)",
            batch,
        )
        total += len(batch)

        if total % 50000 == 0:
            logger.info("Converted %d rows...", total)

    # Preserve autoincrement sequence
    cursor = await conn.execute("SELECT seq FROM sqlite_sequence WHERE name = 'raw_packets'")
    seq_row = await cursor.fetchone()
    if seq_row is not None:
        await conn.execute(
            "INSERT OR REPLACE INTO sqlite_sequence (name, seq) VALUES ('raw_packets_new', ?)",
            (seq_row[0],),
        )

    await conn.execute("DROP TABLE raw_packets")
    await conn.execute("ALTER TABLE raw_packets_new RENAME TO raw_packets")

    # Clean up the sqlite_sequence entry for the old temp name
    await conn.execute("DELETE FROM sqlite_sequence WHERE name = 'raw_packets_new'")

    # Recreate indexes
    await conn.execute(
        "CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
    )
    await conn.execute("CREATE INDEX idx_raw_packets_message_id ON raw_packets(message_id)")

    await conn.commit()
    logger.info("Converted %d payload_hash values from TEXT to BLOB", total)
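One quick post-upgrade sanity check (an illustrative snippet, assuming an open aiosqlite connection named conn) is to confirm SQLite now reports BLOB storage for every non-NULL hash:

# typeof() is a SQLite builtin; expect only 'blob' after the rebuild.
cursor = await conn.execute(
    "SELECT DISTINCT typeof(payload_hash) FROM raw_packets WHERE payload_hash IS NOT NULL"
)
print([row[0] for row in await cursor.fetchall()])  # e.g. ['blob']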
27
app/migrations/_029_add_unread_covering_index.py
Normal file
@@ -0,0 +1,27 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add a covering index for the unread counts query.

    The /api/read-state/unreads endpoint runs three queries against messages.
    The last-message-times query (GROUP BY type, conversation_key + MAX(received_at))
    was doing a full table scan. This covering index lets SQLite resolve the
    grouping and MAX entirely from the index without touching the table.
    It also improves the unread count queries which filter on outgoing and received_at.
    """
    # Guard: table or columns may not exist in partial-schema test setups
    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required = {"type", "conversation_key", "outgoing", "received_at"}
    if required <= columns:
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_messages_unread_covering "
            "ON messages(type, conversation_key, outgoing, received_at)"
        )
    await conn.commit()
31
app/migrations/_030_add_pagination_index.py
Normal file
@@ -0,0 +1,31 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add a composite index for message pagination and drop the now-redundant
    idx_messages_conversation.

    The pagination query (ORDER BY received_at DESC, id DESC LIMIT N) hits a
    temp B-tree sort without this index. With it, SQLite walks the index in
    order and stops after N rows — critical for channels with 30K+ messages.

    idx_messages_conversation(type, conversation_key) is a strict prefix of
    both this index and idx_messages_unread_covering, so SQLite never picks it.
    Dropping it saves ~6 MB and one index to maintain per INSERT.
    """
    # Guard: table or columns may not exist in partial-schema test setups
    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required = {"type", "conversation_key", "received_at", "id"}
    if required <= columns:
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_messages_pagination "
            "ON messages(type, conversation_key, received_at DESC, id DESC)"
        )
        await conn.execute("DROP INDEX IF EXISTS idx_messages_conversation")
    await conn.commit()
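The claimed effect of these two index migrations can be checked with EXPLAIN QUERY PLAN (an illustrative sketch; the conversation key is a placeholder, and the exact plan text varies by SQLite version):

cursor = await conn.execute(
    "EXPLAIN QUERY PLAN "
    "SELECT * FROM messages WHERE type = ? AND conversation_key = ? "
    "ORDER BY received_at DESC, id DESC LIMIT 50",
    ("CHAN", "some-channel-key"),
)
for row in await cursor.fetchall():
    print(row)  # should reference idx_messages_pagination with no temp B-tree sort step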
37
app/migrations/_031_add_mqtt_columns.py
Normal file
@@ -0,0 +1,37 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add MQTT configuration columns to app_settings."""
    # Guard: app_settings may not exist in partial-schema test setups
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
    )
    if not await cursor.fetchone():
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await cursor.fetchall()}

    new_columns = [
        ("mqtt_broker_host", "TEXT DEFAULT ''"),
        ("mqtt_broker_port", "INTEGER DEFAULT 1883"),
        ("mqtt_username", "TEXT DEFAULT ''"),
        ("mqtt_password", "TEXT DEFAULT ''"),
        ("mqtt_use_tls", "INTEGER DEFAULT 0"),
        ("mqtt_tls_insecure", "INTEGER DEFAULT 0"),
        ("mqtt_topic_prefix", "TEXT DEFAULT 'meshcore'"),
        ("mqtt_publish_messages", "INTEGER DEFAULT 0"),
        ("mqtt_publish_raw_packets", "INTEGER DEFAULT 0"),
    ]

    for col_name, col_def in new_columns:
        if col_name not in columns:
            await conn.execute(f"ALTER TABLE app_settings ADD COLUMN {col_name} {col_def}")

    await conn.commit()
33
app/migrations/_032_add_community_mqtt_columns.py
Normal file
@@ -0,0 +1,33 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add community MQTT configuration columns to app_settings."""
    # Guard: app_settings may not exist in partial-schema test setups
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
    )
    if not await cursor.fetchone():
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await cursor.fetchall()}

    new_columns = [
        ("community_mqtt_enabled", "INTEGER DEFAULT 0"),
        ("community_mqtt_iata", "TEXT DEFAULT ''"),
        ("community_mqtt_broker_host", "TEXT DEFAULT 'mqtt-us-v1.letsmesh.net'"),
        ("community_mqtt_broker_port", "INTEGER DEFAULT 443"),
        ("community_mqtt_email", "TEXT DEFAULT ''"),
    ]

    for col_name, col_def in new_columns:
        if col_name not in columns:
            await conn.execute(f"ALTER TABLE app_settings ADD COLUMN {col_name} {col_def}")

    await conn.commit()
23
app/migrations/_033_seed_remoteterm_channel.py
Normal file
@@ -0,0 +1,23 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Seed the #remoteterm hashtag channel so new installs have it by default.

    Uses INSERT OR IGNORE so it's a no-op if the channel already exists
    (e.g. existing users who already added it manually). The channels table
    is created by the base schema before migrations run, so it always exists
    in production.
    """
    try:
        await conn.execute(
            "INSERT OR IGNORE INTO channels (key, name, is_hashtag, on_radio) VALUES (?, ?, ?, ?)",
            ("8959AE053F2201801342A1DBDDA184F6", "#remoteterm", 1, 0),
        )
        await conn.commit()
    except Exception:
        logger.debug("Skipping #remoteterm seed (channels table not ready)")
23
app/migrations/_034_add_flood_scope.py
Normal file
@@ -0,0 +1,23 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add flood_scope column to app_settings for outbound region tagging.

    Empty string means disabled (no scope set, messages sent unscoped).
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN flood_scope TEXT DEFAULT ''")
        await conn.commit()
    except Exception as e:
        error_msg = str(e).lower()
        if "duplicate column" in error_msg:
            logger.debug("flood_scope column already exists, skipping")
        elif "no such table" in error_msg:
            logger.debug("app_settings table not ready, skipping flood_scope migration")
        else:
            raise
36
app/migrations/_035_add_block_lists.py
Normal file
@@ -0,0 +1,36 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add blocked_keys and blocked_names columns to app_settings.

    These store JSON arrays of blocked public keys and display names.
    Blocking hides messages from the UI but does not affect MQTT or bots.
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN blocked_keys TEXT DEFAULT '[]'")
    except Exception as e:
        error_msg = str(e).lower()
        if "duplicate column" in error_msg:
            logger.debug("blocked_keys column already exists, skipping")
        elif "no such table" in error_msg:
            logger.debug("app_settings table not ready, skipping blocked_keys migration")
        else:
            raise

    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN blocked_names TEXT DEFAULT '[]'")
    except Exception as e:
        error_msg = str(e).lower()
        if "duplicate column" in error_msg:
            logger.debug("blocked_names column already exists, skipping")
        elif "no such table" in error_msg:
            logger.debug("app_settings table not ready, skipping blocked_names migration")
        else:
            raise

    await conn.commit()
143
app/migrations/_036_create_fanout_configs.py
Normal file
@@ -0,0 +1,143 @@
import json
import logging
import uuid

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Create fanout_configs table and migrate existing MQTT settings.

    Reads existing MQTT settings from app_settings and creates corresponding
    fanout_configs rows. Old columns are NOT dropped (rollback safety).
    """

    # 1. Create fanout_configs table
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS fanout_configs (
            id TEXT PRIMARY KEY,
            type TEXT NOT NULL,
            name TEXT NOT NULL,
            enabled INTEGER DEFAULT 0,
            config TEXT NOT NULL DEFAULT '{}',
            scope TEXT NOT NULL DEFAULT '{}',
            sort_order INTEGER DEFAULT 0,
            created_at INTEGER NOT NULL
        )
        """
    )

    # 2. Read existing MQTT settings
    try:
        cursor = await conn.execute(
            """
            SELECT mqtt_broker_host, mqtt_broker_port, mqtt_username, mqtt_password,
                   mqtt_use_tls, mqtt_tls_insecure, mqtt_topic_prefix,
                   mqtt_publish_messages, mqtt_publish_raw_packets,
                   community_mqtt_enabled, community_mqtt_iata,
                   community_mqtt_broker_host, community_mqtt_broker_port,
                   community_mqtt_email
            FROM app_settings WHERE id = 1
            """
        )
        row = await cursor.fetchone()
    except Exception:
        row = None

    if row is None:
        await conn.commit()
        return

    import time

    now = int(time.time())
    sort_order = 0

    # 3. Migrate private MQTT if configured
    broker_host = row["mqtt_broker_host"] or ""
    if broker_host:
        publish_messages = bool(row["mqtt_publish_messages"])
        publish_raw = bool(row["mqtt_publish_raw_packets"])
        enabled = publish_messages or publish_raw

        config = {
            "broker_host": broker_host,
            "broker_port": row["mqtt_broker_port"] or 1883,
            "username": row["mqtt_username"] or "",
            "password": row["mqtt_password"] or "",
            "use_tls": bool(row["mqtt_use_tls"]),
            "tls_insecure": bool(row["mqtt_tls_insecure"]),
            "topic_prefix": row["mqtt_topic_prefix"] or "meshcore",
        }

        scope = {
            "messages": "all" if publish_messages else "none",
            "raw_packets": "all" if publish_raw else "none",
        }

        await conn.execute(
            """
            INSERT INTO fanout_configs (id, type, name, enabled, config, scope, sort_order, created_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                str(uuid.uuid4()),
                "mqtt_private",
                "Private MQTT",
                1 if enabled else 0,
                json.dumps(config),
                json.dumps(scope),
                sort_order,
                now,
            ),
        )
        sort_order += 1
        logger.info("Migrated private MQTT settings to fanout_configs (enabled=%s)", enabled)

    # 4. Migrate community MQTT if enabled OR configured (preserve disabled-but-configured)
    community_enabled = bool(row["community_mqtt_enabled"])
    community_iata = row["community_mqtt_iata"] or ""
    community_host = row["community_mqtt_broker_host"] or ""
    community_email = row["community_mqtt_email"] or ""
    community_has_config = bool(
        community_iata
        or community_email
        or (community_host and community_host != "mqtt-us-v1.letsmesh.net")
    )
    if community_enabled or community_has_config:
        config = {
            "broker_host": community_host or "mqtt-us-v1.letsmesh.net",
            "broker_port": row["community_mqtt_broker_port"] or 443,
            "iata": community_iata,
            "email": community_email,
        }

        scope = {
            "messages": "none",
            "raw_packets": "all",
        }

        await conn.execute(
            """
            INSERT INTO fanout_configs (id, type, name, enabled, config, scope, sort_order, created_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                str(uuid.uuid4()),
                "mqtt_community",
                "Community MQTT",
                1 if community_enabled else 0,
                json.dumps(config),
                json.dumps(scope),
                sort_order,
                now,
            ),
        )
        logger.info(
            "Migrated community MQTT settings to fanout_configs (enabled=%s)", community_enabled
        )

    await conn.commit()
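For reference, a migrated private-MQTT row ends up carrying config and scope JSON of roughly this shape (illustrative values only; the broker host is hypothetical):

example_config = {
    "broker_host": "mqtt.example.net",  # hypothetical broker
    "broker_port": 1883,
    "username": "",
    "password": "",
    "use_tls": False,
    "tls_insecure": False,
    "topic_prefix": "meshcore",
}
example_scope = {"messages": "all", "raw_packets": "none"}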
63
app/migrations/_037_bots_to_fanout.py
Normal file
@@ -0,0 +1,63 @@
import json
import logging
import uuid

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Migrate bots from app_settings.bots JSON to fanout_configs rows."""

    try:
        cursor = await conn.execute("SELECT bots FROM app_settings WHERE id = 1")
        row = await cursor.fetchone()
    except Exception:
        row = None

    if row is None:
        await conn.commit()
        return

    bots_json = row["bots"] or "[]"
    try:
        bots = json.loads(bots_json)
    except (json.JSONDecodeError, TypeError):
        bots = []

    if not bots:
        await conn.commit()
        return

    import time

    now = int(time.time())

    # Use sort_order starting at 200 to place bots after MQTT configs (0-99)
    for i, bot in enumerate(bots):
        bot_name = bot.get("name") or f"Bot {i + 1}"
        bot_enabled = bool(bot.get("enabled", False))
        bot_code = bot.get("code", "")

        config_blob = json.dumps({"code": bot_code})
        scope = json.dumps({"messages": "all", "raw_packets": "none"})

        await conn.execute(
            """
            INSERT INTO fanout_configs (id, type, name, enabled, config, scope, sort_order, created_at)
            VALUES (?, 'bot', ?, ?, ?, ?, ?, ?)
            """,
            (
                str(uuid.uuid4()),
                bot_name,
                1 if bot_enabled else 0,
                config_blob,
                scope,
                200 + i,
                now,
            ),
        )
        logger.info("Migrated bot '%s' to fanout_configs (enabled=%s)", bot_name, bot_enabled)

    await conn.commit()
54
app/migrations/_038_drop_legacy_columns.py
Normal file
@@ -0,0 +1,54 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Drop legacy MQTT, community MQTT, and bots columns from app_settings.

    These columns were migrated to fanout_configs in migrations 36 and 37.
    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    the columns remain but are harmless (no longer read or written).
    """
    # Check if app_settings table exists (some test DBs may not have it)
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    columns_to_drop = [
        "bots",
        "mqtt_broker_host",
        "mqtt_broker_port",
        "mqtt_username",
        "mqtt_password",
        "mqtt_use_tls",
        "mqtt_tls_insecure",
        "mqtt_topic_prefix",
        "mqtt_publish_messages",
        "mqtt_publish_raw_packets",
        "community_mqtt_enabled",
        "community_mqtt_iata",
        "community_mqtt_broker_host",
        "community_mqtt_broker_port",
        "community_mqtt_email",
    ]

    for column in columns_to_drop:
        try:
            await conn.execute(f"ALTER TABLE app_settings DROP COLUMN {column}")
            logger.debug("Dropped %s from app_settings", column)
        except aiosqlite.OperationalError as e:
            error_msg = str(e).lower()
            if "no such column" in error_msg:
                logger.debug("app_settings.%s already dropped, skipping", column)
            elif "syntax error" in error_msg or "drop column" in error_msg:
                logger.debug("SQLite doesn't support DROP COLUMN, %s column will remain", column)
            else:
                raise

    await conn.commit()
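Whether the DROP COLUMN statements actually take effect depends on the runtime SQLite build, which can be checked like this (a minimal sketch; older runtimes simply fall into the "column will remain" branch above):

import sqlite3

# ALTER TABLE ... DROP COLUMN requires SQLite 3.35.0+.
supports_drop = tuple(map(int, sqlite3.sqlite_version.split("."))) >= (3, 35, 0)
print(sqlite3.sqlite_version, "drop column supported:", supports_drop)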
65
app/migrations/_039_add_contact_out_path_hash_mode.py
Normal file
@@ -0,0 +1,65 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add contacts.out_path_hash_mode and backfill legacy rows.

    Historical databases predate multibyte routing support. Backfill rules:
    - contacts with last_path_len = -1 are flood routes -> out_path_hash_mode = -1
    - all other existing contacts default to 0 (1-byte legacy hop identifiers)
    """
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    column_cursor = await conn.execute("PRAGMA table_info(contacts)")
    columns = {row[1] for row in await column_cursor.fetchall()}

    added_column = False

    try:
        await conn.execute(
            "ALTER TABLE contacts ADD COLUMN out_path_hash_mode INTEGER NOT NULL DEFAULT 0"
        )
        added_column = True
        logger.debug("Added out_path_hash_mode to contacts table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("contacts.out_path_hash_mode already exists, skipping add")
        else:
            raise

    if "last_path_len" not in columns:
        await conn.commit()
        return

    if added_column:
        await conn.execute(
            """
            UPDATE contacts
            SET out_path_hash_mode = CASE
                WHEN last_path_len = -1 THEN -1
                ELSE 0
            END
            """
        )
    else:
        await conn.execute(
            """
            UPDATE contacts
            SET out_path_hash_mode = CASE
                WHEN last_path_len = -1 THEN -1
                ELSE 0
            END
            WHERE out_path_hash_mode NOT IN (-1, 0, 1, 2)
               OR (last_path_len = -1 AND out_path_hash_mode != -1)
            """
        )
    await conn.commit()
82
app/migrations/_040_rebuild_contact_advert_paths_identity.py
Normal file
@@ -0,0 +1,82 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Rebuild contact_advert_paths so uniqueness includes path_len.

    Multi-byte routing can produce the same path_hex bytes with a different hop count,
    which changes the hop boundaries and therefore the semantic next-hop identity.
    """
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contact_advert_paths'"
    )
    if await cursor.fetchone() is None:
        await conn.execute(
            """
            CREATE TABLE IF NOT EXISTS contact_advert_paths (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                public_key TEXT NOT NULL,
                path_hex TEXT NOT NULL,
                path_len INTEGER NOT NULL,
                first_seen INTEGER NOT NULL,
                last_seen INTEGER NOT NULL,
                heard_count INTEGER NOT NULL DEFAULT 1,
                UNIQUE(public_key, path_hex, path_len),
                FOREIGN KEY (public_key) REFERENCES contacts(public_key)
            )
            """
        )
        await conn.execute("DROP INDEX IF EXISTS idx_contact_advert_paths_recent")
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
            "ON contact_advert_paths(public_key, last_seen DESC)"
        )
        await conn.commit()
        return

    await conn.execute(
        """
        CREATE TABLE contact_advert_paths_new (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            public_key TEXT NOT NULL,
            path_hex TEXT NOT NULL,
            path_len INTEGER NOT NULL,
            first_seen INTEGER NOT NULL,
            last_seen INTEGER NOT NULL,
            heard_count INTEGER NOT NULL DEFAULT 1,
            UNIQUE(public_key, path_hex, path_len),
            FOREIGN KEY (public_key) REFERENCES contacts(public_key)
        )
        """
    )

    await conn.execute(
        """
        INSERT INTO contact_advert_paths_new
            (public_key, path_hex, path_len, first_seen, last_seen, heard_count)
        SELECT
            public_key,
            path_hex,
            path_len,
            MIN(first_seen),
            MAX(last_seen),
            SUM(heard_count)
        FROM contact_advert_paths
        GROUP BY public_key, path_hex, path_len
        """
    )

    await conn.execute("DROP TABLE contact_advert_paths")
    await conn.execute("ALTER TABLE contact_advert_paths_new RENAME TO contact_advert_paths")
    await conn.execute("DROP INDEX IF EXISTS idx_contact_advert_paths_recent")
    await conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
        "ON contact_advert_paths(public_key, last_seen DESC)"
    )
    await conn.commit()
31
app/migrations/_041_add_contact_routing_override_columns.py
Normal file
@@ -0,0 +1,31 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add nullable routing-override columns to contacts."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    for column_name, column_type in (
        ("route_override_path", "TEXT"),
        ("route_override_len", "INTEGER"),
        ("route_override_hash_mode", "INTEGER"),
    ):
        try:
            await conn.execute(f"ALTER TABLE contacts ADD COLUMN {column_name} {column_type}")
            logger.debug("Added %s to contacts table", column_name)
        except aiosqlite.OperationalError as e:
            if "duplicate column name" in str(e).lower():
                logger.debug("contacts.%s already exists, skipping", column_name)
            else:
                raise

    await conn.commit()
26
app/migrations/_042_add_channel_flood_scope_override.py
Normal file
@@ -0,0 +1,26 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add nullable per-channel flood-scope override column."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='channels'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    try:
        await conn.execute("ALTER TABLE channels ADD COLUMN flood_scope_override TEXT")
        logger.debug("Added flood_scope_override to channels table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("channels.flood_scope_override already exists, skipping")
        else:
            raise

    await conn.commit()
31
app/migrations/_043_split_message_dedup_by_type.py
Normal file
@@ -0,0 +1,31 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Restrict the message dedup index to channel messages."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required_columns = {"type", "conversation_key", "text", "sender_timestamp"}
    if not required_columns.issubset(columns):
        logger.debug("messages table missing dedup-index columns, skipping migration 43")
        await conn.commit()
        return

    await conn.execute("DROP INDEX IF EXISTS idx_messages_dedup_null_safe")
    await conn.execute(
        """CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_dedup_null_safe
           ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
           WHERE type = 'CHAN'"""
    )
    await conn.commit()
157
app/migrations/_044_dedupe_incoming_direct_messages.py
Normal file
@@ -0,0 +1,157 @@
import json
import logging

import aiosqlite

logger = logging.getLogger(__name__)


def _merge_message_paths(paths_json_values: list[str | None]) -> str | None:
    """Merge multiple message path arrays into one exact-observation list."""
    merged: list[dict[str, object]] = []
    seen: set[tuple[object | None, object | None, object | None]] = set()

    for paths_json in paths_json_values:
        if not paths_json:
            continue
        try:
            parsed = json.loads(paths_json)
        except (TypeError, json.JSONDecodeError):
            continue
        if not isinstance(parsed, list):
            continue
        for entry in parsed:
            if not isinstance(entry, dict):
                continue
            key = (
                entry.get("path"),
                entry.get("received_at"),
                entry.get("path_len"),
            )
            if key in seen:
                continue
            seen.add(key)
            merged.append(entry)

    return json.dumps(merged) if merged else None


async def migrate(conn: aiosqlite.Connection) -> None:
    """Collapse same-contact same-text same-second incoming DMs into one row."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required_columns = {
        "id",
        "type",
        "conversation_key",
        "text",
        "sender_timestamp",
        "received_at",
        "paths",
        "txt_type",
        "signature",
        "outgoing",
        "acked",
        "sender_name",
        "sender_key",
    }
    if not required_columns.issubset(columns):
        logger.debug("messages table missing incoming-DM dedup columns, skipping migration 44")
        await conn.commit()
        return

    raw_packets_cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='raw_packets'"
    )
    raw_packets_exists = await raw_packets_cursor.fetchone() is not None

    duplicate_groups_cursor = await conn.execute(
        """
        SELECT conversation_key, text,
               COALESCE(sender_timestamp, 0) AS normalized_sender_timestamp,
               COUNT(*) AS duplicate_count
        FROM messages
        WHERE type = 'PRIV' AND outgoing = 0
        GROUP BY conversation_key, text, COALESCE(sender_timestamp, 0)
        HAVING COUNT(*) > 1
        """
    )
    duplicate_groups = await duplicate_groups_cursor.fetchall()

    for group in duplicate_groups:
        normalized_sender_timestamp = group["normalized_sender_timestamp"]
        rows_cursor = await conn.execute(
            """
            SELECT *
            FROM messages
            WHERE type = 'PRIV' AND outgoing = 0
              AND conversation_key = ? AND text = ?
              AND COALESCE(sender_timestamp, 0) = ?
            ORDER BY id ASC
            """,
            (
                group["conversation_key"],
                group["text"],
                normalized_sender_timestamp,
            ),
        )
        rows = list(await rows_cursor.fetchall())
        if len(rows) < 2:
            continue

        keeper = rows[0]
        duplicate_ids = [row["id"] for row in rows[1:]]
        merged_paths = _merge_message_paths([row["paths"] for row in rows])
        merged_received_at = min(row["received_at"] for row in rows)
        merged_txt_type = next((row["txt_type"] for row in rows if row["txt_type"] != 0), 0)
        merged_signature = next((row["signature"] for row in rows if row["signature"]), None)
        merged_sender_name = next((row["sender_name"] for row in rows if row["sender_name"]), None)
        merged_sender_key = next((row["sender_key"] for row in rows if row["sender_key"]), None)
        merged_acked = max(int(row["acked"] or 0) for row in rows)

        await conn.execute(
            """
            UPDATE messages
            SET received_at = ?, paths = ?, txt_type = ?, signature = ?,
                acked = ?, sender_name = ?, sender_key = ?
            WHERE id = ?
            """,
            (
                merged_received_at,
                merged_paths,
                merged_txt_type,
                merged_signature,
                merged_acked,
                merged_sender_name,
                merged_sender_key,
                keeper["id"],
            ),
        )

        if raw_packets_exists:
            for duplicate_id in duplicate_ids:
                await conn.execute(
                    "UPDATE raw_packets SET message_id = ? WHERE message_id = ?",
                    (keeper["id"], duplicate_id),
                )

        placeholders = ",".join("?" for _ in duplicate_ids)
        await conn.execute(
            f"DELETE FROM messages WHERE id IN ({placeholders})",
            duplicate_ids,
        )

    await conn.execute("DROP INDEX IF EXISTS idx_messages_incoming_priv_dedup")
    await conn.execute(
        """CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_incoming_priv_dedup
           ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
           WHERE type = 'PRIV' AND outgoing = 0"""
    )
    await conn.commit()
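The path-merge helper is a pure function, so its dedup behavior is easy to pin down in isolation (illustrative usage, assuming the module above is imported):

import json

a = json.dumps([{"path": "AB", "path_len": 1, "received_at": 100}])
b = json.dumps([
    {"path": "AB", "path_len": 1, "received_at": 100},    # exact duplicate, dropped
    {"path": "ABCD", "path_len": 2, "received_at": 101},  # new observation, kept
])
merged = _merge_message_paths([a, b, None])
print(json.loads(merged))  # two entries: the duplicate collapsed into one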
136
app/migrations/_045_rebuild_contacts_direct_route_columns.py
Normal file
@@ -0,0 +1,136 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Replace legacy contact route columns with canonical direct-route columns."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(contacts)")
    columns = {row[1] for row in await cursor.fetchall()}

    target_columns = {
        "public_key",
        "name",
        "type",
        "flags",
        "direct_path",
        "direct_path_len",
        "direct_path_hash_mode",
        "direct_path_updated_at",
        "route_override_path",
        "route_override_len",
        "route_override_hash_mode",
        "last_advert",
        "lat",
        "lon",
        "last_seen",
        "on_radio",
        "last_contacted",
        "first_seen",
        "last_read_at",
    }
    if (
        target_columns.issubset(columns)
        and "last_path" not in columns
        and "out_path_hash_mode" not in columns
    ):
        await conn.commit()
        return

    await conn.execute(
        """
        CREATE TABLE contacts_new (
            public_key TEXT PRIMARY KEY,
            name TEXT,
            type INTEGER DEFAULT 0,
            flags INTEGER DEFAULT 0,
            direct_path TEXT,
            direct_path_len INTEGER,
            direct_path_hash_mode INTEGER,
            direct_path_updated_at INTEGER,
            route_override_path TEXT,
            route_override_len INTEGER,
            route_override_hash_mode INTEGER,
            last_advert INTEGER,
            lat REAL,
            lon REAL,
            last_seen INTEGER,
            on_radio INTEGER DEFAULT 0,
            last_contacted INTEGER,
            first_seen INTEGER,
            last_read_at INTEGER
        )
        """
    )

    select_expr = {
        "public_key": "public_key",
        "name": "NULL",
        "type": "0",
        "flags": "0",
        "direct_path": "NULL",
        "direct_path_len": "NULL",
        "direct_path_hash_mode": "NULL",
        "direct_path_updated_at": "NULL",
        "route_override_path": "NULL",
        "route_override_len": "NULL",
        "route_override_hash_mode": "NULL",
        "last_advert": "NULL",
        "lat": "NULL",
        "lon": "NULL",
        "last_seen": "NULL",
        "on_radio": "0",
        "last_contacted": "NULL",
        "first_seen": "NULL",
        "last_read_at": "NULL",
    }
    for name in ("name", "type", "flags"):
        if name in columns:
            select_expr[name] = name

    if "direct_path" in columns:
        select_expr["direct_path"] = "direct_path"

    if "direct_path_len" in columns:
        select_expr["direct_path_len"] = "direct_path_len"

    if "direct_path_hash_mode" in columns:
        select_expr["direct_path_hash_mode"] = "direct_path_hash_mode"

    for name in (
        "route_override_path",
        "route_override_len",
        "route_override_hash_mode",
        "last_advert",
        "lat",
        "lon",
        "last_seen",
        "on_radio",
        "last_contacted",
        "first_seen",
        "last_read_at",
    ):
        if name in columns:
            select_expr[name] = name

    ordered_columns = list(select_expr.keys())
    await conn.execute(
        f"""
        INSERT INTO contacts_new ({", ".join(ordered_columns)})
        SELECT {", ".join(select_expr[name] for name in ordered_columns)}
        FROM contacts
        """
    )

    await conn.execute("DROP TABLE contacts")
    await conn.execute("ALTER TABLE contacts_new RENAME TO contacts")
    await conn.commit()
93
app/migrations/_046_cleanup_orphaned_contact_child_rows.py
Normal file
@@ -0,0 +1,93 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Move uniquely resolvable orphan contact child rows onto full contacts, drop the rest."""
    existing_tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    existing_tables = {row[0] for row in await existing_tables_cursor.fetchall()}
    if "contacts" not in existing_tables:
        await conn.commit()
        return

    child_tables = [
        table
        for table in ("contact_name_history", "contact_advert_paths")
        if table in existing_tables
    ]
    if not child_tables:
        await conn.commit()
        return

    orphan_keys: set[str] = set()

    for table in child_tables:
        cursor = await conn.execute(
            f"""
            SELECT DISTINCT child.public_key
            FROM {table} child
            LEFT JOIN contacts c ON c.public_key = child.public_key
            WHERE c.public_key IS NULL
            """
        )
        orphan_keys.update(row[0] for row in await cursor.fetchall())

    for orphan_key in sorted(orphan_keys, key=len, reverse=True):
        match_cursor = await conn.execute(
            """
            SELECT public_key
            FROM contacts
            WHERE length(public_key) = 64
              AND public_key LIKE ? || '%'
            ORDER BY public_key
            """,
            (orphan_key.lower(),),
        )
        matches = [row[0] for row in await match_cursor.fetchall()]
        resolved_key = matches[0] if len(matches) == 1 else None

        if resolved_key is not None:
            if "contact_name_history" in child_tables:
                await conn.execute(
                    """
                    INSERT INTO contact_name_history (public_key, name, first_seen, last_seen)
                    SELECT ?, name, first_seen, last_seen
                    FROM contact_name_history
                    WHERE public_key = ?
                    ON CONFLICT(public_key, name) DO UPDATE SET
                        first_seen = MIN(contact_name_history.first_seen, excluded.first_seen),
                        last_seen = MAX(contact_name_history.last_seen, excluded.last_seen)
                    """,
                    (resolved_key, orphan_key),
                )
            if "contact_advert_paths" in child_tables:
                await conn.execute(
                    """
                    INSERT INTO contact_advert_paths
                        (public_key, path_hex, path_len, first_seen, last_seen, heard_count)
                    SELECT ?, path_hex, path_len, first_seen, last_seen, heard_count
                    FROM contact_advert_paths
                    WHERE public_key = ?
                    ON CONFLICT(public_key, path_hex, path_len) DO UPDATE SET
                        first_seen = MIN(contact_advert_paths.first_seen, excluded.first_seen),
                        last_seen = MAX(contact_advert_paths.last_seen, excluded.last_seen),
                        heard_count = contact_advert_paths.heard_count + excluded.heard_count
                    """,
                    (resolved_key, orphan_key),
                )

        if "contact_name_history" in child_tables:
            await conn.execute(
                "DELETE FROM contact_name_history WHERE public_key = ?",
                (orphan_key,),
            )
        if "contact_advert_paths" in child_tables:
            await conn.execute(
                "DELETE FROM contact_advert_paths WHERE public_key = ?",
                (orphan_key,),
            )

    await conn.commit()
39
app/migrations/_047_add_statistics_indexes.py
Normal file
@@ -0,0 +1,39 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add indexes used by the statistics endpoint's time-windowed scans."""
    cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    tables = {row[0] for row in await cursor.fetchall()}

    if "raw_packets" in tables:
        cursor = await conn.execute("PRAGMA table_info(raw_packets)")
        raw_packet_columns = {row[1] for row in await cursor.fetchall()}
        if "timestamp" in raw_packet_columns:
            await conn.execute(
                "CREATE INDEX IF NOT EXISTS idx_raw_packets_timestamp ON raw_packets(timestamp)"
            )

    if "contacts" in tables:
        cursor = await conn.execute("PRAGMA table_info(contacts)")
        contact_columns = {row[1] for row in await cursor.fetchall()}
        if {"type", "last_seen"}.issubset(contact_columns):
            await conn.execute(
                "CREATE INDEX IF NOT EXISTS idx_contacts_type_last_seen ON contacts(type, last_seen)"
            )

    if "messages" in tables:
        cursor = await conn.execute("PRAGMA table_info(messages)")
        message_columns = {row[1] for row in await cursor.fetchall()}
        if {"type", "received_at", "conversation_key"}.issubset(message_columns):
            await conn.execute(
                """
                CREATE INDEX IF NOT EXISTS idx_messages_type_received_conversation
                ON messages(type, received_at, conversation_key)
                """
            )
    await conn.commit()
27
app/migrations/_048_discovery_blocked_types.py
Normal file
@@ -0,0 +1,27 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add discovery_blocked_types column to app_settings.

    Stores a JSON array of integer contact type codes (1=Client, 2=Repeater,
    3=Room, 4=Sensor) whose advertisements should not create new contacts.
    Empty list means all types are accepted.
    """
    try:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN discovery_blocked_types TEXT DEFAULT '[]'"
        )
    except Exception as e:
        error_msg = str(e).lower()
        if "duplicate column" in error_msg:
            logger.debug("discovery_blocked_types column already exists, skipping")
        elif "no such table" in error_msg:
            logger.debug("app_settings table not ready, skipping discovery_blocked_types migration")
        else:
            raise
    await conn.commit()
158
app/migrations/_049_foreign_key_cascade.py
Normal file
158
app/migrations/_049_foreign_key_cascade.py
Normal file
@@ -0,0 +1,158 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Rebuild FK tables with CASCADE/SET NULL and clean orphaned rows.
|
||||
|
||||
SQLite cannot ALTER existing FK constraints, so each table is rebuilt.
|
||||
Orphaned child rows are cleaned up before the rebuild to ensure the
|
||||
INSERT...SELECT into the new table (which has enforced FKs) succeeds.
|
||||
"""
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
# Back up the database before table rebuilds (skip for in-memory DBs).
|
||||
cursor = await conn.execute("PRAGMA database_list")
|
||||
db_row = await cursor.fetchone()
|
||||
db_path = db_row[2] if db_row else ""
|
||||
if db_path and db_path != ":memory:" and Path(db_path).exists():
|
||||
backup_path = db_path + ".pre-fk-migration.bak"
|
||||
for suffix in ("", "-wal", "-shm"):
|
||||
src = Path(db_path + suffix)
|
||||
if src.exists():
|
||||
shutil.copy2(str(src), backup_path + suffix)
|
||||
logger.info("Database backed up to %s before FK migration", backup_path)
|
||||
|
||||
# --- Phase 1: clean orphans (guard each table's existence) ---
|
||||
tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
|
||||
existing_tables = {row[0] for row in await tables_cursor.fetchall()}
|
||||
|
||||
if "contact_advert_paths" in existing_tables and "contacts" in existing_tables:
|
||||
await conn.execute(
|
||||
"DELETE FROM contact_advert_paths "
|
||||
"WHERE public_key NOT IN (SELECT public_key FROM contacts)"
|
||||
)
|
||||
if "contact_name_history" in existing_tables and "contacts" in existing_tables:
|
||||
await conn.execute(
|
||||
"DELETE FROM contact_name_history "
|
||||
"WHERE public_key NOT IN (SELECT public_key FROM contacts)"
|
||||
)
|
||||
if "raw_packets" in existing_tables and "messages" in existing_tables:
|
||||
# Guard: message_id column may not exist on very old schemas
|
||||
col_cursor = await conn.execute("PRAGMA table_info(raw_packets)")
|
||||
raw_cols = {row[1] for row in await col_cursor.fetchall()}
|
||||
if "message_id" in raw_cols:
|
||||
await conn.execute(
|
||||
"UPDATE raw_packets SET message_id = NULL WHERE message_id IS NOT NULL "
|
||||
"AND message_id NOT IN (SELECT id FROM messages)"
|
||||
)
|
||||
await conn.commit()
|
||||
logger.debug("Cleaned orphaned child rows before FK rebuild")
|
||||
|
||||
# --- Phase 2: rebuild raw_packets with ON DELETE SET NULL ---
|
||||
# Skip if raw_packets doesn't have message_id (pre-migration-18 schema)
|
||||
raw_has_message_id = False
|
||||
if "raw_packets" in existing_tables:
|
||||
col_cursor2 = await conn.execute("PRAGMA table_info(raw_packets)")
|
||||
raw_has_message_id = "message_id" in {row[1] for row in await col_cursor2.fetchall()}
|
||||
|
||||
if raw_has_message_id:
|
||||
# Dynamically build column list based on what the old table actually has,
|
||||
# since very old schemas may lack payload_hash (added in migration 28).
|
||||
col_cursor3 = await conn.execute("PRAGMA table_info(raw_packets)")
|
||||
old_cols = [row[1] for row in await col_cursor3.fetchall()]
|
||||
|
||||
new_col_defs = [
|
||||
"id INTEGER PRIMARY KEY AUTOINCREMENT",
|
||||
"timestamp INTEGER NOT NULL",
|
||||
"data BLOB NOT NULL",
|
||||
"message_id INTEGER",
|
||||
]
|
||||
copy_cols = ["id", "timestamp", "data", "message_id"]
|
||||
if "payload_hash" in old_cols:
|
||||
new_col_defs.append("payload_hash BLOB")
|
||||
copy_cols.append("payload_hash")
|
||||
new_col_defs.append("FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE SET NULL")
|
||||
|
||||
cols_sql = ", ".join(new_col_defs)
|
||||
copy_sql = ", ".join(copy_cols)
|
||||
await conn.execute(f"CREATE TABLE raw_packets_fk ({cols_sql})")
|
||||
await conn.execute(
|
||||
f"INSERT INTO raw_packets_fk ({copy_sql}) SELECT {copy_sql} FROM raw_packets"
|
||||
)
|
||||
await conn.execute("DROP TABLE raw_packets")
|
||||
await conn.execute("ALTER TABLE raw_packets_fk RENAME TO raw_packets")
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_raw_packets_message_id ON raw_packets(message_id)"
|
||||
)
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_raw_packets_timestamp ON raw_packets(timestamp)"
|
||||
)
|
||||
if "payload_hash" in old_cols:
|
||||
await conn.execute(
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
|
||||
)
|
||||
await conn.commit()
|
||||
logger.debug("Rebuilt raw_packets with ON DELETE SET NULL")
|
||||
|
||||
# --- Phase 3: rebuild contact_advert_paths with ON DELETE CASCADE ---
|
||||
if "contact_advert_paths" in existing_tables:
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE contact_advert_paths_fk (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
path_hex TEXT NOT NULL,
|
||||
path_len INTEGER NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
heard_count INTEGER NOT NULL DEFAULT 1,
|
||||
UNIQUE(public_key, path_hex, path_len),
|
||||
FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
|
||||
)
|
||||
"""
|
||||
)
|
||||
await conn.execute(
|
||||
"INSERT INTO contact_advert_paths_fk (id, public_key, path_hex, path_len, first_seen, last_seen, heard_count) "
|
||||
"SELECT id, public_key, path_hex, path_len, first_seen, last_seen, heard_count FROM contact_advert_paths"
|
||||
)
|
||||
await conn.execute("DROP TABLE contact_advert_paths")
|
||||
await conn.execute("ALTER TABLE contact_advert_paths_fk RENAME TO contact_advert_paths")
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
|
||||
"ON contact_advert_paths(public_key, last_seen DESC)"
|
||||
)
|
||||
await conn.commit()
|
||||
logger.debug("Rebuilt contact_advert_paths with ON DELETE CASCADE")
|
||||
|
||||
# --- Phase 4: rebuild contact_name_history with ON DELETE CASCADE ---
|
||||
if "contact_name_history" in existing_tables:
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE contact_name_history_fk (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
UNIQUE(public_key, name),
|
||||
FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
|
||||
)
|
||||
"""
|
||||
)
|
||||
await conn.execute(
|
||||
"INSERT INTO contact_name_history_fk (id, public_key, name, first_seen, last_seen) "
|
||||
"SELECT id, public_key, name, first_seen, last_seen FROM contact_name_history"
|
||||
)
|
||||
await conn.execute("DROP TABLE contact_name_history")
|
||||
await conn.execute("ALTER TABLE contact_name_history_fk RENAME TO contact_name_history")
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_contact_name_history_key "
|
||||
"ON contact_name_history(public_key, last_seen DESC)"
|
||||
)
|
||||
await conn.commit()
|
||||
logger.debug("Rebuilt contact_name_history with ON DELETE CASCADE")
|
||||
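All four phases follow the same SQLite recipe for changing a foreign-key clause, since ALTER TABLE cannot edit constraints in place: create a replacement table with the desired FK, copy rows, drop the original, rename, then re-create indexes (DROP TABLE discards them). A minimal generic sketch of that recipe, with hypothetical parent/child table names rather than anything from this migration:

    import aiosqlite

    async def rebuild_child_with_cascade(conn: aiosqlite.Connection) -> None:
        # FK enforcement must already be OFF (the caller disables it), or the
        # DROP/RENAME swap would trip constraint checks mid-flight.
        await conn.execute(
            "CREATE TABLE child_new ("
            "  id INTEGER PRIMARY KEY,"
            "  parent_id INTEGER,"
            "  FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE CASCADE"
            ")"
        )
        await conn.execute("INSERT INTO child_new SELECT id, parent_id FROM child")
        await conn.execute("DROP TABLE child")
        await conn.execute("ALTER TABLE child_new RENAME TO child")
        # Indexes died with the old table; re-create them explicitly.
        await conn.execute("CREATE INDEX IF NOT EXISTS idx_child_parent ON child(parent_id)")
        await conn.commit()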
27
app/migrations/_050_repeater_telemetry_history.py
Normal file
@@ -0,0 +1,27 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Create repeater_telemetry_history table for JSON-blob telemetry snapshots."""
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS repeater_telemetry_history (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            public_key TEXT NOT NULL,
            timestamp INTEGER NOT NULL,
            data TEXT NOT NULL,
            FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
        )
        """
    )
    await conn.execute(
        """
        CREATE INDEX IF NOT EXISTS idx_repeater_telemetry_pk_ts
        ON repeater_telemetry_history (public_key, timestamp)
        """
    )
    await conn.commit()
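Since the table stores snapshots as opaque JSON text indexed only by (public_key, timestamp), reads and writes stay simple. A sketch of plausible accessors (the function names and payload handling are illustrative, not part of the migration):

    import json
    import time

    import aiosqlite

    async def record_snapshot(conn: aiosqlite.Connection, public_key: str, snapshot: dict) -> None:
        # The payload is free-form JSON; only the key and timestamp are indexed.
        await conn.execute(
            "INSERT INTO repeater_telemetry_history (public_key, timestamp, data) VALUES (?, ?, ?)",
            (public_key, int(time.time()), json.dumps(snapshot)),
        )
        await conn.commit()

    async def latest_snapshot(conn: aiosqlite.Connection, public_key: str) -> dict | None:
        cursor = await conn.execute(
            "SELECT data FROM repeater_telemetry_history "
            "WHERE public_key = ? ORDER BY timestamp DESC LIMIT 1",
            (public_key,),
        )
        row = await cursor.fetchone()
        return json.loads(row[0]) if row else None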
24
app/migrations/_051_drop_sidebar_sort_order.py
Normal file
@@ -0,0 +1,24 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Remove vestigial sidebar_sort_order column from app_settings."""
    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    if "sidebar_sort_order" in columns:
        try:
            await conn.execute("ALTER TABLE app_settings DROP COLUMN sidebar_sort_order")
            await conn.commit()
        except Exception as e:
            error_msg = str(e).lower()
            if "syntax error" in error_msg or "drop column" in error_msg:
                logger.debug(
                    "SQLite doesn't support DROP COLUMN, sidebar_sort_order column will remain"
                )
                await conn.commit()
            else:
                raise
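The try/except exists because ALTER TABLE ... DROP COLUMN only arrived in SQLite 3.35.0; older builds raise a syntax error. An alternative sketch that probes the capability up front instead of catching the failure (a stylistic assumption, not what this repo does):

    import aiosqlite

    async def supports_drop_column(conn: aiosqlite.Connection) -> bool:
        # DROP COLUMN support landed in SQLite 3.35.0 (March 2021).
        cursor = await conn.execute("SELECT sqlite_version()")
        version = (await cursor.fetchone())[0]
        major, minor = (int(part) for part in version.split(".")[:2])
        return (major, minor) >= (3, 35)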
21
app/migrations/_052_add_channel_path_hash_mode_override.py
Normal file
@@ -0,0 +1,21 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add nullable per-channel path hash mode override column."""
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "channels" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return
    try:
        await conn.execute("ALTER TABLE channels ADD COLUMN path_hash_mode_override INTEGER")
        await conn.commit()
    except Exception as e:
        if "duplicate column" in str(e).lower():
            await conn.commit()
        else:
            raise
20
app/migrations/_053_tracked_telemetry_repeaters.py
Normal file
@@ -0,0 +1,20 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add tracked_telemetry_repeaters JSON list column to app_settings."""
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "app_settings" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return
    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    if "tracked_telemetry_repeaters" not in columns:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN tracked_telemetry_repeaters TEXT DEFAULT '[]'"
        )
    await conn.commit()
20
app/migrations/_054_auto_resend_channel.py
Normal file
@@ -0,0 +1,20 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add auto_resend_channel boolean column to app_settings."""
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "app_settings" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return
    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    if "auto_resend_channel" not in columns:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN auto_resend_channel INTEGER DEFAULT 0"
        )
    await conn.commit()
94
app/migrations/_055_favorites_to_columns.py
Normal file
@@ -0,0 +1,94 @@
import json
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Move favorites from app_settings JSON blob to per-entity boolean columns.

    1. Add ``favorite`` column to contacts and channels tables.
    2. Backfill from the ``app_settings.favorites`` JSON array.
    3. Drop the ``favorites`` column from app_settings.
    """
    # --- Add columns ---
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    existing_tables = {row[0] for row in await tables_cursor.fetchall()}
    for table in ("contacts", "channels"):
        if table not in existing_tables:
            continue
        col_cursor = await conn.execute(f"PRAGMA table_info({table})")
        columns = {row[1] for row in await col_cursor.fetchall()}
        if "favorite" not in columns:
            await conn.execute(f"ALTER TABLE {table} ADD COLUMN favorite INTEGER DEFAULT 0")
    await conn.commit()

    # --- Backfill from JSON ---
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "app_settings" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return

    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    settings_columns = {row[1] for row in await col_cursor.fetchall()}
    if "favorites" not in settings_columns:
        await conn.commit()
        return

    cursor = await conn.execute("SELECT favorites FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()
    if row and row[0]:
        try:
            favorites = json.loads(row[0])
        except (ValueError, TypeError):
            favorites = []

        contact_keys = []
        channel_keys = []
        for fav in favorites:
            if not isinstance(fav, dict):
                continue
            fav_type = fav.get("type")
            fav_id = fav.get("id")
            if not fav_id:
                continue
            if fav_type == "contact":
                contact_keys.append(fav_id)
            elif fav_type == "channel":
                channel_keys.append(fav_id)

        if contact_keys:
            placeholders = ",".join("?" for _ in contact_keys)
            await conn.execute(
                f"UPDATE contacts SET favorite = 1 WHERE public_key IN ({placeholders})",
                contact_keys,
            )
        if channel_keys:
            placeholders = ",".join("?" for _ in channel_keys)
            await conn.execute(
                f"UPDATE channels SET favorite = 1 WHERE key IN ({placeholders})",
                channel_keys,
            )
        if contact_keys or channel_keys:
            logger.info(
                "Backfilled %d contact favorite(s) and %d channel favorite(s) from app_settings",
                len(contact_keys),
                len(channel_keys),
            )
        await conn.commit()

    # --- Drop the JSON column ---
    try:
        await conn.execute("ALTER TABLE app_settings DROP COLUMN favorites")
        await conn.commit()
    except Exception as e:
        error_msg = str(e).lower()
        if "syntax error" in error_msg or "drop column" in error_msg:
            logger.debug("SQLite doesn't support DROP COLUMN; favorites column will remain unused")
            await conn.commit()
        else:
            raise
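The data movement is easiest to see by example; values below are illustrative only:

    # Before: one JSON blob in app_settings.favorites
    #   [{"type": "contact", "id": "abc123"}, {"type": "channel", "id": "KEY1"}]
    #
    # After: per-row booleans, queryable without JSON parsing
    #   UPDATE contacts SET favorite = 1 WHERE public_key = 'abc123';
    #   UPDATE channels SET favorite = 1 WHERE key = 'KEY1';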
43
app/migrations/_056_priv_dedup_include_sender_key.py
Normal file
@@ -0,0 +1,43 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add sender_key to the incoming PRIV dedup index.

    Room-server posts are stored as PRIV messages sharing one conversation_key
    (the room contact). Without sender_key in the uniqueness constraint, two
    different room participants sending identical text in the same clock second
    collide and the second message is silently dropped.

    Adding COALESCE(sender_key, '') is strictly more permissive — no existing
    rows can conflict — so the migration only needs to rebuild the index.
    """
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    # The index references type, conversation_key, sender_timestamp, outgoing,
    # and sender_key. Some migration tests create minimal messages tables that
    # lack these columns. Skip gracefully when the schema is too old.
    col_cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    required = {"type", "conversation_key", "sender_timestamp", "outgoing", "sender_key"}
    if not required.issubset(columns):
        await conn.commit()
        return

    await conn.execute("DROP INDEX IF EXISTS idx_messages_incoming_priv_dedup")
    await conn.execute(
        """CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_incoming_priv_dedup
           ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0),
                       COALESCE(sender_key, ''))
           WHERE type = 'PRIV' AND outgoing = 0"""
    )
    await conn.commit()
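A standalone sketch of the collision the docstring describes, with the column set reduced for illustration (table and key names here are invented, not the app's schema):

    import asyncio

    import aiosqlite

    async def demo() -> None:
        conn = await aiosqlite.connect(":memory:")
        await conn.execute(
            "CREATE TABLE messages (id INTEGER PRIMARY KEY, type TEXT, conversation_key TEXT, "
            "text TEXT, sender_timestamp INTEGER, sender_key TEXT, outgoing INTEGER DEFAULT 0)"
        )
        await conn.execute(
            """CREATE UNIQUE INDEX idx_messages_incoming_priv_dedup
               ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0),
                           COALESCE(sender_key, ''))
               WHERE type = 'PRIV' AND outgoing = 0"""
        )
        # Two room participants post identical text in the same clock second.
        for sender in ("alice_key", "bob_key"):
            await conn.execute(
                "INSERT OR IGNORE INTO messages (type, conversation_key, text, sender_timestamp, sender_key) "
                "VALUES ('PRIV', 'room_contact', 'hello', 1000, ?)",
                (sender,),
            )
        cursor = await conn.execute("SELECT COUNT(*) FROM messages")
        # Prints 2: both rows kept. Without sender_key in the index, the
        # second insert would have been silently ignored.
        print((await cursor.fetchone())[0])
        await conn.close()

    asyncio.run(demo())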
66
app/migrations/__init__.py
Normal file
@@ -0,0 +1,66 @@
"""
Database migrations using SQLite's user_version pragma.

Migrations run automatically on startup. The user_version pragma tracks
which migrations have been applied (defaults to 0 for existing databases).

Each migration lives in its own file: ``_NNN_description.py``, exposing an
``async def migrate(conn)`` entry point. The runner auto-discovers files by
numeric prefix and executes them in order.

This approach is safe for existing users - their databases have user_version=0,
so all migrations run in order on first startup after upgrade.
"""

import importlib
import logging
import pkgutil
import re

import aiosqlite

logger = logging.getLogger(__name__)


async def get_version(conn: aiosqlite.Connection) -> int:
    """Get current schema version from SQLite user_version pragma."""
    cursor = await conn.execute("PRAGMA user_version")
    row = await cursor.fetchone()
    return row[0] if row else 0


async def set_version(conn: aiosqlite.Connection, version: int) -> None:
    """Set schema version using SQLite user_version pragma."""
    await conn.execute(f"PRAGMA user_version = {version}")


async def run_migrations(conn: aiosqlite.Connection) -> int:
    """
    Run all pending migrations.

    Returns the number of migrations applied.
    """
    version = await get_version(conn)
    applied = 0

    for module_info in sorted(pkgutil.iter_modules(__path__), key=lambda m: m.name):
        match = re.match(r"_(\d+)_", module_info.name)
        if not match:
            continue
        num = int(match.group(1))
        if num <= version:
            continue
        logger.info("Applying migration %d: %s", num, module_info.name)
        mod = importlib.import_module(f"{__name__}.{module_info.name}")
        await mod.migrate(conn)
        await set_version(conn, num)
        applied += 1

    if applied > 0:
        logger.info(
            "Applied %d migration(s), schema now at version %d", applied, await get_version(conn)
        )
    else:
        logger.debug("Schema up to date at version %d", version)

    return applied
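Two properties of the runner are worth keeping in mind when adding files: discovery sorts by module name, so numeric prefixes must stay zero-padded (lexicographically, "_100_" would sort before "_57_"), and each migration's number is written to user_version immediately after it succeeds, so a crash mid-sequence resumes at the right place. A sketch of what a next migration could look like under this convention (the filename and column are invented for illustration):

    # app/migrations/_057_add_example_column.py (hypothetical)
    import logging

    import aiosqlite

    logger = logging.getLogger(__name__)


    async def migrate(conn: aiosqlite.Connection) -> None:
        """Illustrative only: add a nullable column, tolerating re-runs."""
        try:
            await conn.execute("ALTER TABLE contacts ADD COLUMN example_column TEXT")
            await conn.commit()
        except aiosqlite.OperationalError as e:
            if "duplicate column name" in str(e).lower():
                logger.debug("contacts.example_column already exists, skipping")
            else:
                raise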
@@ -351,7 +351,7 @@ class TestMigration036:
         )
         await conn.commit()
 
-        from app.migrations import _migrate_036_create_fanout_configs
+        from app.migrations._036_create_fanout_configs import migrate as _migrate_036_create_fanout_configs
 
         await _migrate_036_create_fanout_configs(conn)
 
@@ -382,7 +382,7 @@ class TestMigration036:
         )
         await conn.commit()
 
-        from app.migrations import _migrate_036_create_fanout_configs
+        from app.migrations._036_create_fanout_configs import migrate as _migrate_036_create_fanout_configs
 
         await _migrate_036_create_fanout_configs(conn)
 
@@ -413,7 +413,7 @@ class TestMigration036:
         )
         await conn.commit()
 
-        from app.migrations import _migrate_036_create_fanout_configs
+        from app.migrations._036_create_fanout_configs import migrate as _migrate_036_create_fanout_configs
 
         await _migrate_036_create_fanout_configs(conn)
 
@@ -439,7 +439,7 @@ class TestMigration036:
         await conn.execute("INSERT INTO app_settings (id) VALUES (1)")
         await conn.commit()
 
-        from app.migrations import _migrate_036_create_fanout_configs
+        from app.migrations._036_create_fanout_configs import migrate as _migrate_036_create_fanout_configs
 
         await _migrate_036_create_fanout_configs(conn)
 
@@ -478,7 +478,7 @@ class TestMigration037:
         )
         await conn.commit()
 
-        from app.migrations import _migrate_037_bots_to_fanout
+        from app.migrations._037_bots_to_fanout import migrate as _migrate_037_bots_to_fanout
 
         await _migrate_037_bots_to_fanout(conn)
 
@@ -512,7 +512,7 @@ class TestMigration037:
         await conn.execute("INSERT INTO app_settings (id, bots) VALUES (1, '[]')")
         await conn.commit()
 
-        from app.migrations import _migrate_037_bots_to_fanout
+        from app.migrations._037_bots_to_fanout import migrate as _migrate_037_bots_to_fanout
 
         await _migrate_037_bots_to_fanout(conn)
 
@@ -536,7 +536,7 @@ class TestMigration038:
         await conn.execute("INSERT INTO app_settings (id) VALUES (1)")
         await conn.commit()
 
-        from app.migrations import _migrate_038_drop_legacy_columns
+        from app.migrations._038_drop_legacy_columns import migrate as _migrate_038_drop_legacy_columns
 
         await _migrate_038_drop_legacy_columns(conn)
 
@@ -561,7 +561,7 @@ class TestMigration038:
         await conn.execute("CREATE TABLE app_settings (id INTEGER PRIMARY KEY)")
         await conn.commit()
 
-        from app.migrations import _migrate_038_drop_legacy_columns
+        from app.migrations._038_drop_legacy_columns import migrate as _migrate_038_drop_legacy_columns
 
         # Should not raise
         await _migrate_038_drop_legacy_columns(conn)
File diff suppressed because it is too large
0
tests/test_migrations/__init__.py
Normal file
5
tests/test_migrations/conftest.py
Normal file
@@ -0,0 +1,5 @@
# Updated automatically when a new migration is added. Migration tests that
# run ``run_migrations`` to completion assert ``get_version == LATEST`` and
# ``applied == LATEST - starting_version`` so only this constant needs to
# change, not every individual assertion.
LATEST_SCHEMA_VERSION = 56
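A sketch of the assertion shape that comment describes (the starting version and table setup are placeholders, not a real test from this suite):

    import aiosqlite
    import pytest

    from app.migrations import get_version, run_migrations, set_version
    from tests.test_migrations.conftest import LATEST_SCHEMA_VERSION


    @pytest.mark.asyncio
    async def test_migrates_to_latest_from_version_12():
        conn = await aiosqlite.connect(":memory:")
        try:
            await set_version(conn, 12)
            # ... create whatever minimal tables migrations 13+ expect ...
            applied = await run_migrations(conn)
            assert await get_version(conn) == LATEST_SCHEMA_VERSION
            assert applied == LATEST_SCHEMA_VERSION - 12
        finally:
            await conn.close()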
309
tests/test_migrations/test_migration_001.py
Normal file
@@ -0,0 +1,309 @@
"""Tests for database migration(s)."""


import aiosqlite
import pytest

from app.migrations import run_migrations


class TestMigration001:
    """Test migration 001: add last_read_at columns."""

    @pytest.mark.asyncio
    async def test_migration_adds_last_read_at_to_contacts(self):
        """Migration adds last_read_at column to contacts table."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            # Create schema without last_read_at (simulating pre-migration state)
            await conn.execute("""
                CREATE TABLE contacts (
                    public_key TEXT PRIMARY KEY,
                    name TEXT,
                    type INTEGER DEFAULT 0,
                    flags INTEGER DEFAULT 0,
                    last_path TEXT,
                    last_path_len INTEGER DEFAULT -1,
                    last_advert INTEGER,
                    lat REAL,
                    lon REAL,
                    last_seen INTEGER,
                    on_radio INTEGER DEFAULT 0,
                    last_contacted INTEGER
                )
            """)
            await conn.execute("""
                CREATE TABLE channels (
                    key TEXT PRIMARY KEY,
                    name TEXT NOT NULL,
                    is_hashtag INTEGER DEFAULT 0,
                    on_radio INTEGER DEFAULT 0
                )
            """)
            # Raw packets table with old schema (for migrations 2 and 3)
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    decrypted INTEGER DEFAULT 0,
                    message_id INTEGER,
                    decrypt_attempts INTEGER DEFAULT 0,
                    last_attempt INTEGER
                )
            """)
            await conn.execute("CREATE INDEX idx_raw_packets_decrypted ON raw_packets(decrypted)")
            # Messages table with old schema (for migrations 6 and 7)
            await conn.execute("""
                CREATE TABLE messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,
                    conversation_key TEXT NOT NULL,
                    text TEXT NOT NULL,
                    sender_timestamp INTEGER,
                    received_at INTEGER NOT NULL,
                    path_len INTEGER,
                    txt_type INTEGER DEFAULT 0,
                    signature TEXT,
                    outgoing INTEGER DEFAULT 0,
                    acked INTEGER DEFAULT 0,
                    UNIQUE(type, conversation_key, text, sender_timestamp)
                )
            """)
            await conn.commit()

            # Run migrations
            await run_migrations(conn)

            # Verify columns exist by inserting and selecting
            await conn.execute(
                "INSERT INTO contacts (public_key, name, last_read_at) VALUES (?, ?, ?)",
                ("abc123", "Test", 12345),
            )
            await conn.execute(
                "INSERT INTO channels (key, name, last_read_at) VALUES (?, ?, ?)",
                ("KEY123", "#test", 67890),
            )
            await conn.commit()

            cursor = await conn.execute(
                "SELECT last_read_at FROM contacts WHERE public_key = ?", ("abc123",)
            )
            row = await cursor.fetchone()
            assert row["last_read_at"] == 12345

            cursor = await conn.execute(
                "SELECT last_read_at FROM channels WHERE key = ?", ("KEY123",)
            )
            row = await cursor.fetchone()
            assert row["last_read_at"] == 67890
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_migration_is_idempotent(self):
        """Running migration multiple times is safe."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            # Create schema without last_read_at
            await conn.execute("""
                CREATE TABLE contacts (
                    public_key TEXT PRIMARY KEY,
                    name TEXT
                )
            """)
            await conn.execute("""
                CREATE TABLE channels (
                    key TEXT PRIMARY KEY,
                    name TEXT NOT NULL
                )
            """)
            # Raw packets table with old schema (for migrations 2 and 3)
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    decrypted INTEGER DEFAULT 0,
                    message_id INTEGER,
                    decrypt_attempts INTEGER DEFAULT 0,
                    last_attempt INTEGER
                )
            """)
            await conn.execute("CREATE INDEX idx_raw_packets_decrypted ON raw_packets(decrypted)")
            # Messages table with old schema (for migrations 6 and 7)
            await conn.execute("""
                CREATE TABLE messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,
                    conversation_key TEXT NOT NULL,
                    text TEXT NOT NULL,
                    sender_timestamp INTEGER,
                    received_at INTEGER NOT NULL,
                    path_len INTEGER,
                    txt_type INTEGER DEFAULT 0,
                    signature TEXT,
                    outgoing INTEGER DEFAULT 0,
                    acked INTEGER DEFAULT 0,
                    UNIQUE(type, conversation_key, text, sender_timestamp)
                )
            """)
            await conn.commit()

            # Run migrations twice
            applied1 = await run_migrations(conn)
            applied2 = await run_migrations(conn)

            assert applied1 > 0  # Migrations were applied
            assert applied2 == 0  # No migrations on second run
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_migration_handles_column_already_exists(self):
        """Migration handles case where column already exists."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            # Create schema with last_read_at already present
            await conn.execute("""
                CREATE TABLE contacts (
                    public_key TEXT PRIMARY KEY,
                    name TEXT,
                    last_read_at INTEGER
                )
            """)
            await conn.execute("""
                CREATE TABLE channels (
                    key TEXT PRIMARY KEY,
                    name TEXT NOT NULL,
                    last_read_at INTEGER
                )
            """)
            # Raw packets table with old schema (for migrations 2 and 3)
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    decrypted INTEGER DEFAULT 0,
                    message_id INTEGER,
                    decrypt_attempts INTEGER DEFAULT 0,
                    last_attempt INTEGER
                )
            """)
            await conn.execute("CREATE INDEX idx_raw_packets_decrypted ON raw_packets(decrypted)")
            # Messages table with old schema (for migrations 6 and 7)
            await conn.execute("""
                CREATE TABLE messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,
                    conversation_key TEXT NOT NULL,
                    text TEXT NOT NULL,
                    sender_timestamp INTEGER,
                    received_at INTEGER NOT NULL,
                    path_len INTEGER,
                    txt_type INTEGER DEFAULT 0,
                    signature TEXT,
                    outgoing INTEGER DEFAULT 0,
                    acked INTEGER DEFAULT 0,
                    UNIQUE(type, conversation_key, text, sender_timestamp)
                )
            """)
            await conn.commit()

            # Run migrations - should not fail
            applied = await run_migrations(conn)

            # All migrations applied (version incremented) but no error
            assert applied > 0
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_existing_data_preserved_after_migration(self):
        """Migration preserves existing contact and channel data."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            # Create schema and insert data before migration
            await conn.execute("""
                CREATE TABLE contacts (
                    public_key TEXT PRIMARY KEY,
                    name TEXT,
                    type INTEGER DEFAULT 0
                )
            """)
            await conn.execute("""
                CREATE TABLE channels (
                    key TEXT PRIMARY KEY,
                    name TEXT NOT NULL,
                    is_hashtag INTEGER DEFAULT 0
                )
            """)
            # Raw packets table with old schema (for migrations 2 and 3)
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    decrypted INTEGER DEFAULT 0,
                    message_id INTEGER,
                    decrypt_attempts INTEGER DEFAULT 0,
                    last_attempt INTEGER
                )
            """)
            await conn.execute("CREATE INDEX idx_raw_packets_decrypted ON raw_packets(decrypted)")
            # Messages table with old schema (for migrations 6 and 7)
            await conn.execute("""
                CREATE TABLE messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,
                    conversation_key TEXT NOT NULL,
                    text TEXT NOT NULL,
                    sender_timestamp INTEGER,
                    received_at INTEGER NOT NULL,
                    path_len INTEGER,
                    txt_type INTEGER DEFAULT 0,
                    signature TEXT,
                    outgoing INTEGER DEFAULT 0,
                    acked INTEGER DEFAULT 0,
                    UNIQUE(type, conversation_key, text, sender_timestamp)
                )
            """)
            await conn.execute(
                "INSERT INTO contacts (public_key, name, type) VALUES (?, ?, ?)",
                ("existingkey", "ExistingContact", 1),
            )
            await conn.execute(
                "INSERT INTO channels (key, name, is_hashtag) VALUES (?, ?, ?)",
                ("EXISTINGCHAN", "#existing", 1),
            )
            await conn.commit()

            # Run migrations
            await run_migrations(conn)

            # Verify data is preserved
            cursor = await conn.execute(
                "SELECT public_key, name, type, last_read_at FROM contacts WHERE public_key = ?",
                ("existingkey",),
            )
            row = await cursor.fetchone()
            assert row["public_key"] == "existingkey"
            assert row["name"] == "ExistingContact"
            assert row["type"] == 1
            assert row["last_read_at"] is None  # New column defaults to NULL

            cursor = await conn.execute(
                "SELECT key, name, is_hashtag, last_read_at FROM channels WHERE key = ?",
                ("EXISTINGCHAN",),
            )
            row = await cursor.fetchone()
            assert row["key"] == "EXISTINGCHAN"
            assert row["name"] == "#existing"
            assert row["is_hashtag"] == 1
            assert row["last_read_at"] is None
        finally:
            await conn.close()
97
tests/test_migrations/test_migration_013.py
Normal file
@@ -0,0 +1,97 @@
"""Tests for database migration(s)."""

import json

import aiosqlite
import pytest

from app.migrations import run_migrations, set_version


class TestMigration013:
    """Test migration 013: convert bot_enabled/bot_code to multi-bot format."""

    @pytest.mark.asyncio
    async def test_migration_converts_existing_bot_to_array(self):
        """Migration converts existing bot_enabled/bot_code to bots array."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            # Set version to 12 (just before migration 13)
            await set_version(conn, 12)

            # Create app_settings with old bot columns
            await conn.execute("""
                CREATE TABLE app_settings (
                    id INTEGER PRIMARY KEY,
                    max_radio_contacts INTEGER DEFAULT 50,
                    favorites TEXT DEFAULT '[]',
                    auto_decrypt_dm_on_advert INTEGER DEFAULT 0,
                    sidebar_sort_order TEXT DEFAULT 'recent',
                    last_message_times TEXT DEFAULT '{}',
                    preferences_migrated INTEGER DEFAULT 0,
                    advert_interval INTEGER DEFAULT 0,
                    last_advert_time INTEGER DEFAULT 0,
                    bot_enabled INTEGER DEFAULT 0,
                    bot_code TEXT DEFAULT ''
                )
            """)
            await conn.execute(
                "INSERT INTO app_settings (id, bot_enabled, bot_code) VALUES (1, 1, 'def bot(): return \"hello\"')"
            )
            await conn.commit()

            # Run migration 13 (plus remaining which also run)
            await run_migrations(conn)

            # Bots were migrated from app_settings to fanout_configs (migration 37)
            # and the bots column was dropped (migration 38)
            cursor = await conn.execute("SELECT * FROM fanout_configs WHERE type = 'bot'")
            row = await cursor.fetchone()
            assert row is not None

            config = json.loads(row["config"])
            assert config["code"] == 'def bot(): return "hello"'
            assert row["name"] == "Bot 1"
            assert bool(row["enabled"])
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_migration_creates_empty_array_when_no_bot(self):
        """Migration creates empty bots array when no existing bot data."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 12)

            await conn.execute("""
                CREATE TABLE app_settings (
                    id INTEGER PRIMARY KEY,
                    max_radio_contacts INTEGER DEFAULT 50,
                    favorites TEXT DEFAULT '[]',
                    auto_decrypt_dm_on_advert INTEGER DEFAULT 0,
                    sidebar_sort_order TEXT DEFAULT 'recent',
                    last_message_times TEXT DEFAULT '{}',
                    preferences_migrated INTEGER DEFAULT 0,
                    advert_interval INTEGER DEFAULT 0,
                    last_advert_time INTEGER DEFAULT 0,
                    bot_enabled INTEGER DEFAULT 0,
                    bot_code TEXT DEFAULT ''
                )
            """)
            await conn.execute(
                "INSERT INTO app_settings (id, bot_enabled, bot_code) VALUES (1, 0, '')"
            )
            await conn.commit()

            await run_migrations(conn)

            # Bots column was dropped by migration 38; verify no bots in fanout_configs
            cursor = await conn.execute("SELECT COUNT(*) FROM fanout_configs WHERE type = 'bot'")
            row = await cursor.fetchone()
            assert row[0] == 0
        finally:
            await conn.close()
154
tests/test_migrations/test_migration_018.py
Normal file
@@ -0,0 +1,154 @@
"""Tests for database migration(s)."""

from hashlib import sha256

import aiosqlite
import pytest

from app.migrations import run_migrations, set_version


class TestMigration018:
    """Test migration 018: drop UNIQUE(data) from raw_packets."""

    @pytest.mark.asyncio
    async def test_migration_drops_data_unique_constraint(self):
        """Migration rebuilds raw_packets without UNIQUE(data), preserving data."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 17)

            # Create raw_packets WITH UNIQUE(data) — simulates production schema
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL UNIQUE,
                    message_id INTEGER,
                    payload_hash TEXT
                )
            """)
            await conn.execute(
                "CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
            )
            await conn.execute("CREATE INDEX idx_raw_packets_message_id ON raw_packets(message_id)")

            # Insert test data
            await conn.execute(
                "INSERT INTO raw_packets (timestamp, data, payload_hash) VALUES (?, ?, ?)",
                (1000, b"\x01\x02\x03", "hash_a"),
            )
            await conn.execute(
                "INSERT INTO raw_packets (timestamp, data, message_id, payload_hash) VALUES (?, ?, ?, ?)",
                (2000, b"\x04\x05\x06", 42, "hash_b"),
            )
            # Create messages table stub (needed for migration 19)
            await conn.execute("""
                CREATE TABLE messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,
                    conversation_key TEXT NOT NULL,
                    text TEXT NOT NULL,
                    sender_timestamp INTEGER,
                    received_at INTEGER NOT NULL,
                    txt_type INTEGER DEFAULT 0,
                    signature TEXT,
                    outgoing INTEGER DEFAULT 0,
                    acked INTEGER DEFAULT 0,
                    paths TEXT
                )
            """)
            await conn.execute(
                """CREATE UNIQUE INDEX idx_messages_dedup_null_safe
                   ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
            )
            await conn.commit()

            # Verify autoindex exists before migration
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='sqlite_autoindex_raw_packets_1'"
            )
            assert await cursor.fetchone() is not None

            await run_migrations(conn)

            # Verify autoindex is gone
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='sqlite_autoindex_raw_packets_1'"
            )
            assert await cursor.fetchone() is None

            # Verify data is preserved
            cursor = await conn.execute("SELECT COUNT(*) FROM raw_packets")
            assert (await cursor.fetchone())[0] == 2

            cursor = await conn.execute(
                "SELECT timestamp, data, message_id, payload_hash FROM raw_packets ORDER BY id"
            )
            rows = await cursor.fetchall()
            assert rows[0]["timestamp"] == 1000
            assert bytes(rows[0]["data"]) == b"\x01\x02\x03"
            assert rows[0]["message_id"] is None
            # payload_hash was converted from TEXT to BLOB by migration 28;
            # "hash_a" is not valid hex so gets sha256-hashed
            assert bytes(rows[0]["payload_hash"]) == sha256(b"hash_a").digest()
            # message_id=42 was orphaned (no matching messages row), so
            # migration 49's orphan cleanup NULLs it out.
            assert rows[1]["message_id"] is None

            # Verify payload_hash unique index still works
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='idx_raw_packets_payload_hash'"
            )
            assert await cursor.fetchone() is not None
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_migration_skips_when_no_unique_constraint(self):
        """Migration is a no-op when UNIQUE(data) is already absent."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 17)

            # Create raw_packets WITHOUT UNIQUE(data) — fresh install schema
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    message_id INTEGER,
                    payload_hash TEXT
                )
            """)
            await conn.execute(
                "CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
            )
            # Messages stub for migration 19
            await conn.execute("""
                CREATE TABLE messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,
                    conversation_key TEXT NOT NULL,
                    text TEXT NOT NULL,
                    sender_timestamp INTEGER,
                    received_at INTEGER NOT NULL,
                    txt_type INTEGER DEFAULT 0,
                    signature TEXT,
                    outgoing INTEGER DEFAULT 0,
                    acked INTEGER DEFAULT 0,
                    paths TEXT
                )
            """)
            await conn.execute(
                """CREATE UNIQUE INDEX idx_messages_dedup_null_safe
                   ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
            )
            await conn.commit()

            await run_migrations(conn)
        finally:
            await conn.close()
125
tests/test_migrations/test_migration_019.py
Normal file
@@ -0,0 +1,125 @@
"""Tests for database migration(s)."""


import aiosqlite
import pytest

from app.migrations import run_migrations, set_version


class TestMigration019:
    """Test migration 019: drop UNIQUE constraint from messages."""

    @pytest.mark.asyncio
    async def test_migration_drops_messages_unique_constraint(self):
        """Migration rebuilds messages without UNIQUE, preserving data and channel dedup index."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 17)

            # raw_packets stub (no UNIQUE on data, so migration 18 skips)
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    message_id INTEGER,
                    payload_hash TEXT
                )
            """)
            # Create messages WITH UNIQUE constraint — simulates production schema
            await conn.execute("""
                CREATE TABLE messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,
                    conversation_key TEXT NOT NULL,
                    text TEXT NOT NULL,
                    sender_timestamp INTEGER,
                    received_at INTEGER NOT NULL,
                    txt_type INTEGER DEFAULT 0,
                    signature TEXT,
                    outgoing INTEGER DEFAULT 0,
                    acked INTEGER DEFAULT 0,
                    paths TEXT,
                    UNIQUE(type, conversation_key, text, sender_timestamp)
                )
            """)
            await conn.execute(
                "CREATE INDEX idx_messages_conversation ON messages(type, conversation_key)"
            )
            await conn.execute("CREATE INDEX idx_messages_received ON messages(received_at)")
            await conn.execute(
                """CREATE UNIQUE INDEX idx_messages_dedup_null_safe
                   ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
            )

            # Insert test data
            await conn.execute(
                "INSERT INTO messages (type, conversation_key, text, sender_timestamp, received_at, paths) "
                "VALUES (?, ?, ?, ?, ?, ?)",
                ("CHAN", "KEY1", "hello world", 1000, 1000, '[{"path":"ab","received_at":1000}]'),
            )
            await conn.execute(
                "INSERT INTO messages (type, conversation_key, text, sender_timestamp, received_at, outgoing) "
                "VALUES (?, ?, ?, ?, ?, ?)",
                ("PRIV", "abc123", "dm text", 2000, 2000, 1),
            )
            await conn.commit()

            # Verify autoindex exists before migration
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='sqlite_autoindex_messages_1'"
            )
            assert await cursor.fetchone() is not None

            await run_migrations(conn)

            # Verify autoindex is gone
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='sqlite_autoindex_messages_1'"
            )
            assert await cursor.fetchone() is None

            # Verify data is preserved
            cursor = await conn.execute("SELECT COUNT(*) FROM messages")
            assert (await cursor.fetchone())[0] == 2

            cursor = await conn.execute(
                "SELECT type, conversation_key, text, paths, outgoing FROM messages ORDER BY id"
            )
            rows = await cursor.fetchall()
            assert rows[0]["type"] == "CHAN"
            assert rows[0]["text"] == "hello world"
            assert rows[0]["paths"] == '[{"path":"ab","received_at":1000}]'
            assert rows[1]["type"] == "PRIV"
            assert rows[1]["outgoing"] == 1

            # Verify channel dedup index still works (INSERT OR IGNORE should ignore duplicates)
            cursor = await conn.execute(
                "INSERT OR IGNORE INTO messages (type, conversation_key, text, sender_timestamp, received_at) "
                "VALUES (?, ?, ?, ?, ?)",
                ("CHAN", "KEY1", "hello world", 1000, 9999),
            )
            assert cursor.rowcount == 0  # Duplicate ignored

            # Direct messages no longer use the shared dedup index.
            cursor = await conn.execute(
                "INSERT OR IGNORE INTO messages (type, conversation_key, text, sender_timestamp, received_at) "
                "VALUES (?, ?, ?, ?, ?)",
                ("PRIV", "abc123", "dm text", 2000, 9999),
            )
            assert cursor.rowcount == 1

            # Verify dedup index exists
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='idx_messages_dedup_null_safe'"
            )
            assert await cursor.fetchone() is not None

            cursor = await conn.execute(
                "SELECT sql FROM sqlite_master WHERE name='idx_messages_dedup_null_safe'"
            )
            index_sql = (await cursor.fetchone())["sql"]
            assert "WHERE type = 'CHAN'" in index_sql
        finally:
            await conn.close()
72
tests/test_migrations/test_migration_020.py
Normal file
@@ -0,0 +1,72 @@
"""Tests for database migration(s)."""


import aiosqlite
import pytest

from app.migrations import run_migrations, set_version


class TestMigration020:
    """Test migration 020: enable WAL mode and incremental auto-vacuum."""

    @pytest.mark.asyncio
    async def test_migration_enables_wal_and_incremental_auto_vacuum(self, tmp_path):
        """Migration switches journal mode to WAL and auto_vacuum to INCREMENTAL."""
        db_path = str(tmp_path / "test.db")
        conn = await aiosqlite.connect(db_path)
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 19)

            # Create minimal tables so migration 20 can run
            await conn.execute(
                "CREATE TABLE raw_packets (id INTEGER PRIMARY KEY, data BLOB NOT NULL)"
            )
            await conn.execute("CREATE TABLE messages (id INTEGER PRIMARY KEY, text TEXT NOT NULL)")
            await conn.commit()

            # Verify defaults before migration
            cursor = await conn.execute("PRAGMA auto_vacuum")
            assert (await cursor.fetchone())[0] == 0  # NONE

            cursor = await conn.execute("PRAGMA journal_mode")
            assert (await cursor.fetchone())[0] == "delete"

            await run_migrations(conn)

            # Verify WAL mode
            cursor = await conn.execute("PRAGMA journal_mode")
            assert (await cursor.fetchone())[0] == "wal"

            # Verify incremental auto-vacuum
            cursor = await conn.execute("PRAGMA auto_vacuum")
            assert (await cursor.fetchone())[0] == 2  # INCREMENTAL
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_migration_is_idempotent(self, tmp_path):
        """Running migration 20 twice doesn't error or re-VACUUM."""
        db_path = str(tmp_path / "test.db")
        conn = await aiosqlite.connect(db_path)
        conn.row_factory = aiosqlite.Row
        try:
            # Set up as if already at version 20 with WAL + incremental
            await conn.execute("PRAGMA auto_vacuum = INCREMENTAL")
            await conn.execute("PRAGMA journal_mode = WAL")
            await conn.execute(
                "CREATE TABLE raw_packets (id INTEGER PRIMARY KEY, data BLOB NOT NULL)"
            )
            await conn.execute("CREATE TABLE messages (id INTEGER PRIMARY KEY, text TEXT NOT NULL)")
            await conn.commit()
            await set_version(conn, 20)

            await run_migrations(conn)

            # Still WAL + INCREMENTAL
            cursor = await conn.execute("PRAGMA journal_mode")
            assert (await cursor.fetchone())[0] == "wal"
            cursor = await conn.execute("PRAGMA auto_vacuum")
            assert (await cursor.fetchone())[0] == 2
        finally:
            await conn.close()
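For reference, the pragma mechanics these tests exercise: auto_vacuum cannot be switched away from NONE on a populated database without a VACUUM, and journal_mode=WAL is persistent (recorded in the database file itself), which is why the setting survives into later connections. A sketch of the sequence the migration under test presumably issues:

    import aiosqlite

    async def enable_wal_and_incremental(conn: aiosqlite.Connection) -> None:
        # Changing auto_vacuum away from NONE only takes effect once the
        # database file is rebuilt, hence the VACUUM.
        await conn.execute("PRAGMA auto_vacuum = INCREMENTAL")
        await conn.execute("VACUUM")
        # WAL is a persistent property of the database file.
        await conn.execute("PRAGMA journal_mode = WAL")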
131
tests/test_migrations/test_migration_028.py
Normal file
@@ -0,0 +1,131 @@
"""Tests for database migration(s)."""

from hashlib import sha256

import aiosqlite
import pytest

from app.migrations import run_migrations, set_version


class TestMigration028:
    """Test migration 028: convert payload_hash from TEXT to BLOB."""

    @pytest.mark.asyncio
    async def test_migration_converts_hex_text_to_blob(self):
        """Migration converts 64-char hex TEXT payload_hash values to 32-byte BLOBs."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 27)

            # Create raw_packets with TEXT payload_hash (pre-migration schema)
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    message_id INTEGER,
                    payload_hash TEXT
                )
            """)
            await conn.execute(
                "CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
            )
            await conn.execute("CREATE INDEX idx_raw_packets_message_id ON raw_packets(message_id)")

            # Insert rows with hex TEXT hashes (as produced by .hexdigest())
            hash_a = sha256(b"packet_a").hexdigest()
            hash_b = sha256(b"packet_b").hexdigest()
            await conn.execute(
                "INSERT INTO raw_packets (timestamp, data, payload_hash) VALUES (?, ?, ?)",
                (1000, b"\x01\x02", hash_a),
            )
            await conn.execute(
                "INSERT INTO raw_packets (timestamp, data, message_id, payload_hash) VALUES (?, ?, ?, ?)",
                (2000, b"\x03\x04", 42, hash_b),
            )
            # Row with NULL payload_hash
            await conn.execute(
                "INSERT INTO raw_packets (timestamp, data) VALUES (?, ?)",
                (3000, b"\x05\x06"),
            )
            await conn.commit()

            await run_migrations(conn)

            # Verify payload_hash column is now BLOB
            cursor = await conn.execute("PRAGMA table_info(raw_packets)")
            cols = {row[1]: row[2] for row in await cursor.fetchall()}
            assert cols["payload_hash"] == "BLOB"

            # Verify data is preserved and converted correctly
            cursor = await conn.execute(
                "SELECT id, timestamp, data, message_id, payload_hash FROM raw_packets ORDER BY id"
            )
            rows = await cursor.fetchall()
            assert len(rows) == 3

            assert rows[0]["timestamp"] == 1000
            assert bytes(rows[0]["data"]) == b"\x01\x02"
            assert bytes(rows[0]["payload_hash"]) == sha256(b"packet_a").digest()
            assert rows[0]["message_id"] is None

            assert rows[1]["timestamp"] == 2000
            assert bytes(rows[1]["payload_hash"]) == sha256(b"packet_b").digest()
            assert rows[1]["message_id"] == 42

            assert rows[2]["payload_hash"] is None

            # Verify unique index works
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='idx_raw_packets_payload_hash'"
            )
            assert await cursor.fetchone() is not None

            # Verify message_id index exists
            cursor = await conn.execute(
                "SELECT name FROM sqlite_master WHERE name='idx_raw_packets_message_id'"
            )
            assert await cursor.fetchone() is not None
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_migration_skips_when_already_blob(self):
        """Migration is a no-op when payload_hash is already BLOB (fresh install)."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 27)

            # Create raw_packets with BLOB payload_hash (new schema)
            await conn.execute("""
                CREATE TABLE raw_packets (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp INTEGER NOT NULL,
                    data BLOB NOT NULL,
                    message_id INTEGER,
                    payload_hash BLOB
                )
            """)
            await conn.execute(
                "CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
            )

            # Insert a row with a BLOB hash
            await conn.execute(
                "INSERT INTO raw_packets (timestamp, data, payload_hash) VALUES (?, ?, ?)",
                (1000, b"\x01", b"\xab" * 32),
            )
            await conn.commit()

            await run_migrations(conn)

            # Verify data unchanged
            cursor = await conn.execute("SELECT payload_hash FROM raw_packets")
            row = await cursor.fetchone()
            assert bytes(row["payload_hash"]) == b"\xab" * 32
        finally:
            await conn.close()
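These tests pin down the conversion rule without showing it. As inferable from the assertions (a sketch only, not the migration's actual code), the rule is roughly:

    from hashlib import sha256

    def to_blob_hash(value: str | None) -> bytes | None:
        if value is None:
            return None  # NULL rows stay NULL
        try:
            blob = bytes.fromhex(value)
            if len(blob) == 32:
                return blob  # 64-char hex digest decodes to its 32 raw bytes
        except ValueError:
            pass
        # Anything non-hex (e.g. "hash_a") is re-hashed so the value is
        # still a unique 32-byte BLOB.
        return sha256(value.encode()).digest()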
57
tests/test_migrations/test_migration_032.py
Normal file
@@ -0,0 +1,57 @@
"""Tests for database migration(s)."""


import aiosqlite
import pytest

from app.migrations import run_migrations, set_version


class TestMigration032:
    """Test migration 032: add community MQTT columns to app_settings."""

    @pytest.mark.asyncio
    async def test_migration_adds_all_community_mqtt_columns(self):
        """Migration adds enabled, iata, broker, and email columns."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 31)

            # Create app_settings without community columns (pre-migration schema)
            await conn.execute("""
                CREATE TABLE app_settings (
                    id INTEGER PRIMARY KEY,
                    max_radio_contacts INTEGER DEFAULT 200,
                    favorites TEXT DEFAULT '[]',
                    auto_decrypt_dm_on_advert INTEGER DEFAULT 0,
                    sidebar_sort_order TEXT DEFAULT 'recent',
                    last_message_times TEXT DEFAULT '{}',
                    preferences_migrated INTEGER DEFAULT 0,
                    advert_interval INTEGER DEFAULT 0,
                    last_advert_time INTEGER DEFAULT 0,
                    bots TEXT DEFAULT '[]',
                    mqtt_broker_host TEXT DEFAULT '',
                    mqtt_broker_port INTEGER DEFAULT 1883,
                    mqtt_username TEXT DEFAULT '',
                    mqtt_password TEXT DEFAULT '',
                    mqtt_use_tls INTEGER DEFAULT 0,
                    mqtt_tls_insecure INTEGER DEFAULT 0,
                    mqtt_topic_prefix TEXT DEFAULT 'meshcore',
                    mqtt_publish_messages INTEGER DEFAULT 0,
                    mqtt_publish_raw_packets INTEGER DEFAULT 0
                )
            """)
            await conn.execute("INSERT INTO app_settings (id) VALUES (1)")
            await conn.commit()

            await run_migrations(conn)

            # Community MQTT columns were added by migration 32 and dropped by migration 38.
            # Verify community settings were NOT migrated (no community config existed).
            cursor = await conn.execute(
                "SELECT COUNT(*) FROM fanout_configs WHERE type = 'mqtt_community'"
            )
            row = await cursor.fetchone()
            assert row[0] == 0
        finally:
            await conn.close()
95
tests/test_migrations/test_migration_033.py
Normal file
@@ -0,0 +1,95 @@
"""Tests for database migration(s)."""


import aiosqlite
import pytest

from app.migrations import run_migrations, set_version


class TestMigration033:
    """Test migration 033: seed #remoteterm channel."""

    @pytest.mark.asyncio
    async def test_migration_seeds_remoteterm_channel(self):
        """Migration inserts the #remoteterm channel for new installs."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 32)
            await conn.execute("""
                CREATE TABLE channels (
                    key TEXT PRIMARY KEY,
                    name TEXT NOT NULL,
                    is_hashtag INTEGER DEFAULT 0,
                    on_radio INTEGER DEFAULT 0
                )
            """)
            # Minimal app_settings so earlier migrations don't fail
            await conn.execute("""
                CREATE TABLE app_settings (
                    id INTEGER PRIMARY KEY,
                    community_mqtt_enabled INTEGER DEFAULT 0,
                    community_mqtt_iata TEXT DEFAULT '',
                    community_mqtt_broker_host TEXT DEFAULT '',
                    community_mqtt_broker_port INTEGER DEFAULT 443,
                    community_mqtt_email TEXT DEFAULT ''
                )
            """)
            await conn.commit()

            await run_migrations(conn)

            cursor = await conn.execute(
                "SELECT key, name, is_hashtag, on_radio FROM channels WHERE key = ?",
                ("8959AE053F2201801342A1DBDDA184F6",),
            )
            row = await cursor.fetchone()
            assert row is not None
            assert row["name"] == "#remoteterm"
            assert row["is_hashtag"] == 1
            assert row["on_radio"] == 0
        finally:
            await conn.close()

    @pytest.mark.asyncio
    async def test_migration_does_not_overwrite_existing_channel(self):
        """Migration is a no-op if #remoteterm already exists."""
        conn = await aiosqlite.connect(":memory:")
        conn.row_factory = aiosqlite.Row
        try:
            await set_version(conn, 32)
            await conn.execute("""
                CREATE TABLE channels (
                    key TEXT PRIMARY KEY,
                    name TEXT NOT NULL,
                    is_hashtag INTEGER DEFAULT 0,
                    on_radio INTEGER DEFAULT 0
                )
            """)
            await conn.execute("""
                CREATE TABLE app_settings (
                    id INTEGER PRIMARY KEY,
                    community_mqtt_enabled INTEGER DEFAULT 0,
                    community_mqtt_iata TEXT DEFAULT '',
                    community_mqtt_broker_host TEXT DEFAULT '',
                    community_mqtt_broker_port INTEGER DEFAULT 443,
                    community_mqtt_email TEXT DEFAULT ''
                )
            """)
            # Pre-existing channel with on_radio=1 (user added it to radio)
            await conn.execute(
                "INSERT INTO channels (key, name, is_hashtag, on_radio) VALUES (?, ?, ?, ?)",
                ("8959AE053F2201801342A1DBDDA184F6", "#remoteterm", 1, 1),
            )
            await conn.commit()

            await run_migrations(conn)

            cursor = await conn.execute(
                "SELECT on_radio FROM channels WHERE key = ?",
                ("8959AE053F2201801342A1DBDDA184F6",),
            )
            row = await cursor.fetchone()
            assert row["on_radio"] == 1  # Not overwritten
        finally:
            await conn.close()
68
tests/test_migrations/test_migration_034.py
Normal file
68
tests/test_migrations/test_migration_034.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import run_migrations, set_version
|
||||
|
||||
class TestMigration034:
|
||||
"""Test migration 034: add flood_scope column to app_settings."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_migration_adds_flood_scope_column(self):
|
||||
"""Migration adds flood_scope column with empty string default."""
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 33)
|
||||
|
||||
# Create app_settings without flood_scope (pre-migration schema)
|
||||
await conn.execute("""
|
||||
CREATE TABLE app_settings (
|
||||
id INTEGER PRIMARY KEY,
|
||||
max_radio_contacts INTEGER DEFAULT 200,
|
||||
favorites TEXT DEFAULT '[]',
|
||||
auto_decrypt_dm_on_advert INTEGER DEFAULT 0,
|
||||
sidebar_sort_order TEXT DEFAULT 'recent',
|
||||
last_message_times TEXT DEFAULT '{}',
|
||||
preferences_migrated INTEGER DEFAULT 0,
|
||||
advert_interval INTEGER DEFAULT 0,
|
||||
last_advert_time INTEGER DEFAULT 0,
|
||||
bots TEXT DEFAULT '[]',
|
||||
mqtt_broker_host TEXT DEFAULT '',
|
||||
mqtt_broker_port INTEGER DEFAULT 1883,
|
||||
mqtt_username TEXT DEFAULT '',
|
||||
mqtt_password TEXT DEFAULT '',
|
||||
mqtt_use_tls INTEGER DEFAULT 0,
|
||||
mqtt_tls_insecure INTEGER DEFAULT 0,
|
||||
mqtt_topic_prefix TEXT DEFAULT 'meshcore',
|
||||
mqtt_publish_messages INTEGER DEFAULT 0,
|
||||
mqtt_publish_raw_packets INTEGER DEFAULT 0,
|
||||
community_mqtt_enabled INTEGER DEFAULT 0,
|
||||
community_mqtt_iata TEXT DEFAULT '',
|
||||
community_mqtt_broker_host TEXT DEFAULT 'mqtt-us-v1.letsmesh.net',
|
||||
community_mqtt_broker_port INTEGER DEFAULT 443,
|
||||
community_mqtt_email TEXT DEFAULT ''
|
||||
)
|
||||
""")
|
||||
await conn.execute("INSERT INTO app_settings (id) VALUES (1)")
|
||||
# Channels table needed for migration 33
|
||||
await conn.execute("""
|
||||
CREATE TABLE channels (
|
||||
key TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
is_hashtag INTEGER DEFAULT 0,
|
||||
on_radio INTEGER DEFAULT 0
|
||||
)
|
||||
""")
|
||||
await conn.commit()
|
||||
|
||||
await run_migrations(conn)
|
||||
|
||||
# Verify column exists with correct default
|
||||
cursor = await conn.execute("SELECT flood_scope FROM app_settings WHERE id = 1")
|
||||
row = await cursor.fetchone()
|
||||
assert row["flood_scope"] == ""
|
||||
finally:
|
||||
await conn.close()
|
||||
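A guarded ALTER TABLE, the same pattern the earlier column-add migrations in this repository use, would satisfy this test; a sketch, with the control flow assumed rather than taken from the diff:

import aiosqlite


async def migrate(conn: aiosqlite.Connection) -> None:
    try:
        # Existing rows pick up the '' default without a table rebuild.
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN flood_scope TEXT DEFAULT ''"
        )
    except aiosqlite.OperationalError as e:
        # Re-running against an already-migrated database is harmless.
        if "duplicate column name" not in str(e).lower():
            raise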
217
tests/test_migrations/test_migration_039.py
Normal file
@@ -0,0 +1,217 @@
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import get_version, run_migrations, set_version
|
||||
|
||||
from tests.test_migrations.conftest import LATEST_SCHEMA_VERSION
|
||||
|
||||
class TestMigration039:
|
||||
"""Test migration 039: persist contacts.out_path_hash_mode."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_legacy_advert_paths_do_not_become_direct_routes_after_upgrade(self):
|
||||
"""Pre-045 advert-derived last_path data is dropped from active direct-route columns."""
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 38)
|
||||
await conn.execute("""
|
||||
CREATE TABLE contacts (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
type INTEGER DEFAULT 0,
|
||||
flags INTEGER DEFAULT 0,
|
||||
last_path TEXT,
|
||||
last_path_len INTEGER DEFAULT -1,
|
||||
last_advert INTEGER,
|
||||
lat REAL,
|
||||
lon REAL,
|
||||
last_seen INTEGER,
|
||||
on_radio INTEGER DEFAULT 0,
|
||||
last_contacted INTEGER,
|
||||
first_seen INTEGER
|
||||
)
|
||||
""")
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contacts (
|
||||
public_key, name, last_path, last_path_len, first_seen
|
||||
) VALUES (?, ?, ?, ?, ?), (?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"aa" * 32,
|
||||
"Flood",
|
||||
"",
|
||||
-1,
|
||||
1000,
|
||||
"bb" * 32,
|
||||
"LegacyPath",
|
||||
"1122",
|
||||
1,
|
||||
1001,
|
||||
),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 38
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT public_key, direct_path, direct_path_len, direct_path_hash_mode
|
||||
FROM contacts
|
||||
ORDER BY public_key
|
||||
"""
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
assert rows[0]["public_key"] == "aa" * 32
|
||||
assert rows[0]["direct_path"] is None
|
||||
assert rows[0]["direct_path_len"] is None
|
||||
assert rows[0]["direct_path_hash_mode"] is None
|
||||
assert rows[1]["public_key"] == "bb" * 32
|
||||
assert rows[1]["direct_path"] is None
|
||||
assert rows[1]["direct_path_len"] is None
|
||||
assert rows[1]["direct_path_hash_mode"] is None
|
||||
finally:
|
||||
await conn.close()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_legacy_out_path_hash_mode_is_not_promoted_into_direct_routes(self):
|
||||
"""Pre-045 out_path_hash_mode does not make advert paths become active direct routes."""
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 38)
|
||||
await conn.execute("""
|
||||
CREATE TABLE contacts (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
type INTEGER DEFAULT 0,
|
||||
flags INTEGER DEFAULT 0,
|
||||
last_path TEXT,
|
||||
last_path_len INTEGER DEFAULT -1,
|
||||
out_path_hash_mode INTEGER NOT NULL DEFAULT 0,
|
||||
last_advert INTEGER,
|
||||
lat REAL,
|
||||
lon REAL,
|
||||
last_seen INTEGER,
|
||||
on_radio INTEGER DEFAULT 0,
|
||||
last_contacted INTEGER,
|
||||
first_seen INTEGER
|
||||
)
|
||||
""")
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contacts (
|
||||
public_key, name, last_path, last_path_len, out_path_hash_mode, first_seen
|
||||
) VALUES (?, ?, ?, ?, ?, ?), (?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"cc" * 32,
|
||||
"Multi",
|
||||
"aa00bb00",
|
||||
2,
|
||||
1,
|
||||
1000,
|
||||
"dd" * 32,
|
||||
"Flood",
|
||||
"",
|
||||
-1,
|
||||
0,
|
||||
1001,
|
||||
),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 38
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT public_key, direct_path, direct_path_len, direct_path_hash_mode
|
||||
FROM contacts
|
||||
WHERE public_key IN (?, ?)
|
||||
ORDER BY public_key
|
||||
""",
|
||||
("cc" * 32, "dd" * 32),
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
assert rows[0]["public_key"] == "cc" * 32
|
||||
assert rows[0]["direct_path"] is None
|
||||
assert rows[0]["direct_path_len"] is None
|
||||
assert rows[0]["direct_path_hash_mode"] is None
|
||||
assert rows[1]["public_key"] == "dd" * 32
|
||||
assert rows[1]["direct_path"] is None
|
||||
assert rows[1]["direct_path_len"] is None
|
||||
assert rows[1]["direct_path_hash_mode"] is None
|
||||
finally:
|
||||
await conn.close()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_existing_direct_route_columns_are_preserved(self):
|
||||
"""Already-migrated databases keep canonical direct-route data intact."""
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 44)
|
||||
await conn.execute("""
|
||||
CREATE TABLE contacts (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
type INTEGER DEFAULT 0,
|
||||
flags INTEGER DEFAULT 0,
|
||||
direct_path TEXT,
|
||||
direct_path_len INTEGER,
|
||||
direct_path_hash_mode INTEGER,
|
||||
direct_path_updated_at INTEGER,
|
||||
route_override_path TEXT,
|
||||
route_override_len INTEGER,
|
||||
route_override_hash_mode INTEGER,
|
||||
last_advert INTEGER,
|
||||
lat REAL,
|
||||
lon REAL,
|
||||
last_seen INTEGER,
|
||||
on_radio INTEGER DEFAULT 0,
|
||||
last_contacted INTEGER,
|
||||
first_seen INTEGER,
|
||||
last_read_at INTEGER
|
||||
)
|
||||
""")
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contacts (
|
||||
public_key, name, direct_path, direct_path_len, direct_path_hash_mode,
|
||||
direct_path_updated_at, last_seen
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
("ee" * 32, "Direct", "aa00bb00", 2, 1, 123456, 123457),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 44
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT direct_path, direct_path_len, direct_path_hash_mode, direct_path_updated_at
|
||||
FROM contacts
|
||||
WHERE public_key = ?
|
||||
""",
|
||||
("ee" * 32,),
|
||||
)
|
||||
row = await cursor.fetchone()
|
||||
assert row["direct_path"] == "aa00bb00"
|
||||
assert row["direct_path_len"] == 2
|
||||
assert row["direct_path_hash_mode"] == 1
|
||||
assert row["direct_path_updated_at"] == 123456
|
||||
finally:
|
||||
await conn.close()
|
||||
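One way to satisfy these assertions, sketched under the assumption that whichever migration introduces the direct-route columns adds them as plain nullable columns and deliberately does not backfill them from the legacy advert-derived fields (the column names come from the assertions above; the control flow is a guess):

import aiosqlite


async def migrate(conn: aiosqlite.Connection) -> None:
    for ddl in (
        "ALTER TABLE contacts ADD COLUMN direct_path TEXT",
        "ALTER TABLE contacts ADD COLUMN direct_path_len INTEGER",
        "ALTER TABLE contacts ADD COLUMN direct_path_hash_mode INTEGER",
        "ALTER TABLE contacts ADD COLUMN direct_path_updated_at INTEGER",
    ):
        try:
            await conn.execute(ddl)
        except aiosqlite.OperationalError as e:
            if "duplicate column name" not in str(e).lower():
                raise
    # Intentionally no backfill from last_path / out_path_hash_mode: legacy
    # advert paths must not become active direct routes, per the tests above.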
72
tests/test_migrations/test_migration_040.py
Normal file
@@ -0,0 +1,72 @@
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import get_version, run_migrations, set_version
|
||||
|
||||
from tests.test_migrations.conftest import LATEST_SCHEMA_VERSION
|
||||
|
||||
class TestMigration040:
|
||||
"""Test migration 040: include path_len in advert-path identity."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_rebuilds_contact_advert_paths_to_distinguish_same_bytes_by_hop_count(self):
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 39)
|
||||
await conn.execute("""
|
||||
CREATE TABLE contact_advert_paths (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
path_hex TEXT NOT NULL,
|
||||
path_len INTEGER NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
heard_count INTEGER NOT NULL DEFAULT 1,
|
||||
UNIQUE(public_key, path_hex)
|
||||
)
|
||||
""")
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contact_advert_paths
|
||||
(public_key, path_hex, path_len, first_seen, last_seen, heard_count)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
("aa" * 32, "aa00", 1, 1000, 1001, 2),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 39
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contact_advert_paths
|
||||
(public_key, path_hex, path_len, first_seen, last_seen, heard_count)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
("aa" * 32, "aa00", 2, 1002, 1002, 1),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT path_hex, path_len, heard_count
|
||||
FROM contact_advert_paths
|
||||
WHERE public_key = ?
|
||||
ORDER BY path_len ASC
|
||||
""",
|
||||
("aa" * 32,),
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
assert [(row["path_hex"], row["path_len"], row["heard_count"]) for row in rows] == [
|
||||
("aa00", 1, 2),
|
||||
("aa00", 2, 1),
|
||||
]
|
||||
finally:
|
||||
await conn.close()
|
||||
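SQLite cannot widen a UNIQUE constraint in place, so distinguishing identical path bytes by hop count plausibly means rebuilding the table; a sketch (table and column names come from the test, the rebuild strategy is assumed):

import aiosqlite


async def migrate(conn: aiosqlite.Connection) -> None:
    await conn.execute("""
        CREATE TABLE contact_advert_paths_new (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            public_key TEXT NOT NULL,
            path_hex TEXT NOT NULL,
            path_len INTEGER NOT NULL,
            first_seen INTEGER NOT NULL,
            last_seen INTEGER NOT NULL,
            heard_count INTEGER NOT NULL DEFAULT 1,
            UNIQUE(public_key, path_hex, path_len)
        )
    """)
    # Widening the unique key cannot introduce conflicts, so a straight copy is safe.
    await conn.execute("""
        INSERT INTO contact_advert_paths_new
            (public_key, path_hex, path_len, first_seen, last_seen, heard_count)
        SELECT public_key, path_hex, path_len, first_seen, last_seen, heard_count
        FROM contact_advert_paths
    """)
    await conn.execute("DROP TABLE contact_advert_paths")
    await conn.execute(
        "ALTER TABLE contact_advert_paths_new RENAME TO contact_advert_paths"
    )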
71
tests/test_migrations/test_migration_041.py
Normal file
@@ -0,0 +1,71 @@
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import get_version, run_migrations, set_version
|
||||
|
||||
from tests.test_migrations.conftest import LATEST_SCHEMA_VERSION
|
||||
|
||||
class TestMigration041:
|
||||
"""Test migration 041: add nullable routing override columns."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_adds_contact_routing_override_columns(self):
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 40)
|
||||
await conn.execute("""
|
||||
CREATE TABLE contacts (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
type INTEGER DEFAULT 0,
|
||||
flags INTEGER DEFAULT 0,
|
||||
last_path TEXT,
|
||||
last_path_len INTEGER DEFAULT -1,
|
||||
out_path_hash_mode INTEGER DEFAULT 0,
|
||||
last_advert INTEGER,
|
||||
lat REAL,
|
||||
lon REAL,
|
||||
last_seen INTEGER,
|
||||
on_radio INTEGER DEFAULT 0,
|
||||
last_contacted INTEGER,
|
||||
first_seen INTEGER
|
||||
)
|
||||
""")
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 40
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contacts (
|
||||
public_key,
|
||||
route_override_path,
|
||||
route_override_len,
|
||||
route_override_hash_mode
|
||||
) VALUES (?, ?, ?, ?)
|
||||
""",
|
||||
("aa" * 32, "ae92f13e", 2, 1),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT route_override_path, route_override_len, route_override_hash_mode
|
||||
FROM contacts
|
||||
WHERE public_key = ?
|
||||
""",
|
||||
("aa" * 32,),
|
||||
)
|
||||
row = await cursor.fetchone()
|
||||
assert row["route_override_path"] == "ae92f13e"
|
||||
assert row["route_override_len"] == 2
|
||||
assert row["route_override_hash_mode"] == 1
|
||||
finally:
|
||||
await conn.close()
|
||||
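For column-add migrations like this one, PRAGMA table_info gives a cheap existence check; a hypothetical helper, not part of this commit:

import aiosqlite


async def has_column(conn: aiosqlite.Connection, table: str, column: str) -> bool:
    # PRAGMA table_info returns one row per column:
    # (cid, name, type, notnull, dflt_value, pk); index 1 is the column name.
    cursor = await conn.execute(f"PRAGMA table_info({table})")
    return any(row[1] == column for row in await cursor.fetchall())

A test could then assert, for example, await has_column(conn, "contacts", "route_override_path").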
52
tests/test_migrations/test_migration_042.py
Normal file
@@ -0,0 +1,52 @@
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import get_version, run_migrations, set_version
|
||||
|
||||
from tests.test_migrations.conftest import LATEST_SCHEMA_VERSION
|
||||
|
||||
class TestMigration042:
|
||||
"""Test migration 042: add channels.flood_scope_override."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_adds_channel_flood_scope_override_column(self):
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 41)
|
||||
await conn.execute("""
|
||||
CREATE TABLE channels (
|
||||
key TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
is_hashtag INTEGER DEFAULT 0,
|
||||
on_radio INTEGER DEFAULT 0
|
||||
)
|
||||
""")
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 41
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO channels (
|
||||
key, name, is_hashtag, on_radio, flood_scope_override
|
||||
) VALUES (?, ?, ?, ?, ?)
|
||||
""",
|
||||
("AA" * 16, "#flightless", 1, 0, "#Esperance"),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
cursor = await conn.execute(
|
||||
"SELECT flood_scope_override FROM channels WHERE key = ?",
|
||||
("AA" * 16,),
|
||||
)
|
||||
row = await cursor.fetchone()
|
||||
assert row["flood_scope_override"] == "#Esperance"
|
||||
finally:
|
||||
await conn.close()
|
||||
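A sketch of how the per-channel override could interact with the global flood_scope added in migration 034; the fallback relationship is an assumption, only the column names come from this diff. An empty or NULL override falls back to the app-wide value:

EFFECTIVE_FLOOD_SCOPE_SQL = """
SELECT COALESCE(NULLIF(ch.flood_scope_override, ''), s.flood_scope) AS scope
FROM channels AS ch, app_settings AS s
WHERE ch.key = ? AND s.id = 1
"""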
123
tests/test_migrations/test_migration_044.py
Normal file
@@ -0,0 +1,123 @@
"""Tests for database migration(s)."""
|
||||
|
||||
import json
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import run_migrations, set_version
|
||||
|
||||
class TestMigration044:
|
||||
"""Test migration 044: dedupe incoming direct messages."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_migration_merges_incoming_dm_duplicates_and_adds_index(self):
|
||||
"""Migration 44 collapses duplicate incoming DMs and re-links raw packets."""
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 43)
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE messages (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
type TEXT NOT NULL,
|
||||
conversation_key TEXT NOT NULL,
|
||||
text TEXT NOT NULL,
|
||||
sender_timestamp INTEGER,
|
||||
received_at INTEGER NOT NULL,
|
||||
paths TEXT,
|
||||
txt_type INTEGER DEFAULT 0,
|
||||
signature TEXT,
|
||||
outgoing INTEGER DEFAULT 0,
|
||||
acked INTEGER DEFAULT 0,
|
||||
sender_name TEXT,
|
||||
sender_key TEXT
|
||||
)
|
||||
"""
|
||||
)
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE raw_packets (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
timestamp INTEGER NOT NULL,
|
||||
data BLOB NOT NULL,
|
||||
message_id INTEGER
|
||||
)
|
||||
"""
|
||||
)
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO messages
|
||||
(id, type, conversation_key, text, sender_timestamp, received_at, paths,
|
||||
txt_type, signature, outgoing, acked, sender_name, sender_key)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(1, "PRIV", "abc123", "hello", 0, 1001, None, 0, None, 0, 0, None, "abc123"),
|
||||
)
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO messages
|
||||
(id, type, conversation_key, text, sender_timestamp, received_at, paths,
|
||||
txt_type, signature, outgoing, acked, sender_name, sender_key)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
2,
|
||||
"PRIV",
|
||||
"abc123",
|
||||
"hello",
|
||||
None,
|
||||
1002,
|
||||
json.dumps([{"path": "", "received_at": 1002, "path_len": 0}]),
|
||||
2,
|
||||
"abcd",
|
||||
0,
|
||||
0,
|
||||
"Alice",
|
||||
"abc123",
|
||||
),
|
||||
)
|
||||
await conn.execute(
|
||||
"INSERT INTO raw_packets (timestamp, data, message_id) VALUES (?, ?, ?)",
|
||||
(1001, b"pkt1", 1),
|
||||
)
|
||||
await conn.execute(
|
||||
"INSERT INTO raw_packets (timestamp, data, message_id) VALUES (?, ?, ?)",
|
||||
(1002, b"pkt2", 2),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
await run_migrations(conn)
|
||||
|
||||
cursor = await conn.execute("SELECT * FROM messages")
|
||||
rows = await cursor.fetchall()
|
||||
assert len(rows) == 1
|
||||
assert rows[0]["id"] == 1
|
||||
assert rows[0]["received_at"] == 1001
|
||||
assert rows[0]["signature"] == "abcd"
|
||||
assert rows[0]["txt_type"] == 2
|
||||
assert rows[0]["sender_name"] == "Alice"
|
||||
assert json.loads(rows[0]["paths"]) == [
|
||||
{"path": "", "received_at": 1002, "path_len": 0}
|
||||
]
|
||||
|
||||
cursor = await conn.execute("SELECT message_id FROM raw_packets ORDER BY id")
|
||||
assert [row["message_id"] for row in await cursor.fetchall()] == [1, 1]
|
||||
|
||||
cursor = await conn.execute(
|
||||
"INSERT OR IGNORE INTO messages (type, conversation_key, text, sender_timestamp, received_at, outgoing, sender_key) "
|
||||
"VALUES (?, ?, ?, ?, ?, ?, ?)",
|
||||
("PRIV", "abc123", "hello", 0, 9999, 0, "abc123"),
|
||||
)
|
||||
assert cursor.rowcount == 0
|
||||
|
||||
cursor = await conn.execute(
|
||||
"SELECT sql FROM sqlite_master WHERE name='idx_messages_incoming_priv_dedup'"
|
||||
)
|
||||
index_sql = (await cursor.fetchone())["sql"]
|
||||
assert "WHERE type = 'PRIV' AND outgoing = 0" in index_sql
|
||||
assert "sender_key" in index_sql
|
||||
finally:
|
||||
await conn.close()
|
||||
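The index the test inspects is a partial UNIQUE index, which is what makes the later INSERT OR IGNORE report rowcount == 0. A sketch consistent with the assertions; the exact column list is assumed, since only the WHERE clause and the presence of sender_key are pinned down above:

CREATE_INCOMING_PRIV_DEDUP_INDEX = """
CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_incoming_priv_dedup
ON messages(conversation_key, sender_key, text, COALESCE(sender_timestamp, 0))
WHERE type = 'PRIV' AND outgoing = 0
"""

COALESCE(sender_timestamp, 0) makes a NULL timestamp collide with an explicit 0, which is why the test's two seed rows (timestamps 0 and NULL) merge into one.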
181
tests/test_migrations/test_migration_046.py
Normal file
@@ -0,0 +1,181 @@
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import get_version, run_migrations, set_version
|
||||
|
||||
from tests.test_migrations.conftest import LATEST_SCHEMA_VERSION
|
||||
|
||||
class TestMigration046:
|
||||
"""Test migration 046: clean orphaned contact child rows."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_merges_uniquely_resolvable_orphans_and_drops_unresolved_ones(self):
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 45)
|
||||
await conn.execute("""
|
||||
CREATE TABLE contacts (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
name TEXT
|
||||
)
|
||||
""")
|
||||
await conn.execute("""
|
||||
CREATE TABLE contact_name_history (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
UNIQUE(public_key, name)
|
||||
)
|
||||
""")
|
||||
await conn.execute("""
|
||||
CREATE TABLE contact_advert_paths (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
path_hex TEXT NOT NULL,
|
||||
path_len INTEGER NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
heard_count INTEGER NOT NULL DEFAULT 1,
|
||||
UNIQUE(public_key, path_hex, path_len)
|
||||
)
|
||||
""")
|
||||
|
||||
resolved_prefix = "abc123"
|
||||
resolved_key = resolved_prefix + ("00" * 29)
|
||||
ambiguous_prefix = "deadbe"
|
||||
ambiguous_key_a = ambiguous_prefix + ("11" * 29)
|
||||
ambiguous_key_b = ambiguous_prefix + ("22" * 29)
|
||||
dead_prefix = "ffffaa"
|
||||
|
||||
await conn.execute(
|
||||
"INSERT INTO contacts (public_key, name) VALUES (?, ?), (?, ?), (?, ?)",
|
||||
(
|
||||
resolved_key,
|
||||
"Resolved Sender",
|
||||
ambiguous_key_a,
|
||||
"Ambiguous A",
|
||||
ambiguous_key_b,
|
||||
"Ambiguous B",
|
||||
),
|
||||
)
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contact_name_history (public_key, name, first_seen, last_seen)
|
||||
VALUES (?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
resolved_key,
|
||||
"Resolved Sender",
|
||||
900,
|
||||
905,
|
||||
resolved_prefix,
|
||||
"Prefix Sender",
|
||||
1000,
|
||||
1010,
|
||||
ambiguous_prefix,
|
||||
"Ambiguous Prefix",
|
||||
1100,
|
||||
1110,
|
||||
),
|
||||
)
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO contact_advert_paths
|
||||
(public_key, path_hex, path_len, first_seen, last_seen, heard_count)
|
||||
VALUES
|
||||
(?, ?, ?, ?, ?, ?),
|
||||
(?, ?, ?, ?, ?, ?),
|
||||
(?, ?, ?, ?, ?, ?),
|
||||
(?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
resolved_key,
|
||||
"1122",
|
||||
1,
|
||||
950,
|
||||
960,
|
||||
2,
|
||||
resolved_prefix,
|
||||
"1122",
|
||||
1,
|
||||
1001,
|
||||
1002,
|
||||
3,
|
||||
ambiguous_prefix,
|
||||
"3344",
|
||||
2,
|
||||
1200,
|
||||
1201,
|
||||
1,
|
||||
dead_prefix,
|
||||
"5566",
|
||||
1,
|
||||
1300,
|
||||
1301,
|
||||
1,
|
||||
),
|
||||
)
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 45
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT name, first_seen, last_seen
|
||||
FROM contact_name_history
|
||||
WHERE public_key = ?
|
||||
ORDER BY name
|
||||
""",
|
||||
(resolved_key,),
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
assert [(row["name"], row["first_seen"], row["last_seen"]) for row in rows] == [
|
||||
("Prefix Sender", 1000, 1010),
|
||||
("Resolved Sender", 900, 905),
|
||||
]
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT path_hex, path_len, first_seen, last_seen, heard_count
|
||||
FROM contact_advert_paths
|
||||
WHERE public_key = ?
|
||||
ORDER BY path_hex, path_len
|
||||
""",
|
||||
(resolved_key,),
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
assert [
|
||||
(
|
||||
row["path_hex"],
|
||||
row["path_len"],
|
||||
row["first_seen"],
|
||||
row["last_seen"],
|
||||
row["heard_count"],
|
||||
)
|
||||
for row in rows
|
||||
] == [
|
||||
("1122", 1, 950, 1002, 5),
|
||||
]
|
||||
|
||||
for orphan_key in (resolved_prefix, ambiguous_prefix, dead_prefix):
|
||||
cursor = await conn.execute(
|
||||
"SELECT COUNT(*) FROM contact_name_history WHERE public_key = ?",
|
||||
(orphan_key,),
|
||||
)
|
||||
assert (await cursor.fetchone())[0] == 0
|
||||
cursor = await conn.execute(
|
||||
"SELECT COUNT(*) FROM contact_advert_paths WHERE public_key = ?",
|
||||
(orphan_key,),
|
||||
)
|
||||
assert (await cursor.fetchone())[0] == 0
|
||||
finally:
|
||||
await conn.close()
|
||||
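The resolvable/ambiguous/dead split in this test suggests orphans are re-keyed only when exactly one full contact key starts with the orphaned prefix. A sketch of that lookup; the merge step, which the test shows combining rows via min(first_seen), max(last_seen), and summed heard_count, is omitted and assumed:

FIND_UNIQUELY_RESOLVABLE_ORPHANS = """
SELECT o.public_key AS orphan_key, MIN(c.public_key) AS full_key
FROM (
    SELECT DISTINCT public_key FROM contact_name_history
    WHERE length(public_key) < 64
) AS o
JOIN contacts AS c ON c.public_key LIKE o.public_key || '%'
GROUP BY o.public_key
HAVING COUNT(c.public_key) = 1
"""

The ambiguous prefix matches two contacts and fails the HAVING clause; the dead prefix matches none and drops out of the join; both are then deleted rather than re-keyed, which matches the zero counts asserted above.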
72
tests/test_migrations/test_migration_047.py
Normal file
@@ -0,0 +1,72 @@
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
import aiosqlite
|
||||
import pytest
|
||||
|
||||
from app.migrations import get_version, run_migrations, set_version
|
||||
|
||||
from tests.test_migrations.conftest import LATEST_SCHEMA_VERSION
|
||||
|
||||
class TestMigration047:
|
||||
"""Test migration 047: add statistics indexes."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_adds_statistics_indexes(self):
|
||||
conn = await aiosqlite.connect(":memory:")
|
||||
conn.row_factory = aiosqlite.Row
|
||||
try:
|
||||
await set_version(conn, 46)
|
||||
await conn.execute("""
|
||||
CREATE TABLE contacts (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
type INTEGER DEFAULT 0,
|
||||
last_seen INTEGER
|
||||
)
|
||||
""")
|
||||
await conn.execute("""
|
||||
CREATE TABLE messages (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
type TEXT NOT NULL,
|
||||
conversation_key TEXT NOT NULL,
|
||||
received_at INTEGER NOT NULL
|
||||
)
|
||||
""")
|
||||
await conn.execute("""
|
||||
CREATE TABLE raw_packets (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
timestamp INTEGER NOT NULL,
|
||||
data BLOB NOT NULL,
|
||||
message_id INTEGER,
|
||||
payload_hash BLOB
|
||||
)
|
||||
""")
|
||||
await conn.commit()
|
||||
|
||||
applied = await run_migrations(conn)
|
||||
|
||||
assert applied == LATEST_SCHEMA_VERSION - 46
|
||||
assert await get_version(conn) == LATEST_SCHEMA_VERSION
|
||||
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT name
|
||||
FROM sqlite_master
|
||||
WHERE type = 'index'
|
||||
AND name IN (
|
||||
'idx_raw_packets_timestamp',
|
||||
'idx_contacts_type_last_seen',
|
||||
'idx_messages_type_received_conversation'
|
||||
)
|
||||
ORDER BY name
|
||||
"""
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
assert [row["name"] for row in rows] == [
|
||||
"idx_contacts_type_last_seen",
|
||||
"idx_messages_type_received_conversation",
|
||||
"idx_raw_packets_timestamp",
|
||||
]
|
||||
finally:
|
||||
await conn.close()
|
||||
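To see these indexes doing work rather than merely existing, EXPLAIN QUERY PLAN can be run against the statistics queries; a hypothetical check (the query shape and database path are assumptions, only the index name comes from the test):

import asyncio

import aiosqlite


async def show_plan(db_path: str) -> None:
    async with aiosqlite.connect(db_path) as conn:
        cursor = await conn.execute(
            "EXPLAIN QUERY PLAN "
            "SELECT COUNT(*) FROM raw_packets WHERE timestamp > ?",
            (0,),
        )
        for row in await cursor.fetchall():
            # On typical data the plan should mention idx_raw_packets_timestamp.
            print(row)


if __name__ == "__main__":
    asyncio.run(show_plan("meshcore.db"))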
23
tests/test_migrations/test_migration_helpers.py
Normal file
@@ -0,0 +1,23 @@
"""Tests for database migration(s)."""
|
||||
|
||||
|
||||
class TestMigrationPacketHelpers:
|
||||
"""Test migration-local packet helpers against canonical path validation."""
|
||||
|
||||
def test_extract_payload_for_hash_rejects_oversize_path(self):
|
||||
from app.migrations._005_backfill_payload_hashes import _extract_payload_for_hash
|
||||
|
||||
packet = bytes([0x15, 0xBF]) + bytes(189) + b"payload"
|
||||
assert _extract_payload_for_hash(packet) is None
|
||||
|
||||
def test_extract_payload_for_hash_rejects_no_payload_packet(self):
|
||||
from app.migrations._005_backfill_payload_hashes import _extract_payload_for_hash
|
||||
|
||||
packet = bytes([0x15, 0x02, 0xAA, 0xBB])
|
||||
assert _extract_payload_for_hash(packet) is None
|
||||
|
||||
def test_extract_path_from_packet_rejects_reserved_mode(self):
|
||||
from app.migrations._007_backfill_message_paths import _extract_path_from_packet
|
||||
|
||||
packet = bytes([0x15, 0xC1, 0xAA, 0xBB, 0xCC])
|
||||
assert _extract_path_from_packet(packet) is None
|
||||