mirror of https://github.com/leonvanzyl/autocoder.git, synced 2026-02-01 23:13:36 +00:00
fix: prevent SQLite corruption in parallel mode with atomic operations
Replace ineffective threading.Lock() with atomic SQL operations for cross-process safety.

Key changes:
- Add SQLAlchemy event hooks (do_connect/do_begin) for BEGIN IMMEDIATE transactions in api/database.py
- Add atomic_transaction() context manager for multi-statement ops
- Convert all feature MCP write operations to atomic UPDATE...WHERE with compare-and-swap patterns (feature_claim, mark_passing, etc.)
- Add WHERE passes=0 state guard to feature_mark_passing
- Add WAL checkpoint on shutdown and idempotent cleanup() in parallel_orchestrator.py with async-safe signal handling
- Wrap SQLite connections with contextlib.closing() in progress.py
- Add thread-safe engine cache with double-checked locking in assistant_database.py
- Migrate to SQLAlchemy 2.0 DeclarativeBase across all modules

Inspired by PR #108 (cabana8471-arch), with fixes for nested BEGIN EXCLUSIVE bug and missing state guards.

Closes #106

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
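The dialect-level hooks named above (do_connect/do_begin) correspond to a documented SQLAlchemy recipe for pysqlite. A minimal sketch of the same idea via the public event API; the engine URL is an assumption, and the actual code in api/database.py may differ:

from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///features.db")  # URL assumed for illustration

@event.listens_for(engine, "connect")
def _do_connect(dbapi_connection, connection_record):
    # Stop pysqlite from emitting its own implicit BEGIN so transaction
    # start is fully under our control.
    dbapi_connection.isolation_level = None

@event.listens_for(engine, "begin")
def _do_begin(conn):
    # BEGIN IMMEDIATE takes the write lock at transaction start, so
    # concurrent writers queue up front instead of failing with
    # "database is locked" partway through a transaction.
    conn.exec_driver_sql("BEGIN IMMEDIATE")

The compare-and-swap conversion described for the feature MCP tools could plausibly look like the sketch below. Only the features table and its passes/in_progress columns appear in this diff; the claimed_by column and the function signature are assumptions:

import sqlite3

def feature_claim(conn: sqlite3.Connection, feature_id: int, agent_id: str) -> bool:
    # The WHERE clause is the "compare", the UPDATE is the "swap": only
    # one process can flip in_progress from 0 to 1 for a given row.
    cursor = conn.execute(
        "UPDATE features SET in_progress = 1, claimed_by = ? "
        "WHERE id = ? AND in_progress = 0 AND passes = 0",
        (agent_id, feature_id),
    )
    conn.commit()
    # rowcount == 0 means another worker claimed the feature first.
    return cursor.rowcount == 1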
progress.py | 102
@@ -10,12 +10,21 @@ import json
 import os
 import sqlite3
 import urllib.request
+from contextlib import closing
 from datetime import datetime, timezone
 from pathlib import Path
 
 WEBHOOK_URL = os.environ.get("PROGRESS_N8N_WEBHOOK_URL")
 PROGRESS_CACHE_FILE = ".progress_cache"
 
+# SQLite connection settings for parallel mode safety
+SQLITE_TIMEOUT = 30  # seconds to wait for locks
+
+
+def _get_connection(db_file: Path) -> sqlite3.Connection:
+    """Get a SQLite connection with proper timeout settings for parallel mode."""
+    return sqlite3.connect(db_file, timeout=SQLITE_TIMEOUT)
+
 
 def has_features(project_dir: Path) -> bool:
     """
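For context on the new helper: sqlite3's timeout argument maps onto SQLite's busy timeout, so timeout=30 makes the driver retry for up to 30 seconds while another process holds the write lock, instead of raising "database is locked" after the default 5 seconds. A minimal usage sketch, with the database file name assumed:

from contextlib import closing
from pathlib import Path

with closing(_get_connection(Path("features.db"))) as conn:
    cursor = conn.cursor()
    cursor.execute("SELECT COUNT(*) FROM features")
    print(cursor.fetchone()[0])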
@@ -31,8 +40,6 @@ def has_features(project_dir: Path) -> bool:
 
     Returns False if no features exist (initializer needs to run).
     """
-    import sqlite3
-
     # Check legacy JSON file first
     json_file = project_dir / "feature_list.json"
     if json_file.exists():
@@ -44,12 +51,11 @@ def has_features(project_dir: Path) -> bool:
         return False
 
     try:
-        conn = sqlite3.connect(db_file)
-        cursor = conn.cursor()
-        cursor.execute("SELECT COUNT(*) FROM features")
-        count = cursor.fetchone()[0]
-        conn.close()
-        return count > 0
+        with closing(_get_connection(db_file)) as conn:
+            cursor = conn.cursor()
+            cursor.execute("SELECT COUNT(*) FROM features")
+            count: int = cursor.fetchone()[0]
+            return bool(count > 0)
     except Exception:
         # Database exists but can't be read or has no features table
         return False
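A note on the design choice here: a sqlite3.Connection can itself be used as a context manager, but that only wraps a transaction (commit on success, rollback on error); it never closes the connection. contextlib.closing() is what guarantees close() on every exit path. A small illustration of the difference:

import sqlite3
from contextlib import closing

conn = sqlite3.connect(":memory:")
with conn:
    conn.execute("CREATE TABLE t (x)")
# `with conn:` committed, but the handle is still open; in parallel
# mode a leaked handle can keep holding SQLite's locks.
assert conn.execute("SELECT 1").fetchone() == (1,)

with closing(sqlite3.connect(":memory:")) as conn2:
    conn2.execute("CREATE TABLE t (x)")
# conn2 is closed here; any further use raises ProgrammingError.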
@@ -70,36 +76,35 @@ def count_passing_tests(project_dir: Path) -> tuple[int, int, int]:
         return 0, 0, 0
 
     try:
-        conn = sqlite3.connect(db_file)
-        cursor = conn.cursor()
-        # Single aggregate query instead of 3 separate COUNT queries
-        # Handle case where in_progress column doesn't exist yet (legacy DBs)
-        try:
-            cursor.execute("""
-                SELECT
-                    COUNT(*) as total,
-                    SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing,
-                    SUM(CASE WHEN in_progress = 1 THEN 1 ELSE 0 END) as in_progress
-                FROM features
-            """)
-            row = cursor.fetchone()
-            total = row[0] or 0
-            passing = row[1] or 0
-            in_progress = row[2] or 0
-        except sqlite3.OperationalError:
-            # Fallback for databases without in_progress column
-            cursor.execute("""
-                SELECT
-                    COUNT(*) as total,
-                    SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing
-                FROM features
-            """)
-            row = cursor.fetchone()
-            total = row[0] or 0
-            passing = row[1] or 0
-            in_progress = 0
-        conn.close()
-        return passing, in_progress, total
+        with closing(_get_connection(db_file)) as conn:
+            cursor = conn.cursor()
+            # Single aggregate query instead of 3 separate COUNT queries
+            # Handle case where in_progress column doesn't exist yet (legacy DBs)
+            try:
+                cursor.execute("""
+                    SELECT
+                        COUNT(*) as total,
+                        SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing,
+                        SUM(CASE WHEN in_progress = 1 THEN 1 ELSE 0 END) as in_progress
+                    FROM features
+                """)
+                row = cursor.fetchone()
+                total = row[0] or 0
+                passing = row[1] or 0
+                in_progress = row[2] or 0
+            except sqlite3.OperationalError:
+                # Fallback for databases without in_progress column
+                cursor.execute("""
+                    SELECT
+                        COUNT(*) as total,
+                        SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing
+                    FROM features
+                """)
+                row = cursor.fetchone()
+                total = row[0] or 0
+                passing = row[1] or 0
+                in_progress = 0
+            return passing, in_progress, total
     except Exception as e:
         print(f"[Database error in count_passing_tests: {e}]")
         return 0, 0, 0
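The OperationalError fallback above is an EAFP probe for the in_progress column. An equivalent look-before-you-leap check, shown only as a hypothetical alternative, would query the schema directly; the try/except form keeps the common case to a single round trip:

def _has_column(conn, table: str, column: str) -> bool:
    # PRAGMA table_info yields one row per column; the name is field 1.
    rows = conn.execute(f"PRAGMA table_info({table})").fetchall()
    return any(row[1] == column for row in rows)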
@@ -120,17 +125,16 @@ def get_all_passing_features(project_dir: Path) -> list[dict]:
         return []
 
     try:
-        conn = sqlite3.connect(db_file)
-        cursor = conn.cursor()
-        cursor.execute(
-            "SELECT id, category, name FROM features WHERE passes = 1 ORDER BY priority ASC"
-        )
-        features = [
-            {"id": row[0], "category": row[1], "name": row[2]}
-            for row in cursor.fetchall()
-        ]
-        conn.close()
-        return features
+        with closing(_get_connection(db_file)) as conn:
+            cursor = conn.cursor()
+            cursor.execute(
+                "SELECT id, category, name FROM features WHERE passes = 1 ORDER BY priority ASC"
+            )
+            features = [
+                {"id": row[0], "category": row[1], "name": row[2]}
+                for row in cursor.fetchall()
+            ]
+            return features
     except Exception:
         return []
 