Files
autocoder/progress.py
Auto 0736b5ec6b fix: address critical issues in PR #75 agent scheduling feature
This commit fixes several issues identified in the agent scheduling
feature from PR #75:

Frontend Fixes:
- Add day boundary handling in timeUtils.ts for timezone conversions
- Add utcToLocalWithDayShift/localToUTCWithDayShift functions
- Add shiftDaysForward/shiftDaysBackward helpers for bitfield adjustment
- Update ScheduleModal to correctly adjust days_of_week when crossing
  day boundaries during UTC conversion (fixes schedules running on
  wrong days for users in extreme timezones like UTC+9)

Backend Fixes:
- Add MAX_SCHEDULES_PER_PROJECT (50) limit to prevent resource exhaustion
- Wire up crash recovery callback in scheduler_service._start_agent()
- Convert schedules.py endpoints to use context manager for DB sessions
- Fix race condition in override creation with atomic delete-then-create
- Replace deprecated datetime.utcnow with datetime.now(timezone.utc)
- Add DB-level CHECK constraints for Schedule model fields

Files Modified:
- api/database.py: Add _utc_now helper, CheckConstraint imports, constraints
- progress.py: Replace deprecated datetime.utcnow
- server/routers/schedules.py: Add context manager, schedule limits
- server/services/assistant_database.py: Replace deprecated datetime.utcnow
- server/services/scheduler_service.py: Wire crash recovery, fix race condition
- ui/src/components/ScheduleModal.tsx: Use day shift functions
- ui/src/lib/timeUtils.ts: Add day boundary handling functions

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-22 08:35:57 +02:00

224 lines
7.4 KiB
Python

"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress of the autonomous coding agent.
Uses direct SQLite access for database queries.
"""
import json
import os
import sqlite3
import urllib.request
from contextlib import closing
from datetime import datetime, timezone
from pathlib import Path
WEBHOOK_URL = os.environ.get("PROGRESS_N8N_WEBHOOK_URL")
PROGRESS_CACHE_FILE = ".progress_cache"
def has_features(project_dir: Path) -> bool:
    """
    Check if the project has features in the database.

    This is used to determine if the initializer agent needs to run.
    We check the database directly (not via API) since the API server
    may not be running yet when this check is performed.

    Args:
        project_dir: Directory containing the project

    Returns True if:
    - features.db exists AND has at least 1 feature, OR
    - feature_list.json exists (legacy format)

    Returns False if no features exist (initializer needs to run).
    """
    # Legacy JSON format: its mere presence means the project was
    # already initialized.
    json_file = project_dir / "feature_list.json"
    if json_file.exists():
        return True

    # Check SQLite database
    db_file = project_dir / "features.db"
    if not db_file.exists():
        return False

    try:
        # closing() guarantees the connection is released even when the
        # query raises (the original leaked it on that path, and also
        # re-imported sqlite3 locally despite the module-level import).
        with closing(sqlite3.connect(db_file)) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT COUNT(*) FROM features")
            count = cursor.fetchone()[0]
        return count > 0
    except Exception:
        # Database exists but can't be read or has no features table
        return False
def count_passing_tests(project_dir: Path) -> tuple[int, int, int]:
    """
    Count passing, in_progress, and total tests via direct database access.

    Args:
        project_dir: Directory containing the project

    Returns:
        (passing_count, in_progress_count, total_count); all zeros when
        the database is missing or unreadable.
    """
    db_file = project_dir / "features.db"
    if not db_file.exists():
        return 0, 0, 0
    try:
        # closing() ensures the connection is released even when a query
        # raises (the original only closed it on the success path).
        with closing(sqlite3.connect(db_file)) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT COUNT(*) FROM features")
            total = cursor.fetchone()[0]
            cursor.execute("SELECT COUNT(*) FROM features WHERE passes = 1")
            passing = cursor.fetchone()[0]
            # Handle case where in_progress column doesn't exist yet
            # (older databases predate it).
            try:
                cursor.execute(
                    "SELECT COUNT(*) FROM features WHERE in_progress = 1"
                )
                in_progress = cursor.fetchone()[0]
            except sqlite3.OperationalError:
                in_progress = 0
        return passing, in_progress, total
    except Exception as e:
        print(f"[Database error in count_passing_tests: {e}]")
        return 0, 0, 0
def get_all_passing_features(project_dir: Path) -> list[dict]:
    """
    Get all passing features for webhook notifications.

    Args:
        project_dir: Directory containing the project

    Returns:
        List of dicts with id, category, name for each passing feature,
        ordered by ascending priority; empty list on any failure.
    """
    db_file = project_dir / "features.db"
    if not db_file.exists():
        return []
    try:
        # closing() ensures the connection is released even when the
        # query raises (the original leaked it on that path).
        with closing(sqlite3.connect(db_file)) as conn:
            cursor = conn.cursor()
            cursor.execute(
                "SELECT id, category, name FROM features WHERE passes = 1 ORDER BY priority ASC"
            )
            rows = cursor.fetchall()
        return [
            {"id": row[0], "category": row[1], "name": row[2]}
            for row in rows
        ]
    except Exception:
        return []
def send_progress_webhook(passing: int, total: int, project_dir: Path) -> None:
    """Send webhook notification when progress increases.

    Compares the current passing count against a small JSON cache stored
    in the project directory; when the count grows, POSTs a summary
    payload (including which tests newly pass) to the configured n8n
    webhook and refreshes the cache. A no-op when
    PROGRESS_N8N_WEBHOOK_URL is unset.

    Args:
        passing: Number of currently passing tests.
        total: Total number of tests.
        project_dir: Directory containing the project (and the cache file).
    """
    if not WEBHOOK_URL:
        return  # Webhook not configured

    cache_file = project_dir / PROGRESS_CACHE_FILE
    previous = 0
    previous_passing_ids = set()

    # Read previous progress and passing feature IDs
    if cache_file.exists():
        try:
            cache_data = json.loads(cache_file.read_text())
            previous = cache_data.get("count", 0)
            previous_passing_ids = set(cache_data.get("passing_ids", []))
        except Exception:
            previous = 0

    # Only notify if progress increased
    if passing > previous:
        completed_tests = []
        current_passing_ids = []

        # Detect transition from old cache format (had count but no
        # passing_ids). In this case, we can't reliably identify which
        # specific tests are new.
        is_old_cache_format = len(previous_passing_ids) == 0 and previous > 0

        # Get all passing features via direct database access
        all_passing = get_all_passing_features(project_dir)
        for feature in all_passing:
            feature_id = feature.get("id")
            current_passing_ids.append(feature_id)
            # Only identify individual new tests if we have previous IDs
            # to compare against.
            if not is_old_cache_format and feature_id not in previous_passing_ids:
                # This feature is newly passing
                name = feature.get("name", f"Feature #{feature_id}")
                category = feature.get("category", "")
                if category:
                    completed_tests.append(f"{category} {name}")
                else:
                    completed_tests.append(name)

        payload = {
            "event": "test_progress",
            "passing": passing,
            "total": total,
            "percentage": round((passing / total) * 100, 1) if total > 0 else 0,
            "previous_passing": previous,
            "tests_completed_this_session": passing - previous,
            "completed_tests": completed_tests,
            "project": project_dir.name,
            "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
        }
        try:
            req = urllib.request.Request(
                WEBHOOK_URL,
                data=json.dumps([payload]).encode("utf-8"),  # n8n expects array
                headers={"Content-Type": "application/json"},
            )
            # Use the response as a context manager so the underlying
            # socket is closed (the original discarded the response
            # object, leaking the connection).
            with urllib.request.urlopen(req, timeout=5):
                pass
        except Exception as e:
            print(f"[Webhook notification failed: {e}]")

        # Update cache with count and passing IDs
        cache_file.write_text(
            json.dumps({"count": passing, "passing_ids": current_passing_ids})
        )
    else:
        # Update cache even if no change (for initial state)
        if not cache_file.exists():
            all_passing = get_all_passing_features(project_dir)
            current_passing_ids = [f.get("id") for f in all_passing]
            cache_file.write_text(
                json.dumps({"count": passing, "passing_ids": current_passing_ids})
            )
def print_session_header(session_num: int, is_initializer: bool) -> None:
    """Print a banner announcing the session number and agent type."""
    label = "INITIALIZER" if is_initializer else "CODING AGENT"
    bar = "=" * 70
    print(f"\n{bar}")
    print(f" SESSION {session_num}: {label}")
    print(bar)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print current pass/total stats and trigger the progress webhook."""
    passing, in_progress, total = count_passing_tests(project_dir)
    if total == 0:
        print("\nProgress: No features in database yet")
        return
    pct = (passing / total) * 100
    parts = [f"{passing}/{total} tests passing ({pct:.1f}%)"]
    if in_progress > 0:
        parts.append(f"{in_progress} in progress")
    print(f"\nProgress: {', '.join(parts)}")
    send_progress_webhook(passing, total, project_dir)