Files
autocoder/progress.py
Caitlyn Byrne 656df0fd9a feat: add "blocked for human input" feature across full stack
Agents can now request structured human input when they encounter
genuine blockers (API keys, design choices, external configs). The
request is displayed in the UI with a dynamic form, and the human's
response is stored and made available when the agent resumes.

Changes span 21 files + 1 new component:
- Database: 3 new columns (needs_human_input, human_input_request,
  human_input_response) with migration
- MCP: new feature_request_human_input tool + guards on existing tools
- API: new resolve-human-input endpoint, 4th feature bucket
- Orchestrator: skip needs_human_input features in scheduling
- Progress: 4-tuple return from count_passing_tests
- WebSocket: needs_human_input count in progress messages
- UI: conditional 4th Kanban column, HumanInputForm component,
  amber status indicators, dependency graph support

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 14:11:35 -05:00

265 lines
9.6 KiB
Python

"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress of the autonomous coding agent.
Uses direct SQLite access for database queries.
"""
import json
import os
import sqlite3
import urllib.request
from contextlib import closing
from datetime import datetime, timezone
from pathlib import Path
WEBHOOK_URL = os.environ.get("PROGRESS_N8N_WEBHOOK_URL")
PROGRESS_CACHE_FILE = ".progress_cache"
# SQLite connection settings for parallel mode safety
SQLITE_TIMEOUT = 30 # seconds to wait for locks
def _get_connection(db_file: Path) -> sqlite3.Connection:
"""Get a SQLite connection with proper timeout settings for parallel mode."""
return sqlite3.connect(db_file, timeout=SQLITE_TIMEOUT)
def has_features(project_dir: Path) -> bool:
    """
    Check if the project has features in the database.

    This is used to determine if the initializer agent needs to run.
    We check the database directly (not via API) since the API server
    may not be running yet when this check is performed.

    Args:
        project_dir: Directory containing the project

    Returns True if:
    - features.db exists AND has at least 1 feature, OR
    - feature_list.json exists (legacy format)

    Returns False if no features exist (initializer needs to run).
    """
    # Check legacy JSON file first: its mere existence counts as "has features"
    json_file = project_dir / "feature_list.json"
    if json_file.exists():
        return True

    # Check SQLite database
    from autoforge_paths import get_features_db_path

    db_file = get_features_db_path(project_dir)
    if not db_file.exists():
        return False
    try:
        with closing(_get_connection(db_file)) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT COUNT(*) FROM features")
            count: int = cursor.fetchone()[0]
            # Fixed: `count > 0` is already a bool; the bool() wrapper was redundant
            return count > 0
    except Exception:
        # Database exists but can't be read or has no features table;
        # treat it as "no features" so the initializer runs.
        return False
# Status columns to aggregate, newest schema first. Legacy databases may
# lack in_progress and/or needs_human_input, so count_passing_tests falls
# back through these variants until a query succeeds.
_SCHEMA_VARIANTS: tuple[tuple[str, ...], ...] = (
    ("passes", "in_progress", "needs_human_input"),
    ("passes", "in_progress"),
    ("passes",),
)


def count_passing_tests(project_dir: Path) -> tuple[int, int, int, int]:
    """
    Count passing, in_progress, total, and needs_human_input tests via direct database access.

    Args:
        project_dir: Directory containing the project

    Returns:
        (passing_count, in_progress_count, total_count, needs_human_input_count)
        All zeros if the database is missing or unreadable.
    """
    from autoforge_paths import get_features_db_path

    db_file = get_features_db_path(project_dir)
    if not db_file.exists():
        return 0, 0, 0, 0
    try:
        with closing(_get_connection(db_file)) as conn:
            cursor = conn.cursor()
            row = None
            columns: tuple[str, ...] = ()
            # Try each schema variant (single aggregate query instead of
            # separate COUNT queries); older databases raise
            # OperationalError on missing columns, so we retry with fewer.
            for i, columns in enumerate(_SCHEMA_VARIANTS):
                sums = ", ".join(
                    f"SUM(CASE WHEN {col} = 1 THEN 1 ELSE 0 END)"
                    for col in columns
                )
                try:
                    cursor.execute(f"SELECT COUNT(*), {sums} FROM features")
                except sqlite3.OperationalError:
                    if i == len(_SCHEMA_VARIANTS) - 1:
                        raise  # even the minimal schema failed; report below
                    continue
                row = cursor.fetchone()
                break
            total = row[0] or 0
            # SUM over an empty table yields NULL, hence the `or 0` guards.
            by_column = {
                col: (row[idx + 1] or 0) for idx, col in enumerate(columns)
            }
            return (
                by_column.get("passes", 0),
                by_column.get("in_progress", 0),
                total,
                by_column.get("needs_human_input", 0),
            )
    except Exception as e:
        print(f"[Database error in count_passing_tests: {e}]")
        return 0, 0, 0, 0
def get_all_passing_features(project_dir: Path) -> list[dict]:
    """
    Get all passing features for webhook notifications.

    Args:
        project_dir: Directory containing the project

    Returns:
        List of dicts with id, category, name for each passing feature,
        ordered by priority. Empty list if the database is missing or
        unreadable.
    """
    from autoforge_paths import get_features_db_path

    db_file = get_features_db_path(project_dir)
    if not db_file.exists():
        return []

    query = (
        "SELECT id, category, name FROM features "
        "WHERE passes = 1 ORDER BY priority ASC"
    )
    keys = ("id", "category", "name")
    try:
        with closing(_get_connection(db_file)) as conn:
            rows = conn.cursor().execute(query).fetchall()
            return [dict(zip(keys, row)) for row in rows]
    except Exception:
        # Best-effort: an unreadable database simply yields no features.
        return []
def send_progress_webhook(passing: int, total: int, project_dir: Path) -> None:
    """
    Send webhook notification when progress increases.

    Compares *passing* against the count cached from the previous call;
    when it grew, POSTs a JSON payload (including which features newly
    pass) to PROGRESS_N8N_WEBHOOK_URL and refreshes the cache. No-op if
    the webhook URL is not configured.

    Args:
        passing: Current number of passing tests.
        total: Total number of tests.
        project_dir: Directory containing the project.
    """
    if not WEBHOOK_URL:
        return  # Webhook not configured

    from autoforge_paths import get_progress_cache_path

    cache_file = get_progress_cache_path(project_dir)
    previous = 0
    previous_passing_ids: set = set()
    # Read previous progress and passing feature IDs
    if cache_file.exists():
        try:
            cache_data = json.loads(cache_file.read_text())
            previous = cache_data.get("count", 0)
            previous_passing_ids = set(cache_data.get("passing_ids", []))
        except Exception:
            # Corrupt/unreadable cache: treat as no prior progress.
            previous = 0

    # Only notify if progress increased
    if passing > previous:
        completed_tests: list[str] = []
        current_passing_ids: list = []
        # Detect transition from old cache format (had count but no passing_ids).
        # In this case, we can't reliably identify which specific tests are new.
        is_old_cache_format = len(previous_passing_ids) == 0 and previous > 0
        # Get all passing features via direct database access
        all_passing = get_all_passing_features(project_dir)
        for feature in all_passing:
            feature_id = feature.get("id")
            current_passing_ids.append(feature_id)
            # Only identify individual new tests if we have previous IDs to compare
            if not is_old_cache_format and feature_id not in previous_passing_ids:
                # This feature is newly passing
                name = feature.get("name", f"Feature #{feature_id}")
                category = feature.get("category", "")
                if category:
                    completed_tests.append(f"{category} {name}")
                else:
                    completed_tests.append(name)
        payload = {
            "event": "test_progress",
            "passing": passing,
            "total": total,
            "percentage": round((passing / total) * 100, 1) if total > 0 else 0,
            "previous_passing": previous,
            "tests_completed_this_session": passing - previous,
            "completed_tests": completed_tests,
            "project": project_dir.name,
            "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
        }
        try:
            req = urllib.request.Request(
                WEBHOOK_URL,
                data=json.dumps([payload]).encode("utf-8"),  # n8n expects array
                headers={"Content-Type": "application/json"},
            )
            # Fixed: close the HTTP response; the previous code leaked the
            # socket by discarding the response object returned by urlopen.
            with closing(urllib.request.urlopen(req, timeout=5)):
                pass
        except Exception as e:
            print(f"[Webhook notification failed: {e}]")
        # Update cache with count and passing IDs
        cache_file.write_text(
            json.dumps({"count": passing, "passing_ids": current_passing_ids})
        )
    else:
        # Update cache even if no change (for initial state)
        if not cache_file.exists():
            all_passing = get_all_passing_features(project_dir)
            current_passing_ids = [f.get("id") for f in all_passing]
            cache_file.write_text(
                json.dumps({"count": passing, "passing_ids": current_passing_ids})
            )
def print_session_header(session_num: int, is_initializer: bool) -> None:
    """Print a banner announcing which session is about to start."""
    label = "INITIALIZER" if is_initializer else "CODING AGENT"
    bar = "=" * 70
    print(f"\n{bar}")
    print(f" SESSION {session_num}: {label}")
    print(bar)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a one-line progress summary and notify the webhook."""
    passing, in_progress, total, _ = count_passing_tests(project_dir)
    if total == 0:
        print("\nProgress: No features in database yet")
        return
    pct = passing / total * 100
    parts = [f"{passing}/{total} tests passing ({pct:.1f}%)"]
    if in_progress:
        parts.append(f"{in_progress} in progress")
    print(f"\nProgress: {', '.join(parts)}")
    send_progress_webhook(passing, total, project_dir)