diff --git a/.claude/templates/initializer_prompt.template.md b/.claude/templates/initializer_prompt.template.md index 312cd17..080e81c 100644 --- a/.claude/templates/initializer_prompt.template.md +++ b/.claude/templates/initializer_prompt.template.md @@ -32,23 +32,35 @@ Use the feature_create_bulk tool to add all features at once: Use the feature_create_bulk tool with features=[ { "category": "functional", - "name": "Brief feature name", - "description": "Brief description of the feature and what this test verifies", + "name": "User can create an account", + "description": "Basic user registration functionality", "steps": [ - "Step 1: Navigate to relevant page", - "Step 2: Perform action", - "Step 3: Verify expected result" + "Step 1: Navigate to registration page", + "Step 2: Fill in required fields", + "Step 3: Submit form and verify account created" ] }, { - "category": "style", - "name": "Brief feature name", - "description": "Brief description of UI/UX requirement", + "category": "functional", + "name": "User can log in", + "description": "Authentication with existing credentials", "steps": [ - "Step 1: Navigate to page", - "Step 2: Take screenshot", - "Step 3: Verify visual requirements" - ] + "Step 1: Navigate to login page", + "Step 2: Enter credentials", + "Step 3: Verify successful login and redirect" + ], + "depends_on_indices": [0] + }, + { + "category": "functional", + "name": "User can view dashboard", + "description": "Protected dashboard requires authentication", + "steps": [ + "Step 1: Log in as user", + "Step 2: Navigate to dashboard", + "Step 3: Verify personalized content displays" + ], + "depends_on_indices": [1] } ] ``` @@ -57,6 +69,7 @@ Use the feature_create_bulk tool with features=[ - IDs and priorities are assigned automatically based on order - All features start with `passes: false` by default - You can create features in batches if there are many (e.g., 50 at a time) +- Use `depends_on_indices` to specify dependencies (see FEATURE DEPENDENCIES section below) **Requirements for features:** @@ -75,6 +88,86 @@ Use the feature_create_bulk tool with features=[ --- +## FEATURE DEPENDENCIES + +Dependencies enable **parallel execution** of independent features. When you specify dependencies correctly, multiple agents can work on unrelated features simultaneously, dramatically speeding up development. + +### Why Dependencies Matter + +1. **Parallel Execution**: Features without dependencies can run in parallel +2. **Logical Ordering**: Ensures features are built in the right order +3. **Blocking Prevention**: An agent won't start a feature until its dependencies pass + +### How to Determine Dependencies + +Ask yourself: "What MUST be working before this feature can be tested?" + +| Dependency Type | Example | +|-----------------|---------| +| **Data dependencies** | "Edit item" depends on "Create item" | +| **Auth dependencies** | "View dashboard" depends on "User can log in" | +| **Navigation dependencies** | "Modal close works" depends on "Modal opens" | +| **UI dependencies** | "Filter results" depends on "Display results list" | +| **API dependencies** | "Fetch user data" depends on "API authentication" | + +### Using `depends_on_indices` + +Since feature IDs aren't assigned until after creation, use **array indices** (0-based) to reference dependencies: + +```json +{ + "features": [ + { "name": "Create account", ... 
}, // Index 0 + { "name": "Login", "depends_on_indices": [0] }, // Index 1, depends on 0 + { "name": "View profile", "depends_on_indices": [1] }, // Index 2, depends on 1 + { "name": "Edit profile", "depends_on_indices": [2] } // Index 3, depends on 2 + ] +} +``` + +### Rules for Dependencies + +1. **Can only depend on EARLIER features**: Index must be less than current feature's position +2. **No circular dependencies**: A cannot depend on B if B depends on A +3. **Maximum 20 dependencies** per feature +4. **Foundation features have NO dependencies**: First features in each category typically have none +5. **Don't over-depend**: Only add dependencies that are truly required for testing + +### Best Practices + +1. **Start with foundation features** (index 0-10): Core setup, basic navigation, authentication +2. **Group related features together**: Keep CRUD operations adjacent +3. **Chain complex flows**: Registration → Login → Dashboard → Settings +4. **Keep dependencies shallow**: Prefer 1-2 dependencies over deep chains +5. **Skip dependencies for independent features**: Visual tests often have no dependencies + +### Example: Todo App Feature Chain + +```json +[ + // Foundation (no dependencies) + { "name": "App loads without errors", "category": "functional" }, + { "name": "Navigation bar displays", "category": "style" }, + + // Auth chain + { "name": "User can register", "depends_on_indices": [0] }, + { "name": "User can login", "depends_on_indices": [2] }, + { "name": "User can logout", "depends_on_indices": [3] }, + + // Todo CRUD (depends on auth) + { "name": "User can create todo", "depends_on_indices": [3] }, + { "name": "User can view todos", "depends_on_indices": [5] }, + { "name": "User can edit todo", "depends_on_indices": [5] }, + { "name": "User can delete todo", "depends_on_indices": [5] }, + + // Advanced features (multiple dependencies) + { "name": "User can filter todos", "depends_on_indices": [6] }, + { "name": "User can search todos", "depends_on_indices": [6] } +] +``` + +--- + ## MANDATORY TEST CATEGORIES The feature_list.json **MUST** include tests from ALL of these categories. The minimum counts scale by complexity tier. diff --git a/.gitignore b/.gitignore index 0c478ea..6935128 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,13 @@ # Agent-generated output directories generations/ +automaker/ nul issues/ +# Browser profiles for parallel agent execution +.browser-profiles/ + # Log files logs/ *.log diff --git a/agent.py b/agent.py index 50edc46..c6199b4 100644 --- a/agent.py +++ b/agent.py @@ -19,8 +19,8 @@ from claude_agent_sdk import ClaudeSDKClient # Fix Windows console encoding for Unicode characters (emoji, etc.) 
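As a quick illustration of the batch rules above, a client-side pre-check of `depends_on_indices` can catch mistakes before calling `feature_create_bulk`. The helper below is hypothetical and mirrors only the documented constraints: earlier indices only, no duplicates, at most 20 dependencies per feature.

```python
# Hypothetical pre-check mirroring the documented depends_on_indices rules:
# only earlier (0-based) indices, no duplicates, at most 20 per feature.
MAX_DEPS = 20

def check_batch(features: list[dict]) -> list[str]:
    errors = []
    for i, feature in enumerate(features):
        indices = feature.get("depends_on_indices", [])
        if len(indices) > MAX_DEPS:
            errors.append(f"feature {i}: more than {MAX_DEPS} dependencies")
        if len(indices) != len(set(indices)):
            errors.append(f"feature {i}: duplicate dependencies")
        for idx in indices:
            # Forward references (idx >= i) and self-references are both rejected.
            if not isinstance(idx, int) or idx < 0 or idx >= i:
                errors.append(f"feature {i}: invalid or forward reference {idx}")
    return errors

batch = [
    {"name": "App loads"},
    {"name": "User can register", "depends_on_indices": [0]},
    {"name": "User can login", "depends_on_indices": [2]},
]
print(check_batch(batch))  # ['feature 2: invalid or forward reference 2']
```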
# Without this, print() crashes when Claude outputs emoji like ✅ if sys.platform == "win32": - sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace") - sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace") + sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace", line_buffering=True) + sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace", line_buffering=True) from client import create_client from progress import has_features, print_progress_summary, print_session_header @@ -29,6 +29,7 @@ from prompts import ( get_coding_prompt, get_coding_prompt_yolo, get_initializer_prompt, + get_single_feature_prompt, ) # Configuration @@ -114,6 +115,7 @@ async def run_autonomous_agent( model: str, max_iterations: Optional[int] = None, yolo_mode: bool = False, + feature_id: Optional[int] = None, ) -> None: """ Run the autonomous agent loop. @@ -123,6 +125,7 @@ async def run_autonomous_agent( model: Claude model to use max_iterations: Maximum number of iterations (None for unlimited) yolo_mode: If True, skip browser testing and use YOLO prompt + feature_id: If set, work only on this specific feature (used by parallel orchestrator) """ print("\n" + "=" * 70) print(" AUTONOMOUS CODING AGENT DEMO") @@ -133,6 +136,8 @@ async def run_autonomous_agent( print("Mode: YOLO (testing disabled)") else: print("Mode: Standard (full testing)") + if feature_id: + print(f"Single-feature mode: Feature #{feature_id}") if max_iterations: print(f"Max iterations: {max_iterations}") else: @@ -178,13 +183,18 @@ async def run_autonomous_agent( print_session_header(iteration, is_first_run) # Create client (fresh context) - client = create_client(project_dir, model, yolo_mode=yolo_mode) + # In single-feature mode, pass agent_id for browser isolation + agent_id = f"feature-{feature_id}" if feature_id else None + client = create_client(project_dir, model, yolo_mode=yolo_mode, agent_id=agent_id) # Choose prompt based on session type # Pass project_dir to enable project-specific prompts if is_first_run: prompt = get_initializer_prompt(project_dir) is_first_run = False # Only use initializer once + elif feature_id: + # Single-feature mode (used by parallel orchestrator) + prompt = get_single_feature_prompt(feature_id, project_dir, yolo_mode) else: # Use YOLO prompt if in YOLO mode if yolo_mode: diff --git a/api/database.py b/api/database.py index 69a919b..3fc586c 100644 --- a/api/database.py +++ b/api/database.py @@ -8,7 +8,7 @@ SQLite database schema for feature storage using SQLAlchemy. 
from pathlib import Path from typing import Optional -from sqlalchemy import Boolean, Column, Integer, String, Text, create_engine +from sqlalchemy import Boolean, Column, Integer, String, Text, create_engine, text from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Session, sessionmaker from sqlalchemy.types import JSON @@ -29,6 +29,9 @@ class Feature(Base): steps = Column(JSON, nullable=False) # Stored as JSON array passes = Column(Boolean, nullable=False, default=False, index=True) in_progress = Column(Boolean, nullable=False, default=False, index=True) + # Dependencies: list of feature IDs that must be completed before this feature + # NULL/empty = no dependencies (backwards compatible) + dependencies = Column(JSON, nullable=True, default=None) def to_dict(self) -> dict: """Convert feature to dictionary for JSON serialization.""" @@ -42,8 +45,18 @@ class Feature(Base): # Handle legacy NULL values gracefully - treat as False "passes": self.passes if self.passes is not None else False, "in_progress": self.in_progress if self.in_progress is not None else False, + # Dependencies: NULL/empty treated as empty list for backwards compat + "dependencies": self.dependencies if self.dependencies else [], } + def get_dependencies_safe(self) -> list[int]: + """Safely extract dependencies, handling NULL and malformed data.""" + if self.dependencies is None: + return [] + if isinstance(self.dependencies, list): + return [d for d in self.dependencies if isinstance(d, int)] + return [] + def get_database_path(project_dir: Path) -> Path: """Return the path to the SQLite database for a project.""" @@ -61,8 +74,6 @@ def get_database_url(project_dir: Path) -> str: def _migrate_add_in_progress_column(engine) -> None: """Add in_progress column to existing databases that don't have it.""" - from sqlalchemy import text - with engine.connect() as conn: # Check if column exists result = conn.execute(text("PRAGMA table_info(features)")) @@ -76,8 +87,6 @@ def _migrate_add_in_progress_column(engine) -> None: def _migrate_fix_null_boolean_fields(engine) -> None: """Fix NULL values in passes and in_progress columns.""" - from sqlalchemy import text - with engine.connect() as conn: # Fix NULL passes values conn.execute(text("UPDATE features SET passes = 0 WHERE passes IS NULL")) @@ -86,6 +95,23 @@ def _migrate_fix_null_boolean_fields(engine) -> None: conn.commit() +def _migrate_add_dependencies_column(engine) -> None: + """Add dependencies column to existing databases that don't have it. + + Uses NULL default for backwards compatibility - existing features + without dependencies will have NULL which is treated as empty list. + """ + with engine.connect() as conn: + # Check if column exists + result = conn.execute(text("PRAGMA table_info(features)")) + columns = [row[1] for row in result.fetchall()] + + if "dependencies" not in columns: + # Use TEXT for SQLite JSON storage, NULL default for backwards compat + conn.execute(text("ALTER TABLE features ADD COLUMN dependencies TEXT DEFAULT NULL")) + conn.commit() + + def create_database(project_dir: Path) -> tuple: """ Create database and return engine + session maker. 
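The same additive migration pattern, shown against plain `sqlite3` for anyone inspecting a project database by hand. This is an illustrative sketch; it assumes a `features` table already exists.

```python
# Illustrative version of the additive migration: look up existing columns
# with PRAGMA table_info and only ALTER TABLE when "dependencies" is missing.
# The NULL default keeps pre-existing rows valid (treated as "no dependencies").
import sqlite3

def add_dependencies_column(db_path: str) -> None:
    conn = sqlite3.connect(db_path)
    try:
        columns = [row[1] for row in conn.execute("PRAGMA table_info(features)")]
        if "dependencies" not in columns:
            conn.execute("ALTER TABLE features ADD COLUMN dependencies TEXT DEFAULT NULL")
            conn.commit()
    finally:
        conn.close()
```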
@@ -97,12 +123,22 @@ def create_database(project_dir: Path) -> tuple: Tuple of (engine, SessionLocal) """ db_url = get_database_url(project_dir) - engine = create_engine(db_url, connect_args={"check_same_thread": False}) + engine = create_engine(db_url, connect_args={ + "check_same_thread": False, + "timeout": 30 # Wait up to 30s for locks + }) Base.metadata.create_all(bind=engine) + # Enable WAL mode for better concurrent read/write performance + with engine.connect() as conn: + conn.execute(text("PRAGMA journal_mode=WAL")) + conn.execute(text("PRAGMA busy_timeout=30000")) + conn.commit() + # Migrate existing databases _migrate_add_in_progress_column(engine) _migrate_fix_null_boolean_fields(engine) + _migrate_add_dependencies_column(engine) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) return engine, SessionLocal diff --git a/api/dependency_resolver.py b/api/dependency_resolver.py new file mode 100644 index 0000000..daaad17 --- /dev/null +++ b/api/dependency_resolver.py @@ -0,0 +1,341 @@ +""" +Dependency Resolver +=================== + +Provides dependency resolution using Kahn's algorithm for topological sorting. +Includes cycle detection, validation, and helper functions for dependency management. +""" + +from typing import TypedDict + +# Security: Prevent DoS via excessive dependencies +MAX_DEPENDENCIES_PER_FEATURE = 20 +MAX_DEPENDENCY_DEPTH = 50 # Prevent stack overflow in cycle detection + + +class DependencyResult(TypedDict): + """Result from dependency resolution.""" + + ordered_features: list[dict] + circular_dependencies: list[list[int]] + blocked_features: dict[int, list[int]] # feature_id -> [blocking_ids] + missing_dependencies: dict[int, list[int]] # feature_id -> [missing_ids] + + +def resolve_dependencies(features: list[dict]) -> DependencyResult: + """Topological sort using Kahn's algorithm with priority-aware ordering. + + Returns ordered features respecting dependencies, plus metadata about + cycles, blocked features, and missing dependencies. 
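The WAL and busy-timeout settings in `create_database` above are what let several agent processes share one SQLite file without immediately failing on "database is locked". A minimal standalone check, assuming the database file already exists:

```python
# Sanity check for the concurrency settings applied in create_database().
import sqlite3

def check_concurrency_settings(db_path: str) -> None:
    conn = sqlite3.connect(db_path, timeout=30)
    try:
        # journal_mode=WAL is persisted in the database file, so any
        # connection can confirm that it took effect.
        journal_mode = conn.execute("PRAGMA journal_mode").fetchone()[0]
        # busy_timeout is per-connection, so each process sets its own,
        # just as create_database() does for the engine's connections.
        conn.execute("PRAGMA busy_timeout=30000")
        timeout_ms = conn.execute("PRAGMA busy_timeout").fetchone()[0]
        print(f"journal_mode={journal_mode}, busy_timeout={timeout_ms}ms")
    finally:
        conn.close()
```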
+ + Args: + features: List of feature dicts with id, priority, passes, and dependencies fields + + Returns: + DependencyResult with ordered_features, circular_dependencies, + blocked_features, and missing_dependencies + """ + feature_map = {f["id"]: f for f in features} + in_degree = {f["id"]: 0 for f in features} + adjacency: dict[int, list[int]] = {f["id"]: [] for f in features} + blocked: dict[int, list[int]] = {} + missing: dict[int, list[int]] = {} + + # Build graph + for feature in features: + deps = feature.get("dependencies") or [] + for dep_id in deps: + if dep_id not in feature_map: + missing.setdefault(feature["id"], []).append(dep_id) + else: + adjacency[dep_id].append(feature["id"]) + in_degree[feature["id"]] += 1 + # Track blocked features + dep = feature_map[dep_id] + if not dep.get("passes"): + blocked.setdefault(feature["id"], []).append(dep_id) + + # Kahn's algorithm with priority-aware selection + queue = [f for f in features if in_degree[f["id"]] == 0] + queue.sort(key=lambda f: (f.get("priority", 999), f["id"])) + ordered: list[dict] = [] + + while queue: + current = queue.pop(0) + ordered.append(current) + for dependent_id in adjacency[current["id"]]: + in_degree[dependent_id] -= 1 + if in_degree[dependent_id] == 0: + queue.append(feature_map[dependent_id]) + queue.sort(key=lambda f: (f.get("priority", 999), f["id"])) + + # Detect cycles (features not in ordered = part of cycle) + cycles: list[list[int]] = [] + if len(ordered) < len(features): + remaining = [f for f in features if f not in ordered] + cycles = _detect_cycles(remaining, feature_map) + ordered.extend(remaining) # Add cyclic features at end + + return { + "ordered_features": ordered, + "circular_dependencies": cycles, + "blocked_features": blocked, + "missing_dependencies": missing, + } + + +def are_dependencies_satisfied(feature: dict, all_features: list[dict]) -> bool: + """Check if all dependencies have passes=True. + + Args: + feature: Feature dict to check + all_features: List of all feature dicts + + Returns: + True if all dependencies are satisfied (or no dependencies) + """ + deps = feature.get("dependencies") or [] + if not deps: + return True + passing_ids = {f["id"] for f in all_features if f.get("passes")} + return all(dep_id in passing_ids for dep_id in deps) + + +def get_blocking_dependencies(feature: dict, all_features: list[dict]) -> list[int]: + """Get list of incomplete dependency IDs. + + Args: + feature: Feature dict to check + all_features: List of all feature dicts + + Returns: + List of feature IDs that are blocking this feature + """ + deps = feature.get("dependencies") or [] + passing_ids = {f["id"] for f in all_features if f.get("passes")} + return [dep_id for dep_id in deps if dep_id not in passing_ids] + + +def would_create_circular_dependency( + features: list[dict], source_id: int, target_id: int +) -> bool: + """Check if adding a dependency from target to source would create a cycle. + + Uses DFS with visited set for efficient cycle detection. 
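A tiny worked example of `resolve_dependencies` above, trimmed to the fields the resolver actually reads (it assumes the repo root is on `sys.path`):

```python
# Three features: #2 depends on #1 (already passing), #3 depends on #2 (not yet).
from api.dependency_resolver import resolve_dependencies

features = [
    {"id": 1, "priority": 1, "passes": True,  "dependencies": []},
    {"id": 2, "priority": 2, "passes": False, "dependencies": [1]},
    {"id": 3, "priority": 3, "passes": False, "dependencies": [2]},
]

result = resolve_dependencies(features)
print([f["id"] for f in result["ordered_features"]])  # [1, 2, 3]
print(result["blocked_features"])                     # {3: [2]}  (#2 not passing yet)
print(result["circular_dependencies"])                # []
print(result["missing_dependencies"])                 # {}
```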
+ + Args: + features: List of all feature dicts + source_id: The feature that would gain the dependency + target_id: The feature that would become a dependency + + Returns: + True if adding the dependency would create a cycle + """ + if source_id == target_id: + return True # Self-reference is a cycle + + feature_map = {f["id"]: f for f in features} + source = feature_map.get(source_id) + if not source: + return False + + # Check if target already depends on source (direct or indirect) + target = feature_map.get(target_id) + if not target: + return False + + # DFS from target to see if we can reach source + visited: set[int] = set() + + def can_reach(current_id: int, depth: int = 0) -> bool: + # Security: Prevent stack overflow with depth limit + if depth > MAX_DEPENDENCY_DEPTH: + return True # Assume cycle if too deep (fail-safe) + if current_id == source_id: + return True + if current_id in visited: + return False + visited.add(current_id) + + current = feature_map.get(current_id) + if not current: + return False + + deps = current.get("dependencies") or [] + for dep_id in deps: + if can_reach(dep_id, depth + 1): + return True + return False + + return can_reach(target_id) + + +def validate_dependencies( + feature_id: int, dependency_ids: list[int], all_feature_ids: set[int] +) -> tuple[bool, str]: + """Validate dependency list. + + Args: + feature_id: ID of the feature being validated + dependency_ids: List of proposed dependency IDs + all_feature_ids: Set of all valid feature IDs + + Returns: + Tuple of (is_valid, error_message) + """ + # Security: Check limits + if len(dependency_ids) > MAX_DEPENDENCIES_PER_FEATURE: + return False, f"Maximum {MAX_DEPENDENCIES_PER_FEATURE} dependencies allowed" + + # Check self-reference + if feature_id in dependency_ids: + return False, "A feature cannot depend on itself" + + # Check all dependencies exist + missing = [d for d in dependency_ids if d not in all_feature_ids] + if missing: + return False, f"Dependencies not found: {missing}" + + # Check for duplicates + if len(dependency_ids) != len(set(dependency_ids)): + return False, "Duplicate dependencies not allowed" + + return True, "" + + +def _detect_cycles(features: list[dict], feature_map: dict) -> list[list[int]]: + """Detect cycles using DFS with recursion tracking. + + Args: + features: List of features to check for cycles + feature_map: Map of feature_id -> feature dict + + Returns: + List of cycles, where each cycle is a list of feature IDs + """ + cycles: list[list[int]] = [] + visited: set[int] = set() + rec_stack: set[int] = set() + path: list[int] = [] + + def dfs(fid: int) -> bool: + visited.add(fid) + rec_stack.add(fid) + path.append(fid) + + feature = feature_map.get(fid) + if feature: + for dep_id in feature.get("dependencies") or []: + if dep_id not in visited: + if dfs(dep_id): + return True + elif dep_id in rec_stack: + cycle_start = path.index(dep_id) + cycles.append(path[cycle_start:]) + return True + + path.pop() + rec_stack.remove(fid) + return False + + for f in features: + if f["id"] not in visited: + dfs(f["id"]) + + return cycles + + +def get_ready_features(features: list[dict], limit: int = 10) -> list[dict]: + """Get features that are ready to be worked on. 
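For example, the guard above rejects an edge that would close a loop:

```python
# Feature 1 already depends on 2, so letting 2 depend on 1 would form a cycle,
# while a fresh 3 -> 1 dependency is harmless.
from api.dependency_resolver import would_create_circular_dependency

features = [
    {"id": 1, "dependencies": [2]},
    {"id": 2, "dependencies": []},
    {"id": 3, "dependencies": []},
]

print(would_create_circular_dependency(features, source_id=2, target_id=1))  # True
print(would_create_circular_dependency(features, source_id=3, target_id=1))  # False
```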
+ + A feature is ready if: + - It is not passing + - It is not in progress + - All its dependencies are satisfied + + Args: + features: List of all feature dicts + limit: Maximum number of features to return + + Returns: + List of ready features, sorted by priority + """ + passing_ids = {f["id"] for f in features if f.get("passes")} + + ready = [] + for f in features: + if f.get("passes") or f.get("in_progress"): + continue + deps = f.get("dependencies") or [] + if all(dep_id in passing_ids for dep_id in deps): + ready.append(f) + + # Sort by priority + ready.sort(key=lambda f: (f.get("priority", 999), f["id"])) + + return ready[:limit] + + +def get_blocked_features(features: list[dict]) -> list[dict]: + """Get features that are blocked by unmet dependencies. + + Args: + features: List of all feature dicts + + Returns: + List of blocked features with 'blocked_by' field added + """ + passing_ids = {f["id"] for f in features if f.get("passes")} + + blocked = [] + for f in features: + if f.get("passes"): + continue + deps = f.get("dependencies") or [] + blocking = [d for d in deps if d not in passing_ids] + if blocking: + blocked.append({**f, "blocked_by": blocking}) + + return blocked + + +def build_graph_data(features: list[dict]) -> dict: + """Build graph data structure for visualization. + + Args: + features: List of all feature dicts + + Returns: + Dict with 'nodes' and 'edges' for graph visualization + """ + passing_ids = {f["id"] for f in features if f.get("passes")} + + nodes = [] + edges = [] + + for f in features: + deps = f.get("dependencies") or [] + blocking = [d for d in deps if d not in passing_ids] + + if f.get("passes"): + status = "done" + elif blocking: + status = "blocked" + elif f.get("in_progress"): + status = "in_progress" + else: + status = "pending" + + nodes.append({ + "id": f["id"], + "name": f["name"], + "category": f["category"], + "status": status, + "priority": f.get("priority", 999), + "dependencies": deps, + }) + + for dep_id in deps: + edges.append({"source": dep_id, "target": f["id"]}) + + return {"nodes": nodes, "edges": edges} diff --git a/autonomous_agent_demo.py b/autonomous_agent_demo.py index 4e2b656..47fdcb3 100644 --- a/autonomous_agent_demo.py +++ b/autonomous_agent_demo.py @@ -19,6 +19,12 @@ Example Usage: # YOLO mode: rapid prototyping without browser testing python autonomous_agent_demo.py --project-dir my-app --yolo + + # Parallel execution with 3 concurrent agents (default) + python autonomous_agent_demo.py --project-dir my-app --parallel + + # Parallel execution with 5 concurrent agents + python autonomous_agent_demo.py --project-dir my-app --parallel 5 """ import argparse @@ -91,6 +97,24 @@ Authentication: help="Enable YOLO mode: rapid prototyping without browser testing", ) + parser.add_argument( + "--parallel", + "-p", + type=int, + nargs="?", + const=3, + default=None, + metavar="N", + help="Enable parallel execution with N concurrent agents (default: 3, max: 5)", + ) + + parser.add_argument( + "--feature-id", + type=int, + default=None, + help="Work on a specific feature ID only (used by parallel orchestrator)", + ) + return parser.parse_args() @@ -123,15 +147,30 @@ def main() -> None: return try: - # Run the agent (MCP server handles feature database) - asyncio.run( - run_autonomous_agent( - project_dir=project_dir, - model=args.model, - max_iterations=args.max_iterations, - yolo_mode=args.yolo, + if args.parallel is not None: + # Parallel execution mode + from parallel_orchestrator import run_parallel_orchestrator + + print(f"Running in 
parallel mode with {args.parallel} concurrent agents") + asyncio.run( + run_parallel_orchestrator( + project_dir=project_dir, + max_concurrency=args.parallel, + model=args.model, + yolo_mode=args.yolo, + ) + ) + else: + # Standard single-agent mode (MCP server handles feature database) + asyncio.run( + run_autonomous_agent( + project_dir=project_dir, + model=args.model, + max_iterations=args.max_iterations, + yolo_mode=args.yolo, + feature_id=args.feature_id, + ) ) - ) except KeyboardInterrupt: print("\n\nInterrupted by user") print("To resume, run the same command again") diff --git a/client.py b/client.py index 7074fef..6ce7dfb 100644 --- a/client.py +++ b/client.py @@ -52,13 +52,25 @@ def get_playwright_headless() -> bool: # Feature MCP tools for feature/test management FEATURE_MCP_TOOLS = [ + # Core feature operations "mcp__features__feature_get_stats", "mcp__features__feature_get_next", + "mcp__features__feature_claim_next", # Atomic get+claim for parallel execution "mcp__features__feature_get_for_regression", "mcp__features__feature_mark_in_progress", "mcp__features__feature_mark_passing", "mcp__features__feature_skip", "mcp__features__feature_create_bulk", + "mcp__features__feature_create", + "mcp__features__feature_clear_in_progress", + # Dependency management + "mcp__features__feature_add_dependency", + "mcp__features__feature_remove_dependency", + "mcp__features__feature_set_dependencies", + # Parallel execution support + "mcp__features__feature_get_ready", + "mcp__features__feature_get_blocked", + "mcp__features__feature_get_graph", ] # Playwright MCP tools for browser automation @@ -107,7 +119,12 @@ BUILTIN_TOOLS = [ ] -def create_client(project_dir: Path, model: str, yolo_mode: bool = False): +def create_client( + project_dir: Path, + model: str, + yolo_mode: bool = False, + agent_id: str | None = None, +): """ Create a Claude Agent SDK client with multi-layered security. @@ -115,6 +132,8 @@ def create_client(project_dir: Path, model: str, yolo_mode: bool = False): project_dir: Directory for the project model: Claude model to use yolo_mode: If True, skip Playwright MCP server for rapid prototyping + agent_id: Optional unique identifier for browser isolation in parallel mode. + When provided, each agent gets its own browser profile. 
Returns: Configured ClaudeSDKClient (from claude_agent_sdk) @@ -211,6 +230,16 @@ def create_client(project_dir: Path, model: str, yolo_mode: bool = False): playwright_args = ["@playwright/mcp@latest", "--viewport-size", "1280x720"] if get_playwright_headless(): playwright_args.append("--headless") + + # Browser isolation for parallel execution + # Each agent gets its own isolated browser context to prevent tab conflicts + if agent_id: + # Use --isolated for ephemeral browser context + # This creates a fresh, isolated context without persistent state + # Note: --isolated and --user-data-dir are mutually exclusive + playwright_args.append("--isolated") + print(f" - Browser isolation enabled for agent: {agent_id}") + mcp_servers["playwright"] = { "command": "npx", "args": playwright_args, diff --git a/mcp_server/feature_mcp.py b/mcp_server/feature_mcp.py index 2af499f..f640fc5 100755 --- a/mcp_server/feature_mcp.py +++ b/mcp_server/feature_mcp.py @@ -22,12 +22,14 @@ import json import os import sys import threading +import time as _time from contextlib import asynccontextmanager from pathlib import Path from typing import Annotated from mcp.server.fastmcp import FastMCP from pydantic import BaseModel, Field +from sqlalchemy import text from sqlalchemy.sql.expression import func # Add parent directory to path so we can import from api module @@ -35,6 +37,12 @@ sys.path.insert(0, str(Path(__file__).parent.parent)) from api.database import Feature, create_database from api.migration import migrate_json_to_sqlite +from api.dependency_resolver import ( + would_create_circular_dependency, + are_dependencies_satisfied, + get_blocking_dependencies, + MAX_DEPENDENCIES_PER_FEATURE, +) # Configuration from environment PROJECT_DIR = Path(os.environ.get("PROJECT_DIR", ".")).resolve() @@ -148,32 +156,192 @@ def feature_get_stats() -> str: @mcp.tool() def feature_get_next() -> str: - """Get the highest-priority pending feature to work on. + """Get the highest-priority pending feature that has all dependencies satisfied. - Returns the feature with the lowest priority number that has passes=false. - Use this at the start of each coding session to determine what to implement next. + Returns the feature with the lowest priority number that: + 1. Has passes=false and in_progress=false + 2. Has all dependency features already passing (or no dependencies) + 3. All dependency IDs actually exist (orphaned dependencies are ignored) + + For backwards compatibility: if all pending features are blocked by dependencies, + falls back to returning the first pending feature (same as before dependencies). Returns: - JSON with feature details (id, priority, category, name, description, steps, passes, in_progress) - or error message if all features are passing. + JSON with feature details (id, priority, category, name, description, steps, passes, + in_progress, dependencies) or error message if all features are passing. 
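A condensed view of the Playwright argument assembly above: when an `agent_id` is supplied (parallel mode), the `--isolated` flag gives each agent its own ephemeral browser context so agents do not fight over tabs. Sketch only; the flags are the ones used in `create_client`.

```python
# Condensed sketch of how the Playwright MCP arguments are assembled.
def playwright_args(headless: bool, agent_id: str | None) -> list[str]:
    args = ["@playwright/mcp@latest", "--viewport-size", "1280x720"]
    if headless:
        args.append("--headless")
    if agent_id:
        args.append("--isolated")  # mutually exclusive with --user-data-dir
    return args

print(playwright_args(headless=True, agent_id="feature-7"))
# ['@playwright/mcp@latest', '--viewport-size', '1280x720', '--headless', '--isolated']
```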
""" session = get_session() try: - feature = ( - session.query(Feature) - .filter(Feature.passes == False) - .order_by(Feature.priority.asc(), Feature.id.asc()) - .first() - ) + all_features = session.query(Feature).all() + all_feature_ids = {f.id for f in all_features} + passing_ids = {f.id for f in all_features if f.passes} - if feature is None: + # Get pending, non-in-progress features + pending = [f for f in all_features if not f.passes and not f.in_progress] + pending.sort(key=lambda f: (f.priority, f.id)) + + if not pending: + if any(f.in_progress for f in all_features if not f.passes): + return json.dumps({"error": "All pending features are in progress by other agents"}) return json.dumps({"error": "All features are passing! No more work to do."}) - return json.dumps(feature.to_dict(), indent=2) + # Find first feature with satisfied dependencies + for feature in pending: + deps = feature.dependencies or [] + # Filter out orphaned dependencies (IDs that no longer exist) + valid_deps = [d for d in deps if d in all_feature_ids] + if all(dep_id in passing_ids for dep_id in valid_deps): + return json.dumps(feature.to_dict(), indent=2) + + # All pending features are blocked by unmet dependencies + # Return error with details about what's blocking progress + blocking_info = [] + for feature in pending[:3]: # Show first 3 blocked features + deps = feature.dependencies or [] + valid_deps = [d for d in deps if d in all_feature_ids] + orphaned = [d for d in deps if d not in all_feature_ids] + unmet = [d for d in valid_deps if d not in passing_ids] + info = f"#{feature.id} '{feature.name}'" + if unmet: + info += f" blocked by: {unmet}" + if orphaned: + info += f" (orphaned deps ignored: {orphaned})" + blocking_info.append(info) + + return json.dumps({ + "error": "All pending features are blocked by unmet dependencies", + "blocked_features": len(pending), + "examples": blocking_info, + "hint": "Complete the blocking dependencies first, or remove invalid dependencies" + }, indent=2) finally: session.close() +# Maximum retry attempts for feature claiming under contention +MAX_CLAIM_RETRIES = 10 + + +def _feature_claim_next_internal(attempt: int = 0) -> str: + """Internal implementation of feature claiming with retry tracking. + + Args: + attempt: Current retry attempt (0-indexed) + + Returns: + JSON with claimed feature details, or error message if no feature available. + """ + if attempt >= MAX_CLAIM_RETRIES: + return json.dumps({ + "error": "Failed to claim feature after maximum retries", + "hint": "High contention detected - try again or reduce parallel agents" + }) + + session = get_session() + try: + # Use a lock to prevent concurrent claims within this process + with _priority_lock: + all_features = session.query(Feature).all() + all_feature_ids = {f.id for f in all_features} + passing_ids = {f.id for f in all_features if f.passes} + + # Get pending, non-in-progress features + pending = [f for f in all_features if not f.passes and not f.in_progress] + pending.sort(key=lambda f: (f.priority, f.id)) + + if not pending: + if any(f.in_progress for f in all_features if not f.passes): + return json.dumps({"error": "All pending features are in progress by other agents"}) + return json.dumps({"error": "All features are passing! 
No more work to do."}) + + # Find first feature with satisfied dependencies + candidate_id = None + for feature in pending: + deps = feature.dependencies or [] + # Filter out orphaned dependencies (IDs that no longer exist) + valid_deps = [d for d in deps if d in all_feature_ids] + if all(dep_id in passing_ids for dep_id in valid_deps): + candidate_id = feature.id + break + + if candidate_id is None: + # All pending features are blocked by unmet dependencies + blocking_info = [] + for feature in pending[:3]: + deps = feature.dependencies or [] + valid_deps = [d for d in deps if d in all_feature_ids] + orphaned = [d for d in deps if d not in all_feature_ids] + unmet = [d for d in valid_deps if d not in passing_ids] + info = f"#{feature.id} '{feature.name}'" + if unmet: + info += f" blocked by: {unmet}" + if orphaned: + info += f" (orphaned deps ignored: {orphaned})" + blocking_info.append(info) + + return json.dumps({ + "error": "All pending features are blocked by unmet dependencies", + "blocked_features": len(pending), + "examples": blocking_info, + "hint": "Complete the blocking dependencies first, or remove invalid dependencies" + }, indent=2) + + # Atomic claim: UPDATE only if still claimable + # This prevents race conditions even across processes + result = session.execute( + text(""" + UPDATE features + SET in_progress = 1 + WHERE id = :feature_id + AND in_progress = 0 + AND passes = 0 + """), + {"feature_id": candidate_id} + ) + session.commit() + + # Check if we actually claimed it + if result.rowcount == 0: + # Another process claimed it first - retry with backoff + session.close() + # Exponential backoff: 0.1s, 0.2s, 0.4s, ... up to 1.0s + backoff = min(0.1 * (2 ** attempt), 1.0) + _time.sleep(backoff) + return _feature_claim_next_internal(attempt + 1) + + # Fetch the claimed feature + session.expire_all() # Clear cache to get fresh data + claimed_feature = session.query(Feature).filter(Feature.id == candidate_id).first() + return json.dumps(claimed_feature.to_dict(), indent=2) + + except Exception as e: + session.rollback() + return json.dumps({"error": f"Failed to claim feature: {str(e)}"}) + finally: + session.close() + + +@mcp.tool() +def feature_claim_next() -> str: + """Atomically get and claim the next available feature. + + This combines feature_get_next() and feature_mark_in_progress() in a single + atomic operation, preventing race conditions where two agents could claim + the same feature. + + Returns the feature with the lowest priority number that: + 1. Has passes=false and in_progress=false + 2. Has all dependency features already passing (or no dependencies) + 3. All dependency IDs actually exist (orphaned dependencies are ignored) + + On success, the feature's in_progress flag is set to True. + + Returns: + JSON with claimed feature details, or error message if no feature available. + """ + return _feature_claim_next_internal(attempt=0) + + @mcp.tool() def feature_get_for_regression( limit: Annotated[int, Field(default=3, ge=1, le=10, description="Maximum number of passing features to return")] = 3 @@ -382,9 +550,13 @@ def feature_create_bulk( - name (str): Feature name - description (str): Detailed description - steps (list[str]): Implementation/test steps + - depends_on_indices (list[int], optional): Array indices (0-based) of + features in THIS batch that this feature depends on. Use this instead + of 'dependencies' since IDs aren't known until after creation. + Example: [0, 2] means this feature depends on features at index 0 and 2. 
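The claim-by-conditional-UPDATE pattern above is not tied to SQLAlchemy; a bare-bones `sqlite3` version of the same idea looks like the following. The candidate-picking helper is hypothetical and stands in for the dependency-aware selection shown above.

```python
# Claim pattern: the conditional UPDATE only succeeds if the row is still
# unclaimed, and rowcount tells us whether this process won the race.
import sqlite3
import time
from typing import Callable, Optional

def try_claim(conn: sqlite3.Connection, feature_id: int) -> bool:
    """Return True only if this call flipped in_progress from 0 to 1."""
    cur = conn.execute(
        "UPDATE features SET in_progress = 1 "
        "WHERE id = ? AND in_progress = 0 AND passes = 0",
        (feature_id,),
    )
    conn.commit()
    return cur.rowcount == 1

def claim_next(conn: sqlite3.Connection,
               pick_candidate: Callable[[], Optional[int]],
               max_retries: int = 10) -> Optional[int]:
    # pick_candidate is assumed to return the lowest-priority ready feature id,
    # or None when nothing is claimable (hypothetical helper, not in the repo).
    for attempt in range(max_retries):
        feature_id = pick_candidate()
        if feature_id is None:
            return None
        if try_claim(conn, feature_id):
            return feature_id
        # Another process won the race: back off 0.1s, 0.2s, 0.4s, ... capped at 1s.
        time.sleep(min(0.1 * (2 ** attempt), 1.0))
    return None
```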
Returns: - JSON with: created (int) - number of features created + JSON with: created (int) - number of features created, with_dependencies (int) """ session = get_session() try: @@ -394,7 +566,7 @@ def feature_create_bulk( max_priority_result = session.query(Feature.priority).order_by(Feature.priority.desc()).first() start_priority = (max_priority_result[0] + 1) if max_priority_result else 1 - created_count = 0 + # First pass: validate all features and their index-based dependencies for i, feature_data in enumerate(features): # Validate required fields if not all(key in feature_data for key in ["category", "name", "description", "steps"]): @@ -402,6 +574,33 @@ def feature_create_bulk( "error": f"Feature at index {i} missing required fields (category, name, description, steps)" }) + # Validate depends_on_indices + indices = feature_data.get("depends_on_indices", []) + if indices: + # Check max dependencies + if len(indices) > MAX_DEPENDENCIES_PER_FEATURE: + return json.dumps({ + "error": f"Feature at index {i} has {len(indices)} dependencies, max is {MAX_DEPENDENCIES_PER_FEATURE}" + }) + # Check for duplicates + if len(indices) != len(set(indices)): + return json.dumps({ + "error": f"Feature at index {i} has duplicate dependencies" + }) + # Check for forward references (can only depend on earlier features) + for idx in indices: + if not isinstance(idx, int) or idx < 0: + return json.dumps({ + "error": f"Feature at index {i} has invalid dependency index: {idx}" + }) + if idx >= i: + return json.dumps({ + "error": f"Feature at index {i} cannot depend on feature at index {idx} (forward reference not allowed)" + }) + + # Second pass: create all features + created_features: list[Feature] = [] + for i, feature_data in enumerate(features): db_feature = Feature( priority=start_priority + i, category=feature_data["category"], @@ -412,11 +611,27 @@ def feature_create_bulk( in_progress=False, ) session.add(db_feature) - created_count += 1 + created_features.append(db_feature) + + # Flush to get IDs assigned + session.flush() + + # Third pass: resolve index-based dependencies to actual IDs + deps_count = 0 + for i, feature_data in enumerate(features): + indices = feature_data.get("depends_on_indices", []) + if indices: + # Convert indices to actual feature IDs + dep_ids = [created_features[idx].id for idx in indices] + created_features[i].dependencies = sorted(dep_ids) + deps_count += 1 session.commit() - return json.dumps({"created": created_count}, indent=2) + return json.dumps({ + "created": len(created_features), + "with_dependencies": deps_count + }, indent=2) except Exception as e: session.rollback() return json.dumps({"error": str(e)}) @@ -479,5 +694,298 @@ def feature_create( session.close() +@mcp.tool() +def feature_add_dependency( + feature_id: Annotated[int, Field(ge=1, description="Feature to add dependency to")], + dependency_id: Annotated[int, Field(ge=1, description="ID of the dependency feature")] +) -> str: + """Add a dependency relationship between features. + + The dependency_id feature must be completed before feature_id can be started. + Validates: self-reference, existence, circular dependencies, max limit. 
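The index-to-ID resolution step above, shown in isolation with placeholder database IDs:

```python
# After flush() assigns IDs, each depends_on_indices entry is translated to
# the real ID of the feature created at that position in the batch.
created_ids = [101, 102, 103]          # IDs assigned by the database (placeholders)
batch = [
    {"name": "Create account"},
    {"name": "Login", "depends_on_indices": [0]},
    {"name": "View profile", "depends_on_indices": [1]},
]

for feature_data in batch:
    indices = feature_data.get("depends_on_indices", [])
    feature_data["dependencies"] = sorted(created_ids[idx] for idx in indices)

print([f["dependencies"] for f in batch])  # [[], [101], [102]]
```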
+ + Args: + feature_id: The ID of the feature that will depend on another feature + dependency_id: The ID of the feature that must be completed first + + Returns: + JSON with success status and updated dependencies list, or error message + """ + session = get_session() + try: + # Security: Self-reference check + if feature_id == dependency_id: + return json.dumps({"error": "A feature cannot depend on itself"}) + + feature = session.query(Feature).filter(Feature.id == feature_id).first() + dependency = session.query(Feature).filter(Feature.id == dependency_id).first() + + if not feature: + return json.dumps({"error": f"Feature {feature_id} not found"}) + if not dependency: + return json.dumps({"error": f"Dependency feature {dependency_id} not found"}) + + current_deps = feature.dependencies or [] + + # Security: Max dependencies limit + if len(current_deps) >= MAX_DEPENDENCIES_PER_FEATURE: + return json.dumps({"error": f"Maximum {MAX_DEPENDENCIES_PER_FEATURE} dependencies allowed per feature"}) + + # Check if already exists + if dependency_id in current_deps: + return json.dumps({"error": "Dependency already exists"}) + + # Security: Circular dependency check + # would_create_circular_dependency(features, source_id, target_id) + # source_id = feature gaining the dependency, target_id = feature being depended upon + all_features = [f.to_dict() for f in session.query(Feature).all()] + if would_create_circular_dependency(all_features, feature_id, dependency_id): + return json.dumps({"error": "Cannot add: would create circular dependency"}) + + # Add dependency + current_deps.append(dependency_id) + feature.dependencies = sorted(current_deps) + session.commit() + + return json.dumps({ + "success": True, + "feature_id": feature_id, + "dependencies": feature.dependencies + }) + finally: + session.close() + + +@mcp.tool() +def feature_remove_dependency( + feature_id: Annotated[int, Field(ge=1, description="Feature to remove dependency from")], + dependency_id: Annotated[int, Field(ge=1, description="ID of dependency to remove")] +) -> str: + """Remove a dependency from a feature. + + Args: + feature_id: The ID of the feature to remove a dependency from + dependency_id: The ID of the dependency to remove + + Returns: + JSON with success status and updated dependencies list, or error message + """ + session = get_session() + try: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if not feature: + return json.dumps({"error": f"Feature {feature_id} not found"}) + + current_deps = feature.dependencies or [] + if dependency_id not in current_deps: + return json.dumps({"error": "Dependency does not exist"}) + + current_deps.remove(dependency_id) + feature.dependencies = current_deps if current_deps else None + session.commit() + + return json.dumps({ + "success": True, + "feature_id": feature_id, + "dependencies": feature.dependencies or [] + }) + finally: + session.close() + + +@mcp.tool() +def feature_get_ready( + limit: Annotated[int, Field(default=10, ge=1, le=50, description="Max features to return")] = 10 +) -> str: + """Get all features ready to start (dependencies satisfied, not in progress). + + Useful for parallel execution - returns multiple features that can run simultaneously. + A feature is ready if it is not passing, not in progress, and all dependencies are passing. 
+ + Args: + limit: Maximum number of features to return (1-50, default 10) + + Returns: + JSON with: features (list), count (int), total_ready (int) + """ + session = get_session() + try: + all_features = session.query(Feature).all() + passing_ids = {f.id for f in all_features if f.passes} + + ready = [] + for f in all_features: + if f.passes or f.in_progress: + continue + deps = f.dependencies or [] + if all(dep_id in passing_ids for dep_id in deps): + ready.append(f.to_dict()) + + # Sort by priority + ready.sort(key=lambda f: (f["priority"], f["id"])) + + return json.dumps({ + "features": ready[:limit], + "count": len(ready[:limit]), + "total_ready": len(ready) + }, indent=2) + finally: + session.close() + + +@mcp.tool() +def feature_get_blocked() -> str: + """Get all features that are blocked by unmet dependencies. + + Returns features that have dependencies which are not yet passing. + Each feature includes a 'blocked_by' field listing the blocking feature IDs. + + Returns: + JSON with: features (list with blocked_by field), count (int) + """ + session = get_session() + try: + all_features = session.query(Feature).all() + passing_ids = {f.id for f in all_features if f.passes} + + blocked = [] + for f in all_features: + if f.passes: + continue + deps = f.dependencies or [] + blocking = [d for d in deps if d not in passing_ids] + if blocking: + blocked.append({ + **f.to_dict(), + "blocked_by": blocking + }) + + return json.dumps({ + "features": blocked, + "count": len(blocked) + }, indent=2) + finally: + session.close() + + +@mcp.tool() +def feature_get_graph() -> str: + """Get dependency graph data for visualization. + + Returns nodes (features) and edges (dependencies) for rendering a graph. + Each node includes status: 'pending', 'in_progress', 'done', or 'blocked'. + + Returns: + JSON with: nodes (list), edges (list of {source, target}) + """ + session = get_session() + try: + all_features = session.query(Feature).all() + passing_ids = {f.id for f in all_features if f.passes} + + nodes = [] + edges = [] + + for f in all_features: + deps = f.dependencies or [] + blocking = [d for d in deps if d not in passing_ids] + + if f.passes: + status = "done" + elif blocking: + status = "blocked" + elif f.in_progress: + status = "in_progress" + else: + status = "pending" + + nodes.append({ + "id": f.id, + "name": f.name, + "category": f.category, + "status": status, + "priority": f.priority, + "dependencies": deps + }) + + for dep_id in deps: + edges.append({"source": dep_id, "target": f.id}) + + return json.dumps({ + "nodes": nodes, + "edges": edges + }, indent=2) + finally: + session.close() + + +@mcp.tool() +def feature_set_dependencies( + feature_id: Annotated[int, Field(ge=1, description="Feature to set dependencies for")], + dependency_ids: Annotated[list[int], Field(description="List of dependency feature IDs")] +) -> str: + """Set all dependencies for a feature at once, replacing any existing dependencies. + + Validates: self-reference, existence of all dependencies, circular dependencies, max limit. 
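One way to consume the `feature_get_graph` payload above is to dump it to Graphviz DOT for a quick visual check. The status-to-color mapping below is an arbitrary illustrative choice, not something the tool defines.

```python
# Minimal consumer for the feature_get_graph() JSON: emit Graphviz DOT text.
import json

STATUS_COLORS = {"done": "green", "in_progress": "orange", "blocked": "red", "pending": "gray"}

def graph_to_dot(graph_json: str) -> str:
    graph = json.loads(graph_json)
    lines = ["digraph features {"]
    for node in graph["nodes"]:
        color = STATUS_COLORS.get(node["status"], "gray")
        lines.append(f'  f{node["id"]} [label="{node["name"]}", color={color}];')
    for edge in graph["edges"]:
        lines.append(f'  f{edge["source"]} -> f{edge["target"]};')
    lines.append("}")
    return "\n".join(lines)
```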
+ + Args: + feature_id: The ID of the feature to set dependencies for + dependency_ids: List of feature IDs that must be completed first + + Returns: + JSON with success status and updated dependencies list, or error message + """ + session = get_session() + try: + # Security: Self-reference check + if feature_id in dependency_ids: + return json.dumps({"error": "A feature cannot depend on itself"}) + + # Security: Max dependencies limit + if len(dependency_ids) > MAX_DEPENDENCIES_PER_FEATURE: + return json.dumps({"error": f"Maximum {MAX_DEPENDENCIES_PER_FEATURE} dependencies allowed"}) + + # Check for duplicates + if len(dependency_ids) != len(set(dependency_ids)): + return json.dumps({"error": "Duplicate dependencies not allowed"}) + + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if not feature: + return json.dumps({"error": f"Feature {feature_id} not found"}) + + # Validate all dependencies exist + all_feature_ids = {f.id for f in session.query(Feature).all()} + missing = [d for d in dependency_ids if d not in all_feature_ids] + if missing: + return json.dumps({"error": f"Dependencies not found: {missing}"}) + + # Check for circular dependencies + all_features = [f.to_dict() for f in session.query(Feature).all()] + # Temporarily update the feature's dependencies for cycle check + test_features = [] + for f in all_features: + if f["id"] == feature_id: + test_features.append({**f, "dependencies": dependency_ids}) + else: + test_features.append(f) + + for dep_id in dependency_ids: + # source_id = feature_id (gaining dep), target_id = dep_id (being depended upon) + if would_create_circular_dependency(test_features, feature_id, dep_id): + return json.dumps({"error": f"Cannot add dependency {dep_id}: would create circular dependency"}) + + # Set dependencies + feature.dependencies = sorted(dependency_ids) if dependency_ids else None + session.commit() + + return json.dumps({ + "success": True, + "feature_id": feature_id, + "dependencies": feature.dependencies or [] + }) + finally: + session.close() + + if __name__ == "__main__": mcp.run() diff --git a/parallel_orchestrator.py b/parallel_orchestrator.py new file mode 100644 index 0000000..35d03c4 --- /dev/null +++ b/parallel_orchestrator.py @@ -0,0 +1,504 @@ +""" +Parallel Orchestrator +===================== + +Coordinates parallel execution of independent features using multiple agent processes. +Uses dependency-aware scheduling to ensure features are only started when their +dependencies are satisfied. 
+ +Usage: + python parallel_orchestrator.py --project-dir my-app --max-concurrency 3 +""" + +import asyncio +import os +import subprocess +import sys +import threading +import time +from pathlib import Path +from typing import Callable, Awaitable + +from api.database import Feature, create_database +from api.dependency_resolver import are_dependencies_satisfied + +# Root directory of autocoder (where this script and autonomous_agent_demo.py live) +AUTOCODER_ROOT = Path(__file__).parent.resolve() + +# Performance: Limit parallel agents to prevent memory exhaustion +MAX_PARALLEL_AGENTS = 5 +DEFAULT_CONCURRENCY = 3 +POLL_INTERVAL = 5 # seconds between checking for ready features +MAX_FEATURE_RETRIES = 3 # Maximum times to retry a failed feature + + +class ParallelOrchestrator: + """Orchestrates parallel execution of independent features.""" + + def __init__( + self, + project_dir: Path, + max_concurrency: int = DEFAULT_CONCURRENCY, + model: str = None, + yolo_mode: bool = False, + on_output: Callable[[int, str], None] = None, + on_status: Callable[[int, str], None] = None, + ): + """Initialize the orchestrator. + + Args: + project_dir: Path to the project directory + max_concurrency: Maximum number of concurrent agents (1-5) + model: Claude model to use (or None for default) + yolo_mode: Whether to run in YOLO mode (skip browser testing) + on_output: Callback for agent output (feature_id, line) + on_status: Callback for agent status changes (feature_id, status) + """ + self.project_dir = project_dir + self.max_concurrency = min(max(max_concurrency, 1), MAX_PARALLEL_AGENTS) + self.model = model + self.yolo_mode = yolo_mode + self.on_output = on_output + self.on_status = on_status + + # Thread-safe state + self._lock = threading.Lock() + self.running_agents: dict[int, subprocess.Popen] = {} + self.abort_events: dict[int, threading.Event] = {} + self.is_running = False + + # Track feature failures to prevent infinite retry loops + self._failure_counts: dict[int, int] = {} + + # Database session for this orchestrator + self._engine, self._session_maker = create_database(project_dir) + + def get_session(self): + """Get a new database session.""" + return self._session_maker() + + def get_resumable_features(self) -> list[dict]: + """Get features that were left in_progress from a previous session. + + These are features where in_progress=True but passes=False, and they're + not currently being worked on by this orchestrator. This handles the case + where a previous session was interrupted before completing the feature. 
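The `on_output`/`on_status` callbacks make the orchestrator embeddable, for example behind a web UI. A small driver written against the constructor above, with placeholder paths, might look like this (sketch; running it would actually spawn agent processes):

```python
# Embedding sketch: drive ParallelOrchestrator with custom callbacks.
import asyncio
from pathlib import Path

from parallel_orchestrator import ParallelOrchestrator

def on_output(feature_id: int, line: str) -> None:
    print(f"[#{feature_id}] {line}")

def on_status(feature_id: int, status: str) -> None:
    print(f"[#{feature_id}] status -> {status}")

orchestrator = ParallelOrchestrator(
    project_dir=Path("generations/my-app"),   # placeholder project path
    max_concurrency=3,
    yolo_mode=False,
    on_output=on_output,
    on_status=on_status,
)
asyncio.run(orchestrator.run_loop())
```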
+ """ + session = self.get_session() + try: + # Find features that are in_progress but not complete + stale = session.query(Feature).filter( + Feature.in_progress == True, + Feature.passes == False + ).all() + + resumable = [] + for f in stale: + # Skip if already running in this orchestrator instance + with self._lock: + if f.id in self.running_agents: + continue + # Skip if feature has failed too many times + if self._failure_counts.get(f.id, 0) >= MAX_FEATURE_RETRIES: + continue + resumable.append(f.to_dict()) + + # Sort by priority (highest priority first) + resumable.sort(key=lambda f: (f["priority"], f["id"])) + return resumable + finally: + session.close() + + def get_ready_features(self) -> list[dict]: + """Get features with satisfied dependencies, not already running.""" + session = self.get_session() + try: + all_features = session.query(Feature).all() + all_dicts = [f.to_dict() for f in all_features] + + ready = [] + for f in all_features: + if f.passes or f.in_progress: + continue + # Skip if already running in this orchestrator + with self._lock: + if f.id in self.running_agents: + continue + # Skip if feature has failed too many times + if self._failure_counts.get(f.id, 0) >= MAX_FEATURE_RETRIES: + continue + # Check dependencies + if are_dependencies_satisfied(f.to_dict(), all_dicts): + ready.append(f.to_dict()) + + # Sort by priority + ready.sort(key=lambda f: (f["priority"], f["id"])) + return ready + finally: + session.close() + + def get_all_complete(self) -> bool: + """Check if all features are complete.""" + session = self.get_session() + try: + pending = session.query(Feature).filter(Feature.passes == False).count() + return pending == 0 + finally: + session.close() + + def start_feature(self, feature_id: int, resume: bool = False) -> tuple[bool, str]: + """Start a single feature agent. 
+ + Args: + feature_id: ID of the feature to start + resume: If True, resume a feature that's already in_progress from a previous session + + Returns: + Tuple of (success, message) + """ + with self._lock: + if feature_id in self.running_agents: + return False, "Feature already running" + if len(self.running_agents) >= self.max_concurrency: + return False, "At max concurrency" + + # Mark as in_progress in database (or verify it's resumable) + session = self.get_session() + try: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if not feature: + return False, "Feature not found" + if feature.passes: + return False, "Feature already complete" + + if resume: + # Resuming: feature should already be in_progress + if not feature.in_progress: + return False, "Feature not in progress, cannot resume" + else: + # Starting fresh: feature should not be in_progress + if feature.in_progress: + return False, "Feature already in progress" + feature.in_progress = True + session.commit() + finally: + session.close() + + # Create abort event + abort_event = threading.Event() + + # Start subprocess for this feature + cmd = [ + sys.executable, + "-u", # Force unbuffered stdout/stderr + str(AUTOCODER_ROOT / "autonomous_agent_demo.py"), + "--project-dir", str(self.project_dir), + "--max-iterations", "1", # Single feature mode + "--feature-id", str(feature_id), # Work on this specific feature only + ] + if self.model: + cmd.extend(["--model", self.model]) + if self.yolo_mode: + cmd.append("--yolo") + + try: + proc = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + cwd=str(AUTOCODER_ROOT), # Run from autocoder root for proper imports + env={**os.environ, "PYTHONUNBUFFERED": "1"}, + ) + except Exception as e: + # Reset in_progress on failure + session = self.get_session() + try: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if feature: + feature.in_progress = False + session.commit() + finally: + session.close() + return False, f"Failed to start agent: {e}" + + with self._lock: + self.running_agents[feature_id] = proc + self.abort_events[feature_id] = abort_event + + # Start output reader thread + threading.Thread( + target=self._read_output, + args=(feature_id, proc, abort_event), + daemon=True + ).start() + + if self.on_status: + self.on_status(feature_id, "running") + + print(f"Started agent for feature #{feature_id}", flush=True) + return True, f"Started feature {feature_id}" + + def _read_output(self, feature_id: int, proc: subprocess.Popen, abort: threading.Event): + """Read output from subprocess and emit events.""" + try: + for line in proc.stdout: + if abort.is_set(): + break + line = line.rstrip() + if self.on_output: + self.on_output(feature_id, line) + else: + print(f"[Feature #{feature_id}] {line}", flush=True) + proc.wait() + finally: + self._on_feature_complete(feature_id, proc.returncode) + + def _on_feature_complete(self, feature_id: int, return_code: int): + """Handle feature completion. + + ALWAYS clears in_progress when agent exits, regardless of success/failure. + This prevents features from getting stuck if an agent crashes or is killed. + The agent marks features as passing BEFORE clearing in_progress, so this + is safe - we won't accidentally clear a feature that's being worked on. 
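Concretely, the command `start_feature` spawns for, say, feature #12 in YOLO mode reduces to something like the following (root and project paths are placeholders):

```python
# The per-feature agent command assembled by start_feature(), shown with
# placeholder paths and an example feature id.
import sys
from pathlib import Path

AUTOCODER_ROOT = Path(".")                  # repo root (placeholder)
project_dir = Path("generations/my-app")    # target project (placeholder)

cmd = [
    sys.executable, "-u",                   # -u: unbuffered output for live log streaming
    str(AUTOCODER_ROOT / "autonomous_agent_demo.py"),
    "--project-dir", str(project_dir),
    "--max-iterations", "1",                # exactly one session for this feature
    "--feature-id", "12",                   # the feature this agent is pinned to
    "--yolo",                               # present only when yolo_mode is enabled
]
print(" ".join(cmd))
```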
+ """ + with self._lock: + self.running_agents.pop(feature_id, None) + self.abort_events.pop(feature_id, None) + + # ALWAYS clear in_progress when agent exits to prevent stuck features + # The agent marks features as passing before clearing in_progress, + # so if in_progress is still True here, the feature didn't complete successfully + session = self.get_session() + try: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if feature and feature.in_progress and not feature.passes: + feature.in_progress = False + session.commit() + finally: + session.close() + + # Track failures to prevent infinite retry loops + if return_code != 0: + with self._lock: + self._failure_counts[feature_id] = self._failure_counts.get(feature_id, 0) + 1 + failure_count = self._failure_counts[feature_id] + if failure_count >= MAX_FEATURE_RETRIES: + print(f"Feature #{feature_id} has failed {failure_count} times, will not retry", flush=True) + + status = "completed" if return_code == 0 else "failed" + if self.on_status: + self.on_status(feature_id, status) + print(f"Feature #{feature_id} {status}", flush=True) + + def stop_feature(self, feature_id: int) -> tuple[bool, str]: + """Stop a running feature agent.""" + with self._lock: + if feature_id not in self.running_agents: + return False, "Feature not running" + + abort = self.abort_events.get(feature_id) + proc = self.running_agents.get(feature_id) + + if abort: + abort.set() + if proc: + proc.terminate() + try: + proc.wait(timeout=5) + except subprocess.TimeoutExpired: + proc.kill() + + return True, f"Stopped feature {feature_id}" + + def stop_all(self) -> None: + """Stop all running feature agents.""" + self.is_running = False + with self._lock: + feature_ids = list(self.running_agents.keys()) + + for fid in feature_ids: + self.stop_feature(fid) + + async def run_loop(self): + """Main orchestration loop.""" + self.is_running = True + + print(f"Starting parallel orchestrator with max_concurrency={self.max_concurrency}", flush=True) + print(f"Project: {self.project_dir}", flush=True) + print(flush=True) + + # Check for features to resume from previous session + resumable = self.get_resumable_features() + if resumable: + print(f"Found {len(resumable)} feature(s) to resume from previous session:", flush=True) + for f in resumable: + print(f" - Feature #{f['id']}: {f['name']}", flush=True) + print(flush=True) + + while self.is_running: + try: + # Check if all complete + if self.get_all_complete(): + print("\nAll features complete!", flush=True) + break + + # Check capacity + with self._lock: + current = len(self.running_agents) + if current >= self.max_concurrency: + await asyncio.sleep(POLL_INTERVAL) + continue + + # Priority 1: Resume features from previous session + resumable = self.get_resumable_features() + if resumable: + slots = self.max_concurrency - current + for feature in resumable[:slots]: + print(f"Resuming feature #{feature['id']}: {feature['name']}", flush=True) + self.start_feature(feature["id"], resume=True) + await asyncio.sleep(2) + continue + + # Priority 2: Start new ready features + ready = self.get_ready_features() + if not ready: + # Wait for running features to complete + if current > 0: + await asyncio.sleep(POLL_INTERVAL) + continue + else: + # No ready features and nothing running - might be blocked + print("No ready features available. 
All remaining features may be blocked by dependencies.", flush=True) + await asyncio.sleep(POLL_INTERVAL * 2) + continue + + # Start features up to capacity + slots = self.max_concurrency - current + for feature in ready[:slots]: + print(f"Starting feature #{feature['id']}: {feature['name']}", flush=True) + self.start_feature(feature["id"]) + + await asyncio.sleep(2) # Brief pause between starts + + except Exception as e: + print(f"Orchestrator error: {e}", flush=True) + await asyncio.sleep(POLL_INTERVAL) + + # Wait for remaining agents to complete + print("Waiting for running agents to complete...", flush=True) + while True: + with self._lock: + if not self.running_agents: + break + await asyncio.sleep(1) + + print("Orchestrator finished.", flush=True) + + def get_status(self) -> dict: + """Get current orchestrator status.""" + with self._lock: + return { + "running_features": list(self.running_agents.keys()), + "count": len(self.running_agents), + "max_concurrency": self.max_concurrency, + "is_running": self.is_running, + } + + +async def run_parallel_orchestrator( + project_dir: Path, + max_concurrency: int = DEFAULT_CONCURRENCY, + model: str = None, + yolo_mode: bool = False, +) -> None: + """Run the parallel orchestrator. + + Args: + project_dir: Path to the project directory + max_concurrency: Maximum number of concurrent agents + model: Claude model to use + yolo_mode: Whether to run in YOLO mode + """ + orchestrator = ParallelOrchestrator( + project_dir=project_dir, + max_concurrency=max_concurrency, + model=model, + yolo_mode=yolo_mode, + ) + + try: + await orchestrator.run_loop() + except KeyboardInterrupt: + print("\n\nInterrupted by user. Stopping agents...", flush=True) + orchestrator.stop_all() + + +def main(): + """Main entry point for parallel orchestration.""" + import argparse + from dotenv import load_dotenv + from registry import DEFAULT_MODEL, get_project_path + + load_dotenv() + + parser = argparse.ArgumentParser( + description="Parallel Feature Orchestrator - Run multiple agent instances", + ) + parser.add_argument( + "--project-dir", + type=str, + required=True, + help="Project directory path (absolute) or registered project name", + ) + parser.add_argument( + "--max-concurrency", + "-p", + type=int, + default=DEFAULT_CONCURRENCY, + help=f"Maximum concurrent agents (1-{MAX_PARALLEL_AGENTS}, default: {DEFAULT_CONCURRENCY})", + ) + parser.add_argument( + "--model", + type=str, + default=DEFAULT_MODEL, + help=f"Claude model to use (default: {DEFAULT_MODEL})", + ) + parser.add_argument( + "--yolo", + action="store_true", + default=False, + help="Enable YOLO mode: rapid prototyping without browser testing", + ) + + args = parser.parse_args() + + # Resolve project directory + project_dir_input = args.project_dir + project_dir = Path(project_dir_input) + + if project_dir.is_absolute(): + if not project_dir.exists(): + print(f"Error: Project directory does not exist: {project_dir}", flush=True) + sys.exit(1) + else: + registered_path = get_project_path(project_dir_input) + if registered_path: + project_dir = registered_path + else: + print(f"Error: Project '{project_dir_input}' not found in registry", flush=True) + sys.exit(1) + + try: + asyncio.run(run_parallel_orchestrator( + project_dir=project_dir, + max_concurrency=args.max_concurrency, + model=args.model, + yolo_mode=args.yolo, + )) + except KeyboardInterrupt: + print("\n\nInterrupted by user", flush=True) + + +if __name__ == "__main__": + main() diff --git a/prompts.py b/prompts.py index 0fc403b..2c0dcfc 100644 --- 
a/prompts.py +++ b/prompts.py @@ -79,6 +79,56 @@ def get_coding_prompt_yolo(project_dir: Path | None = None) -> str: return load_prompt("coding_prompt_yolo", project_dir) +def get_single_feature_prompt(feature_id: int, project_dir: Path | None = None, yolo_mode: bool = False) -> str: + """ + Load the coding prompt with single-feature focus instructions prepended. + + When the parallel orchestrator assigns a specific feature to an agent, + this prompt ensures the agent works ONLY on that feature. + + Args: + feature_id: The specific feature ID to work on + project_dir: Optional project directory for project-specific prompts + yolo_mode: If True, use the YOLO prompt variant + + Returns: + The prompt with single-feature instructions prepended + """ + # Get the base prompt + if yolo_mode: + base_prompt = get_coding_prompt_yolo(project_dir) + else: + base_prompt = get_coding_prompt(project_dir) + + # Prepend single-feature instructions + single_feature_header = f"""## SINGLE FEATURE MODE + +**CRITICAL: You are assigned to work on Feature #{feature_id} ONLY.** + +This session is part of a parallel execution where multiple agents work on different features simultaneously. You MUST: + +1. **Skip the `feature_get_next` step** - Your feature is already assigned: #{feature_id} +2. **Immediately mark feature #{feature_id} as in-progress** using `feature_mark_in_progress` +3. **Focus ONLY on implementing and testing feature #{feature_id}** +4. **Do NOT work on any other features** - other agents are handling them + +When you complete feature #{feature_id}: +- Mark it as passing with `feature_mark_passing` +- Commit your changes +- End the session + +If you cannot complete feature #{feature_id} due to a blocker: +- Use `feature_skip` to move it to the end of the queue +- Document the blocker in claude-progress.txt +- End the session + +--- + +""" + + return single_feature_header + base_prompt + + def get_app_spec(project_dir: Path) -> str: """ Load the app spec from the project. 
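For orientation, a minimal sketch of how a worker launched with `--feature-id` might resolve its prompt through this helper. Only `get_single_feature_prompt`, `get_coding_prompt`, and `get_coding_prompt_yolo` come from the patch; the `resolve_prompt` wrapper and the top-level `prompts` import path are assumptions for illustration.

```python
# Illustrative sketch, not part of the patch: wire a worker's --feature-id
# argument to the single-feature prompt. resolve_prompt() and the "prompts"
# import path are assumptions.
from pathlib import Path

from prompts import get_coding_prompt, get_coding_prompt_yolo, get_single_feature_prompt


def resolve_prompt(feature_id: int | None, project_dir: Path, yolo: bool) -> str:
    if feature_id is not None:
        # Parallel mode: the orchestrator pins this agent to one feature.
        return get_single_feature_prompt(feature_id, project_dir=project_dir, yolo_mode=yolo)
    # Single-agent mode: the agent picks its own next feature.
    return get_coding_prompt_yolo(project_dir) if yolo else get_coding_prompt(project_dir)
```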
diff --git a/server/routers/agent.py b/server/routers/agent.py index 309ab1c..a6d121b 100644 --- a/server/routers/agent.py +++ b/server/routers/agent.py @@ -85,6 +85,8 @@ async def get_agent_status(project_name: str): started_at=manager.started_at, yolo_mode=manager.yolo_mode, model=manager.model, + parallel_mode=manager.parallel_mode, + max_concurrency=manager.max_concurrency, ) @@ -100,8 +102,15 @@ async def start_agent( default_yolo, default_model = _get_settings_defaults() yolo_mode = request.yolo_mode if request.yolo_mode is not None else default_yolo model = request.model if request.model else default_model + parallel_mode = request.parallel_mode or False + max_concurrency = request.max_concurrency - success, message = await manager.start(yolo_mode=yolo_mode, model=model) + success, message = await manager.start( + yolo_mode=yolo_mode, + model=model, + parallel_mode=parallel_mode, + max_concurrency=max_concurrency, + ) return AgentActionResponse( success=success, diff --git a/server/routers/features.py b/server/routers/features.py index 755b9fa..d6c3913 100644 --- a/server/routers/features.py +++ b/server/routers/features.py @@ -12,6 +12,9 @@ from pathlib import Path from fastapi import APIRouter, HTTPException from ..schemas import ( + DependencyGraphNode, + DependencyGraphResponse, + DependencyUpdate, FeatureBulkCreate, FeatureBulkCreateResponse, FeatureCreate, @@ -72,11 +75,27 @@ def get_db_session(project_dir: Path): session.close() -def feature_to_response(f) -> FeatureResponse: +def feature_to_response(f, passing_ids: set[int] | None = None) -> FeatureResponse: """Convert a Feature model to a FeatureResponse. Handles legacy NULL values in boolean fields by treating them as False. + Computes blocked status if passing_ids is provided. + + Args: + f: Feature model instance + passing_ids: Optional set of feature IDs that are passing (for computing blocked status) + + Returns: + FeatureResponse with computed blocked status """ + deps = f.dependencies or [] + if passing_ids is None: + blocking = [] + blocked = False + else: + blocking = [d for d in deps if d not in passing_ids] + blocked = len(blocking) > 0 + return FeatureResponse( id=f.id, priority=f.priority, @@ -84,9 +103,12 @@ def feature_to_response(f) -> FeatureResponse: name=f.name, description=f.description, steps=f.steps if isinstance(f.steps, list) else [], + dependencies=deps, # Handle legacy NULL values gracefully - treat as False passes=f.passes if f.passes is not None else False, in_progress=f.in_progress if f.in_progress is not None else False, + blocked=blocked, + blocking_dependencies=blocking, ) @@ -119,12 +141,15 @@ async def list_features(project_name: str): with get_db_session(project_dir) as session: all_features = session.query(Feature).order_by(Feature.priority).all() + # Compute passing IDs for blocked status calculation + passing_ids = {f.id for f in all_features if f.passes} + pending = [] in_progress = [] done = [] for f in all_features: - feature_response = feature_to_response(f) + feature_response = feature_to_response(f, passing_ids) if f.passes: done.append(feature_response) elif f.in_progress: @@ -174,6 +199,7 @@ async def create_feature(project_name: str, feature: FeatureCreate): name=feature.name, description=feature.description, steps=feature.steps, + dependencies=feature.dependencies if feature.dependencies else None, passes=False, in_progress=False, ) @@ -190,6 +216,167 @@ async def create_feature(project_name: str, feature: FeatureCreate): raise HTTPException(status_code=500, detail="Failed to 
create feature") +# ============================================================================ +# Static path endpoints - MUST be declared before /{feature_id} routes +# ============================================================================ + + +@router.post("/bulk", response_model=FeatureBulkCreateResponse) +async def create_features_bulk(project_name: str, bulk: FeatureBulkCreate): + """ + Create multiple features at once. + + Features are assigned sequential priorities starting from: + - starting_priority if specified (must be >= 1) + - max(existing priorities) + 1 if not specified + + This is useful for: + - Expanding a project with new features via AI + - Importing features from external sources + - Batch operations + + Returns: + {"created": N, "features": [...]} + """ + project_name = validate_project_name(project_name) + project_dir = _get_project_path(project_name) + + if not project_dir: + raise HTTPException(status_code=404, detail=f"Project '{project_name}' not found in registry") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + if not bulk.features: + return FeatureBulkCreateResponse(created=0, features=[]) + + # Validate starting_priority if provided + if bulk.starting_priority is not None and bulk.starting_priority < 1: + raise HTTPException(status_code=400, detail="starting_priority must be >= 1") + + _, Feature = _get_db_classes() + + try: + with get_db_session(project_dir) as session: + # Determine starting priority with row-level lock to prevent race conditions + if bulk.starting_priority is not None: + current_priority = bulk.starting_priority + else: + # Lock the max priority row to prevent concurrent inserts from getting same priority + max_priority_feature = ( + session.query(Feature) + .order_by(Feature.priority.desc()) + .with_for_update() + .first() + ) + current_priority = (max_priority_feature.priority + 1) if max_priority_feature else 1 + + created_ids = [] + + for feature_data in bulk.features: + db_feature = Feature( + priority=current_priority, + category=feature_data.category, + name=feature_data.name, + description=feature_data.description, + steps=feature_data.steps, + dependencies=feature_data.dependencies if feature_data.dependencies else None, + passes=False, + in_progress=False, + ) + session.add(db_feature) + session.flush() # Flush to get the ID immediately + created_ids.append(db_feature.id) + current_priority += 1 + + session.commit() + + # Query created features by their IDs (avoids relying on priority range) + created_features = [] + for db_feature in session.query(Feature).filter( + Feature.id.in_(created_ids) + ).order_by(Feature.priority).all(): + created_features.append(feature_to_response(db_feature)) + + return FeatureBulkCreateResponse( + created=len(created_features), + features=created_features + ) + except HTTPException: + raise + except Exception: + logger.exception("Failed to bulk create features") + raise HTTPException(status_code=500, detail="Failed to bulk create features") + + +@router.get("/graph", response_model=DependencyGraphResponse) +async def get_dependency_graph(project_name: str): + """Return dependency graph data for visualization. + + Returns nodes (features) and edges (dependencies) suitable for + rendering with React Flow or similar graph libraries. 
+ """ + project_name = validate_project_name(project_name) + project_dir = _get_project_path(project_name) + + if not project_dir: + raise HTTPException(status_code=404, detail=f"Project '{project_name}' not found in registry") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + db_file = project_dir / "features.db" + if not db_file.exists(): + return DependencyGraphResponse(nodes=[], edges=[]) + + _, Feature = _get_db_classes() + + try: + with get_db_session(project_dir) as session: + all_features = session.query(Feature).all() + passing_ids = {f.id for f in all_features if f.passes} + + nodes = [] + edges = [] + + for f in all_features: + deps = f.dependencies or [] + blocking = [d for d in deps if d not in passing_ids] + + if f.passes: + status = "done" + elif blocking: + status = "blocked" + elif f.in_progress: + status = "in_progress" + else: + status = "pending" + + nodes.append(DependencyGraphNode( + id=f.id, + name=f.name, + category=f.category, + status=status, + priority=f.priority, + dependencies=deps + )) + + for dep_id in deps: + edges.append({"source": dep_id, "target": f.id}) + + return DependencyGraphResponse(nodes=nodes, edges=edges) + except HTTPException: + raise + except Exception: + logger.exception("Failed to get dependency graph") + raise HTTPException(status_code=500, detail="Failed to get dependency graph") + + +# ============================================================================ +# Parameterized path endpoints - /{feature_id} routes +# ============================================================================ + + @router.get("/{feature_id}", response_model=FeatureResponse) async def get_feature(project_name: str, feature_id: int): """Get details of a specific feature.""" @@ -268,11 +455,17 @@ async def update_feature(project_name: str, feature_id: int, update: FeatureUpda feature.steps = update.steps if update.priority is not None: feature.priority = update.priority + if update.dependencies is not None: + feature.dependencies = update.dependencies if update.dependencies else None session.commit() session.refresh(feature) - return feature_to_response(feature) + # Compute passing IDs for response + all_features = session.query(Feature).all() + passing_ids = {f.id for f in all_features if f.passes} + + return feature_to_response(feature, passing_ids) except HTTPException: raise except Exception: @@ -282,7 +475,12 @@ async def update_feature(project_name: str, feature_id: int, update: FeatureUpda @router.delete("/{feature_id}") async def delete_feature(project_name: str, feature_id: int): - """Delete a feature.""" + """Delete a feature and clean up references in other features' dependencies. + + When a feature is deleted, any other features that depend on it will have + that dependency removed from their dependencies list. This prevents orphaned + dependencies that would permanently block features. 
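+
+    Illustrative example: deleting feature 4 when feature 7 has
+    dependencies=[2, 4] leaves feature 7 with dependencies=[2] and includes
+    7 in the "affected_features" field of the response.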
+ """ project_name = validate_project_name(project_name) project_dir = _get_project_path(project_name) @@ -301,10 +499,24 @@ async def delete_feature(project_name: str, feature_id: int): if not feature: raise HTTPException(status_code=404, detail=f"Feature {feature_id} not found") + # Clean up dependency references in other features + # This prevents orphaned dependencies that would block features forever + affected_features = [] + for f in session.query(Feature).all(): + if f.dependencies and feature_id in f.dependencies: + # Remove the deleted feature from this feature's dependencies + deps = [d for d in f.dependencies if d != feature_id] + f.dependencies = deps if deps else None + affected_features.append(f.id) + session.delete(feature) session.commit() - return {"success": True, "message": f"Feature {feature_id} deleted"} + message = f"Feature {feature_id} deleted" + if affected_features: + message += f". Removed from dependencies of features: {affected_features}" + + return {"success": True, "message": message, "affected_features": affected_features} except HTTPException: raise except Exception: @@ -352,22 +564,123 @@ async def skip_feature(project_name: str, feature_id: int): raise HTTPException(status_code=500, detail="Failed to skip feature") -@router.post("/bulk", response_model=FeatureBulkCreateResponse) -async def create_features_bulk(project_name: str, bulk: FeatureBulkCreate): +# ============================================================================ +# Dependency Management Endpoints +# ============================================================================ + + +def _get_dependency_resolver(): + """Lazy import of dependency resolver.""" + import sys + root = Path(__file__).parent.parent.parent + if str(root) not in sys.path: + sys.path.insert(0, str(root)) + from api.dependency_resolver import would_create_circular_dependency, MAX_DEPENDENCIES_PER_FEATURE + return would_create_circular_dependency, MAX_DEPENDENCIES_PER_FEATURE + + +@router.post("/{feature_id}/dependencies/{dep_id}") +async def add_dependency(project_name: str, feature_id: int, dep_id: int): + """Add a dependency relationship between features. + + The dep_id feature must be completed before feature_id can be started. + Validates: self-reference, existence, circular dependencies, max limit. """ - Create multiple features at once. 
+ project_name = validate_project_name(project_name) - Features are assigned sequential priorities starting from: - - starting_priority if specified (must be >= 1) - - max(existing priorities) + 1 if not specified + # Security: Self-reference check + if feature_id == dep_id: + raise HTTPException(status_code=400, detail="A feature cannot depend on itself") - This is useful for: - - Expanding a project with new features via AI - - Importing features from external sources - - Batch operations + project_dir = _get_project_path(project_name) - Returns: - {"created": N, "features": [...]} + if not project_dir: + raise HTTPException(status_code=404, detail=f"Project '{project_name}' not found in registry") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + would_create_circular_dependency, MAX_DEPENDENCIES_PER_FEATURE = _get_dependency_resolver() + _, Feature = _get_db_classes() + + try: + with get_db_session(project_dir) as session: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + dependency = session.query(Feature).filter(Feature.id == dep_id).first() + + if not feature: + raise HTTPException(status_code=404, detail=f"Feature {feature_id} not found") + if not dependency: + raise HTTPException(status_code=404, detail=f"Dependency {dep_id} not found") + + current_deps = feature.dependencies or [] + + # Security: Limit check + if len(current_deps) >= MAX_DEPENDENCIES_PER_FEATURE: + raise HTTPException(status_code=400, detail=f"Maximum {MAX_DEPENDENCIES_PER_FEATURE} dependencies allowed") + + if dep_id in current_deps: + raise HTTPException(status_code=400, detail="Dependency already exists") + + # Security: Circular dependency check + # source_id = feature_id (gaining dep), target_id = dep_id (being depended upon) + all_features = [f.to_dict() for f in session.query(Feature).all()] + if would_create_circular_dependency(all_features, feature_id, dep_id): + raise HTTPException(status_code=400, detail="Would create circular dependency") + + current_deps.append(dep_id) + feature.dependencies = sorted(current_deps) + session.commit() + + return {"success": True, "feature_id": feature_id, "dependencies": feature.dependencies} + except HTTPException: + raise + except Exception: + logger.exception("Failed to add dependency") + raise HTTPException(status_code=500, detail="Failed to add dependency") + + +@router.delete("/{feature_id}/dependencies/{dep_id}") +async def remove_dependency(project_name: str, feature_id: int, dep_id: int): + """Remove a dependency from a feature.""" + project_name = validate_project_name(project_name) + project_dir = _get_project_path(project_name) + + if not project_dir: + raise HTTPException(status_code=404, detail=f"Project '{project_name}' not found in registry") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + _, Feature = _get_db_classes() + + try: + with get_db_session(project_dir) as session: + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if not feature: + raise HTTPException(status_code=404, detail=f"Feature {feature_id} not found") + + current_deps = feature.dependencies or [] + if dep_id not in current_deps: + raise HTTPException(status_code=400, detail="Dependency does not exist") + + current_deps.remove(dep_id) + feature.dependencies = current_deps if current_deps else None + session.commit() + + return {"success": True, "feature_id": feature_id, "dependencies": 
feature.dependencies or []} + except HTTPException: + raise + except Exception: + logger.exception("Failed to remove dependency") + raise HTTPException(status_code=500, detail="Failed to remove dependency") + + +@router.put("/{feature_id}/dependencies") +async def set_dependencies(project_name: str, feature_id: int, update: DependencyUpdate): + """Set all dependencies for a feature at once, replacing any existing. + + Validates: self-reference, existence of all dependencies, circular dependencies, max limit. """ project_name = validate_project_name(project_name) project_dir = _get_project_path(project_name) @@ -378,62 +691,56 @@ async def create_features_bulk(project_name: str, bulk: FeatureBulkCreate): if not project_dir.exists(): raise HTTPException(status_code=404, detail="Project directory not found") - if not bulk.features: - return FeatureBulkCreateResponse(created=0, features=[]) + dependency_ids = update.dependency_ids - # Validate starting_priority if provided - if bulk.starting_priority is not None and bulk.starting_priority < 1: - raise HTTPException(status_code=400, detail="starting_priority must be >= 1") + # Security: Self-reference check + if feature_id in dependency_ids: + raise HTTPException(status_code=400, detail="A feature cannot depend on itself") + # Check for duplicates + if len(dependency_ids) != len(set(dependency_ids)): + raise HTTPException(status_code=400, detail="Duplicate dependencies not allowed") + + would_create_circular_dependency, _ = _get_dependency_resolver() _, Feature = _get_db_classes() try: with get_db_session(project_dir) as session: - # Determine starting priority with row-level lock to prevent race conditions - if bulk.starting_priority is not None: - current_priority = bulk.starting_priority - else: - # Lock the max priority row to prevent concurrent inserts from getting same priority - max_priority_feature = ( - session.query(Feature) - .order_by(Feature.priority.desc()) - .with_for_update() - .first() - ) - current_priority = (max_priority_feature.priority + 1) if max_priority_feature else 1 + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if not feature: + raise HTTPException(status_code=404, detail=f"Feature {feature_id} not found") - created_ids = [] + # Validate all dependencies exist + all_feature_ids = {f.id for f in session.query(Feature).all()} + missing = [d for d in dependency_ids if d not in all_feature_ids] + if missing: + raise HTTPException(status_code=400, detail=f"Dependencies not found: {missing}") - for feature_data in bulk.features: - db_feature = Feature( - priority=current_priority, - category=feature_data.category, - name=feature_data.name, - description=feature_data.description, - steps=feature_data.steps, - passes=False, - in_progress=False, - ) - session.add(db_feature) - session.flush() # Flush to get the ID immediately - created_ids.append(db_feature.id) - current_priority += 1 + # Check for circular dependencies + all_features = [f.to_dict() for f in session.query(Feature).all()] + # Temporarily update the feature's dependencies for cycle check + test_features = [] + for f in all_features: + if f["id"] == feature_id: + test_features.append({**f, "dependencies": dependency_ids}) + else: + test_features.append(f) + for dep_id in dependency_ids: + # source_id = feature_id (gaining dep), target_id = dep_id (being depended upon) + if would_create_circular_dependency(test_features, feature_id, dep_id): + raise HTTPException( + status_code=400, + detail=f"Cannot add dependency {dep_id}: would 
create circular dependency" + ) + + # Set dependencies + feature.dependencies = sorted(dependency_ids) if dependency_ids else None session.commit() - # Query created features by their IDs (avoids relying on priority range) - created_features = [] - for db_feature in session.query(Feature).filter( - Feature.id.in_(created_ids) - ).order_by(Feature.priority).all(): - created_features.append(feature_to_response(db_feature)) - - return FeatureBulkCreateResponse( - created=len(created_features), - features=created_features - ) + return {"success": True, "feature_id": feature_id, "dependencies": feature.dependencies or []} except HTTPException: raise except Exception: - logger.exception("Failed to bulk create features") - raise HTTPException(status_code=500, detail="Failed to bulk create features") + logger.exception("Failed to set dependencies") + raise HTTPException(status_code=500, detail="Failed to set dependencies") diff --git a/server/schemas.py b/server/schemas.py index 968cb6f..b91ba5a 100644 --- a/server/schemas.py +++ b/server/schemas.py @@ -80,6 +80,7 @@ class FeatureBase(BaseModel): name: str description: str steps: list[str] + dependencies: list[int] = Field(default_factory=list) # Optional dependencies class FeatureCreate(FeatureBase): @@ -94,6 +95,7 @@ class FeatureUpdate(BaseModel): description: str | None = None steps: list[str] | None = None priority: int | None = None + dependencies: list[int] | None = None # Optional - can update dependencies class FeatureResponse(FeatureBase): @@ -102,6 +104,8 @@ class FeatureResponse(FeatureBase): priority: int passes: bool in_progress: bool + blocked: bool = False # Computed: has unmet dependencies + blocking_dependencies: list[int] = Field(default_factory=list) # Computed class Config: from_attributes = True @@ -126,6 +130,37 @@ class FeatureBulkCreateResponse(BaseModel): features: list[FeatureResponse] +# ============================================================================ +# Dependency Graph Schemas +# ============================================================================ + +class DependencyGraphNode(BaseModel): + """Minimal node for graph visualization (no description exposed for security).""" + id: int + name: str + category: str + status: Literal["pending", "in_progress", "done", "blocked"] + priority: int + dependencies: list[int] + + +class DependencyGraphEdge(BaseModel): + """Edge in the dependency graph.""" + source: int + target: int + + +class DependencyGraphResponse(BaseModel): + """Response for dependency graph visualization.""" + nodes: list[DependencyGraphNode] + edges: list[DependencyGraphEdge] + + +class DependencyUpdate(BaseModel): + """Request schema for updating a feature's dependencies.""" + dependency_ids: list[int] = Field(..., max_length=20) # Security: limit + + # ============================================================================ # Agent Schemas # ============================================================================ @@ -134,6 +169,8 @@ class AgentStartRequest(BaseModel): """Request schema for starting the agent.""" yolo_mode: bool | None = None # None means use global settings model: str | None = None # None means use global settings + parallel_mode: bool | None = None # Enable parallel execution + max_concurrency: int | None = None # Max concurrent agents (1-5) @field_validator('model') @classmethod @@ -143,6 +180,14 @@ class AgentStartRequest(BaseModel): raise ValueError(f"Invalid model. 
Must be one of: {VALID_MODELS}") return v + @field_validator('max_concurrency') + @classmethod + def validate_concurrency(cls, v: int | None) -> int | None: + """Validate max_concurrency is between 1 and 5.""" + if v is not None and (v < 1 or v > 5): + raise ValueError("max_concurrency must be between 1 and 5") + return v + class AgentStatus(BaseModel): """Current agent status.""" @@ -151,6 +196,8 @@ class AgentStatus(BaseModel): started_at: datetime | None = None yolo_mode: bool = False model: str | None = None # Model being used by running agent + parallel_mode: bool = False + max_concurrency: int | None = None class AgentActionResponse(BaseModel): @@ -180,6 +227,7 @@ class WSProgressMessage(BaseModel): """WebSocket message for progress updates.""" type: Literal["progress"] = "progress" passing: int + in_progress: int total: int percentage: float @@ -196,6 +244,8 @@ class WSLogMessage(BaseModel): type: Literal["log"] = "log" line: str timestamp: datetime + featureId: int | None = None + agentIndex: int | None = None class WSAgentStatusMessage(BaseModel): @@ -204,6 +254,25 @@ class WSAgentStatusMessage(BaseModel): status: str +# Agent state for multi-agent tracking +AgentState = Literal["idle", "thinking", "working", "testing", "success", "error", "struggling"] + +# Agent mascot names assigned by index +AGENT_MASCOTS = ["Spark", "Fizz", "Octo", "Hoot", "Buzz"] + + +class WSAgentUpdateMessage(BaseModel): + """WebSocket message for multi-agent status updates.""" + type: Literal["agent_update"] = "agent_update" + agentIndex: int + agentName: str # One of AGENT_MASCOTS + featureId: int + featureName: str + state: AgentState + thought: str | None = None + timestamp: datetime + + # ============================================================================ # Spec Chat Schemas # ============================================================================ diff --git a/server/services/process_manager.py b/server/services/process_manager.py index fd80665..07015b0 100644 --- a/server/services/process_manager.py +++ b/server/services/process_manager.py @@ -80,6 +80,8 @@ class AgentProcessManager: self._output_task: asyncio.Task | None = None self.yolo_mode: bool = False # YOLO mode for rapid prototyping self.model: str | None = None # Model being used + self.parallel_mode: bool = False # Parallel execution mode + self.max_concurrency: int | None = None # Max concurrent agents # Support multiple callbacks (for multiple WebSocket clients) self._output_callbacks: Set[Callable[[str], Awaitable[None]]] = set() @@ -241,13 +243,21 @@ class AgentProcessManager: self.status = "stopped" self._remove_lock() - async def start(self, yolo_mode: bool = False, model: str | None = None) -> tuple[bool, str]: + async def start( + self, + yolo_mode: bool = False, + model: str | None = None, + parallel_mode: bool = False, + max_concurrency: int | None = None, + ) -> tuple[bool, str]: """ Start the agent as a subprocess. 
Args: yolo_mode: If True, run in YOLO mode (no browser testing) model: Model to use (e.g., claude-opus-4-5-20251101) + parallel_mode: If True, run multiple features in parallel + max_concurrency: Max concurrent agents (default 3 if parallel enabled) Returns: Tuple of (success, message) @@ -261,6 +271,8 @@ class AgentProcessManager: # Store for status queries self.yolo_mode = yolo_mode self.model = model + self.parallel_mode = parallel_mode + self.max_concurrency = max_concurrency # Build command - pass absolute path to project directory cmd = [ @@ -278,6 +290,11 @@ class AgentProcessManager: if yolo_mode: cmd.append("--yolo") + # Add --parallel flag if parallel mode is enabled + if parallel_mode: + cmd.append("--parallel") + cmd.append(str(max_concurrency or 3)) # Default to 3 concurrent agents + try: # Start subprocess with piped stdout/stderr # Use project_dir as cwd so Claude SDK sandbox allows access to project files @@ -340,6 +357,8 @@ class AgentProcessManager: self.started_at = None self.yolo_mode = False # Reset YOLO mode self.model = None # Reset model + self.parallel_mode = False # Reset parallel mode + self.max_concurrency = None # Reset concurrency return True, "Agent stopped" except Exception as e: @@ -422,6 +441,8 @@ class AgentProcessManager: "started_at": self.started_at.isoformat() if self.started_at else None, "yolo_mode": self.yolo_mode, "model": self.model, + "parallel_mode": self.parallel_mode, + "max_concurrency": self.max_concurrency, } diff --git a/server/websocket.py b/server/websocket.py index e987cfb..63a2a1d 100644 --- a/server/websocket.py +++ b/server/websocket.py @@ -15,6 +15,7 @@ from typing import Set from fastapi import WebSocket, WebSocketDisconnect +from .schemas import AGENT_MASCOTS from .services.dev_server_manager import get_devserver_manager from .services.process_manager import get_manager @@ -23,6 +24,177 @@ _count_passing_tests = None logger = logging.getLogger(__name__) +# Pattern to extract feature ID from parallel orchestrator output +FEATURE_ID_PATTERN = re.compile(r'\[Feature #(\d+)\]\s*(.*)') + +# Patterns for detecting agent activity and thoughts +THOUGHT_PATTERNS = [ + # Claude's tool usage patterns (actual format: [Tool: name]) + (re.compile(r'\[Tool:\s*Read\]', re.I), 'thinking'), + (re.compile(r'\[Tool:\s*(?:Write|Edit|NotebookEdit)\]', re.I), 'working'), + (re.compile(r'\[Tool:\s*Bash\]', re.I), 'testing'), + (re.compile(r'\[Tool:\s*(?:Glob|Grep)\]', re.I), 'thinking'), + (re.compile(r'\[Tool:\s*(\w+)\]', re.I), 'working'), # Fallback for other tools + # Claude's internal thoughts + (re.compile(r'(?:Reading|Analyzing|Checking|Looking at|Examining)\s+(.+)', re.I), 'thinking'), + (re.compile(r'(?:Creating|Writing|Adding|Implementing|Building)\s+(.+)', re.I), 'working'), + (re.compile(r'(?:Testing|Verifying|Running tests|Validating)\s+(.+)', re.I), 'testing'), + (re.compile(r'(?:Error|Failed|Cannot|Unable to|Exception)\s+(.+)', re.I), 'struggling'), + # Test results + (re.compile(r'(?:PASS|passed|success)', re.I), 'success'), + (re.compile(r'(?:FAIL|failed|error)', re.I), 'struggling'), +] + + +class AgentTracker: + """Tracks active agents and their states for multi-agent mode.""" + + def __init__(self): + # feature_id -> {name, state, last_thought, agent_index} + self.active_agents: dict[int, dict] = {} + self._next_agent_index = 0 + self._lock = asyncio.Lock() + + async def process_line(self, line: str) -> dict | None: + """ + Process an output line and return an agent_update message if relevant. 
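+
+        Illustrative example: the line "[Feature #12] [Tool: Bash]" is
+        attributed to feature 12 and mapped to state "testing" via
+        THOUGHT_PATTERNS.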
+ + Returns None if no update should be emitted. + """ + # Check for feature-specific output + match = FEATURE_ID_PATTERN.match(line) + if not match: + # Also check for orchestrator status messages + if line.startswith("Started agent for feature #"): + try: + feature_id = int(re.search(r'#(\d+)', line).group(1)) + return await self._handle_agent_start(feature_id, line) + except (AttributeError, ValueError): + pass + elif line.startswith("Feature #") and ("completed" in line or "failed" in line): + try: + feature_id = int(re.search(r'#(\d+)', line).group(1)) + is_success = "completed" in line + return await self._handle_agent_complete(feature_id, is_success) + except (AttributeError, ValueError): + pass + return None + + feature_id = int(match.group(1)) + content = match.group(2) + + async with self._lock: + # Ensure agent is tracked + if feature_id not in self.active_agents: + agent_index = self._next_agent_index + self._next_agent_index += 1 + self.active_agents[feature_id] = { + 'name': AGENT_MASCOTS[agent_index % len(AGENT_MASCOTS)], + 'agent_index': agent_index, + 'state': 'thinking', + 'feature_name': f'Feature #{feature_id}', + 'last_thought': None, + } + + agent = self.active_agents[feature_id] + + # Detect state and thought from content + state = 'working' + thought = None + + for pattern, detected_state in THOUGHT_PATTERNS: + m = pattern.search(content) + if m: + state = detected_state + thought = m.group(1) if m.lastindex else content[:100] + break + + # Only emit update if state changed or we have a new thought + if state != agent['state'] or thought != agent['last_thought']: + agent['state'] = state + if thought: + agent['last_thought'] = thought + + return { + 'type': 'agent_update', + 'agentIndex': agent['agent_index'], + 'agentName': agent['name'], + 'featureId': feature_id, + 'featureName': agent['feature_name'], + 'state': state, + 'thought': thought, + 'timestamp': datetime.now().isoformat(), + } + + return None + + def get_agent_info(self, feature_id: int) -> tuple[int | None, str | None]: + """Get agent index and name for a feature ID. + + Returns: + Tuple of (agentIndex, agentName) or (None, None) if not tracked. 
+ """ + agent = self.active_agents.get(feature_id) + if agent: + return agent['agent_index'], agent['name'] + return None, None + + async def _handle_agent_start(self, feature_id: int, line: str) -> dict | None: + """Handle agent start message from orchestrator.""" + async with self._lock: + agent_index = self._next_agent_index + self._next_agent_index += 1 + + # Try to extract feature name from line + feature_name = f'Feature #{feature_id}' + name_match = re.search(r'#\d+:\s*(.+)$', line) + if name_match: + feature_name = name_match.group(1) + + self.active_agents[feature_id] = { + 'name': AGENT_MASCOTS[agent_index % len(AGENT_MASCOTS)], + 'agent_index': agent_index, + 'state': 'thinking', + 'feature_name': feature_name, + 'last_thought': 'Starting work...', + } + + return { + 'type': 'agent_update', + 'agentIndex': agent_index, + 'agentName': AGENT_MASCOTS[agent_index % len(AGENT_MASCOTS)], + 'featureId': feature_id, + 'featureName': feature_name, + 'state': 'thinking', + 'thought': 'Starting work...', + 'timestamp': datetime.now().isoformat(), + } + + async def _handle_agent_complete(self, feature_id: int, is_success: bool) -> dict | None: + """Handle agent completion message from orchestrator.""" + async with self._lock: + if feature_id not in self.active_agents: + return None + + agent = self.active_agents[feature_id] + state = 'success' if is_success else 'error' + + result = { + 'type': 'agent_update', + 'agentIndex': agent['agent_index'], + 'agentName': agent['name'], + 'featureId': feature_id, + 'featureName': agent['feature_name'], + 'state': state, + 'thought': 'Completed successfully!' if is_success else 'Failed to complete', + 'timestamp': datetime.now().isoformat(), + } + + # Remove from active agents + del self.active_agents[feature_id] + + return result + def _get_project_path(project_name: str) -> Path: """Get project path from registry.""" @@ -171,14 +343,38 @@ async def project_websocket(websocket: WebSocket, project_name: str): # Get agent manager and register callbacks agent_manager = get_manager(project_name, project_dir, ROOT_DIR) + # Create agent tracker for multi-agent mode + agent_tracker = AgentTracker() + async def on_output(line: str): """Handle agent output - broadcast to this WebSocket.""" try: - await websocket.send_json({ + # Extract feature ID from line if present + feature_id = None + agent_index = None + match = FEATURE_ID_PATTERN.match(line) + if match: + feature_id = int(match.group(1)) + agent_index, _ = agent_tracker.get_agent_info(feature_id) + + # Send the raw log line with optional feature/agent attribution + log_msg = { "type": "log", "line": line, "timestamp": datetime.now().isoformat(), - }) + } + if feature_id is not None: + log_msg["featureId"] = feature_id + if agent_index is not None: + log_msg["agentIndex"] = agent_index + + await websocket.send_json(log_msg) + + # Check if this line indicates agent activity (parallel mode) + # and emit agent_update messages if so + agent_update = await agent_tracker.process_line(line) + if agent_update: + await websocket.send_json(agent_update) except Exception: pass # Connection may be closed diff --git a/ui/package-lock.json b/ui/package-lock.json index 6135f47..984d25a 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -15,8 +15,10 @@ "@xterm/addon-fit": "^0.11.0", "@xterm/addon-web-links": "^0.12.0", "@xterm/xterm": "^6.0.0", + "@xyflow/react": "^12.10.0", "canvas-confetti": "^1.9.4", "clsx": "^2.1.1", + "dagre": "^0.8.5", "lucide-react": "^0.460.0", "react": "^18.3.1", "react-dom": 
"^18.3.1" @@ -25,6 +27,7 @@ "@eslint/js": "^9.13.0", "@tailwindcss/vite": "^4.0.0-beta.4", "@types/canvas-confetti": "^1.9.0", + "@types/dagre": "^0.7.53", "@types/react": "^18.3.12", "@types/react-dom": "^18.3.1", "@vitejs/plugin-react": "^4.3.3", @@ -2299,6 +2302,62 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/dagre": { + "version": "0.7.53", + "resolved": "https://registry.npmjs.org/@types/dagre/-/dagre-0.7.53.tgz", + "integrity": "sha512-f4gkWqzPZvYmKhOsDnhq/R8mO4UMcKdxZo+i5SCkOU1wvGeHJeUXGIHeE9pnwGyPMDof1Vx5ZQo4nxpeg2TTVQ==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/estree": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", @@ -2652,6 +2711,38 @@ "addons/*" ] }, + "node_modules/@xyflow/react": { + "version": "12.10.0", + "resolved": "https://registry.npmjs.org/@xyflow/react/-/react-12.10.0.tgz", + "integrity": "sha512-eOtz3whDMWrB4KWVatIBrKuxECHqip6PfA8fTpaS2RUGVpiEAe+nqDKsLqkViVWxDGreq0lWX71Xth/SPAzXiw==", + "license": "MIT", + "dependencies": { + "@xyflow/system": "0.0.74", + "classcat": "^5.0.3", + "zustand": "^4.4.0" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@xyflow/system": { + "version": "0.0.74", + "resolved": "https://registry.npmjs.org/@xyflow/system/-/system-0.0.74.tgz", + "integrity": "sha512-7v7B/PkiVrkdZzSbL+inGAo6tkR/WQHHG0/jhSvLQToCsfa8YubOGmBYd1s08tpKpihdHDZFwzQZeR69QSBb4Q==", + "license": "MIT", + "dependencies": { + "@types/d3-drag": "^3.0.7", + "@types/d3-interpolate": "^3.0.4", + "@types/d3-selection": "^3.0.10", + 
"@types/d3-transition": "^3.0.8", + "@types/d3-zoom": "^3.0.8", + "d3-drag": "^3.0.0", + "d3-interpolate": "^3.0.1", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0" + } + }, "node_modules/acorn": { "version": "8.15.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", @@ -2847,6 +2938,12 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/classcat": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==", + "license": "MIT" + }, "node_modules/clsx": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", @@ -2912,6 +3009,121 @@ "devOptional": true, "license": "MIT" }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + 
"d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/dagre": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/dagre/-/dagre-0.8.5.tgz", + "integrity": "sha512-/aTqmnRta7x7MCCpExk7HQL2O4owCT2h8NT//9I1OQ9vt29Pa0BzSAkR5lwFUcQ7491yVi/3CXU9jQ5o0Mn2Sw==", + "license": "MIT", + "dependencies": { + "graphlib": "^2.1.8", + "lodash": "^4.17.15" + } + }, "node_modules/debug": { "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", @@ -3370,6 +3582,15 @@ "dev": true, "license": "ISC" }, + "node_modules/graphlib": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/graphlib/-/graphlib-2.1.8.tgz", + "integrity": "sha512-jcLLfkpoVGmH7/InMC/1hIvOPSUh38oJtGhvrOFGzioE1DZ+0YW16RgmOJhHiuWTvGiJQ9Z1Ik43JvkRPRvE+A==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.15" + } + }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -3824,6 +4045,12 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "license": "MIT" + }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -4503,6 +4730,15 @@ } } }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, "node_modules/vite": { "version": "5.4.21", "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", @@ -4608,6 +4844,34 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } } } } diff --git a/ui/package.json b/ui/package.json index 560f821..c9d6f81 100644 --- a/ui/package.json +++ b/ui/package.json @@ -17,8 +17,10 @@ "@xterm/addon-fit": "^0.11.0", "@xterm/addon-web-links": "^0.12.0", "@xterm/xterm": "^6.0.0", + "@xyflow/react": "^12.10.0", "canvas-confetti": "^1.9.4", "clsx": "^2.1.1", + "dagre": "^0.8.5", "lucide-react": "^0.460.0", "react": "^18.3.1", "react-dom": "^18.3.1" @@ -27,6 +29,7 @@ "@eslint/js": 
"^9.13.0", "@tailwindcss/vite": "^4.0.0-beta.4", "@types/canvas-confetti": "^1.9.0", + "@types/dagre": "^0.7.53", "@types/react": "^18.3.12", "@types/react-dom": "^18.3.1", "@vitejs/plugin-react": "^4.3.3", diff --git a/ui/src/App.tsx b/ui/src/App.tsx index baefb48..fbaff40 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -1,5 +1,5 @@ import { useState, useEffect, useCallback } from 'react' -import { useQueryClient } from '@tanstack/react-query' +import { useQueryClient, useQuery } from '@tanstack/react-query' import { useProjects, useFeatures, useAgentStatus, useSettings } from './hooks/useProjects' import { useProjectWebSocket } from './hooks/useWebSocket' import { useFeatureSound } from './hooks/useFeatureSound' @@ -13,16 +13,23 @@ import { AddFeatureForm } from './components/AddFeatureForm' import { FeatureModal } from './components/FeatureModal' import { DebugLogViewer, type TabType } from './components/DebugLogViewer' import { AgentThought } from './components/AgentThought' +import { AgentMissionControl } from './components/AgentMissionControl' +import { CelebrationOverlay } from './components/CelebrationOverlay' import { AssistantFAB } from './components/AssistantFAB' import { AssistantPanel } from './components/AssistantPanel' import { ExpandProjectModal } from './components/ExpandProjectModal' import { SettingsModal } from './components/SettingsModal' import { DevServerControl } from './components/DevServerControl' +import { ViewToggle, type ViewMode } from './components/ViewToggle' +import { DependencyGraph } from './components/DependencyGraph' +import { KeyboardShortcutsHelp } from './components/KeyboardShortcutsHelp' +import { getDependencyGraph } from './lib/api' import { Loader2, Settings, Moon, Sun } from 'lucide-react' import type { Feature } from './lib/types' const STORAGE_KEY = 'autocoder-selected-project' const DARK_MODE_KEY = 'autocoder-dark-mode' +const VIEW_MODE_KEY = 'autocoder-view-mode' function App() { // Initialize selected project from localStorage @@ -42,6 +49,7 @@ function App() { const [debugActiveTab, setDebugActiveTab] = useState('agent') const [assistantOpen, setAssistantOpen] = useState(false) const [showSettings, setShowSettings] = useState(false) + const [showKeyboardHelp, setShowKeyboardHelp] = useState(false) const [isSpecCreating, setIsSpecCreating] = useState(false) const [darkMode, setDarkMode] = useState(() => { try { @@ -50,6 +58,14 @@ function App() { return false } }) + const [viewMode, setViewMode] = useState(() => { + try { + const stored = localStorage.getItem(VIEW_MODE_KEY) + return (stored === 'graph' ? 
'graph' : 'kanban') as ViewMode + } catch { + return 'kanban' + } + }) const queryClient = useQueryClient() const { data: projects, isLoading: projectsLoading } = useProjects() @@ -58,6 +74,14 @@ function App() { useAgentStatus(selectedProject) // Keep polling for status updates const wsState = useProjectWebSocket(selectedProject) + // Fetch graph data when in graph view + const { data: graphData } = useQuery({ + queryKey: ['dependencyGraph', selectedProject], + queryFn: () => getDependencyGraph(selectedProject!), + enabled: !!selectedProject && viewMode === 'graph', + refetchInterval: 5000, // Refresh every 5 seconds + }) + // Apply dark mode class to document useEffect(() => { if (darkMode) { @@ -72,6 +96,15 @@ function App() { } }, [darkMode]) + // Persist view mode to localStorage + useEffect(() => { + try { + localStorage.setItem(VIEW_MODE_KEY, viewMode) + } catch { + // localStorage not available + } + }, [viewMode]) + // Play sounds when features move between columns useFeatureSound(features) @@ -154,9 +187,23 @@ function App() { setShowSettings(true) } + // G : Toggle between Kanban and Graph view (when project selected) + if ((e.key === 'g' || e.key === 'G') && selectedProject) { + e.preventDefault() + setViewMode(prev => prev === 'kanban' ? 'graph' : 'kanban') + } + + // ? : Show keyboard shortcuts help + if (e.key === '?') { + e.preventDefault() + setShowKeyboardHelp(true) + } + // Escape : Close modals if (e.key === 'Escape') { - if (showExpandProject) { + if (showKeyboardHelp) { + setShowKeyboardHelp(false) + } else if (showExpandProject) { setShowExpandProject(false) } else if (showSettings) { setShowSettings(false) @@ -174,7 +221,7 @@ function App() { window.addEventListener('keydown', handleKeyDown) return () => window.removeEventListener('keydown', handleKeyDown) - }, [selectedProject, showAddFeature, showExpandProject, selectedFeature, debugOpen, debugActiveTab, assistantOpen, features, showSettings, isSpecCreating]) + }, [selectedProject, showAddFeature, showExpandProject, selectedFeature, debugOpen, debugActiveTab, assistantOpen, features, showSettings, showKeyboardHelp, isSpecCreating, viewMode]) // Combine WebSocket progress with feature data const progress = wsState.progress.total > 0 ? wsState.progress : { @@ -284,11 +331,21 @@ function App() { isConnected={wsState.isConnected} /> - {/* Agent Thought - shows latest agent narrative */} - + {/* Agent Mission Control - shows active agents in parallel mode */} + {wsState.activeAgents.length > 0 && ( + + )} + + {/* Agent Thought - shows latest agent narrative (single agent mode) */} + {wsState.activeAgents.length === 0 && ( + + )} {/* Initializing Features State - show when agent is running but no features yet */} {features && @@ -307,13 +364,45 @@ function App() { )} - {/* Kanban Board */} - setShowAddFeature(true)} - onExpandProject={() => setShowExpandProject(true)} - /> + {/* View Toggle - only show when there are features */} + {features && (features.pending.length + features.in_progress.length + features.done.length) > 0 && ( +
+ +
+ )} + + {/* Kanban Board or Dependency Graph based on view mode */} + {viewMode === 'kanban' ? ( + setShowAddFeature(true)} + onExpandProject={() => setShowExpandProject(true)} + activeAgents={wsState.activeAgents} + /> + ) : ( +
+ {graphData ? ( + { + // Find the feature and open the modal + const allFeatures = [ + ...(features?.pending ?? []), + ...(features?.in_progress ?? []), + ...(features?.done ?? []) + ] + const feature = allFeatures.find(f => f.id === nodeId) + if (feature) setSelectedFeature(feature) + }} + /> + ) : ( +
+ +
+ )} +
+ )} )} @@ -383,6 +472,20 @@ function App() { {showSettings && ( setShowSettings(false)} /> )} + + {/* Keyboard Shortcuts Help */} + {showKeyboardHelp && ( + setShowKeyboardHelp(false)} /> + )} + + {/* Celebration Overlay - shows when a feature is completed by an agent */} + {wsState.celebration && ( + + )} ) } diff --git a/ui/src/components/ActivityFeed.tsx b/ui/src/components/ActivityFeed.tsx new file mode 100644 index 0000000..b986b0f --- /dev/null +++ b/ui/src/components/ActivityFeed.tsx @@ -0,0 +1,93 @@ +import { Activity } from 'lucide-react' +import { AgentAvatar } from './AgentAvatar' +import type { AgentMascot } from '../lib/types' + +interface ActivityItem { + agentName: string + thought: string + timestamp: string + featureId: number +} + +interface ActivityFeedProps { + activities: ActivityItem[] + maxItems?: number + showHeader?: boolean +} + +function formatTimestamp(timestamp: string): string { + const date = new Date(timestamp) + const now = new Date() + const diffMs = now.getTime() - date.getTime() + const diffSec = Math.floor(diffMs / 1000) + + if (diffSec < 5) return 'just now' + if (diffSec < 60) return `${diffSec}s ago` + if (diffSec < 3600) return `${Math.floor(diffSec / 60)}m ago` + return date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }) +} + +export function ActivityFeed({ activities, maxItems = 5, showHeader = true }: ActivityFeedProps) { + const displayedActivities = activities.slice(0, maxItems) + + if (displayedActivities.length === 0) { + return null + } + + return ( +
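Note: the App.tsx hunks above wire up a persisted `ViewMode` plus `g` and `?` keyboard shortcuts. A minimal sketch of the persistence pattern as a standalone hook (the hook name `usePersistedViewMode` is illustrative and not part of this diff; App.tsx itself imports `ViewMode` from `./components/ViewToggle`):

```ts
import { useEffect, useState } from 'react'
import type { Dispatch, SetStateAction } from 'react'

export type ViewMode = 'kanban' | 'graph'
const VIEW_MODE_KEY = 'autocoder-view-mode'

// Illustrative hook: read the stored mode once, fall back to 'kanban',
// and mirror every change back to localStorage (ignoring storage errors).
export function usePersistedViewMode(): [ViewMode, Dispatch<SetStateAction<ViewMode>>] {
  const [viewMode, setViewMode] = useState<ViewMode>(() => {
    try {
      return localStorage.getItem(VIEW_MODE_KEY) === 'graph' ? 'graph' : 'kanban'
    } catch {
      return 'kanban'
    }
  })

  useEffect(() => {
    try {
      localStorage.setItem(VIEW_MODE_KEY, viewMode)
    } catch {
      // localStorage unavailable (e.g. private browsing), keep the mode in memory only
    }
  }, [viewMode])

  return [viewMode, setViewMode]
}
```

In the diff, the `g` handler flips this state with a functional update while a project is selected, and the same value gates the `dependencyGraph` query through its `enabled` flag.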
+ {showHeader && ( +
+ + + Recent Activity + +
+ )} + +
+ {displayedActivities.map((activity) => ( +
+ +
+
+ + {activity.agentName} + + + #{activity.featureId} + + + {formatTimestamp(activity.timestamp)} + +
+

+ {activity.thought} +

+
+
+ ))} +
+
+ ) +} + +function getMascotColor(name: AgentMascot): string { + const colors: Record = { + Spark: '#3B82F6', + Fizz: '#F97316', + Octo: '#8B5CF6', + Hoot: '#22C55E', + Buzz: '#EAB308', + } + return colors[name] || '#6B7280' +} diff --git a/ui/src/components/AgentAvatar.tsx b/ui/src/components/AgentAvatar.tsx new file mode 100644 index 0000000..5d0c9f1 --- /dev/null +++ b/ui/src/components/AgentAvatar.tsx @@ -0,0 +1,261 @@ +import { type AgentMascot, type AgentState } from '../lib/types' + +interface AgentAvatarProps { + name: AgentMascot + state: AgentState + size?: 'sm' | 'md' | 'lg' + showName?: boolean +} + +const AVATAR_COLORS: Record = { + Spark: { primary: '#3B82F6', secondary: '#60A5FA', accent: '#DBEAFE' }, // Blue robot + Fizz: { primary: '#F97316', secondary: '#FB923C', accent: '#FFEDD5' }, // Orange fox + Octo: { primary: '#8B5CF6', secondary: '#A78BFA', accent: '#EDE9FE' }, // Purple octopus + Hoot: { primary: '#22C55E', secondary: '#4ADE80', accent: '#DCFCE7' }, // Green owl + Buzz: { primary: '#EAB308', secondary: '#FACC15', accent: '#FEF9C3' }, // Yellow bee +} + +const SIZES = { + sm: { svg: 32, font: 'text-xs' }, + md: { svg: 48, font: 'text-sm' }, + lg: { svg: 64, font: 'text-base' }, +} + +// SVG mascot definitions - simple cute characters +function SparkSVG({ colors, size }: { colors: typeof AVATAR_COLORS.Spark; size: number }) { + return ( + + {/* Robot body */} + + {/* Robot head */} + + {/* Antenna */} + + + {/* Eyes */} + + + + + {/* Mouth */} + + {/* Arms */} + + + + ) +} + +function FizzSVG({ colors, size }: { colors: typeof AVATAR_COLORS.Fizz; size: number }) { + return ( + + {/* Ears */} + + + + + {/* Head */} + + {/* Face */} + + {/* Eyes */} + + + + + {/* Nose */} + + {/* Whiskers */} + + + + + + ) +} + +function OctoSVG({ colors, size }: { colors: typeof AVATAR_COLORS.Octo; size: number }) { + return ( + + {/* Tentacles */} + + + + + + {/* Head */} + + {/* Eyes */} + + + + + {/* Smile */} + + + ) +} + +function HootSVG({ colors, size }: { colors: typeof AVATAR_COLORS.Hoot; size: number }) { + return ( + + {/* Ear tufts */} + + + {/* Body */} + + {/* Head */} + + {/* Eye circles */} + + + {/* Eyes */} + + + + + {/* Beak */} + + {/* Belly */} + + + ) +} + +function BuzzSVG({ colors, size }: { colors: typeof AVATAR_COLORS.Buzz; size: number }) { + return ( + + {/* Wings */} + + + {/* Body stripes */} + + + + {/* Head */} + + {/* Antennae */} + + + + + {/* Eyes */} + + + + + {/* Smile */} + + + ) +} + +const MASCOT_SVGS: Record = { + Spark: SparkSVG, + Fizz: FizzSVG, + Octo: OctoSVG, + Hoot: HootSVG, + Buzz: BuzzSVG, +} + +// Animation classes based on state +function getStateAnimation(state: AgentState): string { + switch (state) { + case 'idle': + return 'animate-bounce-gentle' + case 'thinking': + return 'animate-thinking' + case 'working': + return 'animate-working' + case 'testing': + return 'animate-testing' + case 'success': + return 'animate-celebrate' + case 'error': + case 'struggling': + return 'animate-shake-gentle' + default: + return '' + } +} + +// Glow effect based on state +function getStateGlow(state: AgentState): string { + switch (state) { + case 'working': + return 'shadow-[0_0_12px_rgba(0,180,216,0.5)]' + case 'thinking': + return 'shadow-[0_0_8px_rgba(255,214,10,0.4)]' + case 'success': + return 'shadow-[0_0_16px_rgba(112,224,0,0.6)]' + case 'error': + case 'struggling': + return 'shadow-[0_0_12px_rgba(255,84,0,0.5)]' + default: + return '' + } +} + +// Get human-readable state description for accessibility +function 
getStateDescription(state: AgentState): string { + switch (state) { + case 'idle': + return 'waiting' + case 'thinking': + return 'analyzing' + case 'working': + return 'coding' + case 'testing': + return 'running tests' + case 'success': + return 'completed successfully' + case 'error': + return 'encountered an error' + case 'struggling': + return 'having difficulty' + default: + return state + } +} + +export function AgentAvatar({ name, state, size = 'md', showName = false }: AgentAvatarProps) { + const colors = AVATAR_COLORS[name] + const { svg: svgSize, font } = SIZES[size] + const SvgComponent = MASCOT_SVGS[name] + const stateDesc = getStateDescription(state) + const ariaLabel = `Agent ${name} is ${stateDesc}` + + return ( +
+ + {showName && ( + + {name} + + )} +
+ ) +} + +// Get mascot name by index (cycles through available mascots) +export function getMascotName(index: number): AgentMascot { + const mascots: AgentMascot[] = ['Spark', 'Fizz', 'Octo', 'Hoot', 'Buzz'] + return mascots[index % mascots.length] +} diff --git a/ui/src/components/AgentCard.tsx b/ui/src/components/AgentCard.tsx new file mode 100644 index 0000000..0c5c510 --- /dev/null +++ b/ui/src/components/AgentCard.tsx @@ -0,0 +1,99 @@ +import { MessageCircle } from 'lucide-react' +import { AgentAvatar } from './AgentAvatar' +import type { ActiveAgent } from '../lib/types' + +interface AgentCardProps { + agent: ActiveAgent +} + +// Get a friendly state description +function getStateText(state: ActiveAgent['state']): string { + switch (state) { + case 'idle': + return 'Waiting...' + case 'thinking': + return 'Thinking...' + case 'working': + return 'Coding...' + case 'testing': + return 'Testing...' + case 'success': + return 'Done!' + case 'error': + return 'Hit an issue' + case 'struggling': + return 'Retrying...' + default: + return 'Working...' + } +} + +// Get state color +function getStateColor(state: ActiveAgent['state']): string { + switch (state) { + case 'success': + return 'text-neo-done' + case 'error': + case 'struggling': + return 'text-neo-danger' + case 'working': + case 'testing': + return 'text-neo-progress' + case 'thinking': + return 'text-neo-pending' + default: + return 'text-neo-text-secondary' + } +} + +export function AgentCard({ agent }: AgentCardProps) { + const isActive = ['thinking', 'working', 'testing'].includes(agent.state) + + return ( +
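Note: AgentAvatar.tsx above exports both the avatar component and `getMascotName`, which assigns mascots to agent slots round-robin (slot 0 is Spark, slot 4 is Buzz, slot 5 wraps back to Spark). A small usage sketch (the `AgentRow` component is illustrative; `AgentState` comes from the types added later in this diff):

```tsx
import { AgentAvatar, getMascotName } from './AgentAvatar'
import type { AgentState } from '../lib/types'

// Illustrative only: one avatar per agent slot, mascots assigned round-robin.
export function AgentRow({ states }: { states: AgentState[] }) {
  return (
    <div className="flex items-center gap-2">
      {states.map((state, index) => (
        <AgentAvatar
          key={index}
          name={getMascotName(index)}
          state={state}
          size="sm"
          showName
        />
      ))}
    </div>
  )
}
```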
+ {/* Header with avatar and name */} +
+ +
+
+ {agent.agentName} +
+
+ {getStateText(agent.state)} +
+
+
+ + {/* Feature info */} +
+
+ Feature #{agent.featureId} +
+
+ {agent.featureName} +
+
+ + {/* Thought bubble */} + {agent.thought && ( +
+
+ +

+ {agent.thought} +

+
+
+ )} +
+ ) +} diff --git a/ui/src/components/AgentControl.tsx b/ui/src/components/AgentControl.tsx index 1ae77b3..e3d0a92 100644 --- a/ui/src/components/AgentControl.tsx +++ b/ui/src/components/AgentControl.tsx @@ -1,4 +1,5 @@ -import { Play, Square, Loader2 } from 'lucide-react' +import { useState } from 'react' +import { Play, Square, Loader2, GitBranch } from 'lucide-react' import { useStartAgent, useStopAgent, @@ -15,19 +16,57 @@ export function AgentControl({ projectName, status }: AgentControlProps) { const { data: settings } = useSettings() const yoloMode = settings?.yolo_mode ?? false + // Concurrency: 1 = single agent, 2-5 = parallel + const [concurrency, setConcurrency] = useState(3) + const startAgent = useStartAgent(projectName) const stopAgent = useStopAgent(projectName) const isLoading = startAgent.isPending || stopAgent.isPending + const isRunning = status === 'running' || status === 'paused' + const isParallel = concurrency > 1 - const handleStart = () => startAgent.mutate(yoloMode) + const handleStart = () => startAgent.mutate({ + yoloMode, + parallelMode: isParallel, + maxConcurrency: isParallel ? concurrency : undefined, + }) const handleStop = () => stopAgent.mutate() // Simplified: either show Start (when stopped/crashed) or Stop (when running/paused) const isStopped = status === 'stopped' || status === 'crashed' return ( -
+
+ {/* Concurrency slider - always visible when stopped */} + {isStopped && ( +
+ + setConcurrency(Number(e.target.value))} + disabled={isLoading} + className="w-16 h-2 accent-[var(--color-neo-primary)] cursor-pointer" + title={`${concurrency} concurrent agent${concurrency > 1 ? 's' : ''}`} + aria-label="Set number of concurrent agents" + /> + + {concurrency}x + +
+ )} + + {/* Show concurrency indicator when running with multiple agents */} + {isRunning && isParallel && ( +
+ + {concurrency}x +
+ )} + {isStopped ? ( + + {/* Content */} +
+
+ {/* Agent Cards Row */} +
+ {agents.map((agent) => ( + + ))} +
+ + {/* Collapsible Activity Feed */} + {recentActivity.length > 0 && ( +
+ +
+ +
+
+ )} +
+
+
+ ) +} diff --git a/ui/src/components/AgentThought.tsx b/ui/src/components/AgentThought.tsx index 65a50a1..6c8d1be 100644 --- a/ui/src/components/AgentThought.tsx +++ b/ui/src/components/AgentThought.tsx @@ -25,14 +25,14 @@ function isAgentThought(line: string): boolean { // Skip JSON and very short lines if (/^[[{]/.test(trimmed)) return false - if (trimmed.length < 15) return false + if (trimmed.length < 10) return false // Skip lines that are just paths or technical output if (/^[A-Za-z]:\\/.test(trimmed)) return false if (/^\/[a-z]/.test(trimmed)) return false - // Keep narrative text (starts with capital, looks like a sentence) - return /^[A-Z]/.test(trimmed) && trimmed.length > 20 + // Keep narrative text (looks like a sentence, relaxed filter) + return trimmed.length > 10 } /** diff --git a/ui/src/components/CelebrationOverlay.tsx b/ui/src/components/CelebrationOverlay.tsx new file mode 100644 index 0000000..a6c9eab --- /dev/null +++ b/ui/src/components/CelebrationOverlay.tsx @@ -0,0 +1,120 @@ +import { useCallback, useEffect, useState } from 'react' +import { Sparkles, PartyPopper } from 'lucide-react' +import { AgentAvatar } from './AgentAvatar' +import type { AgentMascot } from '../lib/types' + +interface CelebrationOverlayProps { + agentName: AgentMascot + featureName: string + onComplete?: () => void +} + +// Generate random confetti particles +function generateConfetti(count: number) { + return Array.from({ length: count }, (_, i) => ({ + id: i, + x: Math.random() * 100, + delay: Math.random() * 0.5, + duration: 1 + Math.random() * 1, + color: ['#ff006e', '#ffd60a', '#70e000', '#00b4d8', '#8338ec'][Math.floor(Math.random() * 5)], + rotation: Math.random() * 360, + })) +} + +export function CelebrationOverlay({ agentName, featureName, onComplete }: CelebrationOverlayProps) { + const [isVisible, setIsVisible] = useState(true) + const [confetti] = useState(() => generateConfetti(30)) + + const dismiss = useCallback(() => { + setIsVisible(false) + setTimeout(() => onComplete?.(), 300) // Wait for fade animation + }, [onComplete]) + + useEffect(() => { + // Auto-dismiss after 3 seconds + const timer = setTimeout(dismiss, 3000) + + // Escape key to dismiss early + const handleKeyDown = (e: KeyboardEvent) => { + if (e.key === 'Escape') { + dismiss() + } + } + + window.addEventListener('keydown', handleKeyDown) + return () => { + clearTimeout(timer) + window.removeEventListener('keydown', handleKeyDown) + } + }, [dismiss]) + + if (!isVisible) { + return null + } + + return ( +
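Note: the AgentThought.tsx hunk above relaxes the narrative filter: the minimum length drops from 15 to 10 characters and the leading-capital check is removed. A standalone sketch of the resulting heuristic, assuming the `trimmed` setup from the unchanged part of the function:

```ts
// Heuristic: keep log lines that read like agent narrative, drop technical noise.
function isAgentThought(line: string): boolean {
  const trimmed = line.trim()

  // Skip JSON payloads and very short lines
  if (/^[[{]/.test(trimmed)) return false
  if (trimmed.length < 10) return false

  // Skip lines that are just Windows or Unix paths
  if (/^[A-Za-z]:\\/.test(trimmed)) return false
  if (/^\/[a-z]/.test(trimmed)) return false

  // Keep anything else long enough to read as a sentence (relaxed filter)
  return trimmed.length > 10
}

// isAgentThought('Running the test suite now')  -> true
// isAgentThought('{"type":"log"}')              -> false
```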
+ {/* Confetti particles */} +
+ {confetti.map((particle) => ( +
+ ))} +
+ + {/* Celebration card - click to dismiss */} + +
+ ) +} diff --git a/ui/src/components/DependencyBadge.tsx b/ui/src/components/DependencyBadge.tsx new file mode 100644 index 0000000..48f2e97 --- /dev/null +++ b/ui/src/components/DependencyBadge.tsx @@ -0,0 +1,121 @@ +import { AlertTriangle, GitBranch, Check } from 'lucide-react' +import type { Feature } from '../lib/types' + +interface DependencyBadgeProps { + feature: Feature + allFeatures?: Feature[] + compact?: boolean +} + +/** + * Badge component showing dependency status for a feature. + * Shows: + * - Blocked status with count of blocking dependencies + * - Dependency count for features with satisfied dependencies + * - Nothing if feature has no dependencies + */ +export function DependencyBadge({ feature, allFeatures = [], compact = false }: DependencyBadgeProps) { + const dependencies = feature.dependencies || [] + + if (dependencies.length === 0) { + return null + } + + // Use API-computed blocked status if available, otherwise compute locally + const isBlocked = feature.blocked ?? + (feature.blocking_dependencies && feature.blocking_dependencies.length > 0) ?? + false + + const blockingCount = feature.blocking_dependencies?.length ?? 0 + + // Compute satisfied count from allFeatures if available + let satisfiedCount = dependencies.length - blockingCount + if (allFeatures.length > 0 && !feature.blocking_dependencies) { + const passingIds = new Set(allFeatures.filter(f => f.passes).map(f => f.id)) + satisfiedCount = dependencies.filter(d => passingIds.has(d)).length + } + + if (compact) { + // Compact view for card displays + return ( +
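Note: DependencyBadge.tsx above prefers the API-computed `blocked` and `blocking_dependencies` fields and only falls back to a local count. A pure-function sketch of that fallback (the helper name `summarizeDependencies` is illustrative), using the `Feature` fields added to `ui/src/lib/types.ts` later in this diff:

```ts
import type { Feature } from '../lib/types'

// Fallback computation when the API has not annotated the feature:
// a dependency is satisfied once the feature it points at passes.
export function summarizeDependencies(feature: Feature, allFeatures: Feature[]) {
  const dependencies = feature.dependencies ?? []
  const passingIds = new Set(allFeatures.filter((f) => f.passes).map((f) => f.id))

  const satisfied = dependencies.filter((id) => passingIds.has(id)).length
  const blocking = dependencies.length - satisfied

  return {
    total: dependencies.length,
    satisfied,
    blocking,
    isBlocked: blocking > 0, // mirrors feature.blocked when the API computes it
  }
}
```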
+ {isBlocked ? ( + <> + + {blockingCount} + + ) : ( + <> + + {satisfiedCount}/{dependencies.length} + + )} +
+ ) + } + + // Full view with more details + return ( +
+ {isBlocked ? ( +
+ + + Blocked by {blockingCount} {blockingCount === 1 ? 'dependency' : 'dependencies'} + +
+ ) : ( +
+ + + All {dependencies.length} {dependencies.length === 1 ? 'dependency' : 'dependencies'} satisfied + +
+ )} +
+ ) +} + +/** + * Small inline indicator for dependency status + */ +export function DependencyIndicator({ feature }: { feature: Feature }) { + const dependencies = feature.dependencies || [] + const isBlocked = feature.blocked || (feature.blocking_dependencies && feature.blocking_dependencies.length > 0) + + if (dependencies.length === 0) { + return null + } + + if (isBlocked) { + return ( + + + + ) + } + + return ( + + + + ) +} diff --git a/ui/src/components/DependencyGraph.tsx b/ui/src/components/DependencyGraph.tsx new file mode 100644 index 0000000..de3931e --- /dev/null +++ b/ui/src/components/DependencyGraph.tsx @@ -0,0 +1,289 @@ +import { useCallback, useEffect, useMemo, useState } from 'react' +import { + ReactFlow, + Background, + Controls, + MiniMap, + useNodesState, + useEdgesState, + Node, + Edge, + Position, + MarkerType, + ConnectionMode, + Handle, +} from '@xyflow/react' +import dagre from 'dagre' +import { CheckCircle2, Circle, Loader2, AlertTriangle } from 'lucide-react' +import type { DependencyGraph as DependencyGraphData, GraphNode } from '../lib/types' +import '@xyflow/react/dist/style.css' + +// Node dimensions +const NODE_WIDTH = 220 +const NODE_HEIGHT = 80 + +interface DependencyGraphProps { + graphData: DependencyGraphData + onNodeClick?: (nodeId: number) => void +} + +// Custom node component +function FeatureNode({ data }: { data: GraphNode & { onClick?: () => void } }) { + const statusColors = { + pending: 'bg-neo-pending border-neo-border', + in_progress: 'bg-neo-progress border-neo-border', + done: 'bg-neo-done border-neo-border', + blocked: 'bg-neo-danger/20 border-neo-danger', + } + + const StatusIcon = () => { + switch (data.status) { + case 'done': + return + case 'in_progress': + return + case 'blocked': + return + default: + return + } + } + + return ( + <> + +
+
+ + + #{data.priority} + +
+
+ {data.name} +
+
+ {data.category} +
+
+ + + ) +} + +const nodeTypes = { + feature: FeatureNode, +} + +// Layout nodes using dagre +function getLayoutedElements( + nodes: Node[], + edges: Edge[], + direction: 'TB' | 'LR' = 'LR' +): { nodes: Node[]; edges: Edge[] } { + const dagreGraph = new dagre.graphlib.Graph() + dagreGraph.setDefaultEdgeLabel(() => ({})) + + const isHorizontal = direction === 'LR' + dagreGraph.setGraph({ + rankdir: direction, + nodesep: 50, + ranksep: 100, + marginx: 50, + marginy: 50, + }) + + nodes.forEach((node) => { + dagreGraph.setNode(node.id, { width: NODE_WIDTH, height: NODE_HEIGHT }) + }) + + edges.forEach((edge) => { + dagreGraph.setEdge(edge.source, edge.target) + }) + + dagre.layout(dagreGraph) + + const layoutedNodes = nodes.map((node) => { + const nodeWithPosition = dagreGraph.node(node.id) + return { + ...node, + position: { + x: nodeWithPosition.x - NODE_WIDTH / 2, + y: nodeWithPosition.y - NODE_HEIGHT / 2, + }, + sourcePosition: isHorizontal ? Position.Right : Position.Bottom, + targetPosition: isHorizontal ? Position.Left : Position.Top, + } + }) + + return { nodes: layoutedNodes, edges } +} + +export function DependencyGraph({ graphData, onNodeClick }: DependencyGraphProps) { + const [direction, setDirection] = useState<'TB' | 'LR'>('LR') + + // Convert graph data to React Flow format + const initialElements = useMemo(() => { + const nodes: Node[] = graphData.nodes.map((node) => ({ + id: String(node.id), + type: 'feature', + position: { x: 0, y: 0 }, + data: { + ...node, + onClick: () => onNodeClick?.(node.id), + }, + })) + + const edges: Edge[] = graphData.edges.map((edge, index) => ({ + id: `e${edge.source}-${edge.target}-${index}`, + source: String(edge.source), + target: String(edge.target), + type: 'smoothstep', + animated: false, + style: { stroke: 'var(--color-neo-border)', strokeWidth: 2 }, + markerEnd: { + type: MarkerType.ArrowClosed, + color: 'var(--color-neo-border)', + }, + })) + + return getLayoutedElements(nodes, edges, direction) + }, [graphData, direction, onNodeClick]) + + const [nodes, setNodes, onNodesChange] = useNodesState(initialElements.nodes) + const [edges, setEdges, onEdgesChange] = useEdgesState(initialElements.edges) + + // Update layout when data or direction changes + useEffect(() => { + const { nodes: layoutedNodes, edges: layoutedEdges } = getLayoutedElements( + initialElements.nodes, + initialElements.edges, + direction + ) + setNodes(layoutedNodes) + setEdges(layoutedEdges) + }, [graphData, direction, setNodes, setEdges, initialElements]) + + const onLayout = useCallback( + (newDirection: 'TB' | 'LR') => { + setDirection(newDirection) + }, + [] + ) + + // Color nodes for minimap + const nodeColor = useCallback((node: Node) => { + const status = (node.data as unknown as GraphNode).status + switch (status) { + case 'done': + return 'var(--color-neo-done)' + case 'in_progress': + return 'var(--color-neo-progress)' + case 'blocked': + return 'var(--color-neo-danger)' + default: + return 'var(--color-neo-pending)' + } + }, []) + + if (graphData.nodes.length === 0) { + return ( +
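Note: DependencyGraph.tsx above registers a custom `feature` node type and lays it out horizontally with dagre. A minimal sketch of what such a custom node looks like in @xyflow/react, with target and source handles matching the LR layout (the markup and styling here are assumptions, not the exact component from the diff):

```tsx
import { Handle, Position, type NodeProps } from '@xyflow/react'

// Minimal custom node: edges enter on the left and leave on the right,
// matching the horizontal (LR) dagre layout used above.
function FeatureNodeSketch({ data }: NodeProps) {
  const d = data as { name: string; category: string; priority: number; onClick?: () => void }
  return (
    <>
      <Handle type="target" position={Position.Left} />
      <div onClick={d.onClick} style={{ width: 220, height: 80 }}>
        <strong>#{d.priority}</strong> {d.name}
        <div>{d.category}</div>
      </div>
      <Handle type="source" position={Position.Right} />
    </>
  )
}

// Registering the component under the 'feature' key is what lets React Flow
// render nodes created with type: 'feature'.
export const nodeTypesSketch = { feature: FeatureNodeSketch }
// Used as: <ReactFlow nodes={nodes} edges={edges} nodeTypes={nodeTypesSketch} fitView />
```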
+
+
No features to display
+
+ Create features to see the dependency graph +
+
+
+ ) + } + + return ( +
+ {/* Layout toggle */} +
+ + +
+ + {/* Legend */} +
+
Status
+
+
+
+ Pending +
+
+
+ In Progress +
+
+
+ Done +
+
+
+ Blocked +
+
+
+ + + + + + +
+ ) +} diff --git a/ui/src/components/FeatureCard.tsx b/ui/src/components/FeatureCard.tsx index 8e54f12..76fb237 100644 --- a/ui/src/components/FeatureCard.tsx +++ b/ui/src/components/FeatureCard.tsx @@ -1,10 +1,14 @@ -import { CheckCircle2, Circle, Loader2 } from 'lucide-react' -import type { Feature } from '../lib/types' +import { CheckCircle2, Circle, Loader2, MessageCircle } from 'lucide-react' +import type { Feature, ActiveAgent } from '../lib/types' +import { DependencyBadge } from './DependencyBadge' +import { AgentAvatar } from './AgentAvatar' interface FeatureCardProps { feature: Feature onClick: () => void isInProgress?: boolean + allFeatures?: Feature[] + activeAgent?: ActiveAgent // Agent working on this feature } // Generate consistent color for category using CSS variable references @@ -28,26 +32,33 @@ function getCategoryColor(category: string): string { return colors[Math.abs(hash) % colors.length] } -export function FeatureCard({ feature, onClick, isInProgress }: FeatureCardProps) { +export function FeatureCard({ feature, onClick, isInProgress, allFeatures = [], activeAgent }: FeatureCardProps) { const categoryColor = getCategoryColor(feature.category) + const isBlocked = feature.blocked || (feature.blocking_dependencies && feature.blocking_dependencies.length > 0) + const hasActiveAgent = !!activeAgent return ( +
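Note: FeatureCard now accepts an optional `activeAgent` and `allFeatures`, and App.tsx passes `activeAgents={wsState.activeAgents}` down to the board. A small sketch of the lookup a board could use to pick the agent for a card (the helper is illustrative; the KanbanBoard internals are not shown in this diff):

```ts
import type { ActiveAgent, Feature } from '../lib/types'

// Find the agent (if any) currently assigned to a feature so the card
// can render its AgentAvatar and thought bubble.
export function agentForFeature(
  feature: Feature,
  activeAgents: ActiveAgent[]
): ActiveAgent | undefined {
  return activeAgents.find((agent) => agent.featureId === feature.id)
}
```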
+ + {/* Shortcuts list */} +
    + {shortcuts.map((shortcut) => ( +
  • +
    + + {shortcut.key} + + {shortcut.description} +
    + {shortcut.context && ( + {shortcut.context} + )} +
  • + ))} +
+ + {/* Footer */} +

+ Press ? or Esc to close +

+
+
+ ) +} diff --git a/ui/src/components/NewProjectModal.tsx b/ui/src/components/NewProjectModal.tsx index 436c19a..188c3b0 100644 --- a/ui/src/components/NewProjectModal.tsx +++ b/ui/src/components/NewProjectModal.tsx @@ -129,7 +129,7 @@ export function NewProjectModal({ // Auto-start the initializer agent setInitializerStatus('starting') try { - await startAgent(projectName.trim(), yoloMode) + await startAgent(projectName.trim(), { yoloMode }) // Success - navigate to project changeStep('complete') setTimeout(() => { diff --git a/ui/src/components/ViewToggle.tsx b/ui/src/components/ViewToggle.tsx new file mode 100644 index 0000000..4c5e4ce --- /dev/null +++ b/ui/src/components/ViewToggle.tsx @@ -0,0 +1,46 @@ +import { LayoutGrid, GitBranch } from 'lucide-react' + +export type ViewMode = 'kanban' | 'graph' + +interface ViewToggleProps { + viewMode: ViewMode + onViewModeChange: (mode: ViewMode) => void +} + +/** + * Toggle button to switch between Kanban and Graph views + */ +export function ViewToggle({ viewMode, onViewModeChange }: ViewToggleProps) { + return ( +
+ + +
+ ) +} diff --git a/ui/src/hooks/useProjects.ts b/ui/src/hooks/useProjects.ts index 6582e85..695b3b5 100644 --- a/ui/src/hooks/useProjects.ts +++ b/ui/src/hooks/useProjects.ts @@ -123,7 +123,11 @@ export function useStartAgent(projectName: string) { const queryClient = useQueryClient() return useMutation({ - mutationFn: (yoloMode: boolean = false) => api.startAgent(projectName, yoloMode), + mutationFn: (options: { + yoloMode?: boolean + parallelMode?: boolean + maxConcurrency?: number + } = {}) => api.startAgent(projectName, options), onSuccess: () => { queryClient.invalidateQueries({ queryKey: ['agent-status', projectName] }) }, diff --git a/ui/src/hooks/useWebSocket.ts b/ui/src/hooks/useWebSocket.ts index 2f7e385..e6b143c 100644 --- a/ui/src/hooks/useWebSocket.ts +++ b/ui/src/hooks/useWebSocket.ts @@ -3,7 +3,28 @@ */ import { useEffect, useRef, useState, useCallback } from 'react' -import type { WSMessage, AgentStatus, DevServerStatus } from '../lib/types' +import type { + WSMessage, + AgentStatus, + DevServerStatus, + ActiveAgent, + AgentMascot, +} from '../lib/types' + +// Activity item for the feed +interface ActivityItem { + agentName: string + thought: string + timestamp: string + featureId: number +} + +// Celebration trigger for overlay +interface CelebrationTrigger { + agentName: AgentMascot + featureName: string + featureId: number +} interface WebSocketState { progress: { @@ -13,14 +34,21 @@ interface WebSocketState { percentage: number } agentStatus: AgentStatus - logs: Array<{ line: string; timestamp: string }> + logs: Array<{ line: string; timestamp: string; featureId?: number; agentIndex?: number }> isConnected: boolean devServerStatus: DevServerStatus devServerUrl: string | null devLogs: Array<{ line: string; timestamp: string }> + // Multi-agent state + activeAgents: ActiveAgent[] + recentActivity: ActivityItem[] + // Celebration queue to handle rapid successes without race conditions + celebrationQueue: CelebrationTrigger[] + celebration: CelebrationTrigger | null } const MAX_LOGS = 100 // Keep last 100 log lines +const MAX_ACTIVITY = 20 // Keep last 20 activity items export function useProjectWebSocket(projectName: string | null) { const [state, setState] = useState({ @@ -31,6 +59,10 @@ export function useProjectWebSocket(projectName: string | null) { devServerStatus: 'stopped', devServerUrl: null, devLogs: [], + activeAgents: [], + recentActivity: [], + celebrationQueue: [], + celebration: null, }) const wsRef = useRef(null) @@ -83,7 +115,12 @@ export function useProjectWebSocket(projectName: string | null) { ...prev, logs: [ ...prev.logs.slice(-MAX_LOGS + 1), - { line: message.line, timestamp: message.timestamp }, + { + line: message.line, + timestamp: message.timestamp, + featureId: message.featureId, + agentIndex: message.agentIndex, + }, ], })) break @@ -92,6 +129,91 @@ export function useProjectWebSocket(projectName: string | null) { // Feature updates will trigger a refetch via React Query break + case 'agent_update': + setState(prev => { + // Update or add the agent in activeAgents + const agentIndex = prev.activeAgents.findIndex( + a => a.agentIndex === message.agentIndex + ) + + let newAgents: ActiveAgent[] + if (message.state === 'success') { + // Remove agent from active list on success + newAgents = prev.activeAgents.filter( + a => a.agentIndex !== message.agentIndex + ) + } else if (agentIndex >= 0) { + // Update existing agent + newAgents = [...prev.activeAgents] + newAgents[agentIndex] = { + agentIndex: message.agentIndex, + agentName: message.agentName, 
+ featureId: message.featureId, + featureName: message.featureName, + state: message.state, + thought: message.thought, + timestamp: message.timestamp, + } + } else { + // Add new agent + newAgents = [ + ...prev.activeAgents, + { + agentIndex: message.agentIndex, + agentName: message.agentName, + featureId: message.featureId, + featureName: message.featureName, + state: message.state, + thought: message.thought, + timestamp: message.timestamp, + }, + ] + } + + // Add to activity feed if there's a thought + let newActivity = prev.recentActivity + if (message.thought) { + newActivity = [ + { + agentName: message.agentName, + thought: message.thought, + timestamp: message.timestamp, + featureId: message.featureId, + }, + ...prev.recentActivity.slice(0, MAX_ACTIVITY - 1), + ] + } + + // Handle celebration queue on success + let newCelebrationQueue = prev.celebrationQueue + let newCelebration = prev.celebration + + if (message.state === 'success') { + const newCelebrationItem: CelebrationTrigger = { + agentName: message.agentName, + featureName: message.featureName, + featureId: message.featureId, + } + + // If no celebration is showing, show this one immediately + // Otherwise, add to queue + if (!prev.celebration) { + newCelebration = newCelebrationItem + } else { + newCelebrationQueue = [...prev.celebrationQueue, newCelebrationItem] + } + } + + return { + ...prev, + activeAgents: newAgents, + recentActivity: newActivity, + celebrationQueue: newCelebrationQueue, + celebration: newCelebration, + } + }) + break + case 'dev_log': setState(prev => ({ ...prev, @@ -147,6 +269,19 @@ export function useProjectWebSocket(projectName: string | null) { } }, []) + // Clear celebration and show next one from queue if available + const clearCelebration = useCallback(() => { + setState(prev => { + // Pop the next celebration from the queue if available + const [nextCelebration, ...remainingQueue] = prev.celebrationQueue + return { + ...prev, + celebration: nextCelebration || null, + celebrationQueue: remainingQueue, + } + }) + }, []) + // Connect when project changes useEffect(() => { // Reset state when project changes to clear stale data @@ -158,6 +293,10 @@ export function useProjectWebSocket(projectName: string | null) { devServerStatus: 'stopped', devServerUrl: null, devLogs: [], + activeAgents: [], + recentActivity: [], + celebrationQueue: [], + celebration: null, }) if (!projectName) { @@ -200,5 +339,6 @@ export function useProjectWebSocket(projectName: string | null) { ...state, clearLogs, clearDevLogs, + clearCelebration, } } diff --git a/ui/src/lib/api.ts b/ui/src/lib/api.ts index 85345c0..b12203a 100644 --- a/ui/src/lib/api.ts +++ b/ui/src/lib/api.ts @@ -12,6 +12,7 @@ import type { FeatureUpdate, FeatureBulkCreate, FeatureBulkCreateResponse, + DependencyGraph, AgentStatusResponse, AgentActionResponse, SetupStatus, @@ -141,6 +142,50 @@ export async function createFeaturesBulk( }) } +// ============================================================================ +// Dependency Graph API +// ============================================================================ + +export async function getDependencyGraph(projectName: string): Promise { + return fetchJSON(`/projects/${encodeURIComponent(projectName)}/features/graph`) +} + +export async function addDependency( + projectName: string, + featureId: number, + dependencyId: number +): Promise<{ success: boolean; feature_id: number; dependencies: number[] }> { + return fetchJSON( + 
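Note: the `agent_update` handling above keeps three pieces of state in sync: the active-agent list (agents leave the list on `success`), a capped activity feed, and a celebration queue drained one item at a time by `clearCelebration`. A simplified pure-function sketch of that transition (the activity feed and the show-immediately-if-idle behaviour are omitted; the function and state names here are illustrative):

```ts
import type { ActiveAgent, AgentMascot, WSAgentUpdateMessage } from '../lib/types'

interface CelebrationTrigger {
  agentName: AgentMascot
  featureName: string
  featureId: number
}

interface AgentViewState {
  activeAgents: ActiveAgent[]
  celebrationQueue: CelebrationTrigger[]
}

// 'success' removes the agent and queues a celebration;
// any other state upserts the agent keyed by agentIndex.
export function applyAgentUpdate(prev: AgentViewState, msg: WSAgentUpdateMessage): AgentViewState {
  if (msg.state === 'success') {
    return {
      activeAgents: prev.activeAgents.filter((a) => a.agentIndex !== msg.agentIndex),
      celebrationQueue: [
        ...prev.celebrationQueue,
        { agentName: msg.agentName, featureName: msg.featureName, featureId: msg.featureId },
      ],
    }
  }

  const agent: ActiveAgent = {
    agentIndex: msg.agentIndex,
    agentName: msg.agentName,
    featureId: msg.featureId,
    featureName: msg.featureName,
    state: msg.state,
    thought: msg.thought,
    timestamp: msg.timestamp,
  }
  const existing = prev.activeAgents.findIndex((a) => a.agentIndex === msg.agentIndex)
  const activeAgents =
    existing >= 0
      ? prev.activeAgents.map((a, i) => (i === existing ? agent : a))
      : [...prev.activeAgents, agent]

  return { ...prev, activeAgents }
}
```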
`/projects/${encodeURIComponent(projectName)}/features/${featureId}/dependencies/${dependencyId}`, + { method: 'POST' } + ) +} + +export async function removeDependency( + projectName: string, + featureId: number, + dependencyId: number +): Promise<{ success: boolean; feature_id: number; dependencies: number[] }> { + return fetchJSON( + `/projects/${encodeURIComponent(projectName)}/features/${featureId}/dependencies/${dependencyId}`, + { method: 'DELETE' } + ) +} + +export async function setDependencies( + projectName: string, + featureId: number, + dependencyIds: number[] +): Promise<{ success: boolean; feature_id: number; dependencies: number[] }> { + return fetchJSON( + `/projects/${encodeURIComponent(projectName)}/features/${featureId}/dependencies`, + { + method: 'PUT', + body: JSON.stringify({ dependency_ids: dependencyIds }), + } + ) +} + // ============================================================================ // Agent API // ============================================================================ @@ -151,11 +196,19 @@ export async function getAgentStatus(projectName: string): Promise { return fetchJSON(`/projects/${encodeURIComponent(projectName)}/agent/start`, { method: 'POST', - body: JSON.stringify({ yolo_mode: yoloMode }), + body: JSON.stringify({ + yolo_mode: options.yoloMode ?? false, + parallel_mode: options.parallelMode ?? false, + max_concurrency: options.maxConcurrency, + }), }) } diff --git a/ui/src/lib/types.ts b/ui/src/lib/types.ts index 80d6b1f..8b1ceed 100644 --- a/ui/src/lib/types.ts +++ b/ui/src/lib/types.ts @@ -66,6 +66,32 @@ export interface Feature { steps: string[] passes: boolean in_progress: boolean + dependencies?: number[] // Optional for backwards compat + blocked?: boolean // Computed by API + blocking_dependencies?: number[] // Computed by API +} + +// Status type for graph nodes +export type FeatureStatus = 'pending' | 'in_progress' | 'done' | 'blocked' + +// Graph visualization types +export interface GraphNode { + id: number + name: string + category: string + status: FeatureStatus + priority: number + dependencies: number[] +} + +export interface GraphEdge { + source: number + target: number +} + +export interface DependencyGraph { + nodes: GraphNode[] + edges: GraphEdge[] } export interface FeatureListResponse { @@ -80,6 +106,7 @@ export interface FeatureCreate { description: string steps: string[] priority?: number + dependencies?: number[] } export interface FeatureUpdate { @@ -88,6 +115,7 @@ export interface FeatureUpdate { description?: string steps?: string[] priority?: number + dependencies?: number[] } // Agent types @@ -99,6 +127,8 @@ export interface AgentStatusResponse { started_at: string | null yolo_mode: boolean model: string | null // Model being used by running agent + parallel_mode: boolean + max_concurrency: number | null } export interface AgentActionResponse { @@ -140,8 +170,26 @@ export interface TerminalInfo { created_at: string } +// Agent mascot names for multi-agent UI +export const AGENT_MASCOTS = ['Spark', 'Fizz', 'Octo', 'Hoot', 'Buzz'] as const +export type AgentMascot = typeof AGENT_MASCOTS[number] + +// Agent state for Mission Control +export type AgentState = 'idle' | 'thinking' | 'working' | 'testing' | 'success' | 'error' | 'struggling' + +// Agent update from backend +export interface ActiveAgent { + agentIndex: number + agentName: AgentMascot + featureId: number + featureName: string + state: AgentState + thought?: string + timestamp: string +} + // WebSocket message types -export type WSMessageType = 
'progress' | 'feature_update' | 'log' | 'agent_status' | 'pong' | 'dev_log' | 'dev_server_status' +export type WSMessageType = 'progress' | 'feature_update' | 'log' | 'agent_status' | 'pong' | 'dev_log' | 'dev_server_status' | 'agent_update' export interface WSProgressMessage { type: 'progress' @@ -161,6 +209,20 @@ export interface WSLogMessage { type: 'log' line: string timestamp: string + featureId?: number + agentIndex?: number + agentName?: AgentMascot +} + +export interface WSAgentUpdateMessage { + type: 'agent_update' + agentIndex: number + agentName: AgentMascot + featureId: number + featureName: string + state: AgentState + thought?: string + timestamp: string } export interface WSAgentStatusMessage { @@ -189,6 +251,7 @@ export type WSMessage = | WSFeatureUpdateMessage | WSLogMessage | WSAgentStatusMessage + | WSAgentUpdateMessage | WSPongMessage | WSDevLogMessage | WSDevServerStatusMessage diff --git a/ui/src/styles/globals.css b/ui/src/styles/globals.css index 144c513..5c8199a 100644 --- a/ui/src/styles/globals.css +++ b/ui/src/styles/globals.css @@ -870,6 +870,96 @@ } } +/* ============================================================================ + Agent Mascot Animations + ============================================================================ */ + +@keyframes bounce-gentle { + 0%, 100% { + transform: translateY(0); + } + 50% { + transform: translateY(-4px); + } +} + +@keyframes thinking { + 0%, 100% { + transform: translateY(0) scale(1); + } + 25% { + transform: translateY(-2px) scale(1.02); + } + 50% { + transform: translateY(0) scale(1); + } + 75% { + transform: translateY(-2px) scale(0.98); + } +} + +@keyframes working { + 0%, 100% { + transform: translateX(0); + } + 25% { + transform: translateX(-1px); + } + 75% { + transform: translateX(1px); + } +} + +@keyframes testing { + 0%, 100% { + transform: rotate(0deg); + } + 25% { + transform: rotate(-3deg); + } + 75% { + transform: rotate(3deg); + } +} + +@keyframes celebrate { + 0%, 100% { + transform: scale(1) rotate(0deg); + } + 25% { + transform: scale(1.1) rotate(-5deg); + } + 50% { + transform: scale(1.15) rotate(0deg); + } + 75% { + transform: scale(1.1) rotate(5deg); + } +} + +@keyframes shake-gentle { + 0%, 100% { + transform: translateX(0); + } + 20%, 60% { + transform: translateX(-2px); + } + 40%, 80% { + transform: translateX(2px); + } +} + +@keyframes confetti { + 0% { + transform: translateY(0) rotate(0deg); + opacity: 1; + } + 100% { + transform: translateY(100vh) rotate(720deg); + opacity: 0; + } +} + /* ============================================================================ Utilities Layer ============================================================================ */ @@ -970,6 +1060,35 @@ .font-mono { font-family: var(--font-neo-mono); } + + /* Agent mascot animation utilities */ + .animate-bounce-gentle { + animation: bounce-gentle 2s ease-in-out infinite; + } + + .animate-thinking { + animation: thinking 1.5s ease-in-out infinite; + } + + .animate-working { + animation: working 0.3s ease-in-out infinite; + } + + .animate-testing { + animation: testing 0.8s ease-in-out infinite; + } + + .animate-celebrate { + animation: celebrate 0.6s ease-in-out; + } + + .animate-shake-gentle { + animation: shake-gentle 0.5s ease-in-out infinite; + } + + .animate-confetti { + animation: confetti 2s ease-out forwards; + } } /* ============================================================================
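Note: taken together, the UI changes in this diff flow from the AgentControl concurrency slider through `useStartAgent` into `api.startAgent`, which converts the camelCase options into the snake_case request body, while the graph view polls `getDependencyGraph`. A short usage sketch (the project name and import path are illustrative):

```ts
import { startAgent, getDependencyGraph } from './lib/api'

// Start up to three agents in parallel on a project. api.startAgent serializes
// these options as { yolo_mode: false, parallel_mode: true, max_concurrency: 3 }.
async function launchParallelRun(projectName: string, concurrency: number) {
  await startAgent(projectName, {
    yoloMode: false,
    parallelMode: concurrency > 1,
    maxConcurrency: concurrency > 1 ? concurrency : undefined,
  })

  // The graph view polls this endpoint every 5 seconds while it is visible.
  const graph = await getDependencyGraph(projectName)
  console.log(`${graph.nodes.length} features, ${graph.edges.length} dependency edges`)
}

launchParallelRun('my-project', 3).catch(console.error)
```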