diff --git a/CLAUDE.md b/CLAUDE.md index 51c0949..a6857db 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -45,6 +45,9 @@ python autonomous_agent_demo.py --project-dir my-app # if registered # YOLO mode: rapid prototyping without browser testing python autonomous_agent_demo.py --project-dir my-app --yolo + +# Parallel mode: run multiple agents concurrently (1-5 agents) +python autonomous_agent_demo.py --project-dir my-app --parallel --max-concurrency 3 ``` ### YOLO Mode (Rapid Prototyping) @@ -95,6 +98,8 @@ npm run lint # Run ESLint - `prompts.py` - Prompt template loading with project-specific fallback - `progress.py` - Progress tracking, database queries, webhook notifications - `registry.py` - Project registry for mapping names to paths (cross-platform) +- `parallel_orchestrator.py` - Concurrent agent execution with dependency-aware scheduling +- `api/dependency_resolver.py` - Cycle detection (Kahn's algorithm + DFS) and dependency validation ### Project Registry @@ -121,26 +126,40 @@ The FastAPI server provides REST endpoints for the UI: Features are stored in SQLite (`features.db`) via SQLAlchemy. The agent interacts with features through an MCP server: - `mcp_server/feature_mcp.py` - MCP server exposing feature management tools -- `api/database.py` - SQLAlchemy models (Feature table with priority, category, name, description, steps, passes) +- `api/database.py` - SQLAlchemy models (Feature table with priority, category, name, description, steps, passes, dependencies) MCP tools available to the agent: - `feature_get_stats` - Progress statistics -- `feature_get_next` - Get highest-priority pending feature +- `feature_get_next` - Get highest-priority pending feature (respects dependencies) +- `feature_claim_next` - Atomically claim next available feature (for parallel mode) - `feature_get_for_regression` - Random passing features for regression testing - `feature_mark_passing` - Mark feature complete - `feature_skip` - Move feature to end of queue - `feature_create_bulk` - Initialize all features (used by initializer) +- `feature_add_dependency` - Add dependency between features (with cycle detection) +- `feature_remove_dependency` - Remove a dependency ### React UI (ui/) -- Tech stack: React 18, TypeScript, TanStack Query, Tailwind CSS v4, Radix UI +- Tech stack: React 18, TypeScript, TanStack Query, Tailwind CSS v4, Radix UI, dagre (graph layout) - `src/App.tsx` - Main app with project selection, kanban board, agent controls -- `src/hooks/useWebSocket.ts` - Real-time updates via WebSocket +- `src/hooks/useWebSocket.ts` - Real-time updates via WebSocket (progress, agent status, logs, agent updates) - `src/hooks/useProjects.ts` - React Query hooks for API calls - `src/lib/api.ts` - REST API client - `src/lib/types.ts` - TypeScript type definitions -- `src/components/FolderBrowser.tsx` - Server-side filesystem browser for project folder selection -- `src/components/NewProjectModal.tsx` - Multi-step project creation wizard + +Key components: +- `AgentMissionControl.tsx` - Dashboard showing active agents with mascots (Spark, Fizz, Octo, Hoot, Buzz) +- `DependencyGraph.tsx` - Interactive node graph visualization with dagre layout +- `CelebrationOverlay.tsx` - Confetti animation on feature completion +- `FolderBrowser.tsx` - Server-side filesystem browser for project folder selection + +Keyboard shortcuts (press `?` for help): +- `D` - Toggle debug panel +- `G` - Toggle Kanban/Graph view +- `N` - Add new feature +- `A` - Toggle AI assistant +- `,` - Open settings ### Project Structure for Generated 
Apps @@ -181,10 +200,20 @@ Defense-in-depth approach configured in `client.py`: ### Real-time UI Updates The UI receives updates via WebSocket (`/ws/projects/{project_name}`): -- `progress` - Test pass counts +- `progress` - Test pass counts (passing, in_progress, total) - `agent_status` - Running/paused/stopped/crashed -- `log` - Agent output lines (streamed from subprocess stdout) +- `log` - Agent output lines with optional featureId/agentIndex for attribution - `feature_update` - Feature status changes +- `agent_update` - Multi-agent state updates (thinking/working/testing/success/error) with mascot names + +### Parallel Mode + +When running with `--parallel`, the orchestrator: +1. Spawns multiple Claude agents as subprocesses (up to `--max-concurrency`) +2. Each agent claims features atomically via `feature_claim_next` +3. Features blocked by unmet dependencies are skipped +4. Browser contexts are isolated per agent using `--isolated` flag +5. AgentTracker parses output and emits `agent_update` messages for UI ### Design System diff --git a/agent.py b/agent.py index c6199b4..79d585c 100644 --- a/agent.py +++ b/agent.py @@ -203,8 +203,14 @@ async def run_autonomous_agent( prompt = get_coding_prompt(project_dir) # Run session with async context manager - async with client: - status, response = await run_agent_session(client, prompt, project_dir) + # Wrap in try/except to handle MCP server startup failures gracefully + try: + async with client: + status, response = await run_agent_session(client, prompt, project_dir) + except Exception as e: + print(f"Client/MCP server error: {e}") + # Don't crash - return error status so the loop can retry + status, response = "error", str(e) # Handle status if status == "continue": diff --git a/api/dependency_resolver.py b/api/dependency_resolver.py index daaad17..3e1980b 100644 --- a/api/dependency_resolver.py +++ b/api/dependency_resolver.py @@ -245,6 +245,81 @@ def _detect_cycles(features: list[dict], feature_map: dict) -> list[list[int]]: return cycles +def compute_scheduling_scores(features: list[dict]) -> dict[int, float]: + """Compute scheduling scores for all features. + + Higher scores mean higher priority for scheduling. The algorithm considers: + 1. Unblocking potential - Features that unblock more downstream work score higher + 2. Depth in graph - Features with no dependencies (roots) are "shovel-ready" + 3. 
User priority - Existing priority field as tiebreaker + + Score formula: (1000 * unblock) + (100 * depth_score) + (10 * priority_factor) + + Args: + features: List of feature dicts with id, priority, dependencies fields + + Returns: + Dict mapping feature_id -> score (higher = schedule first) + """ + if not features: + return {} + + # Build adjacency lists + children: dict[int, list[int]] = {f["id"]: [] for f in features} # who depends on me + parents: dict[int, list[int]] = {f["id"]: [] for f in features} # who I depend on + + for f in features: + for dep_id in (f.get("dependencies") or []): + if dep_id in children: # Only valid deps + children[dep_id].append(f["id"]) + parents[f["id"]].append(dep_id) + + # Calculate depths via BFS from roots + depths: dict[int, int] = {} + roots = [f["id"] for f in features if not parents[f["id"]]] + queue = [(root, 0) for root in roots] + while queue: + node_id, depth = queue.pop(0) + if node_id not in depths or depth > depths[node_id]: + depths[node_id] = depth + for child_id in children[node_id]: + queue.append((child_id, depth + 1)) + + # Handle orphaned nodes (shouldn't happen but be safe) + for f in features: + if f["id"] not in depths: + depths[f["id"]] = 0 + + # Calculate transitive downstream counts (reverse topo order) + downstream: dict[int, int] = {f["id"]: 0 for f in features} + # Process in reverse depth order (leaves first) + for fid in sorted(depths.keys(), key=lambda x: -depths[x]): + for parent_id in parents[fid]: + downstream[parent_id] += 1 + downstream[fid] + + # Normalize and compute scores + max_depth = max(depths.values()) if depths else 0 + max_downstream = max(downstream.values()) if downstream else 0 + + scores: dict[int, float] = {} + for f in features: + fid = f["id"] + + # Unblocking score: 0-1, higher = unblocks more + unblock = downstream[fid] / max_downstream if max_downstream > 0 else 0 + + # Depth score: 0-1, higher = closer to root (no deps) + depth_score = 1 - (depths[fid] / max_depth) if max_depth > 0 else 1 + + # Priority factor: 0-1, lower priority number = higher factor + priority = f.get("priority", 999) + priority_factor = (10 - min(priority, 10)) / 10 + + scores[fid] = (1000 * unblock) + (100 * depth_score) + (10 * priority_factor) + + return scores + + def get_ready_features(features: list[dict], limit: int = 10) -> list[dict]: """Get features that are ready to be worked on. 
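To make the weighting concrete, here is a small worked example (an illustrative aside, not part of the patch) that runs the new scorer on a toy four-feature graph; the ids, priorities, and dependencies below are invented purely for illustration:

```python
# Toy run of compute_scheduling_scores as added above; the feature dicts carry
# the id / priority / dependencies fields described in its docstring.
from api.dependency_resolver import compute_scheduling_scores

features = [
    {"id": 1, "priority": 1, "dependencies": []},   # root, unblocks 2 and 3
    {"id": 2, "priority": 2, "dependencies": [1]},
    {"id": 3, "priority": 3, "dependencies": [1]},
    {"id": 4, "priority": 1, "dependencies": []},    # root, unblocks nothing
]

scores = compute_scheduling_scores(features)
# Roughly {1: 1109.0, 4: 109.0, 2: 8.0, 3: 7.0}:
#   feature 1: unblock 2/2 -> 1000, depth_score 1 -> 100, priority 1 -> 9
#   feature 4: unblock 0, depth_score 1 -> 100, priority 1 -> 9
#   features 2 and 3: blocked and at depth 1, so only the priority term differs
order = sorted(features, key=lambda f: (-scores[f["id"]], f["priority"], f["id"]))
print([f["id"] for f in order])  # [1, 4, 2, 3]
```

The 1000/100/10 weighting makes unblocking potential the dominant term, with graph depth next and the user-assigned priority acting mainly as a tiebreaker between otherwise equivalent features.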
@@ -270,8 +345,9 @@ def get_ready_features(features: list[dict], limit: int = 10) -> list[dict]: if all(dep_id in passing_ids for dep_id in deps): ready.append(f) - # Sort by priority - ready.sort(key=lambda f: (f.get("priority", 999), f["id"])) + # Sort by scheduling score (higher = first), then priority, then id + scores = compute_scheduling_scores(features) + ready.sort(key=lambda f: (-scores.get(f["id"], 0), f.get("priority", 999), f["id"])) return ready[:limit] diff --git a/mcp_server/feature_mcp.py b/mcp_server/feature_mcp.py index f640fc5..f3f7c8d 100755 --- a/mcp_server/feature_mcp.py +++ b/mcp_server/feature_mcp.py @@ -41,6 +41,7 @@ from api.dependency_resolver import ( would_create_circular_dependency, are_dependencies_satisfied, get_blocking_dependencies, + compute_scheduling_scores, MAX_DEPENDENCIES_PER_FEATURE, ) @@ -178,7 +179,11 @@ def feature_get_next() -> str: # Get pending, non-in-progress features pending = [f for f in all_features if not f.passes and not f.in_progress] - pending.sort(key=lambda f: (f.priority, f.id)) + + # Sort by scheduling score (higher = first), then priority, then id + all_dicts = [f.to_dict() for f in all_features] + scores = compute_scheduling_scores(all_dicts) + pending.sort(key=lambda f: (-scores.get(f.id, 0), f.priority, f.id)) if not pending: if any(f.in_progress for f in all_features if not f.passes): @@ -247,7 +252,11 @@ def _feature_claim_next_internal(attempt: int = 0) -> str: # Get pending, non-in-progress features pending = [f for f in all_features if not f.passes and not f.in_progress] - pending.sort(key=lambda f: (f.priority, f.id)) + + # Sort by scheduling score (higher = first), then priority, then id + all_dicts = [f.to_dict() for f in all_features] + scores = compute_scheduling_scores(all_dicts) + pending.sort(key=lambda f: (-scores.get(f.id, 0), f.priority, f.id)) if not pending: if any(f.in_progress for f in all_features if not f.passes): @@ -814,6 +823,7 @@ def feature_get_ready( passing_ids = {f.id for f in all_features if f.passes} ready = [] + all_dicts = [f.to_dict() for f in all_features] for f in all_features: if f.passes or f.in_progress: continue @@ -821,8 +831,9 @@ def feature_get_ready( if all(dep_id in passing_ids for dep_id in deps): ready.append(f.to_dict()) - # Sort by priority - ready.sort(key=lambda f: (f["priority"], f["id"])) + # Sort by scheduling score (higher = first), then priority, then id + scores = compute_scheduling_scores(all_dicts) + ready.sort(key=lambda f: (-scores.get(f["id"], 0), f["priority"], f["id"])) return json.dumps({ "features": ready[:limit], diff --git a/parallel_orchestrator.py b/parallel_orchestrator.py index 35d03c4..8b634f6 100644 --- a/parallel_orchestrator.py +++ b/parallel_orchestrator.py @@ -20,7 +20,7 @@ from pathlib import Path from typing import Callable, Awaitable from api.database import Feature, create_database -from api.dependency_resolver import are_dependencies_satisfied +from api.dependency_resolver import are_dependencies_satisfied, compute_scheduling_scores # Root directory of autocoder (where this script and autonomous_agent_demo.py live) AUTOCODER_ROOT = Path(__file__).parent.resolve() @@ -103,8 +103,10 @@ class ParallelOrchestrator: continue resumable.append(f.to_dict()) - # Sort by priority (highest priority first) - resumable.sort(key=lambda f: (f["priority"], f["id"])) + # Sort by scheduling score (higher = first), then priority, then id + all_dicts = [f.to_dict() for f in session.query(Feature).all()] + scores = compute_scheduling_scores(all_dicts) + 
resumable.sort(key=lambda f: (-scores.get(f["id"], 0), f["priority"], f["id"])) return resumable finally: session.close() @@ -131,18 +133,25 @@ class ParallelOrchestrator: if are_dependencies_satisfied(f.to_dict(), all_dicts): ready.append(f.to_dict()) - # Sort by priority - ready.sort(key=lambda f: (f["priority"], f["id"])) + # Sort by scheduling score (higher = first), then priority, then id + scores = compute_scheduling_scores(all_dicts) + ready.sort(key=lambda f: (-scores.get(f["id"], 0), f["priority"], f["id"])) return ready finally: session.close() def get_all_complete(self) -> bool: - """Check if all features are complete.""" + """Check if all features are complete or permanently failed.""" session = self.get_session() try: - pending = session.query(Feature).filter(Feature.passes == False).count() - return pending == 0 + all_features = session.query(Feature).all() + for f in all_features: + if f.passes: + continue # Completed successfully + if self._failure_counts.get(f.id, 0) >= MAX_FEATURE_RETRIES: + continue # Permanently failed, count as "done" + return False # Still workable + return True finally: session.close() @@ -289,6 +298,7 @@ class ParallelOrchestrator: status = "completed" if return_code == 0 else "failed" if self.on_status: self.on_status(feature_id, status) + # CRITICAL: This print triggers the WebSocket to emit agent_update with state='error' or 'success' print(f"Feature #{feature_id} {status}", flush=True) def stop_feature(self, feature_id: int) -> tuple[bool, str]: diff --git a/ui/src/App.tsx b/ui/src/App.tsx index fbaff40..148dc66 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -336,6 +336,7 @@ function App() { )} diff --git a/ui/src/components/AgentCard.tsx b/ui/src/components/AgentCard.tsx index 0c5c510..2c027b2 100644 --- a/ui/src/components/AgentCard.tsx +++ b/ui/src/components/AgentCard.tsx @@ -1,30 +1,33 @@ -import { MessageCircle } from 'lucide-react' +import { MessageCircle, ScrollText, X, Copy, Check } from 'lucide-react' +import { useState } from 'react' +import { createPortal } from 'react-dom' import { AgentAvatar } from './AgentAvatar' -import type { ActiveAgent } from '../lib/types' +import type { ActiveAgent, AgentLogEntry } from '../lib/types' interface AgentCardProps { agent: ActiveAgent + onShowLogs?: (agentIndex: number) => void } // Get a friendly state description function getStateText(state: ActiveAgent['state']): string { switch (state) { case 'idle': - return 'Waiting...' + return 'Standing by...' case 'thinking': - return 'Thinking...' + return 'Pondering...' case 'working': - return 'Coding...' + return 'Coding away...' case 'testing': - return 'Testing...' + return 'Checking work...' case 'success': - return 'Done!' + return 'Nailed it!' case 'error': - return 'Hit an issue' + return 'Trying plan B...' case 'struggling': - return 'Retrying...' + return 'Being persistent...' default: - return 'Working...' + return 'Busy...' 
} } @@ -34,8 +37,9 @@ function getStateColor(state: ActiveAgent['state']): string { case 'success': return 'text-neo-done' case 'error': + return 'text-neo-pending' // Yellow - just pivoting, not a real error case 'struggling': - return 'text-neo-danger' + return 'text-orange-500' // Orange - working hard, being persistent case 'working': case 'testing': return 'text-neo-progress' @@ -46,8 +50,9 @@ function getStateColor(state: ActiveAgent['state']): string { } } -export function AgentCard({ agent }: AgentCardProps) { +export function AgentCard({ agent, onShowLogs }: AgentCardProps) { const isActive = ['thinking', 'working', 'testing'].includes(agent.state) + const hasLogs = agent.logs && agent.logs.length > 0 return (
+      {/* Log button */}
+      {hasLogs && onShowLogs && (
+        <button onClick={() => onShowLogs(agent.agentIndex)}>
+          <ScrollText />
+        </button>
+      )}
       {/* Feature info */}
@@ -97,3 +112,103 @@ export function AgentCard({ agent }: AgentCardProps) {
     )
 }
+
+// Log viewer modal component
+interface AgentLogModalProps {
+  agent: ActiveAgent
+  logs: AgentLogEntry[]
+  onClose: () => void
+}
+
+export function AgentLogModal({ agent, logs, onClose }: AgentLogModalProps) {
+  const [copied, setCopied] = useState(false)
+
+  const handleCopy = async () => {
+    const logText = logs
+      .map(log => `[${log.timestamp}] ${log.line}`)
+      .join('\n')
+    await navigator.clipboard.writeText(logText)
+    setCopied(true)
+    setTimeout(() => setCopied(false), 2000)
+  }
+
+  const getLogColor = (type: AgentLogEntry['type']) => {
+    switch (type) {
+      case 'error':
+        return 'text-neo-danger'
+      case 'state_change':
+        return 'text-neo-progress'
+      default:
+        return 'text-neo-text'
+    }
+  }
+
+  // Use portal to render modal at document body level (avoids overflow:hidden issues)
+  return createPortal(
+    <div
+      onClick={(e) => {
+        // Close when clicking backdrop
+        if (e.target === e.currentTarget) onClose()
+      }}
+    >
+      <div>
+        {/* Header */}
+        <div>
+          <div>
+            <div>
+              {agent.agentName} Logs
+            </div>
+            <div>
+              Feature #{agent.featureId}: {agent.featureName}
+            </div>
+          </div>
+          <div>
+            <button onClick={handleCopy}>
+              {copied ? <Check /> : <Copy />}
+            </button>
+            <button onClick={onClose}>
+              <X />
+            </button>
+          </div>
+        </div>
+
+        {/* Log content */}
+        <div>
+          {logs.length === 0 ? (
+            <div>
+              No logs available
+            </div>
+          ) : (
+            <div>
+              {logs.map((log, idx) => (
+                <div key={idx} className={getLogColor(log.type)}>
+                  <span>
+                    [{new Date(log.timestamp).toLocaleTimeString()}]
+                  </span>
+                  {' '}
+                  {log.line}
+                </div>
+              ))}
+            </div>
+          )}
+        </div>
+
+        {/* Footer */}
+        <div>
+          {logs.length} log entries
+        </div>
+      </div>
+    </div>,
+    document.body
+  )
+}
diff --git a/ui/src/components/AgentMissionControl.tsx b/ui/src/components/AgentMissionControl.tsx
index 8935d35..c4ed1b8 100644
--- a/ui/src/components/AgentMissionControl.tsx
+++ b/ui/src/components/AgentMissionControl.tsx
@@ -1,8 +1,8 @@
 import { Rocket, ChevronDown, ChevronUp, Activity } from 'lucide-react'
 import { useState } from 'react'
-import { AgentCard } from './AgentCard'
+import { AgentCard, AgentLogModal } from './AgentCard'
 import { ActivityFeed } from './ActivityFeed'
-import type { ActiveAgent } from '../lib/types'
+import type { ActiveAgent, AgentLogEntry } from '../lib/types'

 const ACTIVITY_COLLAPSED_KEY = 'autocoder-activity-collapsed'

@@ -15,12 +15,14 @@ interface AgentMissionControlProps {
     featureId: number
   }>
   isExpanded?: boolean
+  getAgentLogs?: (agentIndex: number) => AgentLogEntry[]
 }

 export function AgentMissionControl({
   agents,
   recentActivity,
   isExpanded: defaultExpanded = true,
+  getAgentLogs,
 }: AgentMissionControlProps) {
   const [isExpanded, setIsExpanded] = useState(defaultExpanded)
   const [activityCollapsed, setActivityCollapsed] = useState(() => {
@@ -30,6 +32,8 @@
       return false
     }
   })
+  // State for log modal
+  const [selectedAgentForLogs, setSelectedAgentForLogs] = useState<ActiveAgent | null>(null)

   const toggleActivityCollapsed = () => {
     const newValue = !activityCollapsed
@@ -80,7 +84,16 @@
           {/* Agent Cards Row */}
           {agents.map((agent) => (
-            <AgentCard key={agent.agentIndex} agent={agent} />
+            <AgentCard
+              key={agent.agentIndex}
+              agent={agent}
+              onShowLogs={(agentIndex) => {
+                const agentToShow = agents.find(a => a.agentIndex === agentIndex)
+                if (agentToShow) {
+                  setSelectedAgentForLogs(agentToShow)
+                }
+              }}
+            />
           ))}
@@ -116,6 +129,15 @@ export function AgentMissionControl({ )} + + {/* Log Modal */} + {selectedAgentForLogs && getAgentLogs && ( + setSelectedAgentForLogs(null)} + /> + )} ) } diff --git a/ui/src/hooks/useWebSocket.ts b/ui/src/hooks/useWebSocket.ts index e6b143c..f1b44ab 100644 --- a/ui/src/hooks/useWebSocket.ts +++ b/ui/src/hooks/useWebSocket.ts @@ -9,6 +9,7 @@ import type { DevServerStatus, ActiveAgent, AgentMascot, + AgentLogEntry, } from '../lib/types' // Activity item for the feed @@ -42,6 +43,8 @@ interface WebSocketState { // Multi-agent state activeAgents: ActiveAgent[] recentActivity: ActivityItem[] + // Per-agent logs for debugging (indexed by agentIndex) + agentLogs: Map // Celebration queue to handle rapid successes without race conditions celebrationQueue: CelebrationTrigger[] celebration: CelebrationTrigger | null @@ -49,6 +52,7 @@ interface WebSocketState { const MAX_LOGS = 100 // Keep last 100 log lines const MAX_ACTIVITY = 20 // Keep last 20 activity items +const MAX_AGENT_LOGS = 500 // Keep last 500 log lines per agent export function useProjectWebSocket(projectName: string | null) { const [state, setState] = useState({ @@ -61,6 +65,7 @@ export function useProjectWebSocket(projectName: string | null) { devLogs: [], activeAgents: [], recentActivity: [], + agentLogs: new Map(), celebrationQueue: [], celebration: null, }) @@ -111,9 +116,9 @@ export function useProjectWebSocket(projectName: string | null) { break case 'log': - setState(prev => ({ - ...prev, - logs: [ + setState(prev => { + // Update global logs + const newLogs = [ ...prev.logs.slice(-MAX_LOGS + 1), { line: message.line, @@ -121,8 +126,26 @@ export function useProjectWebSocket(projectName: string | null) { featureId: message.featureId, agentIndex: message.agentIndex, }, - ], - })) + ] + + // Also store in per-agent logs if we have an agentIndex + let newAgentLogs = prev.agentLogs + if (message.agentIndex !== undefined) { + newAgentLogs = new Map(prev.agentLogs) + const existingLogs = newAgentLogs.get(message.agentIndex) || [] + const logEntry: AgentLogEntry = { + line: message.line, + timestamp: message.timestamp, + type: 'output', + } + newAgentLogs.set( + message.agentIndex, + [...existingLogs.slice(-MAX_AGENT_LOGS + 1), logEntry] + ) + } + + return { ...prev, logs: newLogs, agentLogs: newAgentLogs } + }) break case 'feature_update': @@ -131,21 +154,38 @@ export function useProjectWebSocket(projectName: string | null) { case 'agent_update': setState(prev => { + // Log state change to per-agent logs + const newAgentLogs = new Map(prev.agentLogs) + const existingLogs = newAgentLogs.get(message.agentIndex) || [] + const stateLogEntry: AgentLogEntry = { + line: `[STATE] ${message.state}${message.thought ? `: ${message.thought}` : ''}`, + timestamp: message.timestamp, + type: message.state === 'error' ? 
'error' : 'state_change', + } + newAgentLogs.set( + message.agentIndex, + [...existingLogs.slice(-MAX_AGENT_LOGS + 1), stateLogEntry] + ) + + // Get current logs for this agent to attach to ActiveAgent + const agentLogsArray = newAgentLogs.get(message.agentIndex) || [] + // Update or add the agent in activeAgents - const agentIndex = prev.activeAgents.findIndex( + const existingAgentIdx = prev.activeAgents.findIndex( a => a.agentIndex === message.agentIndex ) let newAgents: ActiveAgent[] - if (message.state === 'success') { - // Remove agent from active list on success + if (message.state === 'success' || message.state === 'error') { + // Remove agent from active list on completion (success or failure) + // But keep the logs in agentLogs map for debugging newAgents = prev.activeAgents.filter( a => a.agentIndex !== message.agentIndex ) - } else if (agentIndex >= 0) { + } else if (existingAgentIdx >= 0) { // Update existing agent newAgents = [...prev.activeAgents] - newAgents[agentIndex] = { + newAgents[existingAgentIdx] = { agentIndex: message.agentIndex, agentName: message.agentName, featureId: message.featureId, @@ -153,6 +193,7 @@ export function useProjectWebSocket(projectName: string | null) { state: message.state, thought: message.thought, timestamp: message.timestamp, + logs: agentLogsArray, } } else { // Add new agent @@ -166,6 +207,7 @@ export function useProjectWebSocket(projectName: string | null) { state: message.state, thought: message.thought, timestamp: message.timestamp, + logs: agentLogsArray, }, ] } @@ -207,6 +249,7 @@ export function useProjectWebSocket(projectName: string | null) { return { ...prev, activeAgents: newAgents, + agentLogs: newAgentLogs, recentActivity: newActivity, celebrationQueue: newCelebrationQueue, celebration: newCelebration, @@ -295,6 +338,7 @@ export function useProjectWebSocket(projectName: string | null) { devLogs: [], activeAgents: [], recentActivity: [], + agentLogs: new Map(), celebrationQueue: [], celebration: null, }) @@ -335,10 +379,26 @@ export function useProjectWebSocket(projectName: string | null) { setState(prev => ({ ...prev, devLogs: [] })) }, []) + // Get logs for a specific agent (useful for debugging even after agent completes/fails) + const getAgentLogs = useCallback((agentIndex: number): AgentLogEntry[] => { + return state.agentLogs.get(agentIndex) || [] + }, [state.agentLogs]) + + // Clear logs for a specific agent + const clearAgentLogs = useCallback((agentIndex: number) => { + setState(prev => { + const newAgentLogs = new Map(prev.agentLogs) + newAgentLogs.delete(agentIndex) + return { ...prev, agentLogs: newAgentLogs } + }) + }, []) + return { ...state, clearLogs, clearDevLogs, clearCelebration, + getAgentLogs, + clearAgentLogs, } } diff --git a/ui/src/lib/types.ts b/ui/src/lib/types.ts index 8b1ceed..e4573b9 100644 --- a/ui/src/lib/types.ts +++ b/ui/src/lib/types.ts @@ -177,6 +177,13 @@ export type AgentMascot = typeof AGENT_MASCOTS[number] // Agent state for Mission Control export type AgentState = 'idle' | 'thinking' | 'working' | 'testing' | 'success' | 'error' | 'struggling' +// Individual log entry for an agent +export interface AgentLogEntry { + line: string + timestamp: string + type: 'output' | 'state_change' | 'error' +} + // Agent update from backend export interface ActiveAgent { agentIndex: number @@ -186,6 +193,7 @@ export interface ActiveAgent { state: AgentState thought?: string timestamp: string + logs?: AgentLogEntry[] // Per-agent log history } // WebSocket message types
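Tying the pieces together, here is a rough asyncio sketch of the loop the Parallel Mode notes describe: claim dependency-satisfied features atomically, keep at most `--max-concurrency` agents in flight, and wait whenever everything remaining is blocked. `claim_next_ready` and `run_agent_for` are hypothetical stand-ins for illustration only; in the actual changes, agents claim work through the `feature_claim_next` MCP tool and the orchestrator spawns one subprocess per feature:

```python
# Hedged sketch of the parallel scheduling loop, assuming hypothetical helpers
# claim_next_ready() (returns a feature id or None when nothing is ready) and
# run_agent_for(feature_id) (an async callable that runs one agent to completion).
import asyncio

async def orchestrate(claim_next_ready, run_agent_for, max_concurrency: int = 3) -> None:
    """Keep up to max_concurrency agents working on claimable features."""
    running: dict[asyncio.Task, int] = {}

    while True:
        # Fill free slots with newly claimed, dependency-satisfied features.
        while len(running) < max_concurrency:
            feature_id = await claim_next_ready()
            if feature_id is None:
                break
            task = asyncio.create_task(run_agent_for(feature_id))
            running[task] = feature_id

        if not running:
            break  # nothing running and nothing claimable: done (or permanently blocked)

        # Wait for at least one agent to finish, then try to claim again.
        done, _ = await asyncio.wait(set(running), return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            running.pop(task)
```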