diff --git a/.automaker/feature_list.json b/.automaker/feature_list.json
index 5e055931..62098c7a 100644
--- a/.automaker/feature_list.json
+++ b/.automaker/feature_list.json
@@ -53,12 +53,12 @@
       "status": "verified"
     },
     {
-      "id": "feature-1765316742659-rll51tb62",
+      "id": "feature-1765318148517-715isvwwb",
       "category": "Kanban",
-      "description": "We implemented the follow-up prompt and the waiting-approval column, and they work well, but clicking the commit button commits with the title of the follow-up prompt. I don't like this; I would prefer the conventional commit message the agent generates when skip-automated-testing mode is off. Look at how we generate and handle the commit when automated testing is enabled, compare that with the flow when it is disabled and the card is waiting for our approval, and make the commit button handle it the same way.",
+      "description": "When the agent finishes work, the card is moved either to waiting-approval or to verified, and the agent's output usually includes some kind of summary at the end. Modify our prompts and UI so that in both states the feature summary of what was done/modified is visible on the card, instead of relying on opening the code editor to see what changed.",
       "steps": [],
-      "status": "waiting_approval",
-      "startedAt": "2025-12-09T21:56:25.922Z",
+      "status": "verified",
+      "startedAt": "2025-12-09T22:09:13.684Z",
       "imagePaths": [],
       "skipTests": true
     }
diff --git a/app/electron/services/feature-loader.js b/app/electron/services/feature-loader.js
index 5ad27319..e8e1bbbd 100644
--- a/app/electron/services/feature-loader.js
+++ b/app/electron/services/feature-loader.js
@@ -32,8 +32,12 @@ class FeatureLoader {
   /**
    * Update feature status in .automaker/feature_list.json
+   * @param {string} featureId - The ID of the feature to update
+   * @param {string} status - The new status
+   * @param {string} projectPath - Path to the project
+   * @param {string} [summary] - Optional summary of what was done
    */
-  async updateFeatureStatus(featureId, status, projectPath) {
+  async updateFeatureStatus(featureId, status, projectPath, summary) {
     const features = await this.loadFeatures(projectPath);
     const feature = features.find((f) => f.id === featureId);
@@ -45,6 +49,11 @@ class FeatureLoader {
     // Update the status field
     feature.status = status;
 
+    // Update the summary field if provided
+    if (summary) {
+      feature.summary = summary;
+    }
+
     // Save back to file
     const featuresPath = path.join(
       projectPath,
@@ -72,11 +81,14 @@ class FeatureLoader {
       if (f.startedAt !== undefined) {
         featureData.startedAt = f.startedAt;
       }
+      if (f.summary !== undefined) {
+        featureData.summary = f.summary;
+      }
       return featureData;
     });
 
     await fs.writeFile(featuresPath, JSON.stringify(toSave, null, 2), "utf-8");
-    console.log(`[FeatureLoader] Updated feature ${featureId}: status=${status}`);
+    console.log(`[FeatureLoader] Updated feature ${featureId}: status=${status}${summary ? `, summary="${summary}"` : ""}`);
   }
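To make the new optional parameter concrete, here is a minimal sketch of a call site; the project path is hypothetical and the loader's export shape is assumed rather than shown in this diff:

```ts
import featureLoader from "./feature-loader"; // export shape assumed

// Hypothetical call site for the extended signature. When `summary` is
// omitted, the loader leaves any existing summary untouched, because it
// only assigns the field for truthy values.
async function approveWithSummary(): Promise<void> {
  await featureLoader.updateFeatureStatus(
    "feature-1765318148517-715isvwwb", // id from the feature list above
    "waiting_approval",
    "/path/to/project", // hypothetical project root
    "Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx."
  );
  // The matching entry in .automaker/feature_list.json now also carries:
  //   "summary": "Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx."
}
```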
diff --git a/app/electron/services/mcp-server-factory.js b/app/electron/services/mcp-server-factory.js
index 65102729..bbb57d5e 100644
--- a/app/electron/services/mcp-server-factory.js
+++ b/app/electron/services/mcp-server-factory.js
@@ -19,14 +19,15 @@ class McpServerFactory {
       tools: [
         tool(
           "UpdateFeatureStatus",
-          "Update the status of a feature in the feature list. Use this tool instead of directly modifying feature_list.json to safely update feature status. IMPORTANT: If the feature has skipTests=true, you should NOT mark it as verified - instead it will automatically go to waiting_approval status for manual review.",
+          "Update the status of a feature in the feature list. Use this tool instead of directly modifying feature_list.json to safely update feature status. IMPORTANT: If the feature has skipTests=true, you should NOT mark it as verified - instead it will automatically go to waiting_approval status for manual review. Always include a summary of what was done.",
           {
             featureId: z.string().describe("The ID of the feature to update"),
-            status: z.enum(["backlog", "in_progress", "verified"]).describe("The new status for the feature. Note: If skipTests=true, verified will be converted to waiting_approval automatically.")
+            status: z.enum(["backlog", "in_progress", "verified"]).describe("The new status for the feature. Note: If skipTests=true, verified will be converted to waiting_approval automatically."),
+            summary: z.string().optional().describe("A brief summary of what was implemented/changed. This will be displayed on the Kanban card. Example: 'Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx'")
           },
           async (args) => {
             try {
-              console.log(`[McpServerFactory] UpdateFeatureStatus tool called: featureId=${args.featureId}, status=${args.status}`);
+              console.log(`[McpServerFactory] UpdateFeatureStatus tool called: featureId=${args.featureId}, status=${args.status}, summary=${args.summary || "(none)"}`);
 
               // Load the feature to check skipTests flag
               const features = await featureLoader.loadFeatures(projectPath);
@@ -43,12 +44,12 @@ class McpServerFactory {
                 finalStatus = "waiting_approval";
               }
 
-              // Call the provided callback to update feature status
-              await updateFeatureStatusCallback(args.featureId, finalStatus, projectPath);
+              // Call the provided callback to update feature status with summary
+              await updateFeatureStatusCallback(args.featureId, finalStatus, projectPath, args.summary);
 
               const statusMessage = finalStatus !== args.status
-                ? `Successfully updated feature ${args.featureId} to status "${finalStatus}" (converted from "${args.status}" because skipTests=true)`
-                : `Successfully updated feature ${args.featureId} to status "${finalStatus}"`;
+                ? `Successfully updated feature ${args.featureId} to status "${finalStatus}" (converted from "${args.status}" because skipTests=true)${args.summary ? ` with summary: "${args.summary}"` : ""}`
+                : `Successfully updated feature ${args.featureId} to status "${finalStatus}"${args.summary ? ` with summary: "${args.summary}"` : ""}`;
 
               return {
                 content: [{
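The skipTests conversion inside the handler reduces to a small pure rule; a sketch with illustrative names (not the factory's actual internals):

```ts
type RequestedStatus = "backlog" | "in_progress" | "verified"; // the zod enum above
type FinalStatus = RequestedStatus | "waiting_approval";

// Mirrors the handler's branching: "verified" is downgraded to
// "waiting_approval" when the feature has skipTests=true, so manual-review
// features never skip the approval column.
function resolveFinalStatus(requested: RequestedStatus, skipTests?: boolean): FinalStatus {
  return requested === "verified" && skipTests ? "waiting_approval" : requested;
}

resolveFinalStatus("verified", true);  // "waiting_approval"
resolveFinalStatus("verified", false); // "verified"
```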
diff --git a/app/electron/services/prompt-builder.js b/app/electron/services/prompt-builder.js
index 8f6389bd..4ec4c257 100644
--- a/app/electron/services/prompt-builder.js
+++ b/app/electron/services/prompt-builder.js
@@ -6,7 +6,7 @@ class PromptBuilder {
    * Build the prompt for implementing a specific feature
    */
   buildFeaturePrompt(feature) {
-    const skipTestsNote = feature.skipTests 
+    const skipTestsNote = feature.skipTests
       ? `\n**⚠️ IMPORTANT - Manual Testing Mode:**\nThis feature has skipTests=true, which means:\n- DO NOT commit changes automatically\n- DO NOT mark as verified - it will automatically go to "waiting_approval" status\n- The user will manually review and commit the changes\n- Just implement the feature and mark it as verified (it will be converted to waiting_approval)\n`
       : "";
 
@@ -25,11 +25,11 @@ ${feature.steps.map((step, i) => `${i + 1}. ${step}`).join("\n")}
 
 1. Read the project files to understand the current codebase structure
 2. Implement the feature according to the description and steps
-${feature.skipTests 
+${feature.skipTests
   ? "3. Test the implementation manually (no automated tests needed for skipTests features)"
   : "3. Write Playwright tests to verify the feature works correctly\n4. Run the tests and ensure they pass\n5. **DELETE the test file(s) you created** - tests are only for immediate verification"}
 ${feature.skipTests ? "4" : "6"}. **CRITICAL: Use the UpdateFeatureStatus tool to mark this feature as verified** - DO NOT manually edit .automaker/feature_list.json
-${feature.skipTests 
+${feature.skipTests
   ? "5. **DO NOT commit changes** - the user will review and commit manually"
   : "7. Commit your changes with git"}
 
@@ -37,20 +37,36 @@ ${feature.skipTests
 When you have completed the feature${feature.skipTests ? "" : " and all tests pass"}, you MUST use the \`mcp__automaker-tools__UpdateFeatureStatus\` tool to update the feature status:
 - Call the tool with: featureId="${feature.id}" and status="verified"
+- **You can also include a summary parameter** to describe what was done: summary="Brief summary of changes"
 - **DO NOT manually edit the .automaker/feature_list.json file** - this can cause race conditions
 - The UpdateFeatureStatus tool safely updates the feature status without risk of corrupting other data
 - **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct behavior
 
+**IMPORTANT - Feature Summary (REQUIRED):**
+
+When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
+- What files were modified/created
+- What functionality was added or changed
+- Any notable implementation decisions
+
+Example:
+\`\`\`
+UpdateFeatureStatus(featureId="${feature.id}", status="verified", summary="Added dark mode toggle to settings. Modified: settings.tsx, theme-provider.tsx. Created new useTheme hook.")
+\`\`\`
+
+The summary will be displayed on the Kanban card so the user can see what was done without checking the code.
+
 **Important Guidelines:**
 - Focus ONLY on implementing this specific feature
 - Write clean, production-quality code
 - Add proper error handling
-${feature.skipTests 
+${feature.skipTests
   ? "- Skip automated testing (skipTests=true) - user will manually verify"
   : "- Write comprehensive Playwright tests\n- Ensure all existing tests still pass\n- Mark the feature as passing only when all tests are green\n- **CRITICAL: Delete test files after verification** - tests accumulate and become brittle"}
 - **CRITICAL: Use UpdateFeatureStatus tool instead of editing feature_list.json directly**
-${feature.skipTests 
+- **CRITICAL: Always include a summary when marking feature as verified**
+${feature.skipTests
   ? "- **DO NOT commit changes** - user will review and commit manually"
   : "- Make a git commit when complete"}
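A small test pins down the skipTests branch of the implementation prompt above; the vitest-style assertions and the builder's export shape are assumptions, not part of this diff:

```ts
import { expect, it } from "vitest";
import PromptBuilder from "../electron/services/prompt-builder"; // export shape assumed

it("skipTests features get the manual-review prompt", () => {
  const prompt = new PromptBuilder().buildFeaturePrompt({
    id: "feature-123", // hypothetical feature
    description: "Example feature",
    steps: [],
    skipTests: true,
  });
  expect(prompt).toContain("DO NOT commit changes");
  expect(prompt).toContain("IMPORTANT - Feature Summary (REQUIRED)");
});
```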
@@ -83,7 +99,7 @@ Begin by reading the project structure and then implementing the feature.`;
   /**
    * Build the prompt for verifying a specific feature
    */
   buildVerificationPrompt(feature) {
-    const skipTestsNote = feature.skipTests 
+    const skipTestsNote = feature.skipTests
       ? `\n**⚠️ IMPORTANT - Manual Testing Mode:**\nThis feature has skipTests=true, which means:\n- DO NOT commit changes automatically\n- DO NOT mark as verified - it will automatically go to "waiting_approval" status\n- The user will manually review and commit the changes\n- Just implement the feature and mark it as verified (it will be converted to waiting_approval)\n`
       : "";
 
@@ -103,7 +119,7 @@ ${feature.steps.map((step, i) => `${i + 1}. ${step}`).join("\n")}
 
 1. Read the project files to understand the current implementation
 2. If the feature is not fully implemented, continue implementing it
-${feature.skipTests 
+${feature.skipTests
   ? "3. Test the implementation manually (no automated tests needed for skipTests features)"
   : `3. Write or update Playwright tests to verify the feature works correctly
 4. Run the Playwright tests: npx playwright test tests/[feature-name].spec.ts
@@ -117,7 +133,7 @@ ${feature.skipTests
 7. **If ALL tests pass:**
    - **DELETE the test file(s) for this feature** - tests are only for immediate verification`}
 ${feature.skipTests ? "4" : "8"}. **CRITICAL: Use the UpdateFeatureStatus tool to mark this feature as verified** - DO NOT manually edit .automaker/feature_list.json
-${feature.skipTests 
+${feature.skipTests
   ? "5. **DO NOT commit changes** - the user will review and commit manually"
   : "9. Explain what was implemented/fixed and that all tests passed\n10. Commit your changes with git"}
 
@@ -125,10 +141,25 @@ ${feature.skipTests
 When you have completed the feature${feature.skipTests ? "" : " and all tests pass"}, you MUST use the \`mcp__automaker-tools__UpdateFeatureStatus\` tool to update the feature status:
 - Call the tool with: featureId="${feature.id}" and status="verified"
+- **You can also include a summary parameter** to describe what was done: summary="Brief summary of changes"
 - **DO NOT manually edit the .automaker/feature_list.json file** - this can cause race conditions
 - The UpdateFeatureStatus tool safely updates the feature status without risk of corrupting other data
 - **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct behavior
 
+**IMPORTANT - Feature Summary (REQUIRED):**
+
+When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
+- What files were modified/created
+- What functionality was added or changed
+- Any notable implementation decisions
+
+Example:
+\`\`\`
+UpdateFeatureStatus(featureId="${feature.id}", status="verified", summary="Added dark mode toggle to settings. Modified: settings.tsx, theme-provider.tsx. Created new useTheme hook.")
+\`\`\`
+
+The summary will be displayed on the Kanban card so the user can see what was done without checking the code.
+
 **Testing Utilities:**
 - Check if tests/utils.ts exists and is being used
 - If utilities are outdated due to functionality changes, update them
 
@@ -142,10 +173,11 @@ rm tests/[feature-name].spec.ts
 \`\`\`
 
 **Important:**
-${feature.skipTests 
+${feature.skipTests
   ? "- Skip automated testing (skipTests=true) - user will manually verify\n- **DO NOT commit changes** - user will review and commit manually"
"- Skip automated testing (skipTests=true) - user will manually verify\n- **DO NOT commit changes** - user will review and commit manually" : "- **CONTINUE IMPLEMENTING until all tests pass** - don't stop at the first failure\n- Only mark as verified if Playwright tests pass\n- **CRITICAL: Delete test files after they pass** - tests should not accumulate\n- Update test utilities if functionality changed\n- Make a git commit when the feature is complete\n- Be thorough and persistent in fixing issues"} - **CRITICAL: Use UpdateFeatureStatus tool instead of editing feature_list.json directly** +- **CRITICAL: Always include a summary when marking feature as verified** Begin by reading the project structure and understanding what needs to be implemented or fixed.`; } @@ -154,7 +186,7 @@ Begin by reading the project structure and understanding what needs to be implem * Build prompt for resuming feature with previous context */ buildResumePrompt(feature, previousContext) { - const skipTestsNote = feature.skipTests + const skipTestsNote = feature.skipTests ? `\n**⚠️ IMPORTANT - Manual Testing Mode:**\nThis feature has skipTests=true, which means:\n- DO NOT commit changes automatically\n- DO NOT mark as verified - it will automatically go to "waiting_approval" status\n- The user will manually review and commit the changes\n- Just implement the feature and mark it as verified (it will be converted to waiting_approval)\n` : ""; @@ -179,11 +211,11 @@ Continue where you left off and complete the feature implementation: 1. Review the previous work context above to understand what has been done 2. Continue implementing the feature according to the description and steps -${feature.skipTests +${feature.skipTests ? "3. Test the implementation manually (no automated tests needed for skipTests features)" : "3. Write Playwright tests to verify the feature works correctly (if not already done)\n4. Run the tests and ensure they pass\n5. **DELETE the test file(s) you created** - tests are only for immediate verification"} ${feature.skipTests ? "4" : "6"}. **CRITICAL: Use the UpdateFeatureStatus tool to mark this feature as verified** - DO NOT manually edit .automaker/feature_list.json -${feature.skipTests +${feature.skipTests ? "5. **DO NOT commit changes** - the user will review and commit manually" : "7. Commit your changes with git"} @@ -191,20 +223,36 @@ ${feature.skipTests When you have completed the feature${feature.skipTests ? "" : " and all tests pass"}, you MUST use the \`mcp__automaker-tools__UpdateFeatureStatus\` tool to update the feature status: - Call the tool with: featureId="${feature.id}" and status="verified" +- **You can also include a summary parameter** to describe what was done: summary="Brief summary of changes" - **DO NOT manually edit the .automaker/feature_list.json file** - this can cause race conditions - The UpdateFeatureStatus tool safely updates the feature status without risk of corrupting other data - **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct behavior +**IMPORTANT - Feature Summary (REQUIRED):** + +When calling UpdateFeatureStatus, you MUST include a summary parameter that describes: +- What files were modified/created +- What functionality was added or changed +- Any notable implementation decisions + +Example: +\`\`\` +UpdateFeatureStatus(featureId="${feature.id}", status="verified", summary="Added dark mode toggle to settings. Modified: settings.tsx, theme-provider.tsx. 
@@ -305,6 +353,7 @@ Your role is to:
 - Ensure all tests pass before marking features complete (only if skipTests is false)
 - **DELETE test files after successful verification** - tests are only for immediate feature verification (only if skipTests is false)
 - **Use the UpdateFeatureStatus tool to mark features as verified** - NEVER manually edit feature_list.json
+- **Always include a summary parameter when calling UpdateFeatureStatus** - describe what was done
 - Commit working code to git (only if skipTests is false - skipTests features require manual review)
 - Be thorough and detail-oriented
 
@@ -316,12 +365,22 @@ If a feature has skipTests=true:
 - The user will manually verify and commit the changes
 
 **IMPORTANT - UpdateFeatureStatus Tool:**
-You have access to the \`mcp__automaker-tools__UpdateFeatureStatus\` tool. When the feature is complete${""} (and all tests pass if skipTests is false), use this tool to update the feature status:
-- Call with featureId and status="verified"
+You have access to the \`mcp__automaker-tools__UpdateFeatureStatus\` tool. When the feature is complete (and all tests pass if skipTests is false), use this tool to update the feature status:
+- Call with featureId, status="verified", and summary="Description of what was done"
 - **DO NOT manually edit .automaker/feature_list.json** - this can cause race conditions and restore old state
 - The tool safely updates the status without corrupting other feature data
 - **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct
 
+**IMPORTANT - Feature Summary (REQUIRED):**
+When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
+- What files were modified/created
+- What functionality was added or changed
+- Any notable implementation decisions
+
+Example: summary="Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx. Created useTheme hook."
+
+The summary will be displayed on the Kanban card so the user can quickly see what was done.
+
 **Testing Utilities (CRITICAL):**
 - **Create and maintain tests/utils.ts** with helper functions for finding elements and common operations
 - **Always use utilities in tests** instead of repeating selectors
@@ -366,6 +425,7 @@ Your role is to:
 - Continue rerunning tests and fixing issues until ALL tests pass (only if skipTests is false)
 - **DELETE test files after successful verification** - tests are only for immediate feature verification (only if skipTests is false)
 - **Use the UpdateFeatureStatus tool to mark features as verified** - NEVER manually edit feature_list.json
+- **Always include a summary parameter when calling UpdateFeatureStatus** - describe what was done
 - **Update test utilities (tests/utils.ts) if functionality changed** - keep helpers in sync with code (only if skipTests is false)
 - Commit working code to git (only if skipTests is false - skipTests features require manual review)
 
@@ -377,12 +437,22 @@ If a feature has skipTests=true:
 - The user will manually verify and commit the changes
 
 **IMPORTANT - UpdateFeatureStatus Tool:**
-You have access to the \`mcp__automaker-tools__UpdateFeatureStatus\` tool. When the feature is complete${""} (and all tests pass if skipTests is false), use this tool to update the feature status:
-- Call with featureId and status="verified"
+You have access to the \`mcp__automaker-tools__UpdateFeatureStatus\` tool. When the feature is complete (and all tests pass if skipTests is false), use this tool to update the feature status:
+- Call with featureId, status="verified", and summary="Description of what was done"
 - **DO NOT manually edit .automaker/feature_list.json** - this can cause race conditions and restore old state
 - The tool safely updates the status without corrupting other feature data
 - **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct
 
+**IMPORTANT - Feature Summary (REQUIRED):**
+When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
+- What files were modified/created
+- What functionality was added or changed
+- Any notable implementation decisions
+
+Example: summary="Fixed login validation. Modified: auth.ts, login-form.tsx. Added password strength check."
+
+The summary will be displayed on the Kanban card so the user can quickly see what was done.
+
 **Testing Utilities:**
 - Check if tests/utils.ts needs updates based on code changes
 - If a component's selectors or behavior changed, update the corresponding utility functions
 
@@ -405,7 +475,7 @@ You have access to:
 - Make git commits
 - **UpdateFeatureStatus tool** (mcp__automaker-tools__UpdateFeatureStatus) - Use this to update feature status
 
-**CRITICAL:** Be persistent and thorough - keep iterating on the implementation until all tests pass. Don't give up after the first failure. Always delete tests after they pass, use the UpdateFeatureStatus tool, and commit your work.`;
+**CRITICAL:** Be persistent and thorough - keep iterating on the implementation until all tests pass. Don't give up after the first failure. Always delete tests after they pass, use the UpdateFeatureStatus tool with a summary, and commit your work.`;
   }
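Since all three builder variants repeat the same summary mandate, a hedged consistency check (same assumed test setup as above) would keep them from drifting apart:

```ts
import { expect, it } from "vitest";
import PromptBuilder from "../electron/services/prompt-builder"; // export shape assumed

it("every prompt variant requires a feature summary", () => {
  const builder = new PromptBuilder();
  const feature = { id: "feature-123", description: "Example", steps: [], skipTests: false }; // hypothetical
  const prompts = [
    builder.buildFeaturePrompt(feature),
    builder.buildVerificationPrompt(feature),
    builder.buildResumePrompt(feature, "previous context"),
  ];
  for (const prompt of prompts) {
    expect(prompt).toContain("IMPORTANT - Feature Summary (REQUIRED)");
  }
});
```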
diff --git a/app/src/components/views/board-view.tsx b/app/src/components/views/board-view.tsx
index 82c1ba4e..67726939 100644
--- a/app/src/components/views/board-view.tsx
+++ b/app/src/components/views/board-view.tsx
@@ -404,6 +404,7 @@ export function BoardView() {
         startedAt: f.startedAt,
         imagePaths: f.imagePaths,
         skipTests: f.skipTests,
+        summary: f.summary,
       }));
       await api.writeFile(
         `${currentProject.path}/.automaker/feature_list.json`,
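The one-line change above matters because this mapper is a whitelist: any field it omits is dropped the next time the board serializes features back to disk. An abbreviated sketch (field list and stringification assumed from context):

```ts
import type { Feature } from "@/store/app-store";

// Sketch of the save path: `summary` must be mapped explicitly or it is
// silently lost on the next save.
function serializeFeatures(features: Feature[]): string {
  const toSave = features.map((f) => ({
    // …existing whitelisted fields (id, description, status, …)…
    startedAt: f.startedAt,
    imagePaths: f.imagePaths,
    skipTests: f.skipTests,
    summary: f.summary, // newly persisted
  }));
  return JSON.stringify(toSave, null, 2); // serialization format assumed
}
```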
diff --git a/app/src/components/views/kanban-card.tsx b/app/src/components/views/kanban-card.tsx
index d6943206..169cc190 100644
--- a/app/src/components/views/kanban-card.tsx
+++ b/app/src/components/views/kanban-card.tsx
@@ -1,6 +1,6 @@
 "use client";
 
-import { useState } from "react";
+import { useState, useEffect } from "react";
 import { useSortable } from "@dnd-kit/sortable";
 import { CSS } from "@dnd-kit/utilities";
 import { cn } from "@/lib/utils";
@@ -36,8 +36,20 @@ import {
   ArrowLeft,
   MessageSquare,
   GitCommit,
+  Cpu,
+  Wrench,
+  ListTodo,
+  Sparkles,
+  Expand,
 } from "lucide-react";
 import { CountUpTimer } from "@/components/ui/count-up-timer";
+import { getElectronAPI } from "@/lib/electron";
+import {
+  parseAgentContext,
+  AgentTaskInfo,
+  formatModelName,
+  DEFAULT_MODEL,
+} from "@/lib/agent-context-parser";
 
 interface KanbanCardProps {
   feature: Feature;
@@ -54,6 +66,10 @@ interface KanbanCardProps {
   hasContext?: boolean;
   isCurrentAutoTask?: boolean;
   shortcutKey?: string;
+  /** Context content for extracting progress info */
+  contextContent?: string;
+  /** Feature summary from agent completion */
+  summary?: string;
 }
 
 export function KanbanCard({
@@ -71,8 +87,56 @@ export function KanbanCard({
   hasContext,
   isCurrentAutoTask,
   shortcutKey,
+  contextContent,
+  summary,
 }: KanbanCardProps) {
   const [isDeleteDialogOpen, setIsDeleteDialogOpen] = useState(false);
+  const [isSummaryDialogOpen, setIsSummaryDialogOpen] = useState(false);
+  const [agentInfo, setAgentInfo] = useState<AgentTaskInfo | null>(null);
+
+  // Load context file for in_progress, waiting_approval, and verified features
+  useEffect(() => {
+    const loadContext = async () => {
+      // Use provided context or load from file
+      if (contextContent) {
+        const info = parseAgentContext(contextContent);
+        setAgentInfo(info);
+        return;
+      }
+
+      // Only load for non-backlog features
+      if (feature.status === "backlog") {
+        setAgentInfo(null);
+        return;
+      }
+
+      try {
+        const api = getElectronAPI();
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        const currentProject = (window as any).__currentProject;
+        if (!currentProject?.path) return;
+
+        const contextPath = `${currentProject.path}/.automaker/agents-context/${feature.id}.md`;
+        const result = await api.readFile(contextPath);
+
+        if (result.success && result.content) {
+          const info = parseAgentContext(result.content);
+          setAgentInfo(info);
+        }
+      } catch {
+        // Context file might not exist
+        console.debug("[KanbanCard] No context file for feature:", feature.id);
+      }
+    };
+
+    loadContext();
+
+    // Reload context periodically while feature is running
+    if (isCurrentAutoTask) {
+      const interval = setInterval(loadContext, 3000);
+      return () => clearInterval(interval);
+    }
+  }, [feature.id, feature.status, contextContent, isCurrentAutoTask]);
 
   const handleDeleteClick = (e: React.MouseEvent) => {
     e.stopPropagation();
@@ -220,6 +284,141 @@
       )}
+
+      {/* Agent Info Panel - shows for in_progress, waiting_approval, verified */}
+      {feature.status !== "backlog" && agentInfo && (
+        <div className="mt-2 space-y-2 rounded-md border border-border/50 bg-muted/30 p-2 text-xs">
+          {/* Model & Progress Bar */}
+          <div className="flex items-center gap-2">
+            <div className="flex items-center gap-1">
+              <Cpu className="h-3 w-3" />
+              {formatModelName(DEFAULT_MODEL)}
+            </div>
+            {agentInfo.currentPhase && (
+              <div className="capitalize text-muted-foreground">
+                {agentInfo.currentPhase}
+              </div>
+            )}
+          </div>
+
+          {/* Progress Indicator */}
+          {(isCurrentAutoTask || feature.status === "in_progress") && (
+            <div className="space-y-1">
+              <div className="h-1.5 w-full overflow-hidden rounded-full bg-muted">
+                <div
+                  className="h-full rounded-full bg-primary transition-all"
+                  style={{ width: `${agentInfo.progressPercentage}%` }}
+                />
+              </div>
+              <div className="flex items-center justify-between">
+                <div className="flex items-center gap-1">
+                  <Wrench className="h-3 w-3" />
+                  <span>{agentInfo.toolCallCount} tools</span>
+                  {agentInfo.lastToolUsed && (
+                    <span className="text-muted-foreground">
+                      {agentInfo.lastToolUsed}
+                    </span>
+                  )}
+                </div>
+                <span>{Math.round(agentInfo.progressPercentage)}%</span>
+              </div>
+            </div>
+          )}
+
+          {/* Task List Progress (if todos found) */}
+          {agentInfo.todos.length > 0 && (
+            <div className="space-y-1">
+              <div className="flex items-center gap-1">
+                <ListTodo className="h-3 w-3" />
+                <span>
+                  {agentInfo.todos.filter(t => t.status === "completed").length}/{agentInfo.todos.length} tasks
+                </span>
+              </div>
+              <div className="space-y-0.5">
+                {agentInfo.todos.slice(0, 3).map((todo, idx) => (
+                  <div key={idx} className="flex items-center gap-1">
+                    {todo.status === "completed" ? (
+                      <CheckCircle2 className="h-3 w-3 text-green-500" />
+                    ) : todo.status === "in_progress" ? (
+                      <Loader2 className="h-3 w-3 animate-spin" />
+                    ) : (
+                      <Circle className="h-3 w-3 text-muted-foreground" />
+                    )}
+                    <span className="truncate">
+                      {todo.content}
+                    </span>
+                  </div>
+                ))}
+                {agentInfo.todos.length > 3 && (
+                  <div className="text-muted-foreground">
+                    +{agentInfo.todos.length - 3} more
+                  </div>
+                )}
+              </div>
+            </div>
+          )}
+
+          {/* Summary for waiting_approval and verified - prioritize feature.summary from UpdateFeatureStatus */}
+          {(feature.status === "waiting_approval" || feature.status === "verified") && (
+            <>
+              {(feature.summary || summary || agentInfo.summary) && (
+                <div className="space-y-1">
+                  <div className="flex items-center justify-between">
+                    <div className="flex items-center gap-1">
+                      <Sparkles className="h-3 w-3" />
+                      <span>Summary</span>
+                    </div>
+                    <button
+                      onClick={(e) => {
+                        e.stopPropagation();
+                        setIsSummaryDialogOpen(true);
+                      }}
+                    >
+                      <Expand className="h-3 w-3" />
+                    </button>
+                  </div>
+                  <div className="line-clamp-3 text-muted-foreground">
+                    {feature.summary || summary || agentInfo.summary}
+                  </div>
+                </div>
+              )}
+              {/* Show tool count even without summary */}
+              {!feature.summary && !summary && !agentInfo.summary && agentInfo.toolCallCount > 0 && (
+                <div className="flex items-center gap-2 text-muted-foreground">
+                  <Wrench className="h-3 w-3" />
+                  <span>{agentInfo.toolCallCount} tool calls</span>
+                  {agentInfo.todos.length > 0 && (
+                    <span>
+                      <ListTodo className="inline h-3 w-3" />{" "}
+                      {agentInfo.todos.filter(t => t.status === "completed").length} tasks done
+                    </span>
+                  )}
+                </div>
+              )}
+            </>
+          )}
+        </div>
+      )}
 
       {/* Actions */}
       <div className="flex items-center gap-1">
         {isCurrentAutoTask && (
@@ -471,6 +670,40 @@ export function KanbanCard({
+
+      {/* Summary Modal */}
+      <Dialog open={isSummaryDialogOpen} onOpenChange={setIsSummaryDialogOpen}>
+        <DialogContent onClick={(e) => e.stopPropagation()}>
+          <DialogHeader>
+            <DialogTitle>Implementation Summary</DialogTitle>
+            <DialogDescription>
+              {feature.description}
+            </DialogDescription>
+          </DialogHeader>
+          <div className="max-h-[60vh] overflow-y-auto rounded-md bg-muted p-3">
+            <pre className="whitespace-pre-wrap text-sm">
+              {feature.summary || summary || agentInfo?.summary || "No summary available"}
+            </pre>
+          </div>
+        </DialogContent>
+      </Dialog>
     </div>
   );
 }
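The card and the modal resolve which text to show with the same three-level fallback: the structured summary persisted through UpdateFeatureStatus wins, then the prop passed down from the parent view, then whatever the parser scraped out of the context file. As a standalone rule:

```ts
// Fallback order used in the panel and the summary modal above.
function pickSummary(
  featureSummary?: string, // feature.summary, persisted via UpdateFeatureStatus
  propSummary?: string,    // summary prop from the parent view
  parsedSummary?: string   // agentInfo.summary, scraped from the context file
): string {
  return featureSummary || propSummary || parsedSummary || "No summary available";
}
```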
diff --git a/app/src/lib/agent-context-parser.ts b/app/src/lib/agent-context-parser.ts
new file mode 100644
index 00000000..d77ab166
--- /dev/null
+++ b/app/src/lib/agent-context-parser.ts
@@ -0,0 +1,246 @@
+/**
+ * Agent Context Parser
+ * Extracts useful information from agent context files for display in kanban cards
+ */
+
+export interface AgentTaskInfo {
+  // Task list extracted from TodoWrite tool calls
+  todos: {
+    content: string;
+    status: "pending" | "in_progress" | "completed";
+  }[];
+
+  // Progress stats
+  toolCallCount: number;
+  lastToolUsed?: string;
+
+  // Phase info
+  currentPhase?: "planning" | "action" | "verification";
+
+  // Summary (if feature is completed)
+  summary?: string;
+
+  // Estimated progress percentage based on phase and tool calls
+  progressPercentage: number;
+}
+
+/**
+ * Default model used by the feature executor
+ */
+export const DEFAULT_MODEL = "claude-opus-4-5-20251101";
+
+/**
+ * Formats a model name for display
+ */
+export function formatModelName(model: string): string {
+  if (model.includes("opus")) return "Opus 4.5";
+  if (model.includes("sonnet")) return "Sonnet 4";
+  if (model.includes("haiku")) return "Haiku 3.5";
+  return model.split("-").slice(1, 3).join(" ");
+}
+
+/**
+ * Extracts todos from the context content
+ * Looks for TodoWrite tool calls in the format:
+ * TodoWrite: [{"content": "...", "status": "..."}]
+ */
+function extractTodos(content: string): AgentTaskInfo["todos"] {
+  const todos: AgentTaskInfo["todos"] = [];
+
+  // Look for TodoWrite tool inputs
+  const todoMatches = content.matchAll(/TodoWrite.*?(?:"todos"\s*:\s*)?(\[[\s\S]*?\](?=\s*(?:\}|$|🔧|📋|⚡|✅|❌)))/g);
+
+  for (const match of todoMatches) {
+    try {
+      // Try to find JSON array in the match
+      const jsonStr = match[1] || match[0];
+      const arrayMatch = jsonStr.match(/\[[\s\S]*?\]/);
+      if (arrayMatch) {
+        const parsed = JSON.parse(arrayMatch[0]);
+        if (Array.isArray(parsed)) {
+          for (const item of parsed) {
+            if (item.content && item.status) {
+              // Check if this todo already exists (avoid duplicates)
+              if (!todos.some(t => t.content === item.content)) {
+                todos.push({
+                  content: item.content,
+                  status: item.status,
+                });
+              }
+            }
+          }
+        }
+      }
+    } catch {
+      // Ignore parse errors
+    }
+  }
+
+  // Also try to extract from markdown task lists
+  const markdownTodos = content.matchAll(/- \[([ xX])\] (.+)/g);
+  for (const match of markdownTodos) {
+    const isCompleted = match[1].toLowerCase() === "x";
+    const content = match[2].trim();
+    if (!todos.some(t => t.content === content)) {
+      todos.push({
+        content,
+        status: isCompleted ? "completed" : "pending",
+      });
+    }
+  }
+
+  return todos;
+}
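Given a context file containing both a TodoWrite payload and a markdown checklist, the extraction above deduplicates by content. A small illustration via parseAgentContext (exported further down):

```ts
import { parseAgentContext } from "@/lib/agent-context-parser";

const sample = [
  '🔧 Tool: TodoWrite {"todos": [{"content": "Add summary field", "status": "completed"}]}',
  "- [x] Add summary field", // duplicate of the TodoWrite entry, skipped
  "- [ ] Update kanban card",
].join("\n");

const { todos, toolCallCount, lastToolUsed } = parseAgentContext(sample);
// todos         => [{ content: "Add summary field", status: "completed" },
//                   { content: "Update kanban card", status: "pending" }]
// toolCallCount => 1 ("🔧 Tool:" appears once)
// lastToolUsed  => "TodoWrite"
```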
"completed" : "pending", + }); + } + } + + return todos; +} + +/** + * Counts tool calls in the content + */ +function countToolCalls(content: string): number { + const matches = content.match(/🔧\s*Tool:/g); + return matches?.length || 0; +} + +/** + * Gets the last tool used + */ +function getLastToolUsed(content: string): string | undefined { + const matches = [...content.matchAll(/🔧\s*Tool:\s*(\S+)/g)]; + if (matches.length > 0) { + return matches[matches.length - 1][1]; + } + return undefined; +} + +/** + * Determines the current phase from the content + */ +function getCurrentPhase(content: string): "planning" | "action" | "verification" | undefined { + // Find the last phase marker + const planningIndex = content.lastIndexOf("📋"); + const actionIndex = content.lastIndexOf("⚡"); + const verificationIndex = content.lastIndexOf("✅"); + + const maxIndex = Math.max(planningIndex, actionIndex, verificationIndex); + + if (maxIndex === -1) return undefined; + if (maxIndex === verificationIndex) return "verification"; + if (maxIndex === actionIndex) return "action"; + return "planning"; +} + +/** + * Extracts a summary from completed feature context + */ +function extractSummary(content: string): string | undefined { + // Look for completion markers and extract surrounding text + const completionMatch = content.match(/✓ (?:Feature|Verification|Task) (?:successfully|completed|verified)[^\n]*(?:\n[^\n]{1,200})?/i); + if (completionMatch) { + return completionMatch[0].trim(); + } + + // Look for summary sections + const summaryMatch = content.match(/## Summary[^\n]*\n([\s\S]{1,500}?)(?=\n##|\n🔧|$)/i); + if (summaryMatch) { + return summaryMatch[1].trim(); + } + + // Look for "What was done" type sections + const whatWasDoneMatch = content.match(/(?:What was done|Changes made|Implemented)[^\n]*\n([\s\S]{1,500}?)(?=\n##|\n🔧|$)/i); + if (whatWasDoneMatch) { + return whatWasDoneMatch[1].trim(); + } + + return undefined; +} + +/** + * Calculates progress percentage based on phase and context + * Uses a more dynamic approach that better reflects actual progress + */ +function calculateProgress(phase: AgentTaskInfo["currentPhase"], toolCallCount: number, todos: AgentTaskInfo["todos"]): number { + // If we have todos, primarily use them for progress calculation + if (todos.length > 0) { + const completedCount = todos.filter(t => t.status === "completed").length; + const inProgressCount = todos.filter(t => t.status === "in_progress").length; + + // Weight: completed = 1, in_progress = 0.5, pending = 0 + const progress = ((completedCount + inProgressCount * 0.5) / todos.length) * 90; + + // Add a small base amount and cap at 95% + return Math.min(5 + progress, 95); + } + + // Fallback: use phase-based progress with tool call scaling + let phaseProgress = 0; + switch (phase) { + case "planning": + // Planning phase: 5-25% + phaseProgress = 5 + Math.min(toolCallCount * 1, 20); + break; + case "action": + // Action phase: 25-75% based on tool calls (logarithmic scaling) + phaseProgress = 25 + Math.min(Math.log2(toolCallCount + 1) * 10, 50); + break; + case "verification": + // Verification phase: 75-95% + phaseProgress = 75 + Math.min(toolCallCount * 0.5, 20); + break; + default: + // Starting: just use tool calls + phaseProgress = Math.min(toolCallCount * 0.5, 10); + } + + return Math.min(Math.round(phaseProgress), 95); +} + +/** + * Parses agent context content and extracts useful information + */ +export function parseAgentContext(content: string): AgentTaskInfo { + if (!content || !content.trim()) { + 
+/**
+ * Parses agent context content and extracts useful information
+ */
+export function parseAgentContext(content: string): AgentTaskInfo {
+  if (!content || !content.trim()) {
+    return {
+      todos: [],
+      toolCallCount: 0,
+      progressPercentage: 0,
+    };
+  }
+
+  const todos = extractTodos(content);
+  const toolCallCount = countToolCalls(content);
+  const lastToolUsed = getLastToolUsed(content);
+  const currentPhase = getCurrentPhase(content);
+  const summary = extractSummary(content);
+  const progressPercentage = calculateProgress(currentPhase, toolCallCount, todos);
+
+  return {
+    todos,
+    toolCallCount,
+    lastToolUsed,
+    currentPhase,
+    summary,
+    progressPercentage,
+  };
+}
+
+/**
+ * Quick stats for display in card badges
+ */
+export interface QuickStats {
+  toolCalls: number;
+  completedTasks: number;
+  totalTasks: number;
+  phase?: string;
+}
+
+/**
+ * Extracts quick stats from context for compact display
+ */
+export function getQuickStats(content: string): QuickStats {
+  const info = parseAgentContext(content);
+  return {
+    toolCalls: info.toolCallCount,
+    completedTasks: info.todos.filter(t => t.status === "completed").length,
+    totalTasks: info.todos.length,
+    phase: info.currentPhase,
+  };
+}
diff --git a/app/src/store/app-store.ts b/app/src/store/app-store.ts
index 1ce968e4..4a7c0c38 100644
--- a/app/src/store/app-store.ts
+++ b/app/src/store/app-store.ts
@@ -61,6 +61,7 @@ export interface Feature {
   imagePaths?: FeatureImagePath[]; // Paths to temp files for agent context
   startedAt?: string; // ISO timestamp for when the card moved to in_progress
   skipTests?: boolean; // When true, skip TDD approach and require manual verification
+  summary?: string; // Summary of what was done/modified by the agent
 }
 
 export interface AppState {
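End to end: the agent supplies a summary through UpdateFeatureStatus, the loader persists it, the board view round-trips it, the Feature type carries it, and the card renders it; getQuickStats covers the compact badge case where only a raw context file is available. Illustrative usage (the context string is hypothetical):

```ts
import { getQuickStats } from "@/lib/agent-context-parser";

const stats = getQuickStats(
  "📋 Planning…\n🔧 Tool: Read\n🔧 Tool: Edit\n- [x] Wire summary through loader\n- [ ] Render summary on card"
);
// stats => { toolCalls: 2, completedTasks: 1, totalTasks: 2, phase: "planning" }
```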