Merge remote-tracking branch 'origin/main' into crunchyman/feat.add.mcp.2

Eyal Toledano
2025-03-26 23:54:47 -04:00
23 changed files with 2397 additions and 288 deletions

View File

@@ -360,6 +360,43 @@ When testing ES modules (`"type": "module"` in package.json), traditional mockin
- ❌ **DON'T**: Write tests that depend on execution order
- ❌ **DON'T**: Define mock variables before `jest.mock()` calls (they won't be accessible due to hoisting)
- **Task File Operations**
- ✅ **DO**: Use test-specific file paths (e.g., 'test-tasks.json') for all operations
- ✅ **DO**: Mock `readJSON` and `writeJSON` to avoid real file system interactions
- ✅ **DO**: Verify file operations use the correct paths in `expect` statements
- ✅ **DO**: Use different paths for each test to avoid test interdependence
- ✅ **DO**: Verify modifications on the in-memory task objects passed to `writeJSON`
- ❌ **DON'T**: Modify real task files (tasks.json) during tests
- ❌ **DON'T**: Skip testing file operations because they're "just I/O"
```javascript
// ✅ DO: Test file operations without real file system changes
test('should update task status in tasks.json', async () => {
// Setup mock to return sample data
readJSON.mockResolvedValue(JSON.parse(JSON.stringify(sampleTasks)));
// Use test-specific file path
await setTaskStatus('test-tasks.json', '2', 'done');
// Verify correct file path was read
expect(readJSON).toHaveBeenCalledWith('test-tasks.json');
// Verify correct file path was written with updated content
expect(writeJSON).toHaveBeenCalledWith(
'test-tasks.json',
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({
id: 2,
status: 'done'
})
])
})
);
});
```
## Running Tests
```bash
@@ -396,6 +433,230 @@ npm test -- -t "pattern to match"
- Reset state in `beforeEach` and `afterEach` hooks
- Avoid global state modifications
## Common Testing Pitfalls and Solutions
- **Complex Library Mocking**
- **Problem**: Trying to create full mocks of complex libraries like Commander.js can be error-prone
- **Solution**: Instead of mocking the entire library, test the command handlers directly by calling your action handlers with the expected arguments
```javascript
// ❌ DON'T: Create complex mocks of Commander.js
class MockCommand {
constructor() { /* Complex mock implementation */ }
option() { /* ... */ }
action() { /* ... */ }
// Many methods to implement
}
// ✅ DO: Test the command handlers directly
test('should use default PRD path when no arguments provided', async () => {
// Call the action handler directly with the right params
await parsePrdAction(undefined, { numTasks: '10', output: 'tasks/tasks.json' });
// Assert on behavior
expect(mockParsePRD).toHaveBeenCalledWith('scripts/prd.txt', 'tasks/tasks.json', 10);
});
```
- **ES Module Mocking Challenges**
- **Problem**: ES modules don't support `require()` and imports are read-only
- **Solution**: Use Jest's module factory pattern and ensure mocks are defined before imports
```javascript
// ❌ DON'T: Try to modify imported modules
import { detectCamelCaseFlags } from '../../scripts/modules/utils.js';
detectCamelCaseFlags = jest.fn(); // Error: Assignment to constant variable
// ❌ DON'T: Try to use require with ES modules
const utils = require('../../scripts/modules/utils.js'); // Error in ES modules
// ✅ DO: Use Jest module factory pattern
jest.mock('../../scripts/modules/utils.js', () => ({
detectCamelCaseFlags: jest.fn(),
toKebabCase: jest.fn()
}));
// Import after mocks are defined
import { detectCamelCaseFlags } from '../../scripts/modules/utils.js';
```
- **Function Redeclaration Errors**
- **Problem**: Declaring the same function twice in a test file causes errors
- **Solution**: Use different function names or create local test-specific implementations
```javascript
// ❌ DON'T: Redefine imported functions with the same name
import { detectCamelCaseFlags } from '../../scripts/modules/utils.js';
function detectCamelCaseFlags() { /* Test implementation */ }
// Error: Identifier has already been declared
// ✅ DO: Use a different name for test implementations
function testDetectCamelCaseFlags() { /* Test implementation */ }
```
- **Console.log Circular References**
- **Problem**: Creating infinite recursion by spying on console.log while also allowing it to log
- **Solution**: Implement a mock that doesn't call the original function
```javascript
// ❌ DON'T: Create circular references with console.log
const mockConsoleLog = jest.spyOn(console, 'log');
mockConsoleLog.mockImplementation(console.log); // Creates infinite recursion
// ✅ DO: Use a non-recursive mock implementation
const mockConsoleLog = jest.spyOn(console, 'log').mockImplementation(() => {});
```
- **Mock Function Method Issues**
- **Problem**: Trying to use jest.fn() methods on imported functions that aren't properly mocked
- **Solution**: Create explicit jest.fn() mocks for functions you need to call jest methods on
```javascript
// ❌ DON'T: Try to use jest methods on imported functions without proper mocking
import { parsePRD } from '../../scripts/modules/task-manager.js';
parsePRD.mockClear(); // Error: parsePRD.mockClear is not a function
// ✅ DO: Create proper jest.fn() mocks
const mockParsePRD = jest.fn().mockResolvedValue(undefined);
jest.mock('../../scripts/modules/task-manager.js', () => ({
parsePRD: mockParsePRD
}));
// Now you can use:
mockParsePRD.mockClear();
```
- **EventEmitter Max Listeners Warning**
- **Problem**: Commander.js adds many listeners in complex mocks, causing warnings
- **Solution**: Either increase the max listeners limit or avoid deep mocking
```javascript
// Option 1: Increase max listeners if you must mock Commander
class MockCommand extends EventEmitter {
constructor() {
super();
this.setMaxListeners(20); // Avoid MaxListenersExceededWarning
}
}
// Option 2 (preferred): Test command handlers directly instead
// (as shown in the first example)
```
- **Test Isolation Issues**
- **Problem**: Tests affecting each other due to shared mock state
- **Solution**: Reset all mocks in beforeEach and use separate test-specific mocks
```javascript
// ❌ DON'T: Allow mock state to persist between tests
const globalMock = jest.fn().mockReturnValue('test');
// ✅ DO: Clear mocks before each test
beforeEach(() => {
jest.clearAllMocks();
// Set up test-specific mock behavior
mockFunction.mockReturnValue('test-specific value');
});
```
## Reliable Testing Techniques
- **Create Simplified Test Functions**
- Create simplified versions of complex functions that focus only on core logic
- Remove file system operations, API calls, and other external dependencies
- Pass all dependencies as parameters to make testing easier
```javascript
// Original function (hard to test)
const setTaskStatus = async (taskId, newStatus) => {
const tasksPath = 'tasks/tasks.json';
const data = await readJSON(tasksPath);
// Update task status logic
await writeJSON(tasksPath, data);
return data;
};
// Test-friendly simplified function (easy to test)
const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
// Same core logic without file operations
// Update task status logic on provided tasksData object
return tasksData; // Return updated data for assertions
};
```
- **Avoid Real File System Operations**
- Never write to real files during tests
- Create test-specific versions of file operation functions
- Mock all file system operations including read, write, exists, etc.
- Verify function behavior using the in-memory data structures
```javascript
// Mock file operations
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
jest.mock('../../scripts/modules/utils.js', () => ({
readJSON: mockReadJSON,
writeJSON: mockWriteJSON,
}));
test('should update task status correctly', () => {
// Setup mock data
const testData = JSON.parse(JSON.stringify(sampleTasks));
mockReadJSON.mockReturnValue(testData);
// Call the function that would normally modify files
const result = testSetTaskStatus(testData, '1', 'done');
// Assert on the in-memory data structure
expect(result.tasks[0].status).toBe('done');
});
```
- **Data Isolation Between Tests**
- Always create fresh copies of test data for each test
- Use `JSON.parse(JSON.stringify(original))` for deep cloning
- Reset all mocks before each test with `jest.clearAllMocks()`
- Avoid state that persists between tests
```javascript
beforeEach(() => {
jest.clearAllMocks();
// Deep clone the test data
testTasksData = JSON.parse(JSON.stringify(sampleTasks));
});
```
- **Test All Path Variations**
- Regular tasks and subtasks
- Single items and multiple items
- Success paths and error paths
- Edge cases (empty data, invalid inputs, etc.)
```javascript
// Multiple test cases covering different scenarios
test('should update regular task status', () => {
/* test implementation */
});
test('should update subtask status', () => {
/* test implementation */
});
test('should update multiple tasks when given comma-separated IDs', () => {
/* test implementation */
});
test('should throw error for non-existent task ID', () => {
/* test implementation */
});
```
- **Stabilize Tests With Predictable Input/Output**
- Use consistent, predictable test fixtures
- Avoid random values or time-dependent data
- Make tests deterministic for reliable CI/CD
- Control all variables that might affect test outcomes
```javascript
// Use a specific known date instead of current date
const fixedDate = new Date('2023-01-01T12:00:00Z');
jest.spyOn(global, 'Date').mockImplementation(() => fixedDate);
```
See [tests/README.md](mdc:tests/README.md) for more details on the testing approach.
Refer to [jest.config.js](mdc:jest.config.js) for Jest configuration options.

.cursorignore Normal file
View File

@@ -0,0 +1,6 @@
package-lock.json
# Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv)
node_modules/

assets/.windsurfrules Normal file
View File

@@ -0,0 +1,474 @@
Below you will find a variety of important rules spanning:
- the dev_workflow
- the .windsurfrules document self-improvement workflow
- the template to follow when modifying or adding new sections/rules to this document.
---
DEV_WORKFLOW
---
description: Guide for using meta-development script (scripts/dev.js) to manage task-driven development workflows
globs: **/*
filesToApplyRule: **/*
alwaysApply: true
---
- **Global CLI Commands**
- Task Master now provides a global CLI through the `task-master` command
- All functionality from `scripts/dev.js` is available through this interface
- Install globally with `npm install -g claude-task-master` or use locally via `npx`
- Use `task-master <command>` instead of `node scripts/dev.js <command>`
- Examples:
- `task-master list` instead of `node scripts/dev.js list`
- `task-master next` instead of `node scripts/dev.js next`
- `task-master expand --id=3` instead of `node scripts/dev.js expand --id=3`
- All commands accept the same options as their script equivalents
- The CLI provides additional commands like `task-master init` for project setup
- **Development Workflow Process**
- Start new projects by running `task-master init` or `node scripts/dev.js parse-prd --input=<prd-file.txt>` to generate initial tasks.json
- Begin coding sessions with `task-master list` to see current tasks, status, and IDs
- Analyze task complexity with `task-master analyze-complexity --research` before breaking down tasks
- Select tasks based on dependencies (all marked 'done'), priority level, and ID order
- Clarify tasks by checking task files in tasks/ directory or asking for user input
- View specific task details using `task-master show <id>` to understand implementation requirements
- Break down complex tasks using `task-master expand --id=<id>` with appropriate flags
- Clear existing subtasks if needed using `task-master clear-subtasks --id=<id>` before regenerating
- Implement code following task details, dependencies, and project standards
- Verify tasks according to test strategies before marking as complete
- Mark completed tasks with `task-master set-status --id=<id> --status=done`
- Update dependent tasks when implementation differs from original plan
- Generate task files with `task-master generate` after updating tasks.json
- Maintain valid dependency structure with `task-master fix-dependencies` when needed
- Respect dependency chains and task priorities when selecting work
- Report progress regularly using the list command (see the example session below)
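A session stitching the commands above together might look like the following sketch (task ID 3 is purely illustrative):
```bash
# Review current tasks and pick the next one to work on
task-master list
task-master next

# Inspect the chosen task and, if it is complex, break it down
task-master show 3
task-master analyze-complexity --research
task-master expand --id=3 --research

# After implementing and verifying the work
task-master set-status --id=3 --status=done
task-master generate
```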
- **Task Complexity Analysis**
- Run `node scripts/dev.js analyze-complexity --research` for comprehensive analysis
- Review complexity report in scripts/task-complexity-report.json
- Or use `node scripts/dev.js complexity-report` for a formatted, readable version of the report
- Focus on tasks with highest complexity scores (8-10) for detailed breakdown
- Use analysis results to determine appropriate subtask allocation
- Note that reports are automatically used by the expand command
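The report schema is not reproduced in this document; purely as a hypothetical illustration (the field names below are assumptions, not taken from the real report), a single entry might carry information along these lines:
```javascript
// Hypothetical shape of one entry in scripts/task-complexity-report.json.
// Field names are illustrative only; consult a generated report for the real schema.
const exampleReportEntry = {
  taskId: 8,
  complexityScore: 9,        // scores of 8-10 are candidates for detailed breakdown
  recommendedSubtasks: 5,    // picked up by the expand command when --num is not given
  expansionPrompt: 'Break the authentication flow into discrete, testable steps.'
};
```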
- **Task Breakdown Process**
- For tasks with complexity analysis, use `node scripts/dev.js expand --id=<id>`
- Otherwise use `node scripts/dev.js expand --id=<id> --subtasks=<number>`
- Add `--research` flag to leverage Perplexity AI for research-backed expansion
- Use `--prompt="<context>"` to provide additional context when needed
- Review and adjust generated subtasks as necessary
- Use `--all` flag to expand multiple pending tasks at once
- If subtasks need regeneration, clear them first with `clear-subtasks` command
- **Implementation Drift Handling**
- When implementation differs significantly from planned approach
- When future tasks need modification due to current implementation choices
- When new dependencies or requirements emerge
- Call `node scripts/dev.js update --from=<futureTaskId> --prompt="<explanation>"` to update tasks.json
- **Task Status Management**
- Use 'pending' for tasks ready to be worked on
- Use 'done' for completed and verified tasks
- Use 'deferred' for postponed tasks
- Add custom status values as needed for project-specific workflows
- **Task File Format Reference**
```
# Task ID: <id>
# Title: <title>
# Status: <status>
# Dependencies: <comma-separated list of dependency IDs>
# Priority: <priority>
# Description: <brief description>
# Details:
<detailed implementation notes>
# Test Strategy:
<verification approach>
```
- **Command Reference: parse-prd**
- Legacy Syntax: `node scripts/dev.js parse-prd --input=<prd-file.txt>`
- CLI Syntax: `task-master parse-prd --input=<prd-file.txt>`
- Description: Parses a PRD document and generates a tasks.json file with structured tasks
- Parameters:
- `--input=<file>`: Path to the PRD text file (default: sample-prd.txt)
- Example: `task-master parse-prd --input=requirements.txt`
- Notes: Will overwrite existing tasks.json file. Use with caution.
- **Command Reference: update**
- Legacy Syntax: `node scripts/dev.js update --from=<id> --prompt="<prompt>"`
- CLI Syntax: `task-master update --from=<id> --prompt="<prompt>"`
- Description: Updates tasks with ID >= specified ID based on the provided prompt
- Parameters:
- `--from=<id>`: Task ID from which to start updating (required)
- `--prompt="<text>"`: Explanation of changes or new context (required)
- Example: `task-master update --from=4 --prompt="Now we are using Express instead of Fastify."`
- Notes: Only updates tasks not marked as 'done'. Completed tasks remain unchanged.
- **Command Reference: generate**
- Legacy Syntax: `node scripts/dev.js generate`
- CLI Syntax: `task-master generate`
- Description: Generates individual task files in tasks/ directory based on tasks.json
- Parameters:
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
- `--output=<dir>, -o`: Output directory (default: 'tasks')
- Example: `task-master generate`
- Notes: Overwrites existing task files. Creates tasks/ directory if needed.
- **Command Reference: set-status**
- Legacy Syntax: `node scripts/dev.js set-status --id=<id> --status=<status>`
- CLI Syntax: `task-master set-status --id=<id> --status=<status>`
- Description: Updates the status of a specific task in tasks.json
- Parameters:
- `--id=<id>`: ID of the task to update (required)
- `--status=<status>`: New status value (required)
- Example: `task-master set-status --id=3 --status=done`
- Notes: Common values are 'done', 'pending', and 'deferred', but any string is accepted.
- **Command Reference: list**
- Legacy Syntax: `node scripts/dev.js list`
- CLI Syntax: `task-master list`
- Description: Lists all tasks in tasks.json with IDs, titles, and status
- Parameters:
- `--status=<status>, -s`: Filter by status
- `--with-subtasks`: Show subtasks for each task
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
- Example: `task-master list`
- Notes: Provides quick overview of project progress. Use at start of sessions.
- **Command Reference: expand**
- Legacy Syntax: `node scripts/dev.js expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
- CLI Syntax: `task-master expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
- Description: Expands a task with subtasks for detailed implementation
- Parameters:
- `--id=<id>`: ID of task to expand (required unless using --all)
- `--all`: Expand all pending tasks, prioritized by complexity
- `--num=<number>`: Number of subtasks to generate (default: from complexity report)
- `--research`: Use Perplexity AI for research-backed generation
- `--prompt="<text>"`: Additional context for subtask generation
- `--force`: Regenerate subtasks even for tasks that already have them
- Example: `task-master expand --id=3 --num=5 --research --prompt="Focus on security aspects"`
- Notes: Uses complexity report recommendations if available.
- **Command Reference: analyze-complexity**
- Legacy Syntax: `node scripts/dev.js analyze-complexity [options]`
- CLI Syntax: `task-master analyze-complexity [options]`
- Description: Analyzes task complexity and generates expansion recommendations
- Parameters:
- `--output=<file>, -o`: Output file path (default: scripts/task-complexity-report.json)
- `--model=<model>, -m`: Override LLM model to use
- `--threshold=<number>, -t`: Minimum score for expansion recommendation (default: 5)
- `--file=<path>, -f`: Use alternative tasks.json file
- `--research, -r`: Use Perplexity AI for research-backed analysis
- Example: `task-master analyze-complexity --research`
- Notes: Report includes complexity scores, recommended subtasks, and tailored prompts.
- **Command Reference: clear-subtasks**
- Legacy Syntax: `node scripts/dev.js clear-subtasks --id=<id>`
- CLI Syntax: `task-master clear-subtasks --id=<id>`
- Description: Removes subtasks from specified tasks to allow regeneration
- Parameters:
- `--id=<id>`: ID or comma-separated IDs of tasks to clear subtasks from
- `--all`: Clear subtasks from all tasks
- Examples:
- `task-master clear-subtasks --id=3`
- `task-master clear-subtasks --id=1,2,3`
- `task-master clear-subtasks --all`
- Notes:
- Task files are automatically regenerated after clearing subtasks
- Can be combined with expand command to immediately generate new subtasks
- Works with both parent tasks and individual subtasks
- **Task Structure Fields**
- **id**: Unique identifier for the task (Example: `1`)
- **title**: Brief, descriptive title (Example: `"Initialize Repo"`)
- **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`)
- **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
- **dependencies**: IDs of prerequisite tasks (Example: `[1, 2]`)
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
- This helps quickly identify which prerequisite tasks are blocking work
- **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`)
- **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
- **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
- **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
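Putting these fields together, a single task entry might look like the following sketch (all values are the illustrative examples listed above, not real project data):
```javascript
// Illustrative shape of one entry in the "tasks" array of tasks.json,
// assembled from the example values listed above.
const exampleTask = {
  id: 1,
  title: 'Initialize Repo',
  description: 'Create a new repository, set up initial structure.',
  status: 'pending',
  dependencies: [],    // IDs of prerequisite tasks, e.g. [1, 2]
  priority: 'high',
  details: 'Use GitHub client ID/secret, handle callback, set session token.',
  testStrategy: "Deploy and call endpoint to confirm 'Hello World' response.",
  subtasks: [
    { id: 1, title: 'Configure OAuth' /* remaining subtask fields elided */ }
  ]
};
```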
- **Environment Variables Configuration**
- **ANTHROPIC_API_KEY** (Required): Your Anthropic API key for Claude (Example: `ANTHROPIC_API_KEY=sk-ant-api03-...`)
- **MODEL** (Default: `"claude-3-7-sonnet-20250219"`): Claude model to use (Example: `MODEL=claude-3-opus-20240229`)
- **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
- **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
- **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
- **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`)
- **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
- **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
- **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
- **PROJECT_VERSION** (Default: `"1.0.0"`): Version in metadata (Example: `PROJECT_VERSION=2.1.0`)
- **PERPLEXITY_API_KEY**: For research-backed features (Example: `PERPLEXITY_API_KEY=pplx-...`)
- **PERPLEXITY_MODEL** (Default: `"sonar-medium-online"`): Perplexity model (Example: `PERPLEXITY_MODEL=sonar-large-online`)
- **Determining the Next Task**
- Run `task-master next` to show the next task to work on
- The next command identifies tasks with all dependencies satisfied
- Tasks are prioritized by priority level, dependency count, and ID (see the selection sketch after this list)
- The command shows comprehensive task information including:
- Basic task details and description
- Implementation details
- Subtasks (if they exist)
- Contextual suggested actions
- Recommended before starting any new development work
- Respects your project's dependency structure
- Ensures tasks are completed in the appropriate sequence
- Provides ready-to-use commands for common task actions
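Only as a rough conceptual sketch (not the actual Task Master implementation), the selection described in this list amounts to something like:
```javascript
// Conceptual sketch of how `task-master next` might choose a task, based on the
// behaviour described above: eligible tasks are pending with all dependencies done,
// then ordered by priority, dependency count, and ID.
const priorityRank = { high: 3, medium: 2, low: 1 };

function findNextTask(tasks) {
  const doneIds = new Set(tasks.filter(t => t.status === 'done').map(t => t.id));
  return tasks
    .filter(t => t.status === 'pending' &&
                 (t.dependencies || []).every(id => doneIds.has(id)))
    .sort((a, b) =>
      (priorityRank[b.priority] || 0) - (priorityRank[a.priority] || 0) ||
      (a.dependencies || []).length - (b.dependencies || []).length ||
      a.id - b.id
    )[0];
}
```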
- **Viewing Specific Task Details**
- Run `task-master show <id>` or `task-master show --id=<id>` to view a specific task
- Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1)
- Displays comprehensive information similar to the next command, but for a specific task
- For parent tasks, shows all subtasks and their current status
- For subtasks, shows parent task information and relationship
- Provides contextual suggested actions appropriate for the specific task
- Useful for examining task details before implementation or checking status
- **Managing Task Dependencies**
- Use `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency
- Use `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency
- The system prevents circular dependencies and duplicate dependency entries (see the sketch after this list)
- Dependencies are checked for existence before being added or removed
- Task files are automatically regenerated after dependency changes
- Dependencies are visualized with status indicators in task listings and files
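As a rough sketch of the safeguards described above (existence checks, duplicate rejection, and circular-dependency prevention), again not the actual implementation:
```javascript
// Conceptual sketch of the add-dependency checks described above.
// Returns false when the dependency is invalid, a duplicate, or would create a cycle.
function canAddDependency(tasks, taskId, dependsOnId) {
  const byId = new Map(tasks.map(t => [t.id, t]));
  const task = byId.get(taskId);
  if (!task || !byId.has(dependsOnId)) return false;                  // both tasks must exist
  if ((task.dependencies || []).includes(dependsOnId)) return false;  // duplicate entry
  // Walk the prospective dependency's own dependencies; reaching taskId means a cycle
  const stack = [dependsOnId];
  const seen = new Set();
  while (stack.length > 0) {
    const current = stack.pop();
    if (current === taskId) return false;                             // circular dependency
    if (seen.has(current)) continue;
    seen.add(current);
    stack.push(...((byId.get(current) || {}).dependencies || []));
  }
  return true;
}
```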
- **Command Reference: add-dependency**
- Legacy Syntax: `node scripts/dev.js add-dependency --id=<id> --depends-on=<id>`
- CLI Syntax: `task-master add-dependency --id=<id> --depends-on=<id>`
- Description: Adds a dependency relationship between two tasks
- Parameters:
- `--id=<id>`: ID of task that will depend on another task (required)
- `--depends-on=<id>`: ID of task that will become a dependency (required)
- Example: `task-master add-dependency --id=22 --depends-on=21`
- Notes: Prevents circular dependencies and duplicates; updates task files automatically
- **Command Reference: remove-dependency**
- Legacy Syntax: `node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>`
- CLI Syntax: `task-master remove-dependency --id=<id> --depends-on=<id>`
- Description: Removes a dependency relationship between two tasks
- Parameters:
- `--id=<id>`: ID of task to remove dependency from (required)
- `--depends-on=<id>`: ID of task to remove as a dependency (required)
- Example: `task-master remove-dependency --id=22 --depends-on=21`
- Notes: Checks if dependency actually exists; updates task files automatically
- **Command Reference: validate-dependencies**
- Legacy Syntax: `node scripts/dev.js validate-dependencies [options]`
- CLI Syntax: `task-master validate-dependencies [options]`
- Description: Checks for and identifies invalid dependencies in tasks.json and task files
- Parameters:
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
- Example: `task-master validate-dependencies`
- Notes:
- Reports all non-existent dependencies and self-dependencies without modifying files
- Provides detailed statistics on task dependency state
- Use before fix-dependencies to audit your task structure
- **Command Reference: fix-dependencies**
- Legacy Syntax: `node scripts/dev.js fix-dependencies [options]`
- CLI Syntax: `task-master fix-dependencies [options]`
- Description: Finds and fixes all invalid dependencies in tasks.json and task files
- Parameters:
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
- Example: `task-master fix-dependencies`
- Notes:
- Removes references to non-existent tasks and subtasks
- Eliminates self-dependencies (tasks depending on themselves)
- Regenerates task files with corrected dependencies
- Provides detailed report of all fixes made
- **Command Reference: complexity-report**
- Legacy Syntax: `node scripts/dev.js complexity-report [options]`
- CLI Syntax: `task-master complexity-report [options]`
- Description: Displays the task complexity analysis report in a formatted, easy-to-read way
- Parameters:
- `--file=<path>, -f`: Path to the complexity report file (default: 'scripts/task-complexity-report.json')
- Example: `task-master complexity-report`
- Notes:
- Shows tasks organized by complexity score with recommended actions
- Provides complexity distribution statistics
- Displays ready-to-use expansion commands for complex tasks
- If no report exists, offers to generate one interactively
- **Command Reference: add-task**
- CLI Syntax: `task-master add-task [options]`
- Description: Add a new task to tasks.json using AI
- Parameters:
- `--file=<path>, -f`: Path to the tasks file (default: 'tasks/tasks.json')
- `--prompt=<text>, -p`: Description of the task to add (required)
- `--dependencies=<ids>, -d`: Comma-separated list of task IDs this task depends on
- `--priority=<priority>`: Task priority (high, medium, low) (default: 'medium')
- Example: `task-master add-task --prompt="Create user authentication using Auth0"`
- Notes: Uses AI to convert description into structured task with appropriate details
- **Command Reference: init**
- CLI Syntax: `task-master init`
- Description: Initialize a new project with Task Master structure
- Parameters: None
- Example: `task-master init`
- Notes:
- Creates initial project structure with required files
- Prompts for project settings if not provided
- Merges with existing files when appropriate
- Can be used to bootstrap a new Task Master project quickly
- **Code Analysis & Refactoring Techniques**
- **Top-Level Function Search**
- Use grep pattern matching to find all exported functions across the codebase
- Command: `grep -E "export (function|const) \w+|function \w+\(|const \w+ = \(|module\.exports" --include="*.js" -r ./`
- Benefits:
- Quickly identify all public API functions without reading implementation details
- Compare functions between files during refactoring (e.g., monolithic to modular structure)
- Verify all expected functions exist in refactored modules
- Identify duplicate functionality or naming conflicts
- Usage examples:
- When migrating from `scripts/dev.js` to modular structure: `grep -E "function \w+\(" scripts/dev.js`
- Check function exports in a directory: `grep -E "export (function|const)" scripts/modules/`
- Find potential naming conflicts: `grep -E "function (get|set|create|update)\w+\(" -r ./`
- Variations:
- Add `-n` flag to include line numbers
- Add `--include="*.ts"` to filter by file extension
- Use with `| sort` to alphabetize results
- Integration with refactoring workflow:
- Start by mapping all functions in the source file
- Create target module files based on function grouping
- Verify all functions were properly migrated
- Check for any unintentional duplications or omissions
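Combining the variations above, a typical invocation while auditing `scripts/modules/` during a refactor might be:
```bash
# Exported functions, with line numbers, restricted to .js files, alphabetized
grep -En "export (function|const) \w+" --include="*.js" -r scripts/modules/ | sort
```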
---
WINDSURF_RULES
---
description: Guidelines for creating and maintaining Windsurf rules to ensure consistency and effectiveness.
globs: .windsurfrules
filesToApplyRule: .windsurfrules
alwaysApply: true
---
The following describes how new rule sections in this document should be structured.
- **Required Rule Structure:**
```markdown
---
description: Clear, one-line description of what the rule enforces
globs: path/to/files/*.ext, other/path/**/*
alwaysApply: boolean
---
- **Main Points in Bold**
- Sub-points with details
- Examples and explanations
```
- **Section References:**
- Use `ALL_CAPS_SECTION` to reference sections of this document
- Example: `WINDSURF_RULES`
- **Code Examples:**
- Use language-specific code blocks
```typescript
// ✅ DO: Show good examples
const goodExample = true;
// ❌ DON'T: Show anti-patterns
const badExample = false;
```
- **Rule Content Guidelines:**
- Start with high-level overview
- Include specific, actionable requirements
- Show examples of correct implementation
- Reference existing code when possible
- Keep rules DRY by referencing other rules
- **Rule Maintenance:**
- Update rules when new patterns emerge
- Add examples from actual codebase
- Remove outdated patterns
- Cross-reference related rules
- **Best Practices:**
- Use bullet points for clarity
- Keep descriptions concise
- Include both DO and DON'T examples
- Reference actual code over theoretical examples
- Use consistent formatting across rules
---
SELF_IMPROVE
---
description: Guidelines for continuously improving this rules document based on emerging code patterns and best practices.
globs: **/*
filesToApplyRule: **/*
alwaysApply: true
---
- **Rule Improvement Triggers:**
- New code patterns not covered by existing rules
- Repeated similar implementations across files
- Common error patterns that could be prevented
- New libraries or tools being used consistently
- Emerging best practices in the codebase
- **Analysis Process:**
- Compare new code with existing rules
- Identify patterns that should be standardized
- Look for references to external documentation
- Check for consistent error handling patterns
- Monitor test patterns and coverage
- **Rule Updates:**
- **Add New Rules When:**
- A new technology/pattern is used in 3+ files
- Common bugs could be prevented by a rule
- Code reviews repeatedly mention the same feedback
- New security or performance patterns emerge
- **Modify Existing Rules When:**
- Better examples exist in the codebase
- Additional edge cases are discovered
- Related rules have been updated
- Implementation details have changed
- **Example Pattern Recognition:**
```typescript
// If you see repeated patterns like:
const data = await prisma.user.findMany({
select: { id: true, email: true },
where: { status: 'ACTIVE' }
});
// Consider adding a PRISMA section in the .windsurfrules:
// - Standard select fields
// - Common where conditions
// - Performance optimization patterns
```
- **Rule Quality Checks:**
- Rules should be actionable and specific
- Examples should come from actual code
- References should be up to date
- Patterns should be consistently enforced
- **Continuous Improvement:**
- Monitor code review comments
- Track common development questions
- Update rules after major refactors
- Add links to relevant documentation
- Cross-reference related rules
- **Rule Deprecation:**
- Mark outdated patterns as deprecated
- Remove rules that no longer apply
- Update references to deprecated rules
- Document migration paths for old patterns
- **Documentation Updates:**
- Keep examples synchronized with code
- Update references to external docs
- Maintain links between related rules
- Document breaking changes
Follow WINDSURF_RULES for proper rule formatting and structure of windsurf rule sections.

View File

@@ -12,6 +12,7 @@ import { spawn } from 'child_process';
import { Command } from 'commander';
import { displayHelp, displayBanner } from '../scripts/modules/ui.js';
import { registerCommands } from '../scripts/modules/commands.js';
import { detectCamelCaseFlags } from '../scripts/modules/utils.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -53,6 +54,9 @@ function runDevScript(args) {
});
}
// Helper function to detect camelCase and convert to kebab-case
const toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();
/**
* Create a wrapper action that passes the command to dev.js
* @param {string} commandName - The name of the command
@@ -60,21 +64,8 @@ function runDevScript(args) {
*/
function createDevScriptAction(commandName) {
return (options, cmd) => {
// Helper function to detect camelCase and convert to kebab-case
const toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();
// Check for camelCase flags and error out with helpful message
const camelCaseFlags = [];
for (const arg of process.argv) {
if (arg.startsWith('--') && /[A-Z]/.test(arg)) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
const kebabVersion = toKebabCase(flagName);
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
const camelCaseFlags = detectCamelCaseFlags(process.argv);
// If camelCase flags were found, show error and exit
if (camelCaseFlags.length > 0) {
@@ -307,3 +298,10 @@ if (process.argv.length <= 2) {
displayHelp();
process.exit(0);
}
// Add exports at the end of the file
if (typeof module !== 'undefined') {
module.exports = {
detectCamelCaseFlags
};
}

View File

@@ -1,6 +1,6 @@
{
"name": "task-master-ai",
"version": "0.9.28",
"version": "0.9.30",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",

View File

@@ -30,8 +30,12 @@ program
.version('1.0.0') // Will be replaced by prepare-package script
.option('-y, --yes', 'Skip prompts and use default values')
.option('-n, --name <name>', 'Project name')
.option('-my_name <name>', 'Project name (alias for --name)')
.option('-d, --description <description>', 'Project description')
.option('-my_description <description>', 'Project description (alias for --description)')
.option('-v, --version <version>', 'Project version')
.option('-my_version <version>', 'Project version (alias for --version)')
.option('--my_name <name>', 'Project name (alias for --name)')
.option('-a, --author <author>', 'Author name')
.option('--skip-install', 'Skip installing dependencies')
.option('--dry-run', 'Show what would be done without making changes')
@@ -39,6 +43,17 @@ program
const options = program.opts();
// Map custom aliases to standard options
if (options.my_name && !options.name) {
options.name = options.my_name;
}
if (options.my_description && !options.description) {
options.description = options.my_description;
}
if (options.my_version && !options.version) {
options.version = options.my_version;
}
// Define log levels
const LOG_LEVELS = {
debug: 0,
@@ -143,6 +158,9 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
case 'README-task-master.md':
sourcePath = path.join(__dirname, '..', 'README-task-master.md');
break;
case 'windsurfrules':
sourcePath = path.join(__dirname, '..', 'assets', '.windsurfrules');
break;
default:
// For other files like env.example, gitignore, etc. that don't have direct equivalents
sourcePath = path.join(__dirname, '..', 'assets', templateName);
@@ -190,6 +208,20 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
return;
}
// Handle .windsurfrules - append the entire content
if (filename === '.windsurfrules') {
log('info', `${targetPath} already exists, appending content instead of overwriting...`);
const existingContent = fs.readFileSync(targetPath, 'utf8');
// Add a separator comment before appending our content
const updatedContent = existingContent.trim() +
'\n\n# Added by Task Master - Development Workflow Rules\n\n' +
content;
fs.writeFileSync(targetPath, updatedContent);
log('success', `Updated ${targetPath} with additional rules`);
return;
}
// Handle package.json - merge dependencies
if (filename === 'package.json') {
log('info', `${targetPath} already exists, merging dependencies...`);
@@ -481,6 +513,9 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
// Copy self_improve.mdc
copyTemplateFile('self_improve.mdc', path.join(targetDir, '.cursor', 'rules', 'self_improve.mdc'));
// Copy .windsurfrules
copyTemplateFile('windsurfrules', path.join(targetDir, '.windsurfrules'));
// Copy scripts/dev.js
copyTemplateFile('dev.js', path.join(targetDir, 'scripts', 'dev.js'));

View File

@@ -3,11 +3,14 @@
* AI service interactions for the Task Master CLI
*/
// NOTE/TODO: Include the beta header output-128k-2025-02-19 in your API request to increase the maximum output token length to 128k tokens for Claude 3.7 Sonnet.
import { Anthropic } from '@anthropic-ai/sdk';
import OpenAI from 'openai';
import dotenv from 'dotenv';
import { CONFIG, log, sanitizePrompt } from './utils.js';
import { startLoadingIndicator, stopLoadingIndicator } from './ui.js';
import chalk from 'chalk';
// Load environment variables
dotenv.config();
@@ -15,6 +18,10 @@ dotenv.config();
// Configure Anthropic client
const anthropic = new Anthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
// Add beta header for 128k token output
defaultHeaders: {
'anthropic-beta': 'output-128k-2025-02-19'
}
});
// Lazy-loaded Perplexity client
@@ -37,6 +44,38 @@ function getPerplexityClient() {
return perplexity;
}
/**
* Handle Claude API errors with user-friendly messages
* @param {Error} error - The error from Claude API
* @returns {string} User-friendly error message
*/
function handleClaudeError(error) {
// Check if it's a structured error response
if (error.type === 'error' && error.error) {
switch (error.error.type) {
case 'overloaded_error':
return 'Claude is currently experiencing high demand and is overloaded. Please wait a few minutes and try again.';
case 'rate_limit_error':
return 'You have exceeded the rate limit. Please wait a few minutes before making more requests.';
case 'invalid_request_error':
return 'There was an issue with the request format. If this persists, please report it as a bug.';
default:
return `Claude API error: ${error.error.message}`;
}
}
// Check for network/timeout errors
if (error.message?.toLowerCase().includes('timeout')) {
return 'The request to Claude timed out. Please try again.';
}
if (error.message?.toLowerCase().includes('network')) {
return 'There was a network error connecting to Claude. Please check your internet connection and try again.';
}
// Default error message
return `Error communicating with Claude: ${error.message}`;
}
/**
* Call Claude to generate tasks from a PRD
* @param {string} prdContent - PRD content
@@ -99,14 +138,27 @@ Important: Your response must be valid JSON only, with no additional explanation
// Use streaming request to handle large responses and show progress
return await handleStreamingRequest(prdContent, prdPath, numTasks, CONFIG.maxTokens, systemPrompt);
} catch (error) {
log('error', 'Error calling Claude:', error.message);
// Get user-friendly error message
const userMessage = handleClaudeError(error);
log('error', userMessage);
// Retry logic
if (retryCount < 2) {
log('info', `Retrying (${retryCount + 1}/2)...`);
// Retry logic for certain errors
if (retryCount < 2 && (
error.error?.type === 'overloaded_error' ||
error.error?.type === 'rate_limit_error' ||
error.message?.toLowerCase().includes('timeout') ||
error.message?.toLowerCase().includes('network')
)) {
const waitTime = (retryCount + 1) * 5000; // 5s, then 10s
log('info', `Waiting ${waitTime/1000} seconds before retry ${retryCount + 1}/2...`);
await new Promise(resolve => setTimeout(resolve, waitTime));
return await callClaude(prdContent, prdPath, numTasks, retryCount + 1);
} else {
throw error;
console.error(chalk.red(userMessage));
if (CONFIG.debug) {
log('debug', 'Full error:', error);
}
throw new Error(userMessage);
}
}
}
@@ -166,7 +218,17 @@ async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens,
} catch (error) {
if (streamingInterval) clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);
throw error;
// Get user-friendly error message
const userMessage = handleClaudeError(error);
log('error', userMessage);
console.error(chalk.red(userMessage));
if (CONFIG.debug) {
log('debug', 'Full error:', error);
}
throw new Error(userMessage);
}
}
@@ -613,5 +675,6 @@ export {
generateSubtasks,
generateSubtasksWithPerplexity,
parseSubtasksFromText,
generateComplexityAnalysisPrompt
generateComplexityAnalysisPrompt,
handleClaudeError
};

View File

@@ -62,9 +62,21 @@ function registerCommands(programInstance) {
.action(async (file, options) => {
// Use input option if file argument not provided
const inputFile = file || options.input;
const defaultPrdPath = 'scripts/prd.txt';
// If no input file specified, check for default PRD location
if (!inputFile) {
console.log(chalk.yellow('No PRD file specified.'));
if (fs.existsSync(defaultPrdPath)) {
console.log(chalk.blue(`Using default PRD file: ${defaultPrdPath}`));
const numTasks = parseInt(options.numTasks, 10);
const outputPath = options.output;
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
await parsePRD(defaultPrdPath, outputPath, numTasks);
return;
}
console.log(chalk.yellow('No PRD file specified and default PRD file not found at scripts/prd.txt.'));
console.log(boxen(
chalk.white.bold('Parse PRD Help') + '\n\n' +
chalk.cyan('Usage:') + '\n' +
@@ -76,7 +88,10 @@ function registerCommands(programInstance) {
chalk.cyan('Example:') + '\n' +
' task-master parse-prd requirements.txt --num-tasks 15\n' +
' task-master parse-prd --input=requirements.txt\n\n' +
chalk.yellow('Note: This command will generate tasks from a PRD document and will overwrite any existing tasks.json file.'),
chalk.yellow('Note: This command will:') + '\n' +
' 1. Look for a PRD file at scripts/prd.txt by default\n' +
' 2. Use the file specified by --input or positional argument if provided\n' +
' 3. Generate tasks from the PRD and overwrite any existing tasks.json file',
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
));
return;
@@ -547,6 +562,29 @@ function registerCommands(programInstance) {
}
});
// init command (documentation only, implementation is in init.js)
programInstance
.command('init')
.description('Initialize a new project with Task Master structure')
.option('-n, --name <name>', 'Project name')
.option('-my_name <name>', 'Project name (alias for --name)')
.option('--my_name <name>', 'Project name (alias for --name)')
.option('-d, --description <description>', 'Project description')
.option('-my_description <description>', 'Project description (alias for --description)')
.option('-v, --version <version>', 'Project version')
.option('-my_version <version>', 'Project version (alias for --version)')
.option('-a, --author <author>', 'Author name')
.option('-y, --yes', 'Skip prompts and use default values')
.option('--skip-install', 'Skip installing dependencies')
.action(() => {
console.log(chalk.yellow('The init command must be run as a standalone command: task-master init'));
console.log(chalk.cyan('Example usage:'));
console.log(chalk.white(' task-master init -n "My Project" -d "Project description"'));
console.log(chalk.white(' task-master init -my_name "My Project" -my_description "Project description"'));
console.log(chalk.white(' task-master init -y'));
process.exit(0);
});
// Add more commands as needed...
return programInstance;

View File

@@ -835,14 +835,33 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false) {
}
// COMPLETELY REVISED TABLE APPROACH
// Define fixed column widths based on terminal size
const idWidth = 10;
const statusWidth = 20;
const priorityWidth = 10;
const depsWidth = 25;
// Define percentage-based column widths and calculate actual widths
// Adjust percentages based on content type and user requirements
// Calculate title width from available space
const titleWidth = terminalWidth - idWidth - statusWidth - priorityWidth - depsWidth - 10; // 10 for borders and padding
// Adjust ID width if showing subtasks (subtask IDs are longer: e.g., "1.2")
const idWidthPct = withSubtasks ? 10 : 7;
// Calculate max status length to accommodate "in-progress"
const statusWidthPct = 15;
// Increase priority column width as requested
const priorityWidthPct = 12;
// Make dependencies column smaller as requested (-20%)
const depsWidthPct = 20;
// Calculate title/description width as remaining space (+20% from dependencies reduction)
const titleWidthPct = 100 - idWidthPct - statusWidthPct - priorityWidthPct - depsWidthPct;
// Allow 10 characters for borders and padding
const availableWidth = terminalWidth - 10;
// Calculate actual column widths based on percentages
const idWidth = Math.floor(availableWidth * (idWidthPct / 100));
const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));
const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));
// Create a table with correct borders and spacing
const table = new Table({

View File

@@ -510,6 +510,21 @@ async function displayNextTask(tasksPath) {
{ padding: { top: 0, bottom: 0, left: 1, right: 1 }, margin: { top: 1, bottom: 0 }, borderColor: 'magenta', borderStyle: 'round' }
));
// Calculate available width for the subtask table
const availableWidth = process.stdout.columns - 10 || 100; // Default to 100 if can't detect
// Define percentage-based column widths
const idWidthPct = 8;
const statusWidthPct = 15;
const depsWidthPct = 25;
const titleWidthPct = 100 - idWidthPct - statusWidthPct - depsWidthPct;
// Calculate actual column widths
const idWidth = Math.floor(availableWidth * (idWidthPct / 100));
const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));
// Create a table for subtasks with improved handling
const subtaskTable = new Table({
head: [
@@ -518,7 +533,7 @@ async function displayNextTask(tasksPath) {
chalk.magenta.bold('Title'),
chalk.magenta.bold('Deps')
],
colWidths: [6, 12, Math.min(50, process.stdout.columns - 65 || 30), 30],
colWidths: [idWidth, statusWidth, titleWidth, depsWidth],
style: {
head: [],
border: [],
@@ -741,6 +756,21 @@ async function displayTaskById(tasksPath, taskId) {
{ padding: { top: 0, bottom: 0, left: 1, right: 1 }, margin: { top: 1, bottom: 0 }, borderColor: 'magenta', borderStyle: 'round' }
));
// Calculate available width for the subtask table
const availableWidth = process.stdout.columns - 10 || 100; // Default to 100 if can't detect
// Define percentage-based column widths
const idWidthPct = 8;
const statusWidthPct = 15;
const depsWidthPct = 25;
const titleWidthPct = 100 - idWidthPct - statusWidthPct - depsWidthPct;
// Calculate actual column widths
const idWidth = Math.floor(availableWidth * (idWidthPct / 100));
const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));
// Create a table for subtasks with improved handling
const subtaskTable = new Table({
head: [
@@ -749,7 +779,7 @@ async function displayTaskById(tasksPath, taskId) {
chalk.magenta.bold('Title'),
chalk.magenta.bold('Deps')
],
colWidths: [6, 12, Math.min(50, process.stdout.columns - 65 || 30), 30],
colWidths: [idWidth, statusWidth, titleWidth, depsWidth],
style: {
head: [],
border: [],
@@ -945,7 +975,7 @@ async function displayComplexityReport(reportPath) {
const terminalWidth = process.stdout.columns || 100; // Default to 100 if can't detect
// Calculate dynamic column widths
const idWidth = 5;
const idWidth = 12;
const titleWidth = Math.floor(terminalWidth * 0.25); // 25% of width
const scoreWidth = 8;
const subtasksWidth = 8;

View File

@@ -265,6 +265,62 @@ function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStac
return cyclesToBreak;
}
/**
* Convert a string from camelCase to kebab-case
* @param {string} str - The string to convert
* @returns {string} The kebab-case version of the string
*/
const toKebabCase = (str) => {
// Special handling for common acronyms
const withReplacedAcronyms = str
.replace(/ID/g, 'Id')
.replace(/API/g, 'Api')
.replace(/UI/g, 'Ui')
.replace(/URL/g, 'Url')
.replace(/URI/g, 'Uri')
.replace(/JSON/g, 'Json')
.replace(/XML/g, 'Xml')
.replace(/HTML/g, 'Html')
.replace(/CSS/g, 'Css');
// Insert hyphens before capital letters and convert to lowercase
return withReplacedAcronyms
.replace(/([A-Z])/g, '-$1')
.toLowerCase()
.replace(/^-/, ''); // Remove leading hyphen if present
};
/**
* Detect camelCase flags in command arguments
* @param {string[]} args - Command line arguments to check
* @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted
*/
function detectCamelCaseFlags(args) {
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
}
// Export all utility functions and configuration
export {
CONFIG,
@@ -279,5 +335,7 @@ export {
formatTaskId,
findTaskById,
truncate,
findCycles
findCycles,
toKebabCase,
detectCamelCaseFlags
};

tasks/task_029.txt Normal file
View File

@@ -0,0 +1,33 @@
# Task ID: 29
# Title: Update Claude 3.7 Sonnet Integration with Beta Header for 128k Token Output
# Status: done
# Dependencies: None
# Priority: medium
# Description: Modify the ai-services.js file to include the beta header 'output-128k-2025-02-19' in Claude 3.7 Sonnet API requests to increase the maximum output token length to 128k tokens.
# Details:
The task involves updating the Claude 3.7 Sonnet integration in the ai-services.js file to take advantage of the new 128k token output capability. Specifically:
1. Locate the Claude 3.7 Sonnet API request configuration in ai-services.js
2. Add the beta header 'output-128k-2025-02-19' to the request headers
3. Update any related configuration parameters that might need adjustment for the increased token limit
4. Ensure that token counting and management logic is updated to account for the new 128k token output limit
5. Update any documentation comments in the code to reflect the new capability
6. Consider implementing a configuration option to enable/disable this feature, as it may be a beta feature subject to change
7. Verify that the token management logic correctly handles the increased limit without causing unexpected behavior
8. Ensure backward compatibility with existing code that might assume lower token limits
The implementation should be clean and maintainable, with appropriate error handling for cases where the beta header might not be supported in the future.
# Test Strategy:
Testing should verify that the beta header is correctly included and that the system properly handles the increased token limit:
1. Unit test: Verify that the API request to Claude 3.7 Sonnet includes the 'output-128k-2025-02-19' header
2. Integration test: Make an actual API call to Claude 3.7 Sonnet with the beta header and confirm a successful response
3. Test with a prompt designed to generate a very large response (>20k tokens but <128k tokens) and verify it completes successfully
4. Test the token counting logic with mock responses of various sizes to ensure it correctly handles responses approaching the 128k limit
5. Verify error handling by simulating API errors related to the beta header
6. Test any configuration options for enabling/disabling the feature
7. Performance test: Measure any impact on response time or system resources when handling very large responses
8. Regression test: Ensure existing functionality using Claude 3.7 Sonnet continues to work as expected
Document all test results, including any limitations or edge cases discovered during testing.

tasks/task_030.txt Normal file
View File

@@ -0,0 +1,40 @@
# Task ID: 30
# Title: Enhance parse-prd Command to Support Default PRD Path
# Status: done
# Dependencies: None
# Priority: medium
# Description: Modify the parse-prd command to automatically use a default PRD path when no path is explicitly provided, improving user experience by reducing the need for manual path specification.
# Details:
Currently, the parse-prd command requires users to explicitly specify the path to the PRD document. This enhancement should:
1. Implement a default PRD path configuration that can be set in the application settings or configuration file.
2. Update the parse-prd command to check for this default path when no path argument is provided.
3. Add a configuration option that allows users to set/update the default PRD path through a command like `config set default-prd-path <path>`.
4. Ensure backward compatibility by maintaining support for explicit path specification.
5. Add appropriate error handling for cases where the default path is not set or the file doesn't exist.
6. Update the command's help text to indicate that a default path will be used if none is specified.
7. Consider implementing path validation to ensure the default path points to a valid PRD document.
8. If multiple PRD formats are supported (Markdown, PDF, etc.), ensure the default path handling works with all supported formats.
9. Add logging for default path usage to help with debugging and usage analytics.
# Test Strategy:
1. Unit tests:
- Test that the command correctly uses the default path when no path is provided
- Test that explicit paths override the default path
- Test error handling when default path is not set
- Test error handling when default path is set but file doesn't exist
2. Integration tests:
- Test the full workflow of setting a default path and then using the parse-prd command without arguments
- Test with various file formats if multiple are supported
3. Manual testing:
- Verify the command works in a real environment with actual PRD documents
- Test the user experience of setting and using default paths
- Verify help text correctly explains the default path behavior
4. Edge cases to test:
- Relative vs. absolute paths for default path setting
- Path with special characters or spaces
- Very long paths approaching system limits
- Permissions issues with the default path location

tasks/task_031.txt Normal file
View File

@@ -0,0 +1,42 @@
# Task ID: 31
# Title: Add Config Flag Support to task-master init Command
# Status: done
# Dependencies: None
# Priority: low
# Description: Enhance the 'task-master init' command to accept configuration flags that allow users to bypass the interactive CLI questions and directly provide configuration values.
# Details:
Currently, the 'task-master init' command prompts users with a series of questions to set up the configuration. This task involves modifying the init command to accept command-line flags that can pre-populate these configuration values, allowing for a non-interactive setup process.
Implementation steps:
1. Identify all configuration options that are currently collected through CLI prompts during initialization
2. Create corresponding command-line flags for each configuration option (e.g., --project-name, --ai-provider, etc.)
3. Modify the init command handler to check for these flags before starting the interactive prompts
4. If a flag is provided, skip the corresponding prompt and use the provided value instead
5. If all required configuration values are provided via flags, skip the interactive process entirely
6. Update the command's help text to document all available flags and their usage
7. Ensure backward compatibility so the command still works with the interactive approach when no flags are provided
8. Consider adding a --non-interactive flag that will fail if any required configuration is missing rather than prompting for it (useful for scripts and CI/CD)
The implementation should follow the existing command structure and use the same configuration file format. Make sure to validate flag values with the same validation logic used for interactive inputs.
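A rough sketch of the flag-first flow in steps 2–5, assuming Commander-style option parsing; `promptForMissing` and `writeConfig` are hypothetical placeholders for the existing interactive prompts and config persistence:

```javascript
// Hypothetical wiring; flag names mirror the examples above.
import { Command } from 'commander';

async function promptForMissing(partialConfig) {
  // Placeholder for the existing interactive question flow
  return partialConfig;
}

function writeConfig(config) {
  // Placeholder for persisting the configuration file
  console.log('Would write config:', config);
}

const program = new Command();

program
  .command('init')
  .option('--project-name <name>', 'Project name')
  .option('--ai-provider <provider>', 'AI provider to use')
  .option('--non-interactive', 'Fail instead of prompting for missing values')
  .action(async (options) => {
    const config = {
      projectName: options.projectName,
      aiProvider: options.aiProvider
    };
    const missing = Object.keys(config).filter((key) => config[key] === undefined);

    if (missing.length === 0) {
      // Every value came in via flags: skip the interactive flow entirely
      return writeConfig(config);
    }
    if (options.nonInteractive) {
      throw new Error(`Missing required values: ${missing.join(', ')}`);
    }
    return writeConfig(await promptForMissing(config));
  });
```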
# Test Strategy:
Testing should verify both the interactive and non-interactive paths work correctly:
1. Unit tests:
- Test each flag individually to ensure it correctly overrides the corresponding prompt
- Test combinations of flags to ensure they work together properly
- Test validation of flag values to ensure invalid values are rejected
- Test the --non-interactive flag to ensure it fails when required values are missing
2. Integration tests:
- Test a complete initialization with all flags provided
- Test partial initialization with some flags and some interactive prompts
- Test initialization with no flags (fully interactive)
3. Manual testing scenarios:
- Run 'task-master init --project-name="Test Project" --ai-provider="openai"' and verify it skips those prompts
- Run 'task-master init --help' and verify all flags are documented
- Run 'task-master init --non-interactive' without required flags and verify it fails with a helpful error message
- Run a complete non-interactive initialization and verify the resulting configuration file matches expectations
Ensure the command's documentation is updated to reflect the new functionality, and verify that the help text accurately describes all available options.

56
tasks/task_032.txt Normal file
View File

@@ -0,0 +1,56 @@
# Task ID: 32
# Title: Implement 'learn' Command for Automatic Cursor Rule Generation
# Status: pending
# Dependencies: None
# Priority: high
# Description: Create a new 'learn' command that analyzes code changes and chat history to automatically generate or update Cursor rules in the .cursor/rules directory based on successful implementation patterns.
# Details:
Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns:
1. Create a new module `commands/learn.js` that implements the command logic
2. Update `index.js` to register the new command
3. The command should:
- Accept an optional parameter for specifying which patterns to focus on
- Use git diff to extract code changes since the last commit (see the sketch after this list)
- Access the Cursor chat history if possible (investigate API or file storage location)
- Call Claude via ai-services.js with the following context:
* Code diffs
* Chat history excerpts showing challenges and solutions
* Existing rules from .cursor/rules if present
- Parse Claude's response to extract rule definitions
- Create or update .mdc files in the .cursor/rules directory
- Provide a summary of what was learned and which rules were updated
4. Create helper functions to:
- Extract relevant patterns from diffs
- Format the prompt for Claude to focus on identifying reusable patterns
- Parse Claude's response into valid rule definitions
- Handle rule conflicts or duplications
5. Ensure the command handles errors gracefully, especially if chat history is inaccessible
6. Add appropriate logging to show the learning process
7. Document the command in the README.md file
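A minimal sketch of the diff-gathering and prompt hand-off described in the list above; `buildLearnPrompt` is a hypothetical helper and the exact ai-services.js entry point is assumed:

```javascript
// Sketch only: assumes rules live under .cursor/rules and that a callClaude
// function (as in ai-services.js) is passed in.
import { execSync } from 'child_process';
import fs from 'fs';
import path from 'path';

function getDiffSinceLastCommit() {
  // Unified diff of the working tree against HEAD
  return execSync('git diff HEAD', { encoding: 'utf8' });
}

function readExistingRules(rulesDir = '.cursor/rules') {
  if (!fs.existsSync(rulesDir)) return [];
  return fs
    .readdirSync(rulesDir)
    .filter((file) => file.endsWith('.mdc'))
    .map((file) => ({
      file,
      content: fs.readFileSync(path.join(rulesDir, file), 'utf8')
    }));
}

function buildLearnPrompt({ diff, rules, focus }) {
  // Placeholder prompt assembly; the real format is an implementation detail
  return `Focus: ${focus ?? 'general'}\n\nExisting rules: ${rules.length}\n\nDiff:\n${diff}`;
}

async function learn(callClaude, focus) {
  const diff = getDiffSinceLastCommit();
  const rules = readExistingRules();
  return callClaude(buildLearnPrompt({ diff, rules, focus }));
}
```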
# Test Strategy:
1. Unit tests:
- Create tests for each helper function in isolation
- Mock git diff responses and chat history data
- Verify rule extraction logic works with different input patterns
- Test error handling for various failure scenarios
2. Integration tests:
- Test the command in a repository with actual code changes
- Verify it correctly generates .mdc files in the .cursor/rules directory
- Check that generated rules follow the correct format
- Verify the command correctly updates existing rules without losing custom modifications
3. Manual testing scenarios:
- Run the command after implementing a feature with specific patterns
- Verify the generated rules capture the intended patterns
- Test the command with and without existing rules
- Verify the command works when chat history is available and when it isn't
- Test with large diffs to ensure performance remains acceptable
4. Validation:
- After generating rules, use them in Cursor to verify they correctly guide future implementations
- Have multiple team members test the command to ensure consistent results

44
tasks/task_033.txt Normal file
View File

@@ -0,0 +1,44 @@
# Task ID: 33
# Title: Create and Integrate Windsurf Rules Document from MDC Files
# Status: done
# Dependencies: None
# Priority: medium
# Description: Develop functionality to generate a .windsurfrules document by combining and refactoring content from three primary .mdc files used for Cursor Rules, ensuring it's properly integrated into the initialization pipeline.
# Details:
This task involves creating a mechanism to generate a Windsurf-specific rules document by combining three existing MDC (Markdown Content) files that are currently used for Cursor Rules. The implementation should:
1. Identify and locate the three primary .mdc files used for Cursor Rules
2. Extract content from these files and merge them into a single document
3. Refactor the content to make it Windsurf-specific, replacing Cursor-specific terminology and adapting guidelines as needed
4. Create a function that generates a .windsurfrules document from this content
5. Integrate this function into the initialization pipeline
6. Implement logic to check if a .windsurfrules document already exists:
- If it exists, append the new content to it
- If it doesn't exist, create a new document
7. Ensure proper error handling for file operations
8. Add appropriate logging to track the generation and modification of the .windsurfrules document
The implementation should be modular and maintainable, with clear separation of concerns between content extraction, refactoring, and file operations.
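A compact sketch of steps 4–6; the source file list is a placeholder, and the append marker mirrors the one exercised by the init tests later in this diff:

```javascript
// Sketch only: sourceFiles stands in for the three Cursor rule .mdc files.
import fs from 'fs';

function generateWindsurfRules(sourceFiles, targetPath = '.windsurfrules') {
  // Steps 2-3: merge the source content (a real implementation would also
  // rewrite Cursor-specific terminology for Windsurf here)
  const merged = sourceFiles
    .map((file) => fs.readFileSync(file, 'utf8'))
    .join('\n\n');

  // Step 6: append when the file already exists, otherwise create it
  if (fs.existsSync(targetPath)) {
    const existing = fs.readFileSync(targetPath, 'utf8');
    fs.writeFileSync(
      targetPath,
      existing.trim() +
        '\n\n# Added by Claude Task Master - Development Workflow Rules\n\n' +
        merged
    );
  } else {
    fs.writeFileSync(targetPath, merged);
  }
}
```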
# Test Strategy:
Testing should verify both the content generation and the integration with the initialization pipeline:
1. Unit Tests:
- Test the content extraction function with mock .mdc files
- Test the content refactoring function to ensure Cursor-specific terms are properly replaced
- Test the file operation functions with mock filesystem
2. Integration Tests:
- Test the creation of a new .windsurfrules document when none exists
- Test appending to an existing .windsurfrules document
- Test the complete initialization pipeline with the new functionality
3. Manual Verification:
- Inspect the generated .windsurfrules document to ensure content is properly combined and refactored
- Verify that Cursor-specific terminology has been replaced with Windsurf-specific terminology
- Run the initialization process multiple times to verify idempotence (content isn't duplicated on multiple runs)
4. Edge Cases:
- Test with missing or corrupted .mdc files
- Test with an existing but empty .windsurfrules document
- Test with an existing .windsurfrules document that already contains some of the content

View File

@@ -1681,6 +1681,56 @@
"parentTaskId": 28
}
]
},
{
"id": 29,
"title": "Update Claude 3.7 Sonnet Integration with Beta Header for 128k Token Output",
"description": "Modify the ai-services.js file to include the beta header 'output-128k-2025-02-19' in Claude 3.7 Sonnet API requests to increase the maximum output token length to 128k tokens.",
"status": "done",
"dependencies": [],
"priority": "medium",
"details": "The task involves updating the Claude 3.7 Sonnet integration in the ai-services.js file to take advantage of the new 128k token output capability. Specifically:\n\n1. Locate the Claude 3.7 Sonnet API request configuration in ai-services.js\n2. Add the beta header 'output-128k-2025-02-19' to the request headers\n3. Update any related configuration parameters that might need adjustment for the increased token limit\n4. Ensure that token counting and management logic is updated to account for the new 128k token output limit\n5. Update any documentation comments in the code to reflect the new capability\n6. Consider implementing a configuration option to enable/disable this feature, as it may be a beta feature subject to change\n7. Verify that the token management logic correctly handles the increased limit without causing unexpected behavior\n8. Ensure backward compatibility with existing code that might assume lower token limits\n\nThe implementation should be clean and maintainable, with appropriate error handling for cases where the beta header might not be supported in the future.",
"testStrategy": "Testing should verify that the beta header is correctly included and that the system properly handles the increased token limit:\n\n1. Unit test: Verify that the API request to Claude 3.7 Sonnet includes the 'output-128k-2025-02-19' header\n2. Integration test: Make an actual API call to Claude 3.7 Sonnet with the beta header and confirm a successful response\n3. Test with a prompt designed to generate a very large response (>20k tokens but <128k tokens) and verify it completes successfully\n4. Test the token counting logic with mock responses of various sizes to ensure it correctly handles responses approaching the 128k limit\n5. Verify error handling by simulating API errors related to the beta header\n6. Test any configuration options for enabling/disabling the feature\n7. Performance test: Measure any impact on response time or system resources when handling very large responses\n8. Regression test: Ensure existing functionality using Claude 3.7 Sonnet continues to work as expected\n\nDocument all test results, including any limitations or edge cases discovered during testing."
},
{
"id": 30,
"title": "Enhance parse-prd Command to Support Default PRD Path",
"description": "Modify the parse-prd command to automatically use a default PRD path when no path is explicitly provided, improving user experience by reducing the need for manual path specification.",
"status": "done",
"dependencies": [],
"priority": "medium",
"details": "Currently, the parse-prd command requires users to explicitly specify the path to the PRD document. This enhancement should:\n\n1. Implement a default PRD path configuration that can be set in the application settings or configuration file.\n2. Update the parse-prd command to check for this default path when no path argument is provided.\n3. Add a configuration option that allows users to set/update the default PRD path through a command like `config set default-prd-path <path>`.\n4. Ensure backward compatibility by maintaining support for explicit path specification.\n5. Add appropriate error handling for cases where the default path is not set or the file doesn't exist.\n6. Update the command's help text to indicate that a default path will be used if none is specified.\n7. Consider implementing path validation to ensure the default path points to a valid PRD document.\n8. If multiple PRD formats are supported (Markdown, PDF, etc.), ensure the default path handling works with all supported formats.\n9. Add logging for default path usage to help with debugging and usage analytics.",
"testStrategy": "1. Unit tests:\n - Test that the command correctly uses the default path when no path is provided\n - Test that explicit paths override the default path\n - Test error handling when default path is not set\n - Test error handling when default path is set but file doesn't exist\n\n2. Integration tests:\n - Test the full workflow of setting a default path and then using the parse-prd command without arguments\n - Test with various file formats if multiple are supported\n\n3. Manual testing:\n - Verify the command works in a real environment with actual PRD documents\n - Test the user experience of setting and using default paths\n - Verify help text correctly explains the default path behavior\n\n4. Edge cases to test:\n - Relative vs. absolute paths for default path setting\n - Path with special characters or spaces\n - Very long paths approaching system limits\n - Permissions issues with the default path location"
},
{
"id": 31,
"title": "Add Config Flag Support to task-master init Command",
"description": "Enhance the 'task-master init' command to accept configuration flags that allow users to bypass the interactive CLI questions and directly provide configuration values.",
"status": "done",
"dependencies": [],
"priority": "low",
"details": "Currently, the 'task-master init' command prompts users with a series of questions to set up the configuration. This task involves modifying the init command to accept command-line flags that can pre-populate these configuration values, allowing for a non-interactive setup process.\n\nImplementation steps:\n1. Identify all configuration options that are currently collected through CLI prompts during initialization\n2. Create corresponding command-line flags for each configuration option (e.g., --project-name, --ai-provider, etc.)\n3. Modify the init command handler to check for these flags before starting the interactive prompts\n4. If a flag is provided, skip the corresponding prompt and use the provided value instead\n5. If all required configuration values are provided via flags, skip the interactive process entirely\n6. Update the command's help text to document all available flags and their usage\n7. Ensure backward compatibility so the command still works with the interactive approach when no flags are provided\n8. Consider adding a --non-interactive flag that will fail if any required configuration is missing rather than prompting for it (useful for scripts and CI/CD)\n\nThe implementation should follow the existing command structure and use the same configuration file format. Make sure to validate flag values with the same validation logic used for interactive inputs.",
"testStrategy": "Testing should verify both the interactive and non-interactive paths work correctly:\n\n1. Unit tests:\n - Test each flag individually to ensure it correctly overrides the corresponding prompt\n - Test combinations of flags to ensure they work together properly\n - Test validation of flag values to ensure invalid values are rejected\n - Test the --non-interactive flag to ensure it fails when required values are missing\n\n2. Integration tests:\n - Test a complete initialization with all flags provided\n - Test partial initialization with some flags and some interactive prompts\n - Test initialization with no flags (fully interactive)\n\n3. Manual testing scenarios:\n - Run 'task-master init --project-name=\"Test Project\" --ai-provider=\"openai\"' and verify it skips those prompts\n - Run 'task-master init --help' and verify all flags are documented\n - Run 'task-master init --non-interactive' without required flags and verify it fails with a helpful error message\n - Run a complete non-interactive initialization and verify the resulting configuration file matches expectations\n\nEnsure the command's documentation is updated to reflect the new functionality, and verify that the help text accurately describes all available options."
},
{
"id": 32,
"title": "Implement 'learn' Command for Automatic Cursor Rule Generation",
"description": "Create a new 'learn' command that analyzes code changes and chat history to automatically generate or update Cursor rules in the .cursor/rules directory based on successful implementation patterns.",
"status": "pending",
"dependencies": [],
"priority": "high",
"details": "Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns:\n\n1. Create a new module `commands/learn.js` that implements the command logic\n2. Update `index.js` to register the new command\n3. The command should:\n - Accept an optional parameter for specifying which patterns to focus on\n - Use git diff to extract code changes since the last commit\n - Access the Cursor chat history if possible (investigate API or file storage location)\n - Call Claude via ai-services.js with the following context:\n * Code diffs\n * Chat history excerpts showing challenges and solutions\n * Existing rules from .cursor/rules if present\n - Parse Claude's response to extract rule definitions\n - Create or update .mdc files in the .cursor/rules directory\n - Provide a summary of what was learned and which rules were updated\n\n4. Create helper functions to:\n - Extract relevant patterns from diffs\n - Format the prompt for Claude to focus on identifying reusable patterns\n - Parse Claude's response into valid rule definitions\n - Handle rule conflicts or duplications\n\n5. Ensure the command handles errors gracefully, especially if chat history is inaccessible\n6. Add appropriate logging to show the learning process\n7. Document the command in the README.md file",
"testStrategy": "1. Unit tests:\n - Create tests for each helper function in isolation\n - Mock git diff responses and chat history data\n - Verify rule extraction logic works with different input patterns\n - Test error handling for various failure scenarios\n\n2. Integration tests:\n - Test the command in a repository with actual code changes\n - Verify it correctly generates .mdc files in the .cursor/rules directory\n - Check that generated rules follow the correct format\n - Verify the command correctly updates existing rules without losing custom modifications\n\n3. Manual testing scenarios:\n - Run the command after implementing a feature with specific patterns\n - Verify the generated rules capture the intended patterns\n - Test the command with and without existing rules\n - Verify the command works when chat history is available and when it isn't\n - Test with large diffs to ensure performance remains acceptable\n\n4. Validation:\n - After generating rules, use them in Cursor to verify they correctly guide future implementations\n - Have multiple team members test the command to ensure consistent results"
},
{
"id": 33,
"title": "Create and Integrate Windsurf Rules Document from MDC Files",
"description": "Develop functionality to generate a .windsurfrules document by combining and refactoring content from three primary .mdc files used for Cursor Rules, ensuring it's properly integrated into the initialization pipeline.",
"status": "done",
"dependencies": [],
"priority": "medium",
"details": "This task involves creating a mechanism to generate a Windsurf-specific rules document by combining three existing MDC (Markdown Content) files that are currently used for Cursor Rules. The implementation should:\n\n1. Identify and locate the three primary .mdc files used for Cursor Rules\n2. Extract content from these files and merge them into a single document\n3. Refactor the content to make it Windsurf-specific, replacing Cursor-specific terminology and adapting guidelines as needed\n4. Create a function that generates a .windsurfrules document from this content\n5. Integrate this function into the initialization pipeline\n6. Implement logic to check if a .windsurfrules document already exists:\n - If it exists, append the new content to it\n - If it doesn't exist, create a new document\n7. Ensure proper error handling for file operations\n8. Add appropriate logging to track the generation and modification of the .windsurfrules document\n\nThe implementation should be modular and maintainable, with clear separation of concerns between content extraction, refactoring, and file operations.",
"testStrategy": "Testing should verify both the content generation and the integration with the initialization pipeline:\n\n1. Unit Tests:\n - Test the content extraction function with mock .mdc files\n - Test the content refactoring function to ensure Cursor-specific terms are properly replaced\n - Test the file operation functions with mock filesystem\n\n2. Integration Tests:\n - Test the creation of a new .windsurfrules document when none exists\n - Test appending to an existing .windsurfrules document\n - Test the complete initialization pipeline with the new functionality\n\n3. Manual Verification:\n - Inspect the generated .windsurfrules document to ensure content is properly combined and refactored\n - Verify that Cursor-specific terminology has been replaced with Windsurf-specific terminology\n - Run the initialization process multiple times to verify idempotence (content isn't duplicated on multiple runs)\n\n4. Edge Cases:\n - Test with missing or corrupted .mdc files\n - Test with an existing but empty .windsurfrules document\n - Test with an existing .windsurfrules document that already contains some of the content"
}
]
}

View File

@@ -10,14 +10,17 @@ const mockLog = jest.fn();
// Mock dependencies
jest.mock('@anthropic-ai/sdk', () => {
const mockCreate = jest.fn().mockResolvedValue({
content: [{ text: 'AI response' }],
});
const mockAnthropicInstance = {
messages: {
create: mockCreate
}
};
const mockAnthropicConstructor = jest.fn().mockImplementation(() => mockAnthropicInstance);
return {
Anthropic: jest.fn().mockImplementation(() => ({
messages: {
create: jest.fn().mockResolvedValue({
content: [{ text: 'AI response' }],
}),
},
})),
Anthropic: mockAnthropicConstructor
};
});
@@ -68,6 +71,9 @@ global.anthropic = {
// Mock process.env
const originalEnv = process.env;
// Import Anthropic for testing constructor arguments
import { Anthropic } from '@anthropic-ai/sdk';
describe('AI Services Module', () => {
beforeEach(() => {
jest.clearAllMocks();
@@ -285,4 +291,102 @@ These subtasks will help you implement the parent task efficiently.`;
});
});
});
describe('handleClaudeError function', () => {
// Import the function directly for testing
let handleClaudeError;
beforeAll(async () => {
// Dynamic import to get the actual function
const module = await import('../../scripts/modules/ai-services.js');
handleClaudeError = module.handleClaudeError;
});
test('should handle overloaded_error type', () => {
const error = {
type: 'error',
error: {
type: 'overloaded_error',
message: 'Claude is experiencing high volume'
}
};
const result = handleClaudeError(error);
expect(result).toContain('Claude is currently experiencing high demand');
expect(result).toContain('overloaded');
});
test('should handle rate_limit_error type', () => {
const error = {
type: 'error',
error: {
type: 'rate_limit_error',
message: 'Rate limit exceeded'
}
};
const result = handleClaudeError(error);
expect(result).toContain('exceeded the rate limit');
});
test('should handle invalid_request_error type', () => {
const error = {
type: 'error',
error: {
type: 'invalid_request_error',
message: 'Invalid request parameters'
}
};
const result = handleClaudeError(error);
expect(result).toContain('issue with the request format');
});
test('should handle timeout errors', () => {
const error = {
message: 'Request timed out after 60000ms'
};
const result = handleClaudeError(error);
expect(result).toContain('timed out');
});
test('should handle network errors', () => {
const error = {
message: 'Network error occurred'
};
const result = handleClaudeError(error);
expect(result).toContain('network error');
});
test('should handle generic errors', () => {
const error = {
message: 'Something unexpected happened'
};
const result = handleClaudeError(error);
expect(result).toContain('Error communicating with Claude');
expect(result).toContain('Something unexpected happened');
});
});
describe('Anthropic client configuration', () => {
test('should include output-128k beta header in client configuration', async () => {
// Read the file content to verify the change is present
const fs = await import('fs');
const path = await import('path');
const filePath = path.resolve('./scripts/modules/ai-services.js');
const fileContent = fs.readFileSync(filePath, 'utf8');
// Check if the beta header is in the file
expect(fileContent).toContain("'anthropic-beta': 'output-128k-2025-02-19'");
});
});
});
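The file-content assertion above only checks that the beta header string appears somewhere in ai-services.js; a client configured that way might look roughly like this (an assumption about the file's shape, not a quote from it):

```javascript
import { Anthropic } from '@anthropic-ai/sdk';

// Assumed configuration; the real ai-services.js may attach the header
// per-request rather than via defaultHeaders.
const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
  defaultHeaders: {
    'anthropic-beta': 'output-128k-2025-02-19' // opt in to 128k output tokens
  }
});

export default anthropic;
```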

View File

@@ -4,116 +4,286 @@
import { jest } from '@jest/globals';
// Mock modules
jest.mock('commander');
jest.mock('fs');
jest.mock('path');
jest.mock('../../scripts/modules/ui.js', () => ({
displayBanner: jest.fn(),
displayHelp: jest.fn()
// Mock functions that need jest.fn methods
const mockParsePRD = jest.fn().mockResolvedValue(undefined);
const mockDisplayBanner = jest.fn();
const mockDisplayHelp = jest.fn();
const mockLog = jest.fn();
// Mock modules first
jest.mock('fs', () => ({
existsSync: jest.fn(),
readFileSync: jest.fn()
}));
jest.mock('../../scripts/modules/task-manager.js');
jest.mock('../../scripts/modules/dependency-manager.js');
jest.mock('path', () => ({
join: jest.fn((dir, file) => `${dir}/${file}`)
}));
jest.mock('chalk', () => ({
red: jest.fn(text => text),
blue: jest.fn(text => text),
green: jest.fn(text => text),
yellow: jest.fn(text => text),
white: jest.fn(text => ({
bold: jest.fn(text => text)
})),
reset: jest.fn(text => text)
}));
jest.mock('../../scripts/modules/ui.js', () => ({
displayBanner: mockDisplayBanner,
displayHelp: mockDisplayHelp
}));
jest.mock('../../scripts/modules/task-manager.js', () => ({
parsePRD: mockParsePRD
}));
// Add this function before the mock of utils.js
/**
* Convert camelCase to kebab-case
* @param {string} str - String to convert
* @returns {string} kebab-case version of the input
*/
const toKebabCase = (str) => {
return str
.replace(/([a-z0-9])([A-Z])/g, '$1-$2')
.toLowerCase()
.replace(/^-/, ''); // Remove leading hyphen if present
};
/**
* Detect camelCase flags in command arguments
* @param {string[]} args - Command line arguments to check
* @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted
*/
function detectCamelCaseFlags(args) {
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip if it's a single word (no hyphens) or already in kebab-case
if (!flagName.includes('-')) {
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
}
return camelCaseFlags;
}
// Then update the utils.js mock to include these functions
jest.mock('../../scripts/modules/utils.js', () => ({
CONFIG: {
projectVersion: '1.5.0'
},
log: jest.fn()
log: mockLog,
toKebabCase: toKebabCase,
detectCamelCaseFlags: detectCamelCaseFlags
}));
// Import after mocking
import { setupCLI } from '../../scripts/modules/commands.js';
import { program } from 'commander';
// Import all modules after mocking
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { setupCLI } from '../../scripts/modules/commands.js';
// We'll use a simplified, direct test approach instead of Commander mocking
describe('Commands Module', () => {
// Set up spies on the mocked modules
const mockName = jest.spyOn(program, 'name').mockReturnValue(program);
const mockDescription = jest.spyOn(program, 'description').mockReturnValue(program);
const mockVersion = jest.spyOn(program, 'version').mockReturnValue(program);
const mockHelpOption = jest.spyOn(program, 'helpOption').mockReturnValue(program);
const mockAddHelpCommand = jest.spyOn(program, 'addHelpCommand').mockReturnValue(program);
const mockOn = jest.spyOn(program, 'on').mockReturnValue(program);
const mockExistsSync = jest.spyOn(fs, 'existsSync');
const mockReadFileSync = jest.spyOn(fs, 'readFileSync');
const mockJoin = jest.spyOn(path, 'join');
const mockConsoleLog = jest.spyOn(console, 'log').mockImplementation(() => {});
const mockConsoleError = jest.spyOn(console, 'error').mockImplementation(() => {});
const mockExit = jest.spyOn(process, 'exit').mockImplementation(() => {});
beforeEach(() => {
jest.clearAllMocks();
mockExistsSync.mockReturnValue(true);
});
afterAll(() => {
jest.restoreAllMocks();
});
describe('setupCLI function', () => {
test('should return Commander program instance', () => {
const result = setupCLI();
// Verify the program was properly configured
expect(mockName).toHaveBeenCalledWith('dev');
expect(mockDescription).toHaveBeenCalledWith('AI-driven development task management');
expect(mockVersion).toHaveBeenCalled();
expect(mockHelpOption).toHaveBeenCalledWith('-h, --help', 'Display help');
expect(mockAddHelpCommand).toHaveBeenCalledWith(false);
expect(mockOn).toHaveBeenCalled();
expect(result).toBeTruthy();
const program = setupCLI();
expect(program).toBeDefined();
expect(program.name()).toBe('dev');
});
test('should read version from package.json when available', () => {
// Setup mock for package.json existence and content
mockExistsSync.mockReturnValue(true);
mockReadFileSync.mockReturnValue(JSON.stringify({ version: '2.0.0' }));
mockJoin.mockReturnValue('/mock/path/package.json');
mockReadFileSync.mockReturnValue('{"version": "1.0.0"}');
mockJoin.mockReturnValue('package.json');
// Call the setup function
setupCLI();
// Get the version callback function
const versionCallback = mockVersion.mock.calls[0][0];
expect(typeof versionCallback).toBe('function');
// Execute the callback and check the result
const result = versionCallback();
expect(result).toBe('2.0.0');
// Verify the correct functions were called
expect(mockExistsSync).toHaveBeenCalled();
expect(mockReadFileSync).toHaveBeenCalled();
const program = setupCLI();
const version = program._version();
expect(mockReadFileSync).toHaveBeenCalledWith('package.json', 'utf8');
expect(version).toBe('1.0.0');
});
test('should use default version when package.json is not available', () => {
// Setup mock for package.json absence
mockExistsSync.mockReturnValue(false);
// Call the setup function
setupCLI();
// Get the version callback function
const versionCallback = mockVersion.mock.calls[0][0];
expect(typeof versionCallback).toBe('function');
// Execute the callback and check the result
const result = versionCallback();
expect(result).toBe('1.5.0'); // Updated to match the actual CONFIG.projectVersion
expect(mockExistsSync).toHaveBeenCalled();
const program = setupCLI();
const version = program._version();
expect(mockReadFileSync).not.toHaveBeenCalled();
expect(version).toBe('1.5.0');
});
test('should use default version when package.json reading throws an error', () => {
// Setup mock for package.json reading error
mockExistsSync.mockReturnValue(true);
mockReadFileSync.mockImplementation(() => {
throw new Error('Read error');
throw new Error('Invalid JSON');
});
// Call the setup function
setupCLI();
const program = setupCLI();
const version = program._version();
expect(mockReadFileSync).toHaveBeenCalled();
expect(version).toBe('1.5.0');
});
});
// Get the version callback function
const versionCallback = mockVersion.mock.calls[0][0];
expect(typeof versionCallback).toBe('function');
describe('Kebab Case Validation', () => {
test('should detect camelCase flags correctly', () => {
const args = ['node', 'task-master', '--camelCase', '--kebab-case'];
const camelCaseFlags = args.filter(arg =>
arg.startsWith('--') &&
/[A-Z]/.test(arg) &&
!arg.includes('-[A-Z]')
);
expect(camelCaseFlags).toContain('--camelCase');
expect(camelCaseFlags).not.toContain('--kebab-case');
});
// Execute the callback and check the result
const result = versionCallback();
expect(result).toBe('1.5.0'); // Updated to match the actual CONFIG.projectVersion
test('should accept kebab-case flags correctly', () => {
const args = ['node', 'task-master', '--kebab-case'];
const camelCaseFlags = args.filter(arg =>
arg.startsWith('--') &&
/[A-Z]/.test(arg) &&
!arg.includes('-[A-Z]')
);
expect(camelCaseFlags).toHaveLength(0);
});
});
describe('parse-prd command', () => {
// Since mocking Commander is complex, we'll test the action handler directly
// Recreate the action handler logic based on commands.js
async function parsePrdAction(file, options) {
// Use input option if file argument not provided
const inputFile = file || options.input;
const defaultPrdPath = 'scripts/prd.txt';
// If no input file specified, check for default PRD location
if (!inputFile) {
if (fs.existsSync(defaultPrdPath)) {
console.log(chalk.blue(`Using default PRD file: ${defaultPrdPath}`));
const numTasks = parseInt(options.numTasks, 10);
const outputPath = options.output;
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
await mockParsePRD(defaultPrdPath, outputPath, numTasks);
return;
}
console.log(chalk.yellow('No PRD file specified and default PRD file not found at scripts/prd.txt.'));
return;
}
const numTasks = parseInt(options.numTasks, 10);
const outputPath = options.output;
console.log(chalk.blue(`Parsing PRD file: ${inputFile}`));
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
await mockParsePRD(inputFile, outputPath, numTasks);
}
beforeEach(() => {
// Reset the parsePRD mock
mockParsePRD.mockClear();
});
test('should use default PRD path when no arguments provided', async () => {
// Arrange
mockExistsSync.mockReturnValue(true);
// Act - call the handler directly with the right params
await parsePrdAction(undefined, { numTasks: '10', output: 'tasks/tasks.json' });
// Assert
expect(mockExistsSync).toHaveBeenCalledWith('scripts/prd.txt');
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining('Using default PRD file'));
expect(mockParsePRD).toHaveBeenCalledWith(
'scripts/prd.txt',
'tasks/tasks.json',
10 // Default value from command definition
);
});
test('should display help when no arguments and no default PRD exists', async () => {
// Arrange
mockExistsSync.mockReturnValue(false);
// Act - call the handler directly with the right params
await parsePrdAction(undefined, { numTasks: '10', output: 'tasks/tasks.json' });
// Assert
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining('No PRD file specified'));
expect(mockParsePRD).not.toHaveBeenCalled();
});
test('should use explicitly provided file path', async () => {
// Arrange
const testFile = 'test/prd.txt';
// Act - call the handler directly with the right params
await parsePrdAction(testFile, { numTasks: '10', output: 'tasks/tasks.json' });
// Assert
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining(`Parsing PRD file: ${testFile}`));
expect(mockParsePRD).toHaveBeenCalledWith(testFile, 'tasks/tasks.json', 10);
expect(mockExistsSync).not.toHaveBeenCalledWith('scripts/prd.txt');
});
test('should use file path from input option when provided', async () => {
// Arrange
const testFile = 'test/prd.txt';
// Act - call the handler directly with the right params
await parsePrdAction(undefined, { input: testFile, numTasks: '10', output: 'tasks/tasks.json' });
// Assert
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining(`Parsing PRD file: ${testFile}`));
expect(mockParsePRD).toHaveBeenCalledWith(testFile, 'tasks/tasks.json', 10);
expect(mockExistsSync).not.toHaveBeenCalledWith('scripts/prd.txt');
});
test('should respect numTasks and output options', async () => {
// Arrange
const testFile = 'test/prd.txt';
const outputFile = 'custom/output.json';
const numTasks = 15;
// Act - call the handler directly with the right params
await parsePrdAction(testFile, { numTasks: numTasks.toString(), output: outputFile });
// Assert
expect(mockParsePRD).toHaveBeenCalledWith(testFile, outputFile, numTasks);
});
});
});
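Because the tests above call a recreated action handler instead of going through Commander, the corresponding registration in commands.js is not shown here; a sketch consistent with the tested behaviour (option names and defaults taken from the tests, everything else assumed) could look like:

```javascript
import fs from 'fs';
import { Command } from 'commander';
// Assumed relative path; commands.js and task-manager.js are siblings in scripts/modules/
import { parsePRD } from './task-manager.js';

const program = new Command();

program
  .command('parse-prd [file]')
  .description('Parse a PRD document and generate tasks')
  .option('-i, --input <file>', 'PRD file to parse')
  .option('-n, --num-tasks <number>', 'Number of tasks to generate', '10')
  .option('-o, --output <file>', 'Output file', 'tasks/tasks.json')
  .action(async (file, options) => {
    const inputFile = file || options.input;
    const defaultPrdPath = 'scripts/prd.txt';
    const target = inputFile || (fs.existsSync(defaultPrdPath) ? defaultPrdPath : null);

    if (!target) {
      console.log('No PRD file specified and default PRD file not found at scripts/prd.txt.');
      return;
    }
    await parsePRD(target, options.output, parseInt(options.numTasks, 10));
  });
```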

146
tests/unit/init.test.js Normal file
View File

@@ -0,0 +1,146 @@
import { jest } from '@jest/globals';
import fs from 'fs';
import path from 'path';
import os from 'os';
// Mock external modules
jest.mock('child_process', () => ({
execSync: jest.fn()
}));
jest.mock('readline', () => ({
createInterface: jest.fn(() => ({
question: jest.fn(),
close: jest.fn()
}))
}));
// Mock figlet for banner display
jest.mock('figlet', () => ({
default: {
textSync: jest.fn(() => 'Task Master')
}
}));
// Mock console methods
jest.mock('console', () => ({
log: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
clear: jest.fn()
}));
describe('Windsurf Rules File Handling', () => {
let tempDir;
beforeEach(() => {
jest.clearAllMocks();
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('.windsurfrules')) {
return 'Existing windsurf rules content';
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
// Mock specific file existence checks
if (filePath.toString().includes('package.json')) {
return true;
}
return false;
});
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
});
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
// Test function that simulates the behavior of .windsurfrules handling
function mockCopyTemplateFile(templateName, targetPath) {
if (templateName === 'windsurfrules') {
const filename = path.basename(targetPath);
if (filename === '.windsurfrules') {
if (fs.existsSync(targetPath)) {
// Should append content when file exists
const existingContent = fs.readFileSync(targetPath, 'utf8');
const updatedContent = existingContent.trim() +
'\n\n# Added by Claude Task Master - Development Workflow Rules\n\n' +
'New content';
fs.writeFileSync(targetPath, updatedContent);
return;
}
}
// If file doesn't exist, create it normally
fs.writeFileSync(targetPath, 'New content');
}
}
test('creates .windsurfrules when it does not exist', () => {
// Arrange
const targetPath = path.join(tempDir, '.windsurfrules');
// Act
mockCopyTemplateFile('windsurfrules', targetPath);
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(targetPath, 'New content');
});
test('appends content to existing .windsurfrules', () => {
// Arrange
const targetPath = path.join(tempDir, '.windsurfrules');
const existingContent = 'Existing windsurf rules content';
// Override the existsSync mock just for this test
fs.existsSync.mockReturnValueOnce(true); // Target file exists
fs.readFileSync.mockReturnValueOnce(existingContent);
// Act
mockCopyTemplateFile('windsurfrules', targetPath);
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
targetPath,
expect.stringContaining(existingContent)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
targetPath,
expect.stringContaining('Added by Claude Task Master')
);
});
test('includes .windsurfrules in project structure creation', () => {
// This test verifies the expected behavior by using a mock implementation
// that represents how createProjectStructure should work
// Mock implementation of createProjectStructure
function mockCreateProjectStructure(projectName) {
// Copy template files including .windsurfrules
mockCopyTemplateFile('windsurfrules', path.join(tempDir, '.windsurfrules'));
}
// Act - call our mock implementation
mockCreateProjectStructure('test-project');
// Assert - verify that .windsurfrules was created
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.windsurfrules'),
expect.any(String)
);
});
});

View File

@@ -0,0 +1,120 @@
/**
* Kebab case validation tests
*/
import { jest } from '@jest/globals';
import { toKebabCase } from '../../scripts/modules/utils.js';
// Create a test implementation of detectCamelCaseFlags
function testDetectCamelCaseFlags(args) {
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
}
describe('Kebab Case Validation', () => {
describe('toKebabCase', () => {
test('should convert camelCase to kebab-case', () => {
expect(toKebabCase('promptText')).toBe('prompt-text');
expect(toKebabCase('userID')).toBe('user-id');
expect(toKebabCase('numTasks')).toBe('num-tasks');
});
test('should handle already kebab-case strings', () => {
expect(toKebabCase('already-kebab-case')).toBe('already-kebab-case');
expect(toKebabCase('kebab-case')).toBe('kebab-case');
});
test('should handle single words', () => {
expect(toKebabCase('single')).toBe('single');
expect(toKebabCase('file')).toBe('file');
});
});
describe('detectCamelCaseFlags', () => {
test('should properly detect camelCase flags', () => {
const args = ['node', 'task-master', 'add-task', '--promptText=test', '--userID=123'];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(2);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
expect(flags).toContainEqual({
original: 'userID',
kebabCase: 'user-id'
});
});
test('should not flag kebab-case or lowercase flags', () => {
const args = ['node', 'task-master', 'add-task', '--prompt=test', '--user-id=123'];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(0);
});
test('should not flag any single-word flags regardless of case', () => {
const args = [
'node',
'task-master',
'add-task',
'--prompt=test', // lowercase
'--PROMPT=test', // uppercase
'--Prompt=test', // mixed case
'--file=test', // lowercase
'--FILE=test', // uppercase
'--File=test' // mixed case
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(0);
});
test('should handle mixed case flags correctly', () => {
const args = [
'node',
'task-master',
'add-task',
'--prompt=test', // single word, should pass
'--promptText=test', // camelCase, should flag
'--prompt-text=test', // kebab-case, should pass
'--ID=123', // single word, should pass
'--userId=123', // camelCase, should flag
'--user-id=123' // kebab-case, should pass
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(2);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
expect(flags).toContainEqual({
original: 'userId',
kebabCase: 'user-id'
});
});
});
});
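As a usage note, the helpers exercised above lend themselves to a small pre-parse guard; the wiring below is hypothetical and only shows how detectCamelCaseFlags could be consumed:

```javascript
// Hypothetical guard run before Commander parses argv; the import path is shown
// relative to the tests directory and utils.js is assumed to export the helper.
import { detectCamelCaseFlags } from '../../scripts/modules/utils.js';

function rejectCamelCaseFlags(argv = process.argv) {
  const offenders = detectCamelCaseFlags(argv);
  if (offenders.length > 0) {
    for (const { original, kebabCase } of offenders) {
      console.error(`Use --${kebabCase} instead of --${original}`);
    }
    process.exit(1);
  }
}
```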

View File

@@ -11,11 +11,13 @@ const mockReadFileSync = jest.fn();
const mockExistsSync = jest.fn();
const mockMkdirSync = jest.fn();
const mockDirname = jest.fn();
const mockCallClaude = jest.fn();
const mockCallClaude = jest.fn().mockResolvedValue({ tasks: [] }); // Default resolved value
const mockCallPerplexity = jest.fn().mockResolvedValue({ tasks: [] }); // Default resolved value
const mockWriteJSON = jest.fn();
const mockGenerateTaskFiles = jest.fn();
const mockWriteFileSync = jest.fn();
const mockFormatDependenciesWithStatus = jest.fn();
const mockDisplayTaskList = jest.fn();
const mockValidateAndFixDependencies = jest.fn();
const mockReadJSON = jest.fn();
const mockLog = jest.fn();
@@ -35,15 +37,11 @@ jest.mock('path', () => ({
join: jest.fn((dir, file) => `${dir}/${file}`)
}));
// Mock AI services
jest.mock('../../scripts/modules/ai-services.js', () => ({
callClaude: mockCallClaude
}));
// Mock ui
jest.mock('../../scripts/modules/ui.js', () => ({
formatDependenciesWithStatus: mockFormatDependenciesWithStatus,
displayBanner: jest.fn()
displayBanner: jest.fn(),
displayTaskList: mockDisplayTaskList
}));
// Mock dependency-manager
@@ -59,6 +57,12 @@ jest.mock('../../scripts/modules/utils.js', () => ({
log: mockLog
}));
// Mock AI services - This is the correct way to mock the module
jest.mock('../../scripts/modules/ai-services.js', () => ({
callClaude: mockCallClaude,
callPerplexity: mockCallPerplexity
}));
// Mock the task-manager module itself to control what gets imported
jest.mock('../../scripts/modules/task-manager.js', () => {
// Get the original module to preserve function implementations
@@ -93,6 +97,130 @@ const testParsePRD = async (prdPath, outputPath, numTasks) => {
}
};
// Create a simplified version of setTaskStatus for testing
const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
// Handle multiple task IDs (comma-separated)
const taskIds = taskIdInput.split(',').map(id => id.trim());
const updatedTasks = [];
// Update each task
for (const id of taskIds) {
testUpdateSingleTaskStatus(tasksData, id, newStatus);
updatedTasks.push(id);
}
return tasksData;
};
// Simplified version of updateSingleTaskStatus for testing
const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
// Check if it's a subtask (e.g., "1.2")
if (taskIdInput.includes('.')) {
const [parentId, subtaskId] = taskIdInput.split('.').map(id => parseInt(id, 10));
// Find the parent task
const parentTask = tasksData.tasks.find(t => t.id === parentId);
if (!parentTask) {
throw new Error(`Parent task ${parentId} not found`);
}
// Find the subtask
if (!parentTask.subtasks) {
throw new Error(`Parent task ${parentId} has no subtasks`);
}
const subtask = parentTask.subtasks.find(st => st.id === subtaskId);
if (!subtask) {
throw new Error(`Subtask ${subtaskId} not found in parent task ${parentId}`);
}
// Update the subtask status
subtask.status = newStatus;
// Check if all subtasks are done (if setting to 'done')
if (newStatus.toLowerCase() === 'done' || newStatus.toLowerCase() === 'completed') {
const allSubtasksDone = parentTask.subtasks.every(st =>
st.status === 'done' || st.status === 'completed');
// For testing, we don't need to output suggestions
}
} else {
// Handle regular task
const taskId = parseInt(taskIdInput, 10);
const task = tasksData.tasks.find(t => t.id === taskId);
if (!task) {
throw new Error(`Task ${taskId} not found`);
}
// Update the task status
task.status = newStatus;
// If marking as done, also mark all subtasks as done
if ((newStatus.toLowerCase() === 'done' || newStatus.toLowerCase() === 'completed') &&
task.subtasks && task.subtasks.length > 0) {
task.subtasks.forEach(subtask => {
subtask.status = newStatus;
});
}
}
return true;
};
// Create a simplified version of listTasks for testing
const testListTasks = (tasksData, statusFilter, withSubtasks = false) => {
// Filter tasks by status if specified
const filteredTasks = statusFilter
? tasksData.tasks.filter(task =>
task.status && task.status.toLowerCase() === statusFilter.toLowerCase())
: tasksData.tasks;
// Call the displayTaskList mock for testing
mockDisplayTaskList(tasksData, statusFilter, withSubtasks);
return {
filteredTasks,
tasksData
};
};
// Create a simplified version of addTask for testing
const testAddTask = (tasksData, taskPrompt, dependencies = [], priority = 'medium') => {
// Create a new task with a higher ID
const highestId = Math.max(...tasksData.tasks.map(t => t.id));
const newId = highestId + 1;
// Create mock task based on what would be generated by AI
const newTask = {
id: newId,
title: `Task from prompt: ${taskPrompt.substring(0, 20)}...`,
description: `Task generated from: ${taskPrompt}`,
status: 'pending',
dependencies: dependencies,
priority: priority,
details: `Implementation details for task generated from prompt: ${taskPrompt}`,
testStrategy: 'Write unit tests to verify functionality'
};
// Check dependencies
for (const depId of dependencies) {
const dependency = tasksData.tasks.find(t => t.id === depId);
if (!dependency) {
throw new Error(`Dependency task ${depId} not found`);
}
}
// Add task to tasks array
tasksData.tasks.push(newTask);
return {
updatedData: tasksData,
newTask
};
};
// Import after mocks
import * as taskManager from '../../scripts/modules/task-manager.js';
import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
@@ -237,58 +365,137 @@ describe('Task Manager Module', () => {
});
});
// Skipped tests for analyzeTaskComplexity
describe.skip('analyzeTaskComplexity function', () => {
// These tests are skipped because they require complex mocking
// but document what should be tested
// Setup common test variables
const tasksPath = 'tasks/tasks.json';
const reportPath = 'scripts/task-complexity-report.json';
const thresholdScore = 5;
const baseOptions = {
file: tasksPath,
output: reportPath,
threshold: thresholdScore.toString(),
research: false // Default to false
};
test('should handle valid JSON response from LLM', async () => {
// This test would verify that:
// 1. The function properly calls the AI model
// 2. It correctly parses a valid JSON response
// 3. It generates a properly formatted complexity report
// 4. The report includes all analyzed tasks with their complexity scores
expect(true).toBe(true);
// Sample response structure (simplified for these tests)
const sampleApiResponse = {
tasks: [
{ id: 1, complexity: 3, subtaskCount: 2 },
{ id: 2, complexity: 7, subtaskCount: 5 },
{ id: 3, complexity: 9, subtaskCount: 8 }
]
};
beforeEach(() => {
jest.clearAllMocks();
// Setup default mock implementations
mockReadJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));
mockWriteJSON.mockImplementation((path, data) => data); // Return data for chaining/assertions
// Just set the mock resolved values directly - no spies needed
mockCallClaude.mockResolvedValue(sampleApiResponse);
mockCallPerplexity.mockResolvedValue(sampleApiResponse);
// Mock console methods to prevent test output clutter
jest.spyOn(console, 'log').mockImplementation(() => {});
jest.spyOn(console, 'error').mockImplementation(() => {});
});
test('should handle and fix malformed JSON with unterminated strings', async () => {
// This test would verify that:
// 1. The function can handle JSON with unterminated strings
// 2. It applies regex fixes to repair the malformed JSON
// 3. It still produces a valid report despite receiving bad JSON
expect(true).toBe(true);
afterEach(() => {
// Restore console methods
console.log.mockRestore();
console.error.mockRestore();
});
test('should handle missing tasks in the response', async () => {
// This test would verify that:
// 1. When the AI response is missing some tasks
// 2. The function detects the missing tasks
// 3. It attempts to analyze just those missing tasks
// 4. The final report includes all tasks that could be analyzed
expect(true).toBe(true);
test('should call Claude when research flag is false', async () => {
// Arrange
const options = { ...baseOptions, research: false };
// Act
await taskManager.analyzeTaskComplexity(options);
// Assert
expect(mockCallClaude).toHaveBeenCalled();
expect(mockCallPerplexity).not.toHaveBeenCalled();
expect(mockWriteJSON).toHaveBeenCalledWith(reportPath, expect.any(Object));
});
test('should use Perplexity research when research flag is set', async () => {
// This test would verify that:
// 1. The function uses Perplexity API when the research flag is set
// 2. It correctly formats the prompt for Perplexity
// 3. It properly handles the Perplexity response
expect(true).toBe(true);
test('should call Perplexity when research flag is true', async () => {
// Arrange
const options = { ...baseOptions, research: true };
// Act
await taskManager.analyzeTaskComplexity(options);
// Assert
expect(mockCallPerplexity).toHaveBeenCalled();
expect(mockCallClaude).not.toHaveBeenCalled();
expect(mockWriteJSON).toHaveBeenCalledWith(reportPath, expect.any(Object));
});
test('should fall back to Claude when Perplexity is unavailable', async () => {
// This test would verify that:
// 1. The function falls back to Claude when Perplexity API is not available
// 2. It handles the fallback gracefully
// 3. It still produces a valid report using Claude
expect(true).toBe(true);
test('should handle valid JSON response from LLM (Claude)', async () => {
// Arrange
const options = { ...baseOptions, research: false };
// Act
await taskManager.analyzeTaskComplexity(options);
// Assert
expect(mockReadJSON).toHaveBeenCalledWith(tasksPath);
expect(mockCallClaude).toHaveBeenCalled();
expect(mockCallPerplexity).not.toHaveBeenCalled();
expect(mockWriteJSON).toHaveBeenCalledWith(
reportPath,
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({ id: 1 })
])
})
);
expect(mockLog).toHaveBeenCalledWith('info', expect.stringContaining('Successfully analyzed'));
});
test('should process multiple tasks in parallel', async () => {
// This test would verify that:
// 1. The function can analyze multiple tasks efficiently
// 2. It correctly aggregates the results
expect(true).toBe(true);
test('should handle and fix malformed JSON string response (Claude)', async () => {
// Arrange
const malformedJsonResponse = `{"tasks": [{"id": 1, "complexity": 3, "subtaskCount: 2}]}`;
mockCallClaude.mockResolvedValueOnce(malformedJsonResponse);
const options = { ...baseOptions, research: false };
// Act
await taskManager.analyzeTaskComplexity(options);
// Assert
expect(mockCallClaude).toHaveBeenCalled();
expect(mockCallPerplexity).not.toHaveBeenCalled();
expect(mockWriteJSON).toHaveBeenCalled();
expect(mockLog).toHaveBeenCalledWith('warn', expect.stringContaining('Malformed JSON'));
});
test('should handle missing tasks in the response (Claude)', async () => {
// Arrange
const incompleteResponse = { tasks: [sampleApiResponse.tasks[0]] };
mockCallClaude.mockResolvedValueOnce(incompleteResponse);
const missingTaskResponse = { tasks: [sampleApiResponse.tasks[1], sampleApiResponse.tasks[2]] };
mockCallClaude.mockResolvedValueOnce(missingTaskResponse);
const options = { ...baseOptions, research: false };
// Act
await taskManager.analyzeTaskComplexity(options);
// Assert
expect(mockCallClaude).toHaveBeenCalledTimes(2);
expect(mockCallPerplexity).not.toHaveBeenCalled();
expect(mockWriteJSON).toHaveBeenCalledWith(
reportPath,
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({ id: 1 }),
expect.objectContaining({ id: 2 }),
expect.objectContaining({ id: 3 })
])
})
);
});
});
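A condensed sketch of the model-selection branching these tests pin down, including the Perplexity-unavailable fallback described in the earlier placeholder tests; the function name and injected dependencies are assumptions:

```javascript
// Assumed control flow; the real analyzeTaskComplexity implementation may differ.
async function pickComplexityAnalysis(options, { callClaude, callPerplexity, log }) {
  if (options.research) {
    try {
      return await callPerplexity(options);
    } catch (err) {
      log('warn', `Perplexity unavailable (${err.message}), falling back to Claude`);
      return callClaude(options);
    }
  }
  return callClaude(options);
}
```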
@@ -546,125 +753,163 @@ describe('Task Manager Module', () => {
});
});
describe.skip('setTaskStatus function', () => {
describe('setTaskStatus function', () => {
test('should update task status in tasks.json', async () => {
// This test would verify that:
// 1. The function reads the tasks file correctly
// 2. It finds the target task by ID
// 3. It updates the task status
// 4. It writes the updated tasks back to the file
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
const updatedData = testSetTaskStatus(testTasksData, '2', 'done');
// Assert
expect(updatedData.tasks[1].id).toBe(2);
expect(updatedData.tasks[1].status).toBe('done');
});
test('should update subtask status when using dot notation', async () => {
// This test would verify that:
// 1. The function correctly parses the subtask ID in dot notation
// 2. It finds the parent task and subtask
// 3. It updates the subtask status
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
const updatedData = testSetTaskStatus(testTasksData, '3.1', 'done');
// Assert
const subtaskParent = updatedData.tasks.find(t => t.id === 3);
expect(subtaskParent).toBeDefined();
expect(subtaskParent.subtasks[0].status).toBe('done');
});
test('should update multiple tasks when given comma-separated IDs', async () => {
// This test would verify that:
// 1. The function handles comma-separated task IDs
// 2. It updates all specified tasks
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
const updatedData = testSetTaskStatus(testTasksData, '1,2', 'pending');
// Assert
expect(updatedData.tasks[0].status).toBe('pending');
expect(updatedData.tasks[1].status).toBe('pending');
});
test('should automatically mark subtasks as done when parent is marked done', async () => {
// This test would verify that:
// 1. When a parent task is marked as done
// 2. All its subtasks are also marked as done
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
const updatedData = testSetTaskStatus(testTasksData, '3', 'done');
// Assert
const parentTask = updatedData.tasks.find(t => t.id === 3);
expect(parentTask.status).toBe('done');
expect(parentTask.subtasks[0].status).toBe('done');
expect(parentTask.subtasks[1].status).toBe('done');
});
test('should suggest updating parent task when all subtasks are done', async () => {
// This test would verify that:
// 1. When all subtasks of a parent are marked as done
// 2. The function suggests updating the parent task status
expect(true).toBe(true);
});
test('should throw error for non-existent task ID', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
test('should handle non-existent task ID', async () => {
// This test would verify that:
// 1. The function throws an error for non-existent task ID
// 2. It provides a helpful error message
expect(true).toBe(true);
// Assert
expect(() => testSetTaskStatus(testTasksData, '99', 'done')).toThrow('Task 99 not found');
});
});
describe.skip('updateSingleTaskStatus function', () => {
describe('updateSingleTaskStatus function', () => {
test('should update regular task status', async () => {
// This test would verify that:
// 1. The function correctly updates a regular task's status
// 2. It handles the task data properly
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
const result = testUpdateSingleTaskStatus(testTasksData, '2', 'done');
// Assert
expect(result).toBe(true);
expect(testTasksData.tasks[1].status).toBe('done');
});
test('should update subtask status', async () => {
// This test would verify that:
// 1. The function correctly updates a subtask's status
// 2. It finds the parent task and subtask properly
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
const result = testUpdateSingleTaskStatus(testTasksData, '3.1', 'done');
// Assert
expect(result).toBe(true);
expect(testTasksData.tasks[2].subtasks[0].status).toBe('done');
});
test('should handle parent tasks without subtasks', async () => {
// This test would verify that:
// 1. The function handles attempts to update subtasks when none exist
// 2. It throws an appropriate error
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Remove subtasks from task 3
const taskWithoutSubtasks = { ...testTasksData.tasks[2] };
delete taskWithoutSubtasks.subtasks;
testTasksData.tasks[2] = taskWithoutSubtasks;
// Assert
expect(() => testUpdateSingleTaskStatus(testTasksData, '3.1', 'done')).toThrow('has no subtasks');
});
test('should handle non-existent subtask ID', async () => {
// This test would verify that:
// 1. The function handles attempts to update non-existent subtasks
// 2. It throws an appropriate error
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Assert
expect(() => testUpdateSingleTaskStatus(testTasksData, '3.99', 'done')).toThrow('Subtask 99 not found');
});
});
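// The tests above rely on a testUpdateSingleTaskStatus helper. A plausible sketch,
// inferred only from these assertions (hypothetical, and deliberately simpler than
// the real updateSingleTaskStatus implementation):
function testUpdateSingleTaskStatus(tasksData, taskId, newStatus) {
  if (String(taskId).includes('.')) {
    const [parentId, subtaskId] = String(taskId).split('.').map(Number);
    const parent = tasksData.tasks.find((t) => t.id === parentId);
    if (!parent) throw new Error(`Task ${parentId} not found`);
    if (!Array.isArray(parent.subtasks) || parent.subtasks.length === 0) {
      throw new Error(`Task ${parentId} has no subtasks`);
    }
    const subtask = parent.subtasks.find((st) => st.id === subtaskId);
    if (!subtask) throw new Error(`Subtask ${subtaskId} not found`);
    subtask.status = newStatus;
  } else {
    const task = tasksData.tasks.find((t) => t.id === Number(taskId));
    if (!task) throw new Error(`Task ${taskId} not found`);
    task.status = newStatus;
  }
  return true; // the tests expect a boolean success flag
}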
describe.skip('listTasks function', () => {
test('should display all tasks when no filter is provided', () => {
// This test would verify that:
// 1. The function reads the tasks file correctly
// 2. It displays all tasks without filtering
// 3. It formats the output correctly
expect(true).toBe(true);
describe('listTasks function', () => {
test('should display all tasks when no filter is provided', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
const result = testListTasks(testTasksData);
// Assert
expect(result.filteredTasks.length).toBe(testTasksData.tasks.length);
expect(mockDisplayTaskList).toHaveBeenCalledWith(testTasksData, undefined, false);
});
test('should filter tasks by status when filter is provided', () => {
// This test would verify that:
// 1. The function filters tasks by the provided status
// 2. It only displays tasks matching the filter
expect(true).toBe(true);
test('should filter tasks by status when filter is provided', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
const statusFilter = 'done';
// Act
const result = testListTasks(testTasksData, statusFilter);
// Assert
expect(result.filteredTasks.length).toBe(
testTasksData.tasks.filter(t => t.status === statusFilter).length
);
expect(mockDisplayTaskList).toHaveBeenCalledWith(testTasksData, statusFilter, false);
});
test('should display subtasks when withSubtasks flag is true', () => {
// This test would verify that:
// 1. The function displays subtasks when the flag is set
// 2. It formats subtasks correctly in the output
expect(true).toBe(true);
test('should display subtasks when withSubtasks flag is true', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Act
testListTasks(testTasksData, undefined, true);
// Assert
expect(mockDisplayTaskList).toHaveBeenCalledWith(testTasksData, undefined, true);
});
test('should display completion statistics', () => {
// This test would verify that:
// 1. The function calculates completion statistics correctly
// 2. It displays the progress bars and percentages
expect(true).toBe(true);
});
test('should identify and display the next task to work on', () => {
// This test would verify that:
// 1. The function correctly identifies the next task to work on
// 2. It displays the next task prominently
expect(true).toBe(true);
});
test('should handle empty tasks array', () => {
// This test would verify that:
// 1. The function handles an empty tasks array gracefully
// 2. It displays an appropriate message
expect(true).toBe(true);
test('should handle empty tasks array', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(emptySampleTasks));
// Act
const result = testListTasks(testTasksData);
// Assert
expect(result.filteredTasks.length).toBe(0);
expect(mockDisplayTaskList).toHaveBeenCalledWith(testTasksData, undefined, false);
});
});
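// These assertions assume a testListTasks helper plus a mockDisplayTaskList jest.fn()
// spy defined earlier in the file. A minimal, hypothetical sketch of the helper:
function testListTasks(tasksData, statusFilter, withSubtasks = false) {
  // Filter in memory; rendering is delegated to the mocked display function so the
  // tests can assert on how it was called
  const filteredTasks = statusFilter
    ? tasksData.tasks.filter((t) => t.status === statusFilter)
    : tasksData.tasks;
  mockDisplayTaskList(tasksData, statusFilter, withSubtasks);
  return { filteredTasks };
}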
@@ -884,48 +1129,51 @@ describe('Task Manager Module', () => {
});
});
describe.skip('addTask function', () => {
describe('addTask function', () => {
test('should add a new task using AI', async () => {
// This test would verify that:
// 1. The function reads the tasks file correctly
// 2. It determines the next available task ID
// 3. It calls the AI model with the correct prompt
// 4. It creates a properly structured task object
// 5. It adds the task to the tasks array
// 6. It writes the updated tasks back to the file
expect(true).toBe(true);
});
test('should handle Claude streaming responses', async () => {
// This test would verify that:
// 1. The function correctly handles streaming API calls
// 2. It processes the stream data properly
// 3. It combines the chunks into a complete response
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
const prompt = "Create a new authentication system";
// Act
const result = testAddTask(testTasksData, prompt);
// Assert
expect(result.newTask.id).toBe(Math.max(...sampleTasks.tasks.map(t => t.id)) + 1);
expect(result.newTask.status).toBe('pending');
expect(result.newTask.title).toContain(prompt.substring(0, 20));
expect(testTasksData.tasks.length).toBe(sampleTasks.tasks.length + 1);
});
test('should validate dependencies when adding a task', async () => {
// This test would verify that:
// 1. The function validates provided dependencies
// 2. It removes invalid dependencies
// 3. It logs appropriate messages
expect(true).toBe(true);
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
const prompt = "Create a new authentication system";
const validDependencies = [1, 2]; // These exist in sampleTasks
// Act
const result = testAddTask(testTasksData, prompt, validDependencies);
// Assert
expect(result.newTask.dependencies).toEqual(validDependencies);
// Test invalid dependency
expect(() => {
testAddTask(testTasksData, prompt, [999]); // Non-existent task ID
}).toThrow('Dependency task 999 not found');
});
test('should handle malformed AI responses', async () => {
// This test would verify that:
// 1. The function handles malformed JSON in AI responses
// 2. It provides appropriate error messages
// 3. It exits gracefully
expect(true).toBe(true);
});
test('should use existing task context for better generation', async () => {
// This test would verify that:
// 1. The function uses existing tasks as context
// 2. It provides dependency context when dependencies are specified
// 3. It generates tasks that fit with the existing project
expect(true).toBe(true);
test('should use specified priority', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
const prompt = "Create a new authentication system";
const priority = "high";
// Act
const result = testAddTask(testTasksData, prompt, [], priority);
// Assert
expect(result.newTask.priority).toBe(priority);
});
});
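// A hypothetical sketch of the testAddTask helper consistent with the assertions
// above (the real helper would also stub the AI call; the default priority value
// here is an assumption):
function testAddTask(tasksData, prompt, dependencies = [], priority = 'medium') {
  // Reject dependencies that do not reference an existing task
  for (const depId of dependencies) {
    if (!tasksData.tasks.some((t) => t.id === depId)) {
      throw new Error(`Dependency task ${depId} not found`);
    }
  }
  const newTask = {
    id: Math.max(...tasksData.tasks.map((t) => t.id)) + 1,
    title: `Task from prompt: ${prompt.substring(0, 20)}`, // stand-in for the AI-generated title
    status: 'pending',
    dependencies,
    priority
  };
  tasksData.tasks.push(newTask);
  return { newTask };
}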
View File
@@ -20,9 +20,13 @@ import {
formatTaskId,
findCycles,
CONFIG,
LOG_LEVELS
LOG_LEVELS,
findTaskById,
toKebabCase
} from '../../scripts/modules/utils.js';
// Skip the import of detectCamelCaseFlags as we'll implement our own version for testing
// Mock chalk functions
jest.mock('chalk', () => ({
gray: jest.fn(text => `gray:${text}`),
@@ -32,6 +36,33 @@ jest.mock('chalk', () => ({
green: jest.fn(text => `green:${text}`)
}));
// Test implementation of detectCamelCaseFlags
function testDetectCamelCaseFlags(args) {
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
}
describe('Utils Module', () => {
// Setup fs mocks for each test
let fsReadFileSyncSpy;
@@ -478,3 +509,46 @@ describe('Utils Module', () => {
});
});
});
describe('CLI Flag Format Validation', () => {
test('toKebabCase should convert camelCase to kebab-case', () => {
expect(toKebabCase('promptText')).toBe('prompt-text');
expect(toKebabCase('userID')).toBe('user-id');
expect(toKebabCase('numTasks')).toBe('num-tasks');
expect(toKebabCase('alreadyKebabCase')).toBe('already-kebab-case');
});
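// For reference, one implementation consistent with the expectations above; the
// actual toKebabCase in utils.js may differ. It is named toKebabCaseSketch here to
// avoid clashing with the imported function, and includes the acronym handling
// needed for 'userID' -> 'user-id'.
const toKebabCaseSketch = (str) =>
  str
    .replace(/([a-z0-9])([A-Z])/g, '$1-$2') // wordBoundary -> word-Boundary
    .replace(/([A-Z]+)([A-Z][a-z])/g, '$1-$2') // split trailing acronyms, e.g. XMLParser -> XML-Parser
    .toLowerCase();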
test('detectCamelCaseFlags should identify camelCase flags', () => {
const args = ['node', 'task-master', 'add-task', '--promptText=test', '--userID=123'];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(2);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
expect(flags).toContainEqual({
original: 'userID',
kebabCase: 'user-id'
});
});
test('detectCamelCaseFlags should not flag kebab-case flags', () => {
const args = ['node', 'task-master', 'add-task', '--prompt-text=test', '--user-id=123'];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(0);
});
test('detectCamelCaseFlags should respect single-word flags', () => {
const args = ['node', 'task-master', 'add-task', '--prompt=test', '--file=test.json', '--priority=high', '--promptText=test'];
const flags = testDetectCamelCaseFlags(args);
// Should only flag promptText, not the single-word flags
expect(flags).toHaveLength(1);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
});
});