Compare commits


8 Commits

Author SHA1 Message Date
Parththipan Thaniperumkarunai
aa3713f4e4 fix(expand): Enhance context handling in expandAllTasks function
- Added `tag` to context destructuring for better context management.
- Updated `readJSON` call to include `contextTag` for improved data integrity.
- Ensured the correct tag is passed during task expansion to prevent tag corruption.
2025-06-20 16:06:28 +03:00
Ralph Khreish
1ab8fd5274 chore: fix package.json 2025-06-20 16:06:28 +03:00
github-actions[bot]
443a55e54d Version Packages 2025-06-20 16:06:28 +03:00
Parthy
9d3c942039 Delete .changeset/eighty-breads-wonder.md 2025-06-20 16:06:28 +03:00
Parththipan Thaniperumkarunai
75b75c3251 chore(changeset): combine duplicate changesets for expand tag corruption fix
Merge eighty-breads-wonder.md into bright-llamas-enter.md to consolidate
the expand command fix and its comprehensive E2E testing enhancements
into a single changeset entry.
2025-06-20 16:06:28 +03:00
Parththipan Thaniperumkarunai
72ffdd53ea fix(changeset): Update E2E test improvements changeset to properly reflect tag corruption fix verification 2025-06-20 16:06:28 +03:00
Parththipan Thaniperumkarunai
6c619cf62b test(e2e): Add comprehensive tag-aware expand testing to verify tag corruption fix
- Add new test section for feature-expand tag creation and testing
- Verify tag preservation during expand, force expand, and expand --all operations
- Test that master tag remains intact and feature-expand tag receives subtasks correctly
- Fix file path references to use correct .taskmaster/tasks/tasks.json location
- Fix config file check to use .taskmaster/config.json instead of .taskmasterconfig
- All tag corruption verification tests pass successfully in E2E test
2025-06-20 16:06:28 +03:00
Parththipan Thaniperumkarunai
554c1247f2 fix(expand): Fix tag corruption in expand command
- Fix tag parameter passing through MCP expand-task flow
- Add tag parameter to direct function and tool registration
- Fix contextGatherer method name from _buildDependencyContext to _buildDependencyGraphs
- Add comprehensive test coverage for tag handling in expand-task
- Ensures tagged task structure is preserved during expansion
- Prevents corruption when tag is undefined
Fixes expand command causing tag corruption in tagged task lists. All existing tests pass and new test coverage added.
2025-06-20 16:06:28 +03:00
14 changed files with 1983 additions and 66 deletions
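
Note: the fix only makes sense against Task Master's tagged tasks.json layout, where each top-level key is a tag that holds its own task list. A minimal sketch of that layout and of the corruption mode the commits describe (illustrative only; metadata fields beyond tasks are omitted):

// Tagged tasks.json layout (sketch), as exercised by the tests in this diff:
const taggedFile = {
  master: { tasks: [{ id: 1, title: 'Core task', subtasks: [] }] },
  'feature-expand': { tasks: [{ id: 1, title: 'Feature task', subtasks: [] }] }
};

// The bug: load one tag's view of this file, then save it back without the
// tag, and the result collapses to a flat { tasks: [...] } object -- every
// other tag is silently dropped. The changes below thread the tag through
// readJSON and writeJSON so each write lands back in the right slot.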

View File

@@ -0,0 +1,12 @@
---
"task-master-ai": patch
---
Fix expand command preserving tagged task structure and preventing data corruption
- Enhance E2E tests with comprehensive tag-aware expand testing to verify tag corruption fix
- Add new test section for feature-expand tag creation and testing during expand operations
- Verify tag preservation during expand, force expand, and expand --all operations
- Test that master tag remains intact while feature-expand tag receives subtasks correctly
- Fix file path references to use correct .taskmaster/config.json and .taskmaster/tasks/tasks.json locations
- All tag corruption verification tests pass successfully, confirming the expand command tag corruption bug fix works as expected
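
The E2E checks listed above reduce to a single invariant: the set of top-level tag keys in tasks.json is identical before and after an expand. A hypothetical Node one-off asserting the same thing (the task ID is a placeholder; the file path and CLI command are the ones used in the E2E script later in this diff):

import { execSync } from 'node:child_process';
import { readFileSync } from 'node:fs';

// Top-level keys of the tagged tasks file are the tag names.
const tagKeys = () =>
  Object.keys(JSON.parse(readFileSync('.taskmaster/tasks/tasks.json', 'utf8'))).sort();

const before = tagKeys();
execSync('task-master expand --tag=feature-expand --id=11', { stdio: 'inherit' }); // --id is a placeholder
const after = tagKeys();
if (before.join() !== after.join())
  throw new Error(`Tag corruption detected: ${before} -> ${after}`);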

View File

@@ -1,5 +0,0 @@
---
"task-master-ai": patch
---
Fix contextGatherer bug when adding a task `Cannot read properties of undefined (reading 'forEach')`

View File

@@ -1,5 +1,11 @@
# task-master-ai
+ ## 0.17.1
+ ### Patch Changes
+ - [#789](https://github.com/eyaltoledano/claude-task-master/pull/789) [`8cde6c2`](https://github.com/eyaltoledano/claude-task-master/commit/8cde6c27087f401d085fe267091ae75334309d96) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix contextGatherer bug when adding a task `Cannot read properties of undefined (reading 'forEach')`
## 0.17.0
### Minor Changes

View File

@@ -26,6 +26,7 @@ import { createLogWrapper } from '../../tools/utils.js';
* @param {string} [args.prompt] - Additional context to guide subtask generation.
* @param {boolean} [args.force] - Force expansion even if subtasks exist.
* @param {string} [args.projectRoot] - Project root directory.
+ * @param {string} [args.tag] - Tag for the task
* @param {Object} log - Logger object
* @param {Object} context - Context object containing session
* @param {Object} [context.session] - MCP Session object
@@ -34,7 +35,8 @@ import { createLogWrapper } from '../../tools/utils.js';
export async function expandTaskDirect(args, log, context = {}) {
const { session } = context; // Extract session
// Destructure expected args, including projectRoot
- const { tasksJsonPath, id, num, research, prompt, force, projectRoot } = args;
+ const { tasksJsonPath, id, num, research, prompt, force, projectRoot, tag } =
+   args;
// Log session root data for debugging
log.info(
@@ -194,7 +196,8 @@ export async function expandTaskDirect(args, log, context = {}) {
session,
projectRoot,
commandName: 'expand-task',
- outputType: 'mcp'
+ outputType: 'mcp',
+ tag
},
forceFlag
);
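
With tag destructured from args, the direct function forwards it in the context it builds for the core expandTask call. A condensed sketch of the resulting flow (dependencies injected to keep it self-contained; the real function's validation and error handling are elided):

// Sketch of the updated expandTaskDirect flow, not the full implementation.
async function expandTaskDirectSketch(args, context, expandTask) {
  const { session } = context;
  const { tasksJsonPath, id, num, research, prompt, force, projectRoot, tag } = args;
  return expandTask(
    tasksJsonPath,
    id,
    num,
    research,
    prompt,
    { session, projectRoot, commandName: 'expand-task', outputType: 'mcp', tag },
    force // forceFlag in the hunk above
  );
}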

View File

@@ -45,7 +45,8 @@ export function registerExpandTaskTool(server) {
.boolean()
.optional()
.default(false)
- .describe('Force expansion even if subtasks exist')
+ .describe('Force expansion even if subtasks exist'),
+ tag: z.string().optional().describe('Tag context to operate on')
}),
execute: withNormalizedProjectRoot(async (args, { log, session }) => {
try {
@@ -73,7 +74,8 @@ export function registerExpandTaskTool(server) {
research: args.research,
prompt: args.prompt,
force: args.force,
- projectRoot: args.projectRoot
+ projectRoot: args.projectRoot,
+ tag: args.tag || 'master'
},
log,
{ session }
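
On the MCP side, the tool schema gains an optional tag and the execute handler defaults it to 'master' before delegating to the direct function. A sketch of the argument object the tool now accepts (values are examples, not schema defaults):

// Example arguments for the expand_task MCP tool after this change:
const exampleArgs = {
  id: '2', // task to expand
  num: 3, // desired number of subtasks
  research: false,
  prompt: 'additional context',
  force: false,
  projectRoot: '/path/to/project',
  tag: 'feature-expand' // omitted -> handler falls back to 'master'
};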

package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "task-master-ai",
"version": "0.17.0",
"version": "0.17.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "task-master-ai",
"version": "0.17.0",
"version": "0.17.1",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@ai-sdk/amazon-bedrock": "^2.2.9",

View File

@@ -1,6 +1,6 @@
{
"name": "task-master-ai",
"version": "0.17.0",
"version": "0.17.1",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",

View File

@@ -32,7 +32,12 @@ async function expandAllTasks(
context = {},
outputFormat = 'text' // Assume text default for CLI
) {
- const { session, mcpLog, projectRoot: providedProjectRoot } = context;
+ const {
+   session,
+   mcpLog,
+   projectRoot: providedProjectRoot,
+   tag: contextTag
+ } = context;
const isMCPCall = !!mcpLog; // Determine if called from MCP
const projectRoot = providedProjectRoot || findProjectRoot();
@@ -74,7 +79,7 @@ async function expandAllTasks(
try {
logger.info(`Reading tasks from ${tasksPath}`);
- const data = readJSON(tasksPath, projectRoot);
+ const data = readJSON(tasksPath, projectRoot, contextTag);
if (!data || !data.tasks) {
throw new Error(`Invalid tasks data in ${tasksPath}`);
}
@@ -124,7 +129,7 @@ async function expandAllTasks(
numSubtasks,
useResearch,
additionalContext,
- { ...context, projectRoot }, // Pass the whole context object with projectRoot
+ { ...context, projectRoot, tag: data.tag || contextTag }, // Pass the whole context object with projectRoot and resolved tag
force
);
expandedCount++;
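
Worth noting: the tag handed to each expandTask call is data.tag || contextTag, so the tag that readJSON resolved and recorded on the loaded data takes precedence over the caller's context tag. In isolation (a sketch with dependencies injected; eligibility filtering elided):

// Sketch of the tag resolution inside expandAllTasks.
async function expandAllSketch(tasksPath, projectRoot, context, { readJSON, expandTask }) {
  const { tag: contextTag } = context; // what the caller asked for
  const data = readJSON(tasksPath, projectRoot, contextTag); // records the resolved tag on data.tag
  const resolvedTag = data.tag || contextTag; // readJSON's answer wins
  for (const task of data.tasks) {
    await expandTask(
      tasksPath, task.id, undefined, false, '',
      { ...context, projectRoot, tag: resolvedTag },
      false
    );
  }
}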

View File

@@ -417,7 +417,7 @@ async function expandTask(
context = {},
force = false
) {
- const { session, mcpLog, projectRoot: contextProjectRoot } = context;
+ const { session, mcpLog, projectRoot: contextProjectRoot, tag } = context;
const outputFormat = mcpLog ? 'json' : 'text';
// Determine projectRoot: Use from context if available, otherwise derive from tasksPath
@@ -439,7 +439,7 @@ async function expandTask(
try {
// --- Task Loading/Filtering (Unchanged) ---
logger.info(`Reading tasks from ${tasksPath}`);
- const data = readJSON(tasksPath, projectRoot);
+ const data = readJSON(tasksPath, projectRoot, tag);
if (!data || !data.tasks)
throw new Error(`Invalid tasks data in ${tasksPath}`);
const taskIndex = data.tasks.findIndex(
@@ -668,7 +668,7 @@ async function expandTask(
// --- End Change: Append instead of replace ---
data.tasks[taskIndex] = task; // Assign the modified task back
- writeJSON(tasksPath, data);
+ writeJSON(tasksPath, data, projectRoot, tag);
// await generateTaskFiles(tasksPath, path.dirname(tasksPath));
// Display AI Usage Summary for CLI
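
The crux of the fix is symmetry: the tag used to read the tasks file must also be used to write it back, so writeJSON can merge the updated task list into the correct slot of the tagged file instead of overwriting the whole structure. In outline (dependencies injected; the mutation in between is the subtask append/replace shown above):

// Read and write under the same tag to preserve the tagged structure.
function rewriteUnderSameTag(tasksPath, projectRoot, tag, { readJSON, writeJSON }) {
  const data = readJSON(tasksPath, projectRoot, tag); // one tag's view; _rawTaggedData preserved
  // ...append or replace data.tasks[taskIndex].subtasks here...
  writeJSON(tasksPath, data, projectRoot, tag); // same tag on the way out
}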

View File

@@ -333,8 +333,8 @@ log_step() {
log_step "Initializing Task Master project (non-interactive)"
task-master init -y --name="E2E Test $TIMESTAMP" --description="Automated E2E test run"
if [ ! -f ".taskmasterconfig" ]; then
log_error "Initialization failed: .taskmasterconfig not found."
if [ ! -f ".taskmaster/config.json" ]; then
log_error "Initialization failed: .taskmaster/config.json not found."
exit 1
fi
log_success "Project initialized."
@@ -344,8 +344,8 @@ log_step() {
exit_status_prd=$?
echo "$cmd_output_prd"
extract_and_sum_cost "$cmd_output_prd"
- if [ $exit_status_prd -ne 0 ] || [ ! -s "tasks/tasks.json" ]; then
-   log_error "Parsing PRD failed: tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
+ if [ $exit_status_prd -ne 0 ] || [ ! -s ".taskmaster/tasks/tasks.json" ]; then
+   log_error "Parsing PRD failed: .taskmaster/tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
exit 1
else
log_success "PRD parsed successfully."
@@ -386,6 +386,95 @@ log_step() {
task-master list --with-subtasks > task_list_after_changes.log
log_success "Task list after changes saved to task_list_after_changes.log"
# === Start New Test Section: Tag-Aware Expand Testing ===
log_step "Creating additional tag for expand testing"
task-master add-tag feature-expand --description="Tag for testing expand command with tag preservation"
log_success "Created feature-expand tag."
log_step "Adding task to feature-expand tag"
task-master add-task --tag=feature-expand --prompt="Test task for tag-aware expansion" --priority=medium
# Get the new task ID dynamically
new_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
log_success "Added task $new_expand_task_id to feature-expand tag."
log_step "Verifying tags exist before expand test"
task-master tags > tags_before_expand.log
tag_count_before=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
log_success "Tag count before expand: $tag_count_before"
log_step "Expanding task in feature-expand tag (testing tag corruption fix)"
cmd_output_expand_tagged=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" 2>&1)
exit_status_expand_tagged=$?
echo "$cmd_output_expand_tagged"
extract_and_sum_cost "$cmd_output_expand_tagged"
if [ $exit_status_expand_tagged -ne 0 ]; then
log_error "Tagged expand failed. Exit status: $exit_status_expand_tagged"
else
log_success "Tagged expand completed."
fi
log_step "Verifying tag preservation after expand"
task-master tags > tags_after_expand.log
tag_count_after=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
if [ "$tag_count_before" -eq "$tag_count_after" ]; then
log_success "Tag count preserved: $tag_count_after (no corruption detected)"
else
log_error "Tag corruption detected! Before: $tag_count_before, After: $tag_count_after"
fi
log_step "Verifying master tag still exists and has tasks"
master_task_count=$(jq -r '.master.tasks | length' .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
if [ "$master_task_count" -gt "0" ]; then
log_success "Master tag preserved with $master_task_count tasks"
else
log_error "Master tag corrupted or empty after tagged expand"
fi
log_step "Verifying feature-expand tag has expanded subtasks"
expanded_subtask_count=$(jq -r ".\"feature-expand\".tasks[] | select(.id == $new_expand_task_id) | .subtasks | length" .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
if [ "$expanded_subtask_count" -gt "0" ]; then
log_success "Expand successful: $expanded_subtask_count subtasks created in feature-expand tag"
else
log_error "Expand failed: No subtasks found in feature-expand tag"
fi
log_step "Testing force expand with tag preservation"
cmd_output_force_expand=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" --force 2>&1)
exit_status_force_expand=$?
echo "$cmd_output_force_expand"
extract_and_sum_cost "$cmd_output_force_expand"
# Verify tags still preserved after force expand
tag_count_after_force=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
if [ "$tag_count_before" -eq "$tag_count_after_force" ]; then
log_success "Force expand preserved all tags"
else
log_error "Force expand caused tag corruption"
fi
log_step "Testing expand --all with tag preservation"
# Add another task to feature-expand for expand-all testing
task-master add-task --tag=feature-expand --prompt="Second task for expand-all testing" --priority=low
second_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
cmd_output_expand_all=$(task-master expand --tag=feature-expand --all 2>&1)
exit_status_expand_all=$?
echo "$cmd_output_expand_all"
extract_and_sum_cost "$cmd_output_expand_all"
# Verify tags preserved after expand-all
tag_count_after_all=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
if [ "$tag_count_before" -eq "$tag_count_after_all" ]; then
log_success "Expand --all preserved all tags"
else
log_error "Expand --all caused tag corruption"
fi
log_success "Completed expand --all tag preservation test."
# === End New Test Section: Tag-Aware Expand Testing ===
# === Test Model Commands ===
log_step "Checking initial model configuration"
task-master models > models_initial_config.log
@@ -626,7 +715,7 @@ log_step() {
# Find the next available task ID dynamically instead of hardcoding 11, 12
# Assuming tasks are added sequentially and we didn't remove any core tasks yet
- last_task_id=$(jq '[.tasks[].id] | max' tasks/tasks.json)
+ last_task_id=$(jq '[.master.tasks[].id] | max' .taskmaster/tasks/tasks.json)
manual_task_id=$((last_task_id + 1))
ai_task_id=$((manual_task_id + 1))
@@ -747,30 +836,30 @@ log_step() {
task-master list --with-subtasks > task_list_after_clear_all.log
log_success "Task list after clear-all saved. (Manual/LLM check recommended to verify subtasks removed)"
log_step "Expanding Task 1 again (to have subtasks for next test)"
task-master expand --id=1
log_success "Attempted to expand Task 1 again."
# Verify 1.1 exists again
if ! jq -e '.tasks[] | select(.id == 1) | .subtasks[] | select(.id == 1)' tasks/tasks.json > /dev/null; then
log_error "Subtask 1.1 not found in tasks.json after re-expanding Task 1."
log_step "Expanding Task 3 again (to have subtasks for next test)"
task-master expand --id=3
log_success "Attempted to expand Task 3."
# Verify 3.1 exists
if ! jq -e '.master.tasks[] | select(.id == 3) | .subtasks[] | select(.id == 1)' .taskmaster/tasks/tasks.json > /dev/null; then
log_error "Subtask 3.1 not found in tasks.json after expanding Task 3."
exit 1
fi
log_step "Adding dependency: Task 3 depends on Subtask 1.1"
task-master add-dependency --id=3 --depends-on=1.1
log_success "Added dependency 3 -> 1.1."
log_step "Adding dependency: Task 4 depends on Subtask 3.1"
task-master add-dependency --id=4 --depends-on=3.1
log_success "Added dependency 4 -> 3.1."
log_step "Showing Task 3 details (after adding subtask dependency)"
task-master show 3 > task_3_details_after_dep_add.log
log_success "Task 3 details saved. (Manual/LLM check recommended for dependency [1.1])"
log_step "Showing Task 4 details (after adding subtask dependency)"
task-master show 4 > task_4_details_after_dep_add.log
log_success "Task 4 details saved. (Manual/LLM check recommended for dependency [3.1])"
log_step "Removing dependency: Task 3 depends on Subtask 1.1"
task-master remove-dependency --id=3 --depends-on=1.1
log_success "Removed dependency 3 -> 1.1."
log_step "Removing dependency: Task 4 depends on Subtask 3.1"
task-master remove-dependency --id=4 --depends-on=3.1
log_success "Removed dependency 4 -> 3.1."
log_step "Showing Task 3 details (after removing subtask dependency)"
task-master show 3 > task_3_details_after_dep_remove.log
log_success "Task 3 details saved. (Manual/LLM check recommended to verify dependency removed)"
log_step "Showing Task 4 details (after removing subtask dependency)"
task-master show 4 > task_4_details_after_dep_remove.log
log_success "Task 4 details saved. (Manual/LLM check recommended to verify dependency removed)"
# === End New Test Section ===
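
For reference, the jq assertions above all probe the same structure from the shell. A hypothetical Node equivalent of the master-tag check (helper name invented; the path is the one used in the script):

import { readFileSync } from 'node:fs';

// Equivalent of: jq -r '.master.tasks | length' .taskmaster/tasks/tasks.json
function masterTaskCount(file = '.taskmaster/tasks/tasks.json') {
  const data = JSON.parse(readFileSync(file, 'utf8'));
  return data.master?.tasks?.length ?? 0;
}

if (masterTaskCount() === 0) {
  throw new Error('Master tag corrupted or empty after tagged expand');
}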

View File

@@ -625,19 +625,38 @@ describe('MCP Server Direct Functions', () => {
// For successful cases, record that functions were called but don't make real calls
mockEnableSilentMode();
- // Mock expandAllTasks
+ // Mock expandAllTasks - now returns a structured object instead of undefined
const mockExpandAll = jest.fn().mockImplementation(async () => {
- // Just simulate success without any real operations
- return undefined; // expandAllTasks doesn't return anything
+ // Return the new structured response that matches the actual implementation
+ return {
+   success: true,
+   expandedCount: 2,
+   failedCount: 0,
+   skippedCount: 1,
+   tasksToExpand: 3,
+   telemetryData: {
+     timestamp: new Date().toISOString(),
+     commandName: 'expand-all-tasks',
+     totalCost: 0.05,
+     totalTokens: 1000,
+     inputTokens: 600,
+     outputTokens: 400
+   }
+ };
});
- // Call mock expandAllTasks
- await mockExpandAll(
-   args.num,
-   args.research || false,
-   args.prompt || '',
-   args.force || false,
-   { mcpLog: mockLogger, session: options.session }
+ // Call mock expandAllTasks with the correct signature
+ const result = await mockExpandAll(
+   args.file, // tasksPath
+   args.num, // numSubtasks
+   args.research || false, // useResearch
+   args.prompt || '', // additionalContext
+   args.force || false, // force
+   {
+     mcpLog: mockLogger,
+     session: options.session,
+     projectRoot: args.projectRoot
+   }
);
mockDisableSilentMode();
@@ -645,13 +664,14 @@ describe('MCP Server Direct Functions', () => {
return {
success: true,
data: {
- message: 'Successfully expanded all pending tasks with subtasks',
+ message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
details: {
- numSubtasks: args.num,
- research: args.research || false,
- prompt: args.prompt || '',
- force: args.force || false
- }
+ expandedCount: result.expandedCount,
+ failedCount: result.failedCount,
+ skippedCount: result.skippedCount,
+ tasksToExpand: result.tasksToExpand
+ },
+ telemetryData: result.telemetryData
}
};
}
@@ -671,10 +691,13 @@ describe('MCP Server Direct Functions', () => {
// Assert
expect(result.success).toBe(true);
- expect(result.data.message).toBe(
-   'Successfully expanded all pending tasks with subtasks'
- );
- expect(result.data.details.numSubtasks).toBe(3);
+ expect(result.data.message).toMatch(/Expand all operation completed/);
+ expect(result.data.details.expandedCount).toBe(2);
+ expect(result.data.details.failedCount).toBe(0);
+ expect(result.data.details.skippedCount).toBe(1);
+ expect(result.data.details.tasksToExpand).toBe(3);
+ expect(result.data.telemetryData).toBeDefined();
+ expect(result.data.telemetryData.commandName).toBe('expand-all-tasks');
expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled();
});
@@ -695,7 +718,8 @@ describe('MCP Server Direct Functions', () => {
// Assert
expect(result.success).toBe(true);
expect(result.data.details.research).toBe(true);
+ expect(result.data.details.expandedCount).toBe(2);
+ expect(result.data.telemetryData).toBeDefined();
expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled();
});
@@ -715,7 +739,8 @@ describe('MCP Server Direct Functions', () => {
// Assert
expect(result.success).toBe(true);
expect(result.data.details.force).toBe(true);
expect(result.data.details.expandedCount).toBe(2);
expect(result.data.telemetryData).toBeDefined();
expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled();
});
@@ -735,11 +760,77 @@ describe('MCP Server Direct Functions', () => {
// Assert
expect(result.success).toBe(true);
expect(result.data.details.prompt).toBe(
'Additional context for subtasks'
);
+ expect(result.data.details.expandedCount).toBe(2);
+ expect(result.data.telemetryData).toBeDefined();
expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled();
});
test('should handle case with no eligible tasks', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath,
num: 3
};
// Act - Mock the scenario where no tasks are eligible for expansion
async function testNoEligibleTasks(args, mockLogger, options = {}) {
mockEnableSilentMode();
const mockExpandAll = jest.fn().mockImplementation(async () => {
return {
success: true,
expandedCount: 0,
failedCount: 0,
skippedCount: 0,
tasksToExpand: 0,
telemetryData: null,
message: 'No tasks eligible for expansion.'
};
});
const result = await mockExpandAll(
args.file,
args.num,
false,
'',
false,
{
mcpLog: mockLogger,
session: options.session,
projectRoot: args.projectRoot
},
'json'
);
mockDisableSilentMode();
return {
success: true,
data: {
message: result.message,
details: {
expandedCount: result.expandedCount,
failedCount: result.failedCount,
skippedCount: result.skippedCount,
tasksToExpand: result.tasksToExpand
},
telemetryData: result.telemetryData
}
};
}
const result = await testNoEligibleTasks(args, mockLogger, {
session: mockSession
});
// Assert
expect(result.success).toBe(true);
expect(result.data.message).toBe('No tasks eligible for expansion.');
expect(result.data.details.expandedCount).toBe(0);
expect(result.data.details.tasksToExpand).toBe(0);
expect(result.data.telemetryData).toBeNull();
});
});
});

View File

@@ -0,0 +1,324 @@
/**
* Tests for the expand-all MCP tool
*
* Note: This test does NOT test the actual implementation. It tests that:
* 1. The tool is registered correctly with the correct parameters
* 2. Arguments are passed correctly to expandAllTasksDirect
* 3. Error handling works as expected
*
* We do NOT import the real implementation - everything is mocked
*/
import { jest } from '@jest/globals';
// Mock EVERYTHING
const mockExpandAllTasksDirect = jest.fn();
jest.mock('../../../../mcp-server/src/core/task-master-core.js', () => ({
expandAllTasksDirect: mockExpandAllTasksDirect
}));
const mockHandleApiResult = jest.fn((result) => result);
const mockGetProjectRootFromSession = jest.fn(() => '/mock/project/root');
const mockCreateErrorResponse = jest.fn((msg) => ({
success: false,
error: { code: 'ERROR', message: msg }
}));
const mockWithNormalizedProjectRoot = jest.fn((fn) => fn);
jest.mock('../../../../mcp-server/src/tools/utils.js', () => ({
getProjectRootFromSession: mockGetProjectRootFromSession,
handleApiResult: mockHandleApiResult,
createErrorResponse: mockCreateErrorResponse,
withNormalizedProjectRoot: mockWithNormalizedProjectRoot
}));
// Mock the z object from zod
const mockZod = {
object: jest.fn(() => mockZod),
string: jest.fn(() => mockZod),
number: jest.fn(() => mockZod),
boolean: jest.fn(() => mockZod),
optional: jest.fn(() => mockZod),
describe: jest.fn(() => mockZod),
_def: {
shape: () => ({
num: {},
research: {},
prompt: {},
force: {},
tag: {},
projectRoot: {}
})
}
};
jest.mock('zod', () => ({
z: mockZod
}));
// DO NOT import the real module - create a fake implementation
// This is the fake implementation of registerExpandAllTool
const registerExpandAllTool = (server) => {
// Create simplified version of the tool config
const toolConfig = {
name: 'expand_all',
description: 'Use Taskmaster to expand all eligible pending tasks',
parameters: mockZod,
// Create a simplified mock of the execute function
execute: mockWithNormalizedProjectRoot(async (args, context) => {
const { log, session } = context;
try {
log.info &&
log.info(`Starting expand-all with args: ${JSON.stringify(args)}`);
// Call expandAllTasksDirect
const result = await mockExpandAllTasksDirect(args, log, { session });
// Handle result
return mockHandleApiResult(result, log);
} catch (error) {
log.error && log.error(`Error in expand-all tool: ${error.message}`);
return mockCreateErrorResponse(error.message);
}
})
};
// Register the tool with the server
server.addTool(toolConfig);
};
describe('MCP Tool: expand-all', () => {
// Create mock server
let mockServer;
let executeFunction;
// Create mock logger
const mockLogger = {
debug: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn()
};
// Test data
const validArgs = {
num: 3,
research: true,
prompt: 'additional context',
force: false,
tag: 'master',
projectRoot: '/test/project'
};
// Standard responses
const successResponse = {
success: true,
data: {
message:
'Expand all operation completed. Expanded: 2, Failed: 0, Skipped: 1',
details: {
expandedCount: 2,
failedCount: 0,
skippedCount: 1,
tasksToExpand: 3,
telemetryData: {
commandName: 'expand-all-tasks',
totalCost: 0.15,
totalTokens: 2500
}
}
}
};
const errorResponse = {
success: false,
error: {
code: 'EXPAND_ALL_ERROR',
message: 'Failed to expand tasks'
}
};
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Create mock server
mockServer = {
addTool: jest.fn((config) => {
executeFunction = config.execute;
})
};
// Setup default successful response
mockExpandAllTasksDirect.mockResolvedValue(successResponse);
// Register the tool
registerExpandAllTool(mockServer);
});
test('should register the tool correctly', () => {
// Verify tool was registered
expect(mockServer.addTool).toHaveBeenCalledWith(
expect.objectContaining({
name: 'expand_all',
description: expect.stringContaining('expand all eligible pending'),
parameters: expect.any(Object),
execute: expect.any(Function)
})
);
// Verify the tool config was passed
const toolConfig = mockServer.addTool.mock.calls[0][0];
expect(toolConfig).toHaveProperty('parameters');
expect(toolConfig).toHaveProperty('execute');
});
test('should execute the tool with valid parameters', async () => {
// Setup context
const mockContext = {
log: mockLogger,
session: { workingDirectory: '/mock/dir' }
};
// Execute the function
const result = await executeFunction(validArgs, mockContext);
// Verify expandAllTasksDirect was called with correct arguments
expect(mockExpandAllTasksDirect).toHaveBeenCalledWith(
validArgs,
mockLogger,
{ session: mockContext.session }
);
// Verify handleApiResult was called
expect(mockHandleApiResult).toHaveBeenCalledWith(
successResponse,
mockLogger
);
expect(result).toEqual(successResponse);
});
test('should handle expand all with no eligible tasks', async () => {
// Arrange
const mockDirectResult = {
success: true,
data: {
message:
'Expand all operation completed. Expanded: 0, Failed: 0, Skipped: 0',
details: {
expandedCount: 0,
failedCount: 0,
skippedCount: 0,
tasksToExpand: 0,
telemetryData: null
}
}
};
mockExpandAllTasksDirect.mockResolvedValue(mockDirectResult);
mockHandleApiResult.mockReturnValue({
success: true,
data: mockDirectResult.data
});
// Act
const result = await executeFunction(validArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(result.success).toBe(true);
expect(result.data.details.expandedCount).toBe(0);
expect(result.data.details.tasksToExpand).toBe(0);
});
test('should handle expand all with mixed success/failure', async () => {
// Arrange
const mockDirectResult = {
success: true,
data: {
message:
'Expand all operation completed. Expanded: 2, Failed: 1, Skipped: 0',
details: {
expandedCount: 2,
failedCount: 1,
skippedCount: 0,
tasksToExpand: 3,
telemetryData: {
commandName: 'expand-all-tasks',
totalCost: 0.1,
totalTokens: 1500
}
}
}
};
mockExpandAllTasksDirect.mockResolvedValue(mockDirectResult);
mockHandleApiResult.mockReturnValue({
success: true,
data: mockDirectResult.data
});
// Act
const result = await executeFunction(validArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(result.success).toBe(true);
expect(result.data.details.expandedCount).toBe(2);
expect(result.data.details.failedCount).toBe(1);
});
test('should handle errors from expandAllTasksDirect', async () => {
// Arrange
mockExpandAllTasksDirect.mockRejectedValue(
new Error('Direct function error')
);
// Act
const result = await executeFunction(validArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(mockLogger.error).toHaveBeenCalledWith(
expect.stringContaining('Error in expand-all tool')
);
expect(mockCreateErrorResponse).toHaveBeenCalledWith(
'Direct function error'
);
});
test('should handle different argument combinations', async () => {
// Test with minimal args
const minimalArgs = {
projectRoot: '/test/project'
};
// Act
await executeFunction(minimalArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(mockExpandAllTasksDirect).toHaveBeenCalledWith(
minimalArgs,
mockLogger,
expect.any(Object)
);
});
test('should use withNormalizedProjectRoot wrapper correctly', () => {
// Verify that the execute function is wrapped with withNormalizedProjectRoot
expect(mockWithNormalizedProjectRoot).toHaveBeenCalledWith(
expect.any(Function)
);
});
});

View File

@@ -0,0 +1,502 @@
/**
* Tests for the expand-all-tasks.js module
*/
import { jest } from '@jest/globals';
// Mock the dependencies before importing the module under test
jest.unstable_mockModule(
'../../../../../scripts/modules/task-manager/expand-task.js',
() => ({
default: jest.fn()
})
);
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
readJSON: jest.fn(),
log: jest.fn(),
isSilentMode: jest.fn(() => false),
findProjectRoot: jest.fn(() => '/test/project'),
aggregateTelemetry: jest.fn()
}));
jest.unstable_mockModule(
'../../../../../scripts/modules/config-manager.js',
() => ({
getDebugFlag: jest.fn(() => false)
})
);
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
startLoadingIndicator: jest.fn(),
stopLoadingIndicator: jest.fn(),
displayAiUsageSummary: jest.fn()
}));
jest.unstable_mockModule('chalk', () => ({
default: {
white: { bold: jest.fn((text) => text) },
cyan: jest.fn((text) => text),
green: jest.fn((text) => text),
gray: jest.fn((text) => text),
red: jest.fn((text) => text),
bold: jest.fn((text) => text)
}
}));
jest.unstable_mockModule('boxen', () => ({
default: jest.fn((text) => text)
}));
// Import the mocked modules
const { default: expandTask } = await import(
'../../../../../scripts/modules/task-manager/expand-task.js'
);
const { readJSON, aggregateTelemetry, findProjectRoot } = await import(
'../../../../../scripts/modules/utils.js'
);
// Import the module under test
const { default: expandAllTasks } = await import(
'../../../../../scripts/modules/task-manager/expand-all-tasks.js'
);
const mockExpandTask = expandTask;
const mockReadJSON = readJSON;
const mockAggregateTelemetry = aggregateTelemetry;
const mockFindProjectRoot = findProjectRoot;
describe('expandAllTasks', () => {
const mockTasksPath = '/test/tasks.json';
const mockProjectRoot = '/test/project';
const mockSession = { userId: 'test-user' };
const mockMcpLog = {
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
debug: jest.fn()
};
const sampleTasksData = {
tag: 'master',
tasks: [
{
id: 1,
title: 'Pending Task 1',
status: 'pending',
subtasks: []
},
{
id: 2,
title: 'In Progress Task',
status: 'in-progress',
subtasks: []
},
{
id: 3,
title: 'Done Task',
status: 'done',
subtasks: []
},
{
id: 4,
title: 'Task with Subtasks',
status: 'pending',
subtasks: [{ id: '4.1', title: 'Existing subtask' }]
}
]
};
beforeEach(() => {
jest.clearAllMocks();
mockReadJSON.mockReturnValue(sampleTasksData);
mockAggregateTelemetry.mockReturnValue({
timestamp: '2024-01-01T00:00:00.000Z',
commandName: 'expand-all-tasks',
totalCost: 0.1,
totalTokens: 2000,
inputTokens: 1200,
outputTokens: 800
});
});
describe('successful expansion', () => {
test('should expand all eligible pending tasks', async () => {
// Arrange
const mockTelemetryData = {
timestamp: '2024-01-01T00:00:00.000Z',
commandName: 'expand-task',
totalCost: 0.05,
totalTokens: 1000
};
mockExpandTask.mockResolvedValue({
telemetryData: mockTelemetryData
});
// Act
const result = await expandAllTasks(
mockTasksPath,
3, // numSubtasks
false, // useResearch
'test context', // additionalContext
false, // force
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot,
tag: 'master'
},
'json' // outputFormat
);
// Assert
expect(result.success).toBe(true);
expect(result.expandedCount).toBe(2); // Tasks 1 and 2 (pending and in-progress)
expect(result.failedCount).toBe(0);
expect(result.skippedCount).toBe(0);
expect(result.tasksToExpand).toBe(2);
expect(result.telemetryData).toBeDefined();
// Verify readJSON was called correctly
expect(mockReadJSON).toHaveBeenCalledWith(
mockTasksPath,
mockProjectRoot,
'master'
);
// Verify expandTask was called for eligible tasks
expect(mockExpandTask).toHaveBeenCalledTimes(2);
expect(mockExpandTask).toHaveBeenCalledWith(
mockTasksPath,
1,
3,
false,
'test context',
expect.objectContaining({
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot,
tag: 'master'
}),
false
);
});
test('should handle force flag to expand tasks with existing subtasks', async () => {
// Arrange
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
2,
false,
'',
true, // force = true
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.expandedCount).toBe(3); // Tasks 1, 2, and 4 (including task with existing subtasks)
expect(mockExpandTask).toHaveBeenCalledTimes(3);
});
test('should handle research flag', async () => {
// Arrange
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.08 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
undefined, // numSubtasks not specified
true, // useResearch = true
'research context',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(mockExpandTask).toHaveBeenCalledWith(
mockTasksPath,
expect.any(Number),
undefined,
true, // research flag passed correctly
'research context',
expect.any(Object),
false
);
});
test('should return success with message when no tasks are eligible', async () => {
// Arrange - Mock tasks data with no eligible tasks
const noEligibleTasksData = {
tag: 'master',
tasks: [
{ id: 1, status: 'done', subtasks: [] },
{
id: 2,
status: 'pending',
subtasks: [{ id: '2.1', title: 'existing' }]
}
]
};
mockReadJSON.mockReturnValue(noEligibleTasksData);
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false, // force = false, so task with subtasks won't be expanded
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(result.expandedCount).toBe(0);
expect(result.failedCount).toBe(0);
expect(result.skippedCount).toBe(0);
expect(result.tasksToExpand).toBe(0);
expect(result.message).toBe('No tasks eligible for expansion.');
expect(mockExpandTask).not.toHaveBeenCalled();
});
});
describe('error handling', () => {
test('should handle expandTask failures gracefully', async () => {
// Arrange
mockExpandTask
.mockResolvedValueOnce({ telemetryData: { totalCost: 0.05 } }) // First task succeeds
.mockRejectedValueOnce(new Error('AI service error')); // Second task fails
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(result.expandedCount).toBe(1);
expect(result.failedCount).toBe(1);
});
test('should throw error when tasks.json is invalid', async () => {
// Arrange
mockReadJSON.mockReturnValue(null);
// Act & Assert
await expect(
expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
)
).rejects.toThrow('Invalid tasks data');
});
test('should throw error when project root cannot be determined', async () => {
// Arrange - Mock findProjectRoot to return null for this test
mockFindProjectRoot.mockReturnValueOnce(null);
// Act & Assert
await expect(
expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog
// No projectRoot provided, and findProjectRoot will return null
},
'json'
)
).rejects.toThrow('Could not determine project root directory');
});
});
describe('telemetry aggregation', () => {
test('should aggregate telemetry data from multiple expand operations', async () => {
// Arrange
const telemetryData1 = {
commandName: 'expand-task',
totalCost: 0.03,
totalTokens: 600
};
const telemetryData2 = {
commandName: 'expand-task',
totalCost: 0.04,
totalTokens: 800
};
mockExpandTask
.mockResolvedValueOnce({ telemetryData: telemetryData1 })
.mockResolvedValueOnce({ telemetryData: telemetryData2 });
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(mockAggregateTelemetry).toHaveBeenCalledWith(
[telemetryData1, telemetryData2],
'expand-all-tasks'
);
expect(result.telemetryData).toBeDefined();
expect(result.telemetryData.commandName).toBe('expand-all-tasks');
});
test('should handle missing telemetry data gracefully', async () => {
// Arrange
mockExpandTask.mockResolvedValue({}); // No telemetryData
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(mockAggregateTelemetry).toHaveBeenCalledWith(
[],
'expand-all-tasks'
);
});
});
describe('output format handling', () => {
test('should use text output format for CLI calls', async () => {
// Arrange
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
projectRoot: mockProjectRoot
// No mcpLog provided, should use CLI logger
},
'text' // CLI output format
);
// Assert
expect(result.success).toBe(true);
// In text mode, loading indicators and console output would be used
// This is harder to test directly but we can verify the result structure
});
test('should handle context tag properly', async () => {
// Arrange
const taggedTasksData = {
...sampleTasksData,
tag: 'feature-branch'
};
mockReadJSON.mockReturnValue(taggedTasksData);
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot,
tag: 'feature-branch'
},
'json'
);
// Assert
expect(mockReadJSON).toHaveBeenCalledWith(
mockTasksPath,
mockProjectRoot,
'feature-branch'
);
expect(mockExpandTask).toHaveBeenCalledWith(
mockTasksPath,
expect.any(Number),
3,
false,
'',
expect.objectContaining({
tag: 'feature-branch'
}),
false
);
});
});
});

View File

@@ -0,0 +1,888 @@
/**
* Tests for the expand-task.js module
*/
import { jest } from '@jest/globals';
import fs from 'fs';
// Mock the dependencies before importing the module under test
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
readJSON: jest.fn(),
writeJSON: jest.fn(),
log: jest.fn(),
CONFIG: {
model: 'mock-claude-model',
maxTokens: 4000,
temperature: 0.7,
debug: false
},
sanitizePrompt: jest.fn((prompt) => prompt),
truncate: jest.fn((text) => text),
isSilentMode: jest.fn(() => false),
findTaskById: jest.fn(),
findProjectRoot: jest.fn((tasksPath) => '/mock/project/root'),
getCurrentTag: jest.fn(() => 'master'),
ensureTagMetadata: jest.fn((tagObj) => tagObj),
flattenTasksWithSubtasks: jest.fn((tasks) => {
const allTasks = [];
const queue = [...(tasks || [])];
while (queue.length > 0) {
const task = queue.shift();
allTasks.push(task);
if (task.subtasks) {
for (const subtask of task.subtasks) {
queue.push({ ...subtask, id: `${task.id}.${subtask.id}` });
}
}
}
return allTasks;
}),
readComplexityReport: jest.fn(),
markMigrationForNotice: jest.fn(),
performCompleteTagMigration: jest.fn(),
setTasksForTag: jest.fn(),
getTasksForTag: jest.fn((data, tag) => data[tag]?.tasks || [])
}));
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
displayBanner: jest.fn(),
getStatusWithColor: jest.fn((status) => status),
startLoadingIndicator: jest.fn(),
stopLoadingIndicator: jest.fn(),
succeedLoadingIndicator: jest.fn(),
failLoadingIndicator: jest.fn(),
warnLoadingIndicator: jest.fn(),
infoLoadingIndicator: jest.fn(),
displayAiUsageSummary: jest.fn(),
displayContextAnalysis: jest.fn()
}));
jest.unstable_mockModule(
'../../../../../scripts/modules/ai-services-unified.js',
() => ({
generateTextService: jest.fn().mockResolvedValue({
mainResult: JSON.stringify({
subtasks: [
{
id: 1,
title: 'Set up project structure',
description:
'Create the basic project directory structure and configuration files',
dependencies: [],
details:
'Initialize package.json, create src/ and test/ directories, set up linting configuration',
status: 'pending',
testStrategy:
'Verify all expected files and directories are created'
},
{
id: 2,
title: 'Implement core functionality',
description: 'Develop the main application logic and core features',
dependencies: [1],
details:
'Create main classes, implement business logic, set up data models',
status: 'pending',
testStrategy: 'Unit tests for all core functions and classes'
},
{
id: 3,
title: 'Add user interface',
description: 'Create the user interface components and layouts',
dependencies: [2],
details:
'Design UI components, implement responsive layouts, add user interactions',
status: 'pending',
testStrategy: 'UI tests and visual regression testing'
}
]
}),
telemetryData: {
timestamp: new Date().toISOString(),
userId: '1234567890',
commandName: 'expand-task',
modelUsed: 'claude-3-5-sonnet',
providerName: 'anthropic',
inputTokens: 1000,
outputTokens: 500,
totalTokens: 1500,
totalCost: 0.012414,
currency: 'USD'
}
})
})
);
jest.unstable_mockModule(
'../../../../../scripts/modules/config-manager.js',
() => ({
getDefaultSubtasks: jest.fn(() => 3),
getDebugFlag: jest.fn(() => false)
})
);
jest.unstable_mockModule(
'../../../../../scripts/modules/utils/contextGatherer.js',
() => ({
ContextGatherer: jest.fn().mockImplementation(() => ({
gather: jest.fn().mockResolvedValue({
contextSummary: 'Mock context summary',
allRelatedTaskIds: [],
graphVisualization: 'Mock graph'
})
}))
})
);
jest.unstable_mockModule(
'../../../../../scripts/modules/task-manager/generate-task-files.js',
() => ({
default: jest.fn().mockResolvedValue()
})
);
// Mock external UI libraries
jest.unstable_mockModule('chalk', () => ({
default: {
white: { bold: jest.fn((text) => text) },
cyan: Object.assign(
jest.fn((text) => text),
{
bold: jest.fn((text) => text)
}
),
green: jest.fn((text) => text),
yellow: jest.fn((text) => text),
bold: jest.fn((text) => text)
}
}));
jest.unstable_mockModule('boxen', () => ({
default: jest.fn((text) => text)
}));
jest.unstable_mockModule('cli-table3', () => ({
default: jest.fn().mockImplementation(() => ({
push: jest.fn(),
toString: jest.fn(() => 'mocked table')
}))
}));
// Mock process.exit to prevent Jest worker crashes
const mockExit = jest.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit called with "${code}"`);
});
// Import the mocked modules
const {
readJSON,
writeJSON,
log,
findTaskById,
ensureTagMetadata,
readComplexityReport,
findProjectRoot
} = await import('../../../../../scripts/modules/utils.js');
const { generateTextService } = await import(
'../../../../../scripts/modules/ai-services-unified.js'
);
const generateTaskFiles = (
await import(
'../../../../../scripts/modules/task-manager/generate-task-files.js'
)
).default;
// Import the module under test
const { default: expandTask } = await import(
'../../../../../scripts/modules/task-manager/expand-task.js'
);
describe('expandTask', () => {
const sampleTasks = {
master: {
tasks: [
{
id: 1,
title: 'Task 1',
description: 'First task',
status: 'done',
dependencies: [],
details: 'Already completed task',
subtasks: []
},
{
id: 2,
title: 'Task 2',
description: 'Second task',
status: 'pending',
dependencies: [],
details: 'Task ready for expansion',
subtasks: []
},
{
id: 3,
title: 'Complex Task',
description: 'A complex task that needs breakdown',
status: 'pending',
dependencies: [1],
details: 'This task involves multiple steps',
subtasks: []
},
{
id: 4,
title: 'Task with existing subtasks',
description: 'Task that already has subtasks',
status: 'pending',
dependencies: [],
details: 'Has existing subtasks',
subtasks: [
{
id: 1,
title: 'Existing subtask',
description: 'Already exists',
status: 'pending',
dependencies: []
}
]
}
]
},
'feature-branch': {
tasks: [
{
id: 1,
title: 'Feature Task 1',
description: 'Task in feature branch',
status: 'pending',
dependencies: [],
details: 'Feature-specific task',
subtasks: []
}
]
}
};
// Create a helper function for consistent mcpLog mock
const createMcpLogMock = () => ({
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
debug: jest.fn(),
success: jest.fn()
});
beforeEach(() => {
jest.clearAllMocks();
mockExit.mockClear();
// Default readJSON implementation - returns tagged structure
readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
const selectedTag = tag || 'master';
return {
...sampleTasksCopy[selectedTag],
tag: selectedTag,
_rawTaggedData: sampleTasksCopy
};
});
// Default findTaskById implementation
findTaskById.mockImplementation((tasks, taskId) => {
const id = parseInt(taskId, 10);
return tasks.find((t) => t.id === id);
});
// Default complexity report (no report available)
readComplexityReport.mockReturnValue(null);
// Mock findProjectRoot to return consistent path for complexity report
findProjectRoot.mockReturnValue('/mock/project/root');
writeJSON.mockResolvedValue();
generateTaskFiles.mockResolvedValue();
log.mockImplementation(() => {});
// Mock console.log to avoid output during tests
jest.spyOn(console, 'log').mockImplementation(() => {});
});
afterEach(() => {
console.log.mockRestore();
});
describe('Basic Functionality', () => {
test('should expand a task with AI-generated subtasks', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const numSubtasks = 3;
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
const result = await expandTask(
tasksPath,
taskId,
numSubtasks,
false,
'',
context,
false
);
// Assert
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
undefined
);
expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({
id: 2,
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure',
status: 'pending'
}),
expect.objectContaining({
id: 2,
title: 'Implement core functionality',
status: 'pending'
}),
expect.objectContaining({
id: 3,
title: 'Add user interface',
status: 'pending'
})
])
})
]),
tag: 'master',
_rawTaggedData: expect.objectContaining({
master: expect.objectContaining({
tasks: expect.any(Array)
})
})
}),
'/mock/project/root',
undefined
);
expect(result).toEqual(
expect.objectContaining({
task: expect.objectContaining({
id: 2,
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure',
status: 'pending'
}),
expect.objectContaining({
id: 2,
title: 'Implement core functionality',
status: 'pending'
}),
expect.objectContaining({
id: 3,
title: 'Add user interface',
status: 'pending'
})
])
}),
telemetryData: expect.any(Object)
})
);
});
test('should handle research flag correctly', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const numSubtasks = 3;
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(
tasksPath,
taskId,
numSubtasks,
true, // useResearch = true
'Additional context for research',
context,
false
);
// Assert
expect(generateTextService).toHaveBeenCalledWith(
expect.objectContaining({
role: 'research',
commandName: expect.any(String)
})
);
});
test('should handle complexity report integration without errors', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act & Assert - Should complete without errors
const result = await expandTask(
tasksPath,
taskId,
undefined, // numSubtasks not specified
false,
'',
context,
false
);
// Assert - Should successfully expand and return expected structure
expect(result).toEqual(
expect.objectContaining({
task: expect.objectContaining({
id: 2,
subtasks: expect.any(Array)
}),
telemetryData: expect.any(Object)
})
);
expect(generateTextService).toHaveBeenCalled();
});
});
describe('Tag Handling (The Critical Bug Fix)', () => {
test('should preserve tagged structure when expanding with default tag', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root',
tag: 'master' // Explicit tag context
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - CRITICAL: Check tag is passed to readJSON and writeJSON
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
'master'
);
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tag: 'master',
_rawTaggedData: expect.objectContaining({
master: expect.any(Object),
'feature-branch': expect.any(Object)
})
}),
'/mock/project/root',
'master' // CRITICAL: Tag must be passed to writeJSON
);
});
test('should preserve tagged structure when expanding with non-default tag', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '1'; // Task in feature-branch
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root',
tag: 'feature-branch' // Different tag context
};
// Configure readJSON to return feature-branch data
readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
return {
...sampleTasksCopy['feature-branch'],
tag: 'feature-branch',
_rawTaggedData: sampleTasksCopy
};
});
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - CRITICAL: Check tag preservation for non-default tag
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
'feature-branch'
);
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tag: 'feature-branch',
_rawTaggedData: expect.objectContaining({
master: expect.any(Object),
'feature-branch': expect.any(Object)
})
}),
'/mock/project/root',
'feature-branch' // CRITICAL: Correct tag passed to writeJSON
);
});
test('should NOT corrupt tagged structure when tag is undefined', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
// No tag specified - should default gracefully
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should still preserve structure with undefined tag
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
undefined
);
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
_rawTaggedData: expect.objectContaining({
master: expect.any(Object)
})
}),
'/mock/project/root',
undefined
);
// CRITICAL: Verify structure is NOT flattened to old format
const writeCallArgs = writeJSON.mock.calls[0][1];
expect(writeCallArgs).toHaveProperty('tasks'); // Should have tasks property from readJSON mock
expect(writeCallArgs).toHaveProperty('_rawTaggedData'); // Should preserve tagged structure
});
});
describe('Force Flag Handling', () => {
test('should replace existing subtasks when force=true', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '4'; // Task with existing subtasks
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, true);
// Assert - Should replace existing subtasks
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({
id: 4,
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure'
})
])
})
])
}),
'/mock/project/root',
undefined
);
});
test('should append to existing subtasks when force=false', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '4'; // Task with existing subtasks
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should append to existing subtasks with proper ID increments
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
tasks: expect.arrayContaining([
expect.objectContaining({
id: 4,
subtasks: expect.arrayContaining([
// Should contain both existing and new subtasks
expect.any(Object),
expect.any(Object),
expect.any(Object),
expect.any(Object) // 1 existing + 3 new = 4 total
])
})
])
}),
'/mock/project/root',
undefined
);
});
});
describe('Error Handling', () => {
test('should handle non-existent task ID', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '999'; // Non-existent task
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
findTaskById.mockReturnValue(null);
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('Task 999 not found');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should expand tasks regardless of status (including done tasks)', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '1'; // Task with 'done' status
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
const result = await expandTask(
tasksPath,
taskId,
3,
false,
'',
context,
false
);
// Assert - Should successfully expand even 'done' tasks
expect(writeJSON).toHaveBeenCalled();
expect(result).toEqual(
expect.objectContaining({
task: expect.objectContaining({
id: 1,
status: 'done', // Status unchanged
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Set up project structure',
status: 'pending'
})
])
}),
telemetryData: expect.any(Object)
})
);
});
test('should handle AI service failures', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
generateTextService.mockRejectedValueOnce(new Error('AI service error'));
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('AI service error');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle file read errors', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
readJSON.mockImplementation(() => {
throw new Error('File read failed');
});
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('File read failed');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle invalid tasks data', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
readJSON.mockReturnValue(null);
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow();
});
});
describe('Output Format Handling', () => {
test('should display telemetry for CLI output format', async () => {
// Arrange
const { displayAiUsageSummary } = await import(
'../../../../../scripts/modules/ui.js'
);
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
projectRoot: '/mock/project/root'
// No mcpLog - should trigger CLI mode
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should display telemetry for CLI users
expect(displayAiUsageSummary).toHaveBeenCalledWith(
expect.objectContaining({
commandName: 'expand-task',
modelUsed: 'claude-3-5-sonnet',
totalCost: 0.012414
}),
'cli'
);
});
test('should not display telemetry for MCP output format', async () => {
// Arrange
const { displayAiUsageSummary } = await import(
'../../../../../scripts/modules/ui.js'
);
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should NOT display telemetry for MCP (handled at higher level)
expect(displayAiUsageSummary).not.toHaveBeenCalled();
});
});
describe('Edge Cases', () => {
test('should handle empty additional context', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should work with empty context (but may include project context)
expect(generateTextService).toHaveBeenCalledWith(
expect.objectContaining({
prompt: expect.stringMatching(/.*/) // Just ensure prompt exists
})
);
});
test('should handle additional context correctly', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const additionalContext = 'Use React hooks and TypeScript';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Act
await expandTask(
tasksPath,
taskId,
3,
false,
additionalContext,
context,
false
);
// Assert - Should include additional context in prompt
expect(generateTextService).toHaveBeenCalledWith(
expect.objectContaining({
prompt: expect.stringContaining('Use React hooks and TypeScript')
})
);
});
test('should handle missing project root in context', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock()
// No projectRoot in context
};
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should derive project root from tasksPath
expect(findProjectRoot).toHaveBeenCalledWith(tasksPath);
expect(readJSON).toHaveBeenCalledWith(
tasksPath,
'/mock/project/root',
undefined
);
});
});
});