/**
 * Integration test for direct function imports in MCP server
 */

import { jest } from '@jest/globals';
import path, { dirname } from 'path';
import { fileURLToPath } from 'url';

// Get the current module's directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Test file paths
const testProjectRoot = path.join(__dirname, '../../fixtures');
const testTasksPath = path.join(testProjectRoot, 'test-tasks.json');
// Create explicit mock functions
const mockExistsSync = jest.fn().mockReturnValue(true);
const mockWriteFileSync = jest.fn();
const mockReadFileSync = jest.fn();
const mockUnlinkSync = jest.fn();
const mockMkdirSync = jest.fn();

const mockFindTasksJsonPath = jest.fn().mockReturnValue(testTasksPath);
const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
const mockEnableSilentMode = jest.fn();
const mockDisableSilentMode = jest.fn();
const mockReadComplexityReport = jest.fn().mockReturnValue(null);
const mockGetAnthropicClient = jest.fn().mockReturnValue({});
const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
const mockHandleAnthropicStream = jest.fn().mockResolvedValue(
	JSON.stringify([
		{
			id: 1,
			title: 'Mock Subtask 1',
			description: 'First mock subtask',
			dependencies: [],
			details: 'Implementation details for mock subtask 1'
		},
		{
			id: 2,
			title: 'Mock Subtask 2',
			description: 'Second mock subtask',
			dependencies: [1],
			details: 'Implementation details for mock subtask 2'
		}
	])
);
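// Note: the stream mock resolves raw JSON text, while mockParseSubtasksFromText
// below returns the already-structured subtasks, so these tests never perform a
// real Anthropic call or real response parsing.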
const mockParseSubtasksFromText = jest.fn().mockReturnValue([
	{
		id: 1,
		title: 'Mock Subtask 1',
		description: 'First mock subtask',
		status: 'pending',
		dependencies: []
	},
	{
		id: 2,
		title: 'Mock Subtask 2',
		description: 'Second mock subtask',
		status: 'pending',
		dependencies: [1]
	}
]);
// Create a mock for expandTask that returns predefined responses instead of making real calls
const mockExpandTask = jest
	.fn()
	.mockImplementation(
		(taskId, numSubtasks, useResearch, additionalContext, options) => {
			const task = {
				...(sampleTasks.tasks.find((t) => t.id === taskId) || {}),
				subtasks: useResearch
					? [
							{
								id: 1,
								title: 'Research-Backed Subtask 1',
								description: 'First research-backed subtask',
								status: 'pending',
								dependencies: []
							},
							{
								id: 2,
								title: 'Research-Backed Subtask 2',
								description: 'Second research-backed subtask',
								status: 'pending',
								dependencies: [1]
							}
						]
					: [
							{
								id: 1,
								title: 'Mock Subtask 1',
								description: 'First mock subtask',
								status: 'pending',
								dependencies: []
							},
							{
								id: 2,
								title: 'Mock Subtask 2',
								description: 'Second mock subtask',
								status: 'pending',
								dependencies: [1]
							}
						]
			};

			return Promise.resolve(task);
		}
	);
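// Illustrative usage (hypothetical values, not part of the suite):
//   const task = await mockExpandTask(3, 2, false, '', { mcpLog: mockLogger, session: mockSession });
//   // task.subtasks.length === 2, titled 'Mock Subtask 1' / 'Mock Subtask 2'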
const mockGenerateTaskFiles = jest.fn().mockResolvedValue(true);
const mockFindTaskById = jest.fn();
const mockTaskExists = jest.fn().mockReturnValue(true);
// Mock fs module to avoid file system operations
jest.mock('fs', () => ({
	existsSync: mockExistsSync,
	writeFileSync: mockWriteFileSync,
	readFileSync: mockReadFileSync,
	unlinkSync: mockUnlinkSync,
	mkdirSync: mockMkdirSync
}));

// Mock utils functions to avoid actual file operations
jest.mock('../../../scripts/modules/utils.js', () => ({
	readJSON: mockReadJSON,
	writeJSON: mockWriteJSON,
	enableSilentMode: mockEnableSilentMode,
	disableSilentMode: mockDisableSilentMode,
	readComplexityReport: mockReadComplexityReport,
	CONFIG: {
		model: 'claude-3-7-sonnet-20250219',
		maxTokens: 64000,
		temperature: 0.2,
		defaultSubtasks: 5
	}
}));

// Mock path-utils with findTasksJsonPath
jest.mock('../../../mcp-server/src/core/utils/path-utils.js', () => ({
	findTasksJsonPath: mockFindTasksJsonPath
}));

// Mock the AI module to prevent any real API calls
jest.mock('../../../scripts/modules/ai-services-unified.js', () => ({
	// Mock the functions exported by ai-services-unified.js as needed.
	// For example, if you are testing a function that uses generateTextService:
	generateTextService: jest.fn().mockResolvedValue('Mock AI Response')
	// Add other mocks for generateObjectService and streamTextService if used
}));

// Mock task-manager.js to avoid real operations
jest.mock('../../../scripts/modules/task-manager.js', () => ({
	expandTask: mockExpandTask,
	generateTaskFiles: mockGenerateTaskFiles,
	findTaskById: mockFindTaskById,
	taskExists: mockTaskExists
}));
// Import dependencies after mocks are set up
import { sampleTasks } from '../../fixtures/sample-tasks.js';

// Mock logger
const mockLogger = {
	info: jest.fn(),
	error: jest.fn(),
	debug: jest.fn(),
	warn: jest.fn()
};

// Mock session
const mockSession = {
	env: {
		ANTHROPIC_API_KEY: 'mock-api-key',
		MODEL: 'claude-3-sonnet-20240229',
		MAX_TOKENS: 4000,
		TEMPERATURE: '0.2'
	}
};
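// Note: mockSession stands in for the MCP session object passed through to the
// direct functions; its env block plays the role of credentials and model
// settings that would normally come from the MCP client configuration.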
describe('MCP Server Direct Functions', () => {
	// Set up before each test
	beforeEach(() => {
		jest.clearAllMocks();

		// Default mockReadJSON implementation
		mockReadJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));
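		// The JSON.parse(JSON.stringify(...)) round-trip above deep-clones the
		// fixture, so a test that mutates the returned tasks cannot leak state
		// into later tests.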

		// Default mockFindTaskById implementation
		mockFindTaskById.mockImplementation((tasks, taskId) => {
			const id = parseInt(taskId, 10);
			return tasks.find((t) => t.id === id);
		});

		// Default mockTaskExists implementation
		mockTaskExists.mockImplementation((tasks, taskId) => {
			const id = parseInt(taskId, 10);
			return tasks.some((t) => t.id === id);
		});

		// Default findTasksJsonPath implementation
		mockFindTasksJsonPath.mockImplementation((args) => {
			// Mock returning null for non-existent files
			if (args.file === 'non-existent-file.json') {
				return null;
			}
			return testTasksPath;
		});
	});
	describe('listTasksDirect', () => {
		// Sample complexity report for testing
		const mockComplexityReport = {
			meta: {
				generatedAt: '2025-03-24T20:01:35.986Z',
				tasksAnalyzed: 3,
				thresholdScore: 5,
				projectName: 'Test Project',
				usedResearch: false
			},
			complexityAnalysis: [
				{
					taskId: 1,
					taskTitle: 'Initialize Project',
					complexityScore: 3,
					recommendedSubtasks: 2
				},
				{
					taskId: 2,
					taskTitle: 'Create Core Functionality',
					complexityScore: 8,
					recommendedSubtasks: 5
				},
				{
					taskId: 3,
					taskTitle: 'Implement UI Components',
					complexityScore: 6,
					recommendedSubtasks: 4
				}
			]
		};
		// Test wrapper function that doesn't rely on the actual implementation
		async function testListTasks(args, mockLogger) {
			// File not found case
			if (args.file === 'non-existent-file.json') {
				mockLogger.error('Tasks file not found');
				return {
					success: false,
					error: {
						code: 'FILE_NOT_FOUND_ERROR',
						message: 'Tasks file not found'
					}
				};
			}

			// Check for complexity report
			const complexityReport = mockReadComplexityReport();
			let tasksData = [...sampleTasks.tasks];

			// Add complexity scores if report exists
			if (complexityReport && complexityReport.complexityAnalysis) {
				tasksData = tasksData.map((task) => {
					const analysis = complexityReport.complexityAnalysis.find(
						(a) => a.taskId === task.id
					);
					if (analysis) {
						return { ...task, complexityScore: analysis.complexityScore };
					}
					return task;
				});
			}

			// Success case
			if (!args.status && !args.withSubtasks) {
				return {
					success: true,
					data: {
						tasks: tasksData,
						stats: {
							total: tasksData.length,
							completed: tasksData.filter((t) => t.status === 'done').length,
							inProgress: tasksData.filter((t) => t.status === 'in-progress')
								.length,
							pending: tasksData.filter((t) => t.status === 'pending').length
						}
					}
				};
			}

			// Status filter case
			if (args.status) {
				const filteredTasks = tasksData.filter((t) => t.status === args.status);
				return {
					success: true,
					data: {
						tasks: filteredTasks,
						filter: args.status,
						stats: {
							total: tasksData.length,
							filtered: filteredTasks.length
						}
					}
				};
			}

			// Include subtasks case
			if (args.withSubtasks) {
				return {
					success: true,
					data: {
						tasks: tasksData,
						includeSubtasks: true,
						stats: {
							total: tasksData.length
						}
					}
				};
			}

			// Default case
			return {
				success: true,
				data: { tasks: [] }
			};
		}
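		// The wrapper mirrors the { success, data } / { success, error } result
		// envelope of the real direct functions, so the tests below exercise the
		// same contract without invoking the actual implementation.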
		test('should return all tasks when no filter is provided', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.tasks.length).toBe(sampleTasks.tasks.length);
			expect(result.data.stats.total).toBe(sampleTasks.tasks.length);
		});

		test('should filter tasks by status', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				status: 'pending'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.filter).toBe('pending');
			// Should only include pending tasks
			result.data.tasks.forEach((task) => {
				expect(task.status).toBe('pending');
			});
		});

		test('should include subtasks when requested', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.includeSubtasks).toBe(true);

			// Verify subtasks are included for tasks that have them
			const tasksWithSubtasks = result.data.tasks.filter(
				(t) => t.subtasks && t.subtasks.length > 0
			);
			expect(tasksWithSubtasks.length).toBeGreaterThan(0);
		});

		test('should handle file not found errors', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: 'non-existent-file.json'
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();
		});

		test('should include complexity scores when complexity report exists', async () => {
			// Arrange
			mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);

			// Check that tasks have complexity scores from the report
			mockComplexityReport.complexityAnalysis.forEach((analysis) => {
				const task = result.data.tasks.find((t) => t.id === analysis.taskId);
				if (task) {
					expect(task.complexityScore).toBe(analysis.complexityScore);
				}
			});
		});
	});
	describe('expandTaskDirect', () => {
		// Test wrapper function that returns appropriate results based on the test case
		async function testExpandTask(args, mockLogger, options = {}) {
			// Missing task ID case
			if (!args.id) {
				mockLogger.error('Task ID is required');
				return {
					success: false,
					error: {
						code: 'INPUT_VALIDATION_ERROR',
						message: 'Task ID is required'
					}
				};
			}

			// Non-existent task ID case
			if (args.id === '999') {
				mockLogger.error(`Task with ID ${args.id} not found`);
				return {
					success: false,
					error: {
						code: 'TASK_NOT_FOUND',
						message: `Task with ID ${args.id} not found`
					}
				};
			}

			// Completed task case
			if (args.id === '1') {
				mockLogger.error(
					`Task ${args.id} is already marked as done and cannot be expanded`
				);
				return {
					success: false,
					error: {
						code: 'TASK_COMPLETED',
						message: `Task ${args.id} is already marked as done and cannot be expanded`
					}
				};
			}

			// For successful cases, record that functions were called but don't make real calls
			mockEnableSilentMode();

			// This is just a mock call that won't make real API requests.
			// We're using mockExpandTask, which is already a mock function.
			const expandedTask = await mockExpandTask(
				parseInt(args.id, 10),
				args.num,
				args.research || false,
				args.prompt || '',
				{ mcpLog: mockLogger, session: options.session }
			);

			mockDisableSilentMode();

			return {
				success: true,
				data: {
					task: expandedTask,
					subtasksAdded: expandedTask.subtasks.length,
					hasExistingSubtasks: false
				}
			};
		}
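		// The error branches above return before mockExpandTask is invoked, which
		// is what lets the failure tests assert that no expand call (and thus no
		// AI request) was ever made.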
		test('should expand a task with subtasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '3', // ID 3 exists in sampleTasks with status 'pending'
				num: 2
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.task).toBeDefined();
			expect(result.data.task.subtasks).toBeDefined();
			expect(result.data.task.subtasks.length).toBe(2);
			expect(mockExpandTask).toHaveBeenCalledWith(
				3, // Task ID as number
				2, // num parameter
				false, // useResearch
				'', // prompt
				expect.objectContaining({
					mcpLog: mockLogger,
					session: mockSession
				})
			);
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle missing task ID', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
				// id is intentionally missing
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('INPUT_VALIDATION_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();
			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should handle non-existent task ID', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '999' // Non-existent task ID
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('TASK_NOT_FOUND');
			expect(mockLogger.error).toHaveBeenCalled();
			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should handle completed tasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '1' // Task with 'done' status in sampleTasks
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(false);
			expect(result.error.code).toBe('TASK_COMPLETED');
			expect(mockLogger.error).toHaveBeenCalled();
			// Make sure no real expand calls were made
			expect(mockExpandTask).not.toHaveBeenCalled();
		});

		test('should use AI client when research flag is set', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				id: '3',
				research: true
			};

			// Act
			const result = await testExpandTask(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(mockExpandTask).toHaveBeenCalledWith(
				3, // Task ID as number
				undefined, // args.num is undefined
				true, // useResearch should be true
				'', // prompt
				expect.objectContaining({
					mcpLog: mockLogger,
					session: mockSession
				})
			);
			// Verify the result includes research-backed subtasks
			expect(result.data.task.subtasks[0].title).toContain('Research-Backed');
		});
	});
	describe('expandAllTasksDirect', () => {
		// Test wrapper function that returns appropriate results based on the test case
		async function testExpandAllTasks(args, mockLogger, options = {}) {
			// For successful cases, record that functions were called but don't make real calls
			mockEnableSilentMode();

			// Mock expandAllTasks - now returns a structured object instead of undefined
			const mockExpandAll = jest.fn().mockImplementation(async () => {
				// Return the new structured response that matches the actual implementation
				return {
					success: true,
					expandedCount: 2,
					failedCount: 0,
					skippedCount: 1,
					tasksToExpand: 3,
					telemetryData: {
						timestamp: new Date().toISOString(),
						commandName: 'expand-all-tasks',
						totalCost: 0.05,
						totalTokens: 1000,
						inputTokens: 600,
						outputTokens: 400
					}
				};
			});

			// Call mock expandAllTasks with the correct signature
			const result = await mockExpandAll(
				args.file, // tasksPath
				args.num, // numSubtasks
				args.research || false, // useResearch
				args.prompt || '', // additionalContext
				args.force || false, // force
				{
					mcpLog: mockLogger,
					session: options.session,
					projectRoot: args.projectRoot
				}
			);

			mockDisableSilentMode();

			return {
				success: true,
				data: {
					message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
					details: {
						expandedCount: result.expandedCount,
						failedCount: result.failedCount,
						skippedCount: result.skippedCount,
						tasksToExpand: result.tasksToExpand
					},
					telemetryData: result.telemetryData
				}
			};
		}
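		// The structured counts (expandedCount / failedCount / skippedCount /
		// tasksToExpand) plus telemetryData mirror the shape the real
		// expandAllTasks resolves with; the assertions below depend on it.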
		test('should expand all pending tasks with subtasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				num: 3
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.message).toMatch(/Expand all operation completed/);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.details.failedCount).toBe(0);
			expect(result.data.details.skippedCount).toBe(1);
			expect(result.data.details.tasksToExpand).toBe(3);
			expect(result.data.telemetryData).toBeDefined();
			expect(result.data.telemetryData.commandName).toBe('expand-all-tasks');
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle research flag', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				research: true,
				num: 2
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle force flag', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				force: true
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle additional context/prompt', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				prompt: 'Additional context for subtasks'
			};

			// Act
			const result = await testExpandAllTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.details.expandedCount).toBe(2);
			expect(result.data.telemetryData).toBeDefined();
			expect(mockEnableSilentMode).toHaveBeenCalled();
			expect(mockDisableSilentMode).toHaveBeenCalled();
		});

		test('should handle case with no eligible tasks', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				num: 3
			};

			// Act - Mock the scenario where no tasks are eligible for expansion
			async function testNoEligibleTasks(args, mockLogger, options = {}) {
				mockEnableSilentMode();

				const mockExpandAll = jest.fn().mockImplementation(async () => {
					return {
						success: true,
						expandedCount: 0,
						failedCount: 0,
						skippedCount: 0,
						tasksToExpand: 0,
						telemetryData: null,
						message: 'No tasks eligible for expansion.'
					};
				});

				const result = await mockExpandAll(
					args.file,
					args.num,
					false,
					'',
					false,
					{
						mcpLog: mockLogger,
						session: options.session,
						projectRoot: args.projectRoot
					},
					'json'
				);

				mockDisableSilentMode();

				return {
					success: true,
					data: {
						message: result.message,
						details: {
							expandedCount: result.expandedCount,
							failedCount: result.failedCount,
							skippedCount: result.skippedCount,
							tasksToExpand: result.tasksToExpand
						},
						telemetryData: result.telemetryData
					}
				};
			}

			const result = await testNoEligibleTasks(args, mockLogger, {
				session: mockSession
			});

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.message).toBe('No tasks eligible for expansion.');
			expect(result.data.details.expandedCount).toBe(0);
			expect(result.data.details.tasksToExpand).toBe(0);
			expect(result.data.telemetryData).toBeNull();
		});
	});
});