fix(expand): Enhance context handling in expandAllTasks function

- Added `tag` to context destructuring for better context management.
- Updated `readJSON` call to include `contextTag` so tasks are read from the correct tag context.
- Ensured the correct tag is passed during task expansion to prevent tag corruption.
This commit is contained in:
Parththipan Thaniperumkarunai
2025-06-19 00:05:11 +02:00
committed by Ralph Khreish
parent 1ab8fd5274
commit aa3713f4e4
4 changed files with 950 additions and 28 deletions

View File

@@ -32,7 +32,12 @@ async function expandAllTasks(
context = {}, context = {},
outputFormat = 'text' // Assume text default for CLI outputFormat = 'text' // Assume text default for CLI
) { ) {
const { session, mcpLog, projectRoot: providedProjectRoot } = context; const {
session,
mcpLog,
projectRoot: providedProjectRoot,
tag: contextTag
} = context;
const isMCPCall = !!mcpLog; // Determine if called from MCP const isMCPCall = !!mcpLog; // Determine if called from MCP
const projectRoot = providedProjectRoot || findProjectRoot(); const projectRoot = providedProjectRoot || findProjectRoot();
@@ -74,7 +79,7 @@ async function expandAllTasks(
try { try {
logger.info(`Reading tasks from ${tasksPath}`); logger.info(`Reading tasks from ${tasksPath}`);
const data = readJSON(tasksPath, projectRoot); const data = readJSON(tasksPath, projectRoot, contextTag);
if (!data || !data.tasks) { if (!data || !data.tasks) {
throw new Error(`Invalid tasks data in ${tasksPath}`); throw new Error(`Invalid tasks data in ${tasksPath}`);
} }
@@ -124,7 +129,7 @@ async function expandAllTasks(
numSubtasks, numSubtasks,
useResearch, useResearch,
additionalContext, additionalContext,
{ ...context, projectRoot }, // Pass the whole context object with projectRoot { ...context, projectRoot, tag: data.tag || contextTag }, // Pass the whole context object with projectRoot and resolved tag
force force
); );
expandedCount++; expandedCount++;

View File

@@ -625,19 +625,38 @@ describe('MCP Server Direct Functions', () => {
// For successful cases, record that functions were called but don't make real calls // For successful cases, record that functions were called but don't make real calls
mockEnableSilentMode(); mockEnableSilentMode();
// Mock expandAllTasks // Mock expandAllTasks - now returns a structured object instead of undefined
const mockExpandAll = jest.fn().mockImplementation(async () => { const mockExpandAll = jest.fn().mockImplementation(async () => {
// Just simulate success without any real operations // Return the new structured response that matches the actual implementation
return undefined; // expandAllTasks doesn't return anything return {
success: true,
expandedCount: 2,
failedCount: 0,
skippedCount: 1,
tasksToExpand: 3,
telemetryData: {
timestamp: new Date().toISOString(),
commandName: 'expand-all-tasks',
totalCost: 0.05,
totalTokens: 1000,
inputTokens: 600,
outputTokens: 400
}
};
}); });
// Call mock expandAllTasks // Call mock expandAllTasks with the correct signature
await mockExpandAll( const result = await mockExpandAll(
args.num, args.file, // tasksPath
args.research || false, args.num, // numSubtasks
args.prompt || '', args.research || false, // useResearch
args.force || false, args.prompt || '', // additionalContext
{ mcpLog: mockLogger, session: options.session } args.force || false, // force
{
mcpLog: mockLogger,
session: options.session,
projectRoot: args.projectRoot
}
); );
mockDisableSilentMode(); mockDisableSilentMode();
@@ -645,13 +664,14 @@ describe('MCP Server Direct Functions', () => {
return { return {
success: true, success: true,
data: { data: {
message: 'Successfully expanded all pending tasks with subtasks', message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
details: { details: {
numSubtasks: args.num, expandedCount: result.expandedCount,
research: args.research || false, failedCount: result.failedCount,
prompt: args.prompt || '', skippedCount: result.skippedCount,
force: args.force || false tasksToExpand: result.tasksToExpand
} },
telemetryData: result.telemetryData
} }
}; };
} }
@@ -671,10 +691,13 @@ describe('MCP Server Direct Functions', () => {
// Assert // Assert
expect(result.success).toBe(true); expect(result.success).toBe(true);
expect(result.data.message).toBe( expect(result.data.message).toMatch(/Expand all operation completed/);
'Successfully expanded all pending tasks with subtasks' expect(result.data.details.expandedCount).toBe(2);
); expect(result.data.details.failedCount).toBe(0);
expect(result.data.details.numSubtasks).toBe(3); expect(result.data.details.skippedCount).toBe(1);
expect(result.data.details.tasksToExpand).toBe(3);
expect(result.data.telemetryData).toBeDefined();
expect(result.data.telemetryData.commandName).toBe('expand-all-tasks');
expect(mockEnableSilentMode).toHaveBeenCalled(); expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled(); expect(mockDisableSilentMode).toHaveBeenCalled();
}); });
@@ -695,7 +718,8 @@ describe('MCP Server Direct Functions', () => {
// Assert // Assert
expect(result.success).toBe(true); expect(result.success).toBe(true);
expect(result.data.details.research).toBe(true); expect(result.data.details.expandedCount).toBe(2);
expect(result.data.telemetryData).toBeDefined();
expect(mockEnableSilentMode).toHaveBeenCalled(); expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled(); expect(mockDisableSilentMode).toHaveBeenCalled();
}); });
@@ -715,7 +739,8 @@ describe('MCP Server Direct Functions', () => {
// Assert // Assert
expect(result.success).toBe(true); expect(result.success).toBe(true);
expect(result.data.details.force).toBe(true); expect(result.data.details.expandedCount).toBe(2);
expect(result.data.telemetryData).toBeDefined();
expect(mockEnableSilentMode).toHaveBeenCalled(); expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled(); expect(mockDisableSilentMode).toHaveBeenCalled();
}); });
@@ -735,11 +760,77 @@ describe('MCP Server Direct Functions', () => {
// Assert // Assert
expect(result.success).toBe(true); expect(result.success).toBe(true);
expect(result.data.details.prompt).toBe( expect(result.data.details.expandedCount).toBe(2);
'Additional context for subtasks' expect(result.data.telemetryData).toBeDefined();
);
expect(mockEnableSilentMode).toHaveBeenCalled(); expect(mockEnableSilentMode).toHaveBeenCalled();
expect(mockDisableSilentMode).toHaveBeenCalled(); expect(mockDisableSilentMode).toHaveBeenCalled();
}); });
test('should handle case with no eligible tasks', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath,
num: 3
};
// Act - Mock the scenario where no tasks are eligible for expansion
async function testNoEligibleTasks(args, mockLogger, options = {}) {
mockEnableSilentMode();
const mockExpandAll = jest.fn().mockImplementation(async () => {
return {
success: true,
expandedCount: 0,
failedCount: 0,
skippedCount: 0,
tasksToExpand: 0,
telemetryData: null,
message: 'No tasks eligible for expansion.'
};
});
const result = await mockExpandAll(
args.file,
args.num,
false,
'',
false,
{
mcpLog: mockLogger,
session: options.session,
projectRoot: args.projectRoot
},
'json'
);
mockDisableSilentMode();
return {
success: true,
data: {
message: result.message,
details: {
expandedCount: result.expandedCount,
failedCount: result.failedCount,
skippedCount: result.skippedCount,
tasksToExpand: result.tasksToExpand
},
telemetryData: result.telemetryData
}
};
}
const result = await testNoEligibleTasks(args, mockLogger, {
session: mockSession
});
// Assert
expect(result.success).toBe(true);
expect(result.data.message).toBe('No tasks eligible for expansion.');
expect(result.data.details.expandedCount).toBe(0);
expect(result.data.details.tasksToExpand).toBe(0);
expect(result.data.telemetryData).toBeNull();
});
}); });
}); });

View File

@@ -0,0 +1,324 @@
/**
* Tests for the expand-all MCP tool
*
* Note: This test does NOT test the actual implementation. It tests that:
* 1. The tool is registered correctly with the correct parameters
* 2. Arguments are passed correctly to expandAllTasksDirect
* 3. Error handling works as expected
*
* We do NOT import the real implementation - everything is mocked
*/
import { jest } from '@jest/globals';
// Mock EVERYTHING
const mockExpandAllTasksDirect = jest.fn();
jest.mock('../../../../mcp-server/src/core/task-master-core.js', () => ({
expandAllTasksDirect: mockExpandAllTasksDirect
}));
const mockHandleApiResult = jest.fn((result) => result);
const mockGetProjectRootFromSession = jest.fn(() => '/mock/project/root');
const mockCreateErrorResponse = jest.fn((msg) => ({
success: false,
error: { code: 'ERROR', message: msg }
}));
const mockWithNormalizedProjectRoot = jest.fn((fn) => fn);
jest.mock('../../../../mcp-server/src/tools/utils.js', () => ({
getProjectRootFromSession: mockGetProjectRootFromSession,
handleApiResult: mockHandleApiResult,
createErrorResponse: mockCreateErrorResponse,
withNormalizedProjectRoot: mockWithNormalizedProjectRoot
}));
// Mock the z object from zod
const mockZod = {
object: jest.fn(() => mockZod),
string: jest.fn(() => mockZod),
number: jest.fn(() => mockZod),
boolean: jest.fn(() => mockZod),
optional: jest.fn(() => mockZod),
describe: jest.fn(() => mockZod),
_def: {
shape: () => ({
num: {},
research: {},
prompt: {},
force: {},
tag: {},
projectRoot: {}
})
}
};
jest.mock('zod', () => ({
z: mockZod
}));
// DO NOT import the real module - create a fake implementation
// This is the fake implementation of registerExpandAllTool
const registerExpandAllTool = (server) => {
// Create simplified version of the tool config
const toolConfig = {
name: 'expand_all',
description: 'Use Taskmaster to expand all eligible pending tasks',
parameters: mockZod,
// Create a simplified mock of the execute function
execute: mockWithNormalizedProjectRoot(async (args, context) => {
const { log, session } = context;
try {
log.info &&
log.info(`Starting expand-all with args: ${JSON.stringify(args)}`);
// Call expandAllTasksDirect
const result = await mockExpandAllTasksDirect(args, log, { session });
// Handle result
return mockHandleApiResult(result, log);
} catch (error) {
log.error && log.error(`Error in expand-all tool: ${error.message}`);
return mockCreateErrorResponse(error.message);
}
})
};
// Register the tool with the server
server.addTool(toolConfig);
};
describe('MCP Tool: expand-all', () => {
// Create mock server
let mockServer;
let executeFunction;
// Create mock logger
const mockLogger = {
debug: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn()
};
// Test data
const validArgs = {
num: 3,
research: true,
prompt: 'additional context',
force: false,
tag: 'master',
projectRoot: '/test/project'
};
// Standard responses
const successResponse = {
success: true,
data: {
message:
'Expand all operation completed. Expanded: 2, Failed: 0, Skipped: 1',
details: {
expandedCount: 2,
failedCount: 0,
skippedCount: 1,
tasksToExpand: 3,
telemetryData: {
commandName: 'expand-all-tasks',
totalCost: 0.15,
totalTokens: 2500
}
}
}
};
const errorResponse = {
success: false,
error: {
code: 'EXPAND_ALL_ERROR',
message: 'Failed to expand tasks'
}
};
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Create mock server
mockServer = {
addTool: jest.fn((config) => {
executeFunction = config.execute;
})
};
// Setup default successful response
mockExpandAllTasksDirect.mockResolvedValue(successResponse);
// Register the tool
registerExpandAllTool(mockServer);
});
test('should register the tool correctly', () => {
// Verify tool was registered
expect(mockServer.addTool).toHaveBeenCalledWith(
expect.objectContaining({
name: 'expand_all',
description: expect.stringContaining('expand all eligible pending'),
parameters: expect.any(Object),
execute: expect.any(Function)
})
);
// Verify the tool config was passed
const toolConfig = mockServer.addTool.mock.calls[0][0];
expect(toolConfig).toHaveProperty('parameters');
expect(toolConfig).toHaveProperty('execute');
});
test('should execute the tool with valid parameters', async () => {
// Setup context
const mockContext = {
log: mockLogger,
session: { workingDirectory: '/mock/dir' }
};
// Execute the function
const result = await executeFunction(validArgs, mockContext);
// Verify expandAllTasksDirect was called with correct arguments
expect(mockExpandAllTasksDirect).toHaveBeenCalledWith(
validArgs,
mockLogger,
{ session: mockContext.session }
);
// Verify handleApiResult was called
expect(mockHandleApiResult).toHaveBeenCalledWith(
successResponse,
mockLogger
);
expect(result).toEqual(successResponse);
});
test('should handle expand all with no eligible tasks', async () => {
// Arrange
const mockDirectResult = {
success: true,
data: {
message:
'Expand all operation completed. Expanded: 0, Failed: 0, Skipped: 0',
details: {
expandedCount: 0,
failedCount: 0,
skippedCount: 0,
tasksToExpand: 0,
telemetryData: null
}
}
};
mockExpandAllTasksDirect.mockResolvedValue(mockDirectResult);
mockHandleApiResult.mockReturnValue({
success: true,
data: mockDirectResult.data
});
// Act
const result = await executeFunction(validArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(result.success).toBe(true);
expect(result.data.details.expandedCount).toBe(0);
expect(result.data.details.tasksToExpand).toBe(0);
});
test('should handle expand all with mixed success/failure', async () => {
// Arrange
const mockDirectResult = {
success: true,
data: {
message:
'Expand all operation completed. Expanded: 2, Failed: 1, Skipped: 0',
details: {
expandedCount: 2,
failedCount: 1,
skippedCount: 0,
tasksToExpand: 3,
telemetryData: {
commandName: 'expand-all-tasks',
totalCost: 0.1,
totalTokens: 1500
}
}
}
};
mockExpandAllTasksDirect.mockResolvedValue(mockDirectResult);
mockHandleApiResult.mockReturnValue({
success: true,
data: mockDirectResult.data
});
// Act
const result = await executeFunction(validArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(result.success).toBe(true);
expect(result.data.details.expandedCount).toBe(2);
expect(result.data.details.failedCount).toBe(1);
});
test('should handle errors from expandAllTasksDirect', async () => {
// Arrange
mockExpandAllTasksDirect.mockRejectedValue(
new Error('Direct function error')
);
// Act
const result = await executeFunction(validArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(mockLogger.error).toHaveBeenCalledWith(
expect.stringContaining('Error in expand-all tool')
);
expect(mockCreateErrorResponse).toHaveBeenCalledWith(
'Direct function error'
);
});
test('should handle different argument combinations', async () => {
// Test with minimal args
const minimalArgs = {
projectRoot: '/test/project'
};
// Act
await executeFunction(minimalArgs, {
log: mockLogger,
session: { workingDirectory: '/test' }
});
// Assert
expect(mockExpandAllTasksDirect).toHaveBeenCalledWith(
minimalArgs,
mockLogger,
expect.any(Object)
);
});
test('should use withNormalizedProjectRoot wrapper correctly', () => {
// Verify that the execute function is wrapped with withNormalizedProjectRoot
expect(mockWithNormalizedProjectRoot).toHaveBeenCalledWith(
expect.any(Function)
);
});
});

View File

@@ -0,0 +1,502 @@
/**
* Tests for the expand-all-tasks.js module
*/
import { jest } from '@jest/globals';
// Mock the dependencies before importing the module under test
jest.unstable_mockModule(
'../../../../../scripts/modules/task-manager/expand-task.js',
() => ({
default: jest.fn()
})
);
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
readJSON: jest.fn(),
log: jest.fn(),
isSilentMode: jest.fn(() => false),
findProjectRoot: jest.fn(() => '/test/project'),
aggregateTelemetry: jest.fn()
}));
jest.unstable_mockModule(
'../../../../../scripts/modules/config-manager.js',
() => ({
getDebugFlag: jest.fn(() => false)
})
);
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
startLoadingIndicator: jest.fn(),
stopLoadingIndicator: jest.fn(),
displayAiUsageSummary: jest.fn()
}));
jest.unstable_mockModule('chalk', () => ({
default: {
white: { bold: jest.fn((text) => text) },
cyan: jest.fn((text) => text),
green: jest.fn((text) => text),
gray: jest.fn((text) => text),
red: jest.fn((text) => text),
bold: jest.fn((text) => text)
}
}));
jest.unstable_mockModule('boxen', () => ({
default: jest.fn((text) => text)
}));
// Import the mocked modules
const { default: expandTask } = await import(
'../../../../../scripts/modules/task-manager/expand-task.js'
);
const { readJSON, aggregateTelemetry, findProjectRoot } = await import(
'../../../../../scripts/modules/utils.js'
);
// Import the module under test
const { default: expandAllTasks } = await import(
'../../../../../scripts/modules/task-manager/expand-all-tasks.js'
);
const mockExpandTask = expandTask;
const mockReadJSON = readJSON;
const mockAggregateTelemetry = aggregateTelemetry;
const mockFindProjectRoot = findProjectRoot;
describe('expandAllTasks', () => {
const mockTasksPath = '/test/tasks.json';
const mockProjectRoot = '/test/project';
const mockSession = { userId: 'test-user' };
const mockMcpLog = {
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
debug: jest.fn()
};
const sampleTasksData = {
tag: 'master',
tasks: [
{
id: 1,
title: 'Pending Task 1',
status: 'pending',
subtasks: []
},
{
id: 2,
title: 'In Progress Task',
status: 'in-progress',
subtasks: []
},
{
id: 3,
title: 'Done Task',
status: 'done',
subtasks: []
},
{
id: 4,
title: 'Task with Subtasks',
status: 'pending',
subtasks: [{ id: '4.1', title: 'Existing subtask' }]
}
]
};
beforeEach(() => {
jest.clearAllMocks();
mockReadJSON.mockReturnValue(sampleTasksData);
mockAggregateTelemetry.mockReturnValue({
timestamp: '2024-01-01T00:00:00.000Z',
commandName: 'expand-all-tasks',
totalCost: 0.1,
totalTokens: 2000,
inputTokens: 1200,
outputTokens: 800
});
});
describe('successful expansion', () => {
test('should expand all eligible pending tasks', async () => {
// Arrange
const mockTelemetryData = {
timestamp: '2024-01-01T00:00:00.000Z',
commandName: 'expand-task',
totalCost: 0.05,
totalTokens: 1000
};
mockExpandTask.mockResolvedValue({
telemetryData: mockTelemetryData
});
// Act
const result = await expandAllTasks(
mockTasksPath,
3, // numSubtasks
false, // useResearch
'test context', // additionalContext
false, // force
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot,
tag: 'master'
},
'json' // outputFormat
);
// Assert
expect(result.success).toBe(true);
expect(result.expandedCount).toBe(2); // Tasks 1 and 2 (pending and in-progress)
expect(result.failedCount).toBe(0);
expect(result.skippedCount).toBe(0);
expect(result.tasksToExpand).toBe(2);
expect(result.telemetryData).toBeDefined();
// Verify readJSON was called correctly
expect(mockReadJSON).toHaveBeenCalledWith(
mockTasksPath,
mockProjectRoot,
'master'
);
// Verify expandTask was called for eligible tasks
expect(mockExpandTask).toHaveBeenCalledTimes(2);
expect(mockExpandTask).toHaveBeenCalledWith(
mockTasksPath,
1,
3,
false,
'test context',
expect.objectContaining({
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot,
tag: 'master'
}),
false
);
});
test('should handle force flag to expand tasks with existing subtasks', async () => {
// Arrange
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
2,
false,
'',
true, // force = true
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.expandedCount).toBe(3); // Tasks 1, 2, and 4 (including task with existing subtasks)
expect(mockExpandTask).toHaveBeenCalledTimes(3);
});
test('should handle research flag', async () => {
// Arrange
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.08 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
undefined, // numSubtasks not specified
true, // useResearch = true
'research context',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(mockExpandTask).toHaveBeenCalledWith(
mockTasksPath,
expect.any(Number),
undefined,
true, // research flag passed correctly
'research context',
expect.any(Object),
false
);
});
test('should return success with message when no tasks are eligible', async () => {
// Arrange - Mock tasks data with no eligible tasks
const noEligibleTasksData = {
tag: 'master',
tasks: [
{ id: 1, status: 'done', subtasks: [] },
{
id: 2,
status: 'pending',
subtasks: [{ id: '2.1', title: 'existing' }]
}
]
};
mockReadJSON.mockReturnValue(noEligibleTasksData);
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false, // force = false, so task with subtasks won't be expanded
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(result.expandedCount).toBe(0);
expect(result.failedCount).toBe(0);
expect(result.skippedCount).toBe(0);
expect(result.tasksToExpand).toBe(0);
expect(result.message).toBe('No tasks eligible for expansion.');
expect(mockExpandTask).not.toHaveBeenCalled();
});
});
describe('error handling', () => {
test('should handle expandTask failures gracefully', async () => {
// Arrange
mockExpandTask
.mockResolvedValueOnce({ telemetryData: { totalCost: 0.05 } }) // First task succeeds
.mockRejectedValueOnce(new Error('AI service error')); // Second task fails
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(result.expandedCount).toBe(1);
expect(result.failedCount).toBe(1);
});
test('should throw error when tasks.json is invalid', async () => {
// Arrange
mockReadJSON.mockReturnValue(null);
// Act & Assert
await expect(
expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
)
).rejects.toThrow('Invalid tasks data');
});
test('should throw error when project root cannot be determined', async () => {
// Arrange - Mock findProjectRoot to return null for this test
mockFindProjectRoot.mockReturnValueOnce(null);
// Act & Assert
await expect(
expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog
// No projectRoot provided, and findProjectRoot will return null
},
'json'
)
).rejects.toThrow('Could not determine project root directory');
});
});
describe('telemetry aggregation', () => {
test('should aggregate telemetry data from multiple expand operations', async () => {
// Arrange
const telemetryData1 = {
commandName: 'expand-task',
totalCost: 0.03,
totalTokens: 600
};
const telemetryData2 = {
commandName: 'expand-task',
totalCost: 0.04,
totalTokens: 800
};
mockExpandTask
.mockResolvedValueOnce({ telemetryData: telemetryData1 })
.mockResolvedValueOnce({ telemetryData: telemetryData2 });
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(mockAggregateTelemetry).toHaveBeenCalledWith(
[telemetryData1, telemetryData2],
'expand-all-tasks'
);
expect(result.telemetryData).toBeDefined();
expect(result.telemetryData.commandName).toBe('expand-all-tasks');
});
test('should handle missing telemetry data gracefully', async () => {
// Arrange
mockExpandTask.mockResolvedValue({}); // No telemetryData
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot
},
'json'
);
// Assert
expect(result.success).toBe(true);
expect(mockAggregateTelemetry).toHaveBeenCalledWith(
[],
'expand-all-tasks'
);
});
});
describe('output format handling', () => {
test('should use text output format for CLI calls', async () => {
// Arrange
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
projectRoot: mockProjectRoot
// No mcpLog provided, should use CLI logger
},
'text' // CLI output format
);
// Assert
expect(result.success).toBe(true);
// In text mode, loading indicators and console output would be used
// This is harder to test directly but we can verify the result structure
});
test('should handle context tag properly', async () => {
// Arrange
const taggedTasksData = {
...sampleTasksData,
tag: 'feature-branch'
};
mockReadJSON.mockReturnValue(taggedTasksData);
mockExpandTask.mockResolvedValue({
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
});
// Act
const result = await expandAllTasks(
mockTasksPath,
3,
false,
'',
false,
{
session: mockSession,
mcpLog: mockMcpLog,
projectRoot: mockProjectRoot,
tag: 'feature-branch'
},
'json'
);
// Assert
expect(mockReadJSON).toHaveBeenCalledWith(
mockTasksPath,
mockProjectRoot,
'feature-branch'
);
expect(mockExpandTask).toHaveBeenCalledWith(
mockTasksPath,
expect.any(Number),
3,
false,
'',
expect.objectContaining({
tag: 'feature-branch'
}),
false
);
});
});
});