chore: prettier formatting

Eyal Toledano
2025-04-09 18:20:47 -04:00
parent 8ad1749036
commit 0a657fb9b2
7 changed files with 487 additions and 376 deletions

View File

@@ -27,7 +27,7 @@ import {
* @param {string} [args.title] - Task title (for manual task creation)
* @param {string} [args.description] - Task description (for manual task creation)
* @param {string} [args.details] - Implementation details (for manual task creation)
* @param {string} [args.testStrategy] - Test strategy (for manual task creation)
* @param {string} [args.dependencies] - Comma-separated list of task IDs this task depends on
* @param {string} [args.priority='medium'] - Task priority (high, medium, low)
* @param {string} [args.file='tasks/tasks.json'] - Path to the tasks file
@@ -47,16 +47,19 @@ export async function addTaskDirect(args, log, context = {}) {
// Check if this is manual task creation or AI-driven task creation
const isManualCreation = args.title && args.description;
// Check required parameters
if (!args.prompt && !isManualCreation) {
log.error(
'Missing required parameters: either prompt or title+description must be provided'
);
disableSilentMode();
return {
success: false,
error: {
code: 'MISSING_PARAMETER',
message:
'Either the prompt parameter or both title and description parameters are required for adding a task'
}
};
}
@@ -76,7 +79,7 @@ export async function addTaskDirect(args, log, context = {}) {
const { session } = context;
let manualTaskData = null;
if (isManualCreation) {
// Create manual task data object
manualTaskData = {
@@ -85,11 +88,11 @@ export async function addTaskDirect(args, log, context = {}) {
details: args.details || '',
testStrategy: args.testStrategy || ''
};
log.info(
`Adding new task manually with title: "${args.title}", dependencies: [${dependencies.join(', ')}], priority: ${priority}`
);
// Call the addTask function with manual task data
const newTaskId = await addTask(
tasksPath,
@@ -104,10 +107,10 @@ export async function addTaskDirect(args, log, context = {}) {
null, // No custom environment
manualTaskData // Pass the manual task data
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {

View File

@@ -22,11 +22,28 @@ export function registerAddTaskTool(server) {
name: 'add_task',
description: 'Add a new task using AI',
parameters: z.object({
prompt: z
.string()
.optional()
.describe(
'Description of the task to add (required if not using manual fields)'
),
title: z
.string()
.optional()
.describe('Task title (for manual task creation)'),
description: z
.string()
.optional()
.describe('Task description (for manual task creation)'),
details: z
.string()
.optional()
.describe('Implementation details (for manual task creation)'),
testStrategy: z
.string()
.optional()
.describe('Test strategy (for manual task creation)'),
dependencies: z
.string()
.optional()
@@ -35,11 +52,16 @@ export function registerAddTaskTool(server) {
.string()
.optional()
.describe('Task priority (high, medium, low)'),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
),
research: z
.boolean()
.optional()

View File

@@ -791,20 +791,46 @@ function registerCommands(programInstance) {
.command('add-task')
.description('Add a new task using AI or manual input')
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.option(
'-p, --prompt <prompt>',
'Description of the task to add (required if not using manual fields)'
)
.option('-t, --title <title>', 'Task title (for manual task creation)')
.option(
'-d, --description <description>',
'Task description (for manual task creation)'
)
.option(
'--details <details>',
'Implementation details (for manual task creation)'
)
.option(
'--test-strategy <testStrategy>',
'Test strategy (for manual task creation)'
)
.option(
'--dependencies <dependencies>',
'Comma-separated list of task IDs this task depends on'
)
.option(
'--priority <priority>',
'Task priority (high, medium, low)',
'medium'
)
.option(
'-r, --research',
'Whether to use research capabilities for task creation'
)
.action(async (options) => {
const isManualCreation = options.title && options.description;
// Validate that either prompt or title+description are provided
if (!options.prompt && !isManualCreation) {
console.error(
chalk.red(
'Error: Either --prompt or both --title and --description must be provided'
)
);
process.exit(1);
}
@@ -812,7 +838,9 @@ function registerCommands(programInstance) {
// Prepare dependencies if provided
let dependencies = [];
if (options.dependencies) {
dependencies = options.dependencies
.split(',')
.map((id) => parseInt(id.trim(), 10));
}
// Create manual task data if title and description are provided
@@ -825,17 +853,27 @@ function registerCommands(programInstance) {
testStrategy: options.testStrategy || ''
};
console.log(
chalk.blue(`Creating task manually with title: "${options.title}"`)
);
if (dependencies.length > 0) {
console.log(
chalk.blue(`Dependencies: [${dependencies.join(', ')}]`)
);
}
if (options.priority) {
console.log(chalk.blue(`Priority: ${options.priority}`));
}
} else {
console.log(
chalk.blue(
`Creating task with AI using prompt: "${options.prompt}"`
)
);
if (dependencies.length > 0) {
console.log(
chalk.blue(`Dependencies: [${dependencies.join(', ')}]`)
);
}
if (options.priority) {
console.log(chalk.blue(`Priority: ${options.priority}`));

View File

@@ -3434,18 +3434,18 @@ async function addTask(
'Failed to generate task data after all model attempts'
);
}
// Set the AI-generated task data
taskData = aiGeneratedTaskData;
} catch (error) {
// Handle AI errors
log('error', `Error generating task with AI: ${error.message}`);
// Stop any loading indicator
if (outputFormat === 'text' && loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
}
throw error;
}
}
@@ -3506,7 +3506,9 @@ async function addTask(
'\n' +
chalk.white(`Status: ${getStatusWithColor(newTask.status)}`) +
'\n' +
chalk.white(
`Priority: ${chalk.keyword(getPriorityColor(newTask.priority))(newTask.priority)}`
) +
'\n' +
(dependencies.length > 0
? chalk.white(`Dependencies: ${dependencies.join(', ')}`) + '\n'
@@ -3514,11 +3516,17 @@ async function addTask(
'\n' +
chalk.white.bold('Next Steps:') +
'\n' +
chalk.cyan(
`1. Run ${chalk.yellow(`task-master show ${newTaskId}`)} to see complete task details`
) +
'\n' +
chalk.cyan(
`2. Run ${chalk.yellow(`task-master set-status --id=${newTaskId} --status=in-progress`)} to start working on it`
) +
'\n' +
chalk.cyan(
`3. Run ${chalk.yellow(`task-master expand --id=${newTaskId}`)} to break it down into subtasks`
),
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
)
);

View File

@@ -1,14 +1,14 @@
{
"tasks": [
{
"id": 1,
"dependencies": [],
"subtasks": [
{
"id": 1,
"dependencies": []
}
]
}
]
}
"tasks": [
{
"id": 1,
"dependencies": [],
"subtasks": [
{
"id": 1,
"dependencies": []
}
]
}
]
}

View File

@@ -3,7 +3,10 @@
*/
import { jest } from '@jest/globals';
import {
sampleTasks,
emptySampleTasks
} from '../../tests/fixtures/sample-tasks.js';
// Mock functions that need jest.fn methods
const mockParsePRD = jest.fn().mockResolvedValue(undefined);
@@ -655,34 +658,49 @@ describe('Commands Module', () => {
existsSync: jest.fn().mockReturnValue(true),
readFileSync: jest.fn().mockReturnValue(JSON.stringify(sampleTasks))
};
// Create a mock task manager with an addTask function that resolves to taskId 5
mockTaskManager = {
addTask: jest
.fn()
.mockImplementation(
(
file,
prompt,
dependencies,
priority,
session,
research,
generateFiles,
manualTaskData
) => {
// Return the next ID after the last one in sample tasks
const newId = sampleTasks.tasks.length + 1;
return Promise.resolve(newId.toString());
}
)
};
// Create a simplified version of the add-task action function for testing
addTaskAction = async (cmd, options) => {
options = options || {}; // Ensure options is not undefined
const isManualCreation = options.title && options.description;
// Get prompt directly or from p shorthand
const prompt = options.prompt || options.p;
// Validate that either prompt or title+description are provided
if (!prompt && !isManualCreation) {
throw new Error(
'Either --prompt or both --title and --description must be provided'
);
}
// Prepare dependencies if provided
let dependencies = [];
if (options.dependencies) {
dependencies = options.dependencies.split(',').map((id) => id.trim());
}
// Create manual task data if title and description are provided
@@ -695,13 +713,13 @@ describe('Commands Module', () => {
testStrategy: options.testStrategy || ''
};
}
// Call addTask with the right parameters
return await mockTaskManager.addTask(
options.file || 'tasks/tasks.json',
prompt,
dependencies,
options.priority || 'medium',
{ session: process.env },
options.research || options.r || false,
null,
@@ -713,19 +731,21 @@ describe('Commands Module', () => {
test('should throw error if no prompt or manual task data provided', async () => {
// Call without required params
const options = { file: 'tasks/tasks.json' };
await expect(async () => {
await addTaskAction(undefined, options);
}).rejects.toThrow(
'Either --prompt or both --title and --description must be provided'
);
});
test('should handle short-hand flag -p for prompt', async () => {
// Use -p as prompt short-hand
const options = {
p: 'Create a login component',
file: 'tasks/tasks.json'
};
await addTaskAction(undefined, options);
// Check that task manager was called with correct arguments
@@ -737,17 +757,17 @@ describe('Commands Module', () => {
{ session: process.env },
false, // Research flag
null, // Generate files parameter
null // Manual task data
);
});
test('should handle short-hand flag -r for research', async () => {
const options = {
prompt: 'Create authentication system',
r: true,
file: 'tasks/tasks.json'
};
await addTaskAction(undefined, options);
// Check that task manager was called with correct research flag
@@ -759,18 +779,18 @@ describe('Commands Module', () => {
{ session: process.env },
true, // Research flag should be true
null, // Generate files parameter
null // Manual task data
);
});
test('should handle manual task creation with title and description', async () => {
const options = {
title: 'Login Component',
description: 'Create a reusable login form',
details: 'Implementation details here',
file: 'tasks/tasks.json'
};
await addTaskAction(undefined, options);
// Check that task manager was called with correct manual task data
@@ -782,7 +802,8 @@ describe('Commands Module', () => {
{ session: process.env },
false,
null, // Generate files parameter
{
// Manual task data
title: 'Login Component',
description: 'Create a reusable login form',
details: 'Implementation details here',
@@ -792,12 +813,12 @@ describe('Commands Module', () => {
});
test('should handle dependencies parameter', async () => {
const options = {
prompt: 'Create user settings page',
dependencies: '1, 3, 5', // Dependencies with spaces
file: 'tasks/tasks.json'
};
await addTaskAction(undefined, options);
// Check that dependencies are parsed correctly
@@ -809,17 +830,17 @@ describe('Commands Module', () => {
{ session: process.env },
false,
null, // Generate files parameter
null // Manual task data
);
});
test('should handle priority parameter', async () => {
const options = {
prompt: 'Create navigation menu',
priority: 'high',
file: 'tasks/tasks.json'
};
await addTaskAction(undefined, options);
// Check that priority is passed correctly
@@ -831,16 +852,16 @@ describe('Commands Module', () => {
{ session: process.env },
false,
null, // Generate files parameter
null // Manual task data
);
});
test('should use default values for optional parameters', async () => {
const options = {
prompt: 'Basic task',
file: 'tasks/tasks.json'
};
await addTaskAction(undefined, options);
// Check that default values are used
@@ -852,7 +873,7 @@ describe('Commands Module', () => {
{ session: process.env },
false, // Research is false by default
null, // Generate files parameter
null // Manual task data
);
});
});

View File

@@ -1,326 +1,345 @@
/**
* Tests for the add-task MCP tool
*
* Note: This test does NOT test the actual implementation. It tests that:
* 1. The tool is registered correctly with the correct parameters
* 2. Arguments are passed correctly to addTaskDirect
* 3. Error handling works as expected
*
* We do NOT import the real implementation - everything is mocked
*/
import { jest } from '@jest/globals';
import {
sampleTasks,
emptySampleTasks
} from '../../../fixtures/sample-tasks.js';
// Mock EVERYTHING
const mockAddTaskDirect = jest.fn();
jest.mock('../../../../mcp-server/src/core/task-master-core.js', () => ({
addTaskDirect: mockAddTaskDirect
}));
const mockHandleApiResult = jest.fn((result) => result);
const mockGetProjectRootFromSession = jest.fn(() => '/mock/project/root');
const mockCreateErrorResponse = jest.fn((msg) => ({
success: false,
error: { code: 'ERROR', message: msg }
}));
jest.mock('../../../../mcp-server/src/tools/utils.js', () => ({
getProjectRootFromSession: mockGetProjectRootFromSession,
handleApiResult: mockHandleApiResult,
createErrorResponse: mockCreateErrorResponse,
createContentResponse: jest.fn((content) => ({
success: true,
data: content
})),
executeTaskMasterCommand: jest.fn()
}));
// Mock the z object from zod
const mockZod = {
object: jest.fn(() => mockZod),
string: jest.fn(() => mockZod),
boolean: jest.fn(() => mockZod),
optional: jest.fn(() => mockZod),
describe: jest.fn(() => mockZod),
_def: {
shape: () => ({
prompt: {},
dependencies: {},
priority: {},
research: {},
file: {},
projectRoot: {}
})
}
};
jest.mock('zod', () => ({
z: mockZod
}));
// DO NOT import the real module - create a fake implementation
// This is the fake implementation of registerAddTaskTool
const registerAddTaskTool = (server) => {
// Create simplified version of the tool config
const toolConfig = {
name: 'add_task',
description: 'Add a new task using AI',
parameters: mockZod,
// Create a simplified mock of the execute function
execute: (args, context) => {
const { log, reportProgress, session } = context;
try {
log.info &&
log.info(`Starting add-task with args: ${JSON.stringify(args)}`);
// Get project root
const rootFolder = mockGetProjectRootFromSession(session, log);
// Call addTaskDirect
const result = mockAddTaskDirect(
{
...args,
projectRoot: rootFolder
},
log,
{ reportProgress, session }
);
// Handle result
return mockHandleApiResult(result, log);
} catch (error) {
log.error && log.error(`Error in add-task tool: ${error.message}`);
return mockCreateErrorResponse(error.message);
}
}
};
// Register the tool with the server
server.addTool(toolConfig);
};
describe('MCP Tool: add-task', () => {
// Create mock server
let mockServer;
let executeFunction;
// Create mock logger
const mockLogger = {
debug: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn()
};
// Test data
const validArgs = {
prompt: 'Create a new task',
dependencies: '1,2',
priority: 'high',
research: true
};
// Standard responses
const successResponse = {
success: true,
data: {
taskId: '5',
message: 'Successfully added new task #5'
}
};
const errorResponse = {
success: false,
error: {
code: 'ADD_TASK_ERROR',
message: 'Failed to add task'
}
};
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Create mock server
mockServer = {
addTool: jest.fn((config) => {
executeFunction = config.execute;
})
};
// Setup default successful response
mockAddTaskDirect.mockReturnValue(successResponse);
// Register the tool
registerAddTaskTool(mockServer);
});
test('should register the tool correctly', () => {
// Verify tool was registered
expect(mockServer.addTool).toHaveBeenCalledWith(
expect.objectContaining({
name: 'add_task',
description: 'Add a new task using AI',
parameters: expect.any(Object),
execute: expect.any(Function)
})
);
// Verify the tool config was passed
const toolConfig = mockServer.addTool.mock.calls[0][0];
expect(toolConfig).toHaveProperty('parameters');
expect(toolConfig).toHaveProperty('execute');
});
test('should execute the tool with valid parameters', () => {
// Setup context
const mockContext = {
log: mockLogger,
reportProgress: jest.fn(),
session: { workingDirectory: '/mock/dir' }
};
// Execute the function
executeFunction(validArgs, mockContext);
// Verify getProjectRootFromSession was called
expect(mockGetProjectRootFromSession).toHaveBeenCalledWith(
mockContext.session,
mockLogger
);
// Verify addTaskDirect was called with correct arguments
expect(mockAddTaskDirect).toHaveBeenCalledWith(
expect.objectContaining({
...validArgs,
projectRoot: '/mock/project/root'
}),
mockLogger,
{
reportProgress: mockContext.reportProgress,
session: mockContext.session
}
);
// Verify handleApiResult was called
expect(mockHandleApiResult).toHaveBeenCalledWith(
successResponse,
mockLogger
);
});
test('should handle errors from addTaskDirect', () => {
// Setup error response
mockAddTaskDirect.mockReturnValueOnce(errorResponse);
// Setup context
const mockContext = {
log: mockLogger,
reportProgress: jest.fn(),
session: { workingDirectory: '/mock/dir' }
};
// Execute the function
executeFunction(validArgs, mockContext);
// Verify addTaskDirect was called
expect(mockAddTaskDirect).toHaveBeenCalled();
// Verify handleApiResult was called with error response
expect(mockHandleApiResult).toHaveBeenCalledWith(errorResponse, mockLogger);
});
test('should handle unexpected errors', () => {
// Setup error
const testError = new Error('Unexpected error');
mockAddTaskDirect.mockImplementationOnce(() => {
throw testError;
});
// Setup context
const mockContext = {
log: mockLogger,
reportProgress: jest.fn(),
session: { workingDirectory: '/mock/dir' }
};
// Execute the function
executeFunction(validArgs, mockContext);
// Verify error was logged
expect(mockLogger.error).toHaveBeenCalledWith(
'Error in add-task tool: Unexpected error'
);
// Verify error response was created
expect(mockCreateErrorResponse).toHaveBeenCalledWith('Unexpected error');
});
test('should pass research parameter correctly', () => {
// Setup context
const mockContext = {
log: mockLogger,
reportProgress: jest.fn(),
session: { workingDirectory: '/mock/dir' }
};
// Test with research=true
executeFunction(
{
...validArgs,
research: true
},
mockContext
);
// Verify addTaskDirect was called with research=true
expect(mockAddTaskDirect).toHaveBeenCalledWith(
expect.objectContaining({
research: true
}),
expect.any(Object),
expect.any(Object)
);
// Reset mocks
jest.clearAllMocks();
// Test with research=false
executeFunction(
{
...validArgs,
research: false
},
mockContext
);
// Verify addTaskDirect was called with research=false
expect(mockAddTaskDirect).toHaveBeenCalledWith(
expect.objectContaining({
research: false
}),
expect.any(Object),
expect.any(Object)
);
});
test('should pass priority parameter correctly', () => {
// Setup context
const mockContext = {
log: mockLogger,
reportProgress: jest.fn(),
session: { workingDirectory: '/mock/dir' }
};
// Test different priority values
['high', 'medium', 'low'].forEach((priority) => {
// Reset mocks
jest.clearAllMocks();
// Execute with specific priority
executeFunction(
{
...validArgs,
priority
},
mockContext
);
// Verify addTaskDirect was called with correct priority
expect(mockAddTaskDirect).toHaveBeenCalledWith(
expect.objectContaining({
priority
}),
expect.any(Object),
expect.any(Object)
);
});
});
});