wip: e2e tests improvements
tests/e2e/tests/mcp/expand-task-cli.test.js (new file, 284 lines added)
@@ -0,0 +1,284 @@
const { exec } = require('child_process');
const { promisify } = require('util');
const fs = require('fs').promises;
const path = require('path');

const execAsync = promisify(exec);

// Helper function to run MCP inspector CLI commands
async function runMCPCommand(method, args = {}) {
	const serverPath = path.join(__dirname, '../../../../mcp-server/server.js');
	let command = `npx @modelcontextprotocol/inspector --cli node ${serverPath} --method ${method}`;

	// Add tool-specific arguments
	if (args.toolName) {
		command += ` --tool-name ${args.toolName}`;
	}

	// Add tool arguments (quote each value so multi-word args such as prompts survive the shell)
	if (args.toolArgs) {
		for (const [key, value] of Object.entries(args.toolArgs)) {
			command += ` --tool-arg ${key}="${value}"`;
		}
	}

	try {
		const { stdout, stderr } = await execAsync(command, {
			timeout: 60000, // 60 second timeout for AI operations
			env: { ...process.env, NODE_ENV: 'test' }
		});

		if (stderr && !stderr.includes('DeprecationWarning')) {
			console.error('MCP Command stderr:', stderr);
		}

		return { stdout, stderr };
	} catch (error) {
		console.error('MCP Command failed:', error);
		throw error;
	}
}

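// Note: the assembled command looks roughly like the following (illustrative only;
// the actual tool name and --tool-arg pairs come from the individual test cases below):
//   npx @modelcontextprotocol/inspector --cli node <serverPath> --method tools/call \
//     --tool-name expand_task --tool-arg id="1" --tool-arg projectRoot="<testProjectPath>"
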
describe('MCP Inspector CLI - expand_task Tool Tests', () => {
	const testProjectPath = path.join(__dirname, '../../../../test-fixtures/mcp-expand-test-project');
	const tasksDir = path.join(testProjectPath, '.taskmaster/tasks');
	const tasksFile = path.join(tasksDir, 'tasks.json');

	beforeAll(async () => {
		// Create test project directory structure
		await fs.mkdir(tasksDir, { recursive: true });

		// Create sample tasks data
		const sampleTasks = {
			tasks: [
				{
					id: 1,
					description: 'Implement user authentication system',
					status: 'pending',
					tags: ['master'],
					subtasks: []
				},
				{
					id: 2,
					description: 'Create API endpoints',
					status: 'pending',
					tags: ['master'],
					subtasks: [
						{
							id: '2.1',
							description: 'Setup Express server',
							status: 'pending'
						}
					]
				},
				{
					id: 3,
					description: 'Design database schema',
					status: 'completed',
					tags: ['master']
				}
			],
			tags: {
				master: {
					name: 'master',
					description: 'Main development branch'
				}
			},
			activeTag: 'master',
			metadata: {
				nextId: 4,
				version: '1.0.0'
			}
		};

		await fs.writeFile(tasksFile, JSON.stringify(sampleTasks, null, 2));
	});

	afterAll(async () => {
		// Clean up test project
		await fs.rm(testProjectPath, { recursive: true, force: true });
	});

	it('should list available tools including expand_task', async () => {
		const { stdout } = await runMCPCommand('tools/list');
		const response = JSON.parse(stdout);

		expect(response).toHaveProperty('tools');
		expect(Array.isArray(response.tools)).toBe(true);

		const expandTaskTool = response.tools.find(tool => tool.name === 'expand_task');
		expect(expandTaskTool).toBeDefined();
		expect(expandTaskTool.description).toContain('Expand a task into subtasks');
	});

	it('should expand a task without existing subtasks', async () => {
		// Skip if no API key is set
		if (!process.env.ANTHROPIC_API_KEY && !process.env.OPENAI_API_KEY) {
			console.log('Skipping test: No AI API key found in environment');
			return;
		}

		const { stdout } = await runMCPCommand('tools/call', {
			toolName: 'expand_task',
			toolArgs: {
				id: '1',
				projectRoot: testProjectPath,
				num: '3',
				prompt: 'Focus on security and authentication best practices'
			}
		});

		const response = JSON.parse(stdout);
		expect(response).toHaveProperty('content');
		expect(Array.isArray(response.content)).toBe(true);

		// Parse the text content to get result
		const textContent = response.content.find(c => c.type === 'text');
		expect(textContent).toBeDefined();

		const result = JSON.parse(textContent.text);
		expect(result.task).toBeDefined();
		expect(result.task.id).toBe(1);
		expect(result.subtasksAdded).toBeGreaterThan(0);

		// Verify the task was actually updated
		const updatedTasks = JSON.parse(await fs.readFile(tasksFile, 'utf8'));
		const expandedTask = updatedTasks.tasks.find(t => t.id === 1);
		expect(expandedTask.subtasks.length).toBeGreaterThan(0);
	});

	it('should handle expansion with force flag for task with existing subtasks', async () => {
		// Skip if no API key is set
		if (!process.env.ANTHROPIC_API_KEY && !process.env.OPENAI_API_KEY) {
			console.log('Skipping test: No AI API key found in environment');
			return;
		}

		const { stdout } = await runMCPCommand('tools/call', {
			toolName: 'expand_task',
			toolArgs: {
				id: '2',
				projectRoot: testProjectPath,
				force: 'true',
				num: '2'
			}
		});

		const response = JSON.parse(stdout);
		const textContent = response.content.find(c => c.type === 'text');
		const result = JSON.parse(textContent.text);

		expect(result.task).toBeDefined();
		expect(result.task.id).toBe(2);
		expect(result.subtasksAdded).toBe(2);
	});

	it('should reject expansion of completed task', async () => {
		const { stdout } = await runMCPCommand('tools/call', {
			toolName: 'expand_task',
			toolArgs: {
				id: '3',
				projectRoot: testProjectPath
			}
		});

		const response = JSON.parse(stdout);
		expect(response).toHaveProperty('content');

		const textContent = response.content.find(c => c.type === 'text');
		expect(textContent.text).toContain('Error');
		expect(textContent.text).toContain('completed');
	});

	it('should handle invalid task ID', async () => {
		const { stdout } = await runMCPCommand('tools/call', {
			toolName: 'expand_task',
			toolArgs: {
				id: '999',
				projectRoot: testProjectPath
			}
		});

		const response = JSON.parse(stdout);
		const textContent = response.content.find(c => c.type === 'text');
		expect(textContent.text).toContain('Error');
		expect(textContent.text).toContain('not found');
	});

	it('should handle missing required parameters', async () => {
		try {
			await runMCPCommand('tools/call', {
				toolName: 'expand_task',
				toolArgs: {
					// Missing id and projectRoot
					num: '3'
				}
			});
			fail('Should have thrown an error');
		} catch (error) {
			expect(error.message).toContain('validation');
		}
	});

	it('should work with custom tasks file path', async () => {
		// Skip if no API key is set
		if (!process.env.ANTHROPIC_API_KEY && !process.env.OPENAI_API_KEY) {
			console.log('Skipping test: No AI API key found in environment');
			return;
		}

		// Create custom tasks file
		const customDir = path.join(testProjectPath, 'custom');
		await fs.mkdir(customDir, { recursive: true });
		const customTasksPath = path.join(customDir, 'my-tasks.json');
		await fs.copyFile(tasksFile, customTasksPath);

		const { stdout } = await runMCPCommand('tools/call', {
			toolName: 'expand_task',
			toolArgs: {
				id: '1',
				projectRoot: testProjectPath,
				file: 'custom/my-tasks.json',
				num: '2'
			}
		});

		const response = JSON.parse(stdout);
		const textContent = response.content.find(c => c.type === 'text');
		const result = JSON.parse(textContent.text);

		expect(result.task).toBeDefined();
		expect(result.subtasksAdded).toBe(2);

		// Verify the custom file was updated
		const updatedData = JSON.parse(await fs.readFile(customTasksPath, 'utf8'));
		const task = updatedData.tasks.find(t => t.id === 1);
		expect(task.subtasks.length).toBe(2);
	});

	it('should handle expansion with research flag', async () => {
		// Skip if no API key is set
		if (!process.env.ANTHROPIC_API_KEY && !process.env.OPENAI_API_KEY && !process.env.PERPLEXITY_API_KEY) {
			console.log('Skipping test: No AI API key found in environment');
			return;
		}

		const { stdout } = await runMCPCommand('tools/call', {
			toolName: 'expand_task',
			toolArgs: {
				id: '1',
				projectRoot: testProjectPath,
				research: 'true',
				num: '2'
			}
		});

		const response = JSON.parse(stdout);
		const textContent = response.content.find(c => c.type === 'text');

		// Even if research fails, expansion should still work
		const result = JSON.parse(textContent.text);
		expect(result.task).toBeDefined();
		expect(result.subtasksAdded).toBeGreaterThanOrEqual(0);
	});
});

@@ -1,146 +0,0 @@
import { mcpTest } from 'mcp-jest';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import fs from 'fs';

const __dirname = dirname(fileURLToPath(import.meta.url));
const projectRoot = join(__dirname, '../../../..');

// Create test tasks file for testing
const testTasksPath = join(projectRoot, '.taskmaster/test-mcp-tasks.json');
const testTasks = {
	tasks: [
		{
			id: 'mcp-test-001',
			description: 'MCP Test task 1',
			status: 'pending',
			priority: 'high',
			estimatedMinutes: 30,
			actualMinutes: 0,
			dependencies: [],
			tags: ['test'],
			subtasks: [
				{
					id: 'mcp-test-001-1',
					description: 'MCP Test subtask 1.1',
					status: 'pending',
					priority: 'medium',
					estimatedMinutes: 15,
					actualMinutes: 0
				}
			]
		},
		{
			id: 'mcp-test-002',
			description: 'MCP Test task 2',
			status: 'done',
			priority: 'medium',
			estimatedMinutes: 60,
			actualMinutes: 60,
			dependencies: ['mcp-test-001'],
			tags: ['test', 'demo'],
			subtasks: []
		}
	]
};

// Setup test data
fs.mkdirSync(join(projectRoot, '.taskmaster'), { recursive: true });
fs.writeFileSync(testTasksPath, JSON.stringify(testTasks, null, 2));

// Run MCP Jest tests
async function runTests() {
	try {
		const results = await mcpTest(
			{
				command: 'node',
				args: [join(projectRoot, 'mcp-server/server.js')],
				env: process.env
			},
			{
				tools: {
					initialize_project: {
						args: { projectRoot: projectRoot },
						expect: (result) =>
							result.content[0].text.includes(
								'Project initialized successfully'
							)
					},
					get_tasks: [
						{
							name: 'get all tasks with subtasks',
							args: {
								projectRoot: projectRoot,
								file: '.taskmaster/test-mcp-tasks.json',
								withSubtasks: true
							},
							expect: (result) => {
								const text = result.content[0].text;
								return (
									!result.isError &&
									text.includes('2 tasks found') &&
									text.includes('MCP Test task 1') &&
									text.includes('MCP Test task 2') &&
									text.includes('MCP Test subtask 1.1')
								);
							}
						},
						{
							name: 'filter by done status',
							args: {
								projectRoot: projectRoot,
								file: '.taskmaster/test-mcp-tasks.json',
								status: 'done'
							},
							expect: (result) => {
								const text = result.content[0].text;
								return (
									!result.isError &&
									text.includes('1 task found') &&
									text.includes('MCP Test task 2') &&
									!text.includes('MCP Test task 1')
								);
							}
						},
						{
							name: 'handle non-existent file',
							args: {
								projectRoot: projectRoot,
								file: '.taskmaster/non-existent.json'
							},
							expect: (result) =>
								result.isError && result.content[0].text.includes('Error')
						}
					]
				}
			}
		);

		console.log('\nTest Results:');
		console.log('=============');
		console.log(`✅ Passed: ${results.passed}/${results.total}`);

		if (results.failed > 0) {
			console.error(`❌ Failed: ${results.failed}`);
			console.error('\nDetailed Results:');
			console.log(JSON.stringify(results, null, 2));
		}

		// Cleanup
		if (fs.existsSync(testTasksPath)) {
			fs.unlinkSync(testTasksPath);
		}

		// Exit with appropriate code
		process.exit(results.failed > 0 ? 1 : 0);
	} catch (error) {
		console.error('Test execution failed:', error);
		// Cleanup on error
		if (fs.existsSync(testTasksPath)) {
			fs.unlinkSync(testTasksPath);
		}
		process.exit(1);
	}
}

runTests();

@@ -1,254 +0,0 @@
#!/usr/bin/env node

import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import fs from 'fs';

const __dirname = dirname(fileURLToPath(import.meta.url));
const projectRoot = join(__dirname, '../../../..');

// Create test tasks file for testing
const testTasksPath = join(projectRoot, '.taskmaster/test-tasks.json');
const testTasks = {
	tasks: [
		{
			id: 'test-001',
			description: 'Test task 1',
			status: 'pending',
			priority: 'high',
			estimatedMinutes: 30,
			actualMinutes: 0,
			dependencies: [],
			tags: ['test'],
			subtasks: [
				{
					id: 'test-001-1',
					description: 'Test subtask 1.1',
					status: 'pending',
					priority: 'medium',
					estimatedMinutes: 15,
					actualMinutes: 0
				}
			]
		},
		{
			id: 'test-002',
			description: 'Test task 2',
			status: 'done',
			priority: 'medium',
			estimatedMinutes: 60,
			actualMinutes: 60,
			dependencies: ['test-001'],
			tags: ['test', 'demo'],
			subtasks: []
		}
	]
};

async function runTests() {
	console.log('Starting MCP server tests...\n');

	// Setup test data
	fs.mkdirSync(join(projectRoot, '.taskmaster'), { recursive: true });
	fs.writeFileSync(testTasksPath, JSON.stringify(testTasks, null, 2));

	// Create transport by spawning the server
	const transport = new StdioClientTransport({
		command: 'node',
		args: ['mcp-server/server.js'],
		env: process.env,
		cwd: projectRoot
	});

	// Create client
	const client = new Client(
		{
			name: 'test-client',
			version: '1.0.0'
		},
		{
			capabilities: {
				sampling: {}
			}
		}
	);

	let testResults = {
		total: 0,
		passed: 0,
		failed: 0,
		tests: []
	};

	async function runTest(name, testFn) {
		testResults.total++;
		try {
			await testFn();
			testResults.passed++;
			testResults.tests.push({ name, status: 'passed' });
			console.log(`✅ ${name}`);
		} catch (error) {
			testResults.failed++;
			testResults.tests.push({ name, status: 'failed', error: error.message });
			console.error(`❌ ${name}`);
			console.error(` Error: ${error.message}`);
		}
	}

	try {
		// Connect to server
		await client.connect(transport);
		console.log('Connected to MCP server\n');

		// Test 1: List available tools
		await runTest('List available tools', async () => {
			const tools = await client.listTools();
			if (!tools.tools || tools.tools.length === 0) {
				throw new Error('No tools found');
			}
			const toolNames = tools.tools.map((t) => t.name);
			if (!toolNames.includes('get_tasks')) {
				throw new Error('get_tasks tool not found');
			}
			console.log(` Found ${tools.tools.length} tools`);
		});

		// Test 2: Initialize project
		await runTest('Initialize project', async () => {
			const result = await client.callTool({
				name: 'initialize_project',
				arguments: {
					projectRoot: projectRoot
				}
			});
			if (
				!result.content[0].text.includes('Project initialized successfully')
			) {
				throw new Error('Project initialization failed');
			}
		});

		// Test 3: Get all tasks
		await runTest('Get all tasks with subtasks', async () => {
			const result = await client.callTool({
				name: 'get_tasks',
				arguments: {
					projectRoot: projectRoot,
					file: '.taskmaster/test-tasks.json',
					withSubtasks: true
				}
			});

			if (result.isError) {
				throw new Error(`Tool returned error: ${result.content[0].text}`);
			}

			const text = result.content[0].text;
			const data = JSON.parse(text);

			if (!data.data || !data.data.tasks) {
				throw new Error('Invalid response format');
			}

			if (data.data.tasks.length !== 2) {
				throw new Error(`Expected 2 tasks, got ${data.data.tasks.length}`);
			}

			const taskDescriptions = data.data.tasks.map((t) => t.description);
			if (
				!taskDescriptions.includes('Test task 1') ||
				!taskDescriptions.includes('Test task 2')
			) {
				throw new Error('Expected tasks not found');
			}

			// Check for subtask
			const task1 = data.data.tasks.find((t) => t.id === 'test-001');
			if (!task1.subtasks || task1.subtasks.length === 0) {
				throw new Error('Subtasks not found');
			}
			if (task1.subtasks[0].description !== 'Test subtask 1.1') {
				throw new Error('Expected subtask not found');
			}
		});

		// Test 4: Filter by status
		await runTest('Filter tasks by done status', async () => {
			const result = await client.callTool({
				name: 'get_tasks',
				arguments: {
					projectRoot: projectRoot,
					file: '.taskmaster/test-tasks.json',
					status: 'done'
				}
			});

			if (result.isError) {
				throw new Error(`Tool returned error: ${result.content[0].text}`);
			}

			const text = result.content[0].text;
			const data = JSON.parse(text);

			if (!data.data || !data.data.tasks) {
				throw new Error('Invalid response format');
			}

			if (data.data.tasks.length !== 1) {
				throw new Error(
					`Expected 1 task with done status, got ${data.data.tasks.length}`
				);
			}

			const task = data.data.tasks[0];
			if (task.description !== 'Test task 2') {
				throw new Error(`Expected 'Test task 2', got '${task.description}'`);
			}
			if (task.status !== 'done') {
				throw new Error(`Expected status 'done', got '${task.status}'`);
			}
		});

		// Test 5: Handle non-existent file
		await runTest('Handle non-existent file gracefully', async () => {
			const result = await client.callTool({
				name: 'get_tasks',
				arguments: {
					projectRoot: projectRoot,
					file: '.taskmaster/non-existent.json'
				}
			});

			if (!result.isError) {
				throw new Error('Expected error for non-existent file');
			}
			if (!result.content[0].text.includes('Error')) {
				throw new Error('Expected error message');
			}
		});
	} catch (error) {
		console.error('\nConnection error:', error.message);
		testResults.failed = testResults.total;
	} finally {
		// Clean up
		await client.close();
		if (fs.existsSync(testTasksPath)) {
			fs.unlinkSync(testTasksPath);
		}

		// Print summary
		console.log('\n' + '='.repeat(50));
		console.log('Test Summary:');
		console.log(`Total: ${testResults.total}`);
		console.log(`Passed: ${testResults.passed}`);
		console.log(`Failed: ${testResults.failed}`);
		console.log('='.repeat(50));

		// Exit with appropriate code
		process.exit(testResults.failed > 0 ? 1 : 0);
	}
}

runTests().catch(console.error);