Mirror of https://github.com/eyaltoledano/claude-task-master.git, synced 2026-01-29 22:02:04 +00:00

Merge pull request #1577 from eyaltoledano/next
.changeset/adopt-modifyjson-pattern.md (new file, 9 lines)
@@ -0,0 +1,9 @@
---
"task-master-ai": patch
---

Improve concurrency safety by adopting the modifyJson pattern in file-storage

- Refactor saveTasks, createTag, deleteTag, and renameTag to use modifyJson for atomic read-modify-write operations
- This prevents lost updates when multiple processes concurrently modify tasks.json
- Complements the cross-process file locking added in PR #1566
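For context, a minimal sketch of the read-modify-write shape this changeset describes. The name modifyJson matches the changeset, but the body is illustrative only and assumes it runs inside the cross-process lock from PR #1566; the actual file-storage implementation may differ:

// Illustrative sketch, not the shipped implementation.
import { promises as fs } from 'node:fs';

async function modifyJson<T>(
	filePath: string,
	mutate: (data: T) => T
): Promise<void> {
	// Read, mutate, and write as one unit so a concurrent writer cannot
	// slip in between the read and the write (the "lost update" problem).
	const raw = await fs.readFile(filePath, 'utf8');
	const next = mutate(JSON.parse(raw) as T);
	// Atomic replace: write to a temp file, then rename over the original.
	const tmpPath = `${filePath}.tmp`;
	await fs.writeFile(tmpPath, JSON.stringify(next, null, 2));
	await fs.rename(tmpPath, filePath);
}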
.changeset/fix-file-locking.md (new file, 13 lines)
@@ -0,0 +1,13 @@
---
"task-master-ai": patch
---

Fix race condition when multiple Claude Code windows write to tasks.json simultaneously

- Add cross-process file locking to prevent concurrent write collisions
- Implement atomic writes using temp file + rename pattern to prevent partial writes
- Re-read file inside lock to get current state, preventing lost updates from stale snapshots
- Add stale lock detection and automatic cleanup (10-second timeout)
- Export `withFileLock` and `withFileLockSync` utilities for use by other modules

This fix prevents data loss that could occur when multiple Task Master instances (e.g., multiple Claude Code windows) access the same tasks.json file concurrently.
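A rough sketch of what a lock utility like the exported withFileLock can look like, assuming a lock-file approach with the 10-second stale timeout mentioned above; the PR's actual implementation may differ in detail:

// Hypothetical sketch; names match the changeset, internals are assumptions.
import { promises as fs } from 'node:fs';

const STALE_LOCK_MS = 10_000; // stale-lock timeout per the changeset

async function withFileLock<T>(
	filePath: string,
	fn: () => Promise<T>
): Promise<T> {
	const lockPath = `${filePath}.lock`;
	for (;;) {
		try {
			// 'wx' creates the lock file exclusively and fails if it exists.
			await fs.writeFile(lockPath, String(process.pid), { flag: 'wx' });
			break;
		} catch {
			// Reclaim locks left behind by crashed processes.
			const stat = await fs.stat(lockPath).catch(() => null);
			if (stat && Date.now() - stat.mtimeMs > STALE_LOCK_MS) {
				await fs.rm(lockPath, { force: true });
			} else {
				await new Promise((r) => setTimeout(r, 50));
			}
		}
	}
	try {
		// Re-read current state inside fn, not before taking the lock,
		// so a stale snapshot cannot overwrite another process's update.
		return await fn();
	} finally {
		await fs.rm(lockPath, { force: true });
	}
}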
.changeset/list-blocks-ready-filter.md (new file, 16 lines)
@@ -0,0 +1,16 @@
---
"task-master-ai": minor
---

Add --ready and --blocking filters to the list command for identifying parallelizable tasks

- Add `--ready` filter to show only tasks with satisfied dependencies (ready to work on)
- Add `--blocking` filter to show only tasks that block other tasks
- Combine `--ready --blocking` to find high-impact tasks (ready AND blocking others)
- Add "Blocks" column to the task table showing which tasks depend on each task
- Include the blocks field in JSON output for programmatic access
- Add "Ready" column to the `tags` command showing the count of ready tasks per tag
- Add `--ready` filter to the `tags` command to show only tags with available work
- Exclude deferred/blocked tasks from the ready count (only actionable statuses)
- Add `--all-tags` option to list ready tasks across all tags (use with `--ready`)
- Show the Tag column first when using `--all-tags` for easy scanning
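To make the "ready" semantics concrete, here is an illustrative TypeScript sketch consistent with the behavior described above and with the spec later in this diff; the real filterReadyTasks lives in @tm/core and may be implemented differently:

// Illustrative only; the status sets are assumptions drawn from the tests below.
type TaskLike = { id: string; status: string; dependencies: string[] };

const ACTIONABLE = new Set(['pending', 'in-progress', 'review']);
const isComplete = (t: TaskLike) =>
	t.status === 'done' || t.status === 'cancelled';

function filterReadyTasks(tasks: TaskLike[]): TaskLike[] {
	const byId = new Map(tasks.map((t) => [String(t.id), t] as const));
	return tasks.filter(
		(t) =>
			// deferred/blocked/done/cancelled tasks are never "ready"
			ACTIONABLE.has(t.status) &&
			// every dependency must be done or cancelled (cancelled counts as complete)
			t.dependencies.every((dep) => {
				const d = byId.get(String(dep));
				return d !== undefined && isComplete(d);
			})
	);
	// The --blocking filter is simpler: keep tasks whose blocks list is non-empty.
}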
.changeset/loop-error-handling.md (new file, 10 lines)
@@ -0,0 +1,10 @@
---
"task-master-ai": patch
---

Improve loop command error handling and use --dangerously-skip-permissions

- Add proper spawn error handling (ENOENT, EACCES) with actionable messages
- Return error info from checkSandboxAuth and runInteractiveAuth instead of silent failures
- Use --dangerously-skip-permissions for unattended loop execution
- Fix null exit code masking issue
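The spawn error handling can be pictured as follows — a hedged sketch, not the repo's code; the exact messages and return shape are assumptions:

// Hypothetical illustration of the error mapping described above.
import { spawn } from 'node:child_process';

function runClaude(args: string[]): Promise<number> {
	return new Promise((resolve, reject) => {
		const child = spawn('claude', args, { stdio: 'inherit' });
		child.on('error', (err: NodeJS.ErrnoException) => {
			if (err.code === 'ENOENT') {
				reject(new Error('claude CLI not found on PATH. Install it and retry.'));
			} else if (err.code === 'EACCES') {
				reject(new Error('claude CLI is not executable. Check its permissions.'));
			} else {
				reject(err);
			}
		});
		// A null exit code means the child was killed by a signal; treat it as
		// a failure instead of masking it as success.
		child.on('close', (code) => resolve(code ?? 1));
	});
}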
.changeset/loop-sandbox-optional.md (new file, 9 lines)
@@ -0,0 +1,9 @@
---
"task-master-ai": patch
---

Make Docker sandbox mode opt-in for loop command

- Add `--sandbox` flag to `task-master loop` (default: use plain `claude -p`)
- Preserve progress.txt between runs (append instead of overwrite)
- Display execution mode in loop startup output
.changeset/update-codex-cli-models.md (new file, 10 lines)
@@ -0,0 +1,10 @@
---
"task-master-ai": patch
---

Update Codex CLI supported models to match the currently available models

- Remove deprecated models: gpt-5, gpt-5-codex, gpt-5.1
- Add gpt-5.2-codex as the current default model
- Add gpt-5.1-codex-mini as a faster, cheaper option
- Keep gpt-5.1-codex-max and gpt-5.2
package.json (scripts)
@@ -16,8 +16,8 @@
 		"test": "vitest run",
 		"test:watch": "vitest",
 		"test:coverage": "vitest run --coverage",
-		"test:unit": "vitest run -t unit",
-		"test:integration": "vitest run -t integration",
+		"test:unit": "vitest run '**/*.spec.ts'",
+		"test:integration": "vitest run '**/*.test.ts'",
 		"test:e2e": "vitest run --dir tests/e2e",
 		"test:ci": "vitest run --coverage --reporter=dot"
 	},
apps/cli/src/commands/list.command.spec.ts (new file, 959 lines)
@@ -0,0 +1,959 @@
/**
 * @fileoverview Unit tests for ListTasksCommand
 */

import type { TmCore } from '@tm/core';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

// Mock dependencies - use importOriginal to preserve real implementations
// Only mock createTmCore since we inject a mock tmCore directly in tests
vi.mock('@tm/core', async (importOriginal) => {
	const actual = await importOriginal<typeof import('@tm/core')>();
	return {
		...actual,
		createTmCore: vi.fn()
	};
});

vi.mock('../utils/project-root.js', () => ({
	getProjectRoot: vi.fn((path?: string) => path || '/test/project')
}));

vi.mock('../utils/error-handler.js', () => ({
	displayError: vi.fn()
}));

vi.mock('../utils/display-helpers.js', () => ({
	displayCommandHeader: vi.fn()
}));

vi.mock('../ui/index.js', () => ({
	calculateDependencyStatistics: vi.fn(() => ({ total: 0, blocked: 0 })),
	calculateSubtaskStatistics: vi.fn(() => ({ total: 0, completed: 0 })),
	calculateTaskStatistics: vi.fn(() => ({ total: 0, completed: 0 })),
	displayDashboards: vi.fn(),
	displayRecommendedNextTask: vi.fn(),
	displaySuggestedNextSteps: vi.fn(),
	getPriorityBreakdown: vi.fn(() => ({})),
	getTaskDescription: vi.fn(() => 'Test description')
}));

vi.mock('../utils/ui.js', () => ({
	createTaskTable: vi.fn(() => 'Table output'),
	displayWarning: vi.fn()
}));

import { ListTasksCommand } from './list.command.js';

describe('ListTasksCommand', () => {
	let consoleLogSpy: any;
	let mockTmCore: Partial<TmCore>;

	beforeEach(() => {
		consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});

		mockTmCore = {
			tasks: {
				list: vi.fn().mockResolvedValue({
					tasks: [{ id: '1', title: 'Test Task', status: 'pending' }],
					total: 1,
					filtered: 1,
					storageType: 'json'
				}),
				getStorageType: vi.fn().mockReturnValue('json')
			} as any,
			config: {
				getActiveTag: vi.fn().mockReturnValue('master')
			} as any
		};
	});

	afterEach(() => {
		vi.clearAllMocks();
		consoleLogSpy.mockRestore();
	});

	describe('JSON output format', () => {
		it('should use JSON format when --json flag is set', async () => {
			const command = new ListTasksCommand();

			// Mock the tmCore initialization
			(command as any).tmCore = mockTmCore;

			// Execute with --json flag
			await (command as any).executeCommand({
				json: true,
				format: 'text' // Should be overridden by --json
			});

			// Verify JSON output was called
			expect(consoleLogSpy).toHaveBeenCalled();
			const output = consoleLogSpy.mock.calls[0][0];

			// Should be valid JSON
			expect(() => JSON.parse(output)).not.toThrow();

			const parsed = JSON.parse(output);
			expect(parsed).toHaveProperty('tasks');
			expect(parsed).toHaveProperty('metadata');
		});

		it('should override --format when --json is set', async () => {
			const command = new ListTasksCommand();
			(command as any).tmCore = mockTmCore;

			await (command as any).executeCommand({
				json: true,
				format: 'compact' // Should be overridden
			});

			// Should output JSON, not compact format
			const output = consoleLogSpy.mock.calls[0][0];
			expect(() => JSON.parse(output)).not.toThrow();
		});

		it('should use specified format when --json is not set', async () => {
			const command = new ListTasksCommand();
			(command as any).tmCore = mockTmCore;

			await (command as any).executeCommand({
				format: 'compact'
			});

			// Should use compact format (not JSON)
			const output = consoleLogSpy.mock.calls;
			// In compact mode, output is not JSON
			expect(output.length).toBeGreaterThan(0);
		});

		it('should default to text format when neither flag is set', async () => {
			const command = new ListTasksCommand();
			(command as any).tmCore = mockTmCore;

			await (command as any).executeCommand({});

			// Should use text format (not JSON)
			// If any console.log was called, verify it's not JSON
			if (consoleLogSpy.mock.calls.length > 0) {
				const output = consoleLogSpy.mock.calls[0][0];
				// Text format output should not be parseable JSON
				// or should be the table string we mocked
				expect(
					output === 'Table output' ||
						(() => {
							try {
								JSON.parse(output);
								return false;
							} catch {
								return true;
							}
						})()
				).toBe(true);
			}
		});
	});

	describe('format validation', () => {
		it('should accept valid formats', () => {
			const command = new ListTasksCommand();

			expect((command as any).validateOptions({ format: 'text' })).toBe(true);
			expect((command as any).validateOptions({ format: 'json' })).toBe(true);
			expect((command as any).validateOptions({ format: 'compact' })).toBe(
				true
			);
		});

		it('should reject invalid formats', () => {
			const consoleErrorSpy = vi
				.spyOn(console, 'error')
				.mockImplementation(() => {});
			const command = new ListTasksCommand();

			expect((command as any).validateOptions({ format: 'invalid' })).toBe(
				false
			);
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining('Invalid format: invalid')
			);

			consoleErrorSpy.mockRestore();
		});
	});

	describe('--ready filter', () => {
		it('should filter to only tasks with all dependencies satisfied', async () => {
			const command = new ListTasksCommand();

			// Mock tasks where some have satisfied deps and some don't
			const mockTasks = [
				{ id: '1', title: 'Task 1', status: 'done', dependencies: [] },
				{ id: '2', title: 'Task 2', status: 'pending', dependencies: ['1'] }, // deps satisfied (1 is done)
				{ id: '3', title: 'Task 3', status: 'pending', dependencies: ['2'] }, // deps NOT satisfied (2 is pending)
				{ id: '4', title: 'Task 4', status: 'pending', dependencies: [] } // no deps, ready
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 4,
						filtered: 4,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				ready: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Should only include tasks 2 and 4 (ready to work on)
			expect(parsed.tasks).toHaveLength(2);
			expect(parsed.tasks.map((t: any) => t.id)).toEqual(
				expect.arrayContaining(['2', '4'])
			);
			expect(parsed.tasks.map((t: any) => t.id)).not.toContain('3');
		});

		it('should exclude done/cancelled tasks from ready filter', async () => {
			const command = new ListTasksCommand();

			const mockTasks = [
				{ id: '1', title: 'Task 1', status: 'done', dependencies: [] },
				{ id: '2', title: 'Task 2', status: 'cancelled', dependencies: [] },
				{ id: '3', title: 'Task 3', status: 'pending', dependencies: [] }
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 3,
						filtered: 3,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				ready: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Should only include task 3 (pending with no deps)
			expect(parsed.tasks).toHaveLength(1);
			expect(parsed.tasks[0].id).toBe('3');
		});

		it('should exclude deferred and blocked tasks from ready filter', async () => {
			const command = new ListTasksCommand();

			const mockTasks = [
				{ id: '1', title: 'Task 1', status: 'pending', dependencies: [] },
				{ id: '2', title: 'Task 2', status: 'deferred', dependencies: [] },
				{ id: '3', title: 'Task 3', status: 'blocked', dependencies: [] },
				{ id: '4', title: 'Task 4', status: 'in-progress', dependencies: [] },
				{ id: '5', title: 'Task 5', status: 'review', dependencies: [] }
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 5,
						filtered: 5,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				ready: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Should only include pending, in-progress, and review tasks
			expect(parsed.tasks).toHaveLength(3);
			const ids = parsed.tasks.map((t: any) => t.id);
			expect(ids).toContain('1'); // pending
			expect(ids).toContain('4'); // in-progress
			expect(ids).toContain('5'); // review
			expect(ids).not.toContain('2'); // deferred - excluded
			expect(ids).not.toContain('3'); // blocked - excluded
		});
	});

	describe('--blocking filter', () => {
		it('should filter to only tasks that block other tasks', async () => {
			const command = new ListTasksCommand();

			const mockTasks = [
				{ id: '1', title: 'Task 1', status: 'pending', dependencies: [] }, // blocks 2, 3
				{ id: '2', title: 'Task 2', status: 'pending', dependencies: ['1'] }, // blocks 4
				{ id: '3', title: 'Task 3', status: 'pending', dependencies: ['1'] }, // blocks nothing
				{ id: '4', title: 'Task 4', status: 'pending', dependencies: ['2'] } // blocks nothing
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 4,
						filtered: 4,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				blocking: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Should only include tasks 1 and 2 (they block other tasks)
			expect(parsed.tasks).toHaveLength(2);
			expect(parsed.tasks.map((t: any) => t.id)).toEqual(
				expect.arrayContaining(['1', '2'])
			);
		});
	});

	describe('--ready --blocking combined filter', () => {
		it('should show high-impact tasks (ready AND blocking)', async () => {
			const command = new ListTasksCommand();

			const mockTasks = [
				{ id: '1', title: 'Task 1', status: 'done', dependencies: [] },
				{ id: '2', title: 'Task 2', status: 'pending', dependencies: ['1'] }, // ready (1 done), blocks 3,4
				{ id: '3', title: 'Task 3', status: 'pending', dependencies: ['2'] }, // not ready, blocks 5
				{ id: '4', title: 'Task 4', status: 'pending', dependencies: ['2'] }, // not ready, blocks nothing
				{ id: '5', title: 'Task 5', status: 'pending', dependencies: ['3'] }, // not ready, blocks nothing
				{ id: '6', title: 'Task 6', status: 'pending', dependencies: [] } // ready, blocks nothing
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 6,
						filtered: 6,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				ready: true,
				blocking: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Should only include task 2 (ready AND blocks other tasks)
			expect(parsed.tasks).toHaveLength(1);
			expect(parsed.tasks[0].id).toBe('2');
		});
	});

	describe('blocks field in output', () => {
		it('should include blocks field showing which tasks depend on each task', async () => {
			const command = new ListTasksCommand();

			const mockTasks = [
				{ id: '1', title: 'Task 1', status: 'pending', dependencies: [] },
				{ id: '2', title: 'Task 2', status: 'pending', dependencies: ['1'] },
				{ id: '3', title: 'Task 3', status: 'pending', dependencies: ['1'] },
				{
					id: '4',
					title: 'Task 4',
					status: 'pending',
					dependencies: ['2', '3']
				}
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 4,
						filtered: 4,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Task 1 blocks tasks 2 and 3
			const task1 = parsed.tasks.find((t: any) => t.id === '1');
			expect(task1.blocks).toEqual(expect.arrayContaining(['2', '3']));

			// Task 2 blocks task 4
			const task2 = parsed.tasks.find((t: any) => t.id === '2');
			expect(task2.blocks).toEqual(['4']);

			// Task 3 blocks task 4
			const task3 = parsed.tasks.find((t: any) => t.id === '3');
			expect(task3.blocks).toEqual(['4']);

			// Task 4 blocks nothing
			const task4 = parsed.tasks.find((t: any) => t.id === '4');
			expect(task4.blocks).toEqual([]);
		});
	});

	describe('--ready filter edge cases', () => {
		it('should treat cancelled dependencies as satisfied for --ready filter', async () => {
			const command = new ListTasksCommand();

			// Task 1 is cancelled, Task 2 depends on Task 1
			// Task 2 should be considered "ready" because cancelled = complete
			const mockTasks = [
				{
					id: '1',
					title: 'Cancelled Task',
					status: 'cancelled',
					dependencies: []
				},
				{
					id: '2',
					title: 'Dependent Task',
					status: 'pending',
					dependencies: ['1']
				}
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 2,
						filtered: 2,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				ready: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Task 2 should be ready because task 1 (cancelled) counts as complete
			expect(parsed.tasks).toHaveLength(1);
			expect(parsed.tasks[0].id).toBe('2');
			expect(parsed.tasks[0].status).toBe('pending');
		});

		it('should apply status filter after ready filter', async () => {
			const command = new ListTasksCommand();

			// Multiple ready tasks with different statuses
			const mockTasks = [
				{ id: '1', title: 'Done', status: 'done', dependencies: [] },
				{
					id: '2',
					title: 'Pending ready',
					status: 'pending',
					dependencies: ['1']
				},
				{
					id: '3',
					title: 'In-progress ready',
					status: 'in-progress',
					dependencies: ['1']
				}
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 3,
						filtered: 3,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				ready: true,
				status: 'pending',
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// With --ready --status=pending, should only show task 2
			// Task 2 is ready (dep 1 is done) and pending
			// Task 3 is ready but in-progress, not pending
			expect(parsed.tasks).toHaveLength(1);
			expect(parsed.tasks[0].id).toBe('2');
			expect(parsed.tasks[0].status).toBe('pending');
		});
	});

	describe('buildBlocksMap validation', () => {
		it('should warn about dependencies to non-existent tasks', async () => {
			const consoleWarnSpy = vi
				.spyOn(console, 'warn')
				.mockImplementation(() => {});
			const command = new ListTasksCommand();

			const mockTasks = [
				{
					id: '1',
					title: 'Task with bad dep',
					status: 'pending',
					dependencies: ['999']
				}
			];

			(command as any).tmCore = {
				tasks: {
					list: vi.fn().mockResolvedValue({
						tasks: mockTasks,
						total: 1,
						filtered: 1,
						storageType: 'json'
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				json: true
			});

			// Verify console.warn was called with warning about invalid dependency
			expect(consoleWarnSpy).toHaveBeenCalled();

			// Find the call that mentions invalid dependency references
			const warnCalls = consoleWarnSpy.mock.calls.map((call) => call[0]);
			const hasInvalidDepWarning = warnCalls.some(
				(msg) =>
					typeof msg === 'string' &&
					msg.includes('invalid dependency reference')
			);
			const hasSpecificTaskWarning = warnCalls.some(
				(msg) =>
					typeof msg === 'string' &&
					msg.includes('Task 1') &&
					msg.includes('999')
			);

			expect(hasInvalidDepWarning).toBe(true);
			expect(hasSpecificTaskWarning).toBe(true);

			consoleWarnSpy.mockRestore();
		});
	});

	describe('--all-tags option', () => {
		it('should fetch tasks from multiple tags and include tagName field', async () => {
			const command = new ListTasksCommand();

			// Mock tasks for different tags
			const featureATasksResponse = {
				tasks: [
					{
						id: '1',
						title: 'Feature A Task 1',
						status: 'pending',
						dependencies: []
					},
					{
						id: '2',
						title: 'Feature A Task 2',
						status: 'done',
						dependencies: []
					}
				],
				total: 2,
				filtered: 2,
				storageType: 'json'
			};

			const featureBTasksResponse = {
				tasks: [
					{
						id: '1',
						title: 'Feature B Task 1',
						status: 'in-progress',
						dependencies: []
					},
					{
						id: '2',
						title: 'Feature B Task 2',
						status: 'pending',
						dependencies: ['1']
					}
				],
				total: 2,
				filtered: 2,
				storageType: 'json'
			};

			const listMock = vi
				.fn()
				.mockResolvedValueOnce(featureATasksResponse)
				.mockResolvedValueOnce(featureBTasksResponse);

			(command as any).tmCore = {
				tasks: {
					list: listMock,
					getTagsWithStats: vi.fn().mockResolvedValue({
						tags: [
							{ name: 'feature-a', taskCount: 2 },
							{ name: 'feature-b', taskCount: 2 }
						]
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				allTags: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Should include tasks from both tags
			expect(parsed.tasks).toHaveLength(4);

			// Each task should have tagName field
			const featureATasks = parsed.tasks.filter(
				(t: any) => t.tagName === 'feature-a'
			);
			const featureBTasks = parsed.tasks.filter(
				(t: any) => t.tagName === 'feature-b'
			);

			expect(featureATasks).toHaveLength(2);
			expect(featureBTasks).toHaveLength(2);

			// Verify metadata indicates all tags
			expect(parsed.metadata.allTags).toBe(true);
			expect(parsed.metadata.tag).toBe('all');
			expect(parsed.metadata.total).toBe(4);
		});

		it('should apply --ready filter per-tag when combined with --all-tags', async () => {
			const command = new ListTasksCommand();

			// Tag A: Task 1 is done, Task 2 depends on Task 1 (ready)
			const tagATasksResponse = {
				tasks: [
					{ id: '1', title: 'Tag A Task 1', status: 'done', dependencies: [] },
					{
						id: '2',
						title: 'Tag A Task 2',
						status: 'pending',
						dependencies: ['1']
					}
				],
				total: 2,
				filtered: 2,
				storageType: 'json'
			};

			// Tag B: Task 1 is pending (ready), Task 2 depends on Task 1 (not ready)
			// Note: Task IDs can overlap between tags, but dependencies are tag-scoped
			const tagBTasksResponse = {
				tasks: [
					{
						id: '1',
						title: 'Tag B Task 1',
						status: 'pending',
						dependencies: []
					},
					{
						id: '2',
						title: 'Tag B Task 2',
						status: 'pending',
						dependencies: ['1']
					}
				],
				total: 2,
				filtered: 2,
				storageType: 'json'
			};

			const listMock = vi
				.fn()
				.mockResolvedValueOnce(tagATasksResponse)
				.mockResolvedValueOnce(tagBTasksResponse);

			(command as any).tmCore = {
				tasks: {
					list: listMock,
					getTagsWithStats: vi.fn().mockResolvedValue({
						tags: [
							{ name: 'tag-a', taskCount: 2 },
							{ name: 'tag-b', taskCount: 2 }
						]
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				allTags: true,
				ready: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Tag A: Task 2 is ready (Task 1 is done)
			// Tag B: Task 1 is ready (no deps), Task 2 is NOT ready (Task 1 is pending)
			expect(parsed.tasks).toHaveLength(2);

			const taskIds = parsed.tasks.map((t: any) => `${t.tagName}:${t.id}`);
			expect(taskIds).toContain('tag-a:2'); // Ready: deps satisfied
			expect(taskIds).toContain('tag-b:1'); // Ready: no deps
			expect(taskIds).not.toContain('tag-b:2'); // Not ready: depends on pending task
		});

		it('should reject --all-tags combined with --watch', () => {
			const consoleErrorSpy = vi
				.spyOn(console, 'error')
				.mockImplementation(() => {});
			const command = new ListTasksCommand();

			const isValid = (command as any).validateOptions({
				allTags: true,
				watch: true
			});

			expect(isValid).toBe(false);
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining('--all-tags cannot be used with --watch mode')
			);

			consoleErrorSpy.mockRestore();
		});

		it('should apply --blocking filter with --all-tags', async () => {
			const command = new ListTasksCommand();

			// Tag A: Task 1 blocks Task 2
			const tagATasksResponse = {
				tasks: [
					{
						id: '1',
						title: 'Tag A Task 1',
						status: 'pending',
						dependencies: []
					},
					{
						id: '2',
						title: 'Tag A Task 2',
						status: 'pending',
						dependencies: ['1']
					}
				],
				total: 2,
				filtered: 2,
				storageType: 'json'
			};

			// Tag B: Task 1 blocks nothing (no other tasks depend on it)
			const tagBTasksResponse = {
				tasks: [
					{
						id: '1',
						title: 'Tag B Task 1',
						status: 'pending',
						dependencies: []
					}
				],
				total: 1,
				filtered: 1,
				storageType: 'json'
			};

			const listMock = vi
				.fn()
				.mockResolvedValueOnce(tagATasksResponse)
				.mockResolvedValueOnce(tagBTasksResponse);

			(command as any).tmCore = {
				tasks: {
					list: listMock,
					getTagsWithStats: vi.fn().mockResolvedValue({
						tags: [
							{ name: 'tag-a', taskCount: 2 },
							{ name: 'tag-b', taskCount: 1 }
						]
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				allTags: true,
				blocking: true,
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Only Tag A Task 1 blocks other tasks
			expect(parsed.tasks).toHaveLength(1);
			expect(parsed.tasks[0].tagName).toBe('tag-a');
			expect(parsed.tasks[0].id).toBe('1');
			expect(parsed.tasks[0].blocks).toContain('2');
		});

		it('should apply status filter with --all-tags', async () => {
			const command = new ListTasksCommand();

			const tagATasksResponse = {
				tasks: [
					{
						id: '1',
						title: 'Tag A Task 1',
						status: 'pending',
						dependencies: []
					},
					{ id: '2', title: 'Tag A Task 2', status: 'done', dependencies: [] }
				],
				total: 2,
				filtered: 2,
				storageType: 'json'
			};

			const tagBTasksResponse = {
				tasks: [
					{
						id: '1',
						title: 'Tag B Task 1',
						status: 'in-progress',
						dependencies: []
					},
					{
						id: '2',
						title: 'Tag B Task 2',
						status: 'pending',
						dependencies: []
					}
				],
				total: 2,
				filtered: 2,
				storageType: 'json'
			};

			const listMock = vi
				.fn()
				.mockResolvedValueOnce(tagATasksResponse)
				.mockResolvedValueOnce(tagBTasksResponse);

			(command as any).tmCore = {
				tasks: {
					list: listMock,
					getTagsWithStats: vi.fn().mockResolvedValue({
						tags: [
							{ name: 'tag-a', taskCount: 2 },
							{ name: 'tag-b', taskCount: 2 }
						]
					}),
					getStorageType: vi.fn().mockReturnValue('json')
				},
				config: {
					getActiveTag: vi.fn().mockReturnValue('master')
				}
			};

			await (command as any).executeCommand({
				allTags: true,
				status: 'pending',
				json: true
			});

			const output = consoleLogSpy.mock.calls[0][0];
			const parsed = JSON.parse(output);

			// Only pending tasks should be included
			expect(parsed.tasks).toHaveLength(2);
			expect(parsed.tasks.every((t: any) => t.status === 'pending')).toBe(true);

			const taskIds = parsed.tasks.map((t: any) => `${t.tagName}:${t.id}`);
			expect(taskIds).toContain('tag-a:1');
			expect(taskIds).toContain('tag-b:2');
		});
	});
});
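The spec above relies on a buildBlocksMap helper from @tm/core that inverts the dependency graph. A minimal sketch consistent with those assertions (illustrative; the exported implementation may differ):

// Illustrative only; the InvalidDependency shape matches its usage in the tests above.
type InvalidDependency = { taskId: string; depId: string };
type DepTask = { id: string | number; dependencies?: (string | number)[] };

function buildBlocksMap(tasks: DepTask[]): {
	blocksMap: Map<string, string[]>;
	invalidDependencies: InvalidDependency[];
} {
	const ids = new Set(tasks.map((t) => String(t.id)));
	const blocksMap = new Map<string, string[]>();
	for (const t of tasks) blocksMap.set(String(t.id), []);
	const invalidDependencies: InvalidDependency[] = [];
	for (const task of tasks) {
		for (const dep of task.dependencies ?? []) {
			const depId = String(dep);
			if (!ids.has(depId)) {
				// Dependency points at a task that does not exist in this tag.
				invalidDependencies.push({ taskId: String(task.id), depId });
			} else {
				// Invert the edge: if task depends on dep, then dep blocks task.
				blocksMap.get(depId)!.push(String(task.id));
			}
		}
	}
	return { blocksMap, invalidDependencies };
}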
apps/cli/src/commands/list.command.ts
@@ -5,14 +5,20 @@
 
 import {
 	OUTPUT_FORMATS,
+	buildBlocksMap,
+	createTmCore,
+	filterBlockingTasks,
+	filterReadyTasks,
+	isTaskComplete,
+	type InvalidDependency,
 	type OutputFormat,
 	STATUS_ICONS,
 	TASK_STATUSES,
 	type Task,
 	type TaskStatus,
+	type TaskWithBlocks,
 	type TmCore,
-	type WatchSubscription,
-	createTmCore
+	type WatchSubscription
 } from '@tm/core';
 import type { StorageType } from '@tm/core';
 import chalk from 'chalk';
@@ -33,7 +39,6 @@ import {
 import { displayCommandHeader } from '../utils/display-helpers.js';
 import { displayError } from '../utils/error-handler.js';
 import { getProjectRoot } from '../utils/project-root.js';
-import { isTaskComplete } from '../utils/task-status.js';
 import * as ui from '../utils/ui.js';
 
 /**
@@ -50,17 +55,26 @@ export interface ListCommandOptions {
 	silent?: boolean;
 	project?: string;
 	watch?: boolean;
+	ready?: boolean;
+	blocking?: boolean;
+	allTags?: boolean;
 }
 
+/**
+ * Task with tag info for cross-tag listing
+ */
+export type TaskWithTag = TaskWithBlocks & { tagName: string };
+
 /**
  * Result type from list command
  */
 export interface ListTasksResult {
-	tasks: Task[];
+	tasks: TaskWithBlocks[] | TaskWithTag[];
 	total: number;
 	filtered: number;
 	tag?: string;
 	storageType: Exclude<StorageType, 'auto'>;
+	allTags?: boolean;
 }
 
 /**
@@ -104,6 +118,15 @@ export class ListTasksCommand extends Command {
 			'Project root directory (auto-detected if not provided)'
 		)
 		.option('-w, --watch', 'Watch for changes and update list automatically')
+		.option(
+			'--ready',
+			'Show only tasks ready to work on (dependencies satisfied)'
+		)
+		.option('--blocking', 'Show only tasks that block other tasks')
+		.option(
+			'--all-tags',
+			'Show tasks from all tags (combine with --ready for actionable tasks)'
+		)
 		.action(async (statusArg?: string, options?: ListCommandOptions) => {
 			// Handle special "all" keyword to show with subtasks
 			let status = statusArg || options?.status;
@@ -142,7 +165,10 @@ export class ListTasksCommand extends Command {
 		if (options.watch) {
 			await this.watchTasks(options);
 		} else {
-			const result = await this.getTasks(options);
+			// Use cross-tag listing when --all-tags is specified
+			const result = options.allTags
+				? await this.getTasksFromAllTags(options)
+				: await this.getTasks(options);
 
 			// Store result for programmatic access
 			this.setLastResult(result);
@@ -195,8 +221,15 @@ export class ListTasksCommand extends Command {
 					// Show sync message with timestamp
 					displaySyncMessage(storageType, lastSync);
 					displayWatchFooter(storageType, lastSync);
-				} catch {
-					// Ignore errors during watch (e.g. partial writes)
+				} catch (refreshError: unknown) {
+					// Log warning but continue watching - don't crash on transient errors
+					const message =
+						refreshError instanceof Error
+							? refreshError.message
+							: String(refreshError);
+					console.error(
+						chalk.yellow(`\nWarning: Failed to refresh tasks: ${message}`)
+					);
 				}
 			} else if (event.type === 'error' && event.error) {
 				console.error(chalk.red(`\n⚠ Watch error: ${event.error.message}`));
@@ -250,6 +283,17 @@ export class ListTasksCommand extends Command {
 			}
 		}
 
+		// Validate --all-tags cannot be used with --watch
+		if (options.allTags && options.watch) {
+			console.error(chalk.red('--all-tags cannot be used with --watch mode'));
+			console.error(
+				chalk.gray(
+					'Use --all-tags without --watch, or --watch without --all-tags'
+				)
+			);
+			return false;
+		}
+
 		return true;
 	}
 
@@ -272,14 +316,20 @@ export class ListTasksCommand extends Command {
 			throw new Error('TmCore not initialized');
 		}
 
-		// Build filter
-		const filter =
+		// Parse status filter values
+		const statusFilterValues =
 			options.status && options.status !== 'all'
-				? {
-						status: options.status
-							.split(',')
-							.map((s: string) => s.trim() as TaskStatus)
-					}
+				? options.status.split(',').map((s: string) => s.trim() as TaskStatus)
 				: undefined;
 
+		// When --ready is used, we need ALL tasks to correctly compute which dependencies are satisfied
+		// So we fetch without status filter first, then apply status filter after ready/blocking
+		const needsAllTasks = options.ready || options.blocking;
+
+		// Build filter - skip status filter if we need all tasks for ready/blocking computation
+		const filter =
+			statusFilterValues && !needsAllTasks
+				? { status: statusFilterValues }
+				: undefined;
+
 		// Call tm-core
@@ -289,7 +339,174 @@ export class ListTasksCommand extends Command {
 			includeSubtasks: options.withSubtasks
 		});
 
-		return result as ListTasksResult;
+		// Build blocks map and enrich tasks with blocks field
+		const { blocksMap, invalidDependencies } = buildBlocksMap(result.tasks);
+		this.displayInvalidDependencyWarnings(invalidDependencies);
+		const enrichedTasks = result.tasks.map((task) => ({
+			...task,
+			blocks: blocksMap.get(String(task.id)) || []
+		}));
+
+		// Apply ready/blocking filters (with full task context)
+		let filteredTasks = enrichedTasks;
+
+		if (options.ready) {
+			filteredTasks = filterReadyTasks(filteredTasks);
+		}
+
+		if (options.blocking) {
+			filteredTasks = filterBlockingTasks(filteredTasks);
+		}
+
+		// Apply status filter AFTER ready/blocking if we deferred it earlier
+		if (statusFilterValues && needsAllTasks) {
+			filteredTasks = filteredTasks.filter((task) =>
+				statusFilterValues.includes(task.status)
+			);
+		}
+
+		return {
+			...result,
+			tasks: filteredTasks,
+			filtered: filteredTasks.length
+		} as ListTasksResult;
 	}
 
+	/**
+	 * Get ready tasks from all tags
+	 * Fetches tasks from each tag and combines them with tag info
+	 */
+	private async getTasksFromAllTags(
+		options: ListCommandOptions
+	): Promise<ListTasksResult> {
+		if (!this.tmCore) {
+			throw new Error('TmCore not initialized');
+		}
+
+		// Get all tags
+		const tagsResult = await this.tmCore.tasks.getTagsWithStats();
+		const allTaggedTasks: TaskWithTag[] = [];
+		let totalTaskCount = 0;
+		const failedTags: Array<{ name: string; error: string }> = [];
+
+		// Fetch tasks from each tag
+		for (const tagInfo of tagsResult.tags) {
+			const tagName = tagInfo.name;
+
+			try {
+				// Get tasks for this tag
+				const result = await this.tmCore.tasks.list({
+					tag: tagName,
+					includeSubtasks: options.withSubtasks
+				});
+
+				// Track total count before any filtering (consistent with getTasks)
+				totalTaskCount += result.tasks.length;
+
+				// Build blocks map for this tag's tasks
+				const { blocksMap, invalidDependencies } = buildBlocksMap(result.tasks);
+				this.displayInvalidDependencyWarnings(invalidDependencies, tagName);
+
+				// Enrich tasks with blocks field and tag name
+				const enrichedTasks: TaskWithTag[] = result.tasks.map((task) => ({
+					...task,
+					blocks: blocksMap.get(String(task.id)) || [],
+					tagName
+				}));
+
+				// Apply ready filter per-tag to respect tag-scoped dependencies
+				// (task IDs may overlap between tags, so we must filter within each tag)
+				const tasksToAdd: TaskWithTag[] = options.ready
+					? (filterReadyTasks(enrichedTasks) as TaskWithTag[])
+					: enrichedTasks;
+
+				allTaggedTasks.push(...tasksToAdd);
+			} catch (tagError: unknown) {
+				const errorMessage =
+					tagError instanceof Error ? tagError.message : String(tagError);
+				failedTags.push({ name: tagName, error: errorMessage });
+				continue; // Skip this tag but continue with others
+			}
+		}
+
+		// Warn about failed tags
+		if (failedTags.length > 0) {
+			console.warn(
+				chalk.yellow(
+					`\nWarning: Could not fetch tasks from ${failedTags.length} tag(s):`
+				)
+			);
+			failedTags.forEach(({ name, error }) => {
+				console.warn(chalk.gray(`  ${name}: ${error}`));
+			});
+		}
+
+		// If ALL tags failed, throw to surface the issue
+		if (
+			failedTags.length === tagsResult.tags.length &&
+			tagsResult.tags.length > 0
+		) {
+			throw new Error(
+				`Failed to fetch tasks from any tag. First error: ${failedTags[0].error}`
+			);
+		}
+
+		// Apply additional filters
+		let filteredTasks: TaskWithTag[] = allTaggedTasks;
+
+		// Apply blocking filter if specified
+		if (options.blocking) {
+			filteredTasks = filteredTasks.filter((task) => task.blocks.length > 0);
+		}
+
+		// Apply status filter if specified
+		if (options.status && options.status !== 'all') {
+			const statusValues = options.status
+				.split(',')
+				.map((s) => s.trim() as TaskStatus);
+			filteredTasks = filteredTasks.filter((task) =>
+				statusValues.includes(task.status)
+			);
+		}
+
+		return {
+			tasks: filteredTasks,
+			total: totalTaskCount,
+			filtered: filteredTasks.length,
+			storageType: this.tmCore.tasks.getStorageType(),
+			allTags: true
+		};
+	}
+
+	/**
+	 * Display warnings for invalid dependency references
+	 * @param invalidDependencies - Array of invalid dependency references from buildBlocksMap
+	 * @param tagName - Optional tag name for context in multi-tag mode
+	 */
+	private displayInvalidDependencyWarnings(
+		invalidDependencies: InvalidDependency[],
+		tagName?: string
+	): void {
+		if (invalidDependencies.length === 0) {
+			return;
+		}
+
+		const tagContext = tagName ? ` (tag: ${tagName})` : '';
+		console.warn(
+			chalk.yellow(
+				`\nWarning: ${invalidDependencies.length} invalid dependency reference(s) found${tagContext}:`
+			)
+		);
+		invalidDependencies.slice(0, 5).forEach(({ taskId, depId }) => {
+			console.warn(
+				chalk.gray(`  Task ${taskId} depends on non-existent task ${depId}`)
+			);
+		});
+		if (invalidDependencies.length > 5) {
+			console.warn(
+				chalk.gray(`  ...and ${invalidDependencies.length - 5} more`)
+			);
+		}
+	}
 
 	/**
@@ -300,13 +517,12 @@ export class ListTasksCommand extends Command {
 		options: ListCommandOptions
 	): void {
 		// Resolve format: --json and --compact flags override --format option
-		const format = (
-			options.json
-				? 'json'
-				: options.compact
-					? 'compact'
-					: options.format || 'text'
-		) as OutputFormat;
+		let format: OutputFormat = options.format || 'text';
+		if (options.json) {
+			format = 'json';
+		} else if (options.compact) {
+			format = 'compact';
+		}
 
 		switch (format) {
 			case 'json':
@@ -335,8 +551,9 @@ export class ListTasksCommand extends Command {
 				metadata: {
 					total: data.total,
 					filtered: data.filtered,
-					tag: data.tag,
-					storageType: data.storageType
+					tag: data.allTags ? 'all' : data.tag,
+					storageType: data.storageType,
+					allTags: data.allTags || false
 				}
 			},
 			null,
@@ -352,29 +569,32 @@ export class ListTasksCommand extends Command {
 		data: ListTasksResult,
 		options: ListCommandOptions
 	): void {
-		const { tasks, tag, storageType } = data;
+		const { tasks, tag, storageType, allTags } = data;
 
 		// Display header unless --no-header is set
 		if (options.noHeader !== true) {
 			displayCommandHeader(this.tmCore, {
-				tag: tag || 'master',
+				tag: allTags ? 'all tags' : tag || 'master',
 				storageType
 			});
 		}
 
-		tasks.forEach((task) => {
+		for (const task of tasks) {
 			const icon = STATUS_ICONS[task.status];
-			console.log(`${chalk.cyan(task.id)} ${icon} ${task.title}`);
+			// Show tag in compact format when listing all tags (tasks are TaskWithTag[])
+			const tagPrefix =
+				allTags && 'tagName' in task ? chalk.magenta(`[${task.tagName}] `) : '';
+			console.log(`${tagPrefix}${chalk.cyan(task.id)} ${icon} ${task.title}`);
 
 			if (options.withSubtasks && task.subtasks?.length) {
-				task.subtasks.forEach((subtask) => {
+				for (const subtask of task.subtasks) {
 					const subIcon = STATUS_ICONS[subtask.status];
 					console.log(
 						`  ${chalk.gray(String(subtask.id))} ${subIcon} ${chalk.gray(subtask.title)}`
 					);
-				});
+				}
 			}
-		});
+		}
 	}
@@ -384,12 +604,12 @@ export class ListTasksCommand extends Command {
 		data: ListTasksResult,
 		options: ListCommandOptions
 	): void {
-		const { tasks, tag, storageType } = data;
+		const { tasks, tag, storageType, allTags } = data;
 
 		// Display header unless --no-header is set
 		if (options.noHeader !== true) {
 			displayCommandHeader(this.tmCore, {
-				tag: tag || 'master',
+				tag: allTags ? 'all tags' : tag || 'master',
 				storageType
 			});
 		}
@@ -414,43 +634,52 @@ export class ListTasksCommand extends Command {
 			? tasks.find((t) => String(t.id) === String(nextTaskInfo.id))
 			: undefined;
 
-		// Display dashboard boxes (nextTask already has complexity from storage enrichment)
-		displayDashboards(
-			taskStats,
-			subtaskStats,
-			priorityBreakdown,
-			depStats,
-			nextTask
-		);
+		// Display dashboard boxes unless filtering by --ready, --blocking, or --all-tags
+		// (filtered/cross-tag dashboards would show misleading statistics)
+		const isFiltered = options.ready || options.blocking || allTags;
+		if (!isFiltered) {
+			displayDashboards(
+				taskStats,
+				subtaskStats,
+				priorityBreakdown,
+				depStats,
+				nextTask
+			);
+		}
 
 		// Task table
 		console.log(
 			ui.createTaskTable(tasks, {
 				showSubtasks: options.withSubtasks,
 				showDependencies: true,
-				showComplexity: true // Enable complexity column
+				showBlocks: true, // Show which tasks this one blocks
+				showComplexity: true, // Enable complexity column
+				showTag: allTags // Show tag column for cross-tag listing
 			})
 		);
 
 		// Display recommended next task section immediately after table
-		// Don't show "no tasks available" message in list command - that's for tm next
-		if (nextTask) {
-			const description = getTaskDescription(nextTask);
+		// Skip when filtering by --ready or --blocking (user already knows what they're looking at)
+		if (!isFiltered) {
+			// Don't show "no tasks available" message in list command - that's for tm next
+			if (nextTask) {
+				const description = getTaskDescription(nextTask);
 
-			displayRecommendedNextTask({
-				id: nextTask.id,
-				title: nextTask.title,
-				priority: nextTask.priority,
-				status: nextTask.status,
-				dependencies: nextTask.dependencies,
-				description,
-				complexity: nextTask.complexity as number | undefined
-			});
+				displayRecommendedNextTask({
+					id: nextTask.id,
+					title: nextTask.title,
+					priority: nextTask.priority,
+					status: nextTask.status,
+					dependencies: nextTask.dependencies,
+					description,
+					complexity: nextTask.complexity as number | undefined
+				});
+			}
+			// If no next task, don't show any message - dashboard already shows the info
+
+			// Display suggested next steps at the end
+			displaySuggestedNextSteps();
 		}
-		// If no next task, don't show any message - dashboard already shows the info
-
-		// Display suggested next steps at the end
-		displaySuggestedNextSteps();
 	}
 
 	/**
apps/cli/src/commands/loop.command.spec.ts
@@ -77,8 +77,8 @@ describe('LoopCommand', () => {
 		mockTmCore = {
 			loop: {
 				run: mockLoopRun,
-				checkSandboxAuth: vi.fn().mockReturnValue(true),
-				runInteractiveAuth: vi.fn(),
+				checkSandboxAuth: vi.fn().mockReturnValue({ ready: true }),
+				runInteractiveAuth: vi.fn().mockReturnValue({ success: true }),
 				resolveIterations: vi.fn().mockImplementation((opts) => {
 					// Mirror the real implementation logic for accurate testing
 					if (opts.userIterations !== undefined) return opts.userIterations;
@@ -389,27 +389,79 @@
 			);
 		});
 
-		it('should check sandbox auth before running', async () => {
+		it('should check sandbox auth when --sandbox flag is provided', async () => {
 			const result = createMockResult();
 			mockLoopRun.mockResolvedValue(result);
+			mockTmCore.loop.checkSandboxAuth.mockReturnValue({ ready: true });
 
 			const execute = (loopCommand as any).execute.bind(loopCommand);
-			await execute({});
+			await execute({ sandbox: true });
 
 			expect(mockTmCore.loop.checkSandboxAuth).toHaveBeenCalled();
 		});
 
-		it('should run interactive auth when sandbox not ready', async () => {
-			mockTmCore.loop.checkSandboxAuth.mockReturnValue(false);
+		it('should not check sandbox auth when --sandbox flag is not provided', async () => {
 			const result = createMockResult();
 			mockLoopRun.mockResolvedValue(result);
 
 			const execute = (loopCommand as any).execute.bind(loopCommand);
 			await execute({});
 
+			expect(mockTmCore.loop.checkSandboxAuth).not.toHaveBeenCalled();
+		});
+
+		it('should run interactive auth when sandbox not ready', async () => {
+			mockTmCore.loop.checkSandboxAuth.mockReturnValue({ ready: false });
+			mockTmCore.loop.runInteractiveAuth.mockReturnValue({ success: true });
+			const result = createMockResult();
+			mockLoopRun.mockResolvedValue(result);
+
+			const execute = (loopCommand as any).execute.bind(loopCommand);
+			await execute({ sandbox: true });
+
 			expect(mockTmCore.loop.runInteractiveAuth).toHaveBeenCalled();
 		});
 
+		it('should throw error when sandbox auth has error', async () => {
+			mockTmCore.loop.checkSandboxAuth.mockReturnValue({
+				error: 'Sandbox auth failed'
+			});
+
+			const execute = (loopCommand as any).execute.bind(loopCommand);
+
+			try {
+				await execute({ sandbox: true });
+			} catch {
+				// Expected - processExitSpy mock throws to simulate process.exit
+			}
+
+			expect(displayError).toHaveBeenCalledWith(
+				expect.objectContaining({ message: 'Sandbox auth failed' }),
+				{ skipExit: true }
+			);
+		});
+
+		it('should throw error when interactive auth fails', async () => {
+			mockTmCore.loop.checkSandboxAuth.mockReturnValue({ ready: false });
+			mockTmCore.loop.runInteractiveAuth.mockReturnValue({
+				success: false,
+				error: 'Auth failed'
+			});
+
+			const execute = (loopCommand as any).execute.bind(loopCommand);
+
+			try {
+				await execute({ sandbox: true });
+			} catch {
+				// Expected - processExitSpy mock throws to simulate process.exit
+			}
+
+			expect(displayError).toHaveBeenCalledWith(
+				expect.objectContaining({ message: 'Auth failed' }),
+				{ skipExit: true }
+			);
+		});
+
 		it('should show next task before starting loop', async () => {
 			const result = createMockResult();
 			mockLoopRun.mockResolvedValue(result);
@@ -6,9 +6,9 @@ import path from 'node:path';
|
||||
import {
|
||||
type LoopConfig,
|
||||
type LoopResult,
|
||||
PRESET_NAMES,
|
||||
type TmCore,
|
||||
createTmCore,
|
||||
PRESET_NAMES
|
||||
createTmCore
|
||||
} from '@tm/core';
|
||||
import chalk from 'chalk';
|
||||
import { Command } from 'commander';
|
||||
@@ -22,6 +22,7 @@ export interface LoopCommandOptions {
|
||||
progressFile?: string;
|
||||
tag?: string;
|
||||
project?: string;
|
||||
sandbox?: boolean;
|
||||
}
|
||||
|
||||
export class LoopCommand extends Command {
|
||||
@@ -47,6 +48,7 @@ export class LoopCommand extends Command {
|
||||
'--project <path>',
|
||||
'Project root directory (auto-detected if not provided)'
|
||||
)
|
||||
.option('--sandbox', 'Run Claude in Docker sandbox mode')
|
||||
.action((options: LoopCommandOptions) => this.execute(options));
|
||||
}
|
||||
|
||||
@@ -80,11 +82,17 @@ export class LoopCommand extends Command {
|
||||
storageType: this.tmCore.tasks.getStorageType()
|
||||
});
|
||||
|
||||
this.handleSandboxAuth();
|
||||
// Only check sandbox auth when --sandbox flag is used
|
||||
if (options.sandbox) {
|
||||
this.handleSandboxAuth();
|
||||
}
|
||||
|
||||
console.log(chalk.cyan('Starting Task Master Loop...'));
|
||||
console.log(chalk.dim(`Preset: ${prompt}`));
|
||||
console.log(chalk.dim(`Max iterations: ${iterations}`));
|
||||
console.log(
|
||||
chalk.dim(`Mode: ${options.sandbox ? 'Docker sandbox' : 'Claude CLI'}`)
|
||||
);
|
||||
|
||||
// Show next task only for default preset (other presets don't use Task Master tasks)
|
||||
if (prompt === 'default') {
|
||||
@@ -105,7 +113,8 @@ export class LoopCommand extends Command {
|
||||
iterations,
|
||||
prompt,
|
||||
progressFile,
|
||||
tag: options.tag
|
||||
tag: options.tag,
|
||||
sandbox: options.sandbox
|
||||
};
|
||||
|
||||
const result = await this.tmCore.loop.run(config);
|
||||
@@ -118,9 +127,13 @@ export class LoopCommand extends Command {
|
||||
|
||||
private handleSandboxAuth(): void {
|
||||
console.log(chalk.dim('Checking sandbox auth...'));
|
||||
const isAuthed = this.tmCore.loop.checkSandboxAuth();
|
||||
const authCheck = this.tmCore.loop.checkSandboxAuth();
|
||||
|
||||
if (isAuthed) {
|
||||
if (authCheck.error) {
|
||||
throw new Error(authCheck.error);
|
||||
}
|
||||
|
||||
if (authCheck.ready) {
|
||||
console.log(chalk.green('✓ Sandbox ready'));
|
||||
return;
|
||||
}
|
||||
@@ -132,7 +145,10 @@ export class LoopCommand extends Command {
|
||||
);
|
||||
console.log(chalk.dim('Please complete auth, then Ctrl+C to continue.\n'));
|
||||
|
||||
this.tmCore.loop.runInteractiveAuth();
|
||||
const authResult = this.tmCore.loop.runInteractiveAuth();
|
||||
if (!authResult.success) {
|
||||
throw new Error(authResult.error || 'Interactive authentication failed');
|
||||
}
|
||||
console.log(chalk.green('✓ Auth complete\n'));
|
||||
}
|
||||
|
||||
|
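Note: the hunks above change checkSandboxAuth/runInteractiveAuth from boolean/void returns to result objects. A minimal sketch of the calling contract (the wrapper function below is illustrative, not part of the diff):

// Illustrative only: shows how the { ready, error } / { success, error }
// results introduced above are meant to be consumed.
function ensureSandboxReady(loop: {
	checkSandboxAuth(): { ready: boolean; error?: string };
	runInteractiveAuth(): { success: boolean; error?: string };
}): void {
	const check = loop.checkSandboxAuth();
	if (check.error) throw new Error(check.error); // spawn-level failure (e.g. Docker missing)
	if (check.ready) return; // already authenticated
	const auth = loop.runInteractiveAuth();
	if (!auth.success) {
		throw new Error(auth.error ?? 'Interactive authentication failed');
	}
}
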
@@ -77,8 +77,10 @@ export class TagsCommand extends Command {
constructor(name?: string) {
super(name || 'tags');

// Configure the command
this.description('Manage tags for task organization');
// Configure the command with options that apply to default list action
this.description('Manage tags for task organization')
.option('--show-metadata', 'Show additional tag metadata')
.option('--ready', 'Show only tags with ready tasks available');

// Add subcommands
this.addListCommand();
@@ -88,9 +90,9 @@ export class TagsCommand extends Command {
this.addRenameCommand();
this.addCopyCommand();

// Default action: list tags
this.action(async () => {
await this.executeList();
// Default action: list tags (with options from parent command)
this.action(async (options) => {
await this.executeList(options);
});
}

@@ -101,6 +103,7 @@
this.command('list')
.description('List all tags with statistics (default action)')
.option('--show-metadata', 'Show additional tag metadata')
.option('--ready', 'Show only tags with ready tasks available')
.addHelpText(
'after',
`
@@ -108,6 +111,7 @@
$ tm tags # List all tags (default)
$ tm tags list # List all tags (explicit)
$ tm tags list --show-metadata # List with metadata
$ tm tags list --ready # Show only tags with parallelizable work
`
)
.action(async (options) => {
@@ -245,6 +249,7 @@
*/
private async executeList(options?: {
showMetadata?: boolean;
ready?: boolean;
}): Promise<void> {
try {
// Initialize tmCore first (needed by bridge functions)
@@ -257,7 +262,8 @@
tasksPath,
{
showTaskCounts: true,
showMetadata: options?.showMetadata || false
showMetadata: options?.showMetadata || false,
ready: options?.ready || false
},
{ projectRoot },
'text'

@@ -6,6 +6,7 @@
import type { Subtask, Task, TaskPriority } from '@tm/core';
import chalk from 'chalk';
import Table from 'cli-table3';
import { isTaskComplete } from '../../utils/task-status.js';
import { getComplexityWithColor } from '../formatters/complexity-formatters.js';
import { getPriorityWithColor } from '../formatters/priority-formatters.js';
import { getStatusWithColor } from '../formatters/status-formatters.js';
@@ -16,59 +17,94 @@ import { getBoxWidth, truncate } from '../layout/helpers.js';
*/
const DEFAULT_PRIORITY: TaskPriority = 'medium';

/**
* Task-like object that can optionally have blocks field and tag name
* Used for table display - accepts both enriched TaskWithBlocks and regular Task/Subtask
*/
export type TaskTableItem = (Task | Subtask) & {
blocks?: string[];
tagName?: string;
};

/**
* Column width ratios indexed by number of optional columns
* Each array contains ratios for: [Tag?, ID, Title, Status, Priority, Dependencies?, Blocks?, Complexity?]
*/
const COLUMN_WIDTH_RATIOS: Record<number, number[]> = {
0: [0.1, 0.5, 0.2, 0.2],
1: [0.08, 0.4, 0.18, 0.14, 0.2],
2: [0.08, 0.35, 0.14, 0.11, 0.16, 0.16],
3: [0.07, 0.3, 0.12, 0.1, 0.14, 0.14, 0.1],
4: [0.12, 0.06, 0.2, 0.1, 0.1, 0.12, 0.12, 0.1]
};

/**
* Create a task table for display
*/
export function createTaskTable(
tasks: (Task | Subtask)[],
tasks: TaskTableItem[],
options?: {
showSubtasks?: boolean;
showComplexity?: boolean;
showDependencies?: boolean;
showBlocks?: boolean;
showTag?: boolean;
}
): string {
const {
showSubtasks = false,
showComplexity = false,
showDependencies = true
showDependencies = true,
showBlocks = false,
showTag = false
} = options || {};

// Calculate dynamic column widths based on terminal width
const tableWidth = getBoxWidth(0.9, 100);
// Adjust column widths to better match the original layout
const baseColWidths = showComplexity
? [
Math.floor(tableWidth * 0.1),
Math.floor(tableWidth * 0.4),
Math.floor(tableWidth * 0.15),
Math.floor(tableWidth * 0.1),
Math.floor(tableWidth * 0.2),
Math.floor(tableWidth * 0.1)
] // ID, Title, Status, Priority, Dependencies, Complexity
: [
Math.floor(tableWidth * 0.08),
Math.floor(tableWidth * 0.4),
Math.floor(tableWidth * 0.18),
Math.floor(tableWidth * 0.12),
Math.floor(tableWidth * 0.2)
]; // ID, Title, Status, Priority, Dependencies

const headers = [
// Count optional columns and get corresponding width ratios
const optionalCols =
(showTag ? 1 : 0) +
(showDependencies ? 1 : 0) +
(showBlocks ? 1 : 0) +
(showComplexity ? 1 : 0);

const ratios = COLUMN_WIDTH_RATIOS[optionalCols] || COLUMN_WIDTH_RATIOS[4];
const baseColWidths = ratios.map((ratio) => Math.floor(tableWidth * ratio));

// Build headers and column widths dynamically
const headers: string[] = [];
const colWidths: number[] = [];
let colIndex = 0;

if (showTag) {
headers.push(chalk.blue.bold('Tag'));
colWidths.push(baseColWidths[colIndex++]);
}

// Core columns: ID, Title, Status, Priority
headers.push(
chalk.blue.bold('ID'),
chalk.blue.bold('Title'),
chalk.blue.bold('Status'),
chalk.blue.bold('Priority')
];
const colWidths = baseColWidths.slice(0, 4);
);
colWidths.push(...baseColWidths.slice(colIndex, colIndex + 4));
colIndex += 4;

if (showDependencies) {
headers.push(chalk.blue.bold('Dependencies'));
colWidths.push(baseColWidths[4]);
colWidths.push(baseColWidths[colIndex++]);
}

if (showBlocks) {
headers.push(chalk.blue.bold('Blocks'));
colWidths.push(baseColWidths[colIndex++]);
}

if (showComplexity) {
headers.push(chalk.blue.bold('Complexity'));
colWidths.push(baseColWidths[5] || 12);
colWidths.push(baseColWidths[colIndex] || 12);
}

const table = new Table({
@@ -79,17 +115,27 @@ export function createTaskTable(
});

tasks.forEach((task) => {
const row: string[] = [
const row: string[] = [];

// Tag goes first when showing all tags
if (showTag) {
row.push(chalk.magenta(task.tagName || '-'));
}

// Core columns: ID, Title, Status, Priority
// Title column index depends on whether tag is shown
const titleColIndex = showTag ? 2 : 1;
row.push(
chalk.cyan(task.id.toString()),
truncate(task.title, colWidths[1] - 3),
truncate(task.title, colWidths[titleColIndex] - 3),
getStatusWithColor(task.status, true), // Use table version
getPriorityWithColor(task.priority)
];
);

if (showDependencies) {
// For table display, show simple format without status icons
if (!task.dependencies || task.dependencies.length === 0) {
row.push(chalk.gray('None'));
row.push(chalk.gray('-'));
} else {
row.push(
chalk.cyan(task.dependencies.map((d) => String(d)).join(', '))
@@ -97,6 +143,21 @@ export function createTaskTable(
}
}

if (showBlocks) {
// Show tasks that depend on this one
if (!task.blocks || task.blocks.length === 0) {
row.push(chalk.gray('-'));
} else {
// Gray out blocks for completed tasks (no longer blocking)
const blocksText = task.blocks.join(', ');
row.push(
isTaskComplete(task.status)
? chalk.gray(blocksText)
: chalk.yellow(blocksText)
);
}
}

if (showComplexity) {
// Show complexity score from report if available
if (typeof task.complexity === 'number') {
@@ -111,23 +172,38 @@ export function createTaskTable(
// Add subtasks if requested
if (showSubtasks && task.subtasks && task.subtasks.length > 0) {
task.subtasks.forEach((subtask) => {
const subRow: string[] = [
const subRow: string[] = [];

// Tag goes first when showing all tags
if (showTag) {
// Subtasks inherit parent's tag, just show dash
subRow.push(chalk.gray('-'));
}

// Core subtask columns: ID, Title, Status, Priority
const subTitleColIndex = showTag ? 2 : 1;
subRow.push(
chalk.gray(` └─ ${subtask.id}`),
chalk.gray(truncate(subtask.title, colWidths[1] - 6)),
chalk.gray(truncate(subtask.title, colWidths[subTitleColIndex] - 6)),
chalk.gray(getStatusWithColor(subtask.status, true)),
chalk.gray(subtask.priority || DEFAULT_PRIORITY)
];
);

if (showDependencies) {
subRow.push(
chalk.gray(
subtask.dependencies && subtask.dependencies.length > 0
? subtask.dependencies.map((dep) => String(dep)).join(', ')
: 'None'
: '-'
)
);
}

if (showBlocks) {
// Subtasks don't typically have blocks, show dash
subRow.push(chalk.gray('-'));
}

if (showComplexity) {
const complexityDisplay =
typeof subtask.complexity === 'number'

@@ -14,9 +14,12 @@ import fs from 'node:fs';
import os from 'node:os';
import path from 'node:path';
import { createTask, createTasksFile } from '@tm/core/testing';
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { getCliBinPath } from '../../helpers/test-utils.js';

// Increase hook timeout for this file - init command can be slow in CI
vi.setConfig({ hookTimeout: 30000 });

// Capture initial working directory at module load time
const initialCwd = process.cwd();

@@ -144,11 +147,11 @@ describe('loop command', () => {
expect(output).toContain('--tag');
});

it('should show --json option in help', () => {
it('should show --sandbox option in help', () => {
const { output, exitCode } = runHelp();

expect(exitCode).toBe(0);
expect(output).toContain('--json');
expect(output).toContain('--sandbox');
});

it('should show --progress-file option in help', () => {
@@ -191,44 +194,6 @@ describe('loop command', () => {
});
});

describe('option parsing', () => {
it('should accept valid iterations', () => {
// Command will fail when trying to run claude, but validation should pass
const { output } = runLoop('-n 5');

// Should NOT contain validation error for iterations
expect(output.toLowerCase()).not.toContain('invalid iterations');
});

it('should accept custom prompt preset', () => {
const { output } = runLoop('-p test-coverage');

// Should NOT contain validation error for prompt
expect(output.toLowerCase()).not.toContain('invalid prompt');
});

it('should accept tag filter', () => {
const { output } = runLoop('-t feature');

// Should NOT contain validation error for tag
expect(output.toLowerCase()).not.toContain('invalid tag');
});

it('should accept progress-file option', () => {
const { output } = runLoop('--progress-file /tmp/test-progress.txt');

// Should NOT contain validation error for progress-file
expect(output.toLowerCase()).not.toContain('invalid progress');
});

it('should accept multiple options together', () => {
const { output } = runLoop('-n 3 -p default -t test');

// Should NOT contain validation errors
expect(output.toLowerCase()).not.toContain('invalid iterations');
});
});

describe('error messages', () => {
it('should show helpful error for invalid iterations', () => {
const { output, exitCode } = runLoop('-n invalid');
@@ -239,23 +204,4 @@ describe('loop command', () => {
expect(output.toLowerCase()).toContain('positive');
});
});

describe('project detection', () => {
it('should work in initialized project directory', () => {
// The project is already initialized in beforeEach
// Command will fail when trying to run claude, but project detection should work
const { output } = runLoop('-n 1');

// Should NOT contain "not a task-master project" or similar
expect(output.toLowerCase()).not.toContain('not initialized');
expect(output.toLowerCase()).not.toContain('no project');
});

it('should accept --project option for explicit path', () => {
const { output } = runLoop(`--project "${testDir}" -n 1`);

// Should NOT contain validation error for project path
expect(output.toLowerCase()).not.toContain('invalid project');
});
});
});

@@ -1,195 +0,0 @@
/**
* @fileoverview Unit tests for ListTasksCommand
*/

import type { TmCore } from '@tm/core';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

// Mock dependencies
vi.mock('@tm/core', () => ({
createTmCore: vi.fn(),
OUTPUT_FORMATS: ['text', 'json', 'compact'],
TASK_STATUSES: [
'pending',
'in-progress',
'done',
'review',
'deferred',
'cancelled'
],
STATUS_ICONS: {
pending: '⏳',
'in-progress': '🔄',
done: '✅',
review: '👀',
deferred: '⏸️',
cancelled: '❌'
}
}));

vi.mock('../../../src/utils/project-root.js', () => ({
getProjectRoot: vi.fn((path?: string) => path || '/test/project')
}));

vi.mock('../../../src/utils/error-handler.js', () => ({
displayError: vi.fn()
}));

vi.mock('../../../src/utils/display-helpers.js', () => ({
displayCommandHeader: vi.fn()
}));

vi.mock('../../../src/ui/index.js', () => ({
calculateDependencyStatistics: vi.fn(() => ({ total: 0, blocked: 0 })),
calculateSubtaskStatistics: vi.fn(() => ({ total: 0, completed: 0 })),
calculateTaskStatistics: vi.fn(() => ({ total: 0, completed: 0 })),
displayDashboards: vi.fn(),
displayRecommendedNextTask: vi.fn(),
displaySuggestedNextSteps: vi.fn(),
getPriorityBreakdown: vi.fn(() => ({})),
getTaskDescription: vi.fn(() => 'Test description')
}));

vi.mock('../../../src/utils/ui.js', () => ({
createTaskTable: vi.fn(() => 'Table output'),
displayWarning: vi.fn()
}));

import { ListTasksCommand } from '../../../src/commands/list.command.js';

describe('ListTasksCommand', () => {
let consoleLogSpy: any;
let mockTmCore: Partial<TmCore>;

beforeEach(() => {
consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});

mockTmCore = {
tasks: {
list: vi.fn().mockResolvedValue({
tasks: [{ id: '1', title: 'Test Task', status: 'pending' }],
total: 1,
filtered: 1,
storageType: 'json'
}),
getStorageType: vi.fn().mockReturnValue('json')
} as any,
config: {
getActiveTag: vi.fn().mockReturnValue('master')
} as any
};
});

afterEach(() => {
vi.clearAllMocks();
consoleLogSpy.mockRestore();
});

describe('JSON output format', () => {
it('should use JSON format when --json flag is set', async () => {
const command = new ListTasksCommand();

// Mock the tmCore initialization
(command as any).tmCore = mockTmCore;

// Execute with --json flag
await (command as any).executeCommand({
json: true,
format: 'text' // Should be overridden by --json
});

// Verify JSON output was called
expect(consoleLogSpy).toHaveBeenCalled();
const output = consoleLogSpy.mock.calls[0][0];

// Should be valid JSON
expect(() => JSON.parse(output)).not.toThrow();

const parsed = JSON.parse(output);
expect(parsed).toHaveProperty('tasks');
expect(parsed).toHaveProperty('metadata');
});

it('should override --format when --json is set', async () => {
const command = new ListTasksCommand();
(command as any).tmCore = mockTmCore;

await (command as any).executeCommand({
json: true,
format: 'compact' // Should be overridden
});

// Should output JSON, not compact format
const output = consoleLogSpy.mock.calls[0][0];
expect(() => JSON.parse(output)).not.toThrow();
});

it('should use specified format when --json is not set', async () => {
const command = new ListTasksCommand();
(command as any).tmCore = mockTmCore;

await (command as any).executeCommand({
format: 'compact'
});

// Should use compact format (not JSON)
const output = consoleLogSpy.mock.calls;
// In compact mode, output is not JSON
expect(output.length).toBeGreaterThan(0);
});

it('should default to text format when neither flag is set', async () => {
const command = new ListTasksCommand();
(command as any).tmCore = mockTmCore;

await (command as any).executeCommand({});

// Should use text format (not JSON)
// If any console.log was called, verify it's not JSON
if (consoleLogSpy.mock.calls.length > 0) {
const output = consoleLogSpy.mock.calls[0][0];
// Text format output should not be parseable JSON
// or should be the table string we mocked
expect(
output === 'Table output' ||
(() => {
try {
JSON.parse(output);
return false;
} catch {
return true;
}
})()
).toBe(true);
}
});
});

describe('format validation', () => {
it('should accept valid formats', () => {
const command = new ListTasksCommand();

expect((command as any).validateOptions({ format: 'text' })).toBe(true);
expect((command as any).validateOptions({ format: 'json' })).toBe(true);
expect((command as any).validateOptions({ format: 'compact' })).toBe(
true
);
});

it('should reject invalid formats', () => {
const consoleErrorSpy = vi
.spyOn(console, 'error')
.mockImplementation(() => {});
const command = new ListTasksCommand();

expect((command as any).validateOptions({ format: 'invalid' })).toBe(
false
);
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining('Invalid format: invalid')
);

consoleErrorSpy.mockRestore();
});
});
});

@@ -5,10 +5,14 @@
import type { TmCore } from '@tm/core';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

// Mock dependencies
vi.mock('@tm/core', () => ({
createTmCore: vi.fn()
}));
// Mock dependencies - use partial mock to keep TaskIdSchema and other exports
vi.mock('@tm/core', async (importOriginal) => {
const actual = await importOriginal<typeof import('@tm/core')>();
return {
...actual,
createTmCore: vi.fn()
};
});

vi.mock('../../../src/utils/project-root.js', () => ({
getProjectRoot: vi.fn((path?: string) => path || '/test/project')

@@ -317,7 +317,7 @@ Without test strategy, the AI won't know what to test during implementation.

- [PRD Creation and Parsing Guide](/getting-started/quick-start/prd-quick)
- [Task Structure Documentation](/capabilities/task-structure)
- [Microsoft Research RPG Paper](https://arxiv.org/abs/2410.21376) (Original methodology)
- [Microsoft Research RPG Paper](https://arxiv.org/html/2509.16198) (Original methodology)

---

@@ -17,8 +17,8 @@
"test": "vitest run",
"test:watch": "vitest",
"test:coverage": "vitest run --coverage",
"test:unit": "vitest run -t unit",
"test:integration": "vitest run -t integration",
"test:unit": "vitest run '**/*.spec.ts'",
"test:integration": "vitest run '**/*.test.ts'",
"test:ci": "vitest run --coverage --reporter=dot"
},
"dependencies": {

@@ -158,7 +158,7 @@ describe('generate MCP tool', () => {
const response = callMCPTool('generate', { projectRoot: testDir });

expect(response.data.orphanedFilesRemoved).toBe(1);
}, 15000);
}, 30000); // Longer timeout: this test makes 2 MCP calls (generate, then regenerate)

it('should accept output parameter for custom directory', () => {
const testData = createTasksFile({

@@ -1,4 +1,4 @@
# Available Models as of December 18, 2025
# Available Models as of January 15, 2026

## Main Models

@@ -15,10 +15,9 @@
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| claude-code | haiku | 0.45 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| codex-cli | gpt-5.1 | 0.76 | 0 | 0 |
| codex-cli | gpt-5.2-codex | 0.82 | 0 | 0 |
| codex-cli | gpt-5.1-codex-max | 0.78 | 0 | 0 |
| codex-cli | gpt-5.1-codex-mini | 0.72 | 0 | 0 |
| codex-cli | gpt-5.2 | 0.8 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-3-flash-preview | — | 0 | 0 |
@@ -132,10 +131,9 @@
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| claude-code | haiku | 0.45 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| codex-cli | gpt-5.1 | 0.76 | 0 | 0 |
| codex-cli | gpt-5.2-codex | 0.82 | 0 | 0 |
| codex-cli | gpt-5.1-codex-max | 0.78 | 0 | 0 |
| codex-cli | gpt-5.1-codex-mini | 0.72 | 0 | 0 |
| codex-cli | gpt-5.2 | 0.8 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-3-flash-preview | — | 0 | 0 |
@@ -191,10 +189,9 @@
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| claude-code | haiku | 0.45 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| codex-cli | gpt-5.1 | 0.76 | 0 | 0 |
| codex-cli | gpt-5.2-codex | 0.82 | 0 | 0 |
| codex-cli | gpt-5.1-codex-max | 0.78 | 0 | 0 |
| codex-cli | gpt-5.1-codex-mini | 0.72 | 0 | 0 |
| codex-cli | gpt-5.2 | 0.8 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-3-flash-preview | — | 0 | 0 |

50 package-lock.json generated
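Note: the lockfile hunks below register the new proper-lockfile dependency (plus @types/proper-lockfile and its transitive retry and signal-exit packages). For orientation, proper-lockfile's documented API is a lock() that resolves to a release function; a minimal sketch (the file name and option values are illustrative):

import lockfile from 'proper-lockfile';

// Illustrative wrapper: acquire the lock, run the critical section, always release.
async function withLock(file: string, fn: () => Promise<void>): Promise<void> {
	// stale: 10000 mirrors the 10-second stale-lock timeout described in the changeset
	const release = await lockfile.lock(file, { stale: 10000, retries: 5 });
	try {
		await fn();
	} finally {
		await release();
	}
}
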
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "task-master-ai",
|
||||
"version": "0.41.0",
|
||||
"version": "0.40.1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "task-master-ai",
|
||||
"version": "0.41.0",
|
||||
"version": "0.40.1",
|
||||
"license": "MIT WITH Commons-Clause",
|
||||
"workspaces": [
|
||||
"apps/*",
|
||||
@@ -67,6 +67,7 @@
|
||||
"ollama-ai-provider-v2": "^1.3.1",
|
||||
"open": "^10.2.0",
|
||||
"ora": "^8.2.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"simple-git": "^3.28.0",
|
||||
"steno": "^4.0.2",
|
||||
"terminal-link": "^5.0.0",
|
||||
@@ -13713,6 +13714,16 @@
|
||||
"integrity": "sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/proper-lockfile": {
|
||||
"version": "4.1.4",
|
||||
"resolved": "https://registry.npmjs.org/@types/proper-lockfile/-/proper-lockfile-4.1.4.tgz",
|
||||
"integrity": "sha512-uo2ABllncSqg9F1D4nugVl9v93RmjxF6LJzQLMLDdPaXCUIDPeOJ21Gbqi43xNKzBi/WQ0Q0dICqufzQbMjipQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/retry": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/react": {
|
||||
"version": "19.1.8",
|
||||
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz",
|
||||
@@ -13783,6 +13794,13 @@
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/retry": {
|
||||
"version": "0.12.5",
|
||||
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.5.tgz",
|
||||
"integrity": "sha512-3xSjTp3v03X/lSQLkczaN9UIEwJMoMCA1+Nb5HfbJEQWogdeQIyVtTvxPXDQjZ5zws8rFQfVfRdz03ARihPJgw==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/stack-utils": {
|
||||
"version": "2.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz",
|
||||
@@ -29634,6 +29652,23 @@
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/proper-lockfile": {
|
||||
"version": "4.1.2",
|
||||
"resolved": "https://registry.npmjs.org/proper-lockfile/-/proper-lockfile-4.1.2.tgz",
|
||||
"integrity": "sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"graceful-fs": "^4.2.4",
|
||||
"retry": "^0.12.0",
|
||||
"signal-exit": "^3.0.2"
|
||||
}
|
||||
},
|
||||
"node_modules/proper-lockfile/node_modules/signal-exit": {
|
||||
"version": "3.0.7",
|
||||
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
|
||||
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/property-information": {
|
||||
"version": "7.1.0",
|
||||
"resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
|
||||
@@ -31111,6 +31146,15 @@
|
||||
"url": "https://opencollective.com/unified"
|
||||
}
|
||||
},
|
||||
"node_modules/retry": {
|
||||
"version": "0.12.0",
|
||||
"resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz",
|
||||
"integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 4"
|
||||
}
|
||||
},
|
||||
"node_modules/retry-request": {
|
||||
"version": "7.0.2",
|
||||
"resolved": "https://registry.npmjs.org/retry-request/-/retry-request-7.0.2.tgz",
|
||||
@@ -36324,6 +36368,7 @@
|
||||
"@supabase/supabase-js": "^2.57.4",
|
||||
"date-fns": "^4.1.0",
|
||||
"fs-extra": "^11.3.2",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"simple-git": "^3.28.0",
|
||||
"steno": "^4.0.2",
|
||||
"zod": "^4.1.11"
|
||||
@@ -36331,6 +36376,7 @@
|
||||
"devDependencies": {
|
||||
"@types/fs-extra": "^11.0.4",
|
||||
"@types/node": "^22.10.5",
|
||||
"@types/proper-lockfile": "^4.1.4",
|
||||
"@vitest/coverage-v8": "^4.0.10",
|
||||
"strip-literal": "3.1.0",
|
||||
"typescript": "^5.9.2",
|
||||
|
||||
@@ -20,6 +20,9 @@
|
||||
"turbo:dev": "turbo dev",
|
||||
"turbo:build": "turbo build",
|
||||
"turbo:typecheck": "turbo typecheck",
|
||||
"turbo:test": "turbo test",
|
||||
"turbo:test:unit": "turbo test:unit",
|
||||
"turbo:test:integration": "turbo test:integration",
|
||||
"build:build-config": "npm run build -w @tm/build-config",
|
||||
"test": "cross-env NODE_ENV=test node --experimental-vm-modules node_modules/.bin/jest",
|
||||
"test:unit": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern=unit",
|
||||
@@ -117,7 +120,8 @@
|
||||
"turndown": "^7.2.2",
|
||||
"undici": "^7.16.0",
|
||||
"uuid": "^11.1.0",
|
||||
"zod": "^4.1.12"
|
||||
"zod": "^4.1.12",
|
||||
"proper-lockfile": "^4.1.2"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@anthropic-ai/claude-code": "^2.0.59",
|
||||
|
||||
@@ -21,6 +21,8 @@
|
||||
},
|
||||
"scripts": {
|
||||
"test": "vitest run",
|
||||
"test:unit": "vitest run '**/*.spec.ts'",
|
||||
"test:integration": "vitest run '**/*.test.ts'",
|
||||
"test:watch": "vitest",
|
||||
"test:coverage": "vitest run --coverage",
|
||||
"lint": "biome check --write",
|
||||
@@ -34,12 +36,14 @@
|
||||
"@supabase/supabase-js": "^2.57.4",
|
||||
"date-fns": "^4.1.0",
|
||||
"fs-extra": "^11.3.2",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"simple-git": "^3.28.0",
|
||||
"steno": "^4.0.2",
|
||||
"zod": "^4.1.11"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/fs-extra": "^11.0.4",
|
||||
"@types/proper-lockfile": "^4.1.4",
|
||||
"@types/node": "^22.10.5",
|
||||
"@vitest/coverage-v8": "^4.0.10",
|
||||
"strip-literal": "3.1.0",
|
||||
|
||||
@@ -204,6 +204,17 @@ export {
|
||||
type GenerateTaskFilesResult
|
||||
} from './modules/tasks/services/task-file-generator.service.js';
|
||||
|
||||
// Task filtering utilities
|
||||
export {
|
||||
buildBlocksMap,
|
||||
filterReadyTasks,
|
||||
filterBlockingTasks,
|
||||
ACTIONABLE_STATUSES,
|
||||
type TaskWithBlocks,
|
||||
type InvalidDependency,
|
||||
type BuildBlocksMapResult
|
||||
} from './modules/tasks/utils/index.js';
|
||||
|
||||
// Integration - Advanced
|
||||
export {
|
||||
ExportService,
|
||||
|
||||
@@ -34,72 +34,64 @@ describe('ConfigManager', () => {
|
||||
}
|
||||
});
|
||||
|
||||
// Setup default mock behaviors
|
||||
vi.mocked(ConfigLoader).mockImplementation(
|
||||
() =>
|
||||
({
|
||||
getDefaultConfig: vi.fn().mockReturnValue({
|
||||
models: { main: 'default-model', fallback: 'fallback-model' },
|
||||
storage: { type: 'file' },
|
||||
version: '1.0.0'
|
||||
}),
|
||||
loadLocalConfig: vi.fn().mockResolvedValue(null),
|
||||
loadGlobalConfig: vi.fn().mockResolvedValue(null),
|
||||
hasLocalConfig: vi.fn().mockResolvedValue(false),
|
||||
hasGlobalConfig: vi.fn().mockResolvedValue(false)
|
||||
}) as any
|
||||
);
|
||||
// Setup default mock behaviors using class syntax for proper constructor mocking
|
||||
vi.mocked(ConfigLoader).mockImplementation(function (this: any) {
|
||||
this.getDefaultConfig = vi.fn().mockReturnValue({
|
||||
models: { main: 'default-model', fallback: 'fallback-model' },
|
||||
storage: { type: 'file' },
|
||||
version: '1.0.0'
|
||||
});
|
||||
this.loadLocalConfig = vi.fn().mockResolvedValue(null);
|
||||
this.loadGlobalConfig = vi.fn().mockResolvedValue(null);
|
||||
this.hasLocalConfig = vi.fn().mockResolvedValue(false);
|
||||
this.hasGlobalConfig = vi.fn().mockResolvedValue(false);
|
||||
return this;
|
||||
} as any);
|
||||
|
||||
vi.mocked(ConfigMerger).mockImplementation(
|
||||
() =>
|
||||
({
|
||||
addSource: vi.fn(),
|
||||
clearSources: vi.fn(),
|
||||
merge: vi.fn().mockReturnValue({
|
||||
models: { main: 'merged-model', fallback: 'fallback-model' },
|
||||
storage: { type: 'file' }
|
||||
}),
|
||||
getSources: vi.fn().mockReturnValue([]),
|
||||
hasSource: vi.fn().mockReturnValue(false),
|
||||
removeSource: vi.fn().mockReturnValue(false)
|
||||
}) as any
|
||||
);
|
||||
vi.mocked(ConfigMerger).mockImplementation(function (this: any) {
|
||||
this.addSource = vi.fn();
|
||||
this.clearSources = vi.fn();
|
||||
this.merge = vi.fn().mockReturnValue({
|
||||
models: { main: 'merged-model', fallback: 'fallback-model' },
|
||||
storage: { type: 'file' }
|
||||
});
|
||||
this.getSources = vi.fn().mockReturnValue([]);
|
||||
this.hasSource = vi.fn().mockReturnValue(false);
|
||||
this.removeSource = vi.fn().mockReturnValue(false);
|
||||
return this;
|
||||
} as any);
|
||||
|
||||
vi.mocked(RuntimeStateManager).mockImplementation(
|
||||
() =>
|
||||
({
|
||||
loadState: vi.fn().mockResolvedValue({ activeTag: 'master' }),
|
||||
saveState: vi.fn().mockResolvedValue(undefined),
|
||||
getCurrentTag: vi.fn().mockReturnValue('master'),
|
||||
setCurrentTag: vi.fn().mockResolvedValue(undefined),
|
||||
getState: vi.fn().mockReturnValue({ activeTag: 'master' }),
|
||||
updateMetadata: vi.fn().mockResolvedValue(undefined),
|
||||
clearState: vi.fn().mockResolvedValue(undefined)
|
||||
}) as any
|
||||
);
|
||||
vi.mocked(RuntimeStateManager).mockImplementation(function (this: any) {
|
||||
this.loadState = vi.fn().mockResolvedValue({ activeTag: 'master' });
|
||||
this.saveState = vi.fn().mockResolvedValue(undefined);
|
||||
this.getCurrentTag = vi.fn().mockReturnValue('master');
|
||||
this.setCurrentTag = vi.fn().mockResolvedValue(undefined);
|
||||
this.getState = vi.fn().mockReturnValue({ activeTag: 'master' });
|
||||
this.updateMetadata = vi.fn().mockResolvedValue(undefined);
|
||||
this.clearState = vi.fn().mockResolvedValue(undefined);
|
||||
return this;
|
||||
} as any);
|
||||
|
||||
vi.mocked(ConfigPersistence).mockImplementation(
|
||||
() =>
|
||||
({
|
||||
saveConfig: vi.fn().mockResolvedValue(undefined),
|
||||
configExists: vi.fn().mockResolvedValue(false),
|
||||
deleteConfig: vi.fn().mockResolvedValue(undefined),
|
||||
getBackups: vi.fn().mockResolvedValue([]),
|
||||
restoreFromBackup: vi.fn().mockResolvedValue(undefined)
|
||||
}) as any
|
||||
);
|
||||
vi.mocked(ConfigPersistence).mockImplementation(function (this: any) {
|
||||
this.saveConfig = vi.fn().mockResolvedValue(undefined);
|
||||
this.configExists = vi.fn().mockResolvedValue(false);
|
||||
this.deleteConfig = vi.fn().mockResolvedValue(undefined);
|
||||
this.getBackups = vi.fn().mockResolvedValue([]);
|
||||
this.restoreFromBackup = vi.fn().mockResolvedValue(undefined);
|
||||
return this;
|
||||
} as any);
|
||||
|
||||
vi.mocked(EnvironmentConfigProvider).mockImplementation(
|
||||
() =>
|
||||
({
|
||||
loadConfig: vi.fn().mockReturnValue({}),
|
||||
getRuntimeState: vi.fn().mockReturnValue({}),
|
||||
hasEnvVar: vi.fn().mockReturnValue(false),
|
||||
getAllTaskmasterEnvVars: vi.fn().mockReturnValue({}),
|
||||
addMapping: vi.fn(),
|
||||
getMappings: vi.fn().mockReturnValue([])
|
||||
}) as any
|
||||
);
|
||||
vi.mocked(EnvironmentConfigProvider).mockImplementation(function (
|
||||
this: any
|
||||
) {
|
||||
this.loadConfig = vi.fn().mockReturnValue({});
|
||||
this.getRuntimeState = vi.fn().mockReturnValue({});
|
||||
this.hasEnvVar = vi.fn().mockReturnValue(false);
|
||||
this.getAllTaskmasterEnvVars = vi.fn().mockReturnValue({});
|
||||
this.addMapping = vi.fn();
|
||||
this.getMappings = vi.fn().mockReturnValue([]);
|
||||
return this;
|
||||
} as any);
|
||||
|
||||
// Since constructor is private, we need to use the factory method
|
||||
// But for testing, we'll create a test instance using create()
|
||||
@@ -178,28 +170,30 @@ describe('ConfigManager', () => {
|
||||
|
||||
it('should return storage configuration', () => {
|
||||
const storage = manager.getStorageConfig();
|
||||
expect(storage).toEqual({ type: 'file' });
|
||||
expect(storage).toEqual({
|
||||
type: 'file',
|
||||
basePath: testProjectRoot,
|
||||
apiConfigured: false
|
||||
});
|
||||
});
|
||||
|
||||
it('should return API storage configuration when configured', async () => {
|
||||
// Create a new instance with API storage config
|
||||
vi.mocked(ConfigMerger).mockImplementationOnce(
|
||||
() =>
|
||||
({
|
||||
addSource: vi.fn(),
|
||||
clearSources: vi.fn(),
|
||||
merge: vi.fn().mockReturnValue({
|
||||
storage: {
|
||||
type: 'api',
|
||||
apiEndpoint: 'https://api.example.com',
|
||||
apiAccessToken: 'token123'
|
||||
}
|
||||
}),
|
||||
getSources: vi.fn().mockReturnValue([]),
|
||||
hasSource: vi.fn().mockReturnValue(false),
|
||||
removeSource: vi.fn().mockReturnValue(false)
|
||||
}) as any
|
||||
);
|
||||
vi.mocked(ConfigMerger).mockImplementationOnce(function (this: any) {
|
||||
this.addSource = vi.fn();
|
||||
this.clearSources = vi.fn();
|
||||
this.merge = vi.fn().mockReturnValue({
|
||||
storage: {
|
||||
type: 'api',
|
||||
apiEndpoint: 'https://api.example.com',
|
||||
apiAccessToken: 'token123'
|
||||
}
|
||||
});
|
||||
this.getSources = vi.fn().mockReturnValue([]);
|
||||
this.hasSource = vi.fn().mockReturnValue(false);
|
||||
this.removeSource = vi.fn().mockReturnValue(false);
|
||||
return this;
|
||||
} as any);
|
||||
|
||||
const apiManager = await ConfigManager.create(testProjectRoot);
|
||||
|
||||
@@ -207,7 +201,9 @@ describe('ConfigManager', () => {
|
||||
expect(storage).toEqual({
|
||||
type: 'api',
|
||||
apiEndpoint: 'https://api.example.com',
|
||||
apiAccessToken: 'token123'
|
||||
apiAccessToken: 'token123',
|
||||
basePath: testProjectRoot,
|
||||
apiConfigured: true
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -2,17 +2,12 @@
|
||||
* @fileoverview Unit tests for ConfigLoader service
|
||||
*/
|
||||
|
||||
import fs from 'node:fs/promises';
|
||||
import * as fsPromises from 'node:fs/promises';
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { DEFAULT_CONFIG_VALUES } from '../../../common/interfaces/configuration.interface.js';
|
||||
import { ConfigLoader } from './config-loader.service.js';
|
||||
|
||||
vi.mock('node:fs', () => ({
|
||||
promises: {
|
||||
readFile: vi.fn(),
|
||||
access: vi.fn()
|
||||
}
|
||||
}));
|
||||
vi.mock('node:fs/promises');
|
||||
|
||||
describe('ConfigLoader', () => {
|
||||
let configLoader: ConfigLoader;
|
||||
@@ -56,11 +51,13 @@ describe('ConfigLoader', () => {
|
||||
storage: { type: 'api' as const }
|
||||
};
|
||||
|
||||
vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(mockConfig));
|
||||
vi.mocked(fsPromises.readFile).mockResolvedValue(
|
||||
JSON.stringify(mockConfig)
|
||||
);
|
||||
|
||||
const result = await configLoader.loadLocalConfig();
|
||||
|
||||
expect(fs.readFile).toHaveBeenCalledWith(
|
||||
expect(fsPromises.readFile).toHaveBeenCalledWith(
|
||||
'/test/project/.taskmaster/config.json',
|
||||
'utf-8'
|
||||
);
|
||||
@@ -70,7 +67,7 @@ describe('ConfigLoader', () => {
|
||||
it('should return null when config file does not exist', async () => {
|
||||
const error = new Error('File not found') as any;
|
||||
error.code = 'ENOENT';
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
vi.mocked(fsPromises.readFile).mockRejectedValue(error);
|
||||
|
||||
const result = await configLoader.loadLocalConfig();
|
||||
|
||||
@@ -79,7 +76,7 @@ describe('ConfigLoader', () => {
|
||||
|
||||
it('should throw TaskMasterError for other file errors', async () => {
|
||||
const error = new Error('Permission denied');
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
vi.mocked(fsPromises.readFile).mockRejectedValue(error);
|
||||
|
||||
await expect(configLoader.loadLocalConfig()).rejects.toThrow(
|
||||
'Failed to load local configuration'
|
||||
@@ -87,7 +84,7 @@ describe('ConfigLoader', () => {
|
||||
});
|
||||
|
||||
it('should throw error for invalid JSON', async () => {
|
||||
vi.mocked(fs.readFile).mockResolvedValue('invalid json');
|
||||
vi.mocked(fsPromises.readFile).mockResolvedValue('invalid json');
|
||||
|
||||
await expect(configLoader.loadLocalConfig()).rejects.toThrow();
|
||||
});
|
||||
@@ -102,18 +99,18 @@ describe('ConfigLoader', () => {
|
||||
|
||||
describe('hasLocalConfig', () => {
|
||||
it('should return true when local config exists', async () => {
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
vi.mocked(fsPromises.access).mockResolvedValue(undefined);
|
||||
|
||||
const result = await configLoader.hasLocalConfig();
|
||||
|
||||
expect(fs.access).toHaveBeenCalledWith(
|
||||
expect(fsPromises.access).toHaveBeenCalledWith(
|
||||
'/test/project/.taskmaster/config.json'
|
||||
);
|
||||
expect(result).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when local config does not exist', async () => {
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error('Not found'));
|
||||
vi.mocked(fsPromises.access).mockRejectedValue(new Error('Not found'));
|
||||
|
||||
const result = await configLoader.hasLocalConfig();
|
||||
|
||||
@@ -123,18 +120,18 @@ describe('ConfigLoader', () => {
|
||||
|
||||
describe('hasGlobalConfig', () => {
|
||||
it('should check global config path', async () => {
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
vi.mocked(fsPromises.access).mockResolvedValue(undefined);
|
||||
|
||||
const result = await configLoader.hasGlobalConfig();
|
||||
|
||||
expect(fs.access).toHaveBeenCalledWith(
|
||||
expect(fsPromises.access).toHaveBeenCalledWith(
|
||||
expect.stringContaining('.taskmaster/config.json')
|
||||
);
|
||||
expect(result).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when global config does not exist', async () => {
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error('Not found'));
|
||||
vi.mocked(fsPromises.access).mockRejectedValue(new Error('Not found'));
|
||||
|
||||
const result = await configLoader.hasGlobalConfig();
|
||||
|
||||
|
||||
@@ -2,19 +2,12 @@
|
||||
* @fileoverview Unit tests for RuntimeStateManager service
|
||||
*/
|
||||
|
||||
import fs from 'node:fs/promises';
|
||||
import * as fs from 'node:fs/promises';
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { DEFAULT_CONFIG_VALUES } from '../../../common/interfaces/configuration.interface.js';
|
||||
import { RuntimeStateManager } from './runtime-state-manager.service.js';
|
||||
|
||||
vi.mock('node:fs', () => ({
|
||||
promises: {
|
||||
readFile: vi.fn(),
|
||||
writeFile: vi.fn(),
|
||||
mkdir: vi.fn(),
|
||||
unlink: vi.fn()
|
||||
}
|
||||
}));
|
||||
vi.mock('node:fs/promises');
|
||||
|
||||
describe('RuntimeStateManager', () => {
|
||||
let stateManager: RuntimeStateManager;
|
||||
@@ -96,15 +89,10 @@ describe('RuntimeStateManager', () => {
|
||||
it('should handle invalid JSON gracefully', async () => {
|
||||
vi.mocked(fs.readFile).mockResolvedValue('invalid json');
|
||||
|
||||
// Mock console.warn to avoid noise in tests
|
||||
const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
|
||||
|
||||
const state = await stateManager.loadState();
|
||||
|
||||
// Should return default state when JSON is invalid
|
||||
expect(state.currentTag).toBe(DEFAULT_CONFIG_VALUES.TAGS.DEFAULT_TAG);
|
||||
expect(warnSpy).toHaveBeenCalled();
|
||||
|
||||
warnSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -124,7 +112,7 @@ describe('RuntimeStateManager', () => {
|
||||
// Verify writeFile was called with correct data
|
||||
expect(fs.writeFile).toHaveBeenCalledWith(
|
||||
'/test/project/.taskmaster/state.json',
|
||||
expect.stringContaining('"activeTag":"test-tag"'),
|
||||
expect.stringContaining('"currentTag": "test-tag"'),
|
||||
'utf-8'
|
||||
);
|
||||
|
||||
|
||||
@@ -102,7 +102,7 @@ describe('LoopDomain', () => {
|
||||
tag: 'feature-branch'
|
||||
};
|
||||
const config = (loopDomain as any).buildConfig(fullConfig);
|
||||
expect(config).toEqual(fullConfig);
|
||||
expect(config).toMatchObject(fullConfig);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -31,9 +31,9 @@ export class LoopDomain {
|
||||
|
||||
/**
|
||||
* Check if Docker sandbox auth is ready
|
||||
* @returns true if ready, false if auth needed
|
||||
* @returns Object with ready status and optional error message
|
||||
*/
|
||||
checkSandboxAuth(): boolean {
|
||||
checkSandboxAuth(): { ready: boolean; error?: string } {
|
||||
const service = new LoopService({ projectRoot: this.projectRoot });
|
||||
return service.checkSandboxAuth();
|
||||
}
|
||||
@@ -41,10 +41,11 @@ export class LoopDomain {
|
||||
/**
|
||||
* Run Docker sandbox session for user authentication
|
||||
* Blocks until user completes auth
|
||||
* @returns Object with success status and optional error message
|
||||
*/
|
||||
runInteractiveAuth(): void {
|
||||
runInteractiveAuth(): { success: boolean; error?: string } {
|
||||
const service = new LoopService({ projectRoot: this.projectRoot });
|
||||
service.runInteractiveAuth();
|
||||
return service.runInteractiveAuth();
|
||||
}
|
||||
|
||||
// ========== Loop Operations ==========
|
||||
@@ -188,7 +189,8 @@ export class LoopDomain {
|
||||
partial.progressFile ??
|
||||
path.join(this.projectRoot, '.taskmaster', 'progress.txt'),
|
||||
sleepSeconds: partial.sleepSeconds ?? 5,
|
||||
tag: partial.tag
|
||||
tag: partial.tag,
|
||||
sandbox: partial.sandbox ?? false
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ describe('LoopService', () => {
|
||||
});
|
||||
|
||||
describe('checkSandboxAuth()', () => {
|
||||
it('should return true when output contains ok', () => {
|
||||
it('should return ready=true when output contains ok', () => {
|
||||
mockSpawnSync.mockReturnValue({
|
||||
stdout: 'OK',
|
||||
stderr: '',
|
||||
@@ -135,7 +135,7 @@ describe('LoopService', () => {
|
||||
const service = new LoopService(defaultOptions);
|
||||
const result = service.checkSandboxAuth();
|
||||
|
||||
expect(result).toBe(true);
|
||||
expect(result.ready).toBe(true);
|
||||
expect(mockSpawnSync).toHaveBeenCalledWith(
|
||||
'docker',
|
||||
['sandbox', 'run', 'claude', '-p', 'Say OK'],
|
||||
@@ -146,7 +146,7 @@ describe('LoopService', () => {
|
||||
);
|
||||
});
|
||||
|
||||
it('should return false when output does not contain ok', () => {
|
||||
it('should return ready=false when output does not contain ok', () => {
|
||||
mockSpawnSync.mockReturnValue({
|
||||
stdout: 'Error: not authenticated',
|
||||
stderr: '',
|
||||
@@ -159,7 +159,7 @@ describe('LoopService', () => {
|
||||
const service = new LoopService(defaultOptions);
|
||||
const result = service.checkSandboxAuth();
|
||||
|
||||
expect(result).toBe(false);
|
||||
expect(result.ready).toBe(false);
|
||||
});
|
||||
|
||||
it('should check stderr as well as stdout', () => {
|
||||
@@ -175,7 +175,7 @@ describe('LoopService', () => {
|
||||
const service = new LoopService(defaultOptions);
|
||||
const result = service.checkSandboxAuth();
|
||||
|
||||
expect(result).toBe(true);
|
||||
expect(result.ready).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -256,7 +256,7 @@ describe('LoopService', () => {
|
||||
expect(mockSpawnSync).toHaveBeenCalledTimes(3);
|
||||
});
|
||||
|
||||
it('should call spawnSync with docker sandbox run claude -p', async () => {
|
||||
it('should call spawnSync with claude -p by default (non-sandbox)', async () => {
|
||||
mockSpawnSync.mockReturnValue({
|
||||
stdout: 'Done',
|
||||
stderr: '',
|
||||
@@ -274,14 +274,8 @@ describe('LoopService', () => {
|
||||
});
|
||||
|
||||
expect(mockSpawnSync).toHaveBeenCalledWith(
|
||||
'docker',
|
||||
expect.arrayContaining([
|
||||
'sandbox',
|
||||
'run',
|
||||
'claude',
|
||||
'-p',
|
||||
expect.any(String)
|
||||
]),
|
||||
'claude',
|
||||
expect.arrayContaining(['-p', expect.any(String)]),
|
||||
expect.objectContaining({
|
||||
cwd: '/test/project'
|
||||
})
|
||||
@@ -397,7 +391,8 @@ describe('LoopService', () => {
|
||||
expect(fsPromises.mkdir).toHaveBeenCalledWith('/test', {
|
||||
recursive: true
|
||||
});
|
||||
expect(fsPromises.writeFile).toHaveBeenCalledWith(
|
||||
// Uses appendFile instead of writeFile to preserve existing progress
|
||||
expect(fsPromises.appendFile).toHaveBeenCalledWith(
|
||||
'/test/progress.txt',
|
||||
expect.stringContaining('# Task Master Loop Progress'),
|
||||
'utf-8'
|
||||
@@ -449,8 +444,8 @@ describe('LoopService', () => {
|
||||
|
||||
// Verify spawn was called with prompt containing iteration info
|
||||
const spawnCall = mockSpawnSync.mock.calls[0];
|
||||
// Args are ['sandbox', 'run', 'claude', '-p', prompt]
|
||||
const promptArg = spawnCall[1][4];
|
||||
// Args are ['-p', prompt, '--dangerously-skip-permissions'] for non-sandbox
|
||||
const promptArg = spawnCall[1][1];
|
||||
expect(promptArg).toContain('iteration 1 of 1');
|
||||
});
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
/**
|
||||
* @fileoverview Loop Service - Orchestrates running Claude Code in Docker sandbox iterations
|
||||
* @fileoverview Loop Service - Orchestrates running Claude Code iterations (sandbox or CLI mode)
|
||||
*/
|
||||
|
||||
import { spawnSync } from 'node:child_process';
|
||||
import { appendFile, mkdir, readFile, writeFile } from 'node:fs/promises';
|
||||
import { appendFile, mkdir, readFile } from 'node:fs/promises';
|
||||
import path from 'node:path';
|
||||
import { PRESETS, isPreset as checkIsPreset } from '../presets/index.js';
|
||||
import type {
|
||||
@@ -34,7 +34,7 @@ export class LoopService {
|
||||
}
|
||||
|
||||
/** Check if Docker sandbox auth is ready */
|
||||
checkSandboxAuth(): boolean {
|
||||
checkSandboxAuth(): { ready: boolean; error?: string } {
|
||||
const result = spawnSync(
|
||||
'docker',
|
||||
['sandbox', 'run', 'claude', '-p', 'Say OK'],
|
||||
@@ -42,16 +42,29 @@ export class LoopService {
|
||||
cwd: this.projectRoot,
|
||||
timeout: 30000,
|
||||
encoding: 'utf-8',
|
||||
stdio: ['inherit', 'pipe', 'pipe'] // stdin from terminal, capture stdout/stderr
|
||||
stdio: ['inherit', 'pipe', 'pipe']
|
||||
}
|
||||
);
|
||||
|
||||
if (result.error) {
|
||||
const code = (result.error as NodeJS.ErrnoException).code;
|
||||
if (code === 'ENOENT') {
|
||||
return {
|
||||
ready: false,
|
||||
error:
|
||||
'Docker is not installed. Install Docker Desktop to use --sandbox mode.'
|
||||
};
|
||||
}
|
||||
return { ready: false, error: `Docker error: ${result.error.message}` };
|
||||
}
|
||||
|
||||
const output = (result.stdout || '') + (result.stderr || '');
|
||||
return output.toLowerCase().includes('ok');
|
||||
return { ready: output.toLowerCase().includes('ok') };
|
||||
}
|
||||
|
||||
/** Run interactive Docker sandbox session for user authentication */
|
||||
runInteractiveAuth(): void {
|
||||
spawnSync(
|
||||
runInteractiveAuth(): { success: boolean; error?: string } {
|
||||
const result = spawnSync(
|
||||
'docker',
|
||||
[
|
||||
'sandbox',
|
||||
@@ -64,6 +77,34 @@ export class LoopService {
|
||||
stdio: 'inherit'
|
||||
}
|
||||
);
|
||||
|
||||
if (result.error) {
|
||||
const code = (result.error as NodeJS.ErrnoException).code;
|
||||
if (code === 'ENOENT') {
|
||||
return {
|
||||
success: false,
|
||||
error:
|
||||
'Docker is not installed. Install Docker Desktop to use --sandbox mode.'
|
||||
};
|
||||
}
|
||||
return { success: false, error: `Docker error: ${result.error.message}` };
|
||||
}
|
||||
|
||||
if (result.status === null) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Docker terminated abnormally (no exit code)'
|
||||
};
|
||||
}
|
||||
|
||||
if (result.status !== 0) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Docker exited with code ${result.status}`
|
||||
};
|
||||
}
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
/** Run a loop with the given configuration */
|
||||
@@ -80,7 +121,11 @@ export class LoopService {
|
||||
console.log(`━━━ Iteration ${i} of ${config.iterations} ━━━`);
|
||||
|
||||
const prompt = await this.buildPrompt(config, i);
|
||||
const iteration = this.executeIteration(prompt, i);
|
||||
const iteration = this.executeIteration(
|
||||
prompt,
|
||||
i,
|
||||
config.sandbox ?? false
|
||||
);
|
||||
iterations.push(iteration);
|
||||
|
||||
// Check for early exit conditions
|
||||
@@ -135,9 +180,11 @@ export class LoopService {
|
||||
private async initProgressFile(config: LoopConfig): Promise<void> {
|
||||
await mkdir(path.dirname(config.progressFile), { recursive: true });
|
||||
const tagLine = config.tag ? `# Tag: ${config.tag}\n` : '';
|
||||
await writeFile(
|
||||
// Append to existing progress file instead of overwriting
|
||||
await appendFile(
|
||||
config.progressFile,
|
||||
`# Task Master Loop Progress
|
||||
`
|
||||
# Task Master Loop Progress
|
||||
# Started: ${new Date().toISOString()}
|
||||
# Preset: ${config.prompt}
|
||||
# Max Iterations: ${config.iterations}
|
||||
@@ -217,30 +264,64 @@ Loop iteration ${iteration} of ${config.iterations}${tagInfo}`;
|
||||
|
||||
private executeIteration(
|
||||
prompt: string,
|
||||
iterationNum: number
|
||||
iterationNum: number,
|
||||
sandbox: boolean
|
||||
): LoopIteration {
|
||||
const startTime = Date.now();
|
||||
|
||||
const result = spawnSync(
|
||||
'docker',
|
||||
['sandbox', 'run', 'claude', '-p', prompt],
|
||||
{
|
||||
cwd: this.projectRoot,
|
||||
encoding: 'utf-8',
|
||||
maxBuffer: 50 * 1024 * 1024, // 50MB buffer
|
||||
stdio: ['inherit', 'pipe', 'pipe']
|
||||
// Use docker sandbox or plain claude based on config
|
||||
const command = sandbox ? 'docker' : 'claude';
|
||||
const args = sandbox
|
||||
? ['sandbox', 'run', 'claude', '-p', prompt]
|
||||
: ['-p', prompt, '--dangerously-skip-permissions'];
|
||||
|
||||
const result = spawnSync(command, args, {
|
||||
cwd: this.projectRoot,
|
||||
encoding: 'utf-8',
|
||||
maxBuffer: 50 * 1024 * 1024, // 50MB buffer
|
||||
stdio: ['inherit', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
// Check for spawn-level errors (command not found, permission denied, etc.)
|
||||
if (result.error) {
|
||||
const code = (result.error as NodeJS.ErrnoException).code;
|
||||
let errorMessage: string;
|
||||
|
||||
if (code === 'ENOENT') {
|
||||
errorMessage = sandbox
|
||||
? 'Docker is not installed. Install Docker Desktop to use --sandbox mode.'
|
||||
: 'Claude CLI is not installed. Install with: npm install -g @anthropic-ai/claude-code';
|
||||
} else if (code === 'EACCES') {
|
||||
errorMessage = `Permission denied executing '${command}'`;
|
||||
} else {
|
||||
errorMessage = `Failed to execute '${command}': ${result.error.message}`;
|
||||
}
|
||||
);
|
||||
|
||||
console.error(`[Loop Error] ${errorMessage}`);
|
||||
return {
|
||||
iteration: iterationNum,
|
||||
status: 'error',
|
||||
duration: Date.now() - startTime,
|
||||
message: errorMessage
|
||||
};
|
||||
}
|
||||
|
||||
const output = (result.stdout || '') + (result.stderr || '');
|
||||
|
||||
// Print output to console (spawnSync with pipe captures but doesn't display)
|
||||
if (output) console.log(output);
|
||||
|
||||
const { status, message } = this.parseCompletion(
|
||||
output,
|
||||
result.status ?? 1
|
||||
);
|
||||
// Handle null status (spawn failed but no error object - shouldn't happen but be safe)
|
||||
if (result.status === null) {
|
||||
return {
|
||||
iteration: iterationNum,
|
||||
status: 'error',
|
||||
duration: Date.now() - startTime,
|
||||
message: 'Command terminated abnormally (no exit code)'
|
||||
};
|
||||
}
|
||||
|
||||
const { status, message } = this.parseCompletion(output, result.status);
|
||||
return {
|
||||
iteration: iterationNum,
|
||||
status,
|
||||
|
||||
@@ -26,6 +26,8 @@ export interface LoopConfig {
|
||||
sleepSeconds: number;
|
||||
/** Tag context to operate on (optional) */
|
||||
tag?: string;
|
||||
/** Run Claude in Docker sandbox mode (default: false) */
|
||||
sandbox?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -0,0 +1,264 @@
/**
 * Tests for FileOperations class
 * Focuses on modifyJson and cross-process locking functionality
 */

import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import fs from 'node:fs/promises';
import fsSync from 'node:fs';
import path from 'node:path';
import os from 'node:os';
import { FileOperations } from './file-operations.js';

describe('FileOperations', () => {
  let tempDir: string;
  let testFilePath: string;
  let fileOps: FileOperations;

  beforeEach(async () => {
    // Create a temp directory for each test
    tempDir = fsSync.mkdtempSync(path.join(os.tmpdir(), 'tm-core-test-'));
    testFilePath = path.join(tempDir, 'test.json');
    fileOps = new FileOperations();
  });

  afterEach(async () => {
    // Clean up
    await fileOps.cleanup();
    if (tempDir && fsSync.existsSync(tempDir)) {
      fsSync.rmSync(tempDir, { recursive: true, force: true });
    }
  });

  describe('modifyJson', () => {
    it('should modify existing JSON data', async () => {
      // Set up initial data
      await fs.writeFile(testFilePath, JSON.stringify({ count: 0 }));

      // Modify data
      await fileOps.modifyJson(testFilePath, (data: { count: number }) => ({
        ...data,
        count: data.count + 1
      }));

      // Verify
      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.count).toBe(1);
    });

    it('should create file if it does not exist', async () => {
      const newFilePath = path.join(tempDir, 'new-file.json');

      await fileOps.modifyJson(newFilePath, () => ({ created: true }));

      expect(fsSync.existsSync(newFilePath)).toBe(true);
      const result = JSON.parse(await fs.readFile(newFilePath, 'utf-8'));
      expect(result.created).toBe(true);
    });

    it('should handle async modifier functions', async () => {
      await fs.writeFile(testFilePath, JSON.stringify({ value: 'initial' }));

      await fileOps.modifyJson(
        testFilePath,
        async (data: { value: string }) => {
          // Simulate async operation
          await new Promise((resolve) => setTimeout(resolve, 10));
          return { ...data, value: 'modified' };
        }
      );

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.value).toBe('modified');
    });

    it('should re-read file inside lock to prevent stale data', async () => {
      // Initial data
      await fs.writeFile(testFilePath, JSON.stringify({ version: 1 }));

      // Simulate two sequential modifications
      await fileOps.modifyJson(testFilePath, (data: { version: number }) => ({
        version: data.version + 1
      }));

      await fileOps.modifyJson(testFilePath, (data: { version: number }) => ({
        version: data.version + 1
      }));

      // Both modifications should have been applied
      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.version).toBe(3);
    });

    it('should not leave lock files on success', async () => {
      await fs.writeFile(testFilePath, JSON.stringify({}));

      await fileOps.modifyJson(testFilePath, (data) => ({
        ...data,
        modified: true
      }));

      // Check no lock files exist
      const files = await fs.readdir(tempDir);
      const lockFiles = files.filter((f) => f.endsWith('.lock'));
      expect(lockFiles).toHaveLength(0);
    });

    it('should release lock even if modifier throws', async () => {
      await fs.writeFile(testFilePath, JSON.stringify({}));

      await expect(
        fileOps.modifyJson(testFilePath, () => {
          throw new Error('Modifier error');
        })
      ).rejects.toThrow('Modifier error');

      // Should still be able to acquire lock for another operation
      await fileOps.modifyJson(testFilePath, () => ({ recovered: true }));

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.recovered).toBe(true);
    });

    it('should handle empty file gracefully', async () => {
      // Create empty file
      await fs.writeFile(testFilePath, '');

      await fileOps.modifyJson(testFilePath, () => ({ initialized: true }));

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.initialized).toBe(true);
    });

    it('should handle file with only whitespace', async () => {
      await fs.writeFile(testFilePath, ' \n ');

      await fileOps.modifyJson(testFilePath, () => ({ initialized: true }));

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.initialized).toBe(true);
    });

    it('should throw on corrupted JSON', async () => {
      // Write invalid JSON that is not empty
      await fs.writeFile(testFilePath, '{ invalid json content');

      await expect(
        fileOps.modifyJson(testFilePath, (data) => data)
      ).rejects.toThrow(/Corrupted JSON/);
    });

    it('should preserve complex nested structures', async () => {
      const complexData = {
        tasks: [
          {
            id: 1,
            title: 'Task 1',
            subtasks: [{ id: '1.1', title: 'Subtask' }]
          }
        ],
        metadata: {
          created: '2024-01-01',
          tags: ['tag1', 'tag2']
        }
      };
      await fs.writeFile(testFilePath, JSON.stringify(complexData, null, 2));

      await fileOps.modifyJson(testFilePath, (data: typeof complexData) => ({
        ...data,
        tasks: [...data.tasks, { id: 2, title: 'Task 2', subtasks: [] }]
      }));

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.tasks).toHaveLength(2);
      expect(result.tasks[0].subtasks).toHaveLength(1);
      expect(result.metadata.tags).toEqual(['tag1', 'tag2']);
    });
  });

  describe('concurrent operations', () => {
    it('should serialize truly concurrent modifyJson calls', async () => {
      // Initial data
      await fs.writeFile(testFilePath, JSON.stringify({ count: 0 }));

      const numConcurrentWrites = 5;
      const writes = [];

      for (let i = 0; i < numConcurrentWrites; i++) {
        writes.push(
          fileOps.modifyJson(testFilePath, (data: { count: number }) => ({
            count: data.count + 1
          }))
        );
      }

      await Promise.all(writes);

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.count).toBe(numConcurrentWrites);
    });

    it('should handle concurrent writes from multiple FileOperations instances', async () => {
      // Initial data
      await fs.writeFile(testFilePath, JSON.stringify({ count: 0 }));

      const numInstances = 3;
      const instances = Array.from(
        { length: numInstances },
        () => new FileOperations()
      );
      const writes = instances.map((ops) =>
        ops.modifyJson(testFilePath, (data: { count: number }) => ({
          count: data.count + 1
        }))
      );

      await Promise.all(writes);

      // Cleanup all instances
      await Promise.all(instances.map((ops) => ops.cleanup()));

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.count).toBe(numInstances);
    });
  });

  describe('writeJson', () => {
    it('should write JSON atomically', async () => {
      const data = { test: 'value' };

      await fileOps.writeJson(testFilePath, data);

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.test).toBe('value');
    });

    it('should not leave temp files on success', async () => {
      await fileOps.writeJson(testFilePath, { test: true });

      const files = await fs.readdir(tempDir);
      const tempFiles = files.filter((f) => f.includes('.tmp'));
      expect(tempFiles).toHaveLength(0);

      // Also verify no lock files remain
      const lockFiles = files.filter((f) => f.endsWith('.lock'));
      expect(lockFiles).toHaveLength(0);
    });
  });

  describe('cleanup', () => {
    it('should clear cached writers', async () => {
      // Write to create a cached writer
      await fileOps.writeJson(testFilePath, { test: 1 });

      // Cleanup
      await fileOps.cleanup();

      // Should still work after cleanup (creates new writer)
      await fileOps.writeJson(testFilePath, { test: 2 });

      const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
      expect(result.test).toBe(2);
    });
  });
});
@@ -1,16 +1,52 @@
/**
 * @fileoverview File operations with atomic writes and cross-process locking
 *
 * Uses steno for atomic writes (same pattern as workflow-state-manager.ts)
 * and proper-lockfile for cross-process locking to prevent lost updates.
 */

import { constants } from 'node:fs';
import fs from 'node:fs/promises';
import path from 'node:path';
import lockfile from 'proper-lockfile';
import { Writer } from 'steno';
import type { FileStorageData } from './format-handler.js';

/**
 * File locking configuration for cross-process safety
 */
const LOCK_OPTIONS = {
  stale: 10000, // Consider lock stale after 10 seconds
  retries: {
    retries: 5,
    factor: 2,
    minTimeout: 100,
    maxTimeout: 1000
  },
  realpath: false // Don't resolve symlinks (faster)
};
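A minimal usage sketch of proper-lockfile with these options (target path hypothetical). With `retries: 5`, `factor: 2` and the min/max timeouts above, the exponential back-off waits land at roughly 100, 200, 400, 800 and 1000 ms:

```typescript
import lockfile from 'proper-lockfile';

// lock() resolves to a release function once the lock is held
const release = await lockfile.lock('/tmp/tasks.json', LOCK_OPTIONS);
try {
  // ... critical section: read-modify-write the locked file ...
} finally {
  await release();
}
```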
/**
 * Handles atomic file operations with cross-process locking mechanism.
 *
 * Writers are cached for reuse. Call {@link cleanup} when disposing of
 * long-lived instances to prevent memory leaks.
 */
export class FileOperations {
  /** Map of file paths to steno Writers for reuse */
  private writers = new Map<string, Writer>();

  /**
   * Get or create a steno Writer for a file path
   */
  private getWriter(filePath: string): Writer {
    let writer = this.writers.get(filePath);
    if (!writer) {
      writer = new Writer(filePath);
      this.writers.set(filePath, writer);
    }
    return writer;
  }

  /**
   * Read and parse JSON file
@@ -31,52 +67,132 @@ export class FileOperations {
  }

  /**
   * Write JSON file with atomic operation and cross-process locking.
   * Uses steno for atomic writes and proper-lockfile for cross-process safety.
   * WARNING: This replaces the entire file. For concurrent modifications,
   * use modifyJson() instead to prevent lost updates.
   */
  async writeJson(
    filePath: string,
    data: FileStorageData | any
  ): Promise<void> {
    // Ensure file exists for locking (proper-lockfile requires this)
    await this.ensureFileExists(filePath);

    // Acquire cross-process lock
    let release: (() => Promise<void>) | null = null;
    try {
      release = await lockfile.lock(filePath, LOCK_OPTIONS);

      // Use steno Writer for atomic writes (same pattern as workflow-state-manager)
      const content = JSON.stringify(data, null, 2);
      const writer = this.getWriter(filePath);
      await writer.write(content);
    } finally {
      if (release) {
        try {
          await release();
        } catch (err: any) {
          // Log but don't throw - lock may have been released already
          // Other errors should be visible for debugging
          if (process.env.DEBUG || process.env.TASKMASTER_DEBUG === 'true') {
            console.warn(
              `[WARN] Lock release warning for ${filePath}: ${err.message}`
            );
          }
        }
      }
    }
  }

  /**
   * Read-modify-write JSON file with cross-process locking.
   * Uses steno for atomic writes and proper-lockfile for cross-process safety.
   * Re-reads file inside lock to prevent lost updates from stale snapshots.
   * @param filePath - Path to the JSON file
   * @param modifier - Function that receives current data and returns modified data
   */
  async modifyJson<T = any>(
    filePath: string,
    modifier: (currentData: T) => T | Promise<T>
  ): Promise<void> {
    // Ensure file exists for locking (proper-lockfile requires this)
    await this.ensureFileExists(filePath);

    // Acquire cross-process lock
    let release: (() => Promise<void>) | null = null;
    try {
      release = await lockfile.lock(filePath, LOCK_OPTIONS);

      // Re-read file INSIDE lock to get current state
      // This prevents lost updates from stale snapshots
      let currentData: T;
      try {
        const content = await fs.readFile(filePath, 'utf-8');
        currentData = JSON.parse(content);
      } catch (err: any) {
        // Distinguish between expected empty/new files and actual corruption
        if (err.code === 'ENOENT') {
          // File doesn't exist yet - start fresh
          currentData = {} as T;
        } else if (err instanceof SyntaxError) {
          // Check if it's just an empty file (our ensureFileExists writes '{}')
          const content = await fs.readFile(filePath, 'utf-8').catch(() => '');
          if (content.trim() === '' || content.trim() === '{}') {
            currentData = {} as T;
          } else {
            // Actual JSON corruption - this is a serious error
            throw new Error(
              `Corrupted JSON in ${filePath}: ${err.message}. File contains: ${content.substring(0, 100)}...`
            );
          }
        } else {
          // Other errors (permission, I/O) should be surfaced
          throw new Error(
            `Failed to read ${filePath} for modification: ${err.message}`
          );
        }
      }

      // Apply modification
      const newData = await modifier(currentData);

      // Write atomically using steno (same pattern as workflow-state-manager)
      const content = JSON.stringify(newData, null, 2);
      const writer = this.getWriter(filePath);
      await writer.write(content);
    } finally {
      if (release) {
        try {
          await release();
        } catch (err: any) {
          // Log but don't throw - lock may have been released already
          // Other errors should be visible for debugging
          if (process.env.DEBUG || process.env.TASKMASTER_DEBUG === 'true') {
            console.warn(
              `[WARN] Lock release warning for ${filePath}: ${err.message}`
            );
          }
        }
      }
    }
  }

  /**
   * Ensure file exists for locking (proper-lockfile requires the file to exist).
   * Uses atomic creation with 'wx' flag to prevent TOCTOU race conditions.
   */
  private async ensureFileExists(filePath: string): Promise<void> {
    const dir = path.dirname(filePath);
    await fs.mkdir(dir, { recursive: true });
    try {
      // Use 'wx' flag for atomic create - fails if file exists (prevents race)
      await fs.writeFile(filePath, '{}', { flag: 'wx' });
    } catch (err: any) {
      // EEXIST is expected if another process created the file - that's fine
      if (err.code !== 'EEXIST') {
        throw err;
      }
    }
  }
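A hedged usage sketch of modifyJson (file path illustrative): because the current contents are re-read inside the lock, two concurrent increments cannot lose an update:

```typescript
const fileOps = new FileOperations();

// Both calls serialize on the cross-process lock; the second sees the
// first one's write, so the final count is 2, never 1.
await Promise.all([
  fileOps.modifyJson<{ count?: number }>('/tmp/counter.json', (d) => ({
    count: (d.count ?? 0) + 1
  })),
  fileOps.modifyJson<{ count?: number }>('/tmp/counter.json', (d) => ({
    count: (d.count ?? 0) + 1
  }))
]);
await fileOps.cleanup();
```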
@@ -159,13 +275,14 @@ export class FileOperations {
  }

  /**
   * Clean up resources - releases cached steno Writers.
   * Call this when the FileOperations instance is no longer needed
   * to prevent memory leaks in long-running processes.
   */
  async cleanup(): Promise<void> {
    // Clear cached Writers to allow garbage collection
    // Note: steno Writers don't have explicit close methods;
    // they handle file descriptor cleanup internally
    this.writers.clear();
  }
}
@@ -220,6 +220,7 @@ export class FileStorage implements IStorage {

  /**
   * Save tasks for a specific tag in the single tasks.json file
   * Uses modifyJson for atomic read-modify-write to prevent lost updates
   */
  async saveTasks(tasks: Task[], tag?: string): Promise<void> {
    const filePath = this.pathResolver.getTasksPath();
@@ -228,17 +229,6 @@ export class FileStorage implements IStorage {
    // Ensure directory exists
    await this.fileOps.ensureDir(this.pathResolver.getTasksDir());

    // Create metadata for this tag
    const metadata: TaskMetadata = {
      version: '1.0.0',
@@ -251,43 +241,44 @@ export class FileStorage implements IStorage {
    // Normalize tasks
    const normalizedTasks = this.normalizeTaskIds(tasks);

    // Use modifyJson for atomic read-modify-write
    await this.fileOps.modifyJson(filePath, (existingData: any) => {
      // Update the specific tag in the existing data structure
      if (
        this.formatHandler.detectFormat(existingData) === 'legacy' ||
        Object.keys(existingData).some(
          (key) => key !== 'tasks' && key !== 'metadata'
        )
      ) {
        // Legacy format - update/add the tag
        existingData[resolvedTag] = {
          tasks: normalizedTasks,
          metadata
        };
        return existingData;
      } else if (resolvedTag === 'master') {
        // Standard format for master tag
        return {
          tasks: normalizedTasks,
          metadata
        };
      } else {
        // Convert to legacy format when adding non-master tags
        const masterTasks = existingData.tasks || [];
        const masterMetadata = existingData.metadata || metadata;

        return {
          master: {
            tasks: masterTasks,
            metadata: masterMetadata
          },
          [resolvedTag]: {
            tasks: normalizedTasks,
            metadata
          }
        };
      }
    });
  }
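A brief usage sketch (storage construction elided; tag name illustrative):

```typescript
// The read-merge-write of tasks.json now happens entirely under the file lock
await storage.saveTasks(updatedTasks, 'feature-x');
```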
/**
@@ -594,6 +585,7 @@ export class FileStorage implements IStorage {

  /**
   * Create a new tag in the tasks.json file
   * Uses modifyJson for atomic read-modify-write to prevent lost updates
   */
  async createTag(
    tagName: string,
@@ -602,62 +594,33 @@ export class FileStorage implements IStorage {
    const filePath = this.pathResolver.getTasksPath();

    try {
      await this.fileOps.modifyJson(filePath, (existingData: any) => {
        const format = this.formatHandler.detectFormat(existingData);

        if (format === 'legacy') {
          // Legacy format - add new tag key
          if (tagName in existingData) {
            throw new TaskMasterError(
              `Tag ${tagName} already exists`,
              ERROR_CODES.VALIDATION_ERROR
            );
          }

          // Get tasks to copy if specified
          let tasksToCopy: any[] = [];
          if (options?.copyFrom) {
            if (
              options.copyFrom in existingData &&
              existingData[options.copyFrom].tasks
            ) {
              tasksToCopy = JSON.parse(
                JSON.stringify(existingData[options.copyFrom].tasks)
              );
            }
          }

          // Create new tag structure
          existingData[tagName] = {
            tasks: tasksToCopy,
            metadata: {
              created: new Date().toISOString(),
@@ -667,11 +630,39 @@ export class FileStorage implements IStorage {
                `Tag created on ${new Date().toLocaleDateString()}`,
              tags: [tagName]
            }
          };
          return existingData;
        } else {
          // Standard format - need to convert to legacy format first
          const masterTasks = existingData.tasks || [];
          const masterMetadata = existingData.metadata || {};

          // Get tasks to copy (from master in this case)
          let tasksToCopy: any[] = [];
          if (options?.copyFrom === 'master' || !options?.copyFrom) {
            tasksToCopy = JSON.parse(JSON.stringify(masterTasks));
          }

          return {
            master: {
              tasks: masterTasks,
              metadata: { ...masterMetadata, tags: ['master'] }
            },
            [tagName]: {
              tasks: tasksToCopy,
              metadata: {
                created: new Date().toISOString(),
                updatedAt: new Date().toISOString(),
                description:
                  options?.description ||
                  `Tag created on ${new Date().toLocaleDateString()}`,
                tags: [tagName]
              }
            }
          };
        }
      });
    } catch (error: any) {
      if (error.code === 'ENOENT') {
        throw new Error('Tasks file not found - initialize project first');
@@ -682,26 +673,41 @@ export class FileStorage implements IStorage {

  /**
   * Delete a tag from the single tasks.json file
   * Uses modifyJson for atomic read-modify-write to prevent lost updates
   */
  async deleteTag(tag: string): Promise<void> {
    const filePath = this.pathResolver.getTasksPath();

    try {
      // Use modifyJson to handle all cases atomically
      let shouldDeleteFile = false;

      await this.fileOps.modifyJson(filePath, (data: any) => {
        if (
          this.formatHandler.detectFormat(data) !== 'legacy' &&
          tag === 'master'
        ) {
          // Standard format - mark for file deletion after lock release
          shouldDeleteFile = true;
          return data; // Return unchanged, we'll delete the file after
        }

        if (this.formatHandler.detectFormat(data) === 'legacy') {
          // Legacy format - remove the tag key
          if (tag in data) {
            delete data[tag];
            return data;
          } else {
            throw new Error(`Tag ${tag} not found`);
          }
        } else {
          throw new Error(`Tag ${tag} not found in standard format`);
        }
      });

      // Delete the file if we're removing master tag from standard format
      if (shouldDeleteFile) {
        await this.fileOps.deleteFile(filePath);
      }
    } catch (error: any) {
      if (error.code === 'ENOENT') {
@@ -713,44 +719,43 @@ export class FileStorage implements IStorage {

  /**
   * Rename a tag within the single tasks.json file
   * Uses modifyJson for atomic read-modify-write to prevent lost updates
   */
  async renameTag(oldTag: string, newTag: string): Promise<void> {
    const filePath = this.pathResolver.getTasksPath();

    try {
      await this.fileOps.modifyJson(filePath, (existingData: any) => {
        if (this.formatHandler.detectFormat(existingData) === 'legacy') {
          // Legacy format - rename the tag key
          if (oldTag in existingData) {
            existingData[newTag] = existingData[oldTag];
            delete existingData[oldTag];

            // Update metadata tags array
            if (existingData[newTag].metadata) {
              existingData[newTag].metadata.tags = [newTag];
            }

            return existingData;
          } else {
            throw new Error(`Tag ${oldTag} not found`);
          }
        } else if (oldTag === 'master') {
          // Convert standard format to legacy when renaming master
          const masterTasks = existingData.tasks || [];
          const masterMetadata = existingData.metadata || {};

          return {
            [newTag]: {
              tasks: masterTasks,
              metadata: { ...masterMetadata, tags: [newTag] }
            }
          };
        } else {
          throw new Error(`Tag ${oldTag} not found in standard format`);
        }
      });
    } catch (error: any) {
      if (error.code === 'ENOENT') {
        throw new Error(`Tag ${oldTag} not found - file doesn't exist`);
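For orientation, a hedged sketch of the tag lifecycle these methods implement (instance construction elided; tag names illustrative):

```typescript
await storage.createTag('release-1', { copyFrom: 'master' }); // copy master's tasks
await storage.renameTag('release-1', 'release-1.0');          // keeps metadata.tags in sync
await storage.deleteTag('release-1.0');                       // removes the tag key atomically
```

Each call is a single modifyJson transaction, so a concurrent writer in another process cannot interleave between the read and the write.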
6
packages/tm-core/src/modules/tasks/utils/index.ts
Normal file
@@ -0,0 +1,6 @@
/**
 * @fileoverview Task utility exports
 * Re-exports task filtering and analysis utilities
 */

export * from './task-filters.js';
168
packages/tm-core/src/modules/tasks/utils/task-filters.ts
Normal file
@@ -0,0 +1,168 @@
/**
 * @fileoverview Task filtering utilities for dependency and readiness analysis
 * Business logic for filtering tasks by actionable status, dependencies, and blocking relationships
 */

import type { Task, TaskStatus } from '../../../common/types/index.js';
import {
  TASK_STATUSES,
  isTaskComplete
} from '../../../common/constants/index.js';
import { getLogger } from '../../../common/logger/index.js';

const logger = getLogger('TaskFilters');

/**
 * Task with blocks field (inverse of dependencies)
 * A task's blocks array contains IDs of tasks that depend on it
 */
export type TaskWithBlocks = Task & { blocks: string[] };

/**
 * Statuses that are actionable (not deferred, blocked, or terminal)
 * Tasks with these statuses can be worked on when dependencies are satisfied
 */
export const ACTIONABLE_STATUSES: readonly TaskStatus[] = [
  'pending',
  'in-progress',
  'review'
] as const;

/**
 * Invalid dependency reference (task depends on non-existent task)
 */
export interface InvalidDependency {
  /** ID of the task with the invalid dependency */
  taskId: string;
  /** ID of the non-existent dependency */
  depId: string;
}

/**
 * Result of building the blocks map with validation information
 */
export interface BuildBlocksMapResult {
  /** Map of task ID -> array of task IDs that depend on it */
  blocksMap: Map<string, string[]>;
  /** Array of invalid dependency references (dependencies to non-existent tasks) */
  invalidDependencies: InvalidDependency[];
}

/**
 * Build a map of task ID -> array of task IDs that depend on it (blocks)
 * This is the inverse of the dependencies relationship
 *
 * Also validates dependencies and returns any references to non-existent tasks.
 *
 * @param tasks - Array of tasks to analyze
 * @returns Object containing the blocks map and any invalid dependency references
 *
 * @example
 * ```typescript
 * const tasks = [
 *   { id: '1', dependencies: [] },
 *   { id: '2', dependencies: ['1'] },
 *   { id: '3', dependencies: ['1', '2'] }
 * ];
 * const { blocksMap, invalidDependencies } = buildBlocksMap(tasks);
 * // blocksMap.get('1') => ['2', '3'] // Task 1 blocks tasks 2 and 3
 * // blocksMap.get('2') => ['3']      // Task 2 blocks task 3
 * // blocksMap.get('3') => []         // Task 3 blocks nothing
 * // invalidDependencies => []        // No invalid deps in this example
 * ```
 */
export function buildBlocksMap(tasks: Task[]): BuildBlocksMapResult {
  const blocksMap = new Map<string, string[]>(
    tasks.map((task) => [String(task.id), []])
  );
  const invalidDependencies: InvalidDependency[] = [];

  // For each task, add it to the blocks list of each of its dependencies
  for (const task of tasks) {
    for (const depId of task.dependencies ?? []) {
      const depIdStr = String(depId);
      const blocks = blocksMap.get(depIdStr);
      if (blocks) {
        blocks.push(String(task.id));
      } else {
        // Dependency references a non-existent task
        invalidDependencies.push({
          taskId: String(task.id),
          depId: depIdStr
        });
      }
    }
  }

  return { blocksMap, invalidDependencies };
}

/**
 * Filter to only tasks that are ready to work on
 * A task is ready when:
 * 1. It has an actionable status (pending, in-progress, or review)
 * 2. All its dependencies are complete (done, completed, or cancelled)
 *
 * @param tasks - Array of tasks with blocks information
 * @returns Filtered array of tasks that are ready to work on
 *
 * @example
 * ```typescript
 * const tasks = [
 *   { id: '1', status: 'done', dependencies: [], blocks: ['2'] },
 *   { id: '2', status: 'pending', dependencies: ['1'], blocks: [] },
 *   { id: '3', status: 'pending', dependencies: ['2'], blocks: [] }
 * ];
 * const readyTasks = filterReadyTasks(tasks);
 * // Returns only task 2: status is actionable and dependency '1' is done
 * // Task 3 is not ready because dependency '2' is still pending
 * ```
 */
export function filterReadyTasks(tasks: TaskWithBlocks[]): TaskWithBlocks[] {
  // Build set of completed task IDs for dependency checking
  const completedIds = new Set<string>(
    tasks.filter((t) => isTaskComplete(t.status)).map((t) => String(t.id))
  );

  return tasks.filter((task) => {
    // Validate status is a known value
    if (!TASK_STATUSES.includes(task.status)) {
      logger.warn(
        `Task ${task.id} has unexpected status "${task.status}". Valid statuses are: ${TASK_STATUSES.join(', ')}`
      );
    }

    // Must be in an actionable status (excludes deferred, blocked, done, cancelled)
    if (!ACTIONABLE_STATUSES.includes(task.status)) {
      return false;
    }

    // Ready if no dependencies or all dependencies are completed
    const deps = task.dependencies ?? [];
    return deps.every((depId) => completedIds.has(String(depId)));
  });
}

/**
 * Filter to only tasks that block other tasks
 * These are tasks that have at least one other task depending on them
 *
 * @param tasks - Array of tasks with blocks information
 * @returns Filtered array of tasks that have dependents (block other tasks)
 *
 * @example
 * ```typescript
 * const tasks = [
 *   { id: '1', blocks: ['2', '3'] }, // Blocks tasks 2 and 3
 *   { id: '2', blocks: [] },         // Blocks nothing
 *   { id: '3', blocks: [] }          // Blocks nothing
 * ];
 * const blockingTasks = filterBlockingTasks(tasks);
 * // Returns only task 1 (the only task with non-empty blocks)
 * ```
 */
export function filterBlockingTasks(
  tasks: TaskWithBlocks[]
): TaskWithBlocks[] {
  return tasks.filter((task) => task.blocks.length > 0);
}
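A short sketch wiring the three utilities together, mirroring the `--ready --blocking` combination from the list command (the task array is assumed):

```typescript
const { blocksMap, invalidDependencies } = buildBlocksMap(tasks);
const withBlocks: TaskWithBlocks[] = tasks.map((t) => ({
  ...t,
  blocks: blocksMap.get(String(t.id)) ?? []
}));
const ready = filterReadyTasks(withBlocks);    // dependencies satisfied
const highImpact = filterBlockingTasks(ready); // ready AND blocking others
```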
@@ -6,14 +6,15 @@
 *
 * FILE STORAGE (local):
 * - Main tasks: "1", "2", "15"
 * - Subtasks: "1.2", "15.3"
 * - Sub-subtasks: "1.2.3", "15.3.1"
 *
 * API STORAGE (Hamster):
 * - Main tasks: "HAM-1", "ham-1", "HAM1", "ham1" (all normalized to "HAM-1")
 * - Variable-length prefixes: "PROJ-456", "TAS-1", "abc-999"
 * - No subtasks (API doesn't use dot notation)
 *
 * NOT supported:
 * - API subtasks: "HAM-1.2" (doesn't exist)
 */

@@ -24,10 +25,10 @@ import { normalizeDisplayId } from '../../../common/schemas/task-id.schema.js';
 * Pattern for validating a single task ID
 * Permissive input - accepts with or without hyphen for API IDs
 * - Numeric: "1", "15", "999"
 * - Numeric subtasks: "1.2", "1.2.3" (multi-level supported)
 * - API display IDs: "HAM-1", "PROJ-456", "TAS-1", "abc-999"
 */
export const TASK_ID_PATTERN = /^(\d+(\.\d+)*|[A-Za-z]+-?\d+)$/;

/**
 * Validates a single task ID string
@@ -39,9 +40,10 @@ export const TASK_ID_PATTERN = /^(\d+(\.\d+)?|[A-Za-z]{3}-?\d+)$/;
 * ```typescript
 * isValidTaskIdFormat("1"); // true
 * isValidTaskIdFormat("1.2"); // true
 * isValidTaskIdFormat("1.2.3"); // true (multi-level subtasks)
 * isValidTaskIdFormat("HAM-1"); // true
 * isValidTaskIdFormat("PROJ-456"); // true (variable-length prefix)
 * isValidTaskIdFormat("ham1"); // true (permissive input)
 * isValidTaskIdFormat("HAM-1.2"); // false (no API subtasks)
 * isValidTaskIdFormat("abc"); // false
 * ```
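A quick sanity check of the widened pattern (values taken from the examples above):

```typescript
TASK_ID_PATTERN.test('1.2.3');    // true  - multi-level subtasks now allowed
TASK_ID_PATTERN.test('PROJ-456'); // true  - prefix no longer fixed at 3 letters
TASK_ID_PATTERN.test('ham1');     // true  - hyphen remains optional
TASK_ID_PATTERN.test('HAM-1.2');  // false - API subtasks are still rejected
```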
@@ -10,6 +10,8 @@
  },
  "scripts": {
    "test": "vitest run",
    "test:unit": "vitest run '**/*.spec.ts'",
    "test:integration": "vitest run '**/*.test.ts'",
    "test:watch": "vitest",
    "test:coverage": "vitest run --coverage",
    "lint": "biome check --write",
@@ -130,41 +130,21 @@
    ],
    "codex-cli": [
      {
        "id": "gpt-5.2-codex",
        "name": "GPT-5.2 Codex",
        "swe_score": 0.82,
        "cost_per_1m_tokens": {
          "input": 0,
          "output": 0
        },
        "allowed_roles": ["main", "fallback", "research"],
        "max_tokens": 128000,
        "supported": true
      },
      {
        "id": "gpt-5.1",
        "swe_score": 0.76,
        "cost_per_1m_tokens": {
          "input": 0,
          "output": 0
        },
        "allowed_roles": ["main", "fallback", "research"],
        "max_tokens": 128000,
        "reasoning_efforts": ["none", "low", "medium", "high", "xhigh"],
        "supported": true
      },
      {
        "id": "gpt-5.1-codex-max",
        "name": "GPT-5.1 Codex Max",
        "swe_score": 0.78,
        "cost_per_1m_tokens": {
          "input": 0,
@@ -175,8 +155,22 @@
        "reasoning_efforts": ["none", "low", "medium", "high", "xhigh"],
        "supported": true
      },
      {
        "id": "gpt-5.1-codex-mini",
        "name": "GPT-5.1 Codex Mini",
        "swe_score": 0.72,
        "cost_per_1m_tokens": {
          "input": 0,
          "output": 0
        },
        "allowed_roles": ["main", "fallback", "research"],
        "max_tokens": 128000,
        "reasoning_efforts": ["none", "low", "medium", "high"],
        "supported": true
      },
      {
        "id": "gpt-5.2",
        "name": "GPT-5.2",
        "swe_score": 0.8,
        "cost_per_1m_tokens": {
          "input": 0,
@@ -10,6 +10,7 @@ import {
  tryListTagsViaRemote,
  tryUseTagViaRemote
} from '@tm/bridge';
import { filterReadyTasks, isTaskComplete } from '@tm/core';
import { displayBanner, getStatusWithColor } from '../ui.js';
import {
  findProjectRoot,
@@ -531,6 +532,7 @@ async function enhanceTagsWithMetadata(tasksPath, rawData, context = {}) {
 * @param {Object} options - Options object
 * @param {boolean} [options.showTaskCounts=true] - Whether to show task counts
 * @param {boolean} [options.showMetadata=false] - Whether to show metadata
 * @param {boolean} [options.ready=false] - Whether to filter to only tags with ready tasks
 * @param {Object} context - Context object containing session and projectRoot
 * @param {string} [context.projectRoot] - Project root path
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
@@ -544,7 +546,11 @@ async function tags(
  outputFormat = 'text'
) {
  const { mcpLog, projectRoot } = context;
  const {
    showTaskCounts = true,
    showMetadata = false,
    ready = false
  } = options;

  // Create a consistent logFn object regardless of context
  const logFn = mcpLog || {
@@ -619,12 +625,16 @@
      const tasks = tagData.tasks || [];
      const metadata = tagData.metadata || {};

      // Use centralized filtering from @tm/core
      // Note: filterReadyTasks expects TaskWithBlocks[] but only uses status/dependencies at runtime
      const tasksWithBlocks = tasks.map((t) => ({ ...t, blocks: [] }));
      const readyTasks = filterReadyTasks(tasksWithBlocks);

      tagList.push({
        name: tagName,
        isCurrent: tagName === currentTag,
        completedTasks: tasks.filter((t) => isTaskComplete(t.status)).length,
        readyTasks: readyTasks.length,
        tasks: tasks || [],
        created: metadata.created || 'Unknown',
        description: metadata.description || 'No description'
@@ -638,22 +648,32 @@
      return a.name.localeCompare(b.name);
    });

    // Filter to only tags with ready tasks if --ready flag is set
    let filteredTagList = tagList;
    if (ready) {
      filteredTagList = tagList.filter((tag) => tag.readyTasks > 0);
      logFn.info(`Filtered to ${filteredTagList.length} tags with ready tasks`);
    }

    logFn.success(`Found ${filteredTagList.length} tags`);

    // For JSON output, return structured data
    if (outputFormat === 'json') {
      return {
        tags: filteredTagList,
        currentTag,
        totalTags: filteredTagList.length
      };
    }

    // For text output, display formatted table
    if (outputFormat === 'text') {
      if (filteredTagList.length === 0) {
        const message = ready
          ? 'No tags with ready tasks found'
          : 'No tags found';
        console.log(
          boxen(chalk.yellow(message), {
            padding: 1,
            borderColor: 'yellow',
            borderStyle: 'round',
@@ -667,7 +687,8 @@
      const headers = [chalk.cyan.bold('Tag Name')];
      if (showTaskCounts) {
        headers.push(chalk.cyan.bold('Tasks'));
        headers.push(chalk.cyan.bold('Ready'));
        headers.push(chalk.cyan.bold('Done'));
      }
      if (showMetadata) {
        headers.push(chalk.cyan.bold('Created'));
@@ -680,16 +701,16 @@

      let colWidths;
      if (showMetadata) {
        // With metadata: Tag Name, Tasks, Ready, Done, Created, Description
        const widths = [0.22, 0.08, 0.08, 0.08, 0.14, 0.38];
        colWidths = widths.map((w, i) =>
          Math.max(Math.floor(usableWidth * w), i === 0 ? 15 : 6)
        );
      } else {
        // Without metadata: Tag Name, Tasks, Ready, Done
        const widths = [0.6, 0.13, 0.13, 0.13];
        colWidths = widths.map((w, i) =>
          Math.max(Math.floor(usableWidth * w), i === 0 ? 20 : 8)
        );
      }
@@ -700,7 +721,7 @@
      });

      // Add rows
      filteredTagList.forEach((tag) => {
        const row = [];

        // Tag name with current indicator
@@ -711,6 +732,11 @@

        if (showTaskCounts) {
          row.push(chalk.white(tag.tasks.length.toString()));
          row.push(
            tag.readyTasks > 0
              ? chalk.yellow(tag.readyTasks.toString())
              : chalk.gray('0')
          );
          row.push(chalk.green(tag.completedTasks.toString()));
        }
@@ -743,9 +769,9 @@
    }

    return {
      tags: filteredTagList,
      currentTag,
      totalTags: filteredTagList.length
    };
  } catch (error) {
    logFn.error(`Error listing tags: ${error.message}`);
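For consumers of the JSON output, a TypeScript sketch of the shape produced above (field names taken from the code; types inferred, not authoritative):

```typescript
interface TagsJsonOutput {
  tags: Array<{
    name: string;
    isCurrent: boolean;
    completedTasks: number;
    readyTasks: number; // new: count of tasks with satisfied dependencies
    tasks: unknown[];
    created: string;
    description: string;
  }>;
  currentTag: string;
  totalTags: number; // counts the filtered list when --ready is set
}
```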
@@ -19,6 +19,278 @@ import * as gitUtils from './utils/git-utils.js';
// Global silent mode flag
let silentMode = false;

// File locking configuration for cross-process safety
const LOCK_CONFIG = {
  maxRetries: 5,
  retryDelay: 100, // ms
  staleLockAge: 10000 // 10 seconds
};

/**
 * Async sleep helper
 */
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

/**
 * Synchronous sleep helper with Atomics.wait fallback
 * Uses Atomics.wait when SharedArrayBuffer is available (proper non-busy wait),
 * otherwise falls back to a busy-wait loop (less efficient but always works).
 * @param {number} ms - Milliseconds to sleep
 */
function sleepSync(ms) {
  // Check if SharedArrayBuffer and Atomics.wait are available
  // They may not be available in some environments (e.g., browsers without proper headers)
  if (
    typeof SharedArrayBuffer !== 'undefined' &&
    typeof Atomics !== 'undefined' &&
    typeof Atomics.wait === 'function'
  ) {
    try {
      const sharedBuffer = new SharedArrayBuffer(4);
      const int32 = new Int32Array(sharedBuffer);
      Atomics.wait(int32, 0, 0, ms);
      return;
    } catch {
      // Fall through to busy-wait fallback
    }
  }

  // Fallback: busy-wait loop (less efficient but universally compatible)
  // Note: This may cause high CPU usage for longer waits. Consider if this
  // becomes an issue with large exponential backoff delays.
  const end = Date.now() + ms;
  while (Date.now() < end) {
    // Busy wait - intentionally empty
  }
}

/**
 * Acquires an exclusive lock on a file and executes a callback
 * Uses same lock file format as withFileLockSync for cross-process compatibility
 * @param {string} filepath - Path to the file to lock
 * @param {Function} callback - Async function to execute while holding the lock
 * @param {Object} [options] - Options for lock behavior
 * @param {boolean} [options.createIfMissing=false] - If true, creates the file with '{}' if it doesn't exist.
 *   Set to true for write operations. Leave false for read-only operations that should handle
 *   file-not-found scenarios in the callback.
 * @returns {Promise<*>} Result of the callback
 */
async function withFileLock(filepath, callback, options = {}) {
  const { createIfMissing = false } = options;
  const fsPromises = fs.promises;

  // Ensure parent directory exists
  const dir = path.dirname(filepath);
  await fsPromises.mkdir(dir, { recursive: true });

  // Only create the file if explicitly requested (for write operations)
  if (createIfMissing) {
    try {
      // Use 'wx' flag for atomic create - fails if file exists (prevents race)
      await fsPromises.writeFile(filepath, '{}', { flag: 'wx' });
    } catch (err) {
      // EEXIST is expected if another process created the file - that's fine
      if (err.code !== 'EEXIST') {
        throw err;
      }
    }
  }

  const lockPath = `${filepath}.lock`;
  const { maxRetries, retryDelay, staleLockAge } = LOCK_CONFIG;

  // Try to acquire lock with retries
  let acquired = false;
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      // Try to create lock file exclusively first
      const lockContent = JSON.stringify({
        pid: process.pid,
        timestamp: Date.now()
      });
      await fsPromises.writeFile(lockPath, lockContent, { flag: 'wx' });
      acquired = true;
      break;
    } catch (err) {
      if (err.code === 'EEXIST') {
        // Lock file exists - check if it's stale
        try {
          const lockStat = await fsPromises.stat(lockPath);
          const age = Date.now() - lockStat.mtimeMs;
          if (age > staleLockAge) {
            // Stale lock - use atomic rename to safely take ownership
            // This prevents race where we delete another process's fresh lock
            const stalePath = `${lockPath}.stale.${process.pid}.${Date.now()}`;
            try {
              await fsPromises.rename(lockPath, stalePath);
              // We successfully took ownership of the stale lock
              // Clean it up and retry immediately
              try {
                await fsPromises.unlink(stalePath);
              } catch {
                // Ignore cleanup errors
              }
              continue; // Retry lock acquisition
            } catch {
              // Rename failed - another process handled it or lock was refreshed
              // Just continue to retry
            }
          }
        } catch (statErr) {
          // Lock was removed between writeFile and stat - retry immediately
          if (statErr.code === 'ENOENT') {
            continue;
          }
          throw statErr;
        }

        // Lock exists and isn't stale (or we couldn't handle it), wait and retry
        if (attempt < maxRetries - 1) {
          const waitMs = retryDelay * Math.pow(2, attempt);
          await sleep(waitMs);
        }
      } else {
        throw err;
      }
    }
  }

  if (!acquired) {
    throw new Error(
      `Failed to acquire lock on ${filepath} after ${maxRetries} attempts`
    );
  }

  try {
    return await callback();
  } finally {
    // Release lock
    try {
      await fsPromises.unlink(lockPath);
    } catch (releaseError) {
      // Always log lock release failures - they indicate potential issues
      log(
        'warn',
        `Failed to release lock for ${filepath}: ${releaseError.message}`
      );
    }
  }
}
/**
 * Synchronous version of file locking for compatibility with existing sync code
 * Uses a lock file approach with retries and stale lock detection
 * @param {string} filepath - Path to the file to lock
 * @param {Function} callback - Sync function to execute while holding the lock
 * @param {Object} [options] - Options for lock behavior
 * @param {boolean} [options.createIfMissing=false] - If true, creates the file with '{}' if it doesn't exist.
 *   Set to true for write operations. Leave false for read-only operations that should handle
 *   file-not-found scenarios in the callback.
 * @returns {*} Result of the callback
 */
function withFileLockSync(filepath, callback, options = {}) {
  const { createIfMissing = false } = options;

  // Ensure parent directory exists
  const dir = path.dirname(filepath);
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }

  // Only create the file if explicitly requested (for write operations)
  if (createIfMissing) {
    try {
      // Use 'wx' flag for atomic create - fails if file exists (prevents race)
      fs.writeFileSync(filepath, '{}', { flag: 'wx' });
    } catch (err) {
      // EEXIST is expected if another process created the file - that's fine
      if (err.code !== 'EEXIST') {
        throw err;
      }
    }
  }

  const lockPath = `${filepath}.lock`;
  const { maxRetries, retryDelay, staleLockAge } = LOCK_CONFIG;

  // Try to acquire lock with retries
  let acquired = false;
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      // Try to create lock file exclusively first
      const lockContent = JSON.stringify({
        pid: process.pid,
        timestamp: Date.now()
      });
      fs.writeFileSync(lockPath, lockContent, { flag: 'wx' });
      acquired = true;
      break;
    } catch (err) {
      if (err.code === 'EEXIST') {
        // Lock file exists - check if it's stale
        try {
          const lockStat = fs.statSync(lockPath);
          const age = Date.now() - lockStat.mtimeMs;
          if (age > staleLockAge) {
            // Stale lock - use atomic rename to safely take ownership
            // This prevents race where we delete another process's fresh lock
            const stalePath = `${lockPath}.stale.${process.pid}.${Date.now()}`;
            try {
              fs.renameSync(lockPath, stalePath);
              // We successfully took ownership of the stale lock
              // Clean it up and retry immediately
              try {
                fs.unlinkSync(stalePath);
              } catch {
                // Ignore cleanup errors
              }
              continue; // Retry lock acquisition
            } catch {
              // Rename failed - another process handled it or lock was refreshed
              // Just continue to retry
            }
          }
        } catch (statErr) {
          // Lock was removed between writeFile and stat - retry immediately
          if (statErr.code === 'ENOENT') {
            continue;
          }
          throw statErr;
        }

        // Lock exists and isn't stale (or we couldn't handle it), wait and retry
        if (attempt < maxRetries - 1) {
          const waitMs = retryDelay * Math.pow(2, attempt);
          sleepSync(waitMs);
        }
      } else {
        throw err;
      }
    }
  }

  if (!acquired) {
    throw new Error(
      `Failed to acquire lock on ${filepath} after ${maxRetries} attempts`
    );
  }

  try {
    return callback();
  } finally {
    // Release lock
    try {
      fs.unlinkSync(lockPath);
    } catch (releaseError) {
      // Always log lock release failures - they indicate potential issues
      log(
        'warn',
        `Failed to release lock for ${filepath}: ${releaseError.message}`
      );
    }
  }
}
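A hedged usage sketch of the exported helpers (the path variable is hypothetical). With LOCK_CONFIG above, the exponential back-off waits roughly 100/200/400/800 ms between the five attempts:

```typescript
// Async variant: hold the lock across an async read-modify-write
const tasks = await withFileLock(
  tasksJsonPath, // hypothetical path variable
  async () => JSON.parse(await fs.promises.readFile(tasksJsonPath, 'utf8')),
  { createIfMissing: true }
);

// Sync variant mirrors the same API for legacy call sites
const raw = withFileLockSync(tasksJsonPath, () =>
  fs.readFileSync(tasksJsonPath, 'utf8')
);
```

Because both variants share the same lock-file format, async and sync callers in different processes still exclude each other.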
// --- Environment Variable Resolution Utility ---
|
||||
/**
|
||||
* Resolves an environment variable's value.
|
||||
@@ -703,6 +975,7 @@ function markMigrationForNotice(tasksJsonPath) {
|
||||
|
||||
/**
|
||||
* Writes and saves a JSON file. Handles tagged task lists properly.
|
||||
* Uses cross-process file locking and atomic writes to prevent race conditions.
|
||||
* @param {string} filepath - Path to the JSON file
|
||||
* @param {Object} data - Data to write (can be resolved tag data or raw tagged data)
|
||||
* @param {string} projectRoot - Optional project root for tag context
|
||||
@@ -712,107 +985,172 @@ function writeJSON(filepath, data, projectRoot = null, tag = null) {
|
||||
	const isDebug = process.env.TASKMASTER_DEBUG === 'true';

	try {
		// Use file locking to prevent concurrent write race conditions
		// This ensures the entire read-modify-write cycle is atomic
		// createIfMissing: true because writeJSON is a write operation
		withFileLockSync(
			filepath,
			() => {
				let finalData = data;

				// If data represents resolved tag data but lost _rawTaggedData (edge-case observed in MCP path)
				if (
					data &&
					!data._rawTaggedData &&
					projectRoot &&
					Array.isArray(data.tasks) &&
					!hasTaggedStructure(data)
				) {
					const resolvedTag = tag || getCurrentTag(projectRoot);

					if (isDebug) {
						console.log(
							`writeJSON: Detected resolved tag data missing _rawTaggedData. Re-reading raw data to prevent data loss for tag '${resolvedTag}'.`
						);
					}

					// Re-read the full file to get the complete tagged structure
					// This is now safe because we hold the lock
					let rawFullData = {};
					try {
						rawFullData = JSON.parse(fs.readFileSync(filepath, 'utf8'));
					} catch (readError) {
						// File might be empty or invalid, start fresh
						if (isDebug) {
							console.log(
								`writeJSON: Could not read existing file, starting fresh: ${readError.message}`
							);
						}
					}

					// Merge the updated data into the full structure
					finalData = {
						...rawFullData,
						[resolvedTag]: {
							// Preserve existing tag metadata, merged with any new metadata
							metadata: {
								...(rawFullData[resolvedTag]?.metadata || {}),
								...(data.metadata || {})
							},
							tasks: data.tasks // The updated tasks array is the source of truth here
						}
					};
				}
				// If we have _rawTaggedData, this means we're working with resolved tag data
				// and need to merge it back into the full tagged structure
				else if (data && data._rawTaggedData && projectRoot) {
					const resolvedTag = tag || getCurrentTag(projectRoot);

					// IMPORTANT: Re-read the file to get the CURRENT state instead of using
					// potentially stale _rawTaggedData. This prevents lost updates from other processes.
					let currentTaggedData;
					try {
						currentTaggedData = JSON.parse(fs.readFileSync(filepath, 'utf8'));
					} catch (readError) {
						// Fall back to _rawTaggedData if file can't be read
						currentTaggedData = data._rawTaggedData;
						if (isDebug) {
							console.log(
								`writeJSON: Using _rawTaggedData as fallback: ${readError.message}`
							);
						}
					}

					// Create a clean copy of the current resolved data (without internal properties)
					const { _rawTaggedData, tag: _, ...cleanResolvedData } = data;

					// Update the specific tag with the resolved data, preserving other tags
					finalData = {
						...currentTaggedData,
						[resolvedTag]: cleanResolvedData
					};

					if (isDebug) {
						console.log(
							`writeJSON: Merging resolved data back into tag '${resolvedTag}'`
						);
					}
				}

				// Clean up any internal properties that shouldn't be persisted
				let cleanData = finalData;
				if (cleanData && typeof cleanData === 'object') {
					// Remove any _rawTaggedData or tag properties from root level
					const { _rawTaggedData, tag: tagProp, ...rootCleanData } = cleanData;
					cleanData = rootCleanData;

					// Additional cleanup for tag objects
					if (typeof cleanData === 'object' && !Array.isArray(cleanData)) {
						const finalCleanData = {};
						for (const [key, value] of Object.entries(cleanData)) {
							if (
								value &&
								typeof value === 'object' &&
								Array.isArray(value.tasks)
							) {
								// This is a tag object - clean up any rogue root-level properties
								// Move created/description to metadata if they're at root level
								const { created, description, ...cleanTagData } = value;

								// Ensure metadata object exists
								if (!cleanTagData.metadata) {
									cleanTagData.metadata = {};
								}

								// Preserve created timestamp in metadata if it exists at root level
								if (created && !cleanTagData.metadata.created) {
									cleanTagData.metadata.created = created;
								}

								// Preserve description in metadata if it exists at root level
								if (description && !cleanTagData.metadata.description) {
									cleanTagData.metadata.description = description;
								}

								finalCleanData[key] = cleanTagData;
							} else {
								finalCleanData[key] = value;
							}
						}
						cleanData = finalCleanData;
					}
				}

				// Use atomic write: write to temp file then rename
				// This prevents partial writes from corrupting the file
				const tempPath = `${filepath}.tmp.${process.pid}`;
				try {
					fs.writeFileSync(
						tempPath,
						JSON.stringify(cleanData, null, 2),
						'utf8'
					);
					fs.renameSync(tempPath, filepath);
				} catch (writeError) {
					// Clean up temp file on failure
					try {
						if (fs.existsSync(tempPath)) {
							fs.unlinkSync(tempPath);
						}
					} catch {
						// Ignore cleanup errors
					}
					throw writeError;
				}

				if (isDebug) {
					console.log(`writeJSON: Successfully wrote to ${filepath}`);
				}
			},
			{ createIfMissing: true }
		);
	} catch (error) {
		log('error', `Error writing JSON file ${filepath}:`, error.message);
		if (isDebug) {
			log('error', 'Full error details:', error);
		}
		// Re-throw so callers know the write failed
		throw error;
	}
}
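
A sketch of the intended call pattern for writeJSON (paths are placeholders; readJSON's signature matches its use in the tests further down): read the resolved view of one tag, mutate it, and let writeJSON merge it back into the full tagged structure under the file lock.

	const tasksPath = '/repo/tasks.json';
	const data = readJSON(tasksPath, '/repo', 'master'); // resolved view of the 'master' tag
	data.tasks.push({ id: '2', title: 'Example task', status: 'pending' });
	writeJSON(tasksPath, data, '/repo', 'master'); // merged back atomically, other tags preserved
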
@@ -1616,5 +1954,7 @@ export {
	flattenTasksWithSubtasks,
	ensureTagMetadata,
	stripAnsiCodes,
	normalizeTaskIds,
	withFileLock,
	withFileLockSync
};
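
With withFileLock and withFileLockSync now exported, other modules can reuse the same lock for their own read-modify-write cycles. A minimal sketch (the relative import path assumes a caller that lives alongside scripts/modules):

	import { withFileLock } from './utils.js';

	await withFileLock(tasksPath, async () => {
		// Any cross-process-safe read-modify-write on tasksPath goes here
	});
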
595
tests/unit/file-locking.test.js
Normal file
@@ -0,0 +1,595 @@
/**
 * Tests for file locking and atomic write functionality
 * Verifies that concurrent access to tasks.json is properly serialized
 */

import fs from 'fs';
import os from 'os';
import path from 'path';
import { fileURLToPath } from 'url';
import { afterEach, beforeEach, describe, expect, it } from '@jest/globals';

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Import the utils module
const utilsPath = path.join(__dirname, '../../scripts/modules/utils.js');

describe('File Locking and Atomic Writes', () => {
	let tempDir;
	let testFilePath;
	let utils;

	beforeEach(async () => {
		// Create a temp directory for each test
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-test-'));
		testFilePath = path.join(tempDir, 'tasks.json');

		// Initialize with empty tasks structure
		fs.writeFileSync(
			testFilePath,
			JSON.stringify(
				{
					master: {
						tasks: [],
						metadata: { created: new Date().toISOString() }
					}
				},
				null,
				2
			)
		);

		// Import utils fresh for each test
		utils = await import(utilsPath + `?cachebust=${Date.now()}`);
	});

	afterEach(() => {
		// Clean up temp directory and any lock files
		if (tempDir && fs.existsSync(tempDir)) {
			fs.rmSync(tempDir, { recursive: true, force: true });
		}
	});

	describe('withFileLockSync', () => {
		it('should execute callback while holding lock', () => {
			const result = utils.withFileLockSync(testFilePath, () => {
				return 'callback executed';
			});

			expect(result).toBe('callback executed');
		});

		it('should release lock after callback completes', () => {
			utils.withFileLockSync(testFilePath, () => {
				// First lock
			});

			// Should be able to acquire lock again
			const result = utils.withFileLockSync(testFilePath, () => {
				return 'second lock acquired';
			});

			expect(result).toBe('second lock acquired');
		});

		it('should release lock even if callback throws', () => {
			expect(() => {
				utils.withFileLockSync(testFilePath, () => {
					throw new Error('Test error');
				});
			}).toThrow('Test error');

			// Should still be able to acquire lock
			const result = utils.withFileLockSync(testFilePath, () => 'recovered');
			expect(result).toBe('recovered');
		});

		it('should create file if createIfMissing is true', () => {
			const newFilePath = path.join(tempDir, 'new-file.json');

			utils.withFileLockSync(
				newFilePath,
				() => {
					// Lock acquired on new file
				},
				{ createIfMissing: true }
			);

			expect(fs.existsSync(newFilePath)).toBe(true);
		});

		it('should not create file if createIfMissing is false (default)', () => {
			const newFilePath = path.join(tempDir, 'should-not-exist.json');

			utils.withFileLockSync(newFilePath, () => {
				// Lock acquired, but file should not be created
			});

			expect(fs.existsSync(newFilePath)).toBe(false);
		});

		it('should clean up lock file after completion', () => {
			utils.withFileLockSync(testFilePath, () => {
				// Do something
			});

			// Lock file should be cleaned up
			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});

		it('should clean up lock file even on error', () => {
			try {
				utils.withFileLockSync(testFilePath, () => {
					throw new Error('Test error');
				});
			} catch {
				// Expected
			}

			// Lock file should be cleaned up
			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});
	});

	describe('withFileLock (async)', () => {
		it('should execute async callback while holding lock', async () => {
			const result = await utils.withFileLock(testFilePath, async () => {
				await new Promise((resolve) => setTimeout(resolve, 10));
				return 'async callback executed';
			});

			expect(result).toBe('async callback executed');
		});

		it('should release lock after async callback completes', async () => {
			await utils.withFileLock(testFilePath, async () => {
				// First lock
			});

			// Should be able to acquire lock again
			const result = await utils.withFileLock(testFilePath, async () => {
				return 'second lock acquired';
			});

			expect(result).toBe('second lock acquired');
		});

		it('should release lock even if async callback rejects', async () => {
			await expect(
				utils.withFileLock(testFilePath, async () => {
					throw new Error('Async error');
				})
			).rejects.toThrow('Async error');

			// Should still be able to acquire lock
			const result = await utils.withFileLock(
				testFilePath,
				async () => 'recovered'
			);
			expect(result).toBe('recovered');
		});

		it('should create file if createIfMissing is true', async () => {
			const newFilePath = path.join(tempDir, 'new-async-file.json');

			await utils.withFileLock(
				newFilePath,
				async () => {
					// Lock acquired on new file
				},
				{ createIfMissing: true }
			);

			expect(fs.existsSync(newFilePath)).toBe(true);
		});

		it('should not create file if createIfMissing is false (default)', async () => {
			const newFilePath = path.join(tempDir, 'should-not-exist-async.json');

			await utils.withFileLock(newFilePath, async () => {
				// Lock acquired, but file should not be created
			});

			expect(fs.existsSync(newFilePath)).toBe(false);
		});

		it('should clean up lock file after completion', async () => {
			await utils.withFileLock(testFilePath, async () => {
				// Do something
			});

			// Lock file should be cleaned up
			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});

		it('should clean up lock file even on error', async () => {
			try {
				await utils.withFileLock(testFilePath, async () => {
					throw new Error('Test error');
				});
			} catch {
				// Expected
			}

			// Lock file should be cleaned up
			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});

		it('should serialize truly concurrent writes', async () => {
			const numConcurrentWrites = 5;
			const writes = [];

			for (let i = 0; i < numConcurrentWrites; i++) {
				writes.push(
					utils.withFileLock(testFilePath, async () => {
						const data = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
						data.master.tasks.push({
							id: String(data.master.tasks.length + 1)
						});
						fs.writeFileSync(testFilePath, JSON.stringify(data, null, 2));
					})
				);
			}

			await Promise.all(writes);

			const finalData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
			expect(finalData.master.tasks).toHaveLength(numConcurrentWrites);
		});
	});

	describe('writeJSON atomic writes', () => {
		it('should not leave temp files on success', () => {
			// Create a tagged structure that writeJSON expects
			const taggedData = {
				master: {
					tasks: [{ id: '1', title: 'Test task', status: 'pending' }],
					metadata: { created: new Date().toISOString() }
				}
			};

			utils.writeJSON(testFilePath, taggedData, null, null);

			const files = fs.readdirSync(tempDir);
			const tempFiles = files.filter((f) => f.includes('.tmp'));
			expect(tempFiles).toHaveLength(0);
		});

		it('should preserve data from other tags when writing to one tag', () => {
			// Set up initial data with multiple tags
			const initialData = {
				master: {
					tasks: [{ id: '1', title: 'Master task', status: 'pending' }],
					metadata: { created: new Date().toISOString() }
				},
				feature: {
					tasks: [{ id: '1', title: 'Feature task', status: 'pending' }],
					metadata: { created: new Date().toISOString() }
				}
			};
			fs.writeFileSync(testFilePath, JSON.stringify(initialData, null, 2));

			// Write directly with tagged structure (simulating what commands do internally)
			const updatedData = {
				...initialData,
				master: {
					...initialData.master,
					tasks: [
						{ id: '1', title: 'Updated master task', status: 'pending' },
						{ id: '2', title: 'New task', status: 'pending' }
					]
				}
			};

			utils.writeJSON(testFilePath, updatedData, null, null);

			const written = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));

			// Master should be updated
			expect(written.master.tasks).toHaveLength(2);
			expect(written.master.tasks[0].title).toBe('Updated master task');

			// Feature should be preserved
			expect(written.feature.tasks).toHaveLength(1);
			expect(written.feature.tasks[0].title).toBe('Feature task');
		});

		it('should not leave lock files on success', () => {
			const taggedData = {
				master: {
					tasks: [{ id: '1', title: 'Test task', status: 'pending' }],
					metadata: {}
				}
			};

			utils.writeJSON(testFilePath, taggedData, null, null);

			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});
	});

	describe('Concurrent write simulation', () => {
		it('should handle rapid sequential writes without data loss', () => {
			// Perform many rapid writes
			const numWrites = 10;

			for (let i = 0; i < numWrites; i++) {
				// Read current data
				let currentData;
				try {
					currentData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
				} catch {
					currentData = { master: { tasks: [], metadata: {} } };
				}

				// Add a new task
				currentData.master.tasks.push({
					id: String(i + 1),
					title: `Task ${i + 1}`,
					status: 'pending'
				});

				// Write with locking
				utils.writeJSON(testFilePath, currentData, null, null);
			}

			const finalData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
			expect(finalData.master.tasks).toHaveLength(numWrites);
		});
	});

	describe('True concurrent process writes', () => {
		it('should handle multiple processes writing simultaneously without data loss', async () => {
			const { spawn } = await import('child_process');

			const numProcesses = 5;
			const tasksPerProcess = 3;

			// Create a worker script with inline locking implementation
			// This mirrors the withFileLockSync implementation but without external dependencies
			const workerScript = `
import fs from 'fs';

const filepath = process.argv[2];
const processId = process.argv[3];
const numTasks = parseInt(process.argv[4], 10);

const LOCK_CONFIG = {
	maxRetries: 10,
	retryDelay: 50,
	staleLockAge: 10000
};

function sleepSync(ms) {
	const end = Date.now() + ms;
	while (Date.now() < end) {
		// Busy wait
	}
}

function withFileLockSync(filepath, callback) {
	const lockPath = filepath + '.lock';
	const { maxRetries, retryDelay, staleLockAge } = LOCK_CONFIG;

	let acquired = false;
	for (let attempt = 0; attempt < maxRetries; attempt++) {
		try {
			const lockContent = JSON.stringify({
				pid: process.pid,
				timestamp: Date.now()
			});
			fs.writeFileSync(lockPath, lockContent, { flag: 'wx' });
			acquired = true;
			break;
		} catch (err) {
			if (err.code === 'EEXIST') {
				try {
					const lockStat = fs.statSync(lockPath);
					const age = Date.now() - lockStat.mtimeMs;
					if (age > staleLockAge) {
						const stalePath = lockPath + '.stale.' + process.pid + '.' + Date.now();
						try {
							fs.renameSync(lockPath, stalePath);
							try { fs.unlinkSync(stalePath); } catch {}
							continue;
						} catch {}
					}
				} catch (statErr) {
					if (statErr.code === 'ENOENT') continue;
					throw statErr;
				}
				if (attempt < maxRetries - 1) {
					const waitMs = retryDelay * Math.pow(2, attempt);
					sleepSync(waitMs);
				}
			} else {
				throw err;
			}
		}
	}

	if (!acquired) {
		throw new Error('Failed to acquire lock on ' + filepath + ' after ' + maxRetries + ' attempts');
	}

	try {
		return callback();
	} finally {
		try {
			fs.unlinkSync(lockPath);
		} catch {}
	}
}

async function main() {
	for (let i = 0; i < numTasks; i++) {
		withFileLockSync(filepath, () => {
			let currentData;
			try {
				currentData = JSON.parse(fs.readFileSync(filepath, 'utf8'));
			} catch {
				currentData = { master: { tasks: [], metadata: {} } };
			}

			currentData.master.tasks.push({
				id: 'P' + processId + '-' + (i + 1),
				title: 'Task from process ' + processId + ' #' + (i + 1),
				status: 'pending'
			});

			fs.writeFileSync(filepath, JSON.stringify(currentData, null, 2), 'utf8');
		});

		// Small delay to increase chance of interleaving
		await new Promise(r => setTimeout(r, 10));
	}
}

main().catch(err => {
	console.error(err);
	process.exit(1);
});
`;

			// Write worker script to temp file
			const workerPath = path.join(tempDir, 'worker.mjs');
			fs.writeFileSync(workerPath, workerScript);

			// Spawn multiple processes that write concurrently
			const processes = [];
			for (let i = 0; i < numProcesses; i++) {
				const proc = spawn(
					'node',
					[workerPath, testFilePath, String(i), String(tasksPerProcess)],
					{
						stdio: ['ignore', 'pipe', 'pipe']
					}
				);
				processes.push(
					new Promise((resolve, reject) => {
						let stderr = '';
						proc.stderr.on('data', (data) => {
							stderr += data.toString();
						});
						proc.on('close', (code) => {
							if (code === 0) {
								resolve();
							} else {
								reject(
									new Error(`Process ${i} exited with code ${code}: ${stderr}`)
								);
							}
						});
						proc.on('error', reject);
					})
				);
			}

			// Wait for all processes to complete
			await Promise.all(processes);

			// Verify all tasks were written
			const finalData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
			const expectedTasks = numProcesses * tasksPerProcess;

			expect(finalData.master.tasks.length).toBe(expectedTasks);

			// Verify we have tasks from all processes
			for (let i = 0; i < numProcesses; i++) {
				const tasksFromProcess = finalData.master.tasks.filter((t) =>
					t.id.startsWith(`P${i}-`)
				);
				expect(tasksFromProcess.length).toBe(tasksPerProcess);
			}
		}, 30000); // 30 second timeout for concurrent test
	});
});

describe('readJSON', () => {
	let tempDir;
	let testFilePath;
	let utils;

	beforeEach(async () => {
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-test-'));
		testFilePath = path.join(tempDir, 'tasks.json');

		// Create .taskmaster directory for state.json
		fs.mkdirSync(path.join(tempDir, '.taskmaster'), { recursive: true });
		fs.writeFileSync(
			path.join(tempDir, '.taskmaster', 'state.json'),
			JSON.stringify({
				currentTag: 'master'
			})
		);

		utils = await import(utilsPath + `?cachebust=${Date.now()}`);
	});

	afterEach(() => {
		if (tempDir && fs.existsSync(tempDir)) {
			fs.rmSync(tempDir, { recursive: true, force: true });
		}
	});

	it('should read tagged task data correctly', () => {
		const data = {
			master: {
				tasks: [{ id: '1', title: 'Test', status: 'pending' }],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(testFilePath, JSON.stringify(data, null, 2));

		const result = utils.readJSON(testFilePath, tempDir, 'master');

		expect(result.tasks).toHaveLength(1);
		expect(result.tasks[0].title).toBe('Test');
	});

	it('should return null for non-existent file', () => {
		const result = utils.readJSON(path.join(tempDir, 'nonexistent.json'));
		expect(result).toBeNull();
	});
});

describe('Lock file stale detection', () => {
	let tempDir;
	let testFilePath;
	let utils;

	beforeEach(async () => {
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-test-'));
		testFilePath = path.join(tempDir, 'tasks.json');
		fs.writeFileSync(testFilePath, '{}');
		utils = await import(utilsPath + `?cachebust=${Date.now()}`);
	});

	afterEach(() => {
		if (tempDir && fs.existsSync(tempDir)) {
			fs.rmSync(tempDir, { recursive: true, force: true });
		}
	});

	it('should remove stale lock files', () => {
		const lockPath = `${testFilePath}.lock`;

		// Create a lock file with an old timestamp
		fs.writeFileSync(
			lockPath,
			JSON.stringify({
				pid: 99999, // Non-existent PID
				timestamp: Date.now() - 20000 // 20 seconds ago
			})
		);

		// Touch the file to make it old
		const pastTime = new Date(Date.now() - 20000);
		fs.utimesSync(lockPath, pastTime, pastTime);

		// Should be able to acquire lock despite existing lock file
		const result = utils.withFileLockSync(testFilePath, () => 'acquired');
		expect(result).toBe('acquired');
	});
});

@@ -113,3 +113,180 @@ describe('Tag Management – writeJSON context preservation', () => {
		expect(tagNames).not.toContain('copy');
	});
});

describe('Tag Management – ready tasks count', () => {
	beforeEach(() => {
		fs.mkdirSync(TEMP_DIR, { recursive: true });
	});

	afterEach(() => {
		fs.rmSync(TEMP_DIR, { recursive: true, force: true });
	});

	it('should count tasks with no dependencies as ready', async () => {
		const data = {
			master: {
				tasks: [
					{ id: 1, title: 'Task 1', status: 'pending', dependencies: [] },
					{ id: 2, title: 'Task 2', status: 'pending', dependencies: [] },
					{ id: 3, title: 'Task 3', status: 'done', dependencies: [] }
				],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(TASKS_PATH, JSON.stringify(data, null, 2));

		const result = await listTags(
			TASKS_PATH,
			{ showTaskCounts: true },
			{ projectRoot: TEMP_DIR },
			'json'
		);

		const masterTag = result.tags.find((t) => t.name === 'master');
		expect(masterTag.readyTasks).toBe(2); // 2 pending, 1 done (not ready)
	});

	it('should count tasks with satisfied dependencies as ready', async () => {
		const data = {
			master: {
				tasks: [
					{ id: 1, title: 'Task 1', status: 'done', dependencies: [] },
					{ id: 2, title: 'Task 2', status: 'pending', dependencies: [1] }, // deps satisfied
					{ id: 3, title: 'Task 3', status: 'pending', dependencies: [2] } // deps NOT satisfied
				],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(TASKS_PATH, JSON.stringify(data, null, 2));

		const result = await listTags(
			TASKS_PATH,
			{ showTaskCounts: true },
			{ projectRoot: TEMP_DIR },
			'json'
		);

		const masterTag = result.tags.find((t) => t.name === 'master');
		expect(masterTag.readyTasks).toBe(1); // only task 2 is ready
	});

	it('should exclude deferred and blocked tasks from ready count', async () => {
		const data = {
			master: {
				tasks: [
					{ id: 1, title: 'Task 1', status: 'pending', dependencies: [] },
					{ id: 2, title: 'Task 2', status: 'deferred', dependencies: [] },
					{ id: 3, title: 'Task 3', status: 'blocked', dependencies: [] },
					{ id: 4, title: 'Task 4', status: 'in-progress', dependencies: [] },
					{ id: 5, title: 'Task 5', status: 'review', dependencies: [] }
				],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(TASKS_PATH, JSON.stringify(data, null, 2));

		const result = await listTags(
			TASKS_PATH,
			{ showTaskCounts: true },
			{ projectRoot: TEMP_DIR },
			'json'
		);

		const masterTag = result.tags.find((t) => t.name === 'master');
		// Only pending, in-progress, review are actionable
		expect(masterTag.readyTasks).toBe(3); // tasks 1, 4, 5
	});
});
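
These counts follow from a readiness predicate like the sketch below (the helper name isTaskReady is illustrative, not taken from the codebase): a task is ready when its status is actionable and every dependency is done.

	function isTaskReady(task, allTasks) {
		// Statuses the tests treat as actionable; deferred/blocked/done are excluded
		const actionable = ['pending', 'in-progress', 'review'];
		if (!actionable.includes(task.status)) return false;
		// Every dependency must resolve to a task whose status is 'done'
		return (task.dependencies || []).every((depId) => {
			const dep = allTasks.find((t) => t.id === depId);
			return dep !== undefined && dep.status === 'done';
		});
	}
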
describe('Tag Management – --ready filter', () => {
	beforeEach(() => {
		fs.mkdirSync(TEMP_DIR, { recursive: true });
	});

	afterEach(() => {
		fs.rmSync(TEMP_DIR, { recursive: true, force: true });
	});

	it('should filter out tags with no ready tasks when --ready is set', async () => {
		const data = {
			'has-ready': {
				tasks: [
					{ id: 1, title: 'Task 1', status: 'pending', dependencies: [] }
				],
				metadata: { created: new Date().toISOString() }
			},
			'no-ready': {
				tasks: [
					{ id: 1, title: 'Task 1', status: 'done', dependencies: [] },
					{ id: 2, title: 'Task 2', status: 'deferred', dependencies: [] }
				],
				metadata: { created: new Date().toISOString() }
			},
			'all-blocked': {
				tasks: [
					{ id: 1, title: 'Task 1', status: 'blocked', dependencies: [] }
				],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(TASKS_PATH, JSON.stringify(data, null, 2));

		const result = await listTags(
			TASKS_PATH,
			{ showTaskCounts: true, ready: true },
			{ projectRoot: TEMP_DIR },
			'json'
		);

		expect(result.tags.length).toBe(1);
		expect(result.tags[0].name).toBe('has-ready');
		expect(result.totalTags).toBe(1);
	});

	it('should include all tags when --ready is not set', async () => {
		const data = {
			'has-ready': {
				tasks: [
					{ id: 1, title: 'Task 1', status: 'pending', dependencies: [] }
				],
				metadata: { created: new Date().toISOString() }
			},
			'no-ready': {
				tasks: [{ id: 1, title: 'Task 1', status: 'done', dependencies: [] }],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(TASKS_PATH, JSON.stringify(data, null, 2));

		const result = await listTags(
			TASKS_PATH,
			{ showTaskCounts: true, ready: false },
			{ projectRoot: TEMP_DIR },
			'json'
		);

		expect(result.tags.length).toBe(2);
		expect(result.totalTags).toBe(2);
	});

	it('should return empty list when no tags have ready tasks', async () => {
		const data = {
			'all-done': {
				tasks: [{ id: 1, title: 'Task 1', status: 'done', dependencies: [] }],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(TASKS_PATH, JSON.stringify(data, null, 2));

		const result = await listTags(
			TASKS_PATH,
			{ showTaskCounts: true, ready: true },
			{ projectRoot: TEMP_DIR },
			'json'
		);

		expect(result.tags.length).toBe(0);
		expect(result.totalTags).toBe(0);
	});
});

30
turbo.json
@@ -39,6 +39,36 @@
"!{packages,apps}/**/node_modules/**"
|
||||
],
|
||||
"outputLogs": "new-only"
|
||||
},
|
||||
"test": {
|
||||
"dependsOn": ["^build"],
|
||||
"inputs": [
|
||||
"$TURBO_DEFAULT$",
|
||||
"!{packages,apps}/**/dist/**",
|
||||
"!{packages,apps}/**/node_modules/**"
|
||||
],
|
||||
"outputLogs": "new-only",
|
||||
"cache": false
|
||||
},
|
||||
"test:unit": {
|
||||
"dependsOn": ["^build"],
|
||||
"inputs": [
|
||||
"$TURBO_DEFAULT$",
|
||||
"!{packages,apps}/**/dist/**",
|
||||
"!{packages,apps}/**/node_modules/**"
|
||||
],
|
||||
"outputLogs": "new-only",
|
||||
"cache": false
|
||||
},
|
||||
"test:integration": {
|
||||
"dependsOn": ["^build"],
|
||||
"inputs": [
|
||||
"$TURBO_DEFAULT$",
|
||||
"!{packages,apps}/**/dist/**",
|
||||
"!{packages,apps}/**/node_modules/**"
|
||||
],
|
||||
"outputLogs": "new-only",
|
||||
"cache": false
|
||||
}
|
||||
},
|
||||
"globalDependencies": ["turbo.json", "tsconfig.json", ".env*"]
|
||||