Mirror of https://github.com/eyaltoledano/claude-task-master.git, synced 2026-01-30 06:12:05 +00:00
fix: Add cross-process file locking to prevent race conditions (#1566)
Co-authored-by: Ben Coombs <bjcoombs@users.noreply.github.com>
Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
.changeset/fix-file-locking.md (new file, 13 lines)

@@ -0,0 +1,13 @@
+---
+"task-master-ai": patch
+---
+
+Fix race condition when multiple Claude Code windows write to tasks.json simultaneously
+
+- Add cross-process file locking to prevent concurrent write collisions
+- Implement atomic writes using temp file + rename pattern to prevent partial writes
+- Re-read file inside lock to get current state, preventing lost updates from stale snapshots
+- Add stale lock detection and automatic cleanup (10-second timeout)
+- Export `withFileLock` and `withFileLockSync` utilities for use by other modules
+
+This fix prevents data loss that could occur when multiple Task Master instances (e.g., multiple Claude Code windows) access the same tasks.json file concurrently.
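
For context on the second bullet: the temp file + rename pattern is atomic because rename() replaces the target in a single filesystem operation, so a concurrent reader sees either the old file or the complete new file, never a partial write. A minimal sketch of the pattern (illustrative only; the helper name writeJsonAtomic is hypothetical, not part of this commit):

import fs from 'node:fs/promises';

// Write the full payload to a sibling temp file, then rename it over
// the target; a reader can never observe a half-written tasks.json.
async function writeJsonAtomic(filePath, data) {
	const tempPath = `${filePath}.tmp.${process.pid}`;
	try {
		await fs.writeFile(tempPath, JSON.stringify(data, null, 2), 'utf8');
		await fs.rename(tempPath, filePath); // atomic on POSIX filesystems
	} catch (err) {
		await fs.unlink(tempPath).catch(() => {}); // best-effort cleanup
		throw err;
	}
}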
package-lock.json (generated, 45 lines changed)

@@ -13713,6 +13713,16 @@
       "integrity": "sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==",
       "license": "MIT"
     },
+    "node_modules/@types/proper-lockfile": {
+      "version": "4.1.4",
+      "resolved": "https://registry.npmjs.org/@types/proper-lockfile/-/proper-lockfile-4.1.4.tgz",
+      "integrity": "sha512-uo2ABllncSqg9F1D4nugVl9v93RmjxF6LJzQLMLDdPaXCUIDPeOJ21Gbqi43xNKzBi/WQ0Q0dICqufzQbMjipQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@types/retry": "*"
+      }
+    },
     "node_modules/@types/react": {
       "version": "19.1.8",
       "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz",
@@ -13783,6 +13793,13 @@
         "node": ">= 0.6"
       }
     },
+    "node_modules/@types/retry": {
+      "version": "0.12.5",
+      "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.5.tgz",
+      "integrity": "sha512-3xSjTp3v03X/lSQLkczaN9UIEwJMoMCA1+Nb5HfbJEQWogdeQIyVtTvxPXDQjZ5zws8rFQfVfRdz03ARihPJgw==",
+      "dev": true,
+      "license": "MIT"
+    },
     "node_modules/@types/stack-utils": {
       "version": "2.0.3",
       "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz",
@@ -29634,6 +29651,23 @@
         "node": ">= 6"
       }
    },
+    "node_modules/proper-lockfile": {
+      "version": "4.1.2",
+      "resolved": "https://registry.npmjs.org/proper-lockfile/-/proper-lockfile-4.1.2.tgz",
+      "integrity": "sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA==",
+      "license": "MIT",
+      "dependencies": {
+        "graceful-fs": "^4.2.4",
+        "retry": "^0.12.0",
+        "signal-exit": "^3.0.2"
+      }
+    },
+    "node_modules/proper-lockfile/node_modules/signal-exit": {
+      "version": "3.0.7",
+      "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+      "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+      "license": "ISC"
+    },
     "node_modules/property-information": {
       "version": "7.1.0",
       "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
@@ -31111,6 +31145,15 @@
         "url": "https://opencollective.com/unified"
       }
     },
+    "node_modules/retry": {
+      "version": "0.12.0",
+      "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz",
+      "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 4"
+      }
+    },
     "node_modules/retry-request": {
       "version": "7.0.2",
       "resolved": "https://registry.npmjs.org/retry-request/-/retry-request-7.0.2.tgz",
@@ -36324,6 +36367,7 @@
         "@supabase/supabase-js": "^2.57.4",
         "date-fns": "^4.1.0",
         "fs-extra": "^11.3.2",
+        "proper-lockfile": "^4.1.2",
         "simple-git": "^3.28.0",
         "steno": "^4.0.2",
         "zod": "^4.1.11"
@@ -36331,6 +36375,7 @@
       "devDependencies": {
        "@types/fs-extra": "^11.0.4",
        "@types/node": "^22.10.5",
+        "@types/proper-lockfile": "^4.1.4",
        "@vitest/coverage-v8": "^4.0.10",
        "strip-literal": "3.1.0",
        "typescript": "^5.9.2",
package.json:

@@ -34,12 +34,14 @@
     "@supabase/supabase-js": "^2.57.4",
     "date-fns": "^4.1.0",
     "fs-extra": "^11.3.2",
+    "proper-lockfile": "^4.1.2",
     "simple-git": "^3.28.0",
     "steno": "^4.0.2",
     "zod": "^4.1.11"
   },
   "devDependencies": {
     "@types/fs-extra": "^11.0.4",
+    "@types/proper-lockfile": "^4.1.4",
     "@types/node": "^22.10.5",
     "@vitest/coverage-v8": "^4.0.10",
     "strip-literal": "3.1.0",
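
The new proper-lockfile dependency supplies the cross-process lock used by the FileOperations changes below: lockfile.lock() retries with exponential backoff, breaks locks older than the stale timeout, and resolves to a release function. A minimal sketch of the API as configured in this commit (the data.json path is illustrative; proper-lockfile requires the target file to exist before locking):

import lockfile from 'proper-lockfile';

// Acquire, work, release - mirrors the LOCK_OPTIONS used below.
const release = await lockfile.lock('data.json', {
	stale: 10000, // consider a lock stale after 10 seconds
	retries: { retries: 5, factor: 2, minTimeout: 100, maxTimeout: 1000 },
	realpath: false // skip symlink resolution
});
try {
	// ... read-modify-write data.json while holding the lock ...
} finally {
	await release(); // always release, even if the work throws
}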
New vitest spec for the FileOperations class (264 lines):

@@ -0,0 +1,264 @@
+/**
+ * Tests for FileOperations class
+ * Focuses on modifyJson and cross-process locking functionality
+ */
+
+import { describe, it, expect, beforeEach, afterEach } from 'vitest';
+import fs from 'node:fs/promises';
+import fsSync from 'node:fs';
+import path from 'node:path';
+import os from 'node:os';
+import { FileOperations } from './file-operations.js';
+
+describe('FileOperations', () => {
+	let tempDir: string;
+	let testFilePath: string;
+	let fileOps: FileOperations;
+
+	beforeEach(async () => {
+		// Create a temp directory for each test
+		tempDir = fsSync.mkdtempSync(path.join(os.tmpdir(), 'tm-core-test-'));
+		testFilePath = path.join(tempDir, 'test.json');
+		fileOps = new FileOperations();
+	});
+
+	afterEach(async () => {
+		// Clean up
+		await fileOps.cleanup();
+		if (tempDir && fsSync.existsSync(tempDir)) {
+			fsSync.rmSync(tempDir, { recursive: true, force: true });
+		}
+	});
+
+	describe('modifyJson', () => {
+		it('should modify existing JSON data', async () => {
+			// Set up initial data
+			await fs.writeFile(testFilePath, JSON.stringify({ count: 0 }));
+
+			// Modify data
+			await fileOps.modifyJson(testFilePath, (data: { count: number }) => ({
+				...data,
+				count: data.count + 1
+			}));
+
+			// Verify
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.count).toBe(1);
+		});
+
+		it('should create file if it does not exist', async () => {
+			const newFilePath = path.join(tempDir, 'new-file.json');
+
+			await fileOps.modifyJson(newFilePath, () => ({ created: true }));
+
+			expect(fsSync.existsSync(newFilePath)).toBe(true);
+			const result = JSON.parse(await fs.readFile(newFilePath, 'utf-8'));
+			expect(result.created).toBe(true);
+		});
+
+		it('should handle async modifier functions', async () => {
+			await fs.writeFile(testFilePath, JSON.stringify({ value: 'initial' }));
+
+			await fileOps.modifyJson(
+				testFilePath,
+				async (data: { value: string }) => {
+					// Simulate async operation
+					await new Promise((resolve) => setTimeout(resolve, 10));
+					return { ...data, value: 'modified' };
+				}
+			);
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.value).toBe('modified');
+		});
+
+		it('should re-read file inside lock to prevent stale data', async () => {
+			// Initial data
+			await fs.writeFile(testFilePath, JSON.stringify({ version: 1 }));
+
+			// Simulate two sequential modifications
+			await fileOps.modifyJson(testFilePath, (data: { version: number }) => ({
+				version: data.version + 1
+			}));
+
+			await fileOps.modifyJson(testFilePath, (data: { version: number }) => ({
+				version: data.version + 1
+			}));
+
+			// Both modifications should have been applied
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.version).toBe(3);
+		});
+
+		it('should not leave lock files on success', async () => {
+			await fs.writeFile(testFilePath, JSON.stringify({}));
+
+			await fileOps.modifyJson(testFilePath, (data) => ({
+				...data,
+				modified: true
+			}));
+
+			// Check no lock files exist
+			const files = await fs.readdir(tempDir);
+			const lockFiles = files.filter((f) => f.endsWith('.lock'));
+			expect(lockFiles).toHaveLength(0);
+		});
+
+		it('should release lock even if modifier throws', async () => {
+			await fs.writeFile(testFilePath, JSON.stringify({}));
+
+			await expect(
+				fileOps.modifyJson(testFilePath, () => {
+					throw new Error('Modifier error');
+				})
+			).rejects.toThrow('Modifier error');
+
+			// Should still be able to acquire lock for another operation
+			await fileOps.modifyJson(testFilePath, () => ({ recovered: true }));
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.recovered).toBe(true);
+		});
+
+		it('should handle empty file gracefully', async () => {
+			// Create empty file
+			await fs.writeFile(testFilePath, '');
+
+			await fileOps.modifyJson(testFilePath, () => ({ initialized: true }));
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.initialized).toBe(true);
+		});
+
+		it('should handle file with only whitespace', async () => {
+			await fs.writeFile(testFilePath, ' \n ');
+
+			await fileOps.modifyJson(testFilePath, () => ({ initialized: true }));
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.initialized).toBe(true);
+		});
+
+		it('should throw on corrupted JSON', async () => {
+			// Write invalid JSON that is not empty
+			await fs.writeFile(testFilePath, '{ invalid json content');
+
+			await expect(
+				fileOps.modifyJson(testFilePath, (data) => data)
+			).rejects.toThrow(/Corrupted JSON/);
+		});
+
+		it('should preserve complex nested structures', async () => {
+			const complexData = {
+				tasks: [
+					{
+						id: 1,
+						title: 'Task 1',
+						subtasks: [{ id: '1.1', title: 'Subtask' }]
+					}
+				],
+				metadata: {
+					created: '2024-01-01',
+					tags: ['tag1', 'tag2']
+				}
+			};
+			await fs.writeFile(testFilePath, JSON.stringify(complexData, null, 2));
+
+			await fileOps.modifyJson(testFilePath, (data: typeof complexData) => ({
+				...data,
+				tasks: [...data.tasks, { id: 2, title: 'Task 2', subtasks: [] }]
+			}));
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.tasks).toHaveLength(2);
+			expect(result.tasks[0].subtasks).toHaveLength(1);
+			expect(result.metadata.tags).toEqual(['tag1', 'tag2']);
+		});
+	});
+
+	describe('concurrent operations', () => {
+		it('should serialize truly concurrent modifyJson calls', async () => {
+			// Initial data
+			await fs.writeFile(testFilePath, JSON.stringify({ count: 0 }));
+
+			const numConcurrentWrites = 5;
+			const writes = [];
+
+			for (let i = 0; i < numConcurrentWrites; i++) {
+				writes.push(
+					fileOps.modifyJson(testFilePath, (data: { count: number }) => ({
+						count: data.count + 1
+					}))
+				);
+			}
+
+			await Promise.all(writes);
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.count).toBe(numConcurrentWrites);
+		});
+
+		it('should handle concurrent writes from multiple FileOperations instances', async () => {
+			// Initial data
+			await fs.writeFile(testFilePath, JSON.stringify({ count: 0 }));
+
+			const numInstances = 3;
+			const instances = Array.from(
+				{ length: numInstances },
+				() => new FileOperations()
+			);
+			const writes = instances.map((ops) =>
+				ops.modifyJson(testFilePath, (data: { count: number }) => ({
+					count: data.count + 1
+				}))
+			);
+
+			await Promise.all(writes);
+
+			// Cleanup all instances
+			await Promise.all(instances.map((ops) => ops.cleanup()));
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.count).toBe(numInstances);
+		});
+	});
+
+	describe('writeJson', () => {
+		it('should write JSON atomically', async () => {
+			const data = { test: 'value' };
+
+			await fileOps.writeJson(testFilePath, data);
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.test).toBe('value');
+		});
+
+		it('should not leave temp files on success', async () => {
+			await fileOps.writeJson(testFilePath, { test: true });
+
+			const files = await fs.readdir(tempDir);
+			const tempFiles = files.filter((f) => f.includes('.tmp'));
+			expect(tempFiles).toHaveLength(0);
+
+			// Also verify no lock files remain
+			const lockFiles = files.filter((f) => f.endsWith('.lock'));
+			expect(lockFiles).toHaveLength(0);
+		});
+	});
+
+	describe('cleanup', () => {
+		it('should clear cached writers', async () => {
+			// Write to create a cached writer
+			await fileOps.writeJson(testFilePath, { test: 1 });
+
+			// Cleanup
+			await fileOps.cleanup();
+
+			// Should still work after cleanup (creates new writer)
+			await fileOps.writeJson(testFilePath, { test: 2 });
+
+			const result = JSON.parse(await fs.readFile(testFilePath, 'utf-8'));
+			expect(result.test).toBe(2);
+		});
+	});
+});
file-operations.ts (imported above as './file-operations.js'):

@@ -1,16 +1,52 @@
 /**
- * @fileoverview File operations with atomic writes and locking
+ * @fileoverview File operations with atomic writes and cross-process locking
+ *
+ * Uses steno for atomic writes (same pattern as workflow-state-manager.ts)
+ * and proper-lockfile for cross-process locking to prevent lost updates.
  */

 import { constants } from 'node:fs';
 import fs from 'node:fs/promises';
+import path from 'node:path';
+import lockfile from 'proper-lockfile';
+import { Writer } from 'steno';
 import type { FileStorageData } from './format-handler.js';

 /**
- * Handles atomic file operations with locking mechanism
+ * File locking configuration for cross-process safety
+ */
+const LOCK_OPTIONS = {
+	stale: 10000, // Consider lock stale after 10 seconds
+	retries: {
+		retries: 5,
+		factor: 2,
+		minTimeout: 100,
+		maxTimeout: 1000
+	},
+	realpath: false // Don't resolve symlinks (faster)
+};
+
+/**
+ * Handles atomic file operations with cross-process locking mechanism.
+ *
+ * Writers are cached for reuse. Call {@link cleanup} when disposing of
+ * long-lived instances to prevent memory leaks.
  */
 export class FileOperations {
-	private fileLocks: Map<string, Promise<void>> = new Map();
+	/** Map of file paths to steno Writers for reuse */
+	private writers = new Map<string, Writer>();
+
+	/**
+	 * Get or create a steno Writer for a file path
+	 */
+	private getWriter(filePath: string): Writer {
+		let writer = this.writers.get(filePath);
+		if (!writer) {
+			writer = new Writer(filePath);
+			this.writers.set(filePath, writer);
+		}
+		return writer;
+	}

 	/**
 	 * Read and parse JSON file
@@ -31,52 +67,132 @@ export class FileOperations {
 	}

 	/**
-	 * Write JSON file with atomic operation and locking
+	 * Write JSON file with atomic operation and cross-process locking.
+	 * Uses steno for atomic writes and proper-lockfile for cross-process safety.
+	 * WARNING: This replaces the entire file. For concurrent modifications,
+	 * use modifyJson() instead to prevent lost updates.
 	 */
 	async writeJson(
 		filePath: string,
 		data: FileStorageData | any
 	): Promise<void> {
-		// Use file locking to prevent concurrent writes
-		const lockKey = filePath;
-		const existingLock = this.fileLocks.get(lockKey);
-
-		if (existingLock) {
-			await existingLock;
-		}
-
-		const lockPromise = this.performAtomicWrite(filePath, data);
-		this.fileLocks.set(lockKey, lockPromise);
+		// Ensure file exists for locking (proper-lockfile requires this)
+		await this.ensureFileExists(filePath);

+		// Acquire cross-process lock
+		let release: (() => Promise<void>) | null = null;
 		try {
-			await lockPromise;
+			release = await lockfile.lock(filePath, LOCK_OPTIONS);
+
+			// Use steno Writer for atomic writes (same pattern as workflow-state-manager)
+			const content = JSON.stringify(data, null, 2);
+			const writer = this.getWriter(filePath);
+			await writer.write(content);
 		} finally {
-			this.fileLocks.delete(lockKey);
+			if (release) {
+				try {
+					await release();
+				} catch (err: any) {
+					// Log but don't throw - lock may have been released already
+					// Other errors should be visible for debugging
+					if (process.env.DEBUG || process.env.TASKMASTER_DEBUG === 'true') {
+						console.warn(
+							`[WARN] Lock release warning for ${filePath}: ${err.message}`
+						);
+					}
+				}
+			}
 		}
 	}

 	/**
-	 * Perform atomic write operation using temporary file
+	 * Read-modify-write JSON file with cross-process locking.
+	 * Uses steno for atomic writes and proper-lockfile for cross-process safety.
+	 * Re-reads file inside lock to prevent lost updates from stale snapshots.
+	 * @param filePath - Path to the JSON file
+	 * @param modifier - Function that receives current data and returns modified data
 	 */
-	private async performAtomicWrite(filePath: string, data: any): Promise<void> {
-		const tempPath = `${filePath}.tmp`;
+	async modifyJson<T = any>(
+		filePath: string,
+		modifier: (currentData: T) => T | Promise<T>
+	): Promise<void> {
+		// Ensure file exists for locking (proper-lockfile requires this)
+		await this.ensureFileExists(filePath);
+
+		// Acquire cross-process lock
+		let release: (() => Promise<void>) | null = null;
 		try {
-			// Write to temp file first
-			const content = JSON.stringify(data, null, 2);
-			await fs.writeFile(tempPath, content, 'utf-8');
-
-			// Atomic rename
-			await fs.rename(tempPath, filePath);
-		} catch (error: any) {
-			// Clean up temp file if it exists
-			try {
-				await fs.unlink(tempPath);
-			} catch {
-				// Ignore cleanup errors
-			}
-
-			throw new Error(`Failed to write file ${filePath}: ${error.message}`);
+			release = await lockfile.lock(filePath, LOCK_OPTIONS);
+
+			// Re-read file INSIDE lock to get current state
+			// This prevents lost updates from stale snapshots
+			let currentData: T;
+			try {
+				const content = await fs.readFile(filePath, 'utf-8');
+				currentData = JSON.parse(content);
+			} catch (err: any) {
+				// Distinguish between expected empty/new files and actual corruption
+				if (err.code === 'ENOENT') {
+					// File doesn't exist yet - start fresh
+					currentData = {} as T;
+				} else if (err instanceof SyntaxError) {
+					// Check if it's just an empty file (our ensureFileExists writes '{}')
+					const content = await fs.readFile(filePath, 'utf-8').catch(() => '');
+					if (content.trim() === '' || content.trim() === '{}') {
+						currentData = {} as T;
+					} else {
+						// Actual JSON corruption - this is a serious error
+						throw new Error(
+							`Corrupted JSON in ${filePath}: ${err.message}. File contains: ${content.substring(0, 100)}...`
+						);
+					}
+				} else {
+					// Other errors (permission, I/O) should be surfaced
+					throw new Error(
+						`Failed to read ${filePath} for modification: ${err.message}`
+					);
+				}
+			}
+
+			// Apply modification
+			const newData = await modifier(currentData);
+
+			// Write atomically using steno (same pattern as workflow-state-manager)
+			const content = JSON.stringify(newData, null, 2);
+			const writer = this.getWriter(filePath);
+			await writer.write(content);
+		} finally {
+			if (release) {
+				try {
+					await release();
+				} catch (err: any) {
+					// Log but don't throw - lock may have been released already
+					// Other errors should be visible for debugging
+					if (process.env.DEBUG || process.env.TASKMASTER_DEBUG === 'true') {
+						console.warn(
+							`[WARN] Lock release warning for ${filePath}: ${err.message}`
+						);
+					}
+				}
+			}
+		}
+	}
+
+	/**
+	 * Ensure file exists for locking (proper-lockfile requires the file to exist).
+	 * Uses atomic creation with 'wx' flag to prevent TOCTOU race conditions.
+	 */
+	private async ensureFileExists(filePath: string): Promise<void> {
+		const dir = path.dirname(filePath);
+		await fs.mkdir(dir, { recursive: true });
+		try {
+			// Use 'wx' flag for atomic create - fails if file exists (prevents race)
+			await fs.writeFile(filePath, '{}', { flag: 'wx' });
+		} catch (err: any) {
+			// EEXIST is expected if another process created the file - that's fine
+			if (err.code !== 'EEXIST') {
+				throw err;
+			}
 		}
 	}
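
The key to modifyJson above is that the current state is re-read after the lock is acquired. To make the failure mode it prevents concrete, here is the classic lost-update interleaving, sketched as a comment (processes A and B are hypothetical):

// Without the lock, both processes act on the same stale snapshot:
//
//   A: read  { count: 0 }
//   B: read  { count: 0 }   <- snapshot taken before A's write lands
//   A: write { count: 1 }
//   B: write { count: 1 }   <- A's update is silently lost
//
// With modifyJson, B waits on the lock, re-reads { count: 1 } once A
// releases it, and correctly writes { count: 2 }.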
@@ -159,13 +275,14 @@ export class FileOperations {
 	}

 	/**
-	 * Clean up all pending file operations
+	 * Clean up resources - releases cached steno Writers
+	 * Call this when the FileOperations instance is no longer needed
+	 * to prevent memory leaks in long-running processes.
 	 */
 	async cleanup(): Promise<void> {
-		const locks = Array.from(this.fileLocks.values());
-		if (locks.length > 0) {
-			await Promise.all(locks);
-		}
-		this.fileLocks.clear();
+		// Clear cached Writers to allow garbage collection
+		// Note: steno Writers don't have explicit close methods;
+		// they handle file descriptor cleanup internally
+		this.writers.clear();
 	}
 }
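
In practice a caller replaces a separate read + writeJson pair with a single modifyJson call, so the whole read-modify-write cycle happens under the lock. A usage sketch in plain JavaScript, mirroring the spec above (the tasks.json path is illustrative):

import { FileOperations } from './file-operations.js';

const fileOps = new FileOperations();

// The modifier receives the current on-disk state (re-read under the
// lock) and returns the new state, which is then written atomically.
await fileOps.modifyJson('tasks.json', (data) => ({
	...data,
	count: (data.count ?? 0) + 1 // ?? 0 covers the fresh-file '{}' case
}));

// Release cached steno writers when the instance is no longer needed.
await fileOps.cleanup();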
scripts/modules/utils.js:

@@ -19,6 +19,278 @@ import * as gitUtils from './utils/git-utils.js';
 // Global silent mode flag
 let silentMode = false;
+
+// File locking configuration for cross-process safety
+const LOCK_CONFIG = {
+	maxRetries: 5,
+	retryDelay: 100, // ms
+	staleLockAge: 10000 // 10 seconds
+};
+
+/**
+ * Async sleep helper
+ */
+const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
+
+/**
+ * Synchronous sleep helper with Atomics.wait fallback
+ * Uses Atomics.wait when SharedArrayBuffer is available (proper non-busy wait),
+ * otherwise falls back to a busy-wait loop (less efficient but always works).
+ * @param {number} ms - Milliseconds to sleep
+ */
+function sleepSync(ms) {
+	// Check if SharedArrayBuffer and Atomics.wait are available
+	// They may not be available in some environments (e.g., browsers without proper headers)
+	if (
+		typeof SharedArrayBuffer !== 'undefined' &&
+		typeof Atomics !== 'undefined' &&
+		typeof Atomics.wait === 'function'
+	) {
+		try {
+			const sharedBuffer = new SharedArrayBuffer(4);
+			const int32 = new Int32Array(sharedBuffer);
+			Atomics.wait(int32, 0, 0, ms);
+			return;
+		} catch {
+			// Fall through to busy-wait fallback
+		}
+	}
+
+	// Fallback: busy-wait loop (less efficient but universally compatible)
+	// Note: This may cause high CPU usage for longer waits. Consider if this
+	// becomes an issue with large exponential backoff delays.
+	const end = Date.now() + ms;
+	while (Date.now() < end) {
+		// Busy wait - intentionally empty
+	}
+}
+
+/**
+ * Acquires an exclusive lock on a file and executes a callback
+ * Uses same lock file format as withFileLockSync for cross-process compatibility
+ * @param {string} filepath - Path to the file to lock
+ * @param {Function} callback - Async function to execute while holding the lock
+ * @param {Object} [options] - Options for lock behavior
+ * @param {boolean} [options.createIfMissing=false] - If true, creates the file with '{}' if it doesn't exist.
+ *   Set to true for write operations. Leave false for read-only operations that should handle
+ *   file-not-found scenarios in the callback.
+ * @returns {Promise<*>} Result of the callback
+ */
+async function withFileLock(filepath, callback, options = {}) {
+	const { createIfMissing = false } = options;
+	const fsPromises = fs.promises;
+
+	// Ensure parent directory exists
+	const dir = path.dirname(filepath);
+	await fsPromises.mkdir(dir, { recursive: true });
+
+	// Only create the file if explicitly requested (for write operations)
+	if (createIfMissing) {
+		try {
+			// Use 'wx' flag for atomic create - fails if file exists (prevents race)
+			await fsPromises.writeFile(filepath, '{}', { flag: 'wx' });
+		} catch (err) {
+			// EEXIST is expected if another process created the file - that's fine
+			if (err.code !== 'EEXIST') {
+				throw err;
+			}
+		}
+	}
+
+	const lockPath = `${filepath}.lock`;
+	const { maxRetries, retryDelay, staleLockAge } = LOCK_CONFIG;
+
+	// Try to acquire lock with retries
+	let acquired = false;
+	for (let attempt = 0; attempt < maxRetries; attempt++) {
+		try {
+			// Try to create lock file exclusively first
+			const lockContent = JSON.stringify({
+				pid: process.pid,
+				timestamp: Date.now()
+			});
+			await fsPromises.writeFile(lockPath, lockContent, { flag: 'wx' });
+			acquired = true;
+			break;
+		} catch (err) {
+			if (err.code === 'EEXIST') {
+				// Lock file exists - check if it's stale
+				try {
+					const lockStat = await fsPromises.stat(lockPath);
+					const age = Date.now() - lockStat.mtimeMs;
+					if (age > staleLockAge) {
+						// Stale lock - use atomic rename to safely take ownership
+						// This prevents race where we delete another process's fresh lock
+						const stalePath = `${lockPath}.stale.${process.pid}.${Date.now()}`;
+						try {
+							await fsPromises.rename(lockPath, stalePath);
+							// We successfully took ownership of the stale lock
+							// Clean it up and retry immediately
+							try {
+								await fsPromises.unlink(stalePath);
+							} catch {
+								// Ignore cleanup errors
+							}
+							continue; // Retry lock acquisition
+						} catch {
+							// Rename failed - another process handled it or lock was refreshed
+							// Just continue to retry
+						}
+					}
+				} catch (statErr) {
+					// Lock was removed between writeFile and stat - retry immediately
+					if (statErr.code === 'ENOENT') {
+						continue;
+					}
+					throw statErr;
+				}
+
+				// Lock exists and isn't stale (or we couldn't handle it), wait and retry
+				if (attempt < maxRetries - 1) {
+					const waitMs = retryDelay * Math.pow(2, attempt);
+					await sleep(waitMs);
+				}
+			} else {
+				throw err;
+			}
+		}
+	}
+
+	if (!acquired) {
+		throw new Error(
+			`Failed to acquire lock on ${filepath} after ${maxRetries} attempts`
+		);
+	}
+
+	try {
+		return await callback();
+	} finally {
+		// Release lock
+		try {
+			await fsPromises.unlink(lockPath);
+		} catch (releaseError) {
+			// Always log lock release failures - they indicate potential issues
+			log(
+				'warn',
+				`Failed to release lock for ${filepath}: ${releaseError.message}`
+			);
+		}
+	}
+}
+
+/**
+ * Synchronous version of file locking for compatibility with existing sync code
+ * Uses a lock file approach with retries and stale lock detection
+ * @param {string} filepath - Path to the file to lock
+ * @param {Function} callback - Sync function to execute while holding the lock
+ * @param {Object} [options] - Options for lock behavior
+ * @param {boolean} [options.createIfMissing=false] - If true, creates the file with '{}' if it doesn't exist.
+ *   Set to true for write operations. Leave false for read-only operations that should handle
+ *   file-not-found scenarios in the callback.
+ * @returns {*} Result of the callback
+ */
+function withFileLockSync(filepath, callback, options = {}) {
+	const { createIfMissing = false } = options;
+
+	// Ensure parent directory exists
+	const dir = path.dirname(filepath);
+	if (!fs.existsSync(dir)) {
+		fs.mkdirSync(dir, { recursive: true });
+	}
+
+	// Only create the file if explicitly requested (for write operations)
+	if (createIfMissing) {
+		try {
+			// Use 'wx' flag for atomic create - fails if file exists (prevents race)
+			fs.writeFileSync(filepath, '{}', { flag: 'wx' });
+		} catch (err) {
+			// EEXIST is expected if another process created the file - that's fine
+			if (err.code !== 'EEXIST') {
+				throw err;
+			}
+		}
+	}
+
+	const lockPath = `${filepath}.lock`;
+	const { maxRetries, retryDelay, staleLockAge } = LOCK_CONFIG;
+
+	// Try to acquire lock with retries
+	let acquired = false;
+	for (let attempt = 0; attempt < maxRetries; attempt++) {
+		try {
+			// Try to create lock file exclusively first
+			const lockContent = JSON.stringify({
+				pid: process.pid,
+				timestamp: Date.now()
+			});
+			fs.writeFileSync(lockPath, lockContent, { flag: 'wx' });
+			acquired = true;
+			break;
+		} catch (err) {
+			if (err.code === 'EEXIST') {
+				// Lock file exists - check if it's stale
+				try {
+					const lockStat = fs.statSync(lockPath);
+					const age = Date.now() - lockStat.mtimeMs;
+					if (age > staleLockAge) {
+						// Stale lock - use atomic rename to safely take ownership
+						// This prevents race where we delete another process's fresh lock
+						const stalePath = `${lockPath}.stale.${process.pid}.${Date.now()}`;
+						try {
+							fs.renameSync(lockPath, stalePath);
+							// We successfully took ownership of the stale lock
+							// Clean it up and retry immediately
+							try {
+								fs.unlinkSync(stalePath);
+							} catch {
+								// Ignore cleanup errors
+							}
+							continue; // Retry lock acquisition
+						} catch {
+							// Rename failed - another process handled it or lock was refreshed
+							// Just continue to retry
+						}
+					}
+				} catch (statErr) {
+					// Lock was removed between writeFile and stat - retry immediately
+					if (statErr.code === 'ENOENT') {
+						continue;
+					}
+					throw statErr;
+				}
+
+				// Lock exists and isn't stale (or we couldn't handle it), wait and retry
+				if (attempt < maxRetries - 1) {
+					const waitMs = retryDelay * Math.pow(2, attempt);
+					sleepSync(waitMs);
+				}
+			} else {
+				throw err;
+			}
+		}
+	}
+
+	if (!acquired) {
+		throw new Error(
+			`Failed to acquire lock on ${filepath} after ${maxRetries} attempts`
+		);
+	}
+
+	try {
+		return callback();
+	} finally {
+		// Release lock
+		try {
+			fs.unlinkSync(lockPath);
+		} catch (releaseError) {
+			// Always log lock release failures - they indicate potential issues
+			log(
+				'warn',
+				`Failed to release lock for ${filepath}: ${releaseError.message}`
+			);
+		}
+	}
+}

 // --- Environment Variable Resolution Utility ---
 /**
  * Resolves an environment variable's value.
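
With the LOCK_CONFIG above, a contended caller sleeps roughly 100, 200, 400, then 800 ms between its five attempts (retryDelay * 2^attempt) before giving up, and any lock file older than 10 seconds is treated as stale and taken over via atomic rename. A usage sketch of the sync variant, matching how writeJSON uses it below (the tasks.json path and updatedAt field are illustrative):

import fs from 'fs';

// Wrap the entire read-modify-write cycle so no other process can
// interleave between the read and the write. createIfMissing: true is
// meant for write paths; read-only callers leave it false.
const updated = withFileLockSync(
	'tasks.json',
	() => {
		const data = JSON.parse(fs.readFileSync('tasks.json', 'utf8'));
		data.updatedAt = Date.now();
		fs.writeFileSync('tasks.json', JSON.stringify(data, null, 2));
		return data;
	},
	{ createIfMissing: true }
);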
@@ -703,6 +975,7 @@ function markMigrationForNotice(tasksJsonPath) {

 /**
  * Writes and saves a JSON file. Handles tagged task lists properly.
+ * Uses cross-process file locking and atomic writes to prevent race conditions.
  * @param {string} filepath - Path to the JSON file
  * @param {Object} data - Data to write (can be resolved tag data or raw tagged data)
  * @param {string} projectRoot - Optional project root for tag context
@@ -712,107 +985,172 @@ function writeJSON(filepath, data, projectRoot = null, tag = null) {
 	const isDebug = process.env.TASKMASTER_DEBUG === 'true';

 	try {
-		let finalData = data;
-		// If data represents resolved tag data but lost _rawTaggedData (edge-case observed in MCP path)
-		if (
-			!data._rawTaggedData &&
-			projectRoot &&
-			Array.isArray(data.tasks) &&
-			!hasTaggedStructure(data)
-		) {
-			const resolvedTag = tag || getCurrentTag(projectRoot);
-			if (isDebug) {
-				console.log(
-					`writeJSON: Detected resolved tag data missing _rawTaggedData. Re-reading raw data to prevent data loss for tag '${resolvedTag}'.`
-				);
-			}
-			// Re-read the full file to get the complete tagged structure
-			const rawFullData = JSON.parse(fs.readFileSync(filepath, 'utf8'));
-			// Merge the updated data into the full structure
-			finalData = {
-				...rawFullData,
-				[resolvedTag]: {
-					// Preserve existing tag metadata if it exists, otherwise use what's passed
-					...(rawFullData[resolvedTag]?.metadata || {}),
-					...(data.metadata ? { metadata: data.metadata } : {}),
-					tasks: data.tasks // The updated tasks array is the source of truth here
-				}
-			};
-		}
-		// If we have _rawTaggedData, this means we're working with resolved tag data
-		// and need to merge it back into the full tagged structure
-		else if (data && data._rawTaggedData && projectRoot) {
-			const resolvedTag = tag || getCurrentTag(projectRoot);
-
-			// Get the original tagged data
-			const originalTaggedData = data._rawTaggedData;
-
-			// Create a clean copy of the current resolved data (without internal properties)
-			const { _rawTaggedData, tag: _, ...cleanResolvedData } = data;
-
-			// Update the specific tag with the resolved data
-			finalData = {
-				...originalTaggedData,
-				[resolvedTag]: cleanResolvedData
-			};
-
-			if (isDebug) {
-				console.log(
-					`writeJSON: Merging resolved data back into tag '${resolvedTag}'`
-				);
-			}
-		}
-
-		// Clean up any internal properties that shouldn't be persisted
-		let cleanData = finalData;
-		if (cleanData && typeof cleanData === 'object') {
-			// Remove any _rawTaggedData or tag properties from root level
-			const { _rawTaggedData, tag: tagProp, ...rootCleanData } = cleanData;
-			cleanData = rootCleanData;
-
-			// Additional cleanup for tag objects
-			if (typeof cleanData === 'object' && !Array.isArray(cleanData)) {
-				const finalCleanData = {};
-				for (const [key, value] of Object.entries(cleanData)) {
-					if (
-						value &&
-						typeof value === 'object' &&
-						Array.isArray(value.tasks)
-					) {
-						// This is a tag object - clean up any rogue root-level properties
-						const { created, description, ...cleanTagData } = value;
-						// Only keep the description if there's no metadata.description
-						if (
-							description &&
-							(!cleanTagData.metadata || !cleanTagData.metadata.description)
-						) {
-							cleanTagData.description = description;
-						}
-						finalCleanData[key] = cleanTagData;
-					} else {
-						finalCleanData[key] = value;
-					}
-				}
-				cleanData = finalCleanData;
-			}
-		}
-
-		fs.writeFileSync(filepath, JSON.stringify(cleanData, null, 2), 'utf8');
-
-		if (isDebug) {
-			console.log(`writeJSON: Successfully wrote to ${filepath}`);
-		}
+		// Use file locking to prevent concurrent write race conditions
+		// This ensures the entire read-modify-write cycle is atomic
+		// createIfMissing: true because writeJSON is a write operation
+		withFileLockSync(
+			filepath,
+			() => {
+				let finalData = data;
+
+				// If data represents resolved tag data but lost _rawTaggedData (edge-case observed in MCP path)
+				if (
+					data &&
+					!data._rawTaggedData &&
+					projectRoot &&
+					Array.isArray(data.tasks) &&
+					!hasTaggedStructure(data)
+				) {
+					const resolvedTag = tag || getCurrentTag(projectRoot);
+
+					if (isDebug) {
+						console.log(
+							`writeJSON: Detected resolved tag data missing _rawTaggedData. Re-reading raw data to prevent data loss for tag '${resolvedTag}'.`
+						);
+					}
+
+					// Re-read the full file to get the complete tagged structure
+					// This is now safe because we hold the lock
+					let rawFullData = {};
+					try {
+						rawFullData = JSON.parse(fs.readFileSync(filepath, 'utf8'));
+					} catch (readError) {
+						// File might be empty or invalid, start fresh
+						if (isDebug) {
+							console.log(
+								`writeJSON: Could not read existing file, starting fresh: ${readError.message}`
+							);
+						}
+					}
+
+					// Merge the updated data into the full structure
+					finalData = {
+						...rawFullData,
+						[resolvedTag]: {
+							// Preserve existing tag metadata, merged with any new metadata
+							metadata: {
+								...(rawFullData[resolvedTag]?.metadata || {}),
+								...(data.metadata || {})
+							},
+							tasks: data.tasks // The updated tasks array is the source of truth here
+						}
+					};
+				}
+				// If we have _rawTaggedData, this means we're working with resolved tag data
+				// and need to merge it back into the full tagged structure
+				else if (data && data._rawTaggedData && projectRoot) {
+					const resolvedTag = tag || getCurrentTag(projectRoot);
+
+					// IMPORTANT: Re-read the file to get the CURRENT state instead of using
+					// potentially stale _rawTaggedData. This prevents lost updates from other processes.
+					let currentTaggedData;
+					try {
+						currentTaggedData = JSON.parse(fs.readFileSync(filepath, 'utf8'));
+					} catch (readError) {
+						// Fall back to _rawTaggedData if file can't be read
+						currentTaggedData = data._rawTaggedData;
+						if (isDebug) {
+							console.log(
+								`writeJSON: Using _rawTaggedData as fallback: ${readError.message}`
+							);
+						}
+					}
+
+					// Create a clean copy of the current resolved data (without internal properties)
+					const { _rawTaggedData, tag: _, ...cleanResolvedData } = data;
+
+					// Update the specific tag with the resolved data, preserving other tags
+					finalData = {
+						...currentTaggedData,
+						[resolvedTag]: cleanResolvedData
+					};
+
+					if (isDebug) {
+						console.log(
+							`writeJSON: Merging resolved data back into tag '${resolvedTag}'`
+						);
+					}
+				}
+
+				// Clean up any internal properties that shouldn't be persisted
+				let cleanData = finalData;
+				if (cleanData && typeof cleanData === 'object') {
+					// Remove any _rawTaggedData or tag properties from root level
+					const { _rawTaggedData, tag: tagProp, ...rootCleanData } = cleanData;
+					cleanData = rootCleanData;
+
+					// Additional cleanup for tag objects
+					if (typeof cleanData === 'object' && !Array.isArray(cleanData)) {
+						const finalCleanData = {};
+						for (const [key, value] of Object.entries(cleanData)) {
+							if (
+								value &&
+								typeof value === 'object' &&
+								Array.isArray(value.tasks)
+							) {
+								// This is a tag object - clean up any rogue root-level properties
+								// Move created/description to metadata if they're at root level
+								const { created, description, ...cleanTagData } = value;
+
+								// Ensure metadata object exists
+								if (!cleanTagData.metadata) {
+									cleanTagData.metadata = {};
+								}
+
+								// Preserve created timestamp in metadata if it exists at root level
+								if (created && !cleanTagData.metadata.created) {
+									cleanTagData.metadata.created = created;
+								}
+
+								// Preserve description in metadata if it exists at root level
+								if (description && !cleanTagData.metadata.description) {
+									cleanTagData.metadata.description = description;
+								}
+
+								finalCleanData[key] = cleanTagData;
+							} else {
+								finalCleanData[key] = value;
+							}
+						}
+						cleanData = finalCleanData;
+					}
+				}
+
+				// Use atomic write: write to temp file then rename
+				// This prevents partial writes from corrupting the file
+				const tempPath = `${filepath}.tmp.${process.pid}`;
+				try {
+					fs.writeFileSync(
+						tempPath,
+						JSON.stringify(cleanData, null, 2),
+						'utf8'
+					);
+					fs.renameSync(tempPath, filepath);
+				} catch (writeError) {
+					// Clean up temp file on failure
+					try {
+						if (fs.existsSync(tempPath)) {
+							fs.unlinkSync(tempPath);
+						}
+					} catch {
+						// Ignore cleanup errors
+					}
+					throw writeError;
+				}
+
+				if (isDebug) {
+					console.log(`writeJSON: Successfully wrote to ${filepath}`);
+				}
+			},
+			{ createIfMissing: true }
+		);
 	} catch (error) {
 		log('error', `Error writing JSON file ${filepath}:`, error.message);
 		if (isDebug) {
 			log('error', 'Full error details:', error);
 		}
+		// Re-throw so callers know the write failed
+		throw error;
 	}
 }
@@ -1616,5 +1954,7 @@ export {
 	flattenTasksWithSubtasks,
 	ensureTagMetadata,
 	stripAnsiCodes,
-	normalizeTaskIds
+	normalizeTaskIds,
+	withFileLock,
+	withFileLockSync
 };
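
Since withFileLock and withFileLockSync are now exported, other modules can serialize against the same lock file. An async consumer sketch based on the concurrency test below (the import path is written relative to the repo root purely for illustration):

import fs from 'fs';
import { withFileLock } from './scripts/modules/utils.js';

// Each concurrent caller performs its read-modify-write while holding
// the lock, so no push is lost even across separate processes.
await withFileLock('tasks.json', async () => {
	const data = JSON.parse(fs.readFileSync('tasks.json', 'utf8'));
	data.master.tasks.push({ id: String(data.master.tasks.length + 1) });
	fs.writeFileSync('tasks.json', JSON.stringify(data, null, 2));
});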
|||||||
602
tests/unit/file-locking.test.js
Normal file
602
tests/unit/file-locking.test.js
Normal file
@@ -0,0 +1,602 @@
|
|||||||
|
/**
|
||||||
|
* Tests for file locking and atomic write functionality
|
||||||
|
* Verifies that concurrent access to tasks.json is properly serialized
|
||||||
|
*/
|
||||||
|
|
||||||
|
import {
|
||||||
|
jest,
|
||||||
|
describe,
|
||||||
|
it,
|
||||||
|
expect,
|
||||||
|
beforeEach,
|
||||||
|
afterEach
|
||||||
|
} from '@jest/globals';
|
||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import os from 'os';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = path.dirname(__filename);
|
||||||
|
|
||||||
|
// Import the utils module
|
||||||
|
const utilsPath = path.join(__dirname, '../../scripts/modules/utils.js');
|
||||||
|
|
||||||
|
describe('File Locking and Atomic Writes', () => {
|
||||||
|
let tempDir;
|
||||||
|
let testFilePath;
|
||||||
|
let utils;
|
||||||
|
|
||||||
|
beforeEach(async () => {
|
||||||
|
// Create a temp directory for each test
|
||||||
|
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-test-'));
|
||||||
|
testFilePath = path.join(tempDir, 'tasks.json');
|
||||||
|
|
||||||
|
// Initialize with empty tasks structure
|
||||||
|
fs.writeFileSync(
|
||||||
|
testFilePath,
|
||||||
|
JSON.stringify(
|
||||||
|
{
|
||||||
|
master: {
|
||||||
|
tasks: [],
|
||||||
|
metadata: { created: new Date().toISOString() }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
null,
|
||||||
|
2
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Import utils fresh for each test
|
||||||
|
utils = await import(utilsPath + `?cachebust=${Date.now()}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
// Clean up temp directory and any lock files
|
||||||
|
if (tempDir && fs.existsSync(tempDir)) {
|
||||||
|
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('withFileLockSync', () => {
|
||||||
|
it('should execute callback while holding lock', () => {
|
||||||
|
const result = utils.withFileLockSync(testFilePath, () => {
|
||||||
|
return 'callback executed';
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result).toBe('callback executed');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should release lock after callback completes', () => {
|
||||||
|
utils.withFileLockSync(testFilePath, () => {
|
||||||
|
// First lock
|
||||||
|
});
|
||||||
|
|
||||||
|
// Should be able to acquire lock again
|
||||||
|
const result = utils.withFileLockSync(testFilePath, () => {
|
||||||
|
return 'second lock acquired';
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result).toBe('second lock acquired');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should release lock even if callback throws', () => {
|
||||||
|
expect(() => {
|
||||||
|
utils.withFileLockSync(testFilePath, () => {
|
||||||
|
throw new Error('Test error');
|
||||||
|
});
|
||||||
|
}).toThrow('Test error');
|
||||||
|
|
||||||
|
// Should still be able to acquire lock
|
||||||
|
const result = utils.withFileLockSync(testFilePath, () => 'recovered');
|
||||||
|
expect(result).toBe('recovered');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create file if createIfMissing is true', () => {
|
||||||
|
const newFilePath = path.join(tempDir, 'new-file.json');
|
||||||
|
|
||||||
|
utils.withFileLockSync(
|
||||||
|
newFilePath,
|
||||||
|
() => {
|
||||||
|
// Lock acquired on new file
|
||||||
|
},
|
||||||
|
{ createIfMissing: true }
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(fs.existsSync(newFilePath)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not create file if createIfMissing is false (default)', () => {
|
||||||
|
const newFilePath = path.join(tempDir, 'should-not-exist.json');
|
||||||
|
|
||||||
|
utils.withFileLockSync(newFilePath, () => {
|
||||||
|
// Lock acquired, but file should not be created
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(fs.existsSync(newFilePath)).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should clean up lock file after completion', () => {
|
||||||
|
utils.withFileLockSync(testFilePath, () => {
|
||||||
|
// Do something
|
||||||
|
});
|
||||||
|
|
||||||
|
// Lock file should be cleaned up
|
||||||
|
expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should clean up lock file even on error', () => {
|
||||||
|
try {
|
||||||
|
utils.withFileLockSync(testFilePath, () => {
|
||||||
|
throw new Error('Test error');
|
||||||
|
});
|
||||||
|
} catch {
|
||||||
|
// Expected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lock file should be cleaned up
|
||||||
|
expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
|
||||||
|
});
|
||||||
|
});

	describe('withFileLock (async)', () => {
		it('should execute async callback while holding lock', async () => {
			const result = await utils.withFileLock(testFilePath, async () => {
				await new Promise((resolve) => setTimeout(resolve, 10));
				return 'async callback executed';
			});

			expect(result).toBe('async callback executed');
		});

		it('should release lock after async callback completes', async () => {
			await utils.withFileLock(testFilePath, async () => {
				// First lock
			});

			// Should be able to acquire lock again
			const result = await utils.withFileLock(testFilePath, async () => {
				return 'second lock acquired';
			});

			expect(result).toBe('second lock acquired');
		});

		it('should release lock even if async callback rejects', async () => {
			await expect(
				utils.withFileLock(testFilePath, async () => {
					throw new Error('Async error');
				})
			).rejects.toThrow('Async error');

			// Should still be able to acquire lock
			const result = await utils.withFileLock(
				testFilePath,
				async () => 'recovered'
			);
			expect(result).toBe('recovered');
		});

		it('should create file if createIfMissing is true', async () => {
			const newFilePath = path.join(tempDir, 'new-async-file.json');

			await utils.withFileLock(
				newFilePath,
				async () => {
					// Lock acquired on new file
				},
				{ createIfMissing: true }
			);

			expect(fs.existsSync(newFilePath)).toBe(true);
		});

		it('should not create file if createIfMissing is false (default)', async () => {
			const newFilePath = path.join(tempDir, 'should-not-exist-async.json');

			await utils.withFileLock(newFilePath, async () => {
				// Lock acquired, but file should not be created
			});

			expect(fs.existsSync(newFilePath)).toBe(false);
		});

		it('should clean up lock file after completion', async () => {
			await utils.withFileLock(testFilePath, async () => {
				// Do something
			});

			// Lock file should be cleaned up
			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});

		it('should clean up lock file even on error', async () => {
			try {
				await utils.withFileLock(testFilePath, async () => {
					throw new Error('Test error');
				});
			} catch {
				// Expected
			}

			// Lock file should be cleaned up
			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});

		it('should serialize truly concurrent writes', async () => {
			const numConcurrentWrites = 5;
			const writes = [];

			for (let i = 0; i < numConcurrentWrites; i++) {
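				// Read-modify-write happens entirely inside the lock; reading the
				// file before acquiring it would act on a stale snapshot and lose
				// concurrent updates.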
				writes.push(
					utils.withFileLock(testFilePath, async () => {
						const data = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
						data.master.tasks.push({
							id: String(data.master.tasks.length + 1)
						});
						fs.writeFileSync(testFilePath, JSON.stringify(data, null, 2));
					})
				);
			}

			await Promise.all(writes);

			const finalData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
			expect(finalData.master.tasks).toHaveLength(numConcurrentWrites);
		});
	});
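
	// Per the changeset, writeJSON writes atomically via a temp file plus
	// rename (sketch of the assumed pattern, not the exact implementation):
	//   fs.writeFileSync(`${file}.tmp`, json);
	//   fs.renameSync(`${file}.tmp`, file);
	// so a crash mid-write cannot leave a truncated tasks.json behind.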

	describe('writeJSON atomic writes', () => {
		it('should not leave temp files on success', () => {
			// Create a tagged structure that writeJSON expects
			const taggedData = {
				master: {
					tasks: [{ id: '1', title: 'Test task', status: 'pending' }],
					metadata: { created: new Date().toISOString() }
				}
			};

			utils.writeJSON(testFilePath, taggedData, null, null);

			const files = fs.readdirSync(tempDir);
			const tempFiles = files.filter((f) => f.includes('.tmp'));
			expect(tempFiles).toHaveLength(0);
		});

		it('should preserve data from other tags when writing to one tag', () => {
			// Set up initial data with multiple tags
			const initialData = {
				master: {
					tasks: [{ id: '1', title: 'Master task', status: 'pending' }],
					metadata: { created: new Date().toISOString() }
				},
				feature: {
					tasks: [{ id: '1', title: 'Feature task', status: 'pending' }],
					metadata: { created: new Date().toISOString() }
				}
			};
			fs.writeFileSync(testFilePath, JSON.stringify(initialData, null, 2));

			// Write directly with tagged structure (simulating what commands do internally)
			const updatedData = {
				...initialData,
				master: {
					...initialData.master,
					tasks: [
						{ id: '1', title: 'Updated master task', status: 'pending' },
						{ id: '2', title: 'New task', status: 'pending' }
					]
				}
			};

			utils.writeJSON(testFilePath, updatedData, null, null);

			const written = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));

			// Master should be updated
			expect(written.master.tasks).toHaveLength(2);
			expect(written.master.tasks[0].title).toBe('Updated master task');

			// Feature should be preserved
			expect(written.feature.tasks).toHaveLength(1);
			expect(written.feature.tasks[0].title).toBe('Feature task');
		});

		it('should not leave lock files on success', () => {
			const taggedData = {
				master: {
					tasks: [{ id: '1', title: 'Test task', status: 'pending' }],
					metadata: {}
				}
			};

			utils.writeJSON(testFilePath, taggedData, null, null);

			expect(fs.existsSync(`${testFilePath}.lock`)).toBe(false);
		});
	});

	describe('Concurrent write simulation', () => {
		it('should handle rapid sequential writes without data loss', () => {
			// Perform many rapid writes
			const numWrites = 10;

			for (let i = 0; i < numWrites; i++) {
				// Read current data
				let currentData;
				try {
					currentData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
				} catch {
					currentData = { master: { tasks: [], metadata: {} } };
				}

				// Add a new task
				currentData.master.tasks.push({
					id: String(i + 1),
					title: `Task ${i + 1}`,
					status: 'pending'
				});

				// Write with locking
				utils.writeJSON(testFilePath, currentData, null, null);
			}

			const finalData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
			expect(finalData.master.tasks).toHaveLength(numWrites);
		});
	});
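
	// The simulation above stays within one process; the suite below spawns
	// real child processes, which is the contention scenario the
	// cross-process lock exists for.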

	describe('True concurrent process writes', () => {
		it('should handle multiple processes writing simultaneously without data loss', async () => {
			const { spawn } = await import('child_process');

			const numProcesses = 5;
			const tasksPerProcess = 3;

			// Create a worker script with inline locking implementation
			// This mirrors the withFileLockSync implementation but without external dependencies
			const workerScript = `
import fs from 'fs';

const filepath = process.argv[2];
const processId = process.argv[3];
const numTasks = parseInt(process.argv[4], 10);

const LOCK_CONFIG = {
	maxRetries: 10,
	retryDelay: 50,
	staleLockAge: 10000
};

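// Busy-wait keeps the retry loop fully synchronous: a sync lock helper
// cannot await a timer, so contention is paid for with CPU spinning.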
function sleepSync(ms) {
	const end = Date.now() + ms;
	while (Date.now() < end) {
		// Busy wait
	}
}

function withFileLockSync(filepath, callback) {
	const lockPath = filepath + '.lock';
	const { maxRetries, retryDelay, staleLockAge } = LOCK_CONFIG;

	let acquired = false;
	for (let attempt = 0; attempt < maxRetries; attempt++) {
		try {
			const lockContent = JSON.stringify({
				pid: process.pid,
				timestamp: Date.now()
			});
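			// The 'wx' flag makes creation exclusive: writeFileSync throws
			// EEXIST when the lock file already exists, so exactly one
			// process can win the race to create it.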
			fs.writeFileSync(lockPath, lockContent, { flag: 'wx' });
			acquired = true;
			break;
		} catch (err) {
			if (err.code === 'EEXIST') {
				try {
					const lockStat = fs.statSync(lockPath);
					const age = Date.now() - lockStat.mtimeMs;
					if (age > staleLockAge) {
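						// Claim the stale lock via rename before deleting it: if
						// several processes detect staleness at once, only the one
						// whose rename succeeds removes it; the others fall through
						// to the backoff path below.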
						const stalePath = lockPath + '.stale.' + process.pid + '.' + Date.now();
						try {
							fs.renameSync(lockPath, stalePath);
							try { fs.unlinkSync(stalePath); } catch {}
							continue;
						} catch {}
					}
				} catch (statErr) {
					if (statErr.code === 'ENOENT') continue;
					throw statErr;
				}
				if (attempt < maxRetries - 1) {
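					// Exponential backoff: 50 ms, 100 ms, 200 ms, ... (~25 s
					// total across the 10 attempts).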
					const waitMs = retryDelay * Math.pow(2, attempt);
					sleepSync(waitMs);
				}
			} else {
				throw err;
			}
		}
	}

	if (!acquired) {
		throw new Error('Failed to acquire lock on ' + filepath + ' after ' + maxRetries + ' attempts');
	}

	try {
		return callback();
	} finally {
		try {
			fs.unlinkSync(lockPath);
		} catch {}
	}
}

async function main() {
	for (let i = 0; i < numTasks; i++) {
		withFileLockSync(filepath, () => {
			let currentData;
			try {
				currentData = JSON.parse(fs.readFileSync(filepath, 'utf8'));
			} catch {
				currentData = { master: { tasks: [], metadata: {} } };
			}

			currentData.master.tasks.push({
				id: 'P' + processId + '-' + (i + 1),
				title: 'Task from process ' + processId + ' #' + (i + 1),
				status: 'pending'
			});

			fs.writeFileSync(filepath, JSON.stringify(currentData, null, 2), 'utf8');
		});

		// Small delay to increase chance of interleaving
		await new Promise(r => setTimeout(r, 10));
	}
}

main().catch(err => {
	console.error(err);
	process.exit(1);
});
`;

			// Write worker script to temp file
			const workerPath = path.join(tempDir, 'worker.mjs');
			fs.writeFileSync(workerPath, workerScript);

			// Spawn multiple processes that write concurrently
			const processes = [];
			for (let i = 0; i < numProcesses; i++) {
				const proc = spawn(
					'node',
					[workerPath, testFilePath, String(i), String(tasksPerProcess)],
					{
						stdio: ['ignore', 'pipe', 'pipe']
					}
				);
				processes.push(
					new Promise((resolve, reject) => {
						let stderr = '';
						proc.stderr.on('data', (data) => {
							stderr += data.toString();
						});
						proc.on('close', (code) => {
							if (code === 0) {
								resolve();
							} else {
								reject(
									new Error(`Process ${i} exited with code ${code}: ${stderr}`)
								);
							}
						});
						proc.on('error', reject);
					})
				);
			}

			// Wait for all processes to complete
			await Promise.all(processes);

			// Verify all tasks were written
			const finalData = JSON.parse(fs.readFileSync(testFilePath, 'utf8'));
			const expectedTasks = numProcesses * tasksPerProcess;

			expect(finalData.master.tasks.length).toBe(expectedTasks);

			// Verify we have tasks from all processes
			for (let i = 0; i < numProcesses; i++) {
				const tasksFromProcess = finalData.master.tasks.filter((t) =>
					t.id.startsWith(`P${i}-`)
				);
				expect(tasksFromProcess.length).toBe(tasksPerProcess);
			}
		}, 30000); // 30 second timeout for concurrent test
	});
});

describe('readJSON', () => {
	let tempDir;
	let testFilePath;
	let utils;

	beforeEach(async () => {
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-test-'));
		testFilePath = path.join(tempDir, 'tasks.json');

		// Create .taskmaster directory for state.json
		fs.mkdirSync(path.join(tempDir, '.taskmaster'), { recursive: true });
		fs.writeFileSync(
			path.join(tempDir, '.taskmaster', 'state.json'),
			JSON.stringify({
				currentTag: 'master'
			})
		);

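		// Cache-busting query forces a fresh module instance per test so
		// module-level state cannot leak between tests.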
		utils = await import(utilsPath + `?cachebust=${Date.now()}`);
	});

	afterEach(() => {
		if (tempDir && fs.existsSync(tempDir)) {
			fs.rmSync(tempDir, { recursive: true, force: true });
		}
	});

	it('should read tagged task data correctly', () => {
		const data = {
			master: {
				tasks: [{ id: '1', title: 'Test', status: 'pending' }],
				metadata: { created: new Date().toISOString() }
			}
		};
		fs.writeFileSync(testFilePath, JSON.stringify(data, null, 2));

		const result = utils.readJSON(testFilePath, tempDir, 'master');

		expect(result.tasks).toHaveLength(1);
		expect(result.tasks[0].title).toBe('Test');
	});

	it('should return null for non-existent file', () => {
		const result = utils.readJSON(path.join(tempDir, 'nonexistent.json'));
		expect(result).toBeNull();
	});
});

describe('Lock file stale detection', () => {
	let tempDir;
	let testFilePath;
	let utils;

	beforeEach(async () => {
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-test-'));
		testFilePath = path.join(tempDir, 'tasks.json');
		fs.writeFileSync(testFilePath, '{}');
		utils = await import(utilsPath + `?cachebust=${Date.now()}`);
	});

	afterEach(() => {
		if (tempDir && fs.existsSync(tempDir)) {
			fs.rmSync(tempDir, { recursive: true, force: true });
		}
	});

	it('should remove stale lock files', () => {
		const lockPath = `${testFilePath}.lock`;

		// Create a lock file with old timestamp
		fs.writeFileSync(
			lockPath,
			JSON.stringify({
				pid: 99999, // Non-existent PID
				timestamp: Date.now() - 20000 // 20 seconds ago
			})
		);

		// Touch the file to make it old
		const pastTime = new Date(Date.now() - 20000);
		fs.utimesSync(lockPath, pastTime, pastTime);
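		// Backdating the mtime matters: staleness appears to be judged from
		// the lock file's mtime (the worker mirror above checks mtimeMs),
		// not from the JSON timestamp written into the lock.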

		// Should be able to acquire lock despite existing lock file
		const result = utils.withFileLockSync(testFilePath, () => 'acquired');
		expect(result).toBe('acquired');
	});
});