Mirror of https://github.com/eyaltoledano/claude-task-master.git
Synced 2026-01-30 06:12:05 +00:00

Commit: fix: auth refresh issue (#1450)
@@ -3,32 +3,60 @@
  * Bug fix: Cancelled tasks should be treated as complete
  */

-import type { Task } from '@tm/core';
+import type { Subtask, Task, TaskStatus } from '@tm/core';
 import { describe, expect, it } from 'vitest';
 import {
 	type TaskStatistics,
 	calculateDependencyStatistics,
 	calculateSubtaskStatistics,
 	calculateTaskStatistics
-} from '../../../src/ui/components/dashboard.component.js';
+} from './dashboard.component.js';

+/**
+ * Local test helpers for dashboard statistics tests.
+ *
+ * These helpers create minimal task structures focused on status for statistics
+ * calculations. Only `id` and `status` are typically needed - all other fields
+ * have sensible defaults.
+ */
+const createTestTask = (
+	overrides: Partial<Omit<Task, 'id'>> & Pick<Task, 'id'>
+): Task => ({
+	title: '',
+	description: '',
+	status: 'pending',
+	priority: 'medium',
+	dependencies: [],
+	details: '',
+	testStrategy: '',
+	subtasks: [],
+	...overrides
+});
+
+const createTestSubtask = (
+	id: number | string,
+	parentId: string,
+	status: TaskStatus
+): Subtask => ({
+	id,
+	parentId,
+	title: '',
+	status,
+	description: '',
+	priority: 'medium',
+	dependencies: [],
+	details: '',
+	testStrategy: ''
+});
+
 describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 	describe('calculateTaskStatistics', () => {
 		it('should treat cancelled tasks as complete in percentage calculation', () => {
 			// Arrange: 14 done, 1 cancelled = 100% complete
 			const tasks: Task[] = [
-				...Array.from({ length: 14 }, (_, i) => ({
-					id: i + 1,
-					title: `Task ${i + 1}`,
-					status: 'done' as const,
-					dependencies: []
-				})),
-				{
-					id: 15,
-					title: 'Cancelled Task',
-					status: 'cancelled' as const,
-					dependencies: []
-				}
+				...Array.from({ length: 14 }, (_, i) =>
+					createTestTask({ id: String(i + 1), status: 'done' })
+				),
+				createTestTask({ id: '15', status: 'cancelled' })
 			];

 			// Act
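
Note: with these helpers a test states only what matters. Illustrative usage (not part of the diff):

	const task = createTestTask({ id: '1' }); // status defaults to 'pending'
	const sub = createTestSubtask('1.1', '1', 'done'); // done subtask of task '1'
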
@@ -39,37 +67,16 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 			expect(stats.done).toBe(14);
 			expect(stats.cancelled).toBe(1);
 			expect(stats.completedCount).toBe(15); // done + cancelled
-			// BUG: Current code shows 93% (14/15), should be 100% (15/15)
 			expect(stats.completionPercentage).toBe(100);
 		});

 		it('should treat completed status as complete in percentage calculation', () => {
-			// Arrange: Mix of done, completed, cancelled
+			// Arrange: Mix of done, completed, cancelled, pending
 			const tasks: Task[] = [
-				{
-					id: 1,
-					title: 'Done Task',
-					status: 'done' as const,
-					dependencies: []
-				},
-				{
-					id: 2,
-					title: 'Completed Task',
-					status: 'completed' as const,
-					dependencies: []
-				},
-				{
-					id: 3,
-					title: 'Cancelled Task',
-					status: 'cancelled' as const,
-					dependencies: []
-				},
-				{
-					id: 4,
-					title: 'Pending Task',
-					status: 'pending' as const,
-					dependencies: []
-				}
+				createTestTask({ id: '1', status: 'done' }),
+				createTestTask({ id: '2', status: 'completed' }),
+				createTestTask({ id: '3', status: 'cancelled' }),
+				createTestTask({ id: '4', status: 'pending' })
 			];

 			// Act
@@ -87,18 +94,8 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 		it('should show 100% completion when all tasks are cancelled', () => {
 			// Arrange
 			const tasks: Task[] = [
-				{
-					id: 1,
-					title: 'Cancelled 1',
-					status: 'cancelled' as const,
-					dependencies: []
-				},
-				{
-					id: 2,
-					title: 'Cancelled 2',
-					status: 'cancelled' as const,
-					dependencies: []
-				}
+				createTestTask({ id: '1', status: 'cancelled' }),
+				createTestTask({ id: '2', status: 'cancelled' })
 			];

 			// Act
@@ -108,25 +105,14 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 			expect(stats.total).toBe(2);
 			expect(stats.cancelled).toBe(2);
 			expect(stats.completedCount).toBe(2); // All cancelled = all complete
-			// BUG: Current code shows 0%, should be 100%
 			expect(stats.completionPercentage).toBe(100);
 		});

 		it('should show 0% completion when no tasks are complete', () => {
 			// Arrange
 			const tasks: Task[] = [
-				{
-					id: 1,
-					title: 'Pending Task',
-					status: 'pending' as const,
-					dependencies: []
-				},
-				{
-					id: 2,
-					title: 'In Progress Task',
-					status: 'in-progress' as const,
-					dependencies: []
-				}
+				createTestTask({ id: '1', status: 'pending' }),
+				createTestTask({ id: '2', status: 'in-progress' })
 			];

 			// Act
@@ -141,18 +127,16 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 		it('should treat cancelled subtasks as complete in percentage calculation', () => {
 			// Arrange: Task with 3 done subtasks and 1 cancelled = 100%
 			const tasks: Task[] = [
-				{
-					id: 1,
-					title: 'Parent Task',
-					status: 'in-progress' as const,
-					dependencies: [],
+				createTestTask({
+					id: '1',
+					status: 'in-progress',
 					subtasks: [
-						{ id: '1', title: 'Sub 1', status: 'done' },
-						{ id: '2', title: 'Sub 2', status: 'done' },
-						{ id: '3', title: 'Sub 3', status: 'done' },
-						{ id: '4', title: 'Sub 4', status: 'cancelled' }
+						createTestSubtask('1', '1', 'done'),
+						createTestSubtask('2', '1', 'done'),
+						createTestSubtask('3', '1', 'done'),
+						createTestSubtask('4', '1', 'cancelled')
 					]
-				}
+				})
 			];

 			// Act
@@ -163,24 +147,21 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 			expect(stats.done).toBe(3);
 			expect(stats.cancelled).toBe(1);
 			expect(stats.completedCount).toBe(4); // done + cancelled
-			// BUG: Current code shows 75% (3/4), should be 100% (4/4)
 			expect(stats.completionPercentage).toBe(100);
 		});

 		it('should handle completed status in subtasks', () => {
 			// Arrange
 			const tasks: Task[] = [
-				{
-					id: 1,
-					title: 'Parent Task',
-					status: 'in-progress' as const,
-					dependencies: [],
+				createTestTask({
+					id: '1',
+					status: 'in-progress',
 					subtasks: [
-						{ id: '1', title: 'Sub 1', status: 'done' },
-						{ id: '2', title: 'Sub 2', status: 'completed' },
-						{ id: '3', title: 'Sub 3', status: 'pending' }
+						createTestSubtask('1', '1', 'done'),
+						createTestSubtask('2', '1', 'completed'),
+						createTestSubtask('3', '1', 'pending')
 					]
-				}
+				})
 			];

 			// Act
@@ -198,51 +179,26 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 		it('should treat cancelled tasks as satisfied dependencies', () => {
 			// Arrange: Task 15 depends on cancelled task 14
 			const tasks: Task[] = [
-				...Array.from({ length: 13 }, (_, i) => ({
-					id: i + 1,
-					title: `Task ${i + 1}`,
-					status: 'done' as const,
-					dependencies: []
-				})),
-				{
-					id: 14,
-					title: 'Cancelled Dependency',
-					status: 'cancelled' as const,
-					dependencies: []
-				},
-				{
-					id: 15,
-					title: 'Dependent Task',
-					status: 'pending' as const,
-					dependencies: [14]
-				}
+				...Array.from({ length: 13 }, (_, i) =>
+					createTestTask({ id: String(i + 1), status: 'done' })
+				),
+				createTestTask({ id: '14', status: 'cancelled' }),
+				createTestTask({ id: '15', status: 'pending', dependencies: ['14'] })
 			];

 			// Act
 			const stats = calculateDependencyStatistics(tasks);

-			// Assert
-			// Task 15 should be ready to work on since its dependency (14) is cancelled
-			// BUG: Current code shows task 15 as blocked, should show as ready
+			// Assert: Task 15 should be ready since its dependency (14) is cancelled
 			expect(stats.tasksBlockedByDeps).toBe(0);
-			expect(stats.tasksReadyToWork).toBeGreaterThan(0);
+			expect(stats.tasksReadyToWork).toBe(1);
 		});

 		it('should treat completed status as satisfied dependencies', () => {
 			// Arrange
 			const tasks: Task[] = [
-				{
-					id: 1,
-					title: 'Completed Dependency',
-					status: 'completed' as const,
-					dependencies: []
-				},
-				{
-					id: 2,
-					title: 'Dependent Task',
-					status: 'pending' as const,
-					dependencies: [1]
-				}
+				createTestTask({ id: '1', status: 'completed' }),
+				createTestTask({ id: '2', status: 'pending', dependencies: ['1'] })
 			];

 			// Act
@@ -254,26 +210,11 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 		});

 		it('should count tasks with cancelled dependencies as ready', () => {
-			// Arrange: Multiple tasks depending on cancelled tasks
+			// Arrange: Multiple tasks depending on cancelled task
 			const tasks: Task[] = [
-				{
-					id: 1,
-					title: 'Cancelled Task',
-					status: 'cancelled' as const,
-					dependencies: []
-				},
-				{
-					id: 2,
-					title: 'Dependent 1',
-					status: 'pending' as const,
-					dependencies: [1]
-				},
-				{
-					id: 3,
-					title: 'Dependent 2',
-					status: 'pending' as const,
-					dependencies: [1]
-				}
+				createTestTask({ id: '1', status: 'cancelled' }),
+				createTestTask({ id: '2', status: 'pending', dependencies: ['1'] }),
+				createTestTask({ id: '3', status: 'pending', dependencies: ['1'] })
 			];

 			// Act
@@ -283,5 +224,21 @@ describe('dashboard.component - Bug Fix: Cancelled Tasks as Complete', () => {
 			expect(stats.tasksBlockedByDeps).toBe(0);
 			expect(stats.tasksReadyToWork).toBe(2); // Both dependents should be ready
 		});
+
+		it('should block tasks when only some dependencies are complete', () => {
+			// Arrange: Task 3 depends on task 1 (cancelled) and task 2 (pending)
+			const tasks: Task[] = [
+				createTestTask({ id: '1', status: 'cancelled' }),
+				createTestTask({ id: '2', status: 'pending' }),
+				createTestTask({ id: '3', status: 'pending', dependencies: ['1', '2'] })
+			];
+
+			// Act
+			const stats = calculateDependencyStatistics(tasks);
+
+			// Assert: Task 3 blocked by pending task 2, only task 2 is ready
+			expect(stats.tasksBlockedByDeps).toBe(1);
+			expect(stats.tasksReadyToWork).toBe(1);
+		});
 	});
 });
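
Note: calculateTaskStatistics itself is not part of this diff. A minimal sketch of the completion rule these tests pin down (the Set name and the rounding are assumptions; Task and TaskStatus are the types imported above):

	const COMPLETE_STATUSES = new Set<TaskStatus>(['done', 'completed', 'cancelled']);

	function completionPercentage(tasks: Task[]): number {
		if (tasks.length === 0) return 0;
		// Cancelled and completed tasks count toward completion, per the tests above
		const completedCount = tasks.filter((t) => COMPLETE_STATUSES.has(t.status)).length;
		return Math.round((completedCount / tasks.length) * 100);
	}

The dependency tests encode the same predicate: a dependency is satisfied exactly when its task's status is in that set, so a task is ready when all of its dependencies are satisfied.
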
@@ -11,9 +11,9 @@ import { execSync } from 'node:child_process';
 import fs from 'node:fs';
 import os from 'node:os';
 import path from 'node:path';
-import { afterEach, beforeEach, describe, expect, it } from 'vitest';
 import { createTask, createTasksFile } from '@tm/core/testing';
-import { getCliBinPath } from '../../helpers/test-utils';
+import { afterEach, beforeEach, describe, expect, it } from 'vitest';
+import { getCliBinPath } from '../../helpers/test-utils.js';

 // Capture initial working directory at module load time
 const initialCwd = process.cwd();

@@ -12,7 +12,7 @@ import os from 'node:os';
 import path from 'node:path';
 import { afterEach, beforeEach, describe, expect, it } from 'vitest';
 import { createSubtask, createTask, createTasksFile } from '@tm/core/testing';
-import { getCliBinPath } from '../../helpers/test-utils';
+import { getCliBinPath } from '../../helpers/test-utils.js';

 // Capture initial working directory at module load time
 const initialCwd = process.cwd();

@@ -12,7 +12,7 @@ import os from 'node:os';
 import path from 'node:path';
 import { afterEach, beforeEach, describe, expect, it } from 'vitest';
 import { createTask, createTasksFile } from '@tm/core/testing';
-import { getCliBinPath } from '../../helpers/test-utils';
+import { getCliBinPath } from '../../helpers/test-utils.js';

 // Capture initial working directory at module load time
 const initialCwd = process.cwd();

@@ -13,7 +13,7 @@ import os from 'node:os';
 import path from 'node:path';
 import { afterEach, beforeEach, describe, expect, it } from 'vitest';
 import { createTask, createTasksFile } from '@tm/core/testing';
-import { getCliBinPath } from '../../helpers/test-utils';
+import { getCliBinPath } from '../../helpers/test-utils.js';

 // Capture initial working directory at module load time
 const initialCwd = process.cwd();

@@ -12,7 +12,7 @@ import os from 'node:os';
 import path from 'node:path';
 import { afterEach, beforeEach, describe, expect, it } from 'vitest';
 import { createTask, createTasksFile, createSubtask } from '@tm/core/testing';
-import { getCliBinPath } from '../../helpers/test-utils';
+import { getCliBinPath } from '../../helpers/test-utils.js';

 // Capture initial working directory at module load time
 const initialCwd = process.cwd();

@@ -20,7 +20,7 @@ import fs from 'node:fs';
 import os from 'node:os';
 import path from 'node:path';
 import { afterEach, beforeEach, describe, expect, it } from 'vitest';
-import { getCliBinPath } from '../helpers/test-utils';
+import { getCliBinPath } from '../helpers/test-utils.js';

 // Capture initial working directory at module load time
 const initialCwd = process.cwd();
@@ -26,9 +26,6 @@ vi.mock('fs-extra', () => ({
 }));

 describe('Autopilot Shared Utilities', () => {
-	const projectRoot = '/test/project';
-	const statePath = `${projectRoot}/.taskmaster/workflow-state.json`;
-
 	beforeEach(() => {
 		vi.clearAllMocks();
 	});
@@ -8,7 +8,7 @@
 		"sourceMap": true,
 		"outDir": "./dist",
 		"baseUrl": ".",
-		"rootDir": "./src",
+		"rootDir": ".",
 		"strict": true,
 		"noImplicitAny": true,
 		"strictNullChecks": true,
@@ -26,11 +26,11 @@
 		"forceConsistentCasingInFileNames": true,
 		"moduleResolution": "NodeNext",
 		"moduleDetection": "force",
-		"types": ["node"],
+		"types": ["node", "vitest/globals"],
 		"resolveJsonModule": true,
 		"isolatedModules": true,
 		"allowImportingTsExtensions": false
 	},
-	"include": ["src/**/*"],
-	"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
+	"include": ["src/**/*", "tests/**/*"],
+	"exclude": ["node_modules", "dist"]
 }
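
Note on the change above: adding "vitest/globals" to types makes the describe/it/expect globals type-check inside the newly included tests/**/* files; the runtime globals still come from Vitest, which typically requires globals to be enabled in its config, e.g. (illustrative; the repo's actual Vitest config is not shown in this diff):

	import { defineConfig } from 'vitest/config';

	export default defineConfig({
		test: { globals: true }
	});
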
@@ -8,7 +8,7 @@
 		"sourceMap": true,
 		"outDir": "./dist",
 		"baseUrl": ".",
-		"rootDir": "./src",
+		"rootDir": ".",
 		"strict": true,
 		"noImplicitAny": true,
 		"strictNullChecks": true,
@@ -26,11 +26,11 @@
 		"forceConsistentCasingInFileNames": true,
 		"moduleResolution": "NodeNext",
 		"moduleDetection": "force",
-		"types": ["node"],
+		"types": ["node", "vitest/globals"],
 		"resolveJsonModule": true,
 		"isolatedModules": true,
 		"allowImportingTsExtensions": false
 	},
-	"include": ["src/**/*"],
-	"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
+	"include": ["src/**/*", "tests/**/*"],
+	"exclude": ["node_modules", "dist"]
 }
@@ -117,6 +117,8 @@ describe('isAuthenticationError', () => {
 	it('should return true for APICallError with 401 exit code', () => {
 		const error = new APICallError({
 			message: 'Unauthorized',
+			url: 'grok-cli://command',
+			requestBodyValues: {},
 			data: { exitCode: 401 }
 		});
 		expect(isAuthenticationError(error)).toBe(true);
@@ -132,13 +134,19 @@ describe('isTimeoutError', () => {
 	it('should return true for timeout APICallError', () => {
 		const error = new APICallError({
 			message: 'Timeout',
+			url: 'grok-cli://command',
+			requestBodyValues: {},
 			data: { code: 'TIMEOUT' }
 		});
 		expect(isTimeoutError(error)).toBe(true);
 	});

 	it('should return false for other errors', () => {
-		const error = new APICallError({ message: 'Other error' });
+		const error = new APICallError({
+			message: 'Other error',
+			url: 'grok-cli://command',
+			requestBodyValues: {}
+		});
 		expect(isTimeoutError(error)).toBe(false);
 	});
 });
@@ -147,13 +155,18 @@ describe('isInstallationError', () => {
 	it('should return true for installation APICallError', () => {
 		const error = new APICallError({
 			message: 'Not installed',
-			url: 'grok-cli://installation'
+			url: 'grok-cli://installation',
+			requestBodyValues: {}
 		});
 		expect(isInstallationError(error)).toBe(true);
 	});

 	it('should return false for other errors', () => {
-		const error = new APICallError({ message: 'Other error' });
+		const error = new APICallError({
+			message: 'Other error',
+			url: 'grok-cli://command',
+			requestBodyValues: {}
+		});
 		expect(isInstallationError(error)).toBe(false);
 	});
 });
@@ -167,6 +180,8 @@ describe('getErrorMetadata', () => {
 		};
 		const error = new APICallError({
 			message: 'Test error',
+			url: 'grok-cli://command',
+			requestBodyValues: {},
 			data: metadata
 		});

@@ -181,7 +196,11 @@ describe('getErrorMetadata', () => {
 	});

 	it('should return undefined for APICallError without data', () => {
-		const error = new APICallError({ message: 'Test error' });
+		const error = new APICallError({
+			message: 'Test error',
+			url: 'grok-cli://command',
+			requestBodyValues: {}
+		});
 		const result = getErrorMetadata(error);
 		expect(result).toBeUndefined();
 	});
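
Note: the classifier implementations are not shown in this diff. A sketch consistent with the assertions above, assuming the data payload shape used in these tests:

	import { APICallError } from 'ai';

	function isTimeoutError(error: unknown): boolean {
		if (!APICallError.isInstance(error)) return false;
		// The tests place a provider-specific payload on `data`
		const data = error.data as { code?: string } | undefined;
		return data?.code === 'TIMEOUT';
	}

The url and requestBodyValues fields were added to each constructor call because APICallError requires them; they carry no meaning for the classification itself.
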
@@ -37,7 +37,7 @@ describe('createGrokCli', () => {
 		};
 		const provider = createGrokCli({ defaultSettings });

-		const model = provider('grok-2-mini');
+		provider('grok-2-mini');

 		expect(GrokCliLanguageModel).toHaveBeenCalledWith({
 			id: 'grok-2-mini',
@@ -50,7 +50,7 @@ describe('createGrokCli', () => {
 		const provider = createGrokCli({ defaultSettings });

 		const modelSettings = { apiKey: 'test-key' };
-		const model = provider('grok-2', modelSettings);
+		provider('grok-2', modelSettings);

 		expect(GrokCliLanguageModel).toHaveBeenCalledWith({
 			id: 'grok-2',
@@ -60,7 +60,7 @@ describe('createGrokCli', () => {

 	it('should create models via languageModel method', () => {
 		const provider = createGrokCli();
-		const model = provider.languageModel('grok-2-mini', { timeout: 1000 });
+		provider.languageModel('grok-2-mini', { timeout: 1000 });

 		expect(GrokCliLanguageModel).toHaveBeenCalledWith({
 			id: 'grok-2-mini',
@@ -70,7 +70,7 @@ describe('createGrokCli', () => {

 	it('should create models via chat method (alias)', () => {
 		const provider = createGrokCli();
-		const model = provider.chat('grok-2');
+		provider.chat('grok-2');

 		expect(GrokCliLanguageModel).toHaveBeenCalledWith({
 			id: 'grok-2',
@@ -111,7 +111,7 @@ describe('default grokCli provider', () => {
 	});

 	it('should create models with default configuration', () => {
-		const model = grokCli('grok-2-mini');
+		grokCli('grok-2-mini');

 		expect(GrokCliLanguageModel).toHaveBeenCalledWith({
 			id: 'grok-2-mini',
@@ -8,7 +8,7 @@
 		"sourceMap": true,
 		"outDir": "./dist",
 		"baseUrl": ".",
-		"rootDir": "./src",
+		"rootDir": ".",
 		"strict": true,
 		"noImplicitAny": true,
 		"strictNullChecks": true,
@@ -26,11 +26,11 @@
 		"forceConsistentCasingInFileNames": true,
 		"moduleResolution": "bundler",
 		"moduleDetection": "force",
-		"types": ["node"],
+		"types": ["node", "vitest/globals"],
 		"resolveJsonModule": true,
 		"isolatedModules": true,
 		"allowImportingTsExtensions": false
 	},
-	"include": ["src/**/*"],
-	"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
+	"include": ["src/**/*", "tests/**/*"],
+	"exclude": ["node_modules", "dist"]
 }

@@ -9,7 +9,7 @@
 		"sourceMap": true,
 		"outDir": "./dist",
 		"baseUrl": ".",
-		"rootDir": "./src",
+		"rootDir": ".",
 		"strict": true,
 		"noImplicitAny": true,
 		"strictNullChecks": true,
@@ -27,11 +27,11 @@
 		"forceConsistentCasingInFileNames": true,
 		"moduleResolution": "NodeNext",
 		"moduleDetection": "force",
-		"types": ["node"],
+		"types": ["node", "vitest/globals"],
 		"resolveJsonModule": true,
 		"isolatedModules": true,
 		"allowImportingTsExtensions": false
 	},
-	"include": ["src/**/*"],
-	"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
+	"include": ["src/**/*", "tests/**/*"],
+	"exclude": ["node_modules", "dist"]
 }
@@ -1,148 +0,0 @@
-import { describe, expect, it, vi } from 'vitest';
-import type { Tables } from '../types/database.types.js';
-import { TaskMapper } from './TaskMapper.js';
-
-type TaskRow = Tables<'tasks'>;
-
-describe('TaskMapper', () => {
-	describe('extractMetadataField', () => {
-		it('should extract string field from metadata', () => {
-			const taskRow: TaskRow = {
-				id: '123',
-				display_id: '1',
-				title: 'Test Task',
-				description: 'Test description',
-				status: 'todo',
-				priority: 'medium',
-				parent_task_id: null,
-				subtask_position: 0,
-				created_at: new Date().toISOString(),
-				updated_at: new Date().toISOString(),
-				metadata: {
-					details: 'Some details',
-					testStrategy: 'Test with unit tests'
-				},
-				complexity: null,
-				assignee_id: null,
-				estimated_hours: null,
-				actual_hours: null,
-				due_date: null,
-				completed_at: null
-			};
-
-			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
-
-			expect(task.details).toBe('Some details');
-			expect(task.testStrategy).toBe('Test with unit tests');
-		});
-
-		it('should use default value when metadata field is missing', () => {
-			const taskRow: TaskRow = {
-				id: '123',
-				display_id: '1',
-				title: 'Test Task',
-				description: 'Test description',
-				status: 'todo',
-				priority: 'medium',
-				parent_task_id: null,
-				subtask_position: 0,
-				created_at: new Date().toISOString(),
-				updated_at: new Date().toISOString(),
-				metadata: {},
-				complexity: null,
-				assignee_id: null,
-				estimated_hours: null,
-				actual_hours: null,
-				due_date: null,
-				completed_at: null
-			};
-
-			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
-
-			expect(task.details).toBe('');
-			expect(task.testStrategy).toBe('');
-		});
-
-		it('should use default value when metadata is null', () => {
-			const taskRow: TaskRow = {
-				id: '123',
-				display_id: '1',
-				title: 'Test Task',
-				description: 'Test description',
-				status: 'todo',
-				priority: 'medium',
-				parent_task_id: null,
-				subtask_position: 0,
-				created_at: new Date().toISOString(),
-				updated_at: new Date().toISOString(),
-				metadata: null,
-				complexity: null,
-				assignee_id: null,
-				estimated_hours: null,
-				actual_hours: null,
-				due_date: null,
-				completed_at: null
-			};
-
-			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
-
-			expect(task.details).toBe('');
-			expect(task.testStrategy).toBe('');
-		});
-
-		it('should use default value and warn when metadata field has wrong type', () => {
-			const consoleWarnSpy = vi
-				.spyOn(console, 'warn')
-				.mockImplementation(() => {});
-
-			const taskRow: TaskRow = {
-				id: '123',
-				display_id: '1',
-				title: 'Test Task',
-				description: 'Test description',
-				status: 'todo',
-				priority: 'medium',
-				parent_task_id: null,
-				subtask_position: 0,
-				created_at: new Date().toISOString(),
-				updated_at: new Date().toISOString(),
-				metadata: {
-					details: 12345, // Wrong type: number instead of string
-					testStrategy: ['test1', 'test2'] // Wrong type: array instead of string
-				},
-				complexity: null,
-				assignee_id: null,
-				estimated_hours: null,
-				actual_hours: null,
-				due_date: null,
-				completed_at: null
-			};
-
-			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
-
-			// Should use empty string defaults when type doesn't match
-			expect(task.details).toBe('');
-			expect(task.testStrategy).toBe('');
-
-			// Should have logged warnings
-			expect(consoleWarnSpy).toHaveBeenCalledWith(
-				expect.stringContaining('Type mismatch in metadata field "details"')
-			);
-			expect(consoleWarnSpy).toHaveBeenCalledWith(
-				expect.stringContaining(
-					'Type mismatch in metadata field "testStrategy"'
-				)
-			);
-
-			consoleWarnSpy.mockRestore();
-		});
-	});
-
-	describe('mapStatus', () => {
-		it('should map database status to internal status', () => {
-			expect(TaskMapper.mapStatus('todo')).toBe('pending');
-			expect(TaskMapper.mapStatus('in_progress')).toBe('in-progress');
-			expect(TaskMapper.mapStatus('done')).toBe('done');
-		});
-	});
-});
@@ -6,13 +6,11 @@ import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest';
 import { AuthDomain } from './auth-domain.js';

 describe('AuthDomain', () => {
 	let authDomain: AuthDomain;
-	let originalEnv: NodeJS.ProcessEnv;

 	beforeEach(() => {
-		// Save original environment
-		originalEnv = { ...process.env };
 		authDomain = new AuthDomain();
 	});

 	afterEach(() => {
@@ -42,7 +42,8 @@ export class AuthManager {

 	private constructor(config?: Partial<AuthConfig>) {
 		this.contextStore = ContextStore.getInstance();
-		this.supabaseClient = new SupabaseAuthClient();
+		// Use singleton SupabaseAuthClient to prevent refresh token race conditions
+		this.supabaseClient = SupabaseAuthClient.getInstance();

 		// Initialize session manager (handles session lifecycle)
 		this.sessionManager = new SessionManager(
@@ -75,10 +76,12 @@ export class AuthManager {

 	/**
 	 * Reset the singleton instance (useful for testing)
+	 * Also resets SupabaseAuthClient to ensure clean state for test isolation
 	 */
 	static resetInstance(): void {
 		AuthManager.instance = null;
 		ContextStore.resetInstance();
+		SupabaseAuthClient.resetInstance();
 	}

 	/**
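
Note: because resetInstance() now cascades into ContextStore and SupabaseAuthClient, a test suite gets a fully clean slate with a single call, e.g. (illustrative usage, not part of the diff):

	beforeEach(() => {
		AuthManager.resetInstance(); // also resets ContextStore and SupabaseAuthClient
	});
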
@@ -4,7 +4,6 @@
 */

 import { afterEach, beforeEach, describe, expect, it } from 'vitest';
 import fs from 'fs/promises';
-import fsSync from 'fs';
 import os from 'os';
 import path from 'path';

@@ -16,7 +16,7 @@ describe('ConfigMerger', () => {
 	it('should add configuration source', () => {
 		const source = {
 			name: 'test',
-			config: { test: true },
+			config: { test: true } as any,
 			precedence: 1
 		};

@@ -48,13 +48,13 @@ describe('ConfigMerger', () => {
 	it('should merge configurations based on precedence', () => {
 		merger.addSource({
 			name: 'low',
-			config: { a: 1, b: 2 },
+			config: { a: 1, b: 2 } as any,
 			precedence: 1
 		});

 		merger.addSource({
 			name: 'high',
-			config: { a: 3, c: 4 },
+			config: { a: 3, c: 4 } as any,
 			precedence: 2
 		});

@@ -72,8 +72,8 @@ describe('ConfigMerger', () => {
 			name: 'base',
 			config: {
 				models: { main: 'model1', fallback: 'model2' },
-				storage: { type: 'file' as const }
-			},
+				storage: { type: 'file' }
+			} as any,
 			precedence: 1
 		});

@@ -81,8 +81,8 @@ describe('ConfigMerger', () => {
 			name: 'override',
 			config: {
 				models: { main: 'model3' },
-				storage: { encoding: 'utf8' as const }
-			},
+				storage: { encoding: 'utf8' }
+			} as any,
 			precedence: 2
 		});

@@ -103,17 +103,17 @@ describe('ConfigMerger', () => {
 	it('should handle arrays by replacement', () => {
 		merger.addSource({
 			name: 'base',
-			config: { items: [1, 2, 3] },
+			config: { items: [1, 2, 3] } as any,
 			precedence: 1
 		});

 		merger.addSource({
 			name: 'override',
-			config: { items: [4, 5] },
+			config: { items: [4, 5] } as any,
 			precedence: 2
 		});

-		const result = merger.merge();
+		const result = merger.merge() as any;

 		expect(result.items).toEqual([4, 5]); // Arrays are replaced, not merged
 	});
@@ -121,7 +121,7 @@ describe('ConfigMerger', () => {
 	it('should ignore null and undefined values', () => {
 		merger.addSource({
 			name: 'base',
-			config: { a: 1, b: 2 },
+			config: { a: 1, b: 2 } as any,
 			precedence: 1
 		});

@@ -148,23 +148,23 @@ describe('ConfigMerger', () => {
 	it('should use CONFIG_PRECEDENCE constants correctly', () => {
 		merger.addSource({
 			name: 'defaults',
-			config: { level: 'default' },
+			config: { level: 'default' } as any,
 			precedence: CONFIG_PRECEDENCE.DEFAULTS
 		});

 		merger.addSource({
 			name: 'local',
-			config: { level: 'local' },
+			config: { level: 'local' } as any,
 			precedence: CONFIG_PRECEDENCE.LOCAL
 		});

 		merger.addSource({
 			name: 'environment',
-			config: { level: 'env' },
+			config: { level: 'env' } as any,
 			precedence: CONFIG_PRECEDENCE.ENVIRONMENT
 		});

-		const result = merger.merge();
+		const result = merger.merge() as any;

 		expect(result.level).toBe('env'); // Highest precedence wins
 	});
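
Note: ConfigMerger's merge behavior is unchanged by this commit (only the test fixtures gained `as any` casts). A minimal sketch of the semantics the tests above rely on — deep-merge plain objects, replace arrays and scalars, ignore null/undefined, highest precedence wins — under assumed names:

	type Source = { name: string; config: Record<string, unknown>; precedence: number };

	function merge(sources: Source[]): Record<string, unknown> {
		const result: Record<string, unknown> = {};
		// Apply lowest precedence first so higher-precedence sources override
		for (const { config } of [...sources].sort((a, b) => a.precedence - b.precedence)) {
			deepAssign(result, config);
		}
		return result;
	}

	function deepAssign(target: Record<string, unknown>, source: Record<string, unknown>): void {
		for (const [key, value] of Object.entries(source)) {
			if (value === null || value === undefined) continue; // ignored, per the tests
			const current = target[key];
			if (isPlainObject(value) && isPlainObject(current)) {
				deepAssign(current, value); // nested objects merge
			} else {
				target[key] = value; // arrays and scalars are replaced
			}
		}
	}

	function isPlainObject(v: unknown): v is Record<string, unknown> {
		return typeof v === 'object' && v !== null && !Array.isArray(v);
	}
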
@@ -1,324 +0,0 @@
-/**
- * @fileoverview Unit tests for ConfigPersistence service
- */
-
-import fs from 'node:fs/promises';
-import type { PartialConfiguration } from '@tm/core/common/interfaces/configuration.interface.js';
-import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-import { ConfigPersistence } from './config-persistence.service.js';
-
-vi.mock('node:fs', () => ({
-	promises: {
-		readFile: vi.fn(),
-		writeFile: vi.fn(),
-		mkdir: vi.fn(),
-		unlink: vi.fn(),
-		access: vi.fn(),
-		readdir: vi.fn(),
-		rename: vi.fn()
-	}
-}));
-
-describe('ConfigPersistence', () => {
-	let persistence: ConfigPersistence;
-	const testProjectRoot = '/test/project';
-
-	beforeEach(() => {
-		persistence = new ConfigPersistence(testProjectRoot);
-		vi.clearAllMocks();
-	});
-
-	afterEach(() => {
-		vi.restoreAllMocks();
-	});
-
-	describe('saveConfig', () => {
-		const mockConfig: PartialConfiguration = {
-			models: { main: 'test-model', fallback: 'test-fallback' },
-			storage: {
-				type: 'file' as const,
-				enableBackup: true,
-				maxBackups: 5,
-				enableCompression: true,
-				encoding: 'utf-8',
-				atomicOperations: true
-			}
-		};
-
-		it('should save configuration to file', async () => {
-			vi.mocked(fs.mkdir).mockResolvedValue(undefined);
-			vi.mocked(fs.writeFile).mockResolvedValue(undefined);
-
-			await persistence.saveConfig(mockConfig);
-
-			expect(fs.mkdir).toHaveBeenCalledWith('/test/project/.taskmaster', {
-				recursive: true
-			});
-
-			expect(fs.writeFile).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/config.json',
-				JSON.stringify(mockConfig, null, 2),
-				'utf-8'
-			);
-		});
-
-		it('should use atomic write when specified', async () => {
-			vi.mocked(fs.mkdir).mockResolvedValue(undefined);
-			vi.mocked(fs.writeFile).mockResolvedValue(undefined);
-			vi.mocked(fs.rename).mockResolvedValue(undefined);
-
-			await persistence.saveConfig(mockConfig, { atomic: true });
-
-			// Should write to temp file first
-			expect(fs.writeFile).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/config.json.tmp',
-				JSON.stringify(mockConfig, null, 2),
-				'utf-8'
-			);
-
-			// Then rename to final location
-			expect(fs.rename).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/config.json.tmp',
-				'/test/project/.taskmaster/config.json'
-			);
-		});
-
-		it('should create backup when requested', async () => {
-			vi.mocked(fs.mkdir).mockResolvedValue(undefined);
-			vi.mocked(fs.writeFile).mockResolvedValue(undefined);
-			vi.mocked(fs.access).mockResolvedValue(undefined); // Config exists
-			vi.mocked(fs.readFile).mockResolvedValue('{"old": "config"}');
-			vi.mocked(fs.readdir).mockResolvedValue([]);
-
-			await persistence.saveConfig(mockConfig, { createBackup: true });
-
-			// Should create backup directory
-			expect(fs.mkdir).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/backups',
-				{ recursive: true }
-			);
-
-			// Should read existing config for backup
-			expect(fs.readFile).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/config.json',
-				'utf-8'
-			);
-
-			// Should write backup file
-			expect(fs.writeFile).toHaveBeenCalledWith(
-				expect.stringContaining('/test/project/.taskmaster/backups/config-'),
-				'{"old": "config"}',
-				'utf-8'
-			);
-		});
-
-		it('should not create backup if config does not exist', async () => {
-			vi.mocked(fs.mkdir).mockResolvedValue(undefined);
-			vi.mocked(fs.writeFile).mockResolvedValue(undefined);
-			vi.mocked(fs.access).mockRejectedValue(new Error('Not found'));
-
-			await persistence.saveConfig(mockConfig, { createBackup: true });
-
-			// Should not read or create backup
-			expect(fs.readFile).not.toHaveBeenCalled();
-			expect(fs.writeFile).toHaveBeenCalledTimes(1); // Only the main config
-		});
-
-		it('should throw TaskMasterError on save failure', async () => {
-			vi.mocked(fs.mkdir).mockRejectedValue(new Error('Disk full'));
-
-			await expect(persistence.saveConfig(mockConfig)).rejects.toThrow(
-				'Failed to save configuration'
-			);
-		});
-	});
-
-	describe('configExists', () => {
-		it('should return true when config exists', async () => {
-			vi.mocked(fs.access).mockResolvedValue(undefined);
-
-			const exists = await persistence.configExists();
-
-			expect(fs.access).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/config.json'
-			);
-			expect(exists).toBe(true);
-		});
-
-		it('should return false when config does not exist', async () => {
-			vi.mocked(fs.access).mockRejectedValue(new Error('Not found'));
-
-			const exists = await persistence.configExists();
-
-			expect(exists).toBe(false);
-		});
-	});
-
-	describe('deleteConfig', () => {
-		it('should delete configuration file', async () => {
-			vi.mocked(fs.unlink).mockResolvedValue(undefined);
-
-			await persistence.deleteConfig();
-
-			expect(fs.unlink).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/config.json'
-			);
-		});
-
-		it('should not throw when file does not exist', async () => {
-			const error = new Error('File not found') as any;
-			error.code = 'ENOENT';
-			vi.mocked(fs.unlink).mockRejectedValue(error);
-
-			await expect(persistence.deleteConfig()).resolves.not.toThrow();
-		});
-
-		it('should throw TaskMasterError for other errors', async () => {
-			vi.mocked(fs.unlink).mockRejectedValue(new Error('Permission denied'));
-
-			await expect(persistence.deleteConfig()).rejects.toThrow(
-				'Failed to delete configuration'
-			);
-		});
-	});
-
-	describe('getBackups', () => {
-		it('should return list of backup files sorted newest first', async () => {
-			vi.mocked(fs.readdir).mockResolvedValue([
-				'config-2024-01-01T10-00-00-000Z.json',
-				'config-2024-01-02T10-00-00-000Z.json',
-				'config-2024-01-03T10-00-00-000Z.json',
-				'other-file.txt'
-			] as any);
-
-			const backups = await persistence.getBackups();
-
-			expect(fs.readdir).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/backups'
-			);
-
-			expect(backups).toEqual([
-				'config-2024-01-03T10-00-00-000Z.json',
-				'config-2024-01-02T10-00-00-000Z.json',
-				'config-2024-01-01T10-00-00-000Z.json'
-			]);
-		});
-
-		it('should return empty array when backup directory does not exist', async () => {
-			vi.mocked(fs.readdir).mockRejectedValue(new Error('Not found'));
-
-			const backups = await persistence.getBackups();
-
-			expect(backups).toEqual([]);
-		});
-
-		it('should filter out non-backup files', async () => {
-			vi.mocked(fs.readdir).mockResolvedValue([
-				'config-2024-01-01T10-00-00-000Z.json',
-				'README.md',
-				'.DS_Store',
-				'config.json',
-				'config-backup.json' // Wrong format
-			] as any);
-
-			const backups = await persistence.getBackups();
-
-			expect(backups).toEqual(['config-2024-01-01T10-00-00-000Z.json']);
-		});
-	});
-
-	describe('restoreFromBackup', () => {
-		const backupFile = 'config-2024-01-01T10-00-00-000Z.json';
-		const backupContent = '{"restored": "config"}';
-
-		it('should restore configuration from backup', async () => {
-			vi.mocked(fs.readFile).mockResolvedValue(backupContent);
-			vi.mocked(fs.writeFile).mockResolvedValue(undefined);
-
-			await persistence.restoreFromBackup(backupFile);
-
-			expect(fs.readFile).toHaveBeenCalledWith(
-				`/test/project/.taskmaster/backups/${backupFile}`,
-				'utf-8'
-			);
-
-			expect(fs.writeFile).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/config.json',
-				backupContent,
-				'utf-8'
-			);
-		});
-
-		it('should throw TaskMasterError when backup file not found', async () => {
-			vi.mocked(fs.readFile).mockRejectedValue(new Error('File not found'));
-
-			await expect(
-				persistence.restoreFromBackup('nonexistent.json')
-			).rejects.toThrow('Failed to restore from backup');
-		});
-
-		it('should throw TaskMasterError on write failure', async () => {
-			vi.mocked(fs.readFile).mockResolvedValue(backupContent);
-			vi.mocked(fs.writeFile).mockRejectedValue(new Error('Disk full'));
-
-			await expect(persistence.restoreFromBackup(backupFile)).rejects.toThrow(
-				'Failed to restore from backup'
-			);
-		});
-	});
-
-	describe('backup management', () => {
-		it('should clean old backups when limit exceeded', async () => {
-			vi.mocked(fs.mkdir).mockResolvedValue(undefined);
-			vi.mocked(fs.writeFile).mockResolvedValue(undefined);
-			vi.mocked(fs.access).mockResolvedValue(undefined);
-			vi.mocked(fs.readFile).mockResolvedValue('{"old": "config"}');
-			vi.mocked(fs.unlink).mockResolvedValue(undefined);
-
-			// Mock 7 existing backups
-			vi.mocked(fs.readdir).mockResolvedValue([
-				'config-2024-01-01T10-00-00-000Z.json',
-				'config-2024-01-02T10-00-00-000Z.json',
-				'config-2024-01-03T10-00-00-000Z.json',
-				'config-2024-01-04T10-00-00-000Z.json',
-				'config-2024-01-05T10-00-00-000Z.json',
-				'config-2024-01-06T10-00-00-000Z.json',
-				'config-2024-01-07T10-00-00-000Z.json'
-			] as any);
-
-			await persistence.saveConfig({}, { createBackup: true });
-
-			// Should delete oldest backups (keeping 5)
-			expect(fs.unlink).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/backups/config-2024-01-01T10-00-00-000Z.json'
-			);
-			expect(fs.unlink).toHaveBeenCalledWith(
-				'/test/project/.taskmaster/backups/config-2024-01-02T10-00-00-000Z.json'
-			);
-		});
-
-		it('should handle backup cleanup errors gracefully', async () => {
-			vi.mocked(fs.mkdir).mockResolvedValue(undefined);
-			vi.mocked(fs.writeFile).mockResolvedValue(undefined);
-			vi.mocked(fs.access).mockResolvedValue(undefined);
-			vi.mocked(fs.readFile).mockResolvedValue('{"old": "config"}');
-			vi.mocked(fs.readdir).mockResolvedValue(['config-old.json'] as any);
-			vi.mocked(fs.unlink).mockRejectedValue(new Error('Permission denied'));
-
-			// Mock console.warn to verify it's called
-			const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
-
-			// Should not throw even if cleanup fails
-			await expect(
-				persistence.saveConfig({}, { createBackup: true })
-			).resolves.not.toThrow();
-
-			expect(warnSpy).toHaveBeenCalledWith(
-				'Failed to clean old backups:',
-				expect.any(Error)
-			);
-
-			warnSpy.mockRestore();
-		});
-	});
-});
@@ -1,343 +0,0 @@
-/**
- * @fileoverview Unit tests for EnvironmentConfigProvider service
- */
-
-import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-import { EnvironmentConfigProvider } from './environment-config-provider.service.js';
-
-describe('EnvironmentConfigProvider', () => {
-	let provider: EnvironmentConfigProvider;
-	const originalEnv = { ...process.env };
-
-	beforeEach(() => {
-		// Clear all TASKMASTER_ env vars
-		Object.keys(process.env).forEach((key) => {
-			if (key.startsWith('TASKMASTER_')) {
-				delete process.env[key];
-			}
-		});
-		provider = new EnvironmentConfigProvider();
-	});
-
-	afterEach(() => {
-		// Restore original environment
-		process.env = { ...originalEnv };
-	});
-
-	describe('loadConfig', () => {
-		it('should load configuration from environment variables', () => {
-			process.env.TASKMASTER_STORAGE_TYPE = 'api';
-			process.env.TASKMASTER_API_ENDPOINT = 'https://api.example.com';
-			process.env.TASKMASTER_MODEL_MAIN = 'gpt-4';
-
-			const config = provider.loadConfig();
-
-			expect(config).toEqual({
-				storage: {
-					type: 'api',
-					apiEndpoint: 'https://api.example.com'
-				},
-				models: {
-					main: 'gpt-4'
-				}
-			});
-		});
-
-		it('should return empty object when no env vars are set', () => {
-			const config = provider.loadConfig();
-			expect(config).toEqual({});
-		});
-
-		it('should skip runtime state variables', () => {
-			process.env.TASKMASTER_TAG = 'feature-branch';
-			process.env.TASKMASTER_MODEL_MAIN = 'claude-3';
-
-			const config = provider.loadConfig();
-
-			expect(config).toEqual({
-				models: { main: 'claude-3' }
-			});
-			expect(config).not.toHaveProperty('activeTag');
-		});
-
-		it('should validate storage type values', () => {
-			// Mock console.warn to check validation
-			const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
-
-			process.env.TASKMASTER_STORAGE_TYPE = 'invalid';
-
-			const config = provider.loadConfig();
-
-			expect(config).toEqual({});
-			expect(warnSpy).toHaveBeenCalledWith(
-				'Invalid value for TASKMASTER_STORAGE_TYPE: invalid'
-			);
-
-			warnSpy.mockRestore();
-		});
-
-		it('should accept valid storage type values', () => {
-			process.env.TASKMASTER_STORAGE_TYPE = 'file';
-			let config = provider.loadConfig();
-			expect(config.storage?.type).toBe('file');
-
-			process.env.TASKMASTER_STORAGE_TYPE = 'api';
-			provider = new EnvironmentConfigProvider(); // Reset provider
-			config = provider.loadConfig();
-			expect(config.storage?.type).toBe('api');
-		});
-
-		it('should handle nested configuration paths', () => {
-			process.env.TASKMASTER_MODEL_MAIN = 'model1';
-			process.env.TASKMASTER_MODEL_RESEARCH = 'model2';
-			process.env.TASKMASTER_MODEL_FALLBACK = 'model3';
-
-			const config = provider.loadConfig();
-
-			expect(config).toEqual({
-				models: {
-					main: 'model1',
-					research: 'model2',
-					fallback: 'model3'
-				}
-			});
-		});
-
-		it('should handle custom response language', () => {
-			process.env.TASKMASTER_RESPONSE_LANGUAGE = 'Spanish';
-
-			const config = provider.loadConfig();
-
-			expect(config).toEqual({
-				custom: {
-					responseLanguage: 'Spanish'
-				}
-			});
-		});
-
-		it('should ignore empty string values', () => {
-			process.env.TASKMASTER_MODEL_MAIN = '';
-			process.env.TASKMASTER_MODEL_FALLBACK = 'fallback-model';
-
-			const config = provider.loadConfig();
-
-			expect(config).toEqual({
-				models: {
-					fallback: 'fallback-model'
-				}
-			});
-		});
-	});
-
-	describe('getRuntimeState', () => {
-		it('should extract runtime state variables', () => {
-			process.env.TASKMASTER_TAG = 'develop';
-			process.env.TASKMASTER_MODEL_MAIN = 'model'; // Should not be included
-
-			const state = provider.getRuntimeState();
-
-			expect(state).toEqual({
-				activeTag: 'develop'
-			});
-		});
-
-		it('should return empty object when no runtime state vars', () => {
-			process.env.TASKMASTER_MODEL_MAIN = 'model';
-
-			const state = provider.getRuntimeState();
-
-			expect(state).toEqual({});
-		});
-	});
-
-	describe('hasEnvVar', () => {
-		it('should return true when env var exists', () => {
-			process.env.TASKMASTER_MODEL_MAIN = 'test';
-
-			expect(provider.hasEnvVar('TASKMASTER_MODEL_MAIN')).toBe(true);
-		});
-
-		it('should return false when env var does not exist', () => {
-			expect(provider.hasEnvVar('TASKMASTER_NONEXISTENT')).toBe(false);
-		});
-
-		it('should return false for undefined values', () => {
-			process.env.TASKMASTER_TEST = undefined as any;
-
-			expect(provider.hasEnvVar('TASKMASTER_TEST')).toBe(false);
-		});
-	});
-
-	describe('getAllTaskmasterEnvVars', () => {
-		it('should return all TASKMASTER_ prefixed variables', () => {
-			process.env.TASKMASTER_VAR1 = 'value1';
-			process.env.TASKMASTER_VAR2 = 'value2';
-			process.env.OTHER_VAR = 'other';
-			process.env.TASK_MASTER = 'wrong-prefix';
-
-			const vars = provider.getAllTaskmasterEnvVars();
-
-			expect(vars).toEqual({
-				TASKMASTER_VAR1: 'value1',
-				TASKMASTER_VAR2: 'value2'
-			});
-		});
-
-		it('should return empty object when no TASKMASTER_ vars', () => {
-			process.env.OTHER_VAR = 'value';
-
-			const vars = provider.getAllTaskmasterEnvVars();
-
-			expect(vars).toEqual({});
-		});
-
-		it('should filter out undefined values', () => {
-			process.env.TASKMASTER_DEFINED = 'value';
-			process.env.TASKMASTER_UNDEFINED = undefined as any;
-
-			const vars = provider.getAllTaskmasterEnvVars();
-
-			expect(vars).toEqual({
-				TASKMASTER_DEFINED: 'value'
-			});
-		});
-	});
-
-	describe('custom mappings', () => {
-		it('should use custom mappings when provided', () => {
-			const customMappings = [{ env: 'CUSTOM_VAR', path: ['custom', 'value'] }];
-
-			const customProvider = new EnvironmentConfigProvider(customMappings);
-			process.env.CUSTOM_VAR = 'test-value';
-
-			const config = customProvider.loadConfig();
-
-			expect(config).toEqual({
-				custom: {
-					value: 'test-value'
-				}
-			});
-		});
-
-		it('should add new mapping with addMapping', () => {
-			process.env.NEW_MAPPING = 'new-value';
-
-			provider.addMapping({
-				env: 'NEW_MAPPING',
-				path: ['new', 'mapping']
-			});
-
-			const config = provider.loadConfig();
-
-			expect(config).toHaveProperty('new.mapping', 'new-value');
-		});
-
-		it('should return current mappings with getMappings', () => {
-			const mappings = provider.getMappings();
-
-			expect(mappings).toBeInstanceOf(Array);
-			expect(mappings.length).toBeGreaterThan(0);
-
-			// Check for some expected mappings
-			const envNames = mappings.map((m) => m.env);
-			expect(envNames).toContain('TASKMASTER_STORAGE_TYPE');
-			expect(envNames).toContain('TASKMASTER_MODEL_MAIN');
-			expect(envNames).toContain('TASKMASTER_TAG');
-		});
-
-		it('should return copy of mappings array', () => {
-			const mappings1 = provider.getMappings();
-			const mappings2 = provider.getMappings();
-
-			expect(mappings1).not.toBe(mappings2); // Different instances
-			expect(mappings1).toEqual(mappings2); // Same content
-		});
-	});
-
-	describe('validation', () => {
-		it('should validate values when validator is provided', () => {
-			const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
-
-			process.env.TASKMASTER_STORAGE_TYPE = 'database'; // Invalid
-
-			const config = provider.loadConfig();
-
-			expect(config).toEqual({});
-			expect(warnSpy).toHaveBeenCalledWith(
-				'Invalid value for TASKMASTER_STORAGE_TYPE: database'
-			);
-
-			warnSpy.mockRestore();
-		});
-
-		it('should accept values that pass validation', () => {
-			process.env.TASKMASTER_STORAGE_TYPE = 'file';
-
-			const config = provider.loadConfig();
-
-			expect(config.storage?.type).toBe('file');
-		});
-
-		it('should work with custom validators', () => {
-			const customProvider = new EnvironmentConfigProvider([
-				{
-					env: 'CUSTOM_NUMBER',
-					path: ['custom', 'number'],
-					validate: (v) => !isNaN(Number(v))
-				}
-			]);
-
-			process.env.CUSTOM_NUMBER = '123';
-			let config = customProvider.loadConfig();
-			expect(config.custom?.number).toBe('123');
-
-			process.env.CUSTOM_NUMBER = 'not-a-number';
-			const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
-			customProvider = new EnvironmentConfigProvider([
-				{
-					env: 'CUSTOM_NUMBER',
-					path: ['custom', 'number'],
-					validate: (v) => !isNaN(Number(v))
-				}
-			]);
-			config = customProvider.loadConfig();
-			expect(config).toEqual({});
-			expect(warnSpy).toHaveBeenCalled();
-
-			warnSpy.mockRestore();
-		});
-	});
-
-	describe('edge cases', () => {
-		it('should handle special characters in values', () => {
-			process.env.TASKMASTER_API_ENDPOINT =
-				'https://api.example.com/v1?key=abc&token=xyz';
-			process.env.TASKMASTER_API_TOKEN = 'Bearer abc123!@#$%^&*()';
-
-			const config = provider.loadConfig();
-
-			expect(config.storage?.apiEndpoint).toBe(
-				'https://api.example.com/v1?key=abc&token=xyz'
-			);
-			expect(config.storage?.apiAccessToken).toBe('Bearer abc123!@#$%^&*()');
-		});
-
-		it('should handle whitespace in values', () => {
-			process.env.TASKMASTER_MODEL_MAIN = ' claude-3 ';
-
-			const config = provider.loadConfig();
-
-			// Note: We're not trimming, preserving the value as-is
-			expect(config.models?.main).toBe(' claude-3 ');
-		});
-
-		it('should handle very long values', () => {
-			const longValue = 'a'.repeat(10000);
-			process.env.TASKMASTER_API_TOKEN = longValue;
-
-			const config = provider.loadConfig();
-
-			expect(config.storage?.apiAccessToken).toBe(longValue);
-		});
-	});
-});
(File diff suppressed because it is too large.)
@@ -2,7 +2,7 @@
 * Tests for SupabaseAuthClient
 */

-import { beforeEach, describe, expect, it, vi } from 'vitest';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 import type { Session } from '@supabase/supabase-js';

 // Mock logger
@@ -34,7 +34,7 @@ import { SupabaseAuthClient } from './supabase-client.js';
 import { AuthenticationError } from '../../auth/types.js';

 describe('SupabaseAuthClient', () => {
-	let authClient: InstanceType<typeof SupabaseAuthClient>;
+	let authClient: SupabaseAuthClient;
 	let mockSupabaseClient: any;

 	// Store original env values for cleanup
@@ -42,6 +42,9 @@ describe('SupabaseAuthClient', () => {
 	let originalSupabaseAnonKey: string | undefined;

 	beforeEach(() => {
+		// Reset singleton before each test
+		SupabaseAuthClient.resetInstance();
+
 		// Store original values
 		originalSupabaseUrl = process.env.TM_SUPABASE_URL;
 		originalSupabaseAnonKey = process.env.TM_SUPABASE_ANON_KEY;
@@ -50,7 +53,8 @@ describe('SupabaseAuthClient', () => {
 		process.env.TM_SUPABASE_URL = 'https://test.supabase.co';
 		process.env.TM_SUPABASE_ANON_KEY = 'test-anon-key';

-		authClient = new SupabaseAuthClient();
+		// Use getInstance() instead of new
+		authClient = SupabaseAuthClient.getInstance();

 		// Create mock Supabase client
 		mockSupabaseClient = {
@@ -76,6 +80,9 @@ describe('SupabaseAuthClient', () => {
 	});

 	afterEach(() => {
+		// Reset singleton after each test
+		SupabaseAuthClient.resetInstance();
+
 		// Restore original env values
 		if (originalSupabaseUrl === undefined) {
 			delete process.env.TM_SUPABASE_URL;
@@ -90,6 +97,23 @@ describe('SupabaseAuthClient', () => {
 		}
 	});

+	describe('Singleton Pattern', () => {
+		it('should return the same instance on multiple getInstance() calls', () => {
+			const instance1 = SupabaseAuthClient.getInstance();
+			const instance2 = SupabaseAuthClient.getInstance();
+
+			expect(instance1).toBe(instance2);
+		});
+
+		it('should return a new instance after resetInstance()', () => {
+			const instance1 = SupabaseAuthClient.getInstance();
+			SupabaseAuthClient.resetInstance();
+			const instance2 = SupabaseAuthClient.getInstance();
+
+			expect(instance1).not.toBe(instance2);
+		});
+	});
+
 	describe('verifyMFA', () => {
 		it('should verify MFA and refresh session to upgrade to AAL2', async () => {
 			// Mock the challenge response
@@ -13,14 +13,43 @@ import { SupabaseSessionStorage } from '../../auth/services/supabase-session-sto
|
||||
import { AuthenticationError } from '../../auth/types.js';
|
||||
|
||||
export class SupabaseAuthClient {
|
||||
private static instance: SupabaseAuthClient | null = null;
|
||||
private client: SupabaseJSClient | null = null;
|
||||
private sessionStorage: SupabaseSessionStorage;
|
||||
private logger = getLogger('SupabaseAuthClient');
|
||||
|
||||
constructor() {
|
||||
/**
|
||||
* Private constructor to enforce singleton pattern.
|
||||
* Use SupabaseAuthClient.getInstance() instead.
|
||||
*/
|
||||
private constructor() {
|
||||
this.sessionStorage = new SupabaseSessionStorage();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the singleton instance of SupabaseAuthClient.
|
||||
* This ensures only one Supabase client exists to prevent
|
||||
* "refresh_token_already_used" errors from concurrent refresh attempts.
|
||||
*/
|
||||
static getInstance(): SupabaseAuthClient {
|
||||
if (!SupabaseAuthClient.instance) {
|
||||
SupabaseAuthClient.instance = new SupabaseAuthClient();
|
||||
}
|
||||
return SupabaseAuthClient.instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the singleton instance (for testing purposes only)
|
||||
* Also nullifies the internal client to ensure no stale Supabase client
|
||||
* references persist across test resets
|
||||
*/
|
||||
static resetInstance(): void {
|
||||
if (SupabaseAuthClient.instance) {
|
||||
SupabaseAuthClient.instance.client = null;
|
||||
}
|
||||
SupabaseAuthClient.instance = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Supabase client with proper session management
|
||||
*/
|
||||
|
||||
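// A minimal usage sketch (illustrative, not from the diff), assuming only the
// getInstance()/resetInstance() API shown above: every call site shares one
// client, so only one autoRefreshToken loop can ever exist.
//
//   const a = SupabaseAuthClient.getInstance();
//   const b = SupabaseAuthClient.getInstance();
//   console.assert(a === b); // same instance, same underlying Supabase client
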
@@ -174,11 +174,15 @@ export class StorageFactory {

	/**
	 * Create API storage implementation
	 *
	 * IMPORTANT: Uses SupabaseAuthClient.getInstance() singleton to prevent
	 * "refresh_token_already_used" errors. Multiple SupabaseAuthClient instances
	 * each have their own Supabase client with autoRefreshToken enabled, causing
	 * race conditions when both try to refresh an expired token simultaneously.
	 */
	private static createApiStorage(config: Partial<IConfiguration>): ApiStorage {
		// Use our SupabaseAuthClient instead of creating a raw Supabase client
		const supabaseAuthClient = new SupabaseAuthClient();
		const supabaseClient = supabaseAuthClient.getClient();
		// Use singleton SupabaseAuthClient to prevent refresh token race conditions
		const supabaseClient = SupabaseAuthClient.getInstance().getClient();

		return new ApiStorage({
			supabaseClient,

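// Net effect of the hunk above, as a before/after sketch (illustrative only):
//
//   before: const supabaseClient = new SupabaseAuthClient().getClient();        // fresh client per call
//   after:  const supabaseClient = SupabaseAuthClient.getInstance().getClient(); // one shared client
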
@@ -1,385 +0,0 @@
/**
 * @fileoverview Unit tests for TaskEntity validation
 * Tests that validation errors are properly thrown with correct error codes
 */

import { describe, expect, it } from 'vitest';
import { TaskEntity } from './task.entity.js';
import { ERROR_CODES, TaskMasterError } from '../../../common/errors/task-master-error.js';
import type { Task } from '../../../common/types/index.js';

describe('TaskEntity', () => {
	describe('validation', () => {
		it('should create a valid task entity', () => {
			const validTask: Task = {
				id: '1',
				title: 'Test Task',
				description: 'A valid test task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: 'Some details',
				testStrategy: 'Unit tests',
				subtasks: []
			};

			const entity = new TaskEntity(validTask);

			expect(entity.id).toBe('1');
			expect(entity.title).toBe('Test Task');
			expect(entity.description).toBe('A valid test task');
			expect(entity.status).toBe('pending');
			expect(entity.priority).toBe('high');
		});

		it('should throw VALIDATION_ERROR when id is missing', () => {
			const invalidTask = {
				title: 'Test Task',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			} as any;

			expect(() => new TaskEntity(invalidTask)).toThrow(TaskMasterError);

			try {
				new TaskEntity(invalidTask);
				expect.fail('Should have thrown an error');
			} catch (error: any) {
				expect(error).toBeInstanceOf(TaskMasterError);
				expect(error.code).toBe(ERROR_CODES.VALIDATION_ERROR);
				expect(error.message).toContain('Task ID is required');
			}
		});

		it('should throw VALIDATION_ERROR when title is missing', () => {
			const invalidTask = {
				id: '1',
				title: '',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			} as Task;

			expect(() => new TaskEntity(invalidTask)).toThrow(TaskMasterError);

			try {
				new TaskEntity(invalidTask);
				expect.fail('Should have thrown an error');
			} catch (error: any) {
				expect(error).toBeInstanceOf(TaskMasterError);
				expect(error.code).toBe(ERROR_CODES.VALIDATION_ERROR);
				expect(error.message).toContain('Task title is required');
			}
		});

		it('should throw VALIDATION_ERROR when description is missing', () => {
			const invalidTask = {
				id: '1',
				title: 'Test Task',
				description: '',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			} as Task;

			expect(() => new TaskEntity(invalidTask)).toThrow(TaskMasterError);

			try {
				new TaskEntity(invalidTask);
				expect.fail('Should have thrown an error');
			} catch (error: any) {
				expect(error).toBeInstanceOf(TaskMasterError);
				expect(error.code).toBe(ERROR_CODES.VALIDATION_ERROR);
				expect(error.message).toContain('Task description is required');
			}
		});

		it('should throw VALIDATION_ERROR when title is only whitespace', () => {
			const invalidTask = {
				id: '1',
				title: ' ',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			} as Task;

			expect(() => new TaskEntity(invalidTask)).toThrow(TaskMasterError);

			try {
				new TaskEntity(invalidTask);
				expect.fail('Should have thrown an error');
			} catch (error: any) {
				expect(error).toBeInstanceOf(TaskMasterError);
				expect(error.code).toBe(ERROR_CODES.VALIDATION_ERROR);
				expect(error.message).toContain('Task title is required');
			}
		});

		it('should throw VALIDATION_ERROR when description is only whitespace', () => {
			const invalidTask = {
				id: '1',
				title: 'Test Task',
				description: ' ',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			} as Task;

			expect(() => new TaskEntity(invalidTask)).toThrow(TaskMasterError);

			try {
				new TaskEntity(invalidTask);
				expect.fail('Should have thrown an error');
			} catch (error: any) {
				expect(error).toBeInstanceOf(TaskMasterError);
				expect(error.code).toBe(ERROR_CODES.VALIDATION_ERROR);
				expect(error.message).toContain('Task description is required');
			}
		});

		it('should convert numeric id to string', () => {
			const taskWithNumericId = {
				id: 123,
				title: 'Test Task',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			} as any;

			const entity = new TaskEntity(taskWithNumericId);

			expect(entity.id).toBe('123');
			expect(typeof entity.id).toBe('string');
		});

		it('should convert dependency ids to strings', () => {
			const taskWithNumericDeps = {
				id: '1',
				title: 'Test Task',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: [1, 2, '3'] as any,
				details: '',
				testStrategy: '',
				subtasks: []
			};

			const entity = new TaskEntity(taskWithNumericDeps);

			expect(entity.dependencies).toEqual(['1', '2', '3']);
			entity.dependencies.forEach((dep) => {
				expect(typeof dep).toBe('string');
			});
		});

		it('should normalize subtask ids to strings for parent and numbers for subtask', () => {
			const taskWithSubtasks = {
				id: '1',
				title: 'Parent Task',
				description: 'A parent task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: [
					{
						id: '1' as any,
						parentId: '1',
						title: 'Subtask 1',
						description: 'First subtask',
						status: 'pending',
						priority: 'medium',
						dependencies: [],
						details: '',
						testStrategy: ''
					},
					{
						id: 2 as any,
						parentId: 1 as any,
						title: 'Subtask 2',
						description: 'Second subtask',
						status: 'pending',
						priority: 'medium',
						dependencies: [],
						details: '',
						testStrategy: ''
					}
				]
			} as Task;

			const entity = new TaskEntity(taskWithSubtasks);

			expect(entity.subtasks[0].id).toBe(1);
			expect(typeof entity.subtasks[0].id).toBe('number');
			expect(entity.subtasks[0].parentId).toBe('1');
			expect(typeof entity.subtasks[0].parentId).toBe('string');

			expect(entity.subtasks[1].id).toBe(2);
			expect(typeof entity.subtasks[1].id).toBe('number');
			expect(entity.subtasks[1].parentId).toBe('1');
			expect(typeof entity.subtasks[1].parentId).toBe('string');
		});
	});

	describe('fromObject', () => {
		it('should create TaskEntity from plain object', () => {
			const plainTask: Task = {
				id: '1',
				title: 'Test Task',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			};

			const entity = TaskEntity.fromObject(plainTask);

			expect(entity).toBeInstanceOf(TaskEntity);
			expect(entity.id).toBe('1');
			expect(entity.title).toBe('Test Task');
		});

		it('should throw validation error for invalid object', () => {
			const invalidTask = {
				id: '1',
				title: '',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: [],
				details: '',
				testStrategy: '',
				subtasks: []
			} as Task;

			expect(() => TaskEntity.fromObject(invalidTask)).toThrow(TaskMasterError);
		});
	});

	describe('fromArray', () => {
		it('should create array of TaskEntities from plain objects', () => {
			const plainTasks: Task[] = [
				{
					id: '1',
					title: 'Task 1',
					description: 'First task',
					status: 'pending',
					priority: 'high',
					dependencies: [],
					details: '',
					testStrategy: '',
					subtasks: []
				},
				{
					id: '2',
					title: 'Task 2',
					description: 'Second task',
					status: 'in-progress',
					priority: 'medium',
					dependencies: [],
					details: '',
					testStrategy: '',
					subtasks: []
				}
			];

			const entities = TaskEntity.fromArray(plainTasks);

			expect(entities).toHaveLength(2);
			expect(entities[0]).toBeInstanceOf(TaskEntity);
			expect(entities[1]).toBeInstanceOf(TaskEntity);
			expect(entities[0].id).toBe('1');
			expect(entities[1].id).toBe('2');
		});

		it('should throw validation error if any task is invalid', () => {
			const tasksWithInvalid: Task[] = [
				{
					id: '1',
					title: 'Valid Task',
					description: 'First task',
					status: 'pending',
					priority: 'high',
					dependencies: [],
					details: '',
					testStrategy: '',
					subtasks: []
				},
				{
					id: '2',
					title: 'Invalid Task',
					description: '', // Invalid - missing description
					status: 'pending',
					priority: 'medium',
					dependencies: [],
					details: '',
					testStrategy: '',
					subtasks: []
				}
			];

			expect(() => TaskEntity.fromArray(tasksWithInvalid)).toThrow(
				TaskMasterError
			);
		});
	});

	describe('toJSON', () => {
		it('should convert TaskEntity to plain object', () => {
			const taskData: Task = {
				id: '1',
				title: 'Test Task',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: ['2', '3'],
				details: 'Some details',
				testStrategy: 'Unit tests',
				subtasks: []
			};

			const entity = new TaskEntity(taskData);
			const json = entity.toJSON();

			expect(json).toEqual({
				id: '1',
				title: 'Test Task',
				description: 'A test task',
				status: 'pending',
				priority: 'high',
				dependencies: ['2', '3'],
				details: 'Some details',
				testStrategy: 'Unit tests',
				subtasks: []
			});
		});
	});
});
@@ -1,7 +1,6 @@
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { WorkflowOrchestrator } from '../orchestrators/workflow-orchestrator.js';
import { TestResultValidator } from '../services/test-result-validator.js';
import type { TestResult } from '../services/test-result-validator.types.js';
import type {
	WorkflowContext,
	WorkflowError,
@@ -1458,7 +1457,7 @@ describe('WorkflowOrchestrator - State Machine Structure', () => {
	it('should support git adapter hooks', () => {
		const gitOperations: string[] = [];

		orchestrator.onGitOperation((operation, data) => {
		orchestrator.onGitOperation((operation, _data) => {
			gitOperations.push(operation);
		});

@@ -1475,7 +1474,7 @@ describe('WorkflowOrchestrator - State Machine Structure', () => {
	it('should support executor adapter hooks', () => {
		const executions: string[] = [];

		orchestrator.onExecute((command, context) => {
		orchestrator.onExecute((command, _context) => {
			executions.push(command);
		});

@@ -1,10 +1,6 @@
import { describe, expect, it } from 'vitest';
import { TestResultValidator } from './test-result-validator.js';
import type {
	TestPhase,
	TestResult,
	ValidationResult
} from './test-result-validator.types.js';
import type { TestResult } from './test-result-validator.types.js';

describe('TestResultValidator - Input Validation', () => {
	const validator = new TestResultValidator();

@@ -1,99 +0,0 @@
/**
 * Test file documenting subpath export usage
 * This demonstrates how consumers can use granular imports for better tree-shaking
 */

import { describe, expect, it } from 'vitest';

describe('Subpath Exports', () => {
	it('should allow importing from auth subpath', async () => {
		// Instead of: import { AuthManager } from '@tm/core';
		// Use: import { AuthManager } from '@tm/core/auth';
		const authModule = await import('./auth');
		expect(authModule.AuthManager).toBeDefined();
		expect(authModule.AuthenticationError).toBeDefined();
	});

	it('should allow importing from storage subpath', async () => {
		// Instead of: import { FileStorage } from '@tm/core';
		// Use: import { FileStorage } from '@tm/core/storage';
		const storageModule = await import('./storage');
		expect(storageModule.FileStorage).toBeDefined();
		expect(storageModule.ApiStorage).toBeDefined();
		expect(storageModule.StorageFactory).toBeDefined();
	});

	it('should allow importing from config subpath', async () => {
		// Instead of: import { ConfigManager } from '@tm/core';
		// Use: import { ConfigManager } from '@tm/core/config';
		const configModule = await import('./config');
		expect(configModule.ConfigManager).toBeDefined();
	});

	it('should allow importing from errors subpath', async () => {
		// Instead of: import { TaskMasterError } from '@tm/core';
		// Use: import { TaskMasterError } from '@tm/core/errors';
		const errorsModule = await import('./errors');
		expect(errorsModule.TaskMasterError).toBeDefined();
		expect(errorsModule.ERROR_CODES).toBeDefined();
	});

	it('should allow importing from logger subpath', async () => {
		// Instead of: import { getLogger } from '@tm/core';
		// Use: import { getLogger } from '@tm/core/logger';
		const loggerModule = await import('./logger');
		expect(loggerModule.getLogger).toBeDefined();
		expect(loggerModule.createLogger).toBeDefined();
	});

	it('should allow importing from providers subpath', async () => {
		// Instead of: import { BaseProvider } from '@tm/core';
		// Use: import { BaseProvider } from '@tm/core/providers';
		const providersModule = await import('./providers');
		expect(providersModule.BaseProvider).toBeDefined();
	});

	it('should allow importing from services subpath', async () => {
		// Instead of: import { TaskService } from '@tm/core';
		// Use: import { TaskService } from '@tm/core/services';
		const servicesModule = await import('./services');
		expect(servicesModule.TaskService).toBeDefined();
	});

	it('should allow importing from utils subpath', async () => {
		// Instead of: import { generateId } from '@tm/core';
		// Use: import { generateId } from '@tm/core/utils';
		const utilsModule = await import('./utils');
		expect(utilsModule.generateId).toBeDefined();
	});
});

/**
 * Usage Examples for Consumers:
 *
 * 1. Import only authentication (smaller bundle):
 * ```typescript
 * import { AuthManager, AuthenticationError } from '@tm/core/auth';
 * ```
 *
 * 2. Import only storage (no auth code bundled):
 * ```typescript
 * import { FileStorage, StorageFactory } from '@tm/core/storage';
 * ```
 *
 * 3. Import only errors (minimal bundle):
 * ```typescript
 * import { TaskMasterError, ERROR_CODES } from '@tm/core/errors';
 * ```
 *
 * 4. Still support convenience imports (larger bundle but better DX):
 * ```typescript
 * import { AuthManager, FileStorage, TaskMasterError } from '@tm/core';
 * ```
 *
 * Benefits:
 * - Better tree-shaking: unused modules are not bundled
 * - Clearer dependencies: explicit about what parts of the library you use
 * - Faster builds: bundlers can optimize better with granular imports
 * - Smaller bundles: especially important for browser/edge deployments
 */
packages/tm-core/src/testing/auth-mocks.ts (new file, 58 lines)
@@ -0,0 +1,58 @@
/**
 * @fileoverview Shared mock implementations for auth-related tests
 *
 * These mocks provide consistent test doubles for Supabase authentication
 * components across unit and integration tests.
 *
 * USAGE:
 * ```ts
 * // In your test file, mock with the shared implementation
 * vi.mock('../path/to/supabase-session-storage.js', () => ({
 *   SupabaseSessionStorage: MockSupabaseSessionStorage
 * }));
 * ```
 */

/**
 * Mock implementation of SupabaseSessionStorage with in-memory Map storage.
 *
 * Use this for tests that need to exercise storage behavior (storing/retrieving sessions).
 * The Map-based implementation allows tests to verify session persistence.
 */
export class MockSupabaseSessionStorage {
	private data = new Map<string, string>();

	clear(): void {
		this.data.clear();
	}

	async getItem(key: string): Promise<string | null> {
		return this.data.get(key) ?? null;
	}

	async setItem(key: string, value: string): Promise<void> {
		this.data.set(key, value);
	}

	async removeItem(key: string): Promise<void> {
		this.data.delete(key);
	}
}

/**
 * Minimal mock implementation of SupabaseSessionStorage with no-op methods.
 *
 * Use this for tests that don't need to exercise storage behavior,
 * such as singleton pattern validation tests.
 */
export class MockSupabaseSessionStorageMinimal {
	clear(): void {}

	async getItem(): Promise<null> {
		return null;
	}

	async setItem(): Promise<void> {}

	async removeItem(): Promise<void> {}
}

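// A usage sketch for the Map-backed mock (hypothetical test body, assuming
// only the methods defined above):
//
//   const storage = new MockSupabaseSessionStorage();
//   await storage.setItem('sb-session', '{"access_token":"abc"}');
//   await storage.getItem('sb-session');    // => '{"access_token":"abc"}'
//   await storage.removeItem('sb-session');
//   await storage.getItem('sb-session');    // => null
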
packages/tm-core/src/testing/config-fixtures.ts (new file, 167 lines)
@@ -0,0 +1,167 @@
/**
 * @fileoverview Test fixtures for creating valid configuration data structures
 *
 * WHY FIXTURES:
 * - Ensures all required fields are present (prevents type errors)
 * - Provides consistent, realistic test data
 * - Easy to override specific fields for test scenarios
 * - Single source of truth for valid config structures
 *
 * USAGE:
 * ```ts
 * import { createApiStorageConfig } from '@tm/core/testing';
 *
 * // Create API storage config with defaults
 * const config = createApiStorageConfig();
 *
 * // Create with custom endpoint
 * const customConfig = createApiStorageConfig({
 *   storage: { apiEndpoint: 'https://custom.api.com' }
 * });
 * ```
 */

import type {
	IConfiguration,
	StorageSettings
} from '../common/interfaces/configuration.interface.js';

/**
 * Default storage settings for tests
 */
const defaultStorageSettings: Omit<StorageSettings, 'type'> = {
	enableBackup: false,
	maxBackups: 0,
	enableCompression: false,
	encoding: 'utf-8',
	atomicOperations: false
};

/**
 * Creates a valid StorageSettings object for API storage
 *
 * DEFAULTS:
 * - type: 'api'
 * - apiEndpoint: 'https://api.example.com'
 * - apiAccessToken: 'test-token'
 * - enableBackup: false
 * - maxBackups: 0
 * - enableCompression: false
 * - encoding: 'utf-8'
 * - atomicOperations: false
 */
export function createApiStorageSettings(
	overrides?: Partial<StorageSettings>
): StorageSettings {
	return {
		...defaultStorageSettings,
		type: 'api',
		apiEndpoint: 'https://api.example.com',
		apiAccessToken: 'test-token',
		...overrides
	};
}

/**
 * Creates a valid StorageSettings object for file storage
 *
 * DEFAULTS:
 * - type: 'file'
 * - basePath: '/test/project'
 * - enableBackup: false
 * - maxBackups: 0
 * - enableCompression: false
 * - encoding: 'utf-8'
 * - atomicOperations: false
 */
export function createFileStorageSettings(
	overrides?: Partial<StorageSettings>
): StorageSettings {
	return {
		...defaultStorageSettings,
		type: 'file',
		basePath: '/test/project',
		...overrides
	};
}

/**
 * Creates a partial IConfiguration with API storage settings
 *
 * DEFAULTS:
 * - storage: API storage with test endpoint and token
 * - projectPath: '/test/project'
 */
export function createApiStorageConfig(
	overrides?: Partial<{
		storage: Partial<StorageSettings>;
		projectPath: string;
	}>
): Partial<IConfiguration> {
	return {
		storage: createApiStorageSettings(overrides?.storage),
		projectPath: overrides?.projectPath ?? '/test/project'
	};
}

/**
 * Creates a partial IConfiguration with file storage settings
 *
 * DEFAULTS:
 * - storage: File storage with test base path
 * - projectPath: '/test/project'
 */
export function createFileStorageConfig(
	overrides?: Partial<{
		storage: Partial<StorageSettings>;
		projectPath: string;
	}>
): Partial<IConfiguration> {
	return {
		storage: createFileStorageSettings(overrides?.storage),
		projectPath: overrides?.projectPath ?? '/test/project'
	};
}

/**
 * Pre-built configuration scenarios for common test cases
 */
export const ConfigScenarios = {
	/**
	 * API storage with default test credentials
	 */
	apiStorage: () => createApiStorageConfig(),

	/**
	 * API storage with custom endpoint
	 */
	apiStorageCustomEndpoint: (endpoint: string) =>
		createApiStorageConfig({
			storage: { apiEndpoint: endpoint }
		}),

	/**
	 * File storage with default test path
	 */
	fileStorage: () => createFileStorageConfig(),

	/**
	 * File storage with custom base path
	 */
	fileStorageCustomPath: (basePath: string) =>
		createFileStorageConfig({
			storage: { basePath }
		}),

	/**
	 * Auto storage (will detect based on available config)
	 */
	autoStorage: () =>
		({
			storage: {
				...defaultStorageSettings,
				type: 'auto'
			},
			projectPath: '/test/project'
		}) as Partial<IConfiguration>
};

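// A sketch of how overrides merge over defaults (assumes only the fixtures above):
//
//   const custom = createApiStorageConfig({
//     storage: { apiEndpoint: 'https://custom.api.com' }
//   });
//   custom.storage?.apiEndpoint;    // 'https://custom.api.com' (override wins)
//   custom.storage?.apiAccessToken; // 'test-token' (default survives the spread)
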
@@ -7,9 +7,11 @@
 * @example
 * ```ts
 * import { createTask, createTasksFile, TaskScenarios } from '@tm/core/testing';
 * import { createApiStorageConfig, ConfigScenarios } from '@tm/core/testing';
 *
 * const task = createTask({ id: 1, title: 'Test Task' });
 * const tasksFile = TaskScenarios.linearDependencyChain();
 * const config = ConfigScenarios.apiStorage();
 * ```
 */

@@ -20,3 +22,18 @@ export {
	TaskScenarios,
	type TasksFile
} from './task-fixtures.js';

export {
	createApiStorageSettings,
	createFileStorageSettings,
	createApiStorageConfig,
	createFileStorageConfig,
	ConfigScenarios
} from './config-fixtures.js';

export {
	MockSupabaseSessionStorage,
	MockSupabaseSessionStorageMinimal
} from './auth-mocks.js';

export { createMockLogger } from './test-mocks.js';

packages/tm-core/src/testing/test-mocks.ts (new file, 30 lines)
@@ -0,0 +1,30 @@
/**
 * @fileoverview General-purpose mock implementations for tests
 *
 * These mocks provide consistent test doubles for common dependencies
 * across unit and integration tests.
 */

/**
 * Mock logger factory for suppressing log output in tests.
 *
 * Returns a logger with all methods stubbed as no-ops.
 *
 * USAGE:
 * ```ts
 * vi.mock('../path/to/logger/index.js', () => ({
 *   getLogger: createMockLogger
 * }));
 * ```
 */
export const createMockLogger = (): {
	warn: () => void;
	info: () => void;
	debug: () => void;
	error: () => void;
} => ({
	warn: () => {},
	info: () => {},
	debug: () => {},
	error: () => {}
});
@@ -1,139 +0,0 @@
import fs from 'fs';
import os from 'os';
import path from 'path';
import type { Session } from '@supabase/supabase-js';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { AuthManager } from '../../src/auth/auth-manager';
import { CredentialStore } from '../../src/auth/credential-store';
import type { AuthCredentials } from '../../src/auth/types';

describe('AuthManager Token Refresh', () => {
	let authManager: AuthManager;
	let credentialStore: CredentialStore;
	let tmpDir: string;
	let authFile: string;

	beforeEach(() => {
		// Reset singletons
		AuthManager.resetInstance();
		CredentialStore.resetInstance();

		// Create temporary directory for test isolation
		tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'tm-auth-refresh-'));
		authFile = path.join(tmpDir, 'auth.json');

		// Initialize AuthManager with test config (this will create CredentialStore internally)
		authManager = AuthManager.getInstance({
			configDir: tmpDir,
			configFile: authFile
		});

		// Get the CredentialStore instance that AuthManager created
		credentialStore = CredentialStore.getInstance();
		credentialStore.clearCredentials();
	});

	afterEach(() => {
		// Clean up
		try {
			credentialStore.clearCredentials();
		} catch {
			// Ignore cleanup errors
		}
		AuthManager.resetInstance();
		CredentialStore.resetInstance();
		vi.restoreAllMocks();

		// Remove temporary directory
		if (tmpDir && fs.existsSync(tmpDir)) {
			fs.rmSync(tmpDir, { recursive: true, force: true });
		}
	});

	it('should return expired credentials to enable refresh flows', () => {
		// Set up expired credentials with refresh token
		const expiredCredentials: AuthCredentials = {
			token: 'expired_access_token',
			refreshToken: 'valid_refresh_token',
			userId: 'test-user-id',
			email: 'test@example.com',
			expiresAt: new Date(Date.now() - 1000).toISOString(), // Expired 1 second ago
			savedAt: new Date().toISOString()
		};

		credentialStore.saveCredentials(expiredCredentials);

		// Get credentials should return them even if expired
		// Refresh will be handled by explicit calls or client operations
		const credentials = authManager.getCredentials();

		expect(credentials).not.toBeNull();
		expect(credentials?.token).toBe('expired_access_token');
		expect(credentials?.refreshToken).toBe('valid_refresh_token');
	});

	it('should return valid credentials', () => {
		// Set up valid (non-expired) credentials
		const validCredentials: AuthCredentials = {
			token: 'valid_access_token',
			refreshToken: 'valid_refresh_token',
			userId: 'test-user-id',
			email: 'test@example.com',
			expiresAt: new Date(Date.now() + 3600000).toISOString(), // Expires in 1 hour
			savedAt: new Date().toISOString()
		};

		credentialStore.saveCredentials(validCredentials);

		const credentials = authManager.getCredentials();

		expect(credentials?.token).toBe('valid_access_token');
	});

	it('should return expired credentials even without refresh token', () => {
		// Set up expired credentials WITHOUT refresh token
		// We still return them - it's up to the caller to handle
		const expiredCredentials: AuthCredentials = {
			token: 'expired_access_token',
			refreshToken: undefined,
			userId: 'test-user-id',
			email: 'test@example.com',
			expiresAt: new Date(Date.now() - 1000).toISOString(), // Expired 1 second ago
			savedAt: new Date().toISOString()
		};

		credentialStore.saveCredentials(expiredCredentials);

		const credentials = authManager.getCredentials();

		// Returns credentials even if expired
		expect(credentials).not.toBeNull();
		expect(credentials?.token).toBe('expired_access_token');
	});

	it('should return null if no credentials exist', () => {
		const credentials = authManager.getCredentials();
		expect(credentials).toBeNull();
	});

	it('should return credentials regardless of refresh token validity', () => {
		// Set up expired credentials with refresh token
		const expiredCredentials: AuthCredentials = {
			token: 'expired_access_token',
			refreshToken: 'invalid_refresh_token',
			userId: 'test-user-id',
			email: 'test@example.com',
			expiresAt: new Date(Date.now() - 1000).toISOString(),
			savedAt: new Date().toISOString()
		};

		credentialStore.saveCredentials(expiredCredentials);

		const credentials = authManager.getCredentials();

		// Returns credentials - refresh will be attempted by the client which will handle failure
		expect(credentials).not.toBeNull();
		expect(credentials?.token).toBe('expired_access_token');
		expect(credentials?.refreshToken).toBe('invalid_refresh_token');
	});
});
@@ -1,336 +0,0 @@
/**
 * @fileoverview Integration tests for JWT token auto-refresh functionality
 *
 * These tests verify that expired tokens are automatically refreshed
 * when making API calls through AuthManager.
 */

import fs from 'fs';
import os from 'os';
import path from 'path';
import type { Session } from '@supabase/supabase-js';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { AuthManager } from '../../src/modules/auth/managers/auth-manager.js';
import { CredentialStore } from '../../src/modules/auth/services/credential-store.js';
import type { AuthCredentials } from '../../src/modules/auth/types.js';

describe('AuthManager - Token Auto-Refresh Integration', () => {
	let authManager: AuthManager;
	let credentialStore: CredentialStore;
	let tmpDir: string;
	let authFile: string;

	// Mock Supabase session that will be returned on refresh
	const mockRefreshedSession: Session = {
		access_token: 'new-access-token-xyz',
		refresh_token: 'new-refresh-token-xyz',
		token_type: 'bearer',
		expires_at: Math.floor(Date.now() / 1000) + 3600, // 1 hour from now
		expires_in: 3600,
		user: {
			id: 'test-user-id',
			email: 'test@example.com',
			aud: 'authenticated',
			role: 'authenticated',
			app_metadata: {},
			user_metadata: {},
			created_at: new Date().toISOString()
		}
	};

	beforeEach(() => {
		// Reset singletons
		AuthManager.resetInstance();
		CredentialStore.resetInstance();

		// Create temporary directory for test isolation
		tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'tm-auth-integration-'));
		authFile = path.join(tmpDir, 'auth.json');

		// Initialize AuthManager with test config (this will create CredentialStore internally)
		authManager = AuthManager.getInstance({
			configDir: tmpDir,
			configFile: authFile
		});

		// Get the CredentialStore instance that AuthManager created
		credentialStore = CredentialStore.getInstance();
		credentialStore.clearCredentials();
	});

	afterEach(() => {
		// Clean up
		try {
			credentialStore.clearCredentials();
		} catch {
			// Ignore cleanup errors
		}
		AuthManager.resetInstance();
		CredentialStore.resetInstance();
		vi.restoreAllMocks();

		// Remove temporary directory
		if (tmpDir && fs.existsSync(tmpDir)) {
			fs.rmSync(tmpDir, { recursive: true, force: true });
		}
	});

	describe('Expired Token Detection', () => {
		it('should return expired token for Supabase to refresh', () => {
			// Set up expired credentials
			const expiredCredentials: AuthCredentials = {
				token: 'expired-token',
				refreshToken: 'valid-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() - 60000).toISOString(), // 1 minute ago
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(expiredCredentials);

			authManager = AuthManager.getInstance();

			// Get credentials returns them even if expired
			const credentials = authManager.getCredentials();

			expect(credentials).not.toBeNull();
			expect(credentials?.token).toBe('expired-token');
			expect(credentials?.refreshToken).toBe('valid-refresh-token');
		});

		it('should return valid token', () => {
			// Set up valid credentials
			const validCredentials: AuthCredentials = {
				token: 'valid-token',
				refreshToken: 'valid-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() + 3600000).toISOString(), // 1 hour from now
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(validCredentials);

			authManager = AuthManager.getInstance();

			const credentials = authManager.getCredentials();

			expect(credentials?.token).toBe('valid-token');
		});
	});

	describe('Token Refresh Flow', () => {
		it('should manually refresh expired token and save new credentials', async () => {
			const expiredCredentials: AuthCredentials = {
				token: 'old-token',
				refreshToken: 'old-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() - 60000).toISOString(),
				savedAt: new Date(Date.now() - 3600000).toISOString(),
				selectedContext: {
					orgId: 'test-org',
					briefId: 'test-brief',
					updatedAt: new Date().toISOString()
				}
			};

			credentialStore.saveCredentials(expiredCredentials);

			authManager = AuthManager.getInstance();

			vi.spyOn(
				authManager['supabaseClient'],
				'refreshSession'
			).mockResolvedValue(mockRefreshedSession);

			// Explicitly call refreshToken() method
			const refreshedCredentials = await authManager.refreshToken();

			expect(refreshedCredentials).not.toBeNull();
			expect(refreshedCredentials.token).toBe('new-access-token-xyz');
			expect(refreshedCredentials.refreshToken).toBe('new-refresh-token-xyz');

			// Verify context was preserved
			expect(refreshedCredentials.selectedContext?.orgId).toBe('test-org');
			expect(refreshedCredentials.selectedContext?.briefId).toBe('test-brief');

			// Verify new expiration is in the future
			const newExpiry = new Date(refreshedCredentials.expiresAt!).getTime();
			const now = Date.now();
			expect(newExpiry).toBeGreaterThan(now);
		});

		it('should throw error if manual refresh fails', async () => {
			const expiredCredentials: AuthCredentials = {
				token: 'expired-token',
				refreshToken: 'invalid-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() - 60000).toISOString(),
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(expiredCredentials);

			authManager = AuthManager.getInstance();

			// Mock refresh to fail
			vi.spyOn(
				authManager['supabaseClient'],
				'refreshSession'
			).mockRejectedValue(new Error('Refresh token expired'));

			// Explicit refreshToken() call should throw
			await expect(authManager.refreshToken()).rejects.toThrow();
		});

		it('should return expired credentials even without refresh token', () => {
			const expiredCredentials: AuthCredentials = {
				token: 'expired-token',
				// No refresh token
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() - 60000).toISOString(),
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(expiredCredentials);

			authManager = AuthManager.getInstance();

			const credentials = authManager.getCredentials();

			// Credentials are returned even without refresh token
			expect(credentials).not.toBeNull();
			expect(credentials?.token).toBe('expired-token');
			expect(credentials?.refreshToken).toBeUndefined();
		});

		it('should return null if credentials missing expiresAt', () => {
			const credentialsWithoutExpiry: AuthCredentials = {
				token: 'test-token',
				refreshToken: 'refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				// Missing expiresAt - invalid token
				savedAt: new Date().toISOString()
			} as any;

			credentialStore.saveCredentials(credentialsWithoutExpiry);

			authManager = AuthManager.getInstance();

			const credentials = authManager.getCredentials();

			// Tokens without valid expiration are considered invalid
			expect(credentials).toBeNull();
		});
	});

	describe('Clock Skew Tolerance', () => {
		it('should return credentials within 30-second expiry window', () => {
			// Token expires in 15 seconds (within 30-second buffer)
			// Supabase will handle refresh automatically
			const almostExpiredCredentials: AuthCredentials = {
				token: 'almost-expired-token',
				refreshToken: 'valid-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() + 15000).toISOString(), // 15 seconds from now
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(almostExpiredCredentials);

			authManager = AuthManager.getInstance();

			const credentials = authManager.getCredentials();

			// Credentials are returned (Supabase handles auto-refresh in background)
			expect(credentials).not.toBeNull();
			expect(credentials?.token).toBe('almost-expired-token');
			expect(credentials?.refreshToken).toBe('valid-refresh-token');
		});

		it('should return valid token well before expiry', () => {
			// Token expires in 5 minutes
			const validCredentials: AuthCredentials = {
				token: 'valid-token',
				refreshToken: 'valid-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() + 300000).toISOString(), // 5 minutes
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(validCredentials);

			authManager = AuthManager.getInstance();

			const credentials = authManager.getCredentials();

			// Valid credentials are returned as-is
			expect(credentials).not.toBeNull();
			expect(credentials?.token).toBe('valid-token');
			expect(credentials?.refreshToken).toBe('valid-refresh-token');
		});
	});

	describe('Synchronous vs Async Methods', () => {
		it('getCredentials should return expired credentials', () => {
			const expiredCredentials: AuthCredentials = {
				token: 'expired-token',
				refreshToken: 'valid-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() - 60000).toISOString(),
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(expiredCredentials);

			authManager = AuthManager.getInstance();

			// Returns credentials even if expired - Supabase will handle refresh
			const credentials = authManager.getCredentials();

			expect(credentials).not.toBeNull();
			expect(credentials?.token).toBe('expired-token');
			expect(credentials?.refreshToken).toBe('valid-refresh-token');
		});
	});

	describe('Multiple Concurrent Calls', () => {
		it('should handle concurrent getCredentials calls gracefully', () => {
			const expiredCredentials: AuthCredentials = {
				token: 'expired-token',
				refreshToken: 'valid-refresh-token',
				userId: 'test-user-id',
				email: 'test@example.com',
				expiresAt: new Date(Date.now() - 60000).toISOString(),
				savedAt: new Date().toISOString()
			};

			credentialStore.saveCredentials(expiredCredentials);

			authManager = AuthManager.getInstance();

			// Make multiple concurrent calls (synchronous now)
			const creds1 = authManager.getCredentials();
			const creds2 = authManager.getCredentials();
			const creds3 = authManager.getCredentials();

			// All should get the same credentials (even if expired)
			expect(creds1?.token).toBe('expired-token');
			expect(creds2?.token).toBe('expired-token');
			expect(creds3?.token).toBe('expired-token');

			// All include refresh token for Supabase to use
			expect(creds1?.refreshToken).toBe('valid-refresh-token');
			expect(creds2?.refreshToken).toBe('valid-refresh-token');
			expect(creds3?.refreshToken).toBe('valid-refresh-token');
		});
	});
});
@@ -0,0 +1,258 @@
/**
 * @fileoverview Integration tests for token refresh with singleton pattern
 *
 * These tests verify that the singleton SupabaseAuthClient prevents
 * "refresh_token_already_used" errors when multiple code paths
 * try to access the Supabase client with an expired token.
 *
 * The bug scenario (before the fix):
 * 1. User authenticates, gets session with access_token + refresh_token
 * 2. Time passes (access token expires after ~1 hour)
 * 3. User runs a command like `tm show HAM-1945`
 * 4. AuthManager.hasValidSession() calls getSession() → triggers auto-refresh
 * 5. StorageFactory.createApiStorage() creates NEW SupabaseAuthClient
 * 6. This new client ALSO calls getSession() → triggers ANOTHER auto-refresh
 * 7. First refresh succeeds, rotates the token
 * 8. Second refresh fails with "refresh_token_already_used"
 *
 * The fix: SupabaseAuthClient is now a singleton, so all code paths
 * share the same Supabase client and there's only one auto-refresh.
 */

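// A sketch of the failure mode this file guards against (illustrative only;
// the pre-fix constructor was public, and the getSession() flow below is an
// assumption based on the scenario described above):
//
//   const a = new SupabaseAuthClient(); // first client, own refresh timer
//   const b = new SupabaseAuthClient(); // second client, second refresh timer
//   await Promise.all([a.getSession(), b.getSession()]);
//   // refresh #1 rotates the token; refresh #2 replays the old one and
//   // fails with "refresh_token_already_used"
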
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import {
	MockSupabaseSessionStorage,
	createMockLogger,
	createApiStorageConfig
} from '../../../src/testing/index.js';

// Mock logger using shared mock factory
vi.mock('../../../src/common/logger/index.js', () => ({
	getLogger: createMockLogger
}));

// Mock SupabaseSessionStorage using shared Map-based mock
// (this test may exercise storage behavior in future scenarios)
vi.mock(
	'../../../src/modules/auth/services/supabase-session-storage.js',
	() => ({
		SupabaseSessionStorage: MockSupabaseSessionStorage
	})
);

// Import after mocking
import { SupabaseAuthClient } from '../../../src/modules/integration/clients/supabase-client.js';
import { AuthManager } from '../../../src/modules/auth/managers/auth-manager.js';
import { StorageFactory } from '../../../src/modules/storage/services/storage-factory.js';

describe('Token Refresh - Singleton Integration', () => {
	let originalSupabaseUrl: string | undefined;
	let originalSupabaseAnonKey: string | undefined;

	beforeEach(() => {
		// Store original values
		originalSupabaseUrl = process.env.TM_SUPABASE_URL;
		originalSupabaseAnonKey = process.env.TM_SUPABASE_ANON_KEY;

		// Set required environment variables
		process.env.TM_SUPABASE_URL = 'https://test.supabase.co';
		process.env.TM_SUPABASE_ANON_KEY = 'test-anon-key';

		// Reset singletons
		SupabaseAuthClient.resetInstance();
		AuthManager.resetInstance();

		vi.clearAllMocks();
	});

	afterEach(() => {
		// Reset singletons
		SupabaseAuthClient.resetInstance();
		AuthManager.resetInstance();

		// Restore original env values
		if (originalSupabaseUrl === undefined) {
			delete process.env.TM_SUPABASE_URL;
		} else {
			process.env.TM_SUPABASE_URL = originalSupabaseUrl;
		}

		if (originalSupabaseAnonKey === undefined) {
			delete process.env.TM_SUPABASE_ANON_KEY;
		} else {
			process.env.TM_SUPABASE_ANON_KEY = originalSupabaseAnonKey;
		}
	});

	describe('Simulated Expired Token Scenario', () => {
		it('should use only ONE Supabase client instance across AuthManager and StorageFactory', async () => {
			// Get the singleton instance and its internal client
			const supabaseAuthClient = SupabaseAuthClient.getInstance();
			const internalClient = supabaseAuthClient.getClient();

			// Get AuthManager (which uses the singleton)
			const authManager = AuthManager.getInstance();

			// Verify AuthManager uses the same singleton
			expect(authManager.supabaseClient).toBe(supabaseAuthClient);
			expect(authManager.supabaseClient.getClient()).toBe(internalClient);

			// Create API storage (which also uses the singleton)
			const config = createApiStorageConfig();
			await StorageFactory.create(config, '/test/project');

			// Verify the singleton still returns the same client
			expect(SupabaseAuthClient.getInstance().getClient()).toBe(internalClient);
		});

		it('should prevent multiple refresh token uses by sharing single client', async () => {
			// This test validates that the singleton pattern enables proper mock tracking.
			//
			// The key insight: with a singleton, we can spy on the single shared client
			// and verify that refresh is only called once. Before the singleton fix,
			// AuthManager and StorageFactory each created their own SupabaseAuthClient,
			// so we couldn't track refresh calls across all instances with a single spy.
			//
			// Note: This test explicitly calls refreshSession() once to verify the mock
			// infrastructure works. The actual race condition prevention is validated in
			// expired-token-refresh.test.ts which uses time-based simulation.

			const supabaseAuthClient = SupabaseAuthClient.getInstance();
			const internalClient = supabaseAuthClient.getClient();

			// Track how many times refreshSession would be called
			let mockRefreshCount = 0;
			vi.spyOn(internalClient.auth, 'refreshSession').mockImplementation(
				async () => {
					mockRefreshCount++;
					// Simulate successful refresh
					return {
						data: {
							session: {
								access_token: `new-token-${mockRefreshCount}`,
								refresh_token: `new-refresh-${mockRefreshCount}`,
								expires_in: 3600,
								expires_at: Math.floor(Date.now() / 1000) + 3600,
								token_type: 'bearer',
								user: {
									id: 'user-123',
									email: 'test@example.com',
									app_metadata: {},
									user_metadata: {},
									aud: 'authenticated',
									created_at: new Date().toISOString()
								}
							},
							user: null
						},
						error: null
					};
				}
			);

			// Verify AuthManager and StorageFactory share the same spied client
			const authManager = AuthManager.getInstance();
			const config = createApiStorageConfig();
			await StorageFactory.create(config, '/test/project');

			// Both should reference the same underlying Supabase client we spied on
			expect(authManager.supabaseClient.getClient()).toBe(internalClient);
			expect(SupabaseAuthClient.getInstance().getClient()).toBe(internalClient);

			// Now trigger one refresh - our single spy tracks it
			await supabaseAuthClient.refreshSession();

			// The key assertion: we can track refresh calls because there's only one client
			expect(mockRefreshCount).toBe(1);

			// Restore
			vi.mocked(internalClient.auth.refreshSession).mockRestore();
		});

		it('should allow multiple sequential refreshes on the same client', async () => {
			// This test verifies that sequential refreshes work correctly
			// (as opposed to the race condition from parallel refreshes)

			const supabaseAuthClient = SupabaseAuthClient.getInstance();
			const internalClient = supabaseAuthClient.getClient();

			let mockRefreshCount = 0;
			vi.spyOn(internalClient.auth, 'refreshSession').mockImplementation(
				async () => {
					mockRefreshCount++;
					return {
						data: {
							session: {
								access_token: `token-${mockRefreshCount}`,
								refresh_token: `refresh-${mockRefreshCount}`,
								expires_in: 3600,
								expires_at: Math.floor(Date.now() / 1000) + 3600,
								token_type: 'bearer',
								user: {
									id: 'user-123',
									email: 'test@example.com',
									app_metadata: {},
									user_metadata: {},
									aud: 'authenticated',
									created_at: new Date().toISOString()
								}
							},
							user: null
						},
						error: null
					};
				}
			);

			// Sequential refreshes should work fine
			const result1 = await supabaseAuthClient.refreshSession();
			const result2 = await supabaseAuthClient.refreshSession();

			expect(result1?.access_token).toBe('token-1');
			expect(result2?.access_token).toBe('token-2');
			expect(mockRefreshCount).toBe(2);

			vi.mocked(internalClient.auth.refreshSession).mockRestore();
		});
	});

	describe('Concurrent Access Safety', () => {
		it('getInstance() is safe to call from multiple places simultaneously', () => {
			// Simulate multiple parts of the codebase calling getInstance() at once
			const instances: SupabaseAuthClient[] = [];

			// Create 10 "concurrent" calls
			for (let i = 0; i < 10; i++) {
				instances.push(SupabaseAuthClient.getInstance());
			}

			// All should be the exact same instance
			const firstInstance = instances[0];
			for (const instance of instances) {
				expect(instance).toBe(firstInstance);
			}
		});

		it('AuthManager and StorageFactory always get the same underlying Supabase client', async () => {
			// This is the core fix validation

			// Step 1: AuthManager creates its singleton
			const authManager = AuthManager.getInstance();
			const authManagerClient = authManager.supabaseClient.getClient();

			// Step 2: StorageFactory creates API storage
			const config = createApiStorageConfig();
			await StorageFactory.create(config, '/test/project');

			// Step 3: Get the singleton client directly
			const singletonClient = SupabaseAuthClient.getInstance().getClient();

			// All three should be the exact same object
			expect(authManagerClient).toBe(singletonClient);

			// This is what the fix ensures: only ONE Supabase client exists
			// so there's only ONE autoRefreshToken handler
			// and only ONE possible refresh at a time
		});
	});
});
@@ -0,0 +1,396 @@
/**
 * @fileoverview Integration tests for expired token handling with time manipulation
 *
 * These tests use vi.setSystemTime to simulate real token expiration scenarios
 * and verify that:
 * 1. The singleton pattern prevents duplicate refresh attempts
 * 2. Token refresh is only called once even when multiple code paths access the client
 *
 * This tests the fix for "refresh_token_already_used" errors that occurred
 * when multiple SupabaseAuthClient instances each tried to refresh the same token.
 */

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { AuthError } from '@supabase/supabase-js';
import type { Session, User } from '@supabase/supabase-js';
import {
	MockSupabaseSessionStorage,
	createMockLogger,
	createApiStorageConfig
} from '../../../src/testing/index.js';

// Mock logger using shared mock factory
vi.mock('../../../src/common/logger/index.js', () => ({
	getLogger: createMockLogger
}));

// Mock SupabaseSessionStorage using shared Map-based mock
vi.mock(
	'../../../src/modules/auth/services/supabase-session-storage.js',
	() => ({
		SupabaseSessionStorage: MockSupabaseSessionStorage
	})
);

// Import after mocking
import { SupabaseAuthClient } from '../../../src/modules/integration/clients/supabase-client.js';
import { AuthManager } from '../../../src/modules/auth/managers/auth-manager.js';
import { StorageFactory } from '../../../src/modules/storage/services/storage-factory.js';

// Helper to create a session that expires at a specific time
const createSessionExpiringAt = (expiresAt: Date): Session => ({
	access_token: 'test-access-token',
	refresh_token: 'test-refresh-token',
	token_type: 'bearer',
	expires_in: 3600,
	expires_at: Math.floor(expiresAt.getTime() / 1000),
	user: {
		id: 'user-123',
		email: 'test@example.com',
		app_metadata: {},
		user_metadata: {},
		aud: 'authenticated',
		created_at: new Date().toISOString()
	} as User
});

// Helper to create a refreshed session
const createRefreshedSession = (): Session => ({
	access_token: 'new-access-token',
	refresh_token: 'new-refresh-token',
	token_type: 'bearer',
	expires_in: 3600,
	expires_at: Math.floor(Date.now() / 1000) + 3600,
	user: {
		id: 'user-123',
		email: 'test@example.com',
		app_metadata: {},
		user_metadata: {},
		aud: 'authenticated',
		created_at: new Date().toISOString()
	} as User
});

describe('Expired Token Refresh - Time-Based Integration', () => {
|
||||
let originalSupabaseUrl: string | undefined;
|
||||
let originalSupabaseAnonKey: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
// Store original values
|
||||
originalSupabaseUrl = process.env.TM_SUPABASE_URL;
|
||||
originalSupabaseAnonKey = process.env.TM_SUPABASE_ANON_KEY;
|
||||
|
||||
// Set required environment variables
|
||||
process.env.TM_SUPABASE_URL = 'https://test.supabase.co';
|
||||
process.env.TM_SUPABASE_ANON_KEY = 'test-anon-key';
|
||||
|
||||
// Reset singletons
|
||||
SupabaseAuthClient.resetInstance();
|
||||
AuthManager.resetInstance();
|
||||
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore real timers
|
||||
vi.useRealTimers();
|
||||
|
||||
// Reset singletons
|
||||
SupabaseAuthClient.resetInstance();
|
||||
AuthManager.resetInstance();
|
||||
|
||||
// Restore original env values
|
||||
if (originalSupabaseUrl === undefined) {
|
||||
delete process.env.TM_SUPABASE_URL;
|
||||
} else {
|
||||
process.env.TM_SUPABASE_URL = originalSupabaseUrl;
|
||||
}
|
||||
|
||||
if (originalSupabaseAnonKey === undefined) {
|
||||
delete process.env.TM_SUPABASE_ANON_KEY;
|
||||
} else {
|
||||
process.env.TM_SUPABASE_ANON_KEY = originalSupabaseAnonKey;
|
||||
}
|
||||
});
|
||||
|
||||
describe('Time-Based Token Expiration', () => {
|
||||
it('should detect expired token after time passes', () => {
|
||||
// Set a fixed "now" time
|
||||
const now = new Date('2024-01-15T10:00:00Z');
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(now);
|
||||
|
||||
// Create a session that expires in 1 hour
|
||||
const expiresAt = new Date(now.getTime() + 60 * 60 * 1000); // +1 hour
|
||||
const session = createSessionExpiringAt(expiresAt);
|
||||
|
||||
// Session should NOT be expired yet
|
||||
const currentTime = Math.floor(Date.now() / 1000);
|
||||
expect(session.expires_at).toBeGreaterThan(currentTime);
|
||||
|
||||
// Jump forward 2 hours
|
||||
vi.setSystemTime(new Date(now.getTime() + 2 * 60 * 60 * 1000));
|
||||
|
||||
// Now the session SHOULD be expired
|
||||
const newCurrentTime = Math.floor(Date.now() / 1000);
|
||||
expect(session.expires_at).toBeLessThan(newCurrentTime);
|
||||
});
|
||||
|
||||
it('should share same singleton across time jumps', async () => {
|
||||
const now = new Date('2024-01-15T10:00:00Z');
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(now);
|
||||
|
||||
// Get singleton at time T
|
||||
const client1 = SupabaseAuthClient.getInstance();
|
||||
|
||||
// Jump forward 2 hours
|
||||
vi.setSystemTime(new Date(now.getTime() + 2 * 60 * 60 * 1000));
|
||||
|
||||
// Get singleton at time T+2h - should be the same instance
|
||||
const client2 = SupabaseAuthClient.getInstance();
|
||||
|
||||
expect(client1).toBe(client2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Singleton Pattern with Expired Token Scenario', () => {
|
||||
it('should use same Supabase client regardless of when getInstance is called', async () => {
|
||||
const now = new Date('2024-01-15T10:00:00Z');
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(now);
|
||||
|
||||
// Spy on getInstance to verify StorageFactory uses the singleton
|
||||
const getInstanceSpy = vi.spyOn(SupabaseAuthClient, 'getInstance');
|
||||
|
||||
// Simulate the bug scenario:
|
||||
|
||||
// T=0: User authenticates
|
||||
const supabaseAuthClient = SupabaseAuthClient.getInstance();
|
||||
const internalClient = supabaseAuthClient.getClient();
|
||||
|
||||
// T=0: AuthManager is created
|
||||
const authManager = AuthManager.getInstance();
|
||||
expect(authManager.supabaseClient.getClient()).toBe(internalClient);
|
||||
|
||||
const callsBeforeStorage = getInstanceSpy.mock.calls.length;
|
||||
|
||||
// T+2h: Token expires, user runs a command
|
||||
vi.setSystemTime(new Date(now.getTime() + 2 * 60 * 60 * 1000));
|
||||
|
||||
// StorageFactory creates API storage (which also accesses the singleton)
|
||||
const config = createApiStorageConfig();
|
||||
await StorageFactory.create(config, '/test/project');
|
||||
|
||||
// REGRESSION GUARD: Verify StorageFactory called getInstance
|
||||
// If this fails, StorageFactory bypassed the singleton (the original bug)
|
||||
expect(getInstanceSpy.mock.calls.length).toBeGreaterThan(
|
||||
callsBeforeStorage
|
||||
);
|
||||
|
||||
// CRITICAL: The singleton should still return the same client
|
||||
// Before the fix, StorageFactory would create a NEW SupabaseAuthClient
|
||||
expect(SupabaseAuthClient.getInstance().getClient()).toBe(internalClient);
|
||||
expect(authManager.supabaseClient.getClient()).toBe(internalClient);
|
||||
|
||||
getInstanceSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('should track refresh calls on the single shared client', async () => {
|
||||
const now = new Date('2024-01-15T10:00:00Z');
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(now);
|
||||
|
||||
// Spy on getInstance to verify both code paths use the singleton
|
||||
const getInstanceSpy = vi.spyOn(SupabaseAuthClient, 'getInstance');
|
||||
|
||||
// Get the singleton
|
||||
const supabaseAuthClient = SupabaseAuthClient.getInstance();
|
||||
const internalClient = supabaseAuthClient.getClient();
|
||||
|
||||
// Mock refreshSession to track calls
|
||||
let refreshCallCount = 0;
|
||||
vi.spyOn(internalClient.auth, 'refreshSession').mockImplementation(
|
||||
async (_options?: { refresh_token: string }) => {
|
||||
refreshCallCount++;
|
||||
return {
|
||||
data: {
|
||||
session: createRefreshedSession(),
|
||||
user: createRefreshedSession().user
|
||||
},
|
||||
error: null
|
||||
};
|
||||
}
|
||||
);
|
||||
|
||||
// T+2h: Token expires
|
||||
vi.setSystemTime(new Date(now.getTime() + 2 * 60 * 60 * 1000));
|
||||
|
||||
const callsBeforeAccess = getInstanceSpy.mock.calls.length;
|
||||
|
||||
// Multiple code paths access the singleton
|
||||
const authManager = AuthManager.getInstance();
|
||||
const config = createApiStorageConfig();
|
||||
await StorageFactory.create(config, '/test/project');
|
||||
|
||||
// REGRESSION GUARD: Verify both AuthManager and StorageFactory called getInstance
|
||||
// This proves they're using the singleton rather than creating independent clients
|
||||
expect(getInstanceSpy.mock.calls.length).toBeGreaterThan(
|
||||
callsBeforeAccess
|
||||
);
|
||||
|
||||
// Trigger refresh from one path
|
||||
await supabaseAuthClient.refreshSession();
|
||||
|
||||
// The key assertion: refreshCallCount is 1 because:
|
||||
// 1. StorageFactory.create and AuthManager.getInstance don't trigger refresh on their own
|
||||
// 2. Only the explicit refreshSession() call above triggered refresh
|
||||
// 3. Because all code paths share the same SupabaseAuthClient singleton,
|
||||
// we can spy on a single mock and verify no other code path called refresh.
|
||||
// Before the singleton fix, StorageFactory would create a new client that could
|
||||
// trigger its own independent refresh, leading to "refresh_token_already_used" errors.
|
||||
expect(refreshCallCount).toBe(1);
|
||||
|
||||
// Verify it's the same client everywhere
|
||||
expect(authManager.supabaseClient.getClient()).toBe(internalClient);
|
||||
|
||||
vi.mocked(internalClient.auth.refreshSession).mockRestore();
|
||||
getInstanceSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('should prevent the "refresh_token_already_used" race condition', async () => {
|
||||
const now = new Date('2024-01-15T10:00:00Z');
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(now);
|
||||
|
||||
// Spy on getInstance to verify all code paths use the singleton
|
||||
const getInstanceSpy = vi.spyOn(SupabaseAuthClient, 'getInstance');
|
||||
|
||||
// Get the singleton
|
||||
const supabaseAuthClient = SupabaseAuthClient.getInstance();
|
||||
const internalClient = supabaseAuthClient.getClient();
|
||||
|
||||
// Track refresh attempts
|
||||
let refreshCallCount = 0;
|
||||
|
||||
// Simulate Supabase's behavior: first refresh rotates the token,
|
||||
// subsequent refreshes with the OLD token fail
|
||||
vi.spyOn(internalClient.auth, 'refreshSession').mockImplementation(
|
||||
async (_options?: { refresh_token: string }) => {
|
||||
refreshCallCount++;
|
||||
if (refreshCallCount === 1) {
|
||||
// First refresh succeeds
|
||||
return {
|
||||
data: {
|
||||
session: createRefreshedSession(),
|
||||
user: createRefreshedSession().user
|
||||
},
|
||||
error: null
|
||||
};
|
||||
} else {
|
||||
// If this were a second client with the old token, it would fail
|
||||
// This simulates the "refresh_token_already_used" error
|
||||
return {
|
||||
data: { session: null, user: null },
|
||||
error: new AuthError('Invalid Refresh Token: Already Used', 400)
|
||||
};
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// T+2h: Token expires
|
||||
vi.setSystemTime(new Date(now.getTime() + 2 * 60 * 60 * 1000));
|
||||
|
||||
const callsBeforeFlow = getInstanceSpy.mock.calls.length;
|
||||
|
||||
// Simulate the typical command flow:
|
||||
// 1. AuthManager checks session
|
||||
const authManager = AuthManager.getInstance();
|
||||
|
||||
// 2. StorageFactory creates storage
|
||||
const config = createApiStorageConfig();
|
||||
await StorageFactory.create(config, '/test/project');
|
||||
|
||||
// REGRESSION GUARD: Both code paths must use the singleton
|
||||
expect(getInstanceSpy.mock.calls.length).toBeGreaterThan(callsBeforeFlow);
|
||||
|
||||
// 3. One of them triggers a refresh
|
||||
const result1 = await authManager.supabaseClient.refreshSession();
|
||||
|
||||
// With singleton pattern, first refresh succeeds
|
||||
expect(result1?.access_token).toBe('new-access-token');
|
||||
expect(refreshCallCount).toBe(1);
|
||||
|
||||
// If we HAD multiple clients (the bug), a second client would try to
|
||||
// refresh with the now-rotated token and fail.
|
||||
// With singleton, subsequent calls go through the same (now refreshed) client.
|
||||
|
||||
vi.mocked(internalClient.auth.refreshSession).mockRestore();
|
||||
getInstanceSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Real-World Command Simulation', () => {
|
||||
it('simulates tm show HAM-1945 after 1 hour idle', async () => {
|
||||
// This test simulates the exact scenario from the bug report
|
||||
const loginTime = new Date('2024-01-15T09:00:00Z');
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(loginTime);
|
||||
|
||||
// Spy on getInstance to verify StorageFactory uses the singleton
|
||||
const getInstanceSpy = vi.spyOn(SupabaseAuthClient, 'getInstance');
|
||||
|
||||
// User logs in at 9:00 AM
|
||||
const authManager = AuthManager.getInstance();
|
||||
const supabaseClient = authManager.supabaseClient.getClient();
|
||||
|
||||
// Track refresh calls
|
||||
let refreshCount = 0;
|
||||
vi.spyOn(supabaseClient.auth, 'refreshSession').mockImplementation(
|
||||
async (_options?: { refresh_token: string }) => {
|
||||
refreshCount++;
|
||||
return {
|
||||
data: {
|
||||
session: createRefreshedSession(),
|
||||
user: createRefreshedSession().user
|
||||
},
|
||||
error: null
|
||||
};
|
||||
}
|
||||
);
|
||||
|
||||
// User comes back at 10:15 AM (token expired at 10:00 AM)
|
||||
const commandTime = new Date('2024-01-15T10:15:00Z');
|
||||
vi.setSystemTime(commandTime);
|
||||
|
||||
const callsBeforeCommand = getInstanceSpy.mock.calls.length;
|
||||
|
||||
// User runs: tm show HAM-1945
|
||||
// This triggers:
|
||||
// 1. AuthManager.hasValidSession() -> getSession() -> auto-refresh
|
||||
// 2. StorageFactory.createApiStorage() -> gets singleton (NOT new client)
|
||||
|
||||
// Simulate the command flow
|
||||
const config = createApiStorageConfig();
|
||||
await StorageFactory.create(config, '/test/project');
|
||||
|
||||
// REGRESSION GUARD: StorageFactory must call getInstance (not create its own client)
|
||||
expect(getInstanceSpy.mock.calls.length).toBeGreaterThan(
|
||||
callsBeforeCommand
|
||||
);
|
||||
|
||||
// If we trigger a refresh, it should only happen once
|
||||
await authManager.supabaseClient.refreshSession();
|
||||
|
||||
// Before the fix: refreshCount would be 2 (race condition)
|
||||
// After the fix: refreshCount is 1 (singleton prevents race)
|
||||
expect(refreshCount).toBe(1);
|
||||
|
||||
// Verify singleton is maintained
|
||||
expect(SupabaseAuthClient.getInstance()).toBe(authManager.supabaseClient);
|
||||
|
||||
vi.mocked(supabaseClient.auth.refreshSession).mockRestore();
|
||||
getInstanceSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,211 @@
|
||||
/**
|
||||
* Tests for SupabaseAuthClient singleton pattern
|
||||
*
|
||||
* This test validates that the SupabaseAuthClient singleton is used consistently
|
||||
* across the codebase to prevent "refresh_token_already_used" errors.
|
||||
*
|
||||
* The bug scenario (before fix):
|
||||
* 1. AuthManager creates its own SupabaseAuthClient instance
|
||||
* 2. StorageFactory.createApiStorage() creates ANOTHER SupabaseAuthClient instance
|
||||
* 3. Each instance has its own Supabase client with autoRefreshToken: true
|
||||
* 4. When access token expires, both clients try to refresh using the same refresh_token
|
||||
* 5. First client succeeds and rotates the token
|
||||
* 6. Second client fails with "refresh_token_already_used"
|
||||
*
|
||||
* The fix: SupabaseAuthClient is now a proper singleton with getInstance().
|
||||
* All code paths use the same instance.
|
||||
*
|
||||
* Related tests:
|
||||
* - auth-token-refresh-singleton.test.ts: Focuses on refresh behavior and mock infrastructure
|
||||
* - expired-token-refresh.test.ts: Focuses on time-based token expiration simulation
|
||||
* This file focuses on validating the singleton pattern itself (getInstance behavior,
|
||||
* client identity checks). Some tests overlap intentionally for comprehensive coverage.
|
||||
*/
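
// For context, the singleton shape these tests rely on looks roughly like this
// (a hypothetical simplification; the real class also wires up session storage
// and the underlying Supabase client):
//
//   class SupabaseAuthClient {
//     private static instance: SupabaseAuthClient | null = null;
//     static getInstance(): SupabaseAuthClient {
//       if (!SupabaseAuthClient.instance) {
//         SupabaseAuthClient.instance = new SupabaseAuthClient();
//       }
//       return SupabaseAuthClient.instance;
//     }
//     static resetInstance(): void {
//       SupabaseAuthClient.instance = null; // test-only escape hatch
//     }
//   }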

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import {
	MockSupabaseSessionStorageMinimal,
	createMockLogger,
	createApiStorageConfig
} from '../../../src/testing/index.js';

// Mock logger using shared mock factory
vi.mock('../../../src/common/logger/index.js', () => ({
	getLogger: createMockLogger
}));

// Mock SupabaseSessionStorage using shared minimal mock
// (this test doesn't exercise storage behavior, only singleton identity)
vi.mock(
	'../../../src/modules/auth/services/supabase-session-storage.js',
	() => ({
		SupabaseSessionStorage: MockSupabaseSessionStorageMinimal
	})
);

// Import after mocking
import { SupabaseAuthClient } from '../../../src/modules/integration/clients/supabase-client.js';
import { AuthManager } from '../../../src/modules/auth/managers/auth-manager.js';
import { StorageFactory } from '../../../src/modules/storage/services/storage-factory.js';

describe('SupabaseAuthClient - Singleton Pattern Validation', () => {
	let originalSupabaseUrl: string | undefined;
	let originalSupabaseAnonKey: string | undefined;

	beforeEach(() => {
		// Store original values
		originalSupabaseUrl = process.env.TM_SUPABASE_URL;
		originalSupabaseAnonKey = process.env.TM_SUPABASE_ANON_KEY;

		// Set required environment variables
		process.env.TM_SUPABASE_URL = 'https://test.supabase.co';
		process.env.TM_SUPABASE_ANON_KEY = 'test-anon-key';

		// Reset singletons before each test
		SupabaseAuthClient.resetInstance();
		AuthManager.resetInstance();

		vi.clearAllMocks();
	});

	afterEach(() => {
		// Reset singletons after each test
		SupabaseAuthClient.resetInstance();
		AuthManager.resetInstance();

		// Restore original env values
		if (originalSupabaseUrl === undefined) {
			delete process.env.TM_SUPABASE_URL;
		} else {
			process.env.TM_SUPABASE_URL = originalSupabaseUrl;
		}

		if (originalSupabaseAnonKey === undefined) {
			delete process.env.TM_SUPABASE_ANON_KEY;
		} else {
			process.env.TM_SUPABASE_ANON_KEY = originalSupabaseAnonKey;
		}
	});

	describe('Singleton Enforcement', () => {
		it('should return the same instance on multiple getInstance() calls', () => {
			const instance1 = SupabaseAuthClient.getInstance();
			const instance2 = SupabaseAuthClient.getInstance();
			const instance3 = SupabaseAuthClient.getInstance();

			expect(instance1).toBe(instance2);
			expect(instance2).toBe(instance3);
		});

		it('should return same Supabase client from multiple getInstance().getClient() calls', () => {
			const client1 = SupabaseAuthClient.getInstance().getClient();
			const client2 = SupabaseAuthClient.getInstance().getClient();

			expect(client1).toBe(client2);
		});
	});

	describe('AuthManager Integration', () => {
		it('AuthManager should use SupabaseAuthClient singleton', () => {
			const authManager = AuthManager.getInstance();
			const directInstance = SupabaseAuthClient.getInstance();

			// AuthManager.supabaseClient should be the same singleton instance
			expect(authManager.supabaseClient).toBe(directInstance);
		});

		it('AuthManager.supabaseClient.getClient() should return same client as direct getInstance()', () => {
			const authManager = AuthManager.getInstance();
			const directClient = SupabaseAuthClient.getInstance().getClient();

			expect(authManager.supabaseClient.getClient()).toBe(directClient);
		});
	});

	describe('StorageFactory Integration', () => {
		it('StorageFactory.createApiStorage should use the singleton Supabase client', async () => {
			// Spy on getInstance to verify it's called during StorageFactory.create
			const getInstanceSpy = vi.spyOn(SupabaseAuthClient, 'getInstance');

			// Get the singleton client first (this call is tracked)
			const singletonClient = SupabaseAuthClient.getInstance().getClient();
			const callCountBeforeStorage = getInstanceSpy.mock.calls.length;

			// Create API storage using fixture
			const config = createApiStorageConfig();
			const storage = await StorageFactory.create(config, '/test/project');

			// Verify getInstance was called during StorageFactory.create
			// This ensures StorageFactory is using the singleton, not creating its own client
			expect(getInstanceSpy.mock.calls.length).toBeGreaterThan(
				callCountBeforeStorage
			);

			// The storage should use the same Supabase client instance
			const clientAfterStorage = SupabaseAuthClient.getInstance().getClient();
			expect(clientAfterStorage).toBe(singletonClient);

			// Storage was created (basic sanity check)
			expect(storage).toBeDefined();

			getInstanceSpy.mockRestore();
		});

		it('StorageFactory should call getInstance (regression guard)', async () => {
			// This test explicitly guards against StorageFactory creating its own
			// SupabaseAuthClient instance, which caused "refresh_token_already_used" errors
			const getInstanceSpy = vi.spyOn(SupabaseAuthClient, 'getInstance');

			const config = createApiStorageConfig();
			await StorageFactory.create(config, '/test/project');

			// StorageFactory MUST call getInstance at least once during API storage creation
			// If this fails, StorageFactory is bypassing the singleton (the original bug)
			expect(getInstanceSpy).toHaveBeenCalled();

			getInstanceSpy.mockRestore();
		});
	});

	describe('Concurrent Access Prevention', () => {
		it('multiple rapid getInstance() calls should all return the same instance', () => {
			// Simulate concurrent access
			const instances = Array.from({ length: 100 }, () =>
				SupabaseAuthClient.getInstance()
			);

			// All instances should be the same object
			const firstInstance = instances[0];
			instances.forEach((instance) => {
				expect(instance).toBe(firstInstance);
			});
		});

		it('AuthManager and StorageFactory should share the same Supabase client', async () => {
			// Spy on getInstance to track all access paths
			const getInstanceSpy = vi.spyOn(SupabaseAuthClient, 'getInstance');

			// AuthManager uses the singleton
			const authManager = AuthManager.getInstance();
			const authManagerClient = authManager.supabaseClient.getClient();
			const callsAfterAuthManager = getInstanceSpy.mock.calls.length;

			// Create storage (which internally uses SupabaseAuthClient.getInstance())
			const config = createApiStorageConfig();
			await StorageFactory.create(config, '/test/project');

			// Verify both AuthManager and StorageFactory accessed the singleton
			expect(getInstanceSpy.mock.calls.length).toBeGreaterThan(
				callsAfterAuthManager
			);

			// After StorageFactory creates storage, the singleton should still be the same
			const singletonClient = SupabaseAuthClient.getInstance().getClient();

			// Critical assertion: both code paths share the same underlying client
			expect(authManagerClient).toBe(singletonClient);

			getInstanceSpy.mockRestore();
		});
	});
});
@@ -1,422 +0,0 @@
/**
 * @fileoverview End-to-end integration test for listTasks functionality
 */

import { promises as fs } from 'node:fs';
import os from 'node:os';
import path from 'node:path';
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import {
	type Task,
	type TaskMasterCore,
	type TaskStatus,
	createTaskMasterCore
} from '../../src/index';

describe('TaskMasterCore - listTasks E2E', () => {
	let tmpDir: string;
	let tmCore: TaskMasterCore;

	// Sample tasks data
	const sampleTasks: Task[] = [
		{
			id: '1',
			title: 'Setup project',
			description: 'Initialize the project structure',
			status: 'done',
			priority: 'high',
			dependencies: [],
			details: 'Create all necessary directories and config files',
			testStrategy: 'Manual verification',
			subtasks: [
				{
					id: 1,
					parentId: '1',
					title: 'Create directories',
					description: 'Create project directories',
					status: 'done',
					priority: 'high',
					dependencies: [],
					details: 'Create src, tests, docs directories',
					testStrategy: 'Check directories exist'
				},
				{
					id: 2,
					parentId: '1',
					title: 'Initialize package.json',
					description: 'Create package.json file',
					status: 'done',
					priority: 'high',
					dependencies: [],
					details: 'Run npm init',
					testStrategy: 'Verify package.json exists'
				}
			],
			tags: ['setup', 'infrastructure']
		},
		{
			id: '2',
			title: 'Implement core features',
			description: 'Build the main functionality',
			status: 'in-progress',
			priority: 'high',
			dependencies: ['1'],
			details: 'Implement all core business logic',
			testStrategy: 'Unit tests for all features',
			subtasks: [],
			tags: ['feature', 'core'],
			assignee: 'developer1'
		},
		{
			id: '3',
			title: 'Write documentation',
			description: 'Create user and developer docs',
			status: 'pending',
			priority: 'medium',
			dependencies: ['2'],
			details: 'Write comprehensive documentation',
			testStrategy: 'Review by team',
			subtasks: [],
			tags: ['documentation'],
			complexity: 'simple'
		},
		{
			id: '4',
			title: 'Performance optimization',
			description: 'Optimize for speed and efficiency',
			status: 'blocked',
			priority: 'low',
			dependencies: ['2'],
			details: 'Profile and optimize bottlenecks',
			testStrategy: 'Performance benchmarks',
			subtasks: [],
			assignee: 'developer2',
			complexity: 'complex'
		},
		{
			id: '5',
			title: 'Security audit',
			description: 'Review security vulnerabilities',
			status: 'deferred',
			priority: 'critical',
			dependencies: [],
			details: 'Complete security assessment',
			testStrategy: 'Security scanning tools',
			subtasks: [],
			tags: ['security', 'audit']
		}
	];

	beforeEach(async () => {
		// Create temp directory for testing
		tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'tm-core-test-'));

		// Create .taskmaster/tasks directory
		const tasksDir = path.join(tmpDir, '.taskmaster', 'tasks');
		await fs.mkdir(tasksDir, { recursive: true });

		// Write sample tasks.json
		const tasksFile = path.join(tasksDir, 'tasks.json');
		const tasksData = {
			tasks: sampleTasks,
			metadata: {
				version: '1.0.0',
				lastModified: new Date().toISOString(),
				taskCount: sampleTasks.length,
				completedCount: 1
			}
		};
		await fs.writeFile(tasksFile, JSON.stringify(tasksData, null, 2));

		// Create TaskMasterCore instance
		tmCore = createTaskMasterCore(tmpDir);
		await tmCore.initialize();
	});

	afterEach(async () => {
		// Cleanup
		if (tmCore) {
			await tmCore.close();
		}

		// Remove temp directory
		await fs.rm(tmpDir, { recursive: true, force: true });
	});

	describe('Basic listing', () => {
		it('should list all tasks', async () => {
			const result = await tmCore.listTasks();

			expect(result.tasks).toHaveLength(5);
			expect(result.total).toBe(5);
			expect(result.filtered).toBe(5);
			expect(result.tag).toBeUndefined();
		});

		it('should include subtasks by default', async () => {
			const result = await tmCore.listTasks();
			const setupTask = result.tasks.find((t) => t.id === '1');

			expect(setupTask?.subtasks).toHaveLength(2);
			expect(setupTask?.subtasks[0].title).toBe('Create directories');
		});

		it('should exclude subtasks when requested', async () => {
			const result = await tmCore.listTasks({ includeSubtasks: false });
			const setupTask = result.tasks.find((t) => t.id === '1');

			expect(setupTask?.subtasks).toHaveLength(0);
		});
	});

	describe('Filtering', () => {
		it('should filter by status', async () => {
			const result = await tmCore.listTasks({
				filter: { status: 'done' }
			});

			expect(result.filtered).toBe(1);
			expect(result.tasks[0].id).toBe('1');
		});

		it('should filter by multiple statuses', async () => {
			const result = await tmCore.listTasks({
				filter: { status: ['done', 'in-progress'] }
			});

			expect(result.filtered).toBe(2);
			const ids = result.tasks.map((t) => t.id);
			expect(ids).toContain('1');
			expect(ids).toContain('2');
		});

		it('should filter by priority', async () => {
			const result = await tmCore.listTasks({
				filter: { priority: 'high' }
			});

			expect(result.filtered).toBe(2);
		});

		it('should filter by tags', async () => {
			const result = await tmCore.listTasks({
				filter: { tags: ['setup'] }
			});

			expect(result.filtered).toBe(1);
			expect(result.tasks[0].id).toBe('1');
		});

		it('should filter by assignee', async () => {
			const result = await tmCore.listTasks({
				filter: { assignee: 'developer1' }
			});

			expect(result.filtered).toBe(1);
			expect(result.tasks[0].id).toBe('2');
		});

		it('should filter by complexity', async () => {
			const result = await tmCore.listTasks({
				filter: { complexity: 'complex' }
			});

			expect(result.filtered).toBe(1);
			expect(result.tasks[0].id).toBe('4');
		});

		it('should filter by search term', async () => {
			const result = await tmCore.listTasks({
				filter: { search: 'documentation' }
			});

			expect(result.filtered).toBe(1);
			expect(result.tasks[0].id).toBe('3');
		});

		it('should filter by hasSubtasks', async () => {
			const withSubtasks = await tmCore.listTasks({
				filter: { hasSubtasks: true }
			});

			expect(withSubtasks.filtered).toBe(1);
			expect(withSubtasks.tasks[0].id).toBe('1');

			const withoutSubtasks = await tmCore.listTasks({
				filter: { hasSubtasks: false }
			});

			expect(withoutSubtasks.filtered).toBe(4);
		});

		it('should handle combined filters', async () => {
			const result = await tmCore.listTasks({
				filter: {
					priority: ['high', 'critical'],
					status: ['pending', 'deferred']
				}
			});

			expect(result.filtered).toBe(1);
			expect(result.tasks[0].id).toBe('5'); // Critical priority, deferred status
		});
	});

	describe('Helper methods', () => {
		it('should get task by ID', async () => {
			const task = await tmCore.getTask('2');

			expect(task).not.toBeNull();
			expect(task?.title).toBe('Implement core features');
		});

		it('should return null for non-existent task', async () => {
			const task = await tmCore.getTask('999');

			expect(task).toBeNull();
		});

		it('should get tasks by status', async () => {
			const pendingTasks = await tmCore.getTasksByStatus('pending');

			expect(pendingTasks).toHaveLength(1);
			expect(pendingTasks[0].id).toBe('3');

			const multipleTasks = await tmCore.getTasksByStatus(['done', 'blocked']);

			expect(multipleTasks).toHaveLength(2);
		});

		it('should get task statistics', async () => {
			const stats = await tmCore.getTaskStats();

			expect(stats.total).toBe(5);
			expect(stats.byStatus.done).toBe(1);
			expect(stats.byStatus['in-progress']).toBe(1);
			expect(stats.byStatus.pending).toBe(1);
			expect(stats.byStatus.blocked).toBe(1);
			expect(stats.byStatus.deferred).toBe(1);
			expect(stats.byStatus.cancelled).toBe(0);
			expect(stats.byStatus.review).toBe(0);
			expect(stats.withSubtasks).toBe(1);
			expect(stats.blocked).toBe(1);
		});
	});

	describe('Error handling', () => {
		it('should handle missing tasks file gracefully', async () => {
			// Create new instance with empty directory
			const emptyDir = await fs.mkdtemp(path.join(os.tmpdir(), 'tm-empty-'));
			const emptyCore = createTaskMasterCore(emptyDir);

			try {
				const result = await emptyCore.listTasks();

				expect(result.tasks).toHaveLength(0);
				expect(result.total).toBe(0);
				expect(result.filtered).toBe(0);
			} finally {
				await emptyCore.close();
				await fs.rm(emptyDir, { recursive: true, force: true });
			}
		});

		it('should validate task entities', async () => {
			// Write invalid task data
			const invalidDir = await fs.mkdtemp(
				path.join(os.tmpdir(), 'tm-invalid-')
			);
			const tasksDir = path.join(invalidDir, '.taskmaster', 'tasks');
			await fs.mkdir(tasksDir, { recursive: true });

			const invalidData = {
				tasks: [
					{
						id: '', // Invalid: empty ID
						title: 'Test',
						description: 'Test',
						status: 'done',
						priority: 'high',
						dependencies: [],
						details: 'Test',
						testStrategy: 'Test',
						subtasks: []
					}
				],
				metadata: {
					version: '1.0.0',
					lastModified: new Date().toISOString(),
					taskCount: 1,
					completedCount: 0
				}
			};

			await fs.writeFile(
				path.join(tasksDir, 'tasks.json'),
				JSON.stringify(invalidData)
			);

			const invalidCore = createTaskMasterCore(invalidDir);

			try {
				await expect(invalidCore.listTasks()).rejects.toThrow();
			} finally {
				await invalidCore.close();
				await fs.rm(invalidDir, { recursive: true, force: true });
			}
		});
	});

	describe('Tags support', () => {
		beforeEach(async () => {
			// Create tasks for a different tag
			const taggedTasks = [
				{
					id: 'tag-1',
					title: 'Tagged task',
					description: 'Task with tag',
					status: 'pending' as TaskStatus,
					priority: 'medium' as const,
					dependencies: [],
					details: 'Tagged task details',
					testStrategy: 'Test',
					subtasks: []
				}
			];

			const tagFile = path.join(
				tmpDir,
				'.taskmaster',
				'tasks',
				'feature-branch.json'
			);
			await fs.writeFile(
				tagFile,
				JSON.stringify({
					tasks: taggedTasks,
					metadata: {
						version: '1.0.0',
						lastModified: new Date().toISOString(),
						taskCount: 1,
						completedCount: 0
					}
				})
			);
		});

		it('should list tasks for specific tag', async () => {
			const result = await tmCore.listTasks({ tag: 'feature-branch' });

			expect(result.tasks).toHaveLength(1);
			expect(result.tasks[0].id).toBe('tag-1');
			expect(result.tag).toBe('feature-branch');
		});

		it('should list default tasks when no tag specified', async () => {
			const result = await tmCore.listTasks();

			expect(result.tasks).toHaveLength(5);
			expect(result.tasks[0].id).toBe('1');
		});
	});
});
@@ -1,401 +0,0 @@
import os from 'os';
import path from 'path';
import fs from 'fs-extra';
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import {
	filterActivityLog,
	logActivity,
	readActivityLog
} from '../../../src/storage/activity-logger.js';

describe('Activity Logger', () => {
	let testDir: string;
	let activityPath: string;

	beforeEach(async () => {
		// Create a unique temporary test directory
		const prefix = path.join(os.tmpdir(), 'activity-test-');
		testDir = await fs.mkdtemp(prefix);
		activityPath = path.join(testDir, 'activity.jsonl');
	});

	afterEach(async () => {
		// Clean up test directory
		await fs.remove(testDir);
	});

	describe('logActivity', () => {
		it('should create activity log file on first write', async () => {
			await logActivity(activityPath, {
				type: 'phase-start',
				phase: 'red',
				data: {}
			});

			const exists = await fs.pathExists(activityPath);
			expect(exists).toBe(true);
		});

		it('should append event to log file', async () => {
			await logActivity(activityPath, {
				type: 'phase-start',
				phase: 'red'
			});

			const content = await fs.readFile(activityPath, 'utf-8');
			const lines = content.trim().split(/\r?\n/);

			expect(lines.length).toBe(1);
		});

		it('should write valid JSONL format', async () => {
			await logActivity(activityPath, {
				type: 'test-run',
				result: 'pass'
			});

			const content = await fs.readFile(activityPath, 'utf-8');
			const line = content.trim();
			const parsed = JSON.parse(line);

			expect(parsed).toBeDefined();
			expect(parsed.type).toBe('test-run');
		});

		it('should include timestamp in log entry', async () => {
			const before = new Date().toISOString();
			await logActivity(activityPath, {
				type: 'phase-start',
				phase: 'red'
			});
			const after = new Date().toISOString();

			const logs = await readActivityLog(activityPath);
			expect(logs[0].timestamp).toBeDefined();
			expect(logs[0].timestamp >= before).toBe(true);
			expect(logs[0].timestamp <= after).toBe(true);
		});

		it('should append multiple events', async () => {
			await logActivity(activityPath, { type: 'event1' });
			await logActivity(activityPath, { type: 'event2' });
			await logActivity(activityPath, { type: 'event3' });

			const logs = await readActivityLog(activityPath);
			expect(logs.length).toBe(3);
			expect(logs[0].type).toBe('event1');
			expect(logs[1].type).toBe('event2');
			expect(logs[2].type).toBe('event3');
		});

		it('should preserve event data', async () => {
			const eventData = {
				type: 'git-commit',
				hash: 'abc123',
				message: 'test commit',
				files: ['file1.ts', 'file2.ts']
			};

			await logActivity(activityPath, eventData);

			const logs = await readActivityLog(activityPath);
			expect(logs[0].type).toBe('git-commit');
			expect(logs[0].hash).toBe('abc123');
			expect(logs[0].message).toBe('test commit');
			expect(logs[0].files).toEqual(['file1.ts', 'file2.ts']);
		});

		it('should handle nested objects in event data', async () => {
			await logActivity(activityPath, {
				type: 'test-results',
				results: {
					passed: 10,
					failed: 2,
					details: { coverage: 85 }
				}
			});

			const logs = await readActivityLog(activityPath);
			expect(logs[0].results.details.coverage).toBe(85);
		});

		it('should handle special characters in event data', async () => {
			await logActivity(activityPath, {
				type: 'error',
				message: 'Error: "Something went wrong"\nLine 2'
			});

			const logs = await readActivityLog(activityPath);
			expect(logs[0].message).toBe('Error: "Something went wrong"\nLine 2');
		});

		it('should create parent directory if it does not exist', async () => {
			const nestedPath = path.join(testDir, 'nested', 'dir', 'activity.jsonl');

			await logActivity(nestedPath, { type: 'test' });

			const exists = await fs.pathExists(nestedPath);
			expect(exists).toBe(true);
		});
	});
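
	// A minimal sketch of the append-based write these tests assume
	// (hypothetical; the real implementation lives in activity-logger.ts):
	//
	//   async function logActivity(file: string, event: Record<string, unknown>) {
	//     await fs.ensureDir(path.dirname(file));
	//     const entry = { timestamp: new Date().toISOString(), ...event };
	//     await fs.appendFile(file, JSON.stringify(entry) + '\n');
	//   }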

	describe('readActivityLog', () => {
		it('should read all events from log', async () => {
			await logActivity(activityPath, { type: 'event1' });
			await logActivity(activityPath, { type: 'event2' });

			const logs = await readActivityLog(activityPath);

			expect(logs.length).toBe(2);
			expect(logs[0].type).toBe('event1');
			expect(logs[1].type).toBe('event2');
		});

		it('should return empty array for non-existent file', async () => {
			const logs = await readActivityLog(activityPath);
			expect(logs).toEqual([]);
		});

		it('should parse JSONL correctly', async () => {
			await logActivity(activityPath, { type: 'event1', data: 'test1' });
			await logActivity(activityPath, { type: 'event2', data: 'test2' });

			const logs = await readActivityLog(activityPath);

			expect(logs[0].data).toBe('test1');
			expect(logs[1].data).toBe('test2');
		});

		it('should handle empty lines', async () => {
			await fs.writeFile(
				activityPath,
				'{"type":"event1"}\n\n{"type":"event2"}\n'
			);

			const logs = await readActivityLog(activityPath);

			expect(logs.length).toBe(2);
			expect(logs[0].type).toBe('event1');
			expect(logs[1].type).toBe('event2');
		});

		it('should throw error for invalid JSON line', async () => {
			await fs.writeFile(activityPath, '{"type":"event1"}\ninvalid json\n');

			await expect(readActivityLog(activityPath)).rejects.toThrow(
				/Invalid JSON/i
			);
		});

		it('should preserve chronological order', async () => {
			for (let i = 0; i < 10; i++) {
				await logActivity(activityPath, { type: 'event', index: i });
			}

			const logs = await readActivityLog(activityPath);

			for (let i = 0; i < 10; i++) {
				expect(logs[i].index).toBe(i);
			}
		});
	});

	describe('filterActivityLog', () => {
		beforeEach(async () => {
			// Create sample log entries
			await logActivity(activityPath, { type: 'phase-start', phase: 'red' });
			await logActivity(activityPath, { type: 'test-run', result: 'fail' });
			await logActivity(activityPath, { type: 'phase-start', phase: 'green' });
			await logActivity(activityPath, { type: 'test-run', result: 'pass' });
			await logActivity(activityPath, { type: 'git-commit', hash: 'abc123' });
		});

		it('should filter by event type', async () => {
			const filtered = await filterActivityLog(activityPath, {
				type: 'phase-start'
			});

			expect(filtered.length).toBe(2);
			expect(filtered[0].type).toBe('phase-start');
			expect(filtered[1].type).toBe('phase-start');
		});

		it('should filter by multiple criteria', async () => {
			const filtered = await filterActivityLog(activityPath, {
				type: 'test-run',
				result: 'pass'
			});

			expect(filtered.length).toBe(1);
			expect(filtered[0].result).toBe('pass');
		});

		it('should return all events when no filter provided', async () => {
			const filtered = await filterActivityLog(activityPath, {});

			expect(filtered.length).toBe(5);
		});

		it('should filter by timestamp range', async () => {
			const logs = await readActivityLog(activityPath);
			const midpoint = logs[2].timestamp;

			const filtered = await filterActivityLog(activityPath, {
				timestampFrom: midpoint
			});

			// Should get events from midpoint onwards (inclusive)
			// Expect at least 3 events, may be more due to timestamp collisions
			expect(filtered.length).toBeGreaterThanOrEqual(3);
			expect(filtered.length).toBeLessThanOrEqual(5);
		});

		it('should filter by custom predicate', async () => {
			const filtered = await filterActivityLog(activityPath, {
				predicate: (event: any) => event.phase === 'red'
			});

			expect(filtered.length).toBe(1);
			expect(filtered[0].phase).toBe('red');
		});

		it('should return empty array for non-matching filter', async () => {
			const filtered = await filterActivityLog(activityPath, {
				type: 'non-existent'
			});

			expect(filtered).toEqual([]);
		});

		it('should handle nested property filters', async () => {
			await logActivity(activityPath, {
				type: 'test-results',
				results: { coverage: 85 }
			});

			const filtered = await filterActivityLog(activityPath, {
				predicate: (event: any) => event.results?.coverage > 80
			});

			expect(filtered.length).toBe(1);
			expect(filtered[0].results.coverage).toBe(85);
		});
	});

	describe('Event types', () => {
		it('should support phase-transition events', async () => {
			await logActivity(activityPath, {
				type: 'phase-transition',
				from: 'red',
				to: 'green'
			});

			const logs = await readActivityLog(activityPath);
			expect(logs[0].type).toBe('phase-transition');
			expect(logs[0].from).toBe('red');
			expect(logs[0].to).toBe('green');
		});

		it('should support test-run events', async () => {
			await logActivity(activityPath, {
				type: 'test-run',
				result: 'pass',
				testsRun: 50,
				testsPassed: 50,
				testsFailed: 0,
				coverage: 85.5
			});

			const logs = await readActivityLog(activityPath);
			expect(logs[0].testsRun).toBe(50);
			expect(logs[0].coverage).toBe(85.5);
		});

		it('should support git-operation events', async () => {
			await logActivity(activityPath, {
				type: 'git-commit',
				hash: 'abc123def456',
				message: 'feat: add new feature',
				files: ['file1.ts', 'file2.ts']
			});

			const logs = await readActivityLog(activityPath);
			expect(logs[0].hash).toBe('abc123def456');
			expect(logs[0].files.length).toBe(2);
		});

		it('should support error events', async () => {
			await logActivity(activityPath, {
				type: 'error',
				phase: 'red',
				error: 'Test failed',
				stack: 'Error stack trace...'
			});

			const logs = await readActivityLog(activityPath);
			expect(logs[0].type).toBe('error');
			expect(logs[0].error).toBe('Test failed');
		});
	});

	describe('Concurrency handling', () => {
		it('should handle rapid concurrent writes', async () => {
			const writes: Promise<void>[] = [];
			for (let i = 0; i < 50; i++) {
				writes.push(logActivity(activityPath, { type: 'event', index: i }));
			}

			await Promise.all(writes);

			const logs = await readActivityLog(activityPath);
			expect(logs.length).toBe(50);
		});

		it('should maintain data integrity with concurrent writes', async () => {
			const writes: Promise<void>[] = [];
			for (let i = 0; i < 20; i++) {
				writes.push(
					logActivity(activityPath, {
						type: 'concurrent-test',
						id: i,
						data: `data-${i}`
					})
				);
			}

			await Promise.all(writes);

			const logs = await readActivityLog(activityPath);

			// All events should be present
			expect(logs.length).toBe(20);
			// Validate ids set
			const ids = new Set(logs.map((l) => l.id));
			expect([...ids].sort((a, b) => a - b)).toEqual([...Array(20).keys()]);
			// Validate shape
			for (const log of logs) {
				expect(log.type).toBe('concurrent-test');
				expect(typeof log.id).toBe('number');
				expect(log.data).toMatch(/^data-\d+$/);
			}
		});
	});

	describe('File integrity', () => {
		it('should maintain valid JSONL after many operations', async () => {
			for (let i = 0; i < 100; i++) {
				await logActivity(activityPath, { type: 'test', iteration: i });
			}

			const content = await fs.readFile(activityPath, 'utf-8');
			const lines = content.trim().split(/\r?\n/);

			expect(lines.length).toBe(100);

			// All lines should be valid JSON
			for (const line of lines) {
				expect(() => JSON.parse(line)).not.toThrow();
			}
		});
	});
});
@@ -1,210 +0,0 @@
/**
 * @fileoverview Mock provider for testing BaseProvider functionality
 */

import type {
	AIModel,
	AIOptions,
	AIResponse,
	ProviderInfo,
	ProviderUsageStats
} from '../../src/interfaces/ai-provider.interface';
import {
	BaseProvider,
	type BaseProviderConfig,
	type CompletionResult
} from '../../src/providers/ai/base-provider';

/**
 * Configuration for MockProvider behavior
 */
export interface MockProviderOptions extends BaseProviderConfig {
	shouldFail?: boolean;
	failAfterAttempts?: number;
	simulateRateLimit?: boolean;
	simulateTimeout?: boolean;
	responseDelay?: number;
	tokenMultiplier?: number;
}
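
// Example configuration (illustrative, using only the fields defined above):
// fail the first two attempts with a simulated 429 so BaseProvider's retry
// path can be exercised, then succeed:
//
//   const provider = new MockProvider({
//     apiKey: 'test-key',
//     failAfterAttempts: 2,
//     simulateRateLimit: true
//   });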

/**
 * Mock provider for testing BaseProvider functionality
 */
export class MockProvider extends BaseProvider {
	private attemptCount = 0;
	private readonly options: MockProviderOptions;

	constructor(options: MockProviderOptions) {
		super(options);
		this.options = options;
	}

	/**
	 * Simulate completion generation with configurable behavior
	 */
	protected async generateCompletionInternal(
		prompt: string,
		_options?: AIOptions
	): Promise<CompletionResult> {
		this.attemptCount++;

		// Simulate delay if configured
		if (this.options.responseDelay) {
			await this.sleep(this.options.responseDelay);
		}

		// Simulate failures based on configuration
		if (this.options.shouldFail) {
			throw new Error('Mock provider error');
		}

		if (
			this.options.failAfterAttempts &&
			this.attemptCount <= this.options.failAfterAttempts
		) {
			if (this.options.simulateRateLimit) {
				throw new Error('Rate limit exceeded - too many requests (429)');
			}
			if (this.options.simulateTimeout) {
				throw new Error('Request timeout - ECONNRESET');
			}
			throw new Error('Temporary failure');
		}

		// Return successful mock response
		return {
			content: `Mock response to: ${prompt}`,
			inputTokens: this.calculateTokens(prompt),
			outputTokens: this.calculateTokens(`Mock response to: ${prompt}`),
			finishReason: 'complete',
			model: this.model
		};
	}

	/**
	 * Simple token calculation for testing
	 */
	calculateTokens(text: string, _model?: string): number {
		const multiplier = this.options.tokenMultiplier || 1;
		// Rough approximation: 1 token per 4 characters
		return Math.ceil((text.length / 4) * multiplier);
	}
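
	// Example: with the default multiplier of 1, an 8-character string costs
	// Math.ceil(8 / 4) === 2 tokens, and a 9-character string rounds up to 3.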

	getName(): string {
		return 'mock';
	}

	getDefaultModel(): string {
		return 'mock-model-v1';
	}

	/**
	 * Get the number of attempts made
	 */
	getAttemptCount(): number {
		return this.attemptCount;
	}

	/**
	 * Reset attempt counter
	 */
	resetAttempts(): void {
		this.attemptCount = 0;
	}

	// Implement remaining abstract methods
	async generateStreamingCompletion(
		prompt: string,
		_options?: AIOptions
	): Promise<AsyncIterator<Partial<AIResponse>>> {
		// Simple mock implementation
		const response: Partial<AIResponse> = {
			content: `Mock streaming response to: ${prompt}`,
			provider: this.getName(),
			model: this.model
		};

		return {
			async next() {
				return { value: response, done: true };
			}
		};
	}

	async isAvailable(): Promise<boolean> {
		return !this.options.shouldFail;
	}

	getProviderInfo(): ProviderInfo {
		return {
			name: 'mock',
			displayName: 'Mock Provider',
			description: 'Mock provider for testing',
			models: this.getAvailableModels(),
			defaultModel: this.getDefaultModel(),
			requiresApiKey: true,
			features: {
				streaming: true,
				functions: false,
				vision: false,
				embeddings: false
			}
		};
	}

	getAvailableModels(): AIModel[] {
		return [
			{
				id: 'mock-model-v1',
				name: 'Mock Model v1',
				description: 'First mock model',
				contextLength: 4096,
				inputCostPer1K: 0.001,
				outputCostPer1K: 0.002,
				supportsStreaming: true
			},
			{
				id: 'mock-model-v2',
				name: 'Mock Model v2',
				description: 'Second mock model',
				contextLength: 8192,
				inputCostPer1K: 0.002,
				outputCostPer1K: 0.004,
				supportsStreaming: true
			}
		];
	}

	async validateCredentials(): Promise<boolean> {
		return this.apiKey === 'valid-key';
	}

	async getUsageStats(): Promise<ProviderUsageStats | null> {
		return {
			totalRequests: this.attemptCount,
			totalTokens: 1000,
			totalCost: 0.01,
			requestsToday: this.attemptCount,
			tokensToday: 1000,
			costToday: 0.01,
			averageResponseTime: 100,
			successRate: 0.9,
			lastRequestAt: new Date().toISOString()
		};
	}

	async initialize(): Promise<void> {
		// No-op for mock
	}

	async close(): Promise<void> {
		// No-op for mock
	}

	// Override retry configuration for testing
	protected getMaxRetries(): number {
		return this.options.failAfterAttempts
			? this.options.failAfterAttempts + 1
			: 3;
	}
}
@@ -1,265 +0,0 @@
/**
 * @fileoverview Unit tests for BaseProvider abstract class
 */

import { beforeEach, describe, expect, it } from 'vitest';
import {
	ERROR_CODES,
	TaskMasterError
} from '../../src/errors/task-master-error';
import { MockProvider } from '../mocks/mock-provider';

describe('BaseProvider', () => {
	describe('constructor', () => {
		it('should require an API key', () => {
			expect(() => {
				new MockProvider({ apiKey: '' });
			}).toThrow(TaskMasterError);
		});

		it('should initialize with provided API key and model', () => {
			const provider = new MockProvider({
				apiKey: 'test-key',
				model: 'mock-model-v2'
			});

			expect(provider.getModel()).toBe('mock-model-v2');
		});

		it('should use default model if not provided', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });
			expect(provider.getModel()).toBe('mock-model-v1');
		});
	});

	describe('generateCompletion', () => {
		let provider: MockProvider;

		beforeEach(() => {
			provider = new MockProvider({ apiKey: 'test-key' });
		});

		it('should successfully generate a completion', async () => {
			const response = await provider.generateCompletion('Test prompt');

			expect(response).toMatchObject({
				content: 'Mock response to: Test prompt',
				provider: 'mock',
				model: 'mock-model-v1',
				inputTokens: expect.any(Number),
				outputTokens: expect.any(Number),
				totalTokens: expect.any(Number),
				duration: expect.any(Number),
				timestamp: expect.any(String)
			});
		});

		it('should validate empty prompts', async () => {
			await expect(provider.generateCompletion('')).rejects.toThrow(
				'Prompt must be a non-empty string'
			);
		});

		it('should validate prompt type', async () => {
			await expect(provider.generateCompletion(null as any)).rejects.toThrow(
				'Prompt must be a non-empty string'
			);
		});

		it('should validate temperature range', async () => {
			await expect(
				provider.generateCompletion('Test', { temperature: 3 })
			).rejects.toThrow('Temperature must be between 0 and 2');
		});

		it('should validate maxTokens range', async () => {
			await expect(
				provider.generateCompletion('Test', { maxTokens: 0 })
			).rejects.toThrow('Max tokens must be between 1 and 100000');
		});

		it('should validate topP range', async () => {
			await expect(
				provider.generateCompletion('Test', { topP: 1.5 })
			).rejects.toThrow('Top-p must be between 0 and 1');
		});
	});

	describe('retry logic', () => {
		it('should retry on rate limit errors', async () => {
			const provider = new MockProvider({
				apiKey: 'test-key',
				failAfterAttempts: 2,
				simulateRateLimit: true,
				responseDelay: 10
			});

			const response = await provider.generateCompletion('Test prompt');

			expect(response.content).toBe('Mock response to: Test prompt');
			expect(provider.getAttemptCount()).toBe(3); // 2 failures + 1 success
		});

		it('should retry on timeout errors', async () => {
			const provider = new MockProvider({
				apiKey: 'test-key',
				failAfterAttempts: 1,
				simulateTimeout: true
			});

			const response = await provider.generateCompletion('Test prompt');

			expect(response.content).toBe('Mock response to: Test prompt');
			expect(provider.getAttemptCount()).toBe(2); // 1 failure + 1 success
		});

		it('should fail after max retries', async () => {
			const provider = new MockProvider({
				apiKey: 'test-key',
				shouldFail: true
			});

			await expect(provider.generateCompletion('Test prompt')).rejects.toThrow(
				'Mock provider error'
			);
		});

		it('should calculate exponential backoff delays', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });

			// Access protected method through type assertion
			const calculateDelay = (provider as any).calculateBackoffDelay.bind(
				provider
			);

			const delay1 = calculateDelay(1);
			const delay2 = calculateDelay(2);
			const delay3 = calculateDelay(3);

			// Check exponential growth (with jitter, so use ranges)
			expect(delay1).toBeGreaterThanOrEqual(900);
			expect(delay1).toBeLessThanOrEqual(1100);

			expect(delay2).toBeGreaterThanOrEqual(1800);
			expect(delay2).toBeLessThanOrEqual(2200);

			expect(delay3).toBeGreaterThanOrEqual(3600);
			expect(delay3).toBeLessThanOrEqual(4400);
		});
|
||||
});
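
	// The ranges asserted above fit a base delay of 1000 ms that doubles
	// each attempt with roughly ±10% jitter. A sketch of such a formula,
	// assuming this shape (the real calculateBackoffDelay may differ):
	function sketchBackoffDelay(attempt: number): number {
		const baseDelayMs = 1000;
		const exponential = baseDelayMs * 2 ** (attempt - 1);
		const jitter = (Math.random() * 0.2 - 0.1) * exponential; // ±10%
		return exponential + jitter; // 1: 900-1100, 2: 1800-2200, 3: 3600-4400
	}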

	describe('error handling', () => {
		it('should wrap provider errors properly', async () => {
			const provider = new MockProvider({
				apiKey: 'test-key',
				shouldFail: true
			});

			try {
				await provider.generateCompletion('Test prompt');
				expect.fail('Should have thrown an error');
			} catch (error) {
				expect(error).toBeInstanceOf(TaskMasterError);
				const tmError = error as TaskMasterError;
				expect(tmError.code).toBe(ERROR_CODES.PROVIDER_ERROR);
				expect(tmError.context.operation).toBe('generateCompletion');
				expect(tmError.context.resource).toBe('mock');
			}
		});

		it('should identify rate limit errors correctly', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });
			const isRateLimitError = (provider as any).isRateLimitError.bind(
				provider
			);

			expect(isRateLimitError(new Error('Rate limit exceeded'))).toBe(true);
			expect(isRateLimitError(new Error('Too many requests'))).toBe(true);
			expect(isRateLimitError(new Error('Status: 429'))).toBe(true);
			expect(isRateLimitError(new Error('Some other error'))).toBe(false);
		});

		it('should identify timeout errors correctly', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });
			const isTimeoutError = (provider as any).isTimeoutError.bind(provider);

			expect(isTimeoutError(new Error('Request timeout'))).toBe(true);
			expect(isTimeoutError(new Error('Operation timed out'))).toBe(true);
			expect(isTimeoutError(new Error('ECONNRESET'))).toBe(true);
			expect(isTimeoutError(new Error('Some other error'))).toBe(false);
		});

		it('should identify network errors correctly', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });
			const isNetworkError = (provider as any).isNetworkError.bind(provider);

			expect(isNetworkError(new Error('Network error'))).toBe(true);
			expect(isNetworkError(new Error('ENOTFOUND'))).toBe(true);
			expect(isNetworkError(new Error('ECONNREFUSED'))).toBe(true);
			expect(isNetworkError(new Error('Some other error'))).toBe(false);
		});
	});
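
	// The classification tests above suggest simple message matching.
	// A sketch of one plausible implementation, assuming substring
	// checks (illustrative; the real isRateLimitError may differ):
	function sketchIsRateLimitError(error: Error): boolean {
		const message = error.message.toLowerCase();
		return (
			message.includes('rate limit') ||
			message.includes('too many requests') ||
			message.includes('429')
		);
	}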

	describe('model management', () => {
		it('should get and set model', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });

			expect(provider.getModel()).toBe('mock-model-v1');

			provider.setModel('mock-model-v2');
			expect(provider.getModel()).toBe('mock-model-v2');
		});
	});

	describe('provider information', () => {
		it('should return provider info', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });
			const info = provider.getProviderInfo();

			expect(info.name).toBe('mock');
			expect(info.displayName).toBe('Mock Provider');
			expect(info.requiresApiKey).toBe(true);
			expect(info.models).toHaveLength(2);
		});

		it('should return available models', () => {
			const provider = new MockProvider({ apiKey: 'test-key' });
			const models = provider.getAvailableModels();

			expect(models).toHaveLength(2);
			expect(models[0].id).toBe('mock-model-v1');
			expect(models[1].id).toBe('mock-model-v2');
		});

		it('should validate credentials', async () => {
			const validProvider = new MockProvider({ apiKey: 'valid-key' });
			const invalidProvider = new MockProvider({ apiKey: 'invalid-key' });

			expect(await validProvider.validateCredentials()).toBe(true);
			expect(await invalidProvider.validateCredentials()).toBe(false);
		});
	});

	describe('template method pattern', () => {
		it('should follow the template method flow', async () => {
			const provider = new MockProvider({
				apiKey: 'test-key',
				responseDelay: 50
			});

			const startTime = Date.now();
			const response = await provider.generateCompletion('Test prompt', {
				temperature: 0.5,
				maxTokens: 100
			});
			const endTime = Date.now();

			// Verify the response was processed through the template
			expect(response.content).toBeDefined();
			expect(response.duration).toBeGreaterThanOrEqual(50);
			expect(response.duration).toBeLessThanOrEqual(endTime - startTime + 10);
			expect(response.timestamp).toBeDefined();
			expect(response.provider).toBe('mock');
		});
	});
});
@@ -1,82 +0,0 @@
/**
 * Tests for executor functionality
 */

import { beforeEach, describe, expect, it, vi } from 'vitest';
import {
	ClaudeExecutor,
	ExecutorFactory,
	type ExecutorOptions
} from '../../src/executors/index.js';

describe('ExecutorFactory', () => {
	const mockProjectRoot = '/test/project';

	it('should create a Claude executor', () => {
		const options: ExecutorOptions = {
			type: 'claude',
			projectRoot: mockProjectRoot
		};

		const executor = ExecutorFactory.create(options);
		expect(executor).toBeInstanceOf(ClaudeExecutor);
	});

	it('should throw error for unimplemented executor types', () => {
		const options: ExecutorOptions = {
			type: 'shell',
			projectRoot: mockProjectRoot
		};

		expect(() => ExecutorFactory.create(options)).toThrow(
			'Shell executor not yet implemented'
		);
	});

	it('should get available executor types', () => {
		const types = ExecutorFactory.getAvailableTypes();
		expect(types).toContain('claude');
		expect(types).toContain('shell');
		expect(types).toContain('custom');
	});
});

describe('ClaudeExecutor', () => {
	const mockProjectRoot = '/test/project';
	let executor: ClaudeExecutor;

	beforeEach(() => {
		executor = new ClaudeExecutor(mockProjectRoot);
	});

	it('should return claude as executor type', () => {
		expect(executor.getType()).toBe('claude');
	});

	it('should format task prompt correctly', () => {
		const mockTask = {
			id: '1',
			title: 'Test Task',
			description: 'Test description',
			status: 'pending' as const,
			priority: 'high' as const,
			dependencies: [],
			details: 'Implementation details',
			testStrategy: 'Unit tests',
			subtasks: []
		};

		// Access protected method through any type assertion for testing
		const formattedPrompt = (executor as any).formatTaskPrompt(mockTask);

		expect(formattedPrompt).toContain('Task ID: 1');
		expect(formattedPrompt).toContain('Title: Test Task');
		expect(formattedPrompt).toContain('Description:\nTest description');
		expect(formattedPrompt).toContain(
			'Implementation Details:\nImplementation details'
		);
		expect(formattedPrompt).toContain('Test Strategy:\nUnit tests');
		expect(formattedPrompt).toContain('Status: pending');
		expect(formattedPrompt).toContain('Priority: high');
	});
});
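
// From the assertions above, formatTaskPrompt appears to render labeled
// sections. A sketch consistent with those expectations (field order,
// spacing, and the section separator are guesses, not the actual method):
function sketchFormatTaskPrompt(task: {
	id: string;
	title: string;
	description: string;
	status: string;
	priority: string;
	details: string;
	testStrategy: string;
}): string {
	return [
		`Task ID: ${task.id}`,
		`Title: ${task.title}`,
		`Status: ${task.status}`,
		`Priority: ${task.priority}`,
		`Description:\n${task.description}`,
		`Implementation Details:\n${task.details}`,
		`Test Strategy:\n${task.testStrategy}`
	].join('\n\n');
}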
@@ -1,139 +0,0 @@
/**
 * Smoke tests to verify basic package functionality and imports
 */

import {
	PlaceholderParser,
	PlaceholderStorage,
	StorageError,
	TaskNotFoundError,
	TmCoreError,
	ValidationError,
	formatDate,
	generateTaskId,
	isValidTaskId,
	name,
	version
} from '@tm/core';

import type {
	PlaceholderTask,
	TaskId,
	TaskPriority,
	TaskStatus
} from '@tm/core';

describe('tm-core smoke tests', () => {
	describe('package metadata', () => {
		it('should export correct package name and version', () => {
			expect(name).toBe('@task-master/tm-core');
			expect(version).toBe('1.0.0');
		});
	});

	describe('utility functions', () => {
		it('should generate valid task IDs', () => {
			const id1 = generateTaskId();
			const id2 = generateTaskId();

			expect(typeof id1).toBe('string');
			expect(typeof id2).toBe('string');
			expect(id1).not.toBe(id2); // Should be unique
			expect(isValidTaskId(id1)).toBe(true);
			expect(isValidTaskId('')).toBe(false);
		});

		it('should format dates', () => {
			const date = new Date('2023-01-01T00:00:00.000Z');
			const formatted = formatDate(date);
			expect(formatted).toBe('2023-01-01T00:00:00.000Z');
		});
	});

	describe('placeholder storage', () => {
		it('should perform basic storage operations', async () => {
			const storage = new PlaceholderStorage();
			const testPath = 'test/path';
			const testData = 'test data';

			// Initially should not exist
			expect(await storage.exists(testPath)).toBe(false);
			expect(await storage.read(testPath)).toBe(null);

			// Write and verify
			await storage.write(testPath, testData);
			expect(await storage.exists(testPath)).toBe(true);
			expect(await storage.read(testPath)).toBe(testData);

			// Delete and verify
			await storage.delete(testPath);
			expect(await storage.exists(testPath)).toBe(false);
		});
	});

	describe('placeholder parser', () => {
		it('should parse simple task lists', async () => {
			const parser = new PlaceholderParser();
			const content = `
- Task 1
- Task 2
- Task 3
`;

			const isValid = await parser.validate(content);
			expect(isValid).toBe(true);

			const tasks = await parser.parse(content);
			expect(tasks).toHaveLength(3);
			expect(tasks[0]?.title).toBe('Task 1');
			expect(tasks[1]?.title).toBe('Task 2');
			expect(tasks[2]?.title).toBe('Task 3');

			tasks.forEach((task) => {
				expect(task.status).toBe('pending');
				expect(task.priority).toBe('medium');
			});
		});
	});

	describe('error classes', () => {
		it('should create and throw custom errors', () => {
			const baseError = new TmCoreError('Base error');
			expect(baseError.name).toBe('TmCoreError');
			expect(baseError.message).toBe('Base error');

			const taskNotFound = new TaskNotFoundError('task-123');
			expect(taskNotFound.name).toBe('TaskNotFoundError');
			expect(taskNotFound.code).toBe('TASK_NOT_FOUND');
			expect(taskNotFound.message).toContain('task-123');

			const validationError = new ValidationError('Invalid data');
			expect(validationError.name).toBe('ValidationError');
			expect(validationError.code).toBe('VALIDATION_ERROR');

			const storageError = new StorageError('Storage failed');
			expect(storageError.name).toBe('StorageError');
			expect(storageError.code).toBe('STORAGE_ERROR');
		});
	});

	describe('type definitions', () => {
		it('should have correct types available', () => {
			// These are compile-time checks that verify types exist
			const taskId: TaskId = 'test-id';
			const status: TaskStatus = 'pending';
			const priority: TaskPriority = 'high';

			const task: PlaceholderTask = {
				id: taskId,
				title: 'Test Task',
				status: status,
				priority: priority
			};

			expect(task.id).toBe('test-id');
			expect(task.status).toBe('pending');
			expect(task.priority).toBe('high');
		});
	});
});
@@ -8,7 +8,7 @@
		"sourceMap": true,
		"outDir": "./dist",
		"baseUrl": ".",
		"rootDir": "./src",
		"rootDir": ".",
		"strict": true,
		"noImplicitAny": true,
		"strictNullChecks": true,
@@ -26,11 +26,11 @@
		"forceConsistentCasingInFileNames": true,
		"moduleResolution": "NodeNext",
		"moduleDetection": "force",
		"types": ["node"],
		"types": ["node", "vitest/globals"],
		"resolveJsonModule": true,
		"isolatedModules": true,
		"allowImportingTsExtensions": false
	},
	"include": ["src/**/*"],
	"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
	"include": ["src/**/*", "tests/**/*"],
	"exclude": ["node_modules", "dist"]
}