feat: Phase 1 - Complete TDD Workflow Automation System (#1289)
Co-authored-by: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -1,3 +1,3 @@
|
|||||||
reviews:
|
reviews:
|
||||||
profile: assertive
|
profile: chill
|
||||||
poem: false
|
poem: false
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -97,3 +97,6 @@ apps/extension/vsix-build/
|
|||||||
|
|
||||||
# turbo
|
# turbo
|
||||||
.turbo
|
.turbo
|
||||||
|
|
||||||
|
# TaskMaster Workflow State (now stored in ~/.taskmaster/sessions/)
|
||||||
|
# No longer needed in .gitignore as state is stored globally
|
||||||
File diff suppressed because one or more lines are too long
16
CLAUDE.md
16
CLAUDE.md
@@ -6,13 +6,20 @@
|
|||||||
|
|
||||||
## Test Guidelines
|
## Test Guidelines
|
||||||
|
|
||||||
|
### Test File Placement
|
||||||
|
|
||||||
|
- **Package & tests**: Place in `packages/<package-name>/src/<module>/<file>.spec.ts` or `apps/<app-name>/src/<module>/<file.spec.ts>` alongside source
|
||||||
|
- **Package integration tests**: Place in `packages/<package-name>/tests/integration/<module>/<file>.test.ts` or `apps/<app-name>/tests/integration/<module>/<file>.test.ts` alongside source
|
||||||
|
- **Isolated unit tests**: Use `tests/unit/packages/<package-name>/` only when parallel placement isn't possible
|
||||||
|
- **Test extension**: Always use `.ts` for TypeScript tests, never `.js`
|
||||||
|
|
||||||
### Synchronous Tests
|
### Synchronous Tests
|
||||||
- **NEVER use async/await in test functions** unless testing actual asynchronous operations
|
- **NEVER use async/await in test functions** unless testing actual asynchronous operations
|
||||||
- Use synchronous top-level imports instead of dynamic `await import()`
|
- Use synchronous top-level imports instead of dynamic `await import()`
|
||||||
- Test bodies should be synchronous whenever possible
|
- Test bodies should be synchronous whenever possible
|
||||||
- Example:
|
- Example:
|
||||||
```javascript
|
```typescript
|
||||||
// ✅ CORRECT - Synchronous imports
|
// ✅ CORRECT - Synchronous imports with .ts extension
|
||||||
import { MyClass } from '../src/my-class.js';
|
import { MyClass } from '../src/my-class.js';
|
||||||
|
|
||||||
it('should verify behavior', () => {
|
it('should verify behavior', () => {
|
||||||
@@ -26,6 +33,11 @@
|
|||||||
});
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Documentation Guidelines
|
||||||
|
|
||||||
|
- **Documentation location**: Write docs in `apps/docs/` (Mintlify site source), not `docs/`
|
||||||
|
- **Documentation URL**: Reference docs at https://docs.task-master.dev, not local file paths
|
||||||
|
|
||||||
## Changeset Guidelines
|
## Changeset Guidelines
|
||||||
|
|
||||||
- When creating changesets, remember that it's user-facing, meaning we don't have to get into the specifics of the code, but rather mention what the end-user is getting or fixing from this changeset.
|
- When creating changesets, remember that it's user-facing, meaning we don't have to get into the specifics of the code, but rather mention what the end-user is getting or fixing from this changeset.
|
||||||
@@ -14,7 +14,7 @@ import { ContextCommand } from './commands/context.command.js';
|
|||||||
import { StartCommand } from './commands/start.command.js';
|
import { StartCommand } from './commands/start.command.js';
|
||||||
import { SetStatusCommand } from './commands/set-status.command.js';
|
import { SetStatusCommand } from './commands/set-status.command.js';
|
||||||
import { ExportCommand } from './commands/export.command.js';
|
import { ExportCommand } from './commands/export.command.js';
|
||||||
import { AutopilotCommand } from './commands/autopilot.command.js';
|
import { AutopilotCommand } from './commands/autopilot/index.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Command metadata for registration
|
* Command metadata for registration
|
||||||
@@ -73,7 +73,8 @@ export class CommandRegistry {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: 'autopilot',
|
name: 'autopilot',
|
||||||
description: 'Execute a task autonomously using TDD workflow',
|
description:
|
||||||
|
'AI agent orchestration for TDD workflow (start, resume, next, complete, commit, status, abort)',
|
||||||
commandClass: AutopilotCommand as any,
|
commandClass: AutopilotCommand as any,
|
||||||
category: 'development'
|
category: 'development'
|
||||||
},
|
},
|
||||||
|
|||||||
119
apps/cli/src/commands/autopilot/abort.command.ts
Normal file
119
apps/cli/src/commands/autopilot/abort.command.ts
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Abort Command - Safely terminate workflow
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { WorkflowOrchestrator } from '@tm/core';
|
||||||
|
import {
|
||||||
|
AutopilotBaseOptions,
|
||||||
|
hasWorkflowState,
|
||||||
|
loadWorkflowState,
|
||||||
|
deleteWorkflowState,
|
||||||
|
OutputFormatter
|
||||||
|
} from './shared.js';
|
||||||
|
import inquirer from 'inquirer';
|
||||||
|
|
||||||
|
interface AbortOptions extends AutopilotBaseOptions {
|
||||||
|
force?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Abort Command - Safely terminate workflow and clean up state
|
||||||
|
*/
|
||||||
|
export class AbortCommand extends Command {
|
||||||
|
constructor() {
|
||||||
|
super('abort');
|
||||||
|
|
||||||
|
this.description('Abort the current TDD workflow and clean up state')
|
||||||
|
.option('-f, --force', 'Force abort without confirmation')
|
||||||
|
.action(async (options: AbortOptions) => {
|
||||||
|
await this.execute(options);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
private async execute(options: AbortOptions): Promise<void> {
|
||||||
|
// Inherit parent options
|
||||||
|
const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
|
||||||
|
const mergedOptions: AbortOptions = {
|
||||||
|
...parentOpts,
|
||||||
|
...options,
|
||||||
|
projectRoot:
|
||||||
|
options.projectRoot || parentOpts?.projectRoot || process.cwd()
|
||||||
|
};
|
||||||
|
|
||||||
|
const formatter = new OutputFormatter(mergedOptions.json || false);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Check for workflow state
|
||||||
|
const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!hasState) {
|
||||||
|
formatter.warning('No active workflow to abort');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load state
|
||||||
|
const state = await loadWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!state) {
|
||||||
|
formatter.error('Failed to load workflow state');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restore orchestrator
|
||||||
|
const orchestrator = new WorkflowOrchestrator(state.context);
|
||||||
|
orchestrator.restoreState(state);
|
||||||
|
|
||||||
|
// Get progress before abort
|
||||||
|
const progress = orchestrator.getProgress();
|
||||||
|
const currentSubtask = orchestrator.getCurrentSubtask();
|
||||||
|
|
||||||
|
// Confirm abort if not forced or in JSON mode
|
||||||
|
if (!mergedOptions.force && !mergedOptions.json) {
|
||||||
|
const { confirmed } = await inquirer.prompt([
|
||||||
|
{
|
||||||
|
type: 'confirm',
|
||||||
|
name: 'confirmed',
|
||||||
|
message:
|
||||||
|
`This will abort the workflow for task ${state.context.taskId}. ` +
|
||||||
|
`Progress: ${progress.completed}/${progress.total} subtasks completed. ` +
|
||||||
|
`Continue?`,
|
||||||
|
default: false
|
||||||
|
}
|
||||||
|
]);
|
||||||
|
|
||||||
|
if (!confirmed) {
|
||||||
|
formatter.info('Abort cancelled');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trigger abort in orchestrator
|
||||||
|
orchestrator.transition({ type: 'ABORT' });
|
||||||
|
|
||||||
|
// Delete workflow state
|
||||||
|
await deleteWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
|
||||||
|
// Output result
|
||||||
|
formatter.success('Workflow aborted', {
|
||||||
|
taskId: state.context.taskId,
|
||||||
|
branchName: state.context.branchName,
|
||||||
|
progress: {
|
||||||
|
completed: progress.completed,
|
||||||
|
total: progress.total
|
||||||
|
},
|
||||||
|
lastSubtask: currentSubtask
|
||||||
|
? {
|
||||||
|
id: currentSubtask.id,
|
||||||
|
title: currentSubtask.title
|
||||||
|
}
|
||||||
|
: null,
|
||||||
|
note: 'Branch and commits remain. Clean up manually if needed.'
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
formatter.error((error as Error).message);
|
||||||
|
if (mergedOptions.verbose) {
|
||||||
|
console.error((error as Error).stack);
|
||||||
|
}
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
169
apps/cli/src/commands/autopilot/commit.command.ts
Normal file
169
apps/cli/src/commands/autopilot/commit.command.ts
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Commit Command - Create commit with enhanced message generation
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { WorkflowOrchestrator } from '@tm/core';
|
||||||
|
import {
|
||||||
|
AutopilotBaseOptions,
|
||||||
|
hasWorkflowState,
|
||||||
|
loadWorkflowState,
|
||||||
|
createGitAdapter,
|
||||||
|
createCommitMessageGenerator,
|
||||||
|
OutputFormatter,
|
||||||
|
saveWorkflowState
|
||||||
|
} from './shared.js';
|
||||||
|
|
||||||
|
type CommitOptions = AutopilotBaseOptions;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Commit Command - Create commit using enhanced message generator
|
||||||
|
*/
|
||||||
|
export class CommitCommand extends Command {
|
||||||
|
constructor() {
|
||||||
|
super('commit');
|
||||||
|
|
||||||
|
this.description('Create a commit for the completed GREEN phase').action(
|
||||||
|
async (options: CommitOptions) => {
|
||||||
|
await this.execute(options);
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
private async execute(options: CommitOptions): Promise<void> {
|
||||||
|
// Inherit parent options
|
||||||
|
const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
|
||||||
|
const mergedOptions: CommitOptions = {
|
||||||
|
...parentOpts,
|
||||||
|
...options,
|
||||||
|
projectRoot:
|
||||||
|
options.projectRoot || parentOpts?.projectRoot || process.cwd()
|
||||||
|
};
|
||||||
|
|
||||||
|
const formatter = new OutputFormatter(mergedOptions.json || false);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Check for workflow state
|
||||||
|
const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!hasState) {
|
||||||
|
formatter.error('No active workflow', {
|
||||||
|
suggestion: 'Start a workflow with: autopilot start <taskId>'
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load state
|
||||||
|
const state = await loadWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!state) {
|
||||||
|
formatter.error('Failed to load workflow state');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
const orchestrator = new WorkflowOrchestrator(state.context);
|
||||||
|
orchestrator.restoreState(state);
|
||||||
|
orchestrator.enableAutoPersist(async (newState) => {
|
||||||
|
await saveWorkflowState(mergedOptions.projectRoot!, newState);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Verify in COMMIT phase
|
||||||
|
const tddPhase = orchestrator.getCurrentTDDPhase();
|
||||||
|
if (tddPhase !== 'COMMIT') {
|
||||||
|
formatter.error('Not in COMMIT phase', {
|
||||||
|
currentPhase: tddPhase || orchestrator.getCurrentPhase(),
|
||||||
|
suggestion: 'Complete RED and GREEN phases first'
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get current subtask
|
||||||
|
const currentSubtask = orchestrator.getCurrentSubtask();
|
||||||
|
if (!currentSubtask) {
|
||||||
|
formatter.error('No current subtask');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize git adapter
|
||||||
|
const gitAdapter = createGitAdapter(mergedOptions.projectRoot!);
|
||||||
|
await gitAdapter.ensureGitRepository();
|
||||||
|
|
||||||
|
// Check for staged changes
|
||||||
|
const hasStagedChanges = await gitAdapter.hasStagedChanges();
|
||||||
|
if (!hasStagedChanges) {
|
||||||
|
// Stage all changes
|
||||||
|
formatter.info('No staged changes, staging all changes...');
|
||||||
|
await gitAdapter.stageFiles(['.']);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get changed files for scope detection
|
||||||
|
const status = await gitAdapter.getStatus();
|
||||||
|
const changedFiles = [...status.staged, ...status.modified];
|
||||||
|
|
||||||
|
// Generate commit message
|
||||||
|
const messageGenerator = createCommitMessageGenerator();
|
||||||
|
const testResults = state.context.lastTestResults;
|
||||||
|
|
||||||
|
const commitMessage = messageGenerator.generateMessage({
|
||||||
|
type: 'feat',
|
||||||
|
description: currentSubtask.title,
|
||||||
|
changedFiles,
|
||||||
|
taskId: state.context.taskId,
|
||||||
|
phase: 'TDD',
|
||||||
|
tag: (state.context.metadata.tag as string) || undefined,
|
||||||
|
testsPassing: testResults?.passed,
|
||||||
|
testsFailing: testResults?.failed,
|
||||||
|
coveragePercent: undefined // Could be added if available
|
||||||
|
});
|
||||||
|
|
||||||
|
// Create commit with metadata
|
||||||
|
await gitAdapter.createCommit(commitMessage, {
|
||||||
|
metadata: {
|
||||||
|
taskId: state.context.taskId,
|
||||||
|
subtaskId: currentSubtask.id,
|
||||||
|
phase: 'COMMIT',
|
||||||
|
tddCycle: 'complete'
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get commit info
|
||||||
|
const lastCommit = await gitAdapter.getLastCommit();
|
||||||
|
|
||||||
|
// Complete COMMIT phase (this marks subtask as completed)
|
||||||
|
orchestrator.transition({ type: 'COMMIT_COMPLETE' });
|
||||||
|
|
||||||
|
// Check if should advance to next subtask
|
||||||
|
const progress = orchestrator.getProgress();
|
||||||
|
if (progress.current < progress.total) {
|
||||||
|
orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
|
||||||
|
} else {
|
||||||
|
// All subtasks complete
|
||||||
|
orchestrator.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
|
||||||
|
}
|
||||||
|
|
||||||
|
// Output success
|
||||||
|
formatter.success('Commit created', {
|
||||||
|
commitHash: lastCommit.hash.substring(0, 7),
|
||||||
|
message: commitMessage.split('\n')[0], // First line only
|
||||||
|
subtask: {
|
||||||
|
id: currentSubtask.id,
|
||||||
|
title: currentSubtask.title,
|
||||||
|
status: currentSubtask.status
|
||||||
|
},
|
||||||
|
progress: {
|
||||||
|
completed: progress.completed,
|
||||||
|
total: progress.total,
|
||||||
|
percentage: progress.percentage
|
||||||
|
},
|
||||||
|
nextAction:
|
||||||
|
progress.completed < progress.total
|
||||||
|
? 'Start next subtask with RED phase'
|
||||||
|
: 'All subtasks complete. Run: autopilot status'
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
formatter.error((error as Error).message);
|
||||||
|
if (mergedOptions.verbose) {
|
||||||
|
console.error((error as Error).stack);
|
||||||
|
}
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
172
apps/cli/src/commands/autopilot/complete.command.ts
Normal file
172
apps/cli/src/commands/autopilot/complete.command.ts
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Complete Command - Complete current TDD phase with validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { WorkflowOrchestrator, TestResult } from '@tm/core';
|
||||||
|
import {
|
||||||
|
AutopilotBaseOptions,
|
||||||
|
hasWorkflowState,
|
||||||
|
loadWorkflowState,
|
||||||
|
OutputFormatter
|
||||||
|
} from './shared.js';
|
||||||
|
|
||||||
|
interface CompleteOptions extends AutopilotBaseOptions {
|
||||||
|
results?: string;
|
||||||
|
coverage?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Complete Command - Mark current phase as complete with validation
|
||||||
|
*/
|
||||||
|
export class CompleteCommand extends Command {
|
||||||
|
constructor() {
|
||||||
|
super('complete');
|
||||||
|
|
||||||
|
this.description('Complete the current TDD phase with result validation')
|
||||||
|
.option(
|
||||||
|
'-r, --results <json>',
|
||||||
|
'Test results JSON (with total, passed, failed, skipped)'
|
||||||
|
)
|
||||||
|
.option('-c, --coverage <percent>', 'Coverage percentage')
|
||||||
|
.action(async (options: CompleteOptions) => {
|
||||||
|
await this.execute(options);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
private async execute(options: CompleteOptions): Promise<void> {
|
||||||
|
// Inherit parent options
|
||||||
|
const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
|
||||||
|
const mergedOptions: CompleteOptions = {
|
||||||
|
...parentOpts,
|
||||||
|
...options,
|
||||||
|
projectRoot:
|
||||||
|
options.projectRoot || parentOpts?.projectRoot || process.cwd()
|
||||||
|
};
|
||||||
|
|
||||||
|
const formatter = new OutputFormatter(mergedOptions.json || false);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Check for workflow state
|
||||||
|
const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!hasState) {
|
||||||
|
formatter.error('No active workflow', {
|
||||||
|
suggestion: 'Start a workflow with: autopilot start <taskId>'
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load state
|
||||||
|
const state = await loadWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!state) {
|
||||||
|
formatter.error('Failed to load workflow state');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restore orchestrator with persistence
|
||||||
|
const { saveWorkflowState } = await import('./shared.js');
|
||||||
|
const orchestrator = new WorkflowOrchestrator(state.context);
|
||||||
|
orchestrator.restoreState(state);
|
||||||
|
orchestrator.enableAutoPersist(async (newState) => {
|
||||||
|
await saveWorkflowState(mergedOptions.projectRoot!, newState);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get current phase
|
||||||
|
const tddPhase = orchestrator.getCurrentTDDPhase();
|
||||||
|
const currentSubtask = orchestrator.getCurrentSubtask();
|
||||||
|
|
||||||
|
if (!tddPhase) {
|
||||||
|
formatter.error('Not in a TDD phase', {
|
||||||
|
phase: orchestrator.getCurrentPhase()
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate based on phase
|
||||||
|
if (tddPhase === 'RED' || tddPhase === 'GREEN') {
|
||||||
|
if (!mergedOptions.results) {
|
||||||
|
formatter.error('Test results required for RED/GREEN phase', {
|
||||||
|
usage:
|
||||||
|
'--results \'{"total":10,"passed":9,"failed":1,"skipped":0}\''
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse test results
|
||||||
|
let testResults: TestResult;
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(mergedOptions.results);
|
||||||
|
testResults = {
|
||||||
|
total: parsed.total || 0,
|
||||||
|
passed: parsed.passed || 0,
|
||||||
|
failed: parsed.failed || 0,
|
||||||
|
skipped: parsed.skipped || 0,
|
||||||
|
phase: tddPhase
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
formatter.error('Invalid test results JSON', {
|
||||||
|
error: (error as Error).message
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate RED phase requirements
|
||||||
|
if (tddPhase === 'RED' && testResults.failed === 0) {
|
||||||
|
formatter.error('RED phase validation failed', {
|
||||||
|
reason: 'At least one test must be failing',
|
||||||
|
actual: {
|
||||||
|
passed: testResults.passed,
|
||||||
|
failed: testResults.failed
|
||||||
|
}
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate GREEN phase requirements
|
||||||
|
if (tddPhase === 'GREEN' && testResults.failed !== 0) {
|
||||||
|
formatter.error('GREEN phase validation failed', {
|
||||||
|
reason: 'All tests must pass',
|
||||||
|
actual: {
|
||||||
|
passed: testResults.passed,
|
||||||
|
failed: testResults.failed
|
||||||
|
}
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Complete phase with test results
|
||||||
|
if (tddPhase === 'RED') {
|
||||||
|
orchestrator.transition({
|
||||||
|
type: 'RED_PHASE_COMPLETE',
|
||||||
|
testResults
|
||||||
|
});
|
||||||
|
formatter.success('RED phase completed', {
|
||||||
|
nextPhase: 'GREEN',
|
||||||
|
testResults,
|
||||||
|
subtask: currentSubtask?.title
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
orchestrator.transition({
|
||||||
|
type: 'GREEN_PHASE_COMPLETE',
|
||||||
|
testResults
|
||||||
|
});
|
||||||
|
formatter.success('GREEN phase completed', {
|
||||||
|
nextPhase: 'COMMIT',
|
||||||
|
testResults,
|
||||||
|
subtask: currentSubtask?.title,
|
||||||
|
suggestion: 'Run: autopilot commit'
|
||||||
|
});
|
||||||
|
}
|
||||||
|
} else if (tddPhase === 'COMMIT') {
|
||||||
|
formatter.error('Use "autopilot commit" to complete COMMIT phase');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
formatter.error((error as Error).message);
|
||||||
|
if (mergedOptions.verbose) {
|
||||||
|
console.error((error as Error).stack);
|
||||||
|
}
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
82
apps/cli/src/commands/autopilot/index.ts
Normal file
82
apps/cli/src/commands/autopilot/index.ts
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Autopilot CLI Commands for AI Agent Orchestration
|
||||||
|
* Provides subcommands for starting, resuming, and advancing the TDD workflow
|
||||||
|
* with JSON output for machine parsing.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { StartCommand } from './start.command.js';
|
||||||
|
import { ResumeCommand } from './resume.command.js';
|
||||||
|
import { NextCommand } from './next.command.js';
|
||||||
|
import { CompleteCommand } from './complete.command.js';
|
||||||
|
import { CommitCommand } from './commit.command.js';
|
||||||
|
import { StatusCommand } from './status.command.js';
|
||||||
|
import { AbortCommand } from './abort.command.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Shared command options for all autopilot commands
|
||||||
|
*/
|
||||||
|
export interface AutopilotBaseOptions {
|
||||||
|
json?: boolean;
|
||||||
|
verbose?: boolean;
|
||||||
|
projectRoot?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* AutopilotCommand with subcommands for TDD workflow orchestration
|
||||||
|
*/
|
||||||
|
export class AutopilotCommand extends Command {
|
||||||
|
constructor() {
|
||||||
|
super('autopilot');
|
||||||
|
|
||||||
|
// Configure main command
|
||||||
|
this.description('AI agent orchestration for TDD workflow execution')
|
||||||
|
.alias('ap')
|
||||||
|
// Global options for all subcommands
|
||||||
|
.option('--json', 'Output in JSON format for machine parsing')
|
||||||
|
.option('-v, --verbose', 'Enable verbose output')
|
||||||
|
.option(
|
||||||
|
'-p, --project-root <path>',
|
||||||
|
'Project root directory',
|
||||||
|
process.cwd()
|
||||||
|
);
|
||||||
|
|
||||||
|
// Register subcommands
|
||||||
|
this.registerSubcommands();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register all autopilot subcommands
|
||||||
|
*/
|
||||||
|
private registerSubcommands(): void {
|
||||||
|
// Start new TDD workflow
|
||||||
|
this.addCommand(new StartCommand());
|
||||||
|
|
||||||
|
// Resume existing workflow
|
||||||
|
this.addCommand(new ResumeCommand());
|
||||||
|
|
||||||
|
// Get next action
|
||||||
|
this.addCommand(new NextCommand());
|
||||||
|
|
||||||
|
// Complete current phase
|
||||||
|
this.addCommand(new CompleteCommand());
|
||||||
|
|
||||||
|
// Create commit
|
||||||
|
this.addCommand(new CommitCommand());
|
||||||
|
|
||||||
|
// Show status
|
||||||
|
this.addCommand(new StatusCommand());
|
||||||
|
|
||||||
|
// Abort workflow
|
||||||
|
this.addCommand(new AbortCommand());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register this command on an existing program
|
||||||
|
*/
|
||||||
|
static register(program: Command): AutopilotCommand {
|
||||||
|
const autopilotCommand = new AutopilotCommand();
|
||||||
|
program.addCommand(autopilotCommand);
|
||||||
|
return autopilotCommand;
|
||||||
|
}
|
||||||
|
}
|
||||||
164
apps/cli/src/commands/autopilot/next.command.ts
Normal file
164
apps/cli/src/commands/autopilot/next.command.ts
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Next Command - Get next action in TDD workflow
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { WorkflowOrchestrator } from '@tm/core';
|
||||||
|
import {
|
||||||
|
AutopilotBaseOptions,
|
||||||
|
hasWorkflowState,
|
||||||
|
loadWorkflowState,
|
||||||
|
OutputFormatter
|
||||||
|
} from './shared.js';
|
||||||
|
|
||||||
|
type NextOptions = AutopilotBaseOptions;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Next Command - Get next action details
|
||||||
|
*/
|
||||||
|
export class NextCommand extends Command {
|
||||||
|
constructor() {
|
||||||
|
super('next');
|
||||||
|
|
||||||
|
this.description(
|
||||||
|
'Get the next action to perform in the TDD workflow'
|
||||||
|
).action(async (options: NextOptions) => {
|
||||||
|
await this.execute(options);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
private async execute(options: NextOptions): Promise<void> {
|
||||||
|
// Inherit parent options
|
||||||
|
const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
|
||||||
|
const mergedOptions: NextOptions = {
|
||||||
|
...parentOpts,
|
||||||
|
...options,
|
||||||
|
projectRoot:
|
||||||
|
options.projectRoot || parentOpts?.projectRoot || process.cwd()
|
||||||
|
};
|
||||||
|
|
||||||
|
const formatter = new OutputFormatter(mergedOptions.json || false);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Check for workflow state
|
||||||
|
const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!hasState) {
|
||||||
|
formatter.error('No active workflow', {
|
||||||
|
suggestion: 'Start a workflow with: autopilot start <taskId>'
|
||||||
|
});
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load state
|
||||||
|
const state = await loadWorkflowState(mergedOptions.projectRoot!);
|
||||||
|
if (!state) {
|
||||||
|
formatter.error('Failed to load workflow state');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restore orchestrator
|
||||||
|
const orchestrator = new WorkflowOrchestrator(state.context);
|
||||||
|
orchestrator.restoreState(state);
|
||||||
|
|
||||||
|
// Get current phase and subtask
|
||||||
|
const phase = orchestrator.getCurrentPhase();
|
||||||
|
const tddPhase = orchestrator.getCurrentTDDPhase();
|
||||||
|
const currentSubtask = orchestrator.getCurrentSubtask();
|
||||||
|
|
||||||
|
// Determine next action based on phase
|
||||||
|
let actionType: string;
|
||||||
|
let actionDescription: string;
|
||||||
|
let actionDetails: Record<string, unknown> = {};
|
||||||
|
|
||||||
|
if (phase === 'COMPLETE') {
|
||||||
|
formatter.success('Workflow complete', {
|
||||||
|
message: 'All subtasks have been completed',
|
||||||
|
taskId: state.context.taskId
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (phase === 'SUBTASK_LOOP' && tddPhase) {
|
||||||
|
switch (tddPhase) {
|
||||||
|
case 'RED':
|
||||||
|
actionType = 'generate_test';
|
||||||
|
actionDescription = 'Write failing test for current subtask';
|
||||||
|
actionDetails = {
|
||||||
|
subtask: currentSubtask
|
||||||
|
? {
|
||||||
|
id: currentSubtask.id,
|
||||||
|
title: currentSubtask.title,
|
||||||
|
attempts: currentSubtask.attempts
|
||||||
|
}
|
||||||
|
: null,
|
||||||
|
testCommand: 'npm test', // Could be customized based on config
|
||||||
|
expectedOutcome: 'Test should fail'
|
||||||
|
};
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'GREEN':
|
||||||
|
actionType = 'implement_code';
|
||||||
|
actionDescription = 'Implement code to pass the failing test';
|
||||||
|
actionDetails = {
|
||||||
|
subtask: currentSubtask
|
||||||
|
? {
|
||||||
|
id: currentSubtask.id,
|
||||||
|
title: currentSubtask.title,
|
||||||
|
attempts: currentSubtask.attempts
|
||||||
|
}
|
||||||
|
: null,
|
||||||
|
testCommand: 'npm test',
|
||||||
|
expectedOutcome: 'All tests should pass',
|
||||||
|
lastTestResults: state.context.lastTestResults
|
||||||
|
};
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'COMMIT':
|
||||||
|
actionType = 'commit_changes';
|
||||||
|
actionDescription = 'Commit the changes';
|
||||||
|
actionDetails = {
|
||||||
|
subtask: currentSubtask
|
||||||
|
? {
|
||||||
|
id: currentSubtask.id,
|
||||||
|
title: currentSubtask.title,
|
||||||
|
attempts: currentSubtask.attempts
|
||||||
|
}
|
||||||
|
: null,
|
||||||
|
suggestion: 'Use: autopilot commit'
|
||||||
|
};
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
actionType = 'unknown';
|
||||||
|
actionDescription = 'Unknown TDD phase';
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
actionType = 'workflow_phase';
|
||||||
|
actionDescription = `Currently in ${phase} phase`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Output next action
|
||||||
|
const output = {
|
||||||
|
action: actionType,
|
||||||
|
description: actionDescription,
|
||||||
|
phase,
|
||||||
|
tddPhase,
|
||||||
|
taskId: state.context.taskId,
|
||||||
|
branchName: state.context.branchName,
|
||||||
|
...actionDetails
|
||||||
|
};
|
||||||
|
|
||||||
|
if (mergedOptions.json) {
|
||||||
|
formatter.output(output);
|
||||||
|
} else {
|
||||||
|
formatter.success('Next action', output);
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
formatter.error((error as Error).message);
|
||||||
|
if (mergedOptions.verbose) {
|
||||||
|
console.error((error as Error).stack);
|
||||||
|
}
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
111
apps/cli/src/commands/autopilot/resume.command.ts
Normal file
111
apps/cli/src/commands/autopilot/resume.command.ts
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Resume Command - Restore and resume TDD workflow
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { WorkflowOrchestrator } from '@tm/core';
|
||||||
|
import {
|
||||||
|
AutopilotBaseOptions,
|
||||||
|
hasWorkflowState,
|
||||||
|
loadWorkflowState,
|
||||||
|
OutputFormatter
|
||||||
|
} from './shared.js';
|
||||||
|
|
||||||
|
// `autopilot resume` takes no options beyond the shared autopilot base options.
type ResumeOptions = AutopilotBaseOptions;

/**
 * Resume Command - Restore workflow from saved state
 *
 * Loads the persisted workflow state for the project, validates that the
 * orchestrator can resume from it, restores the state machine, re-enables
 * auto-persistence, and reports the restored progress. Exits the process
 * with code 1 on any failure (missing state, corrupt state, load error).
 */
export class ResumeCommand extends Command {
	constructor() {
		super('resume');

		this.description('Resume a previously started TDD workflow').action(
			async (options: ResumeOptions) => {
				await this.execute(options);
			}
		);
	}

	/**
	 * Run the resume flow.
	 *
	 * @param options - Options parsed for this subcommand; merged with the
	 *   parent `autopilot` command's options, with subcommand values winning
	 *   and `projectRoot` falling back to the parent's value, then `cwd`.
	 */
	private async execute(options: ResumeOptions): Promise<void> {
		// Inherit parent options (autopilot command)
		const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
		const mergedOptions: ResumeOptions = {
			...parentOpts,
			...options,
			projectRoot:
				options.projectRoot || parentOpts?.projectRoot || process.cwd()
		};

		const formatter = new OutputFormatter(mergedOptions.json || false);

		try {
			// Check for workflow state; without it there is nothing to resume.
			const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
			if (!hasState) {
				formatter.error('No workflow state found', {
					suggestion: 'Start a new workflow with: autopilot start <taskId>'
				});
				process.exit(1);
			}

			// Load state (info message is suppressed in --json mode by the formatter)
			formatter.info('Loading workflow state...');
			const state = await loadWorkflowState(mergedOptions.projectRoot!);

			if (!state) {
				formatter.error('Failed to load workflow state');
				process.exit(1);
			}

			// Validate state can be resumed before mutating the orchestrator
			const orchestrator = new WorkflowOrchestrator(state.context);
			if (!orchestrator.canResumeFromState(state)) {
				formatter.error('Invalid workflow state', {
					suggestion:
						'State file may be corrupted. Consider starting a new workflow.'
				});
				process.exit(1);
			}

			// Restore state
			orchestrator.restoreState(state);

			// Re-enable auto-persistence so every subsequent transition is saved.
			// NOTE(review): dynamic import here, although saveWorkflowState lives in
			// the same './shared.js' module that is already statically imported above.
			const { saveWorkflowState } = await import('./shared.js');
			orchestrator.enableAutoPersist(async (newState) => {
				await saveWorkflowState(mergedOptions.projectRoot!, newState);
			});

			// Get progress
			const progress = orchestrator.getProgress();
			const currentSubtask = orchestrator.getCurrentSubtask();

			// Output success with restored phase/progress details
			formatter.success('Workflow resumed', {
				taskId: state.context.taskId,
				phase: orchestrator.getCurrentPhase(),
				tddPhase: orchestrator.getCurrentTDDPhase(),
				branchName: state.context.branchName,
				progress: {
					completed: progress.completed,
					total: progress.total,
					percentage: progress.percentage
				},
				currentSubtask: currentSubtask
					? {
							id: currentSubtask.id,
							title: currentSubtask.title,
							attempts: currentSubtask.attempts
						}
					: null
			});
		} catch (error) {
			// Any unexpected failure: report, optionally dump stack, exit non-zero.
			formatter.error((error as Error).message);
			if (mergedOptions.verbose) {
				console.error((error as Error).stack);
			}
			process.exit(1);
		}
	}
}
|
||||||
262
apps/cli/src/commands/autopilot/shared.ts
Normal file
262
apps/cli/src/commands/autopilot/shared.ts
Normal file
@@ -0,0 +1,262 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Shared utilities for autopilot commands
|
||||||
|
*/
|
||||||
|
|
||||||
|
import {
|
||||||
|
WorkflowOrchestrator,
|
||||||
|
WorkflowStateManager,
|
||||||
|
GitAdapter,
|
||||||
|
CommitMessageGenerator
|
||||||
|
} from '@tm/core';
|
||||||
|
import type { WorkflowState, WorkflowContext, SubtaskInfo } from '@tm/core';
|
||||||
|
import chalk from 'chalk';
|
||||||
|
|
||||||
|
/**
 * Base options interface for all autopilot commands.
 *
 * Subcommands merge these with the parent `autopilot` command's options.
 */
export interface AutopilotBaseOptions {
	// Project directory the workflow state lives under; commands fall back
	// to the parent command's value and finally to process.cwd().
	projectRoot?: string;
	// Emit machine-readable JSON instead of colorized text output.
	json?: boolean;
	// Print stack traces when a command fails.
	verbose?: boolean;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load workflow state from disk using WorkflowStateManager
|
||||||
|
*/
|
||||||
|
export async function loadWorkflowState(
|
||||||
|
projectRoot: string
|
||||||
|
): Promise<WorkflowState | null> {
|
||||||
|
const stateManager = new WorkflowStateManager(projectRoot);
|
||||||
|
|
||||||
|
if (!(await stateManager.exists())) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
return await stateManager.load();
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error(
|
||||||
|
`Failed to load workflow state: ${(error as Error).message}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Save workflow state to disk using WorkflowStateManager
|
||||||
|
*/
|
||||||
|
export async function saveWorkflowState(
|
||||||
|
projectRoot: string,
|
||||||
|
state: WorkflowState
|
||||||
|
): Promise<void> {
|
||||||
|
const stateManager = new WorkflowStateManager(projectRoot);
|
||||||
|
|
||||||
|
try {
|
||||||
|
await stateManager.save(state);
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error(
|
||||||
|
`Failed to save workflow state: ${(error as Error).message}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete workflow state from disk using WorkflowStateManager
|
||||||
|
*/
|
||||||
|
export async function deleteWorkflowState(projectRoot: string): Promise<void> {
|
||||||
|
const stateManager = new WorkflowStateManager(projectRoot);
|
||||||
|
await stateManager.delete();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if workflow state exists using WorkflowStateManager
|
||||||
|
*/
|
||||||
|
export async function hasWorkflowState(projectRoot: string): Promise<boolean> {
|
||||||
|
const stateManager = new WorkflowStateManager(projectRoot);
|
||||||
|
return await stateManager.exists();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize WorkflowOrchestrator with persistence
|
||||||
|
*/
|
||||||
|
export function createOrchestrator(
|
||||||
|
context: WorkflowContext,
|
||||||
|
projectRoot: string
|
||||||
|
): WorkflowOrchestrator {
|
||||||
|
const orchestrator = new WorkflowOrchestrator(context);
|
||||||
|
const stateManager = new WorkflowStateManager(projectRoot);
|
||||||
|
|
||||||
|
// Enable auto-persistence
|
||||||
|
orchestrator.enableAutoPersist(async (state: WorkflowState) => {
|
||||||
|
await stateManager.save(state);
|
||||||
|
});
|
||||||
|
|
||||||
|
return orchestrator;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize GitAdapter for project
|
||||||
|
*/
|
||||||
|
export function createGitAdapter(projectRoot: string): GitAdapter {
|
||||||
|
return new GitAdapter(projectRoot);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize CommitMessageGenerator
|
||||||
|
*/
|
||||||
|
export function createCommitMessageGenerator(): CommitMessageGenerator {
|
||||||
|
return new CommitMessageGenerator();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Output formatter for JSON and text modes.
 *
 * In JSON mode, payloads are pretty-printed with 2-space indentation to
 * stdout/stderr; in text mode, output is colorized with chalk. `info`
 * messages are suppressed entirely in JSON mode so JSON consumers see
 * only structured payloads.
 */
export class OutputFormatter {
	// useJson: when true, all output is machine-readable JSON.
	constructor(private useJson: boolean) {}

	/**
	 * Output data in appropriate format (JSON or colorized key/value text).
	 */
	output(data: Record<string, unknown>): void {
		if (this.useJson) {
			console.log(JSON.stringify(data, null, 2));
		} else {
			this.outputText(data);
		}
	}

	/**
	 * Output data in human-readable text format.
	 * Top-level scalar values render white; nested objects recurse via
	 * outputObject with increasing indentation.
	 */
	private outputText(data: Record<string, unknown>): void {
		for (const [key, value] of Object.entries(data)) {
			// Arrays also satisfy typeof 'object' and are rendered as
			// index-keyed entries by outputObject.
			if (typeof value === 'object' && value !== null) {
				console.log(chalk.cyan(`${key}:`));
				this.outputObject(value as Record<string, unknown>, ' ');
			} else {
				console.log(chalk.white(`${key}: ${value}`));
			}
		}
	}

	/**
	 * Output nested object with indentation (scalars gray, keys cyan).
	 */
	private outputObject(obj: Record<string, unknown>, indent: string): void {
		for (const [key, value] of Object.entries(obj)) {
			if (typeof value === 'object' && value !== null) {
				console.log(chalk.cyan(`${indent}${key}:`));
				this.outputObject(value as Record<string, unknown>, indent + ' ');
			} else {
				console.log(chalk.gray(`${indent}${key}: ${value}`));
			}
		}
	}

	/**
	 * Output error message (to stderr). In JSON mode the details are merged
	 * into the error object; in text mode they are listed under the message.
	 */
	error(message: string, details?: Record<string, unknown>): void {
		if (this.useJson) {
			console.error(
				JSON.stringify(
					{
						error: message,
						...details
					},
					null,
					2
				)
			);
		} else {
			console.error(chalk.red(`Error: ${message}`));
			if (details) {
				for (const [key, value] of Object.entries(details)) {
					console.error(chalk.gray(` ${key}: ${value}`));
				}
			}
		}
	}

	/**
	 * Output success message. In JSON mode data is merged into a
	 * `{ success: true, message, ... }` object; in text mode the message is
	 * printed green and data (if any) is rendered via output().
	 */
	success(message: string, data?: Record<string, unknown>): void {
		if (this.useJson) {
			console.log(
				JSON.stringify(
					{
						success: true,
						message,
						...data
					},
					null,
					2
				)
			);
		} else {
			console.log(chalk.green(`✓ ${message}`));
			if (data) {
				this.output(data);
			}
		}
	}

	/**
	 * Output warning message (console.warn in both modes).
	 */
	warning(message: string): void {
		if (this.useJson) {
			console.warn(
				JSON.stringify(
					{
						warning: message
					},
					null,
					2
				)
			);
		} else {
			console.warn(chalk.yellow(`⚠ ${message}`));
		}
	}

	/**
	 * Output info message. Silently dropped in JSON mode so progress chatter
	 * never corrupts machine-readable output.
	 */
	info(message: string): void {
		if (this.useJson) {
			// Don't output info messages in JSON mode
			return;
		}
		console.log(chalk.blue(`ℹ ${message}`));
	}
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate task ID format
|
||||||
|
*/
|
||||||
|
export function validateTaskId(taskId: string): boolean {
|
||||||
|
// Task ID should be in format: number or number.number (e.g., "1" or "1.2")
|
||||||
|
const pattern = /^\d+(\.\d+)*$/;
|
||||||
|
return pattern.test(taskId);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse subtasks from task data
|
||||||
|
*/
|
||||||
|
export function parseSubtasks(
|
||||||
|
task: any,
|
||||||
|
maxAttempts: number = 3
|
||||||
|
): SubtaskInfo[] {
|
||||||
|
if (!task.subtasks || !Array.isArray(task.subtasks)) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
return task.subtasks.map((subtask: any) => ({
|
||||||
|
id: subtask.id,
|
||||||
|
title: subtask.title,
|
||||||
|
status: subtask.status === 'done' ? 'completed' : 'pending',
|
||||||
|
attempts: 0,
|
||||||
|
maxAttempts
|
||||||
|
}));
|
||||||
|
}
|
||||||
168
apps/cli/src/commands/autopilot/start.command.ts
Normal file
168
apps/cli/src/commands/autopilot/start.command.ts
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Start Command - Initialize and start TDD workflow
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { createTaskMasterCore, type WorkflowContext } from '@tm/core';
|
||||||
|
import {
|
||||||
|
AutopilotBaseOptions,
|
||||||
|
hasWorkflowState,
|
||||||
|
createOrchestrator,
|
||||||
|
createGitAdapter,
|
||||||
|
OutputFormatter,
|
||||||
|
validateTaskId,
|
||||||
|
parseSubtasks
|
||||||
|
} from './shared.js';
|
||||||
|
|
||||||
|
interface StartOptions extends AutopilotBaseOptions {
	// Overwrite an existing workflow state instead of refusing to start.
	force?: boolean;
	// Maximum attempts per subtask; arrives as a string from the CLI option
	// (default '3') and is parsed with parseInt below.
	maxAttempts?: string;
}

/**
 * Start Command - Initialize new TDD workflow
 *
 * Validates the task, checks git preconditions (repository present, clean
 * working tree), builds the workflow context, creates a task branch, and
 * advances the orchestrator through PREFLIGHT into the subtask loop.
 * Exits the process with code 1 on any failure.
 */
export class StartCommand extends Command {
	constructor() {
		super('start');

		this.description('Initialize and start a new TDD workflow for a task')
			.argument('<taskId>', 'Task ID to start workflow for')
			.option('-f, --force', 'Force start even if workflow state exists')
			.option('--max-attempts <number>', 'Maximum attempts per subtask', '3')
			.action(async (taskId: string, options: StartOptions) => {
				await this.execute(taskId, options);
			});
	}

	/**
	 * Run the start flow for the given task.
	 *
	 * @param taskId - Dotted-number task identifier (validated below).
	 * @param options - Subcommand options merged over the parent autopilot
	 *   options, with `projectRoot` falling back to the parent then `cwd`.
	 */
	private async execute(taskId: string, options: StartOptions): Promise<void> {
		// Inherit parent options
		const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
		const mergedOptions: StartOptions = {
			...parentOpts,
			...options,
			projectRoot:
				options.projectRoot || parentOpts?.projectRoot || process.cwd()
		};

		const formatter = new OutputFormatter(mergedOptions.json || false);

		try {
			// Validate task ID
			if (!validateTaskId(taskId)) {
				formatter.error('Invalid task ID format', {
					taskId,
					expected: 'Format: number or number.number (e.g., "1" or "1.2")'
				});
				process.exit(1);
			}

			// Check for existing workflow state; refuse to clobber without --force.
			const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
			if (hasState && !mergedOptions.force) {
				formatter.error(
					'Workflow state already exists. Use --force to overwrite or resume with "autopilot resume"'
				);
				process.exit(1);
			}

			// Initialize Task Master Core
			const tmCore = await createTaskMasterCore({
				projectPath: mergedOptions.projectRoot!
			});

			// Get current tag from ConfigManager (used to prefix the branch name)
			const currentTag = tmCore.getActiveTag();

			// Load task
			formatter.info(`Loading task ${taskId}...`);
			const { task } = await tmCore.getTaskWithSubtask(taskId);

			if (!task) {
				formatter.error('Task not found', { taskId });
				await tmCore.close();
				process.exit(1);
			}

			// Validate task has subtasks — the TDD loop iterates over them.
			if (!task.subtasks || task.subtasks.length === 0) {
				formatter.error('Task has no subtasks. Expand task first.', {
					taskId,
					suggestion: `Run: task-master expand --id=${taskId}`
				});
				await tmCore.close();
				process.exit(1);
			}

			// Initialize Git adapter and enforce preconditions before branching.
			const gitAdapter = createGitAdapter(mergedOptions.projectRoot!);
			await gitAdapter.ensureGitRepository();
			await gitAdapter.ensureCleanWorkingTree();

			// Parse subtasks
			// NOTE(review): parseInt yields NaN for a non-numeric --max-attempts
			// value and it propagates unvalidated into the subtask records —
			// consider validating here.
			const maxAttempts = parseInt(mergedOptions.maxAttempts || '3', 10);
			const subtasks = parseSubtasks(task, maxAttempts);

			// Create workflow context
			const context: WorkflowContext = {
				taskId: task.id,
				subtasks,
				currentSubtaskIndex: 0,
				errors: [],
				metadata: {
					startedAt: new Date().toISOString(),
					tags: task.tags || []
				}
			};

			// Create orchestrator with persistence (auto-saves each transition)
			const orchestrator = createOrchestrator(
				context,
				mergedOptions.projectRoot!
			);

			// Complete PREFLIGHT phase
			orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });

			// Generate descriptive branch name:
			// [<tag>/]task-<id-with-dashes>-<slugified-title-capped-at-50-chars>
			const sanitizedTitle = task.title
				.toLowerCase()
				.replace(/[^a-z0-9]+/g, '-')
				.replace(/^-+|-+$/g, '')
				.substring(0, 50);
			const formattedTaskId = taskId.replace(/\./g, '-');
			const tagPrefix = currentTag ? `${currentTag}/` : '';
			const branchName = `${tagPrefix}task-${formattedTaskId}-${sanitizedTitle}`;

			// Create and checkout branch
			formatter.info(`Creating branch: ${branchName}`);
			await gitAdapter.createAndCheckoutBranch(branchName);

			// Transition to SUBTASK_LOOP
			orchestrator.transition({
				type: 'BRANCH_CREATED',
				branchName
			});

			// Output success
			formatter.success('TDD workflow started', {
				taskId: task.id,
				title: task.title,
				phase: orchestrator.getCurrentPhase(),
				tddPhase: orchestrator.getCurrentTDDPhase(),
				branchName,
				subtasks: subtasks.length,
				currentSubtask: subtasks[0]?.title
			});

			// Clean up
			await tmCore.close();
		} catch (error) {
			formatter.error((error as Error).message);
			if (mergedOptions.verbose) {
				console.error((error as Error).stack);
			}
			process.exit(1);
		}
	}
}
|
||||||
114
apps/cli/src/commands/autopilot/status.command.ts
Normal file
114
apps/cli/src/commands/autopilot/status.command.ts
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Status Command - Show workflow progress
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { WorkflowOrchestrator } from '@tm/core';
|
||||||
|
import {
|
||||||
|
AutopilotBaseOptions,
|
||||||
|
hasWorkflowState,
|
||||||
|
loadWorkflowState,
|
||||||
|
OutputFormatter
|
||||||
|
} from './shared.js';
|
||||||
|
|
||||||
|
// `autopilot status` takes no options beyond the shared autopilot base options.
type StatusOptions = AutopilotBaseOptions;

/**
 * Status Command - Show current workflow status
 *
 * Loads the persisted workflow state, restores an orchestrator from it
 * (read-only: no transitions, no persistence hookup), and reports phase,
 * progress, per-subtask status, and any recorded errors. Exits the
 * process with code 1 when no state exists or it cannot be loaded.
 */
export class StatusCommand extends Command {
	constructor() {
		super('status');

		this.description('Show current TDD workflow status and progress').action(
			async (options: StatusOptions) => {
				await this.execute(options);
			}
		);
	}

	/**
	 * Run the status flow.
	 *
	 * @param options - Subcommand options merged over the parent autopilot
	 *   options, with `projectRoot` falling back to the parent then `cwd`.
	 */
	private async execute(options: StatusOptions): Promise<void> {
		// Inherit parent options
		const parentOpts = this.parent?.opts() as AutopilotBaseOptions;
		const mergedOptions: StatusOptions = {
			...parentOpts,
			...options,
			projectRoot:
				options.projectRoot || parentOpts?.projectRoot || process.cwd()
		};

		const formatter = new OutputFormatter(mergedOptions.json || false);

		try {
			// Check for workflow state
			const hasState = await hasWorkflowState(mergedOptions.projectRoot!);
			if (!hasState) {
				formatter.error('No active workflow', {
					suggestion: 'Start a workflow with: autopilot start <taskId>'
				});
				process.exit(1);
			}

			// Load state
			const state = await loadWorkflowState(mergedOptions.projectRoot!);
			if (!state) {
				formatter.error('Failed to load workflow state');
				process.exit(1);
			}

			// Restore orchestrator (no auto-persist: status is read-only)
			const orchestrator = new WorkflowOrchestrator(state.context);
			orchestrator.restoreState(state);

			// Get status information
			const phase = orchestrator.getCurrentPhase();
			const tddPhase = orchestrator.getCurrentTDDPhase();
			const progress = orchestrator.getProgress();
			const currentSubtask = orchestrator.getCurrentSubtask();
			const errors = state.context.errors ?? [];

			// Build status output
			const status = {
				taskId: state.context.taskId,
				phase,
				tddPhase,
				branchName: state.context.branchName,
				progress: {
					completed: progress.completed,
					total: progress.total,
					current: progress.current,
					percentage: progress.percentage
				},
				currentSubtask: currentSubtask
					? {
							id: currentSubtask.id,
							title: currentSubtask.title,
							status: currentSubtask.status,
							attempts: currentSubtask.attempts,
							maxAttempts: currentSubtask.maxAttempts
						}
					: null,
				subtasks: state.context.subtasks.map((st) => ({
					id: st.id,
					title: st.title,
					status: st.status,
					attempts: st.attempts
				})),
				// Omit the errors key entirely when there are none.
				errors: errors.length > 0 ? errors : undefined,
				metadata: state.context.metadata
			};

			// JSON mode emits the bare status object; text mode wraps it in a
			// success banner.
			if (mergedOptions.json) {
				formatter.output(status);
			} else {
				formatter.success('Workflow status', status);
			}
		} catch (error) {
			formatter.error((error as Error).message);
			if (mergedOptions.verbose) {
				console.error((error as Error).stack);
			}
			process.exit(1);
		}
	}
}
|
||||||
540
apps/cli/tests/integration/commands/autopilot/workflow.test.ts
Normal file
540
apps/cli/tests/integration/commands/autopilot/workflow.test.ts
Normal file
@@ -0,0 +1,540 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Integration tests for autopilot workflow commands
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
||||||
|
import type { WorkflowState } from '@tm/core';
|
||||||
|
|
||||||
|
// Track file system state in memory - must be in vi.hoisted() for mock access.
// vi.mock factories are hoisted above imports, so anything they reference has
// to be created inside vi.hoisted() to exist by then.
const {
	mockFileSystem,
	pathExistsFn,
	readJSONFn,
	writeJSONFn,
	ensureDirFn,
	removeFn
} = vi.hoisted(() => {
	// path -> JSON string; shared by all fs-extra mock functions below.
	const mockFileSystem = new Map<string, string>();

	return {
		mockFileSystem,
		pathExistsFn: vi.fn((path: string) =>
			Promise.resolve(mockFileSystem.has(path))
		),
		readJSONFn: vi.fn((path: string) => {
			const data = mockFileSystem.get(path);
			return data
				? Promise.resolve(JSON.parse(data))
				: Promise.reject(new Error('File not found'));
		}),
		writeJSONFn: vi.fn((path: string, data: any) => {
			mockFileSystem.set(path, JSON.stringify(data));
			return Promise.resolve();
		}),
		// Directory creation is a no-op against the in-memory map.
		ensureDirFn: vi.fn(() => Promise.resolve()),
		removeFn: vi.fn((path: string) => {
			mockFileSystem.delete(path);
			return Promise.resolve();
		})
	};
});

// Mock fs-extra before any imports
vi.mock('fs-extra', () => ({
	default: {
		pathExists: pathExistsFn,
		readJSON: readJSONFn,
		writeJSON: writeJSONFn,
		ensureDir: ensureDirFn,
		remove: removeFn
	}
}));

// Stub the @tm/core surface the autopilot commands touch: orchestrator,
// git adapter, commit-message generator, and the core factory.
vi.mock('@tm/core', () => ({
	// Orchestrator fixed in SUBTASK_LOOP/RED with one pending subtask and
	// 0/3 progress; canResumeFromState always accepts.
	WorkflowOrchestrator: vi.fn().mockImplementation((context) => ({
		getCurrentPhase: vi.fn().mockReturnValue('SUBTASK_LOOP'),
		getCurrentTDDPhase: vi.fn().mockReturnValue('RED'),
		getContext: vi.fn().mockReturnValue(context),
		transition: vi.fn(),
		restoreState: vi.fn(),
		getState: vi.fn().mockReturnValue({ phase: 'SUBTASK_LOOP', context }),
		enableAutoPersist: vi.fn(),
		canResumeFromState: vi.fn().mockReturnValue(true),
		getCurrentSubtask: vi.fn().mockReturnValue({
			id: '1',
			title: 'Test Subtask',
			status: 'pending',
			attempts: 0
		}),
		getProgress: vi.fn().mockReturnValue({
			completed: 0,
			total: 3,
			current: 1,
			percentage: 0
		}),
		canProceed: vi.fn().mockReturnValue(false)
	})),
	// Git adapter where every precondition passes and staged changes exist.
	GitAdapter: vi.fn().mockImplementation(() => ({
		ensureGitRepository: vi.fn().mockResolvedValue(undefined),
		ensureCleanWorkingTree: vi.fn().mockResolvedValue(undefined),
		createAndCheckoutBranch: vi.fn().mockResolvedValue(undefined),
		hasStagedChanges: vi.fn().mockResolvedValue(true),
		getStatus: vi.fn().mockResolvedValue({
			staged: ['file1.ts'],
			modified: ['file2.ts']
		}),
		createCommit: vi.fn().mockResolvedValue(undefined),
		getLastCommit: vi.fn().mockResolvedValue({
			hash: 'abc123def456',
			message: 'test commit'
		}),
		stageFiles: vi.fn().mockResolvedValue(undefined)
	})),
	CommitMessageGenerator: vi.fn().mockImplementation(() => ({
		generateMessage: vi.fn().mockReturnValue('feat: test commit message')
	})),
	// Core factory returning task '1' with three pending subtasks.
	createTaskMasterCore: vi.fn().mockResolvedValue({
		getTaskWithSubtask: vi.fn().mockResolvedValue({
			task: {
				id: '1',
				title: 'Test Task',
				subtasks: [
					{ id: '1', title: 'Subtask 1', status: 'pending' },
					{ id: '2', title: 'Subtask 2', status: 'pending' },
					{ id: '3', title: 'Subtask 3', status: 'pending' }
				],
				tag: 'test'
			}
		}),
		close: vi.fn().mockResolvedValue(undefined)
	})
}));
|
||||||
|
|
||||||
|
// Import after mocks are set up
|
||||||
|
import { Command } from 'commander';
|
||||||
|
import { AutopilotCommand } from '../../../../src/commands/autopilot/index.js';
|
||||||
|
|
||||||
|
describe('Autopilot Workflow Integration Tests', () => {
|
||||||
|
const projectRoot = '/test/project';
|
||||||
|
let program: Command;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mockFileSystem.clear();
|
||||||
|
|
||||||
|
// Clear mock call history
|
||||||
|
pathExistsFn.mockClear();
|
||||||
|
readJSONFn.mockClear();
|
||||||
|
writeJSONFn.mockClear();
|
||||||
|
ensureDirFn.mockClear();
|
||||||
|
removeFn.mockClear();
|
||||||
|
|
||||||
|
program = new Command();
|
||||||
|
AutopilotCommand.register(program);
|
||||||
|
|
||||||
|
// Use exitOverride to handle Commander exits in tests
|
||||||
|
program.exitOverride();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
mockFileSystem.clear();
|
||||||
|
vi.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('start command', () => {
|
||||||
|
it('should initialize workflow and create branch', async () => {
|
||||||
|
const consoleLogSpy = vi
|
||||||
|
.spyOn(console, 'log')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'start',
|
||||||
|
'1',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--json'
|
||||||
|
]);
|
||||||
|
|
||||||
|
// Verify writeJSON was called with state
|
||||||
|
expect(writeJSONFn).toHaveBeenCalledWith(
|
||||||
|
expect.stringContaining('workflow-state.json'),
|
||||||
|
expect.objectContaining({
|
||||||
|
phase: expect.any(String),
|
||||||
|
context: expect.any(Object)
|
||||||
|
}),
|
||||||
|
expect.any(Object)
|
||||||
|
);
|
||||||
|
|
||||||
|
consoleLogSpy.mockRestore();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject invalid task ID', async () => {
|
||||||
|
const consoleErrorSpy = vi
|
||||||
|
.spyOn(console, 'error')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'start',
|
||||||
|
'invalid',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--json'
|
||||||
|
])
|
||||||
|
).rejects.toMatchObject({ exitCode: 1 });
|
||||||
|
|
||||||
|
consoleErrorSpy.mockRestore();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject starting when workflow exists without force', async () => {
|
||||||
|
// Create existing state
|
||||||
|
const mockState: WorkflowState = {
|
||||||
|
phase: 'SUBTASK_LOOP',
|
||||||
|
context: {
|
||||||
|
taskId: '1',
|
||||||
|
subtasks: [],
|
||||||
|
currentSubtaskIndex: 0,
|
||||||
|
errors: [],
|
||||||
|
metadata: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
mockFileSystem.set(
|
||||||
|
`${projectRoot}/.taskmaster/workflow-state.json`,
|
||||||
|
JSON.stringify(mockState)
|
||||||
|
);
|
||||||
|
|
||||||
|
const consoleErrorSpy = vi
|
||||||
|
.spyOn(console, 'error')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'start',
|
||||||
|
'1',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--json'
|
||||||
|
])
|
||||||
|
).rejects.toMatchObject({ exitCode: 1 });
|
||||||
|
|
||||||
|
consoleErrorSpy.mockRestore();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resume command', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
// Create saved state
|
||||||
|
const mockState: WorkflowState = {
|
||||||
|
phase: 'SUBTASK_LOOP',
|
||||||
|
context: {
|
||||||
|
taskId: '1',
|
||||||
|
subtasks: [
|
||||||
|
{
|
||||||
|
id: '1',
|
||||||
|
title: 'Test Subtask',
|
||||||
|
status: 'pending',
|
||||||
|
attempts: 0
|
||||||
|
}
|
||||||
|
],
|
||||||
|
currentSubtaskIndex: 0,
|
||||||
|
currentTDDPhase: 'RED',
|
||||||
|
branchName: 'task-1',
|
||||||
|
errors: [],
|
||||||
|
metadata: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
mockFileSystem.set(
|
||||||
|
`${projectRoot}/.taskmaster/workflow-state.json`,
|
||||||
|
JSON.stringify(mockState)
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should restore workflow from saved state', async () => {
|
||||||
|
const consoleLogSpy = vi
|
||||||
|
.spyOn(console, 'log')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'resume',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--json'
|
||||||
|
]);
|
||||||
|
|
||||||
|
expect(consoleLogSpy).toHaveBeenCalled();
|
||||||
|
const output = JSON.parse(consoleLogSpy.mock.calls[0][0]);
|
||||||
|
expect(output.success).toBe(true);
|
||||||
|
expect(output.taskId).toBe('1');
|
||||||
|
|
||||||
|
consoleLogSpy.mockRestore();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should error when no state exists', async () => {
|
||||||
|
mockFileSystem.clear();
|
||||||
|
|
||||||
|
const consoleErrorSpy = vi
|
||||||
|
.spyOn(console, 'error')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'resume',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--json'
|
||||||
|
])
|
||||||
|
).rejects.toMatchObject({ exitCode: 1 });
|
||||||
|
|
||||||
|
consoleErrorSpy.mockRestore();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('next command', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
const mockState: WorkflowState = {
|
||||||
|
phase: 'SUBTASK_LOOP',
|
||||||
|
context: {
|
||||||
|
taskId: '1',
|
||||||
|
subtasks: [
|
||||||
|
{
|
||||||
|
id: '1',
|
||||||
|
title: 'Test Subtask',
|
||||||
|
status: 'pending',
|
||||||
|
attempts: 0
|
||||||
|
}
|
||||||
|
],
|
||||||
|
currentSubtaskIndex: 0,
|
||||||
|
currentTDDPhase: 'RED',
|
||||||
|
branchName: 'task-1',
|
||||||
|
errors: [],
|
||||||
|
metadata: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
mockFileSystem.set(
|
||||||
|
`${projectRoot}/.taskmaster/workflow-state.json`,
|
||||||
|
JSON.stringify(mockState)
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return next action in JSON format', async () => {
|
||||||
|
const consoleLogSpy = vi
|
||||||
|
.spyOn(console, 'log')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'next',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--json'
|
||||||
|
]);
|
||||||
|
|
||||||
|
expect(consoleLogSpy).toHaveBeenCalled();
|
||||||
|
const output = JSON.parse(consoleLogSpy.mock.calls[0][0]);
|
||||||
|
expect(output.action).toBe('generate_test');
|
||||||
|
expect(output.phase).toBe('SUBTASK_LOOP');
|
||||||
|
expect(output.tddPhase).toBe('RED');
|
||||||
|
|
||||||
|
consoleLogSpy.mockRestore();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('status command', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
const mockState: WorkflowState = {
|
||||||
|
phase: 'SUBTASK_LOOP',
|
||||||
|
context: {
|
||||||
|
taskId: '1',
|
||||||
|
subtasks: [
|
||||||
|
{ id: '1', title: 'Subtask 1', status: 'completed', attempts: 1 },
|
||||||
|
{ id: '2', title: 'Subtask 2', status: 'pending', attempts: 0 },
|
||||||
|
{ id: '3', title: 'Subtask 3', status: 'pending', attempts: 0 }
|
||||||
|
],
|
||||||
|
currentSubtaskIndex: 1,
|
||||||
|
currentTDDPhase: 'RED',
|
||||||
|
branchName: 'task-1',
|
||||||
|
errors: [],
|
||||||
|
metadata: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
mockFileSystem.set(
|
||||||
|
`${projectRoot}/.taskmaster/workflow-state.json`,
|
||||||
|
JSON.stringify(mockState)
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should display workflow progress', async () => {
|
||||||
|
const consoleLogSpy = vi
|
||||||
|
.spyOn(console, 'log')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'status',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--json'
|
||||||
|
]);
|
||||||
|
|
||||||
|
expect(consoleLogSpy).toHaveBeenCalled();
|
||||||
|
const output = JSON.parse(consoleLogSpy.mock.calls[0][0]);
|
||||||
|
expect(output.taskId).toBe('1');
|
||||||
|
expect(output.phase).toBe('SUBTASK_LOOP');
|
||||||
|
expect(output.progress).toBeDefined();
|
||||||
|
expect(output.subtasks).toHaveLength(3);
|
||||||
|
|
||||||
|
consoleLogSpy.mockRestore();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('complete command', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
const mockState: WorkflowState = {
|
||||||
|
phase: 'SUBTASK_LOOP',
|
||||||
|
context: {
|
||||||
|
taskId: '1',
|
||||||
|
subtasks: [
|
||||||
|
{
|
||||||
|
id: '1',
|
||||||
|
title: 'Test Subtask',
|
||||||
|
status: 'in-progress',
|
||||||
|
attempts: 0
|
||||||
|
}
|
||||||
|
],
|
||||||
|
currentSubtaskIndex: 0,
|
||||||
|
currentTDDPhase: 'RED',
|
||||||
|
branchName: 'task-1',
|
||||||
|
errors: [],
|
||||||
|
metadata: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
mockFileSystem.set(
|
||||||
|
`${projectRoot}/.taskmaster/workflow-state.json`,
|
||||||
|
JSON.stringify(mockState)
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should validate RED phase has failures', async () => {
|
||||||
|
const consoleErrorSpy = vi
|
||||||
|
.spyOn(console, 'error')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'complete',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--results',
|
||||||
|
'{"total":10,"passed":10,"failed":0,"skipped":0}',
|
||||||
|
'--json'
|
||||||
|
])
|
||||||
|
).rejects.toMatchObject({ exitCode: 1 });
|
||||||
|
|
||||||
|
consoleErrorSpy.mockRestore();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should complete RED phase with failures', async () => {
|
||||||
|
const consoleLogSpy = vi
|
||||||
|
.spyOn(console, 'log')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'complete',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--results',
|
||||||
|
'{"total":10,"passed":9,"failed":1,"skipped":0}',
|
||||||
|
'--json'
|
||||||
|
]);
|
||||||
|
|
||||||
|
expect(consoleLogSpy).toHaveBeenCalled();
|
||||||
|
const output = JSON.parse(consoleLogSpy.mock.calls[0][0]);
|
||||||
|
expect(output.success).toBe(true);
|
||||||
|
expect(output.nextPhase).toBe('GREEN');
|
||||||
|
|
||||||
|
consoleLogSpy.mockRestore();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('abort command', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
const mockState: WorkflowState = {
|
||||||
|
phase: 'SUBTASK_LOOP',
|
||||||
|
context: {
|
||||||
|
taskId: '1',
|
||||||
|
subtasks: [
|
||||||
|
{
|
||||||
|
id: '1',
|
||||||
|
title: 'Test Subtask',
|
||||||
|
status: 'pending',
|
||||||
|
attempts: 0
|
||||||
|
}
|
||||||
|
],
|
||||||
|
currentSubtaskIndex: 0,
|
||||||
|
currentTDDPhase: 'RED',
|
||||||
|
branchName: 'task-1',
|
||||||
|
errors: [],
|
||||||
|
metadata: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
mockFileSystem.set(
|
||||||
|
`${projectRoot}/.taskmaster/workflow-state.json`,
|
||||||
|
JSON.stringify(mockState)
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should abort workflow and delete state', async () => {
|
||||||
|
const consoleLogSpy = vi
|
||||||
|
.spyOn(console, 'log')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
|
||||||
|
await program.parseAsync([
|
||||||
|
'node',
|
||||||
|
'test',
|
||||||
|
'autopilot',
|
||||||
|
'abort',
|
||||||
|
'--project-root',
|
||||||
|
projectRoot,
|
||||||
|
'--force',
|
||||||
|
'--json'
|
||||||
|
]);
|
||||||
|
|
||||||
|
// Verify remove was called
|
||||||
|
expect(removeFn).toHaveBeenCalledWith(
|
||||||
|
expect.stringContaining('workflow-state.json')
|
||||||
|
);
|
||||||
|
|
||||||
|
consoleLogSpy.mockRestore();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
202
apps/cli/tests/unit/commands/autopilot/shared.test.ts
Normal file
202
apps/cli/tests/unit/commands/autopilot/shared.test.ts
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Unit tests for autopilot shared utilities
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
||||||
|
import {
|
||||||
|
validateTaskId,
|
||||||
|
parseSubtasks,
|
||||||
|
OutputFormatter
|
||||||
|
} from '../../../../src/commands/autopilot/shared.js';
|
||||||
|
|
||||||
|
// Mock fs-extra
|
||||||
|
vi.mock('fs-extra', () => ({
|
||||||
|
default: {
|
||||||
|
pathExists: vi.fn(),
|
||||||
|
readJSON: vi.fn(),
|
||||||
|
writeJSON: vi.fn(),
|
||||||
|
ensureDir: vi.fn(),
|
||||||
|
remove: vi.fn()
|
||||||
|
},
|
||||||
|
pathExists: vi.fn(),
|
||||||
|
readJSON: vi.fn(),
|
||||||
|
writeJSON: vi.fn(),
|
||||||
|
ensureDir: vi.fn(),
|
||||||
|
remove: vi.fn()
|
||||||
|
}));
|
||||||
|
|
||||||
|
describe('Autopilot Shared Utilities', () => {
|
||||||
|
const projectRoot = '/test/project';
|
||||||
|
const statePath = `${projectRoot}/.taskmaster/workflow-state.json`;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
vi.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('validateTaskId', () => {
|
||||||
|
it('should validate simple task IDs', () => {
|
||||||
|
expect(validateTaskId('1')).toBe(true);
|
||||||
|
expect(validateTaskId('10')).toBe(true);
|
||||||
|
expect(validateTaskId('999')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should validate subtask IDs', () => {
|
||||||
|
expect(validateTaskId('1.1')).toBe(true);
|
||||||
|
expect(validateTaskId('1.2')).toBe(true);
|
||||||
|
expect(validateTaskId('10.5')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should validate nested subtask IDs', () => {
|
||||||
|
expect(validateTaskId('1.1.1')).toBe(true);
|
||||||
|
expect(validateTaskId('1.2.3')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject invalid formats', () => {
|
||||||
|
expect(validateTaskId('')).toBe(false);
|
||||||
|
expect(validateTaskId('abc')).toBe(false);
|
||||||
|
expect(validateTaskId('1.')).toBe(false);
|
||||||
|
expect(validateTaskId('.1')).toBe(false);
|
||||||
|
expect(validateTaskId('1..2')).toBe(false);
|
||||||
|
expect(validateTaskId('1.2.3.')).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('parseSubtasks', () => {
|
||||||
|
it('should parse subtasks from task data', () => {
|
||||||
|
const task = {
|
||||||
|
id: '1',
|
||||||
|
title: 'Test Task',
|
||||||
|
subtasks: [
|
||||||
|
{ id: '1', title: 'Subtask 1', status: 'pending' },
|
||||||
|
{ id: '2', title: 'Subtask 2', status: 'done' },
|
||||||
|
{ id: '3', title: 'Subtask 3', status: 'in-progress' }
|
||||||
|
]
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = parseSubtasks(task, 5);
|
||||||
|
|
||||||
|
expect(result).toHaveLength(3);
|
||||||
|
expect(result[0]).toEqual({
|
||||||
|
id: '1',
|
||||||
|
title: 'Subtask 1',
|
||||||
|
status: 'pending',
|
||||||
|
attempts: 0,
|
||||||
|
maxAttempts: 5
|
||||||
|
});
|
||||||
|
expect(result[1]).toEqual({
|
||||||
|
id: '2',
|
||||||
|
title: 'Subtask 2',
|
||||||
|
status: 'completed',
|
||||||
|
attempts: 0,
|
||||||
|
maxAttempts: 5
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array for missing subtasks', () => {
|
||||||
|
const task = { id: '1', title: 'Test Task' };
|
||||||
|
expect(parseSubtasks(task)).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should use default maxAttempts', () => {
|
||||||
|
const task = {
|
||||||
|
subtasks: [{ id: '1', title: 'Subtask 1', status: 'pending' }]
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = parseSubtasks(task);
|
||||||
|
expect(result[0].maxAttempts).toBe(3);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// State persistence tests omitted - covered in integration tests
|
||||||
|
|
||||||
|
describe('OutputFormatter', () => {
|
||||||
|
let consoleLogSpy: any;
|
||||||
|
let consoleErrorSpy: any;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
|
||||||
|
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
consoleLogSpy.mockRestore();
|
||||||
|
consoleErrorSpy.mockRestore();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('JSON mode', () => {
|
||||||
|
it('should output JSON for success', () => {
|
||||||
|
const formatter = new OutputFormatter(true);
|
||||||
|
formatter.success('Test message', { key: 'value' });
|
||||||
|
|
||||||
|
expect(consoleLogSpy).toHaveBeenCalled();
|
||||||
|
const output = JSON.parse(consoleLogSpy.mock.calls[0][0]);
|
||||||
|
expect(output.success).toBe(true);
|
||||||
|
expect(output.message).toBe('Test message');
|
||||||
|
expect(output.key).toBe('value');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should output JSON for error', () => {
|
||||||
|
const formatter = new OutputFormatter(true);
|
||||||
|
formatter.error('Error message', { code: 'ERR001' });
|
||||||
|
|
||||||
|
expect(consoleErrorSpy).toHaveBeenCalled();
|
||||||
|
const output = JSON.parse(consoleErrorSpy.mock.calls[0][0]);
|
||||||
|
expect(output.error).toBe('Error message');
|
||||||
|
expect(output.code).toBe('ERR001');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should output JSON for data', () => {
|
||||||
|
const formatter = new OutputFormatter(true);
|
||||||
|
formatter.output({ test: 'data' });
|
||||||
|
|
||||||
|
expect(consoleLogSpy).toHaveBeenCalled();
|
||||||
|
const output = JSON.parse(consoleLogSpy.mock.calls[0][0]);
|
||||||
|
expect(output.test).toBe('data');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Text mode', () => {
|
||||||
|
it('should output formatted text for success', () => {
|
||||||
|
const formatter = new OutputFormatter(false);
|
||||||
|
formatter.success('Test message');
|
||||||
|
|
||||||
|
expect(consoleLogSpy).toHaveBeenCalledWith(
|
||||||
|
expect.stringContaining('✓ Test message')
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should output formatted text for error', () => {
|
||||||
|
const formatter = new OutputFormatter(false);
|
||||||
|
formatter.error('Error message');
|
||||||
|
|
||||||
|
expect(consoleErrorSpy).toHaveBeenCalledWith(
|
||||||
|
expect.stringContaining('Error: Error message')
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should output formatted text for warning', () => {
|
||||||
|
const consoleWarnSpy = vi
|
||||||
|
.spyOn(console, 'warn')
|
||||||
|
.mockImplementation(() => {});
|
||||||
|
const formatter = new OutputFormatter(false);
|
||||||
|
formatter.warning('Warning message');
|
||||||
|
|
||||||
|
expect(consoleWarnSpy).toHaveBeenCalledWith(
|
||||||
|
expect.stringContaining('⚠ Warning message')
|
||||||
|
);
|
||||||
|
consoleWarnSpy.mockRestore();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not output info in JSON mode', () => {
|
||||||
|
const formatter = new OutputFormatter(true);
|
||||||
|
formatter.info('Info message');
|
||||||
|
|
||||||
|
expect(consoleLogSpy).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
25
apps/cli/vitest.config.ts
Normal file
25
apps/cli/vitest.config.ts
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
import { defineConfig } from 'vitest/config';
|
||||||
|
|
||||||
|
export default defineConfig({
|
||||||
|
test: {
|
||||||
|
globals: true,
|
||||||
|
environment: 'node',
|
||||||
|
include: ['tests/**/*.test.ts', 'tests/**/*.spec.ts'],
|
||||||
|
coverage: {
|
||||||
|
provider: 'v8',
|
||||||
|
reporter: ['text', 'json', 'html'],
|
||||||
|
include: ['src/**/*.ts'],
|
||||||
|
exclude: [
|
||||||
|
'node_modules/',
|
||||||
|
'dist/',
|
||||||
|
'tests/',
|
||||||
|
'**/*.test.ts',
|
||||||
|
'**/*.spec.ts',
|
||||||
|
'**/*.d.ts',
|
||||||
|
'**/mocks/**',
|
||||||
|
'**/fixtures/**',
|
||||||
|
'vitest.config.ts'
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
@@ -52,6 +52,20 @@
|
|||||||
"capabilities/cli-root-commands",
|
"capabilities/cli-root-commands",
|
||||||
"capabilities/task-structure"
|
"capabilities/task-structure"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "TDD Workflow (Autopilot)",
|
||||||
|
"pages": [
|
||||||
|
"tdd-workflow/quickstart",
|
||||||
|
"tdd-workflow/ai-agent-integration",
|
||||||
|
{
|
||||||
|
"group": "Templates & Examples",
|
||||||
|
"pages": [
|
||||||
|
"tdd-workflow/templates/claude-template",
|
||||||
|
"tdd-workflow/templates/example-prompts"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
944
apps/docs/tdd-workflow/ai-agent-integration.mdx
Normal file
944
apps/docs/tdd-workflow/ai-agent-integration.mdx
Normal file
@@ -0,0 +1,944 @@
|
|||||||
|
---
|
||||||
|
title: "AI Agent Integration Guide"
|
||||||
|
description: "Complete guide for integrating AI agents with TaskMaster's autonomous TDD workflow system"
|
||||||
|
---
|
||||||
|
|
||||||
|
Complete guide for integrating AI agents with TaskMaster's autonomous TDD workflow system.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
TaskMaster provides a complete TDD workflow orchestration system that enables AI agents to autonomously implement features following strict Test-Driven Development practices. The system manages workflow state, git operations, test validation, and progress tracking.
|
||||||
|
|
||||||
|
### Key Features
|
||||||
|
|
||||||
|
- **TDD State Machine**: Enforces RED → GREEN → COMMIT cycle
|
||||||
|
- **Git Integration**: Automated branch creation, commits with metadata
|
||||||
|
- **Test Validation**: Ensures RED phase has failures, GREEN phase passes
|
||||||
|
- **Progress Tracking**: Subtask completion, attempt counting, error logging
|
||||||
|
- **State Persistence**: Automatic workflow state management
|
||||||
|
- **Dual Interface**: CLI commands and MCP tools for flexibility
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────┐
|
||||||
|
│ AI Agent │
|
||||||
|
│ (Claude Code, Custom Agent, etc.) │
|
||||||
|
└─────────────┬───────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
│ Uses CLI or MCP
|
||||||
|
│
|
||||||
|
┌─────────────▼───────────────────────────────────────┐
|
||||||
|
│ TaskMaster Interface │
|
||||||
|
│ ┌──────────────┐ ┌──────────────┐ │
|
||||||
|
│ │ CLI Commands │ │ MCP Tools │ │
|
||||||
|
│ └──────┬───────┘ └──────┬───────┘ │
|
||||||
|
└─────────┼────────────────────────┼─────────────────┘
|
||||||
|
│ │
|
||||||
|
┌─────────▼────────────────────────▼─────────────────┐
|
||||||
|
│ WorkflowOrchestrator (Core) │
|
||||||
|
│ ┌─────────────────────────────────────────────┐ │
|
||||||
|
│ │ State Machine: RED → GREEN → COMMIT │ │
|
||||||
|
│ └─────────────────────────────────────────────┘ │
|
||||||
|
│ ┌──────────┐ ┌──────────┐ ┌──────────────┐ │
|
||||||
|
│ │GitAdapter│ │TestResult│ │CommitMessage │ │
|
||||||
|
│ │ │ │Validator │ │Generator │ │
|
||||||
|
│ └──────────┘ └──────────┘ └──────────────┘ │
|
||||||
|
└────────────────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
│ Persists to
|
||||||
|
│
|
||||||
|
┌─────────▼───────────────────────────────────────────┐
|
||||||
|
│ .taskmaster/workflow-state.json │
|
||||||
|
└──────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Component Responsibilities
|
||||||
|
|
||||||
|
**WorkflowOrchestrator**
|
||||||
|
- Manages TDD state machine transitions
|
||||||
|
- Tracks current subtask and progress
|
||||||
|
- Enforces workflow rules and validations
|
||||||
|
- Emits events for state changes
|
||||||
|
|
||||||
|
**GitAdapter**
|
||||||
|
- Creates and manages workflow branches
|
||||||
|
- Stages files and creates commits
|
||||||
|
- Validates git repository state
|
||||||
|
- Provides safety checks (clean working tree, etc.)
|
||||||
|
|
||||||
|
**TestResultValidator**
|
||||||
|
- Validates RED phase has test failures
|
||||||
|
- Validates GREEN phase has all tests passing
|
||||||
|
- Parses test results from various formats
|
||||||
|
|
||||||
|
**CommitMessageGenerator**
|
||||||
|
- Generates conventional commit messages
|
||||||
|
- Embeds workflow metadata (subtask ID, phase, etc.)
|
||||||
|
- Follows project commit conventions
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
1. TaskMaster initialized project with subtasks
|
||||||
|
2. Git repository with clean working tree
|
||||||
|
3. Test framework configured (vitest, jest, etc.)
|
||||||
|
|
||||||
|
### Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Initialize workflow for a task
|
||||||
|
tm autopilot start 7
|
||||||
|
|
||||||
|
# 2. Check what to do next
|
||||||
|
tm autopilot next
|
||||||
|
|
||||||
|
# 3. Write failing test (RED phase)
|
||||||
|
# ... create test file ...
|
||||||
|
|
||||||
|
# 4. Run tests and complete RED phase
|
||||||
|
tm autopilot complete --results '{"total":1,"passed":0,"failed":1,"skipped":0}'
|
||||||
|
|
||||||
|
# 5. Implement code to pass tests (GREEN phase)
|
||||||
|
# ... write implementation ...
|
||||||
|
|
||||||
|
# 6. Run tests and complete GREEN phase
|
||||||
|
tm autopilot complete --results '{"total":1,"passed":1,"failed":0,"skipped":0}'
|
||||||
|
|
||||||
|
# 7. Commit changes
|
||||||
|
tm autopilot commit
|
||||||
|
|
||||||
|
# 8. Repeat for next subtask (automatically advanced)
|
||||||
|
tm autopilot next
|
||||||
|
```
|
||||||
|
|
||||||
|
## CLI Commands
|
||||||
|
|
||||||
|
All commands support `--json` flag for machine-readable output.
|
||||||
|
|
||||||
|
### `tm autopilot start <taskId>`
|
||||||
|
|
||||||
|
Initialize a new TDD workflow for a task.
|
||||||
|
|
||||||
|
**Options:**
|
||||||
|
- `--max-attempts <number>`: Maximum attempts per subtask (default: 3)
|
||||||
|
- `--force`: Force start even if workflow exists
|
||||||
|
- `--project-root <path>`: Project root directory
|
||||||
|
- `--json`: Output JSON
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```bash
|
||||||
|
tm autopilot start 7 --max-attempts 5 --json
|
||||||
|
```
|
||||||
|
|
||||||
|
**JSON Output:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"message": "Workflow started for task 7",
|
||||||
|
"taskId": "7",
|
||||||
|
"branchName": "task-7",
|
||||||
|
"phase": "SUBTASK_LOOP",
|
||||||
|
"tddPhase": "RED",
|
||||||
|
"progress": {
|
||||||
|
"completed": 0,
|
||||||
|
"total": 5,
|
||||||
|
"percentage": 0
|
||||||
|
},
|
||||||
|
"currentSubtask": {
|
||||||
|
"id": "1",
|
||||||
|
"title": "Implement start command",
|
||||||
|
"status": "in-progress",
|
||||||
|
"attempts": 0
|
||||||
|
},
|
||||||
|
"nextAction": "generate_test"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `tm autopilot resume`
|
||||||
|
|
||||||
|
Resume a previously started workflow from saved state.
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```bash
|
||||||
|
tm autopilot resume --json
|
||||||
|
```
|
||||||
|
|
||||||
|
### `tm autopilot next`
|
||||||
|
|
||||||
|
Get the next action to perform with detailed context.
|
||||||
|
|
||||||
|
**JSON Output:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"action": "generate_test",
|
||||||
|
"actionDescription": "Write a failing test for the current subtask",
|
||||||
|
"phase": "SUBTASK_LOOP",
|
||||||
|
"tddPhase": "RED",
|
||||||
|
"taskId": "7",
|
||||||
|
"branchName": "task-7",
|
||||||
|
"progress": {
|
||||||
|
"completed": 0,
|
||||||
|
"total": 5,
|
||||||
|
"current": 1,
|
||||||
|
"percentage": 0
|
||||||
|
},
|
||||||
|
"currentSubtask": {
|
||||||
|
"id": "1",
|
||||||
|
"title": "Implement start command",
|
||||||
|
"status": "in-progress",
|
||||||
|
"attempts": 0,
|
||||||
|
"maxAttempts": 3
|
||||||
|
},
|
||||||
|
"expectedFiles": ["test file"],
|
||||||
|
"context": {
|
||||||
|
"canProceed": false,
|
||||||
|
"errors": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `tm autopilot status`
|
||||||
|
|
||||||
|
Get comprehensive workflow progress and state information.
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```bash
|
||||||
|
tm autopilot status --json
|
||||||
|
```
|
||||||
|
|
||||||
|
### `tm autopilot complete`
|
||||||
|
|
||||||
|
Complete the current TDD phase with test result validation.
|
||||||
|
|
||||||
|
**Options:**
|
||||||
|
- `--results <json>`: Test results JSON string
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```bash
|
||||||
|
tm autopilot complete --results '{"total":10,"passed":9,"failed":1,"skipped":0}' --json
|
||||||
|
```
|
||||||
|
|
||||||
|
**Validation Rules:**
|
||||||
|
- **RED Phase**: Must have at least one failing test
|
||||||
|
- **GREEN Phase**: All tests must pass (failed === 0)
|
||||||
|
|
||||||
|
### `tm autopilot commit`
|
||||||
|
|
||||||
|
Create a git commit with enhanced message generation.
|
||||||
|
|
||||||
|
**Options:**
|
||||||
|
- `--message <text>`: Custom commit message (optional)
|
||||||
|
- `--files <paths...>`: Specific files to stage (optional)
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```bash
|
||||||
|
tm autopilot commit --json
|
||||||
|
```
|
||||||
|
|
||||||
|
### `tm autopilot abort`
|
||||||
|
|
||||||
|
Abort the workflow and clean up state (preserves git branch and code).
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```bash
|
||||||
|
tm autopilot abort --force --json
|
||||||
|
```
|
||||||
|
|
||||||
|
## MCP Tools
|
||||||
|
|
||||||
|
MCP tools provide the same functionality as CLI commands for programmatic integration.
|
||||||
|
|
||||||
|
### `autopilot_start`
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
taskId: string; // Required: Task ID (e.g., "7", "2.3")
|
||||||
|
projectRoot: string; // Required: Absolute path to project
|
||||||
|
tag?: string; // Optional: Tag context
|
||||||
|
maxAttempts?: number; // Optional: Default 3
|
||||||
|
force?: boolean; // Optional: Default false
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Returns:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
success: boolean;
|
||||||
|
message: string;
|
||||||
|
taskId: string;
|
||||||
|
branchName: string;
|
||||||
|
phase: WorkflowPhase;
|
||||||
|
tddPhase: TDDPhase;
|
||||||
|
progress: {
|
||||||
|
completed: number;
|
||||||
|
total: number;
|
||||||
|
percentage: number;
|
||||||
|
};
|
||||||
|
currentSubtask: SubtaskInfo | null;
|
||||||
|
nextAction: string;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `autopilot_resume`
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
projectRoot: string; // Required: Absolute path to project
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `autopilot_next`
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
projectRoot: string; // Required: Absolute path to project
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Returns:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
action: string; // 'generate_test' | 'implement_code' | 'commit_changes'
|
||||||
|
actionDescription: string;
|
||||||
|
phase: WorkflowPhase;
|
||||||
|
tddPhase: TDDPhase;
|
||||||
|
taskId: string;
|
||||||
|
branchName: string;
|
||||||
|
progress: ProgressInfo;
|
||||||
|
currentSubtask: SubtaskInfo | null;
|
||||||
|
expectedFiles: string[];
|
||||||
|
context: {
|
||||||
|
canProceed: boolean;
|
||||||
|
errors: string[];
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `autopilot_status`
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
projectRoot: string; // Required: Absolute path to project
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Returns:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
taskId: string;
|
||||||
|
branchName: string;
|
||||||
|
phase: WorkflowPhase;
|
||||||
|
tddPhase: TDDPhase;
|
||||||
|
progress: ProgressInfo;
|
||||||
|
currentSubtask: SubtaskInfo | null;
|
||||||
|
subtasks: SubtaskInfo[];
|
||||||
|
errors: string[];
|
||||||
|
metadata: Record<string, any>;
|
||||||
|
canProceed: boolean;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `autopilot_complete_phase`
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
projectRoot: string; // Required: Absolute path to project
|
||||||
|
testResults: {
|
||||||
|
total: number; // Required: Total tests
|
||||||
|
passed: number; // Required: Passing tests
|
||||||
|
failed: number; // Required: Failing tests
|
||||||
|
skipped?: number; // Optional: Skipped tests
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `autopilot_commit`
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
projectRoot: string; // Required: Absolute path to project
|
||||||
|
files?: string[]; // Optional: Files to stage
|
||||||
|
customMessage?: string; // Optional: Custom commit message
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### `autopilot_abort`
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
projectRoot: string; // Required: Absolute path to project
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Workflow Phases
|
||||||
|
|
||||||
|
### Phase Diagram
|
||||||
|
|
||||||
|
```
|
||||||
|
PREFLIGHT → BRANCH_SETUP → SUBTASK_LOOP → FINALIZE → COMPLETE
|
||||||
|
↓
|
||||||
|
RED → GREEN → COMMIT
|
||||||
|
↑ ↓
|
||||||
|
└──────────────┘
|
||||||
|
(Next Subtask)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase Descriptions
|
||||||
|
|
||||||
|
**PREFLIGHT**
|
||||||
|
- Validate task has subtasks
|
||||||
|
- Check git repository state
|
||||||
|
- Verify preconditions
|
||||||
|
|
||||||
|
**BRANCH_SETUP**
|
||||||
|
- Create workflow branch: `task-{taskId}`
|
||||||
|
- Checkout new branch
|
||||||
|
- Initialize workflow context
|
||||||
|
|
||||||
|
**SUBTASK_LOOP**
|
||||||
|
- **RED Phase**: Write failing tests
|
||||||
|
- Action: `generate_test`
|
||||||
|
- Validation: At least one test must fail
|
||||||
|
- Files: Test files
|
||||||
|
|
||||||
|
- **GREEN Phase**: Implement code
|
||||||
|
- Action: `implement_code`
|
||||||
|
- Validation: All tests must pass
|
||||||
|
- Files: Implementation files
|
||||||
|
|
||||||
|
- **COMMIT Phase**: Create commit
|
||||||
|
- Action: `commit_changes`
|
||||||
|
- Auto-generates commit message
|
||||||
|
- Advances to next subtask
|
||||||
|
|
||||||
|
**FINALIZE**
|
||||||
|
- All subtasks complete
|
||||||
|
- Workflow ready for review/merge
|
||||||
|
|
||||||
|
**COMPLETE**
|
||||||
|
- Workflow finished
|
||||||
|
- State can be cleaned up
|
||||||
|
|
||||||
|
## Responsibility Matrix
|
||||||
|
|
||||||
|
Clear division of responsibilities between AI Agent and TaskMaster.
|
||||||
|
|
||||||
|
| Responsibility | AI Agent | TaskMaster |
|
||||||
|
|---------------|----------|------------|
|
||||||
|
| **Workflow Orchestration** | | ✓ |
|
||||||
|
| Start/resume workflow | Call CLI/MCP | Execute & validate |
|
||||||
|
| Track workflow state | Read state | Persist state |
|
||||||
|
| Manage TDD phases | Request transitions | Enforce transitions |
|
||||||
|
| Validate phase completion | | ✓ (RED must fail, GREEN must pass) |
|
||||||
|
| **Test Management** | | |
|
||||||
|
| Write test code | ✓ | |
|
||||||
|
| Run tests | ✓ | |
|
||||||
|
| Parse test output | ✓ | |
|
||||||
|
| Report test results | Provide JSON | Validate results |
|
||||||
|
| **Implementation** | | |
|
||||||
|
| Write implementation code | ✓ | |
|
||||||
|
| Ensure tests pass | ✓ | |
|
||||||
|
| Follow TDD cycle | ✓ (guided by TaskMaster) | Enforce rules |
|
||||||
|
| **Git Operations** | | |
|
||||||
|
| Create workflow branch | Request | ✓ Execute |
|
||||||
|
| Stage files | Request (optional) | ✓ Execute |
|
||||||
|
| Generate commit messages | | ✓ |
|
||||||
|
| Create commits | Request | ✓ Execute |
|
||||||
|
| **Progress Tracking** | | |
|
||||||
|
| Query progress | Call status | ✓ Provide data |
|
||||||
|
| Advance subtasks | | ✓ (automatic on commit) |
|
||||||
|
| Count attempts | | ✓ |
|
||||||
|
| Log activity | | ✓ |
|
||||||
|
|
||||||
|
### AI Agent Responsibilities
|
||||||
|
|
||||||
|
1. **Read and understand subtask requirements**
|
||||||
|
2. **Write test code** that validates the requirement
|
||||||
|
3. **Run test suite** using project's test command
|
||||||
|
4. **Parse test output** into JSON format
|
||||||
|
5. **Report results** to TaskMaster for validation
|
||||||
|
6. **Write implementation** to satisfy tests
|
||||||
|
7. **Request commits** when GREEN phase complete
|
||||||
|
8. **Handle errors** and retry within attempt limits
|
||||||
|
|
||||||
|
### TaskMaster Responsibilities
|
||||||
|
|
||||||
|
1. **Manage workflow state machine**
|
||||||
|
2. **Enforce TDD rules** (RED must fail, GREEN must pass)
|
||||||
|
3. **Track progress** (completed, current, attempts)
|
||||||
|
4. **Create git commits** with enhanced messages
|
||||||
|
5. **Manage git branches** and repository safety
|
||||||
|
6. **Validate transitions** between phases
|
||||||
|
7. **Persist state** for resumability
|
||||||
|
8. **Generate reports** and activity logs
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Complete TDD Cycle Example
|
||||||
|
|
||||||
|
#### 1. Start Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot start 7 --json
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"taskId": "7",
|
||||||
|
"branchName": "task-7",
|
||||||
|
"tddPhase": "RED",
|
||||||
|
"currentSubtask": {
|
||||||
|
"id": "1",
|
||||||
|
"title": "Implement start command",
|
||||||
|
"status": "in-progress"
|
||||||
|
},
|
||||||
|
"nextAction": "generate_test"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 2. Get Next Action
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot next --json
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"action": "generate_test",
|
||||||
|
"actionDescription": "Write a failing test for the current subtask",
|
||||||
|
"tddPhase": "RED",
|
||||||
|
"currentSubtask": {
|
||||||
|
"id": "1",
|
||||||
|
"title": "Implement start command"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 3. Write Failing Test
|
||||||
|
|
||||||
|
AI Agent creates `tests/start.test.ts`:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { StartCommand } from '../src/commands/start.js';
|
||||||
|
|
||||||
|
describe('StartCommand', () => {
|
||||||
|
it('should initialize workflow and create branch', async () => {
|
||||||
|
const command = new StartCommand();
|
||||||
|
const result = await command.execute({ taskId: '7' });
|
||||||
|
expect(result.success).toBe(true);
|
||||||
|
expect(result.branchName).toBe('task-7');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 4. Run Tests (Should Fail)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ npm test
|
||||||
|
# Output: 1 test failed (expected)
|
||||||
|
```
|
||||||
|
|
||||||
|
Parse output to JSON:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"total": 1,
|
||||||
|
"passed": 0,
|
||||||
|
"failed": 1,
|
||||||
|
"skipped": 0
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 5. Complete RED Phase
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot complete --results '{"total":1,"passed":0,"failed":1,"skipped":0}' --json
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"message": "Completed RED phase",
|
||||||
|
"previousPhase": "RED",
|
||||||
|
"currentPhase": "GREEN",
|
||||||
|
"nextAction": "implement_code"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 6. Implement Code
|
||||||
|
|
||||||
|
AI Agent creates `src/commands/start.ts`:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
export class StartCommand {
|
||||||
|
async execute(options: { taskId: string }) {
|
||||||
|
// Implementation that makes test pass
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
branchName: `task-${options.taskId}`
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 7. Run Tests (Should Pass)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ npm test
|
||||||
|
# Output: 1 test passed
|
||||||
|
```
|
||||||
|
|
||||||
|
Parse output:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"total": 1,
|
||||||
|
"passed": 1,
|
||||||
|
"failed": 0,
|
||||||
|
"skipped": 0
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 8. Complete GREEN Phase
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot complete --results '{"total":1,"passed":1,"failed":0,"skipped":0}' --json
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"previousPhase": "GREEN",
|
||||||
|
"currentPhase": "COMMIT",
|
||||||
|
"nextAction": "commit_changes"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 9. Commit Changes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot commit --json
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"commit": {
|
||||||
|
"hash": "abc123",
|
||||||
|
"message": "feat(autopilot): implement start command (Task 7.1)\n\n..."
|
||||||
|
},
|
||||||
|
"subtaskCompleted": "1",
|
||||||
|
"currentSubtask": {
|
||||||
|
"id": "2",
|
||||||
|
"title": "Implement resume command"
|
||||||
|
},
|
||||||
|
"nextAction": "generate_test"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### MCP Integration Example
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// AI Agent using MCP tools
|
||||||
|
|
||||||
|
async function implementTask(taskId: string) {
|
||||||
|
// Start workflow
|
||||||
|
const start = await mcp.call('autopilot_start', {
|
||||||
|
taskId,
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Started task ${start.taskId} on branch ${start.branchName}`);
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
// Get next action
|
||||||
|
const next = await mcp.call('autopilot_next', {
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Next action: ${next.action}`);
|
||||||
|
|
||||||
|
if (next.action === 'generate_test') {
|
||||||
|
// AI generates test
|
||||||
|
const testCode = await generateTest(next.currentSubtask);
|
||||||
|
await writeFile(testCode);
|
||||||
|
|
||||||
|
// Run tests
|
||||||
|
const results = await runTests();
|
||||||
|
|
||||||
|
// Complete RED phase
|
||||||
|
await mcp.call('autopilot_complete_phase', {
|
||||||
|
projectRoot: '/path/to/project',
|
||||||
|
testResults: results
|
||||||
|
});
|
||||||
|
|
||||||
|
} else if (next.action === 'implement_code') {
|
||||||
|
// AI generates implementation
|
||||||
|
const implCode = await generateImplementation(next.currentSubtask);
|
||||||
|
await writeFile(implCode);
|
||||||
|
|
||||||
|
// Run tests
|
||||||
|
const results = await runTests();
|
||||||
|
|
||||||
|
// Complete GREEN phase
|
||||||
|
await mcp.call('autopilot_complete_phase', {
|
||||||
|
projectRoot: '/path/to/project',
|
||||||
|
testResults: results
|
||||||
|
});
|
||||||
|
|
||||||
|
} else if (next.action === 'commit_changes') {
|
||||||
|
// Commit
|
||||||
|
const commit = await mcp.call('autopilot_commit', {
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Committed: ${commit.commit.hash}`);
|
||||||
|
|
||||||
|
if (commit.isComplete) {
|
||||||
|
console.log('Task complete!');
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
### Common Errors and Solutions
|
||||||
|
|
||||||
|
#### Workflow Already Exists
|
||||||
|
|
||||||
|
**Error:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"error": "Workflow already in progress",
|
||||||
|
"suggestion": "Use autopilot_resume to continue the existing workflow"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
```bash
|
||||||
|
# Resume existing workflow
|
||||||
|
tm autopilot resume
|
||||||
|
|
||||||
|
# OR force start new workflow
|
||||||
|
tm autopilot start 7 --force
|
||||||
|
```
|
||||||
|
|
||||||
|
#### RED Phase Validation Failed
|
||||||
|
|
||||||
|
**Error:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"error": "RED phase validation failed",
|
||||||
|
"reason": "At least one test must be failing in RED phase",
|
||||||
|
"actual": { "passed": 10, "failed": 0 },
|
||||||
|
"suggestion": "Ensure you have written a failing test before proceeding"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution:** The test isn't actually exercising the new feature. Write a test that asserts the new behavior — one that fails until the feature is implemented.
|
||||||
|
|
||||||
|
#### GREEN Phase Validation Failed
|
||||||
|
|
||||||
|
**Error:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"error": "GREEN phase validation failed",
|
||||||
|
"reason": "All tests must pass in GREEN phase",
|
||||||
|
"actual": { "passed": 9, "failed": 1 },
|
||||||
|
"suggestion": "Fix the implementation to make all tests pass"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution:** The implementation isn't complete. Debug the failing test and fix the implementation until all tests pass.
|
||||||
|
|
||||||
|
#### No Staged Changes
|
||||||
|
|
||||||
|
**Error:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"error": "No staged changes to commit",
|
||||||
|
"suggestion": "Make code changes before committing"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution:** Ensure you've actually created/modified files before committing.
|
||||||
|
|
||||||
|
#### Git Working Tree Not Clean
|
||||||
|
|
||||||
|
**Error:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"error": "Git validation failed: working tree not clean",
|
||||||
|
"suggestion": "Commit or stash changes before starting workflow"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
```bash
|
||||||
|
git status
|
||||||
|
git add . && git commit -m "chore: save work"
|
||||||
|
# Then start workflow
|
||||||
|
```
|
||||||
|
|
||||||
|
### Error Recovery Patterns
|
||||||
|
|
||||||
|
#### Retry Pattern
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
async function withRetry<T>(
|
||||||
|
fn: () => Promise<T>,
|
||||||
|
maxAttempts: number = 3
|
||||||
|
): Promise<T> {
|
||||||
|
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
|
||||||
|
try {
|
||||||
|
return await fn();
|
||||||
|
} catch (error) {
|
||||||
|
if (attempt === maxAttempts) throw error;
|
||||||
|
console.log(`Attempt ${attempt} failed, retrying...`);
|
||||||
|
      await sleep(1000 * attempt); // Linear backoff: delay grows with each attempt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
throw new Error('Should not reach here');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage
|
||||||
|
const results = await withRetry(async () => {
|
||||||
|
const output = await runTests();
|
||||||
|
return parseTestResults(output);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Graceful Degradation
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
async function completePhase(projectRoot: string, results: TestResults) {
|
||||||
|
try {
|
||||||
|
return await mcp.call('autopilot_complete_phase', {
|
||||||
|
projectRoot,
|
||||||
|
testResults: results
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Phase completion failed:', error);
|
||||||
|
|
||||||
|
// Log error for debugging
|
||||||
|
await logError(error);
|
||||||
|
|
||||||
|
// Attempt manual recovery
|
||||||
|
console.log('Attempting manual state recovery...');
|
||||||
|
const status = await mcp.call('autopilot_status', { projectRoot });
|
||||||
|
console.log('Current state:', status);
|
||||||
|
|
||||||
|
// Provide user guidance
|
||||||
|
console.log('Manual intervention required:');
|
||||||
|
console.log('1. Check test results are correct');
|
||||||
|
console.log('2. Verify current phase allows transition');
|
||||||
|
console.log('3. Run: tm autopilot status');
|
||||||
|
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Workflow State Issues
|
||||||
|
|
||||||
|
**Problem:** State file corrupted or inconsistent
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
```bash
|
||||||
|
# Check state file
|
||||||
|
cat .taskmaster/workflow-state.json
|
||||||
|
|
||||||
|
# If corrupted, abort and restart
|
||||||
|
tm autopilot abort --force
|
||||||
|
tm autopilot start 7
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test Results Parsing
|
||||||
|
|
||||||
|
**Problem:** Test output format not recognized
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
Ensure test results JSON has required fields:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"total": 10, // Required
|
||||||
|
"passed": 8, // Required
|
||||||
|
"failed": 2, // Required
|
||||||
|
"skipped": 0 // Optional
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Branch Conflicts
|
||||||
|
|
||||||
|
**Problem:** Workflow branch already exists
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
```bash
|
||||||
|
# Check branches
|
||||||
|
git branch
|
||||||
|
|
||||||
|
# Delete old workflow branch if safe
|
||||||
|
git branch -D task-7
|
||||||
|
|
||||||
|
# Start workflow again
|
||||||
|
tm autopilot start 7
|
||||||
|
```
|
||||||
|
|
||||||
|
### Permission Issues
|
||||||
|
|
||||||
|
**Problem:** Cannot write to .taskmaster directory
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
```bash
|
||||||
|
# Check directory permissions
|
||||||
|
ls -la .taskmaster/
|
||||||
|
|
||||||
|
# Fix permissions
|
||||||
|
chmod -R u+w .taskmaster/
|
||||||
|
```
|
||||||
|
|
||||||
|
### State Persistence Failures
|
||||||
|
|
||||||
|
**Problem:** State not saving between commands
|
||||||
|
|
||||||
|
**Solution:**
|
||||||
|
```bash
|
||||||
|
# Check file system permissions
|
||||||
|
ls -la .taskmaster/workflow-state.json
|
||||||
|
|
||||||
|
# Verify state is being written
|
||||||
|
tm autopilot status --json | jq .
|
||||||
|
|
||||||
|
# If all else fails, reinitialize (warning: this deletes all TaskMaster data)
|
||||||
|
rm -rf .taskmaster/
|
||||||
|
tm init
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Additional Resources
|
||||||
|
|
||||||
|
- [Command Reference](./command-reference.mdx) - Complete CLI command documentation
|
||||||
|
- [MCP Provider Guide](./mcp-provider-guide.mdx) - MCP integration details
|
||||||
|
- [Task Structure](./task-structure.mdx) - Understanding TaskMaster's task system
|
||||||
|
- [Configuration](./configuration.mdx) - Project configuration options
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
For issues, questions, or contributions:
|
||||||
|
- GitHub Issues: https://github.com/eyaltoledano/claude-task-master/issues
|
||||||
|
- Documentation: https://docs.task-master.dev
|
||||||
315
apps/docs/tdd-workflow/quickstart.mdx
Normal file
315
apps/docs/tdd-workflow/quickstart.mdx
Normal file
@@ -0,0 +1,315 @@
|
|||||||
|
---
|
||||||
|
title: "TDD Workflow Quick Start"
|
||||||
|
description: "Get started with TaskMaster's autonomous TDD workflow in 5 minutes"
|
||||||
|
---
|
||||||
|
|
||||||
|
Get started with TaskMaster's autonomous TDD workflow in 5 minutes.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- TaskMaster initialized project (`tm init`)
|
||||||
|
- Tasks with subtasks created (`tm parse-prd` or `tm expand`)
|
||||||
|
- Git repository with clean working tree
|
||||||
|
- Test framework installed (vitest, jest, mocha, etc.)
|
||||||
|
|
||||||
|
## 1. Start a Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tm autopilot start <taskId>
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```bash
|
||||||
|
$ tm autopilot start 7
|
||||||
|
|
||||||
|
✓ Workflow started for task 7
|
||||||
|
✓ Created branch: task-7
|
||||||
|
✓ Current phase: RED
|
||||||
|
✓ Subtask 1/5: Implement start command
|
||||||
|
→ Next action: Write a failing test
|
||||||
|
```
|
||||||
|
|
||||||
|
## 2. The TDD Cycle
|
||||||
|
|
||||||
|
### RED Phase: Write Failing Test
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check what to do next
|
||||||
|
$ tm autopilot next --json
|
||||||
|
{
|
||||||
|
"action": "generate_test",
|
||||||
|
"currentSubtask": {
|
||||||
|
"id": "1",
|
||||||
|
"title": "Implement start command"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Write a test that fails:
|
||||||
|
```typescript
|
||||||
|
// tests/start.test.ts
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { StartCommand } from '../src/commands/start';
|
||||||
|
|
||||||
|
describe('StartCommand', () => {
|
||||||
|
it('should initialize workflow', async () => {
|
||||||
|
const command = new StartCommand();
|
||||||
|
const result = await command.execute({ taskId: '7' });
|
||||||
|
expect(result.success).toBe(true);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
Run tests:
|
||||||
|
```bash
|
||||||
|
$ npm test
|
||||||
|
# ✗ 1 test failed
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete RED phase:
|
||||||
|
```bash
|
||||||
|
$ tm autopilot complete --results '{"total":1,"passed":0,"failed":1,"skipped":0}'
|
||||||
|
|
||||||
|
✓ RED phase complete
|
||||||
|
✓ Current phase: GREEN
|
||||||
|
→ Next action: Implement code to pass tests
|
||||||
|
```
|
||||||
|
|
||||||
|
### GREEN Phase: Implement Feature
|
||||||
|
|
||||||
|
Write minimal code to pass:
|
||||||
|
```typescript
|
||||||
|
// src/commands/start.ts
|
||||||
|
export class StartCommand {
|
||||||
|
async execute(options: { taskId: string }) {
|
||||||
|
return { success: true };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Run tests:
|
||||||
|
```bash
|
||||||
|
$ npm test
|
||||||
|
# ✓ 1 test passed
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete GREEN phase:
|
||||||
|
```bash
|
||||||
|
$ tm autopilot complete --results '{"total":1,"passed":1,"failed":0,"skipped":0}'
|
||||||
|
|
||||||
|
✓ GREEN phase complete
|
||||||
|
✓ Current phase: COMMIT
|
||||||
|
→ Next action: Commit changes
|
||||||
|
```
|
||||||
|
|
||||||
|
### COMMIT Phase: Save Progress
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot commit
|
||||||
|
|
||||||
|
✓ Created commit: abc123
|
||||||
|
✓ Message: feat(autopilot): implement start command (Task 7.1)
|
||||||
|
✓ Advanced to subtask 2/5
|
||||||
|
✓ Current phase: RED
|
||||||
|
→ Next action: Write a failing test
|
||||||
|
```
|
||||||
|
|
||||||
|
## 3. Continue for All Subtasks
|
||||||
|
|
||||||
|
Repeat the RED-GREEN-COMMIT cycle for each subtask until complete.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check progress anytime
|
||||||
|
$ tm autopilot status --json
|
||||||
|
{
|
||||||
|
"taskId": "7",
|
||||||
|
"progress": {
|
||||||
|
"completed": 1,
|
||||||
|
"total": 5,
|
||||||
|
"percentage": 20
|
||||||
|
},
|
||||||
|
"currentSubtask": {
|
||||||
|
"id": "2",
|
||||||
|
"title": "Implement resume command"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 4. Complete the Workflow
|
||||||
|
|
||||||
|
When all subtasks are done:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot status --json
|
||||||
|
{
|
||||||
|
"phase": "COMPLETE",
|
||||||
|
"progress": {
|
||||||
|
"completed": 5,
|
||||||
|
"total": 5,
|
||||||
|
"percentage": 100
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Your branch `task-7` is ready for review/merge!
|
||||||
|
|
||||||
|
## Common Patterns
|
||||||
|
|
||||||
|
### Parse Test Output
|
||||||
|
|
||||||
|
Your test runner outputs a human-readable format — convert it to JSON:
|
||||||
|
|
||||||
|
**Vitest:**
|
||||||
|
```
|
||||||
|
Tests 2 failed | 8 passed | 10 total
|
||||||
|
```
|
||||||
|
→ `{"total":10,"passed":8,"failed":2,"skipped":0}`
|
||||||
|
|
||||||
|
**Jest:**
|
||||||
|
```
|
||||||
|
Tests: 2 failed, 8 passed, 10 total
|
||||||
|
```
|
||||||
|
→ `{"total":10,"passed":8,"failed":2,"skipped":0}`
|
||||||
|
|
||||||
|
### Handle Errors
|
||||||
|
|
||||||
|
**Problem:** RED phase won't complete - "no test failures"
|
||||||
|
|
||||||
|
**Solution:** Your test isn't testing new behavior. Make sure it fails:
|
||||||
|
```typescript
|
||||||
|
// Bad - test passes immediately
|
||||||
|
it('should exist', () => {
|
||||||
|
expect(StartCommand).toBeDefined(); // Always passes
|
||||||
|
});
|
||||||
|
|
||||||
|
// Good - test fails until feature exists
|
||||||
|
it('should initialize workflow', async () => {
|
||||||
|
const result = await new StartCommand().execute({ taskId: '7' });
|
||||||
|
expect(result.success).toBe(true); // Fails until execute() is implemented
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Problem:** GREEN phase won't complete - "tests still failing"
|
||||||
|
|
||||||
|
**Solution:** Fix your implementation until all tests pass:
|
||||||
|
```bash
|
||||||
|
# Run tests to see what's failing
|
||||||
|
$ npm test
|
||||||
|
|
||||||
|
# Fix the issue
|
||||||
|
$ vim src/commands/start.ts
|
||||||
|
|
||||||
|
# Verify tests pass
|
||||||
|
$ npm test
|
||||||
|
|
||||||
|
# Try again
|
||||||
|
$ tm autopilot complete --results '{"total":1,"passed":1,"failed":0,"skipped":0}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Resume Interrupted Work
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# If you interrupted the workflow
|
||||||
|
$ tm autopilot resume
|
||||||
|
|
||||||
|
✓ Workflow resumed
|
||||||
|
✓ Task 7 - subtask 3/5
|
||||||
|
✓ Current phase: GREEN
|
||||||
|
→ Continue from where you left off
|
||||||
|
```
|
||||||
|
|
||||||
|
## JSON Output Mode
|
||||||
|
|
||||||
|
All commands support `--json` for programmatic use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ tm autopilot start 7 --json | jq .
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"taskId": "7",
|
||||||
|
"branchName": "task-7",
|
||||||
|
"phase": "SUBTASK_LOOP",
|
||||||
|
"tddPhase": "RED",
|
||||||
|
"progress": { ... },
|
||||||
|
"currentSubtask": { ... },
|
||||||
|
"nextAction": "generate_test"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Perfect for:
|
||||||
|
- CI/CD integration
|
||||||
|
- Custom tooling
|
||||||
|
- Automated workflows
|
||||||
|
- Progress monitoring
|
||||||
|
|
||||||
|
## MCP Integration
|
||||||
|
|
||||||
|
For AI agents (Claude Code, etc.), use MCP tools:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Start workflow
|
||||||
|
await mcp.call('autopilot_start', {
|
||||||
|
taskId: '7',
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get next action
|
||||||
|
const next = await mcp.call('autopilot_next', {
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Complete phase
|
||||||
|
await mcp.call('autopilot_complete_phase', {
|
||||||
|
projectRoot: '/path/to/project',
|
||||||
|
testResults: { total: 1, passed: 0, failed: 1, skipped: 0 }
|
||||||
|
});
|
||||||
|
|
||||||
|
// Commit
|
||||||
|
await mcp.call('autopilot_commit', {
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
See [AI Agent Integration Guide](./ai-agent-integration.mdx) for details.
|
||||||
|
|
||||||
|
## Cheat Sheet
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start
|
||||||
|
tm autopilot start <taskId> # Initialize workflow
|
||||||
|
|
||||||
|
# Workflow Control
|
||||||
|
tm autopilot next # What's next?
|
||||||
|
tm autopilot status # Current state
|
||||||
|
tm autopilot resume # Continue interrupted work
|
||||||
|
tm autopilot abort # Cancel and cleanup
|
||||||
|
|
||||||
|
# TDD Cycle
|
||||||
|
tm autopilot complete --results '{...}' # Advance phase
|
||||||
|
tm autopilot commit # Save progress
|
||||||
|
|
||||||
|
# Options
|
||||||
|
--json # Machine-readable output
|
||||||
|
--project-root <path> # Specify project location
|
||||||
|
--force # Override safety checks
|
||||||
|
```
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
- Read [AI Agent Integration Guide](./ai-agent-integration.mdx) for complete documentation
|
||||||
|
- See [Example Prompts](./templates/example-prompts.mdx) for AI agent patterns
|
||||||
|
- Check [Command Reference](./command-reference.mdx) for all options
|
||||||
|
- Review [CLAUDE.md Template](./templates/CLAUDE.md.template) for AI integration
|
||||||
|
|
||||||
|
## Tips
|
||||||
|
|
||||||
|
1. **Always let tests fail first** - That's the RED phase
|
||||||
|
2. **Write minimal code** - Just enough to pass
|
||||||
|
3. **Commit frequently** - After each subtask
|
||||||
|
4. **Use --json** - Better for programmatic use
|
||||||
|
5. **Check status often** - Know where you are
|
||||||
|
6. **Trust the workflow** - It enforces TDD rules
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Ready to start?** Run `tm autopilot start <taskId>` and begin your TDD journey!
|
||||||
388
apps/docs/tdd-workflow/templates/claude-template.mdx
Normal file
388
apps/docs/tdd-workflow/templates/claude-template.mdx
Normal file
@@ -0,0 +1,388 @@
|
|||||||
|
---
|
||||||
|
title: "CLAUDE.md Template"
|
||||||
|
description: "Ready-to-use CLAUDE.md template for AI agent integration with TDD workflow"
|
||||||
|
---
|
||||||
|
|
||||||
|
This file provides integration instructions for AI agents (like Claude Code) to work with TaskMaster's autonomous TDD workflow system.
|
||||||
|
|
||||||
|
## Quick Reference
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start workflow
|
||||||
|
tm autopilot start <taskId>
|
||||||
|
|
||||||
|
# Get next action
|
||||||
|
tm autopilot next --json
|
||||||
|
|
||||||
|
# Complete phase with test results
|
||||||
|
tm autopilot complete --results '{"total":N,"passed":N,"failed":N,"skipped":N}'
|
||||||
|
|
||||||
|
# Commit changes
|
||||||
|
tm autopilot commit
|
||||||
|
|
||||||
|
# Check status
|
||||||
|
tm autopilot status --json
|
||||||
|
|
||||||
|
# Abort workflow
|
||||||
|
tm autopilot abort
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integration Pattern
|
||||||
|
|
||||||
|
### 1. Start Task
|
||||||
|
|
||||||
|
Before implementing a task:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tm autopilot start {TASK_ID}
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates a workflow branch and initializes the TDD state machine.
|
||||||
|
|
||||||
|
### 2. Follow TDD Cycle
|
||||||
|
|
||||||
|
For each subtask, repeat this cycle:
|
||||||
|
|
||||||
|
#### RED Phase - Write Failing Test
|
||||||
|
|
||||||
|
1. Check next action:
|
||||||
|
```bash
|
||||||
|
tm autopilot next --json
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Write a test that **fails** because the feature doesn't exist yet
|
||||||
|
|
||||||
|
3. Run tests and report results:
|
||||||
|
```bash
|
||||||
|
npm test # or appropriate test command
|
||||||
|
tm autopilot complete --results '{TEST_RESULTS_JSON}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important:** RED phase MUST have at least one failing test.
|
||||||
|
|
||||||
|
#### GREEN Phase - Implement Feature
|
||||||
|
|
||||||
|
1. Check next action confirms GREEN phase
|
||||||
|
|
||||||
|
2. Write minimal implementation to make tests pass
|
||||||
|
|
||||||
|
3. Run tests and report results:
|
||||||
|
```bash
|
||||||
|
npm test
|
||||||
|
tm autopilot complete --results '{TEST_RESULTS_JSON}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important:** GREEN phase MUST have all tests passing (failed === 0).
|
||||||
|
|
||||||
|
#### COMMIT Phase - Save Progress
|
||||||
|
|
||||||
|
1. Review changes:
|
||||||
|
```bash
|
||||||
|
git status
|
||||||
|
git diff
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Commit (auto-generates message with metadata):
|
||||||
|
```bash
|
||||||
|
tm autopilot commit
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Workflow automatically advances to next subtask
|
||||||
|
|
||||||
|
### 3. Monitor Progress
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check overall progress
|
||||||
|
tm autopilot status --json
|
||||||
|
|
||||||
|
# See what's next
|
||||||
|
tm autopilot next --json
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Handle Completion
|
||||||
|
|
||||||
|
When all subtasks are done:
|
||||||
|
- Workflow enters COMPLETE phase
|
||||||
|
- Branch remains for review/merge
|
||||||
|
- State can be cleaned up
|
||||||
|
|
||||||
|
## Example Session
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start task with 3 subtasks
|
||||||
|
$ tm autopilot start 7
|
||||||
|
✓ Workflow started for task 7
|
||||||
|
✓ Created branch: task-7
|
||||||
|
✓ Phase: RED
|
||||||
|
✓ Next: generate_test for subtask 7.1
|
||||||
|
|
||||||
|
# Write failing test for subtask 7.1
|
||||||
|
$ cat > tests/feature.test.ts
|
||||||
|
# ... write test ...
|
||||||
|
|
||||||
|
$ npm test
|
||||||
|
# 1 test, 0 passed, 1 failed
|
||||||
|
|
||||||
|
$ tm autopilot complete --results '{"total":1,"passed":0,"failed":1,"skipped":0}'
|
||||||
|
✓ RED phase complete
|
||||||
|
✓ Phase: GREEN
|
||||||
|
✓ Next: implement_code
|
||||||
|
|
||||||
|
# Write implementation
|
||||||
|
$ cat > src/feature.ts
|
||||||
|
# ... write code ...
|
||||||
|
|
||||||
|
$ npm test
|
||||||
|
# 1 test, 1 passed, 0 failed
|
||||||
|
|
||||||
|
$ tm autopilot complete --results '{"total":1,"passed":1,"failed":0,"skipped":0}'
|
||||||
|
✓ GREEN phase complete
|
||||||
|
✓ Phase: COMMIT
|
||||||
|
✓ Next: commit_changes
|
||||||
|
|
||||||
|
$ tm autopilot commit
|
||||||
|
✓ Created commit: abc123
|
||||||
|
✓ Message: feat(feature): implement feature (Task 7.1)
|
||||||
|
✓ Advanced to subtask 7.2
|
||||||
|
✓ Phase: RED
|
||||||
|
✓ Next: generate_test
|
||||||
|
|
||||||
|
# Repeat for subtasks 7.2 and 7.3...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Result Format
|
||||||
|
|
||||||
|
Always provide test results in this JSON format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"total": 10, // Total number of tests
|
||||||
|
"passed": 8, // Number of passing tests
|
||||||
|
"failed": 2, // Number of failing tests
|
||||||
|
"skipped": 0 // Number of skipped tests (optional)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Parsing Test Output
|
||||||
|
|
||||||
|
Common test frameworks output that needs parsing:
|
||||||
|
|
||||||
|
**Vitest:**
|
||||||
|
```
|
||||||
|
Test Files 1 passed (1)
|
||||||
|
Tests 10 passed | 2 failed (12)
|
||||||
|
```
|
||||||
|
→ `{"total":12,"passed":10,"failed":2,"skipped":0}`
|
||||||
|
|
||||||
|
**Jest:**
|
||||||
|
```
|
||||||
|
Tests: 2 failed, 10 passed, 12 total
|
||||||
|
```
|
||||||
|
→ `{"total":12,"passed":10,"failed":2,"skipped":0}`
|
||||||
|
|
||||||
|
**Mocha:**
|
||||||
|
```
|
||||||
|
12 passing
|
||||||
|
2 failing
|
||||||
|
```
|
||||||
|
→ `{"total":14,"passed":12,"failed":2,"skipped":0}`
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
**1. RED Phase Won't Complete**
|
||||||
|
- Error: "RED phase validation failed: no test failures"
|
||||||
|
- Solution: Your test isn't actually testing new behavior. Write a test that fails.
|
||||||
|
|
||||||
|
**2. GREEN Phase Won't Complete**
|
||||||
|
- Error: "GREEN phase validation failed: tests still failing"
|
||||||
|
- Solution: Implementation incomplete. Debug and fix failing tests.
|
||||||
|
|
||||||
|
**3. Workflow Already Exists**
|
||||||
|
- Error: "Workflow already in progress"
|
||||||
|
- Solution: Run `tm autopilot resume` or `tm autopilot abort --force` then restart
|
||||||
|
|
||||||
|
**4. No Staged Changes**
|
||||||
|
- Error: "No staged changes to commit"
|
||||||
|
- Solution: Ensure you've actually created/modified files
|
||||||
|
|
||||||
|
### Recovery
|
||||||
|
|
||||||
|
If workflow gets stuck:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check current state
|
||||||
|
tm autopilot status --json
|
||||||
|
|
||||||
|
# If corrupted, abort and restart
|
||||||
|
tm autopilot abort --force
|
||||||
|
tm autopilot start {TASK_ID}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### 1. One Feature Per Test Cycle
|
||||||
|
|
||||||
|
Each RED-GREEN-COMMIT cycle should implement exactly one small feature or behavior.
|
||||||
|
|
||||||
|
**Good:**
|
||||||
|
- RED: Test that `getUser()` returns user object
|
||||||
|
- GREEN: Implement `getUser()` to return user
|
||||||
|
- COMMIT: One commit for getUser feature
|
||||||
|
|
||||||
|
**Bad:**
|
||||||
|
- RED: Test multiple features at once
|
||||||
|
- GREEN: Implement entire module
|
||||||
|
- COMMIT: Massive commit with unrelated changes
|
||||||
|
|
||||||
|
### 2. Meaningful Test Names
|
||||||
|
|
||||||
|
Tests should clearly describe what they're validating:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Good
|
||||||
|
it('should return 404 when user not found', async () => {
|
||||||
|
const result = await getUser('nonexistent');
|
||||||
|
expect(result.status).toBe(404);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Bad
|
||||||
|
it('test 1', () => {
|
||||||
|
// what does this test?
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Minimal Implementation
|
||||||
|
|
||||||
|
In GREEN phase, write just enough code to pass the test:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Good - minimal implementation
|
||||||
|
function getUser(id: string) {
|
||||||
|
if (id === 'nonexistent') {
|
||||||
|
return { status: 404 };
|
||||||
|
}
|
||||||
|
return { status: 200, data: users[id] };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bad - over-engineering
|
||||||
|
function getUser(id: string) {
|
||||||
|
// Adds caching, validation, logging, etc. that isn't tested
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Keep Tests Fast
|
||||||
|
|
||||||
|
Fast tests mean fast feedback:
|
||||||
|
- Avoid network calls (use mocks)
|
||||||
|
- Avoid file system operations (use in-memory)
|
||||||
|
- Avoid waiting/sleeping
|
||||||
|
|
||||||
|
### 5. Commit Message Quality
|
||||||
|
|
||||||
|
Let TaskMaster generate commit messages - they include:
|
||||||
|
- Conventional commit format (feat, fix, refactor, etc.)
|
||||||
|
- Subtask context and ID
|
||||||
|
- Workflow metadata
|
||||||
|
- Co-authorship attribution
|
||||||
|
|
||||||
|
## MCP Integration (Advanced)
|
||||||
|
|
||||||
|
For programmatic integration, use MCP tools instead of CLI:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { MCPClient } from '@modelcontextprotocol/sdk';
|
||||||
|
|
||||||
|
const client = new MCPClient();
|
||||||
|
|
||||||
|
// Start workflow
|
||||||
|
const start = await client.call('autopilot_start', {
|
||||||
|
taskId: '7',
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get next action
|
||||||
|
const next = await client.call('autopilot_next', {
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Complete phase
|
||||||
|
const complete = await client.call('autopilot_complete_phase', {
|
||||||
|
projectRoot: '/path/to/project',
|
||||||
|
testResults: { total: 1, passed: 0, failed: 1, skipped: 0 }
|
||||||
|
});
|
||||||
|
|
||||||
|
// Commit
|
||||||
|
const commit = await client.call('autopilot_commit', {
|
||||||
|
projectRoot: '/path/to/project'
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
See [AI Agent Integration Guide](../ai-agent-integration.mdx) for complete MCP documentation.
|
||||||
|
|
||||||
|
## Workflow State Files
|
||||||
|
|
||||||
|
TaskMaster persists workflow state to `.taskmaster/workflow-state.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"phase": "SUBTASK_LOOP",
|
||||||
|
"context": {
|
||||||
|
"taskId": "7",
|
||||||
|
"subtasks": [...],
|
||||||
|
"currentSubtaskIndex": 0,
|
||||||
|
"currentTDDPhase": "RED",
|
||||||
|
"branchName": "task-7",
|
||||||
|
"errors": [],
|
||||||
|
"metadata": {
|
||||||
|
"startedAt": "2025-01-10T..."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important:** Never manually edit this file. Use CLI/MCP tools only.
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
project/
|
||||||
|
├── .taskmaster/
|
||||||
|
│ ├── workflow-state.json # Current workflow state
|
||||||
|
│ ├── tasks/
|
||||||
|
│ │ └── tasks.json # Task definitions
|
||||||
|
│ └── docs/
|
||||||
|
│ └── prd.txt # Product requirements
|
||||||
|
├── src/ # Implementation files
|
||||||
|
├── tests/ # Test files
|
||||||
|
└── package.json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Additional Resources
|
||||||
|
|
||||||
|
- [AI Agent Integration Guide](../ai-agent-integration.mdx) - Complete integration documentation
|
||||||
|
- [Command Reference](../command-reference.mdx) - All CLI commands
|
||||||
|
- [Task Structure](../task-structure.mdx) - Understanding tasks and subtasks
|
||||||
|
- [MCP Provider Guide](../mcp-provider-guide.mdx) - MCP integration details
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
**Q: Workflow won't start**
|
||||||
|
A: Check that task has subtasks (`tm show <taskId>`) and git working tree is clean
|
||||||
|
|
||||||
|
**Q: Can't complete RED phase**
|
||||||
|
A: Verify at least one test is actually failing (not skipped, not passing)
|
||||||
|
|
||||||
|
**Q: Can't complete GREEN phase**
|
||||||
|
A: Verify ALL tests pass (zero failures)
|
||||||
|
|
||||||
|
**Q: Commit fails**
|
||||||
|
A: Check that you've made changes and they're staged (or stageable)
|
||||||
|
|
||||||
|
**Q: State seems wrong**
|
||||||
|
A: Check `.taskmaster/workflow-state.json` or run `tm autopilot status`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**For detailed documentation, see:** [AI Agent Integration Guide](../ai-agent-integration.mdx)
|
||||||
478
apps/docs/tdd-workflow/templates/example-prompts.mdx
Normal file
478
apps/docs/tdd-workflow/templates/example-prompts.mdx
Normal file
@@ -0,0 +1,478 @@
|
|||||||
|
---
|
||||||
|
title: "Example Prompts"
|
||||||
|
description: "Collection of effective prompts for AI agents working with TaskMaster's TDD workflow system"
|
||||||
|
---
|
||||||
|
|
||||||
|
Collection of effective prompts for AI agents working with TaskMaster's TDD workflow system.
|
||||||
|
|
||||||
|
## Getting Started Prompts
|
||||||
|
|
||||||
|
### Start a Task
|
||||||
|
|
||||||
|
```
|
||||||
|
I want to implement Task 7 using TDD workflow. Please:
|
||||||
|
1. Start the autopilot workflow
|
||||||
|
2. Show me the first subtask to implement
|
||||||
|
3. Begin the RED-GREEN-COMMIT cycle
|
||||||
|
```
|
||||||
|
|
||||||
|
### Resume Work
|
||||||
|
|
||||||
|
```
|
||||||
|
I have an in-progress workflow. Please:
|
||||||
|
1. Resume the autopilot workflow
|
||||||
|
2. Show current status and progress
|
||||||
|
3. Continue from where we left off
|
||||||
|
```
|
||||||
|
|
||||||
|
### Understanding Current State
|
||||||
|
|
||||||
|
```
|
||||||
|
What's the current state of the workflow? Please show:
|
||||||
|
- Which subtask we're on
|
||||||
|
- Current TDD phase (RED/GREEN/COMMIT)
|
||||||
|
- Progress percentage
|
||||||
|
- Next action required
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Generation Prompts
|
||||||
|
|
||||||
|
### Basic Test Generation
|
||||||
|
|
||||||
|
```
|
||||||
|
We're in RED phase for subtask "{SUBTASK_TITLE}". Please:
|
||||||
|
1. Read the subtask requirements
|
||||||
|
2. Write a comprehensive test that validates the behavior
|
||||||
|
3. The test MUST fail because the feature doesn't exist yet
|
||||||
|
4. Use the project's testing framework (vitest/jest/etc)
|
||||||
|
5. Follow the project's test file conventions
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test for Specific Feature
|
||||||
|
|
||||||
|
```
|
||||||
|
For subtask: "Implement user authentication endpoint"
|
||||||
|
|
||||||
|
Write a failing test that:
|
||||||
|
1. Tests POST /api/auth/login
|
||||||
|
2. Validates request body (email, password)
|
||||||
|
3. Checks response format and status codes
|
||||||
|
4. Uses proper mocking for database calls
|
||||||
|
5. Follows security best practices
|
||||||
|
```
|
||||||
|
|
||||||
|
### Edge Case Testing
|
||||||
|
|
||||||
|
```
|
||||||
|
The basic happy path test is passing. Now write additional tests for:
|
||||||
|
1. Error cases (invalid input, missing fields)
|
||||||
|
2. Edge cases (empty strings, null values, etc.)
|
||||||
|
3. Security concerns (SQL injection, XSS)
|
||||||
|
4. Performance expectations (timeout, rate limits)
|
||||||
|
|
||||||
|
Each test should initially fail.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test Refactoring
|
||||||
|
|
||||||
|
```
|
||||||
|
Our tests are passing but could be improved. Please:
|
||||||
|
1. Review existing tests for duplication
|
||||||
|
2. Extract common setup into beforeEach/fixtures
|
||||||
|
3. Improve test descriptions for clarity
|
||||||
|
4. Add missing edge cases
|
||||||
|
5. Ensure all new tests fail first (RED phase)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Implementation Prompts
|
||||||
|
|
||||||
|
### Basic Implementation
|
||||||
|
|
||||||
|
```
|
||||||
|
We're in GREEN phase. The test is failing with: {ERROR_MESSAGE}
|
||||||
|
|
||||||
|
Please:
|
||||||
|
1. Implement the minimal code to make this test pass
|
||||||
|
2. Don't over-engineer or add features not tested
|
||||||
|
3. Follow the project's code style and patterns
|
||||||
|
4. Ensure the implementation is clean and readable
|
||||||
|
```
|
||||||
|
|
||||||
|
### Implementation with Constraints
|
||||||
|
|
||||||
|
```
|
||||||
|
Implement the feature to pass the test, but:
|
||||||
|
- Use TypeScript with strict type checking
|
||||||
|
- Follow SOLID principles
|
||||||
|
- Keep functions under 20 lines
|
||||||
|
- Use dependency injection where appropriate
|
||||||
|
- Add JSDoc comments for public APIs
|
||||||
|
```
|
||||||
|
|
||||||
|
### Fix Failing Tests
|
||||||
|
|
||||||
|
```
|
||||||
|
GREEN phase validation failed - {N} tests still failing.
|
||||||
|
|
||||||
|
Please:
|
||||||
|
1. Review the failing test output
|
||||||
|
2. Identify what's not working
|
||||||
|
3. Fix the implementation to pass all tests
|
||||||
|
4. Don't modify tests to make them pass
|
||||||
|
5. Explain what was wrong
|
||||||
|
```
|
||||||
|
|
||||||
|
### Refactor Implementation
|
||||||
|
|
||||||
|
```
|
||||||
|
Tests are passing but code quality needs improvement:
|
||||||
|
1. Extract repeated logic into functions
|
||||||
|
2. Improve variable names
|
||||||
|
3. Add error handling
|
||||||
|
4. Optimize performance if needed
|
||||||
|
5. Ensure tests still pass after refactoring
|
||||||
|
```
|
||||||
|
|
||||||
|
## Debugging Prompts
|
||||||
|
|
||||||
|
### Test Output Parsing
|
||||||
|
|
||||||
|
```
|
||||||
|
Here's the test output:
|
||||||
|
{PASTE_TEST_OUTPUT}
|
||||||
|
|
||||||
|
Please parse this into the required JSON format:
|
||||||
|
{
|
||||||
|
"total": N,
|
||||||
|
"passed": N,
|
||||||
|
"failed": N,
|
||||||
|
"skipped": N
|
||||||
|
}
|
||||||
|
|
||||||
|
Then complete the current phase.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Workflow Stuck
|
||||||
|
|
||||||
|
```
|
||||||
|
The workflow seems stuck. Please:
|
||||||
|
1. Check the current workflow status
|
||||||
|
2. Identify the issue
|
||||||
|
3. If corrupted, abort and restart
|
||||||
|
4. Explain what went wrong and how to prevent it
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase Validation Failing
|
||||||
|
|
||||||
|
```
|
||||||
|
I'm getting: "RED phase validation failed: no test failures"
|
||||||
|
|
||||||
|
Please:
|
||||||
|
1. Review the test I just wrote
|
||||||
|
2. Identify why it's not actually testing new behavior
|
||||||
|
3. Rewrite the test to properly fail
|
||||||
|
4. Explain what makes a good failing test
|
||||||
|
```
|
||||||
|
|
||||||
|
### Git Issues
|
||||||
|
|
||||||
|
```
|
||||||
|
Getting git errors when trying to start workflow:
|
||||||
|
{PASTE_ERROR}
|
||||||
|
|
||||||
|
Please:
|
||||||
|
1. Diagnose the git issue
|
||||||
|
2. Provide commands to fix it
|
||||||
|
3. Restart the workflow once fixed
|
||||||
|
```
|
||||||
|
|
||||||
|
## Advanced Patterns
|
||||||
|
|
||||||
|
### Parallel Test Generation
|
||||||
|
|
||||||
|
```
|
||||||
|
We have 3 subtasks to implement. For efficiency:
|
||||||
|
1. Read all 3 subtask descriptions
|
||||||
|
2. Plan the test structure for each
|
||||||
|
3. Identify shared test utilities needed
|
||||||
|
4. Generate tests for subtask 1 (they should fail)
|
||||||
|
5. Once we complete 1, move to 2, then 3
|
||||||
|
```
|
||||||
|
|
||||||
|
### Integration Test Strategy
|
||||||
|
|
||||||
|
```
|
||||||
|
This subtask requires integration testing. Please:
|
||||||
|
1. Set up test database/environment
|
||||||
|
2. Write integration tests that exercise the full stack
|
||||||
|
3. Use proper cleanup in afterEach
|
||||||
|
4. Mock external services (APIs, third-party)
|
||||||
|
5. Ensure tests are deterministic and fast
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test-Driven Refactoring
|
||||||
|
|
||||||
|
```
|
||||||
|
We need to refactor {MODULE_NAME} but keep behavior unchanged:
|
||||||
|
1. First, write comprehensive tests for current behavior
|
||||||
|
2. Ensure all tests pass (document current state)
|
||||||
|
3. Refactor the implementation
|
||||||
|
4. Verify all tests still pass
|
||||||
|
5. Commit the refactoring
|
||||||
|
```
|
||||||
|
|
||||||
|
### Complex Feature Implementation
|
||||||
|
|
||||||
|
```
|
||||||
|
Subtask: "{COMPLEX_SUBTASK}"
|
||||||
|
|
||||||
|
This is complex. Let's break it down:
|
||||||
|
1. Identify the core functionality to test
|
||||||
|
2. Write tests for the simplest version
|
||||||
|
3. Implement minimal working code
|
||||||
|
4. Commit that cycle
|
||||||
|
5. Then iteratively add more tests for additional features
|
||||||
|
6. Each iteration is a RED-GREEN-COMMIT cycle
|
||||||
|
```
|
||||||
|
|
||||||
|
### Performance Testing
|
||||||
|
|
||||||
|
```
|
||||||
|
Write performance tests for {FEATURE}:
|
||||||
|
1. Measure baseline performance (current state)
|
||||||
|
2. Write test that fails if operation takes > {N}ms
|
||||||
|
3. Implement optimizations to pass the test
|
||||||
|
4. Document performance improvements
|
||||||
|
5. Consider edge cases (large inputs, concurrent requests)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Security Testing
|
||||||
|
|
||||||
|
```
|
||||||
|
Write security-focused tests for {FEATURE}:
|
||||||
|
1. Test input validation (injection attacks)
|
||||||
|
2. Test authentication/authorization
|
||||||
|
3. Test data sanitization
|
||||||
|
4. Test rate limiting
|
||||||
|
5. Document security assumptions
|
||||||
|
|
||||||
|
Each test should initially fail and demonstrate the vulnerability.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Workflow Automation Patterns
|
||||||
|
|
||||||
|
### Full Autonomous Mode
|
||||||
|
|
||||||
|
```
|
||||||
|
Implement Task {TASK_ID} completely autonomously:
|
||||||
|
1. Start the workflow
|
||||||
|
2. For each subtask:
|
||||||
|
a. Read requirements
|
||||||
|
b. Write failing tests
|
||||||
|
c. Implement to pass tests
|
||||||
|
d. Commit changes
|
||||||
|
3. Continue until all subtasks complete
|
||||||
|
4. Report final status
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- Never skip the RED phase
|
||||||
|
- Always verify tests fail first
|
||||||
|
- Implement minimal working code
|
||||||
|
- Commit after each subtask
|
||||||
|
- Handle errors gracefully with retries
|
||||||
|
```
|
||||||
|
|
||||||
|
### Supervised Mode
|
||||||
|
|
||||||
|
```
|
||||||
|
Work on Task {TASK_ID} with human oversight:
|
||||||
|
1. Start workflow and show plan
|
||||||
|
2. For each subtask:
|
||||||
|
a. Show test plan, wait for approval
|
||||||
|
b. Write and run tests, show results
|
||||||
|
c. Show implementation plan, wait for approval
|
||||||
|
d. Implement and verify
|
||||||
|
e. Show commit message, wait for approval
|
||||||
|
3. Request feedback between subtasks
|
||||||
|
```
|
||||||
|
|
||||||
|
### Review Mode
|
||||||
|
|
||||||
|
```
|
||||||
|
Review the current workflow state:
|
||||||
|
1. Show all completed subtasks and their commits
|
||||||
|
2. Identify remaining subtasks
|
||||||
|
3. Check test coverage
|
||||||
|
4. Verify git history is clean
|
||||||
|
5. Recommend next steps
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Recovery Patterns
|
||||||
|
|
||||||
|
### Retry with Learning
|
||||||
|
|
||||||
|
```
|
||||||
|
The {PHASE} phase failed {N} times. Please:
|
||||||
|
1. Review all previous attempts
|
||||||
|
2. Identify the pattern of failures
|
||||||
|
3. Propose a different approach
|
||||||
|
4. Explain why this approach should work
|
||||||
|
5. Implement with the new approach
|
||||||
|
```
|
||||||
|
|
||||||
|
### Escalate to Human
|
||||||
|
|
||||||
|
```
|
||||||
|
After {MAX_ATTEMPTS} attempts, unable to complete {SUBTASK}.
|
||||||
|
|
||||||
|
Please:
|
||||||
|
1. Document what was tried
|
||||||
|
2. Explain what's not working
|
||||||
|
3. Provide relevant code and test output
|
||||||
|
4. Suggest where human expertise is needed
|
||||||
|
5. Save current state for manual intervention
|
||||||
|
```
|
||||||
|
|
||||||
|
### Reset and Restart
|
||||||
|
|
||||||
|
```
|
||||||
|
Workflow is in an inconsistent state. Please:
|
||||||
|
1. Save any valuable work
|
||||||
|
2. Abort the current workflow
|
||||||
|
3. Explain what went wrong
|
||||||
|
4. Propose a better approach
|
||||||
|
5. Restart with improved strategy
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example Complete Session
|
||||||
|
|
||||||
|
```
|
||||||
|
I need to implement Task 7 which has 5 subtasks. Please work autonomously with these preferences:
|
||||||
|
|
||||||
|
1. Testing Framework: vitest
|
||||||
|
2. Code Style: TypeScript strict mode, functional style preferred
|
||||||
|
3. Commit Style: Conventional commits with detailed messages
|
||||||
|
4. Review: Show me status after each subtask completion
|
||||||
|
|
||||||
|
Workflow:
|
||||||
|
1. Start autopilot for task 7
|
||||||
|
2. For each subtask (7.1 through 7.5):
|
||||||
|
- RED: Write comprehensive failing tests
|
||||||
|
- GREEN: Implement minimal code to pass
|
||||||
|
- COMMIT: Auto-generate commit and advance
|
||||||
|
3. Final: Show summary of all commits and changes
|
||||||
|
|
||||||
|
Error Handling:
|
||||||
|
- If phase validation fails, explain why and retry
|
||||||
|
- If tests are flaky, identify and fix
|
||||||
|
- If stuck after 3 attempts, ask for help
|
||||||
|
|
||||||
|
Let's begin!
|
||||||
|
```
|
||||||
|
|
||||||
|
## Tips for Effective Prompts
|
||||||
|
|
||||||
|
### 1. Be Specific About Context
|
||||||
|
|
||||||
|
**Good:**
|
||||||
|
```
|
||||||
|
For the UserAuthenticationService in src/services/auth.ts,
|
||||||
|
write tests for the login method using vitest.
|
||||||
|
```
|
||||||
|
|
||||||
|
**Bad:**
|
||||||
|
```
|
||||||
|
Write tests for authentication.
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Specify Success Criteria
|
||||||
|
|
||||||
|
**Good:**
|
||||||
|
```
|
||||||
|
Tests should cover:
|
||||||
|
1. Successful login with valid credentials
|
||||||
|
2. Failed login with invalid password
|
||||||
|
3. Account lockout after 5 failures
|
||||||
|
4. Rate limiting (max 10 attempts/minute)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Bad:**
|
||||||
|
```
|
||||||
|
Test login functionality.
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Request Explanations
|
||||||
|
|
||||||
|
**Good:**
|
||||||
|
```
|
||||||
|
Implement the feature and explain:
|
||||||
|
1. Why this approach was chosen
|
||||||
|
2. What edge cases are handled
|
||||||
|
3. What assumptions were made
|
||||||
|
```
|
||||||
|
|
||||||
|
**Bad:**
|
||||||
|
```
|
||||||
|
Just implement it.
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Include Project Context
|
||||||
|
|
||||||
|
**Good:**
|
||||||
|
```
|
||||||
|
Following the existing pattern in src/models/,
|
||||||
|
create a User model that:
|
||||||
|
- Extends BaseModel
|
||||||
|
- Uses Zod for validation
|
||||||
|
- Includes proper TypeScript types
|
||||||
|
```
|
||||||
|
|
||||||
|
**Bad:**
|
||||||
|
```
|
||||||
|
Create a user model.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting Prompts
|
||||||
|
|
||||||
|
### When Tests Won't Fail
|
||||||
|
|
||||||
|
```
|
||||||
|
My test is passing when it should fail. Please:
|
||||||
|
1. Review the test code
|
||||||
|
2. Identify why it's passing
|
||||||
|
3. Check if implementation already exists
|
||||||
|
4. Rewrite test to actually test new behavior
|
||||||
|
5. Verify it fails this time
|
||||||
|
```
|
||||||
|
|
||||||
|
### When Implementation is Incomplete
|
||||||
|
|
||||||
|
```
|
||||||
|
Tests are still failing after implementation. Please:
|
||||||
|
1. Show me the failing test output
|
||||||
|
2. Review the implementation
|
||||||
|
3. Identify what's missing
|
||||||
|
4. Fix the implementation
|
||||||
|
5. Verify all tests pass
|
||||||
|
```
|
||||||
|
|
||||||
|
### When Workflow Won't Advance
|
||||||
|
|
||||||
|
```
|
||||||
|
Can't complete the phase. Getting error: {ERROR}
|
||||||
|
|
||||||
|
Please:
|
||||||
|
1. Check workflow status
|
||||||
|
2. Verify test results format is correct
|
||||||
|
3. Check if phase validation requirements are met
|
||||||
|
4. If needed, show me how to manually fix state
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Additional Resources
|
||||||
|
|
||||||
|
- [AI Agent Integration Guide](../ai-agent-integration.mdx)
|
||||||
|
- [CLAUDE.md Template](./CLAUDE.md.template)
|
||||||
|
- [Command Reference](../command-reference.mdx)
|
||||||
|
- [Testing Best Practices](./testing-best-practices.mdx)
|
||||||
54
apps/mcp/package.json
Normal file
54
apps/mcp/package.json
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
{
|
||||||
|
"name": "@tm/mcp",
|
||||||
|
"description": "Task Master MCP Tools - TypeScript MCP server tools for AI agent integration",
|
||||||
|
"type": "module",
|
||||||
|
"private": true,
|
||||||
|
"version": "0.28.0-rc.2",
|
||||||
|
"main": "./dist/index.js",
|
||||||
|
"types": "./src/index.ts",
|
||||||
|
"exports": {
|
||||||
|
".": "./src/index.ts",
|
||||||
|
"./tools/autopilot": "./src/tools/autopilot/index.ts"
|
||||||
|
},
|
||||||
|
"files": ["dist", "README.md"],
|
||||||
|
"scripts": {
|
||||||
|
"typecheck": "tsc --noEmit",
|
||||||
|
"lint": "biome check src",
|
||||||
|
"format": "biome format --write src",
|
||||||
|
"test": "vitest run",
|
||||||
|
"test:watch": "vitest",
|
||||||
|
"test:coverage": "vitest run --coverage",
|
||||||
|
"test:unit": "vitest run -t unit",
|
||||||
|
"test:integration": "vitest run -t integration",
|
||||||
|
"test:ci": "vitest run --coverage --reporter=dot"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"@tm/core": "*",
|
||||||
|
"zod": "^4.1.11",
|
||||||
|
"fastmcp": "^3.19.2"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"@biomejs/biome": "^1.9.4",
|
||||||
|
"@types/node": "^22.10.5",
|
||||||
|
"typescript": "^5.9.2",
|
||||||
|
"vitest": "^3.2.4"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18.0.0"
|
||||||
|
},
|
||||||
|
"keywords": [
|
||||||
|
"task-master",
|
||||||
|
"mcp",
|
||||||
|
"mcp-server",
|
||||||
|
"ai-agent",
|
||||||
|
"workflow",
|
||||||
|
"tdd"
|
||||||
|
],
|
||||||
|
"author": "",
|
||||||
|
"license": "MIT",
|
||||||
|
"typesVersions": {
|
||||||
|
"*": {
|
||||||
|
"*": ["src/*"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
8
apps/mcp/src/index.ts
Normal file
8
apps/mcp/src/index.ts
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Main entry point for @tm/mcp package
|
||||||
|
* Exports all MCP tool registration functions
|
||||||
|
*/
|
||||||
|
|
||||||
|
export * from './tools/autopilot/index.js';
|
||||||
|
export * from './shared/utils.js';
|
||||||
|
export * from './shared/types.js';
|
||||||
36
apps/mcp/src/shared/types.ts
Normal file
36
apps/mcp/src/shared/types.ts
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
/**
|
||||||
|
* Shared types for MCP tools
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
 * Standard response envelope for MCP tool results.
 * `T` is the payload type carried in `data` on success.
 */
export interface MCPResponse<T = any> {
	// True when the operation succeeded; `data` is then populated.
	success: boolean;
	// Result payload (present on success).
	data?: T;
	// Error details (present on failure).
	error?: {
		// Machine-readable error code.
		code: string;
		// Human-readable error description.
		message: string;
		// Optional hint on how to resolve the error.
		suggestion?: string;
		// Extra diagnostic data; structure varies by tool.
		details?: any;
	};
	// Version info of the responding server.
	version?: {
		version: string;
		name: string;
	};
	// Tag context for the project, when resolvable.
	// NOTE(review): handleApiResult in shared/utils.ts assigns a plain string
	// to its payload's `tag` field — confirm whether this object shape or a
	// bare string is the intended contract.
	tag?: {
		currentTag: string;
		availableTags: string[];
	};
}
|
||||||
|
|
||||||
|
/**
 * Execution context passed to MCP tool handlers.
 */
export interface MCPContext {
	// Leveled logger supplied by the MCP server.
	log: {
		info: (message: string) => void;
		warn: (message: string) => void;
		error: (message: string) => void;
		debug: (message: string) => void;
	};
	// MCP session object; shape depends on the connecting client, so it is
	// treated as opaque here (see getProjectRootFromSession in shared/utils.ts
	// for the fields actually read).
	session: any;
}
|
||||||
|
|
||||||
|
/**
 * Mixin for tool argument types that require a project root path.
 */
export interface WithProjectRoot {
	// Absolute path to the project root directory.
	projectRoot: string;
}
|
||||||
257
apps/mcp/src/shared/utils.ts
Normal file
257
apps/mcp/src/shared/utils.ts
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
/**
|
||||||
|
* Shared utilities for MCP tools
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { ContentResult } from 'fastmcp';
|
||||||
|
import path from 'node:path';
|
||||||
|
import fs from 'node:fs';
|
||||||
|
import packageJson from '../../../../package.json' with { type: 'json' };
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get version information
|
||||||
|
*/
|
||||||
|
export function getVersionInfo() {
|
||||||
|
return {
|
||||||
|
version: packageJson.version || 'unknown',
|
||||||
|
name: packageJson.name || 'task-master-ai'
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current tag for a project root
|
||||||
|
*/
|
||||||
|
export function getCurrentTag(projectRoot: string): string | null {
|
||||||
|
try {
|
||||||
|
// Try to read current tag from state.json
|
||||||
|
const stateJsonPath = path.join(projectRoot, '.taskmaster', 'state.json');
|
||||||
|
|
||||||
|
if (fs.existsSync(stateJsonPath)) {
|
||||||
|
const stateData = JSON.parse(fs.readFileSync(stateJsonPath, 'utf-8'));
|
||||||
|
return stateData.currentTag || 'master';
|
||||||
|
}
|
||||||
|
|
||||||
|
return 'master';
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle API result with standardized error handling and response formatting
|
||||||
|
* This provides a consistent response structure for all MCP tools
|
||||||
|
*/
|
||||||
|
export async function handleApiResult<T>(options: {
|
||||||
|
result: { success: boolean; data?: T; error?: { message: string } };
|
||||||
|
log?: any;
|
||||||
|
errorPrefix?: string;
|
||||||
|
projectRoot?: string;
|
||||||
|
}): Promise<ContentResult> {
|
||||||
|
const { result, log, errorPrefix = 'API error', projectRoot } = options;
|
||||||
|
|
||||||
|
// Get version info for every response
|
||||||
|
const versionInfo = getVersionInfo();
|
||||||
|
|
||||||
|
// Get current tag if project root is provided
|
||||||
|
const currentTag = projectRoot ? getCurrentTag(projectRoot) : null;
|
||||||
|
|
||||||
|
if (!result.success) {
|
||||||
|
const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
|
||||||
|
log?.error?.(`${errorPrefix}: ${errorMsg}`);
|
||||||
|
|
||||||
|
let errorText = `Error: ${errorMsg}\nVersion: ${versionInfo.version}\nName: ${versionInfo.name}`;
|
||||||
|
|
||||||
|
if (currentTag) {
|
||||||
|
errorText += `\nCurrent Tag: ${currentTag}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'text',
|
||||||
|
text: errorText
|
||||||
|
}
|
||||||
|
],
|
||||||
|
isError: true
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
log?.info?.('Successfully completed operation');
|
||||||
|
|
||||||
|
// Create the response payload including version info and tag
|
||||||
|
const responsePayload: any = {
|
||||||
|
data: result.data,
|
||||||
|
version: versionInfo
|
||||||
|
};
|
||||||
|
|
||||||
|
// Add current tag if available
|
||||||
|
if (currentTag) {
|
||||||
|
responsePayload.tag = currentTag;
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'text',
|
||||||
|
text: JSON.stringify(responsePayload, null, 2)
|
||||||
|
}
|
||||||
|
]
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize project root path (handles URI encoding, file:// protocol, Windows paths)
|
||||||
|
*/
|
||||||
|
export function normalizeProjectRoot(rawPath: string): string {
|
||||||
|
if (!rawPath) return process.cwd();
|
||||||
|
|
||||||
|
try {
|
||||||
|
let pathString = rawPath;
|
||||||
|
|
||||||
|
// Decode URI encoding
|
||||||
|
try {
|
||||||
|
pathString = decodeURIComponent(pathString);
|
||||||
|
} catch {
|
||||||
|
// If decoding fails, use as-is
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strip file:// prefix
|
||||||
|
if (pathString.startsWith('file:///')) {
|
||||||
|
pathString = pathString.slice(7);
|
||||||
|
} else if (pathString.startsWith('file://')) {
|
||||||
|
pathString = pathString.slice(7);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle Windows drive letter after stripping prefix (e.g., /C:/...)
|
||||||
|
if (
|
||||||
|
pathString.startsWith('/') &&
|
||||||
|
/[A-Za-z]:/.test(pathString.substring(1, 3))
|
||||||
|
) {
|
||||||
|
pathString = pathString.substring(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize backslashes to forward slashes
|
||||||
|
pathString = pathString.replace(/\\/g, '/');
|
||||||
|
|
||||||
|
// Resolve to absolute path
|
||||||
|
return path.resolve(pathString);
|
||||||
|
} catch {
|
||||||
|
return path.resolve(rawPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get project root from session object
|
||||||
|
*/
|
||||||
|
function getProjectRootFromSession(session: any): string | null {
|
||||||
|
try {
|
||||||
|
// Check primary location
|
||||||
|
if (session?.roots?.[0]?.uri) {
|
||||||
|
return normalizeProjectRoot(session.roots[0].uri);
|
||||||
|
}
|
||||||
|
// Check alternate location
|
||||||
|
else if (session?.roots?.roots?.[0]?.uri) {
|
||||||
|
return normalizeProjectRoot(session.roots.roots[0].uri);
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Wrapper to normalize project root in args with proper precedence order
|
||||||
|
*
|
||||||
|
* PRECEDENCE ORDER:
|
||||||
|
* 1. TASK_MASTER_PROJECT_ROOT environment variable (from process.env or session)
|
||||||
|
* 2. args.projectRoot (explicitly provided)
|
||||||
|
* 3. Session-based project root resolution
|
||||||
|
* 4. Current directory fallback
|
||||||
|
*/
|
||||||
|
export function withNormalizedProjectRoot<T extends { projectRoot?: string }>(
|
||||||
|
fn: (
|
||||||
|
args: T & { projectRoot: string },
|
||||||
|
context: any
|
||||||
|
) => Promise<ContentResult>
|
||||||
|
): (args: T, context: any) => Promise<ContentResult> {
|
||||||
|
return async (args: T, context: any): Promise<ContentResult> => {
|
||||||
|
const { log, session } = context;
|
||||||
|
let normalizedRoot: string | null = null;
|
||||||
|
let rootSource = 'unknown';
|
||||||
|
|
||||||
|
try {
|
||||||
|
// 1. Check for TASK_MASTER_PROJECT_ROOT environment variable first
|
||||||
|
if (process.env.TASK_MASTER_PROJECT_ROOT) {
|
||||||
|
const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
|
||||||
|
normalizedRoot = path.isAbsolute(envRoot)
|
||||||
|
? envRoot
|
||||||
|
: path.resolve(process.cwd(), envRoot);
|
||||||
|
rootSource = 'TASK_MASTER_PROJECT_ROOT environment variable';
|
||||||
|
log?.info?.(`Using project root from ${rootSource}: ${normalizedRoot}`);
|
||||||
|
}
|
||||||
|
// Also check session environment variables for TASK_MASTER_PROJECT_ROOT
|
||||||
|
else if (session?.env?.TASK_MASTER_PROJECT_ROOT) {
|
||||||
|
const envRoot = session.env.TASK_MASTER_PROJECT_ROOT;
|
||||||
|
normalizedRoot = path.isAbsolute(envRoot)
|
||||||
|
? envRoot
|
||||||
|
: path.resolve(process.cwd(), envRoot);
|
||||||
|
rootSource = 'TASK_MASTER_PROJECT_ROOT session environment variable';
|
||||||
|
log?.info?.(`Using project root from ${rootSource}: ${normalizedRoot}`);
|
||||||
|
}
|
||||||
|
// 2. If no environment variable, try args.projectRoot
|
||||||
|
else if (args.projectRoot) {
|
||||||
|
normalizedRoot = normalizeProjectRoot(args.projectRoot);
|
||||||
|
rootSource = 'args.projectRoot';
|
||||||
|
log?.info?.(`Using project root from ${rootSource}: ${normalizedRoot}`);
|
||||||
|
}
|
||||||
|
// 3. If no args.projectRoot, try session-based resolution
|
||||||
|
else {
|
||||||
|
const sessionRoot = getProjectRootFromSession(session);
|
||||||
|
if (sessionRoot) {
|
||||||
|
normalizedRoot = sessionRoot;
|
||||||
|
rootSource = 'session';
|
||||||
|
log?.info?.(
|
||||||
|
`Using project root from ${rootSource}: ${normalizedRoot}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!normalizedRoot) {
|
||||||
|
log?.error?.(
|
||||||
|
'Could not determine project root from environment, args, or session.'
|
||||||
|
);
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'Could not determine project root. Please provide projectRoot argument or ensure TASK_MASTER_PROJECT_ROOT environment variable is set.'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inject the normalized root back into args
|
||||||
|
const updatedArgs = { ...args, projectRoot: normalizedRoot } as T & {
|
||||||
|
projectRoot: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Execute the original function with normalized root in args
|
||||||
|
return await fn(updatedArgs, context);
|
||||||
|
} catch (error: any) {
|
||||||
|
log?.error?.(
|
||||||
|
`Error within withNormalizedProjectRoot HOF (Normalized Root: ${normalizedRoot}): ${error.message}`
|
||||||
|
);
|
||||||
|
if (error.stack && log?.debug) {
|
||||||
|
log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Operation failed: ${error.message}`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
99
apps/mcp/src/tools/autopilot/abort.tool.ts
Normal file
99
apps/mcp/src/tools/autopilot/abort.tool.ts
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-abort MCP tool
|
||||||
|
* Abort a running TDD workflow and clean up state
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { WorkflowService } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const AbortSchema = z.object({
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory')
|
||||||
|
});
|
||||||
|
|
||||||
|
type AbortArgs = z.infer<typeof AbortSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_abort tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotAbortTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_abort',
|
||||||
|
description:
|
||||||
|
'Abort the current TDD workflow and clean up workflow state. This will remove the workflow state file but will NOT delete the git branch or any code changes.',
|
||||||
|
parameters: AbortSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: AbortArgs, context: MCPContext) => {
|
||||||
|
const { projectRoot } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(`Aborting autopilot workflow in ${projectRoot}`);
|
||||||
|
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check if workflow exists
|
||||||
|
const hasWorkflow = await workflowService.hasWorkflow();
|
||||||
|
|
||||||
|
if (!hasWorkflow) {
|
||||||
|
context.log.warn('No active workflow to abort');
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
message: 'No active workflow to abort',
|
||||||
|
hadWorkflow: false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get info before aborting
|
||||||
|
await workflowService.resumeWorkflow();
|
||||||
|
const status = workflowService.getStatus();
|
||||||
|
|
||||||
|
// Abort workflow
|
||||||
|
await workflowService.abortWorkflow();
|
||||||
|
|
||||||
|
context.log.info('Workflow state deleted');
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
message: 'Workflow aborted',
|
||||||
|
hadWorkflow: true,
|
||||||
|
taskId: status.taskId,
|
||||||
|
branchName: status.branchName,
|
||||||
|
note: 'Git branch and code changes were preserved. You can manually clean them up if needed.'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-abort: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: `Failed to abort workflow: ${error.message}` }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
240
apps/mcp/src/tools/autopilot/commit.tool.ts
Normal file
240
apps/mcp/src/tools/autopilot/commit.tool.ts
Normal file
@@ -0,0 +1,240 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-commit MCP tool
|
||||||
|
* Create a git commit with automatic staging and message generation
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { WorkflowService, GitAdapter, CommitMessageGenerator } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const CommitSchema = z.object({
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory'),
|
||||||
|
files: z
|
||||||
|
.array(z.string())
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Specific files to stage (relative to project root). If not provided, stages all changes.'
|
||||||
|
),
|
||||||
|
customMessage: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.describe('Custom commit message to use instead of auto-generated message')
|
||||||
|
});
|
||||||
|
|
||||||
|
type CommitArgs = z.infer<typeof CommitSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_commit tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotCommitTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_commit',
|
||||||
|
description:
|
||||||
|
'Create a git commit with automatic staging, message generation, and metadata embedding. Generates appropriate commit messages based on subtask context and TDD phase.',
|
||||||
|
parameters: CommitSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: CommitArgs, context: MCPContext) => {
|
||||||
|
const { projectRoot, files, customMessage } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(`Creating commit for workflow in ${projectRoot}`);
|
||||||
|
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check if workflow exists
|
||||||
|
if (!(await workflowService.hasWorkflow())) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'No active workflow found. Start a workflow with autopilot_start'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume workflow
|
||||||
|
await workflowService.resumeWorkflow();
|
||||||
|
const status = workflowService.getStatus();
|
||||||
|
const workflowContext = workflowService.getContext();
|
||||||
|
|
||||||
|
// Verify we're in COMMIT phase
|
||||||
|
if (status.tddPhase !== 'COMMIT') {
|
||||||
|
context.log.warn(
|
||||||
|
`Not in COMMIT phase (currently in ${status.tddPhase})`
|
||||||
|
);
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Cannot commit: currently in ${status.tddPhase} phase. Complete the ${status.tddPhase} phase first using autopilot_complete_phase`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify there's an active subtask
|
||||||
|
if (!status.currentSubtask) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: 'No active subtask to commit' }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize git adapter
|
||||||
|
const gitAdapter = new GitAdapter(projectRoot);
|
||||||
|
|
||||||
|
// Stage files
|
||||||
|
try {
|
||||||
|
if (files && files.length > 0) {
|
||||||
|
await gitAdapter.stageFiles(files);
|
||||||
|
context.log.info(`Staged ${files.length} files`);
|
||||||
|
} else {
|
||||||
|
await gitAdapter.stageFiles(['.']);
|
||||||
|
context.log.info('Staged all changes');
|
||||||
|
}
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Failed to stage files: ${error.message}`);
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: `Failed to stage files: ${error.message}` }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if there are staged changes
|
||||||
|
const hasStagedChanges = await gitAdapter.hasStagedChanges();
|
||||||
|
if (!hasStagedChanges) {
|
||||||
|
context.log.warn('No staged changes to commit');
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'No staged changes to commit. Make code changes before committing'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get git status for message generation
|
||||||
|
const gitStatus = await gitAdapter.getStatus();
|
||||||
|
|
||||||
|
// Generate commit message
|
||||||
|
let commitMessage: string;
|
||||||
|
if (customMessage) {
|
||||||
|
commitMessage = customMessage;
|
||||||
|
context.log.info('Using custom commit message');
|
||||||
|
} else {
|
||||||
|
const messageGenerator = new CommitMessageGenerator();
|
||||||
|
|
||||||
|
// Determine commit type based on phase and subtask
|
||||||
|
// RED phase = test files, GREEN phase = implementation
|
||||||
|
const type = status.tddPhase === 'COMMIT' ? 'feat' : 'test';
|
||||||
|
|
||||||
|
// Use subtask title as description
|
||||||
|
const description = status.currentSubtask.title;
|
||||||
|
|
||||||
|
// Construct proper CommitMessageOptions
|
||||||
|
const options = {
|
||||||
|
type,
|
||||||
|
description,
|
||||||
|
changedFiles: gitStatus.staged,
|
||||||
|
taskId: status.taskId,
|
||||||
|
phase: status.tddPhase,
|
||||||
|
testsPassing: workflowContext.lastTestResults?.passed,
|
||||||
|
testsFailing: workflowContext.lastTestResults?.failed
|
||||||
|
};
|
||||||
|
|
||||||
|
commitMessage = messageGenerator.generateMessage(options);
|
||||||
|
context.log.info('Generated commit message automatically');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create commit
|
||||||
|
try {
|
||||||
|
await gitAdapter.createCommit(commitMessage);
|
||||||
|
context.log.info('Commit created successfully');
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Failed to create commit: ${error.message}`);
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: `Failed to create commit: ${error.message}` }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get last commit info
|
||||||
|
const lastCommit = await gitAdapter.getLastCommit();
|
||||||
|
|
||||||
|
// Complete COMMIT phase and advance workflow
|
||||||
|
const newStatus = await workflowService.commit();
|
||||||
|
|
||||||
|
context.log.info(
|
||||||
|
`Commit completed. Current phase: ${newStatus.tddPhase || newStatus.phase}`
|
||||||
|
);
|
||||||
|
|
||||||
|
const isComplete = newStatus.phase === 'COMPLETE';
|
||||||
|
|
||||||
|
// Get next action with guidance
|
||||||
|
const nextAction = workflowService.getNextAction();
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
message: isComplete
|
||||||
|
? 'Workflow completed successfully'
|
||||||
|
: 'Commit created and workflow advanced',
|
||||||
|
commitSha: lastCommit.sha,
|
||||||
|
commitMessage,
|
||||||
|
...newStatus,
|
||||||
|
isComplete,
|
||||||
|
nextAction: nextAction.action,
|
||||||
|
nextSteps: nextAction.nextSteps
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-commit: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: `Failed to commit: ${error.message}` }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
152
apps/mcp/src/tools/autopilot/complete.tool.ts
Normal file
152
apps/mcp/src/tools/autopilot/complete.tool.ts
Normal file
@@ -0,0 +1,152 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-complete MCP tool
|
||||||
|
* Complete the current TDD phase with test result validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { WorkflowService } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const CompletePhaseSchema = z.object({
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory'),
|
||||||
|
testResults: z
|
||||||
|
.object({
|
||||||
|
total: z.number().describe('Total number of tests'),
|
||||||
|
passed: z.number().describe('Number of passing tests'),
|
||||||
|
failed: z.number().describe('Number of failing tests'),
|
||||||
|
skipped: z.number().optional().describe('Number of skipped tests')
|
||||||
|
})
|
||||||
|
.describe('Test results from running the test suite')
|
||||||
|
});
|
||||||
|
|
||||||
|
type CompletePhaseArgs = z.infer<typeof CompletePhaseSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_complete_phase tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotCompleteTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_complete_phase',
|
||||||
|
description:
|
||||||
|
'Complete the current TDD phase (RED, GREEN, or COMMIT) with test result validation. RED phase: expects failures (if 0 failures, feature is already implemented and subtask auto-completes). GREEN phase: expects all tests passing.',
|
||||||
|
parameters: CompletePhaseSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: CompletePhaseArgs, context: MCPContext) => {
|
||||||
|
const { projectRoot, testResults } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(
|
||||||
|
`Completing current phase in workflow for ${projectRoot}`
|
||||||
|
);
|
||||||
|
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check if workflow exists
|
||||||
|
if (!(await workflowService.hasWorkflow())) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'No active workflow found. Start a workflow with autopilot_start'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume workflow to get current state
|
||||||
|
await workflowService.resumeWorkflow();
|
||||||
|
const currentStatus = workflowService.getStatus();
|
||||||
|
|
||||||
|
// Validate that we're in a TDD phase (RED or GREEN)
|
||||||
|
if (!currentStatus.tddPhase) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Cannot complete phase: not in a TDD phase (current phase: ${currentStatus.phase})`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// COMMIT phase completion is handled by autopilot_commit tool
|
||||||
|
if (currentStatus.tddPhase === 'COMMIT') {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'Cannot complete COMMIT phase with this tool. Use autopilot_commit instead'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map TDD phase to TestResult phase (only RED or GREEN allowed)
|
||||||
|
const phase = currentStatus.tddPhase as 'RED' | 'GREEN';
|
||||||
|
|
||||||
|
// Construct full TestResult with phase
|
||||||
|
const fullTestResults = {
|
||||||
|
total: testResults.total,
|
||||||
|
passed: testResults.passed,
|
||||||
|
failed: testResults.failed,
|
||||||
|
skipped: testResults.skipped ?? 0,
|
||||||
|
phase
|
||||||
|
};
|
||||||
|
|
||||||
|
// Complete phase with test results
|
||||||
|
const status = await workflowService.completePhase(fullTestResults);
|
||||||
|
const nextAction = workflowService.getNextAction();
|
||||||
|
|
||||||
|
context.log.info(
|
||||||
|
`Phase completed. New phase: ${status.tddPhase || status.phase}`
|
||||||
|
);
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
message: `Phase completed. Transitioned to ${status.tddPhase || status.phase}`,
|
||||||
|
...status,
|
||||||
|
nextAction: nextAction.action,
|
||||||
|
actionDescription: nextAction.description,
|
||||||
|
nextSteps: nextAction.nextSteps
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-complete: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Failed to complete phase: ${error.message}`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
114
apps/mcp/src/tools/autopilot/finalize.tool.ts
Normal file
114
apps/mcp/src/tools/autopilot/finalize.tool.ts
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-finalize MCP tool
|
||||||
|
* Finalize and complete the workflow with working tree validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { WorkflowService } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const FinalizeSchema = z.object({
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory')
|
||||||
|
});
|
||||||
|
|
||||||
|
type FinalizeArgs = z.infer<typeof FinalizeSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_finalize tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotFinalizeTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_finalize',
|
||||||
|
description:
|
||||||
|
'Finalize and complete the workflow. Validates that all changes are committed and working tree is clean before marking workflow as complete.',
|
||||||
|
parameters: FinalizeSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: FinalizeArgs, context: MCPContext) => {
|
||||||
|
const { projectRoot } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(`Finalizing workflow in ${projectRoot}`);
|
||||||
|
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check if workflow exists
|
||||||
|
if (!(await workflowService.hasWorkflow())) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'No active workflow found. Start a workflow with autopilot_start'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume workflow
|
||||||
|
await workflowService.resumeWorkflow();
|
||||||
|
const currentStatus = workflowService.getStatus();
|
||||||
|
|
||||||
|
// Verify we're in FINALIZE phase
|
||||||
|
if (currentStatus.phase !== 'FINALIZE') {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Cannot finalize: workflow is in ${currentStatus.phase} phase. Complete all subtasks first.`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finalize workflow (validates clean working tree)
|
||||||
|
const newStatus = await workflowService.finalizeWorkflow();
|
||||||
|
|
||||||
|
context.log.info('Workflow finalized successfully');
|
||||||
|
|
||||||
|
// Get next action
|
||||||
|
const nextAction = workflowService.getNextAction();
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
message: 'Workflow completed successfully',
|
||||||
|
...newStatus,
|
||||||
|
nextAction: nextAction.action,
|
||||||
|
nextSteps: nextAction.nextSteps
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-finalize: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Failed to finalize workflow: ${error.message}`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
13
apps/mcp/src/tools/autopilot/index.ts
Normal file
13
apps/mcp/src/tools/autopilot/index.ts
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Autopilot MCP tools index
|
||||||
|
* Exports all autopilot tool registration functions
|
||||||
|
*/
|
||||||
|
|
||||||
|
export { registerAutopilotStartTool } from './start.tool.js';
|
||||||
|
export { registerAutopilotResumeTool } from './resume.tool.js';
|
||||||
|
export { registerAutopilotNextTool } from './next.tool.js';
|
||||||
|
export { registerAutopilotStatusTool } from './status.tool.js';
|
||||||
|
export { registerAutopilotCompleteTool } from './complete.tool.js';
|
||||||
|
export { registerAutopilotCommitTool } from './commit.tool.js';
|
||||||
|
export { registerAutopilotFinalizeTool } from './finalize.tool.js';
|
||||||
|
export { registerAutopilotAbortTool } from './abort.tool.js';
|
||||||
99
apps/mcp/src/tools/autopilot/next.tool.ts
Normal file
99
apps/mcp/src/tools/autopilot/next.tool.ts
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-next MCP tool
|
||||||
|
* Get the next action to perform in the TDD workflow
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { WorkflowService } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const NextActionSchema = z.object({
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory')
|
||||||
|
});
|
||||||
|
|
||||||
|
type NextActionArgs = z.infer<typeof NextActionSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_next tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotNextTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_next',
|
||||||
|
description:
|
||||||
|
'Get the next action to perform in the TDD workflow. Returns detailed context about what needs to be done next, including the current phase, subtask, and expected actions.',
|
||||||
|
parameters: NextActionSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: NextActionArgs, context: MCPContext) => {
|
||||||
|
const { projectRoot } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(
|
||||||
|
`Getting next action for workflow in ${projectRoot}`
|
||||||
|
);
|
||||||
|
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check if workflow exists
|
||||||
|
if (!(await workflowService.hasWorkflow())) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'No active workflow found. Start a workflow with autopilot_start'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume to load state
|
||||||
|
await workflowService.resumeWorkflow();
|
||||||
|
|
||||||
|
// Get next action
|
||||||
|
const nextAction = workflowService.getNextAction();
|
||||||
|
const status = workflowService.getStatus();
|
||||||
|
|
||||||
|
context.log.info(`Next action determined: ${nextAction.action}`);
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
action: nextAction.action,
|
||||||
|
actionDescription: nextAction.description,
|
||||||
|
...status,
|
||||||
|
nextSteps: nextAction.nextSteps
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-next: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Failed to get next action: ${error.message}`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
95
apps/mcp/src/tools/autopilot/resume.tool.ts
Normal file
95
apps/mcp/src/tools/autopilot/resume.tool.ts
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-resume MCP tool
|
||||||
|
* Resume a previously started TDD workflow from saved state
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { WorkflowService } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const ResumeWorkflowSchema = z.object({
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory')
|
||||||
|
});
|
||||||
|
|
||||||
|
type ResumeWorkflowArgs = z.infer<typeof ResumeWorkflowSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_resume tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotResumeTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_resume',
|
||||||
|
description:
|
||||||
|
'Resume a previously started TDD workflow from saved state. Restores the workflow state machine and continues from where it left off.',
|
||||||
|
parameters: ResumeWorkflowSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: ResumeWorkflowArgs, context: MCPContext) => {
|
||||||
|
const { projectRoot } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(`Resuming autopilot workflow in ${projectRoot}`);
|
||||||
|
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check if workflow exists
|
||||||
|
if (!(await workflowService.hasWorkflow())) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'No workflow state found. Start a new workflow with autopilot_start'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume workflow
|
||||||
|
const status = await workflowService.resumeWorkflow();
|
||||||
|
const nextAction = workflowService.getNextAction();
|
||||||
|
|
||||||
|
context.log.info(
|
||||||
|
`Workflow resumed successfully for task ${status.taskId}`
|
||||||
|
);
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
message: 'Workflow resumed',
|
||||||
|
...status,
|
||||||
|
nextAction: nextAction.action,
|
||||||
|
actionDescription: nextAction.description,
|
||||||
|
nextSteps: nextAction.nextSteps
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-resume: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: `Failed to resume workflow: ${error.message}` }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
197
apps/mcp/src/tools/autopilot/start.tool.ts
Normal file
197
apps/mcp/src/tools/autopilot/start.tool.ts
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-start MCP tool
|
||||||
|
* Initialize and start a new TDD workflow for a task
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { createTaskMasterCore } from '@tm/core';
|
||||||
|
import { WorkflowService } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const StartWorkflowSchema = z.object({
|
||||||
|
taskId: z
|
||||||
|
.string()
|
||||||
|
.describe(
|
||||||
|
'Main task ID to start workflow for (e.g., "1", "2", "HAM-123"). Subtask IDs (e.g., "2.3", "1.1") are not allowed.'
|
||||||
|
),
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory'),
|
||||||
|
maxAttempts: z
|
||||||
|
.number()
|
||||||
|
.optional()
|
||||||
|
.default(3)
|
||||||
|
.describe('Maximum attempts per subtask (default: 3)'),
|
||||||
|
force: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.default(false)
|
||||||
|
.describe('Force start even if workflow state exists')
|
||||||
|
});
|
||||||
|
|
||||||
|
type StartWorkflowArgs = z.infer<typeof StartWorkflowSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a task ID is a main task (not a subtask)
|
||||||
|
* Main tasks: "1", "2", "HAM-123", "PROJ-456"
|
||||||
|
* Subtasks: "1.1", "2.3", "HAM-123.1"
|
||||||
|
*/
|
||||||
|
function isMainTaskId(taskId: string): boolean {
|
||||||
|
// A main task has no dots in the ID after the optional prefix
|
||||||
|
// Examples: "1" ✓, "HAM-123" ✓, "1.1" ✗, "HAM-123.1" ✗
|
||||||
|
const parts = taskId.split('.');
|
||||||
|
return parts.length === 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_start tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotStartTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_start',
|
||||||
|
description:
|
||||||
|
'Initialize and start a new TDD workflow for a task. Creates a git branch and sets up the workflow state machine.',
|
||||||
|
parameters: StartWorkflowSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: StartWorkflowArgs, context: MCPContext) => {
|
||||||
|
const { taskId, projectRoot, maxAttempts, force } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(
|
||||||
|
`Starting autopilot workflow for task ${taskId} in ${projectRoot}`
|
||||||
|
);
|
||||||
|
|
||||||
|
// Validate that taskId is a main task (not a subtask)
|
||||||
|
if (!isMainTaskId(taskId)) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Task ID "${taskId}" is a subtask. Autopilot workflows can only be started for main tasks (e.g., "1", "2", "HAM-123"). Please provide the parent task ID instead.`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load task data and get current tag
|
||||||
|
const core = await createTaskMasterCore({
|
||||||
|
projectPath: projectRoot
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get current tag from ConfigManager
|
||||||
|
const currentTag = core.getActiveTag();
|
||||||
|
|
||||||
|
const taskResult = await core.getTaskWithSubtask(taskId);
|
||||||
|
|
||||||
|
if (!taskResult || !taskResult.task) {
|
||||||
|
await core.close();
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: `Task ${taskId} not found` }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
const task = taskResult.task;
|
||||||
|
|
||||||
|
// Validate task has subtasks
|
||||||
|
if (!task.subtasks || task.subtasks.length === 0) {
|
||||||
|
await core.close();
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Task ${taskId} has no subtasks. Please use expand_task (with id="${taskId}") to create subtasks first. For improved results, consider running analyze_complexity before expanding the task.`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize workflow service
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check for existing workflow
|
||||||
|
const hasWorkflow = await workflowService.hasWorkflow();
|
||||||
|
if (hasWorkflow && !force) {
|
||||||
|
context.log.warn('Workflow state already exists');
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'Workflow already in progress. Use force=true to override or resume the existing workflow. Suggestion: Use autopilot_resume to continue the existing workflow'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start workflow
|
||||||
|
const status = await workflowService.startWorkflow({
|
||||||
|
taskId,
|
||||||
|
taskTitle: task.title,
|
||||||
|
subtasks: task.subtasks.map((st: any) => ({
|
||||||
|
id: st.id,
|
||||||
|
title: st.title,
|
||||||
|
status: st.status,
|
||||||
|
maxAttempts
|
||||||
|
})),
|
||||||
|
maxAttempts,
|
||||||
|
force,
|
||||||
|
tag: currentTag // Pass current tag for branch naming
|
||||||
|
});
|
||||||
|
|
||||||
|
context.log.info(`Workflow started successfully for task ${taskId}`);
|
||||||
|
|
||||||
|
// Get next action with guidance from WorkflowService
|
||||||
|
const nextAction = workflowService.getNextAction();
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: {
|
||||||
|
message: `Workflow started for task ${taskId}`,
|
||||||
|
taskId,
|
||||||
|
branchName: status.branchName,
|
||||||
|
phase: status.phase,
|
||||||
|
tddPhase: status.tddPhase,
|
||||||
|
progress: status.progress,
|
||||||
|
currentSubtask: status.currentSubtask,
|
||||||
|
nextAction: nextAction.action,
|
||||||
|
nextSteps: nextAction.nextSteps
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-start: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: { message: `Failed to start workflow: ${error.message}` }
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
93
apps/mcp/src/tools/autopilot/status.tool.ts
Normal file
93
apps/mcp/src/tools/autopilot/status.tool.ts
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview autopilot-status MCP tool
|
||||||
|
* Get comprehensive workflow status and progress information
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import {
|
||||||
|
handleApiResult,
|
||||||
|
withNormalizedProjectRoot
|
||||||
|
} from '../../shared/utils.js';
|
||||||
|
import type { MCPContext } from '../../shared/types.js';
|
||||||
|
import { WorkflowService } from '@tm/core';
|
||||||
|
import type { FastMCP } from 'fastmcp';
|
||||||
|
|
||||||
|
const StatusSchema = z.object({
|
||||||
|
projectRoot: z
|
||||||
|
.string()
|
||||||
|
.describe('Absolute path to the project root directory')
|
||||||
|
});
|
||||||
|
|
||||||
|
type StatusArgs = z.infer<typeof StatusSchema>;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register the autopilot_status tool with the MCP server
|
||||||
|
*/
|
||||||
|
export function registerAutopilotStatusTool(server: FastMCP) {
|
||||||
|
server.addTool({
|
||||||
|
name: 'autopilot_status',
|
||||||
|
description:
|
||||||
|
'Get comprehensive workflow status including current phase, progress, subtask details, and activity history.',
|
||||||
|
parameters: StatusSchema,
|
||||||
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args: StatusArgs, context: MCPContext) => {
|
||||||
|
const { projectRoot } = args;
|
||||||
|
|
||||||
|
try {
|
||||||
|
context.log.info(`Getting workflow status for ${projectRoot}`);
|
||||||
|
|
||||||
|
const workflowService = new WorkflowService(projectRoot);
|
||||||
|
|
||||||
|
// Check if workflow exists
|
||||||
|
if (!(await workflowService.hasWorkflow())) {
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message:
|
||||||
|
'No active workflow found. Start a workflow with autopilot_start'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume to load state
|
||||||
|
await workflowService.resumeWorkflow();
|
||||||
|
|
||||||
|
// Get status
|
||||||
|
const status = workflowService.getStatus();
|
||||||
|
|
||||||
|
context.log.info(
|
||||||
|
`Workflow status retrieved for task ${status.taskId}`
|
||||||
|
);
|
||||||
|
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: true,
|
||||||
|
data: status
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
context.log.error(`Error in autopilot-status: ${error.message}`);
|
||||||
|
if (error.stack) {
|
||||||
|
context.log.debug(error.stack);
|
||||||
|
}
|
||||||
|
return handleApiResult({
|
||||||
|
result: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
message: `Failed to get workflow status: ${error.message}`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
log: context.log,
|
||||||
|
projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
});
|
||||||
|
}
|
||||||
36
apps/mcp/tsconfig.json
Normal file
36
apps/mcp/tsconfig.json
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "ES2022",
|
||||||
|
"module": "NodeNext",
|
||||||
|
"lib": ["ES2022"],
|
||||||
|
"declaration": true,
|
||||||
|
"declarationMap": true,
|
||||||
|
"sourceMap": true,
|
||||||
|
"outDir": "./dist",
|
||||||
|
"baseUrl": ".",
|
||||||
|
"rootDir": "./src",
|
||||||
|
"strict": true,
|
||||||
|
"noImplicitAny": true,
|
||||||
|
"strictNullChecks": true,
|
||||||
|
"strictFunctionTypes": true,
|
||||||
|
"strictBindCallApply": true,
|
||||||
|
"strictPropertyInitialization": true,
|
||||||
|
"noImplicitThis": true,
|
||||||
|
"alwaysStrict": true,
|
||||||
|
"noUnusedLocals": true,
|
||||||
|
"noUnusedParameters": true,
|
||||||
|
"noImplicitReturns": true,
|
||||||
|
"noFallthroughCasesInSwitch": true,
|
||||||
|
"esModuleInterop": true,
|
||||||
|
"skipLibCheck": true,
|
||||||
|
"forceConsistentCasingInFileNames": true,
|
||||||
|
"moduleResolution": "NodeNext",
|
||||||
|
"moduleDetection": "force",
|
||||||
|
"types": ["node"],
|
||||||
|
"resolveJsonModule": true,
|
||||||
|
"isolatedModules": true,
|
||||||
|
"allowImportingTsExtensions": false
|
||||||
|
},
|
||||||
|
"include": ["src/**/*"],
|
||||||
|
"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
|
||||||
|
}
|
||||||
23
apps/mcp/vitest.config.ts
Normal file
23
apps/mcp/vitest.config.ts
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
import { defineConfig } from 'vitest/config';
|
||||||
|
|
||||||
|
export default defineConfig({
|
||||||
|
test: {
|
||||||
|
globals: true,
|
||||||
|
environment: 'node',
|
||||||
|
coverage: {
|
||||||
|
provider: 'v8',
|
||||||
|
reporter: ['text', 'json', 'html'],
|
||||||
|
exclude: [
|
||||||
|
'node_modules/',
|
||||||
|
'dist/',
|
||||||
|
'tests/',
|
||||||
|
'**/*.test.ts',
|
||||||
|
'**/*.spec.ts',
|
||||||
|
'**/*.d.ts',
|
||||||
|
'**/mocks/**',
|
||||||
|
'**/fixtures/**',
|
||||||
|
'vitest.config.ts'
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
@@ -7,6 +7,9 @@ import logger from './src/logger.js';
|
|||||||
// Load environment variables
|
// Load environment variables
|
||||||
dotenv.config();
|
dotenv.config();
|
||||||
|
|
||||||
|
// Set MCP mode to silence tm-core console output
|
||||||
|
process.env.TASK_MASTER_MCP = 'true';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Start the MCP server
|
* Start the MCP server
|
||||||
*/
|
*/
|
||||||
|
|||||||
@@ -40,8 +40,20 @@ import { registerRulesTool } from './rules.js';
|
|||||||
import { registerScopeUpTool } from './scope-up.js';
|
import { registerScopeUpTool } from './scope-up.js';
|
||||||
import { registerScopeDownTool } from './scope-down.js';
|
import { registerScopeDownTool } from './scope-down.js';
|
||||||
|
|
||||||
|
// Import TypeScript autopilot tools from apps/mcp
|
||||||
|
import {
|
||||||
|
registerAutopilotStartTool,
|
||||||
|
registerAutopilotResumeTool,
|
||||||
|
registerAutopilotNextTool,
|
||||||
|
registerAutopilotStatusTool,
|
||||||
|
registerAutopilotCompleteTool,
|
||||||
|
registerAutopilotCommitTool,
|
||||||
|
registerAutopilotFinalizeTool,
|
||||||
|
registerAutopilotAbortTool
|
||||||
|
} from '@tm/mcp';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Comprehensive tool registry mapping all 36 tool names to their registration functions
|
* Comprehensive tool registry mapping all 44 tool names to their registration functions
|
||||||
* Used for dynamic tool registration and validation
|
* Used for dynamic tool registration and validation
|
||||||
*/
|
*/
|
||||||
export const toolRegistry = {
|
export const toolRegistry = {
|
||||||
@@ -80,7 +92,15 @@ export const toolRegistry = {
|
|||||||
use_tag: registerUseTagTool,
|
use_tag: registerUseTagTool,
|
||||||
rename_tag: registerRenameTagTool,
|
rename_tag: registerRenameTagTool,
|
||||||
copy_tag: registerCopyTagTool,
|
copy_tag: registerCopyTagTool,
|
||||||
research: registerResearchTool
|
research: registerResearchTool,
|
||||||
|
autopilot_start: registerAutopilotStartTool,
|
||||||
|
autopilot_resume: registerAutopilotResumeTool,
|
||||||
|
autopilot_next: registerAutopilotNextTool,
|
||||||
|
autopilot_status: registerAutopilotStatusTool,
|
||||||
|
autopilot_complete: registerAutopilotCompleteTool,
|
||||||
|
autopilot_commit: registerAutopilotCommitTool,
|
||||||
|
autopilot_finalize: registerAutopilotFinalizeTool,
|
||||||
|
autopilot_abort: registerAutopilotAbortTool
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
1198
package-lock.json
generated
1198
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -86,6 +86,7 @@
|
|||||||
"express": "^4.21.2",
|
"express": "^4.21.2",
|
||||||
"fastmcp": "^3.5.0",
|
"fastmcp": "^3.5.0",
|
||||||
"figlet": "^1.8.0",
|
"figlet": "^1.8.0",
|
||||||
|
"fs-extra": "^11.3.0",
|
||||||
"fuse.js": "^7.1.0",
|
"fuse.js": "^7.1.0",
|
||||||
"gpt-tokens": "^1.3.14",
|
"gpt-tokens": "^1.3.14",
|
||||||
"gradient-string": "^3.0.0",
|
"gradient-string": "^3.0.0",
|
||||||
@@ -98,7 +99,9 @@
|
|||||||
"marked": "^15.0.12",
|
"marked": "^15.0.12",
|
||||||
"marked-terminal": "^7.3.0",
|
"marked-terminal": "^7.3.0",
|
||||||
"ollama-ai-provider-v2": "^1.3.1",
|
"ollama-ai-provider-v2": "^1.3.1",
|
||||||
|
"open": "^10.2.0",
|
||||||
"ora": "^8.2.0",
|
"ora": "^8.2.0",
|
||||||
|
"simple-git": "^3.28.0",
|
||||||
"uuid": "^11.1.0",
|
"uuid": "^11.1.0",
|
||||||
"zod": "^4.1.11"
|
"zod": "^4.1.11"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -31,11 +31,15 @@
|
|||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@supabase/supabase-js": "^2.57.4",
|
"@supabase/supabase-js": "^2.57.4",
|
||||||
|
"fs-extra": "^11.3.2",
|
||||||
|
"simple-git": "^3.28.0",
|
||||||
"zod": "^4.1.11"
|
"zod": "^4.1.11"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
|
"@types/fs-extra": "^11.0.4",
|
||||||
"@types/node": "^22.10.5",
|
"@types/node": "^22.10.5",
|
||||||
"@vitest/coverage-v8": "^3.2.4",
|
"@vitest/coverage-v8": "^3.2.4",
|
||||||
|
"strip-literal": "^3.1.0",
|
||||||
"typescript": "^5.9.2",
|
"typescript": "^5.9.2",
|
||||||
"vitest": "^3.2.4",
|
"vitest": "^3.2.4",
|
||||||
"strip-literal": "3.1.0"
|
"strip-literal": "3.1.0"
|
||||||
|
|||||||
@@ -25,10 +25,13 @@ import { getLogger } from '../logger/index.js';
|
|||||||
*/
|
*/
|
||||||
export class AuthManager {
|
export class AuthManager {
|
||||||
private static instance: AuthManager | null = null;
|
private static instance: AuthManager | null = null;
|
||||||
|
private static readonly staticLogger = getLogger('AuthManager');
|
||||||
private credentialStore: CredentialStore;
|
private credentialStore: CredentialStore;
|
||||||
private oauthService: OAuthService;
|
private oauthService: OAuthService;
|
||||||
private supabaseClient: SupabaseAuthClient;
|
private supabaseClient: SupabaseAuthClient;
|
||||||
private organizationService?: OrganizationService;
|
private organizationService?: OrganizationService;
|
||||||
|
private readonly logger = getLogger('AuthManager');
|
||||||
|
private refreshPromise: Promise<AuthCredentials> | null = null;
|
||||||
|
|
||||||
private constructor(config?: Partial<AuthConfig>) {
|
private constructor(config?: Partial<AuthConfig>) {
|
||||||
this.credentialStore = CredentialStore.getInstance(config);
|
this.credentialStore = CredentialStore.getInstance(config);
|
||||||
@@ -50,8 +53,7 @@ export class AuthManager {
|
|||||||
await this.supabaseClient.initialize();
|
await this.supabaseClient.initialize();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// Log but don't throw - session might not exist yet
|
// Log but don't throw - session might not exist yet
|
||||||
const logger = getLogger('AuthManager');
|
this.logger.debug('No existing session to restore');
|
||||||
logger.debug('No existing session to restore');
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,8 +65,7 @@ export class AuthManager {
|
|||||||
AuthManager.instance = new AuthManager(config);
|
AuthManager.instance = new AuthManager(config);
|
||||||
} else if (config) {
|
} else if (config) {
|
||||||
// Warn if config is provided after initialization
|
// Warn if config is provided after initialization
|
||||||
const logger = getLogger('AuthManager');
|
AuthManager.staticLogger.warn(
|
||||||
logger.warn(
|
|
||||||
'getInstance called with config after initialization; config is ignored.'
|
'getInstance called with config after initialization; config is ignored.'
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -159,7 +160,7 @@ export class AuthManager {
|
|||||||
await this.supabaseClient.signOut();
|
await this.supabaseClient.signOut();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// Log but don't throw - we still want to clear local credentials
|
// Log but don't throw - we still want to clear local credentials
|
||||||
getLogger('AuthManager').warn('Failed to sign out from Supabase:', error);
|
this.logger.warn('Failed to sign out from Supabase:', error);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Always clear local credentials (removes auth.json file)
|
// Always clear local credentials (removes auth.json file)
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ export type {
|
|||||||
ProviderConfig,
|
ProviderConfig,
|
||||||
TaskSettings,
|
TaskSettings,
|
||||||
TagSettings,
|
TagSettings,
|
||||||
|
WorkflowSettings,
|
||||||
StorageSettings,
|
StorageSettings,
|
||||||
RetrySettings,
|
RetrySettings,
|
||||||
LoggingSettings,
|
LoggingSettings,
|
||||||
|
|||||||
@@ -38,6 +38,35 @@ export class ConfigLoader {
|
|||||||
main: DEFAULT_CONFIG_VALUES.MODELS.MAIN,
|
main: DEFAULT_CONFIG_VALUES.MODELS.MAIN,
|
||||||
fallback: DEFAULT_CONFIG_VALUES.MODELS.FALLBACK
|
fallback: DEFAULT_CONFIG_VALUES.MODELS.FALLBACK
|
||||||
},
|
},
|
||||||
|
workflow: {
|
||||||
|
enableAutopilot: DEFAULT_CONFIG_VALUES.WORKFLOW.ENABLE_AUTOPILOT,
|
||||||
|
maxPhaseAttempts: DEFAULT_CONFIG_VALUES.WORKFLOW.MAX_PHASE_ATTEMPTS,
|
||||||
|
branchPattern: DEFAULT_CONFIG_VALUES.WORKFLOW.BRANCH_PATTERN,
|
||||||
|
requireCleanWorkingTree:
|
||||||
|
DEFAULT_CONFIG_VALUES.WORKFLOW.REQUIRE_CLEAN_WORKING_TREE,
|
||||||
|
autoStageChanges: DEFAULT_CONFIG_VALUES.WORKFLOW.AUTO_STAGE_CHANGES,
|
||||||
|
includeCoAuthor: DEFAULT_CONFIG_VALUES.WORKFLOW.INCLUDE_CO_AUTHOR,
|
||||||
|
coAuthorName: DEFAULT_CONFIG_VALUES.WORKFLOW.CO_AUTHOR_NAME,
|
||||||
|
coAuthorEmail: DEFAULT_CONFIG_VALUES.WORKFLOW.CO_AUTHOR_EMAIL,
|
||||||
|
testThresholds: {
|
||||||
|
minTests: DEFAULT_CONFIG_VALUES.WORKFLOW.MIN_TESTS,
|
||||||
|
maxFailuresInGreen:
|
||||||
|
DEFAULT_CONFIG_VALUES.WORKFLOW.MAX_FAILURES_IN_GREEN
|
||||||
|
},
|
||||||
|
commitMessageTemplate:
|
||||||
|
DEFAULT_CONFIG_VALUES.WORKFLOW.COMMIT_MESSAGE_TEMPLATE,
|
||||||
|
allowedCommitTypes: [
|
||||||
|
...DEFAULT_CONFIG_VALUES.WORKFLOW.ALLOWED_COMMIT_TYPES
|
||||||
|
],
|
||||||
|
defaultCommitType: DEFAULT_CONFIG_VALUES.WORKFLOW.DEFAULT_COMMIT_TYPE,
|
||||||
|
operationTimeout: DEFAULT_CONFIG_VALUES.WORKFLOW.OPERATION_TIMEOUT,
|
||||||
|
enableActivityLogging:
|
||||||
|
DEFAULT_CONFIG_VALUES.WORKFLOW.ENABLE_ACTIVITY_LOGGING,
|
||||||
|
activityLogPath: DEFAULT_CONFIG_VALUES.WORKFLOW.ACTIVITY_LOG_PATH,
|
||||||
|
enableStateBackup: DEFAULT_CONFIG_VALUES.WORKFLOW.ENABLE_STATE_BACKUP,
|
||||||
|
maxStateBackups: DEFAULT_CONFIG_VALUES.WORKFLOW.MAX_STATE_BACKUPS,
|
||||||
|
abortOnMaxAttempts: DEFAULT_CONFIG_VALUES.WORKFLOW.ABORT_ON_MAX_ATTEMPTS
|
||||||
|
},
|
||||||
storage: {
|
storage: {
|
||||||
type: DEFAULT_CONFIG_VALUES.STORAGE.TYPE,
|
type: DEFAULT_CONFIG_VALUES.STORAGE.TYPE,
|
||||||
encoding: DEFAULT_CONFIG_VALUES.STORAGE.ENCODING,
|
encoding: DEFAULT_CONFIG_VALUES.STORAGE.ENCODING,
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import {
|
|||||||
ERROR_CODES,
|
ERROR_CODES,
|
||||||
TaskMasterError
|
TaskMasterError
|
||||||
} from '../../errors/task-master-error.js';
|
} from '../../errors/task-master-error.js';
|
||||||
|
import { getLogger } from '../../logger/index.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Persistence options
|
* Persistence options
|
||||||
@@ -30,6 +31,7 @@ export interface PersistenceOptions {
|
|||||||
export class ConfigPersistence {
|
export class ConfigPersistence {
|
||||||
private localConfigPath: string;
|
private localConfigPath: string;
|
||||||
private backupDir: string;
|
private backupDir: string;
|
||||||
|
private readonly logger = getLogger('ConfigPersistence');
|
||||||
|
|
||||||
constructor(projectRoot: string) {
|
constructor(projectRoot: string) {
|
||||||
this.localConfigPath = path.join(projectRoot, '.taskmaster', 'config.json');
|
this.localConfigPath = path.join(projectRoot, '.taskmaster', 'config.json');
|
||||||
@@ -94,7 +96,7 @@ export class ConfigPersistence {
|
|||||||
|
|
||||||
return backupPath;
|
return backupPath;
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.warn('Failed to create backup:', error);
|
this.logger.warn('Failed to create backup:', error);
|
||||||
throw error;
|
throw error;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -116,7 +118,7 @@ export class ConfigPersistence {
|
|||||||
await fs.unlink(path.join(this.backupDir, file));
|
await fs.unlink(path.join(this.backupDir, file));
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.warn('Failed to clean old backups:', error);
|
this.logger.warn('Failed to clean old backups:', error);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,6 +4,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { PartialConfiguration } from '../../interfaces/configuration.interface.js';
|
import type { PartialConfiguration } from '../../interfaces/configuration.interface.js';
|
||||||
|
import { getLogger } from '../../logger/index.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Environment variable mapping definition
|
* Environment variable mapping definition
|
||||||
@@ -24,6 +25,8 @@ interface EnvMapping {
|
|||||||
* Single responsibility: Environment variable configuration extraction
|
* Single responsibility: Environment variable configuration extraction
|
||||||
*/
|
*/
|
||||||
export class EnvironmentConfigProvider {
|
export class EnvironmentConfigProvider {
|
||||||
|
private readonly logger = getLogger('EnvironmentConfigProvider');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Default environment variable mappings
|
* Default environment variable mappings
|
||||||
*/
|
*/
|
||||||
@@ -75,7 +78,7 @@ export class EnvironmentConfigProvider {
|
|||||||
|
|
||||||
// Validate value if validator is provided
|
// Validate value if validator is provided
|
||||||
if (mapping.validate && !mapping.validate(value)) {
|
if (mapping.validate && !mapping.validate(value)) {
|
||||||
console.warn(`Invalid value for ${mapping.env}: ${value}`);
|
this.logger.warn(`Invalid value for ${mapping.env}: ${value}`);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import {
|
|||||||
TaskMasterError
|
TaskMasterError
|
||||||
} from '../../errors/task-master-error.js';
|
} from '../../errors/task-master-error.js';
|
||||||
import { DEFAULT_CONFIG_VALUES } from '../../interfaces/configuration.interface.js';
|
import { DEFAULT_CONFIG_VALUES } from '../../interfaces/configuration.interface.js';
|
||||||
|
import { getLogger } from '../../logger/index.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Runtime state data structure
|
* Runtime state data structure
|
||||||
@@ -30,6 +31,7 @@ export interface RuntimeState {
|
|||||||
export class RuntimeStateManager {
|
export class RuntimeStateManager {
|
||||||
private stateFilePath: string;
|
private stateFilePath: string;
|
||||||
private currentState: RuntimeState;
|
private currentState: RuntimeState;
|
||||||
|
private readonly logger = getLogger('RuntimeStateManager');
|
||||||
|
|
||||||
constructor(projectRoot: string) {
|
constructor(projectRoot: string) {
|
||||||
this.stateFilePath = path.join(projectRoot, '.taskmaster', 'state.json');
|
this.stateFilePath = path.join(projectRoot, '.taskmaster', 'state.json');
|
||||||
@@ -66,7 +68,7 @@ export class RuntimeStateManager {
|
|||||||
} catch (error: any) {
|
} catch (error: any) {
|
||||||
if (error.code === 'ENOENT') {
|
if (error.code === 'ENOENT') {
|
||||||
// State file doesn't exist, use defaults
|
// State file doesn't exist, use defaults
|
||||||
console.debug('No state.json found, using default state');
|
this.logger.debug('No state.json found, using default state');
|
||||||
|
|
||||||
// Check environment variable
|
// Check environment variable
|
||||||
if (process.env.TASKMASTER_TAG) {
|
if (process.env.TASKMASTER_TAG) {
|
||||||
@@ -76,7 +78,8 @@ export class RuntimeStateManager {
|
|||||||
return this.currentState;
|
return this.currentState;
|
||||||
}
|
}
|
||||||
|
|
||||||
console.warn('Failed to load state file:', error.message);
|
// Failed to load, use defaults
|
||||||
|
this.logger.warn('Failed to load state file:', error.message);
|
||||||
return this.currentState;
|
return this.currentState;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
67
packages/tm-core/src/git/branch-name-generator.spec.ts
Normal file
67
packages/tm-core/src/git/branch-name-generator.spec.ts
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import {
|
||||||
|
generateBranchName,
|
||||||
|
sanitizeBranchName
|
||||||
|
} from './branch-name-generator.js';
|
||||||
|
|
||||||
|
describe('Branch Name Generator', () => {
|
||||||
|
describe('sanitizeBranchName', () => {
|
||||||
|
it('should remove invalid characters', () => {
|
||||||
|
const result = sanitizeBranchName('feature/my feature!');
|
||||||
|
expect(result).toBe('feature-my-feature');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should replace spaces with hyphens', () => {
|
||||||
|
const result = sanitizeBranchName('my feature branch');
|
||||||
|
expect(result).toBe('my-feature-branch');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should convert to lowercase', () => {
|
||||||
|
const result = sanitizeBranchName('MyFeature');
|
||||||
|
expect(result).toBe('myfeature');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should remove consecutive hyphens', () => {
|
||||||
|
const result = sanitizeBranchName('my---feature');
|
||||||
|
expect(result).toBe('my-feature');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty string', () => {
|
||||||
|
const result = sanitizeBranchName('');
|
||||||
|
expect(result).toBe('branch');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('generateBranchName', () => {
|
||||||
|
it('should generate branch name from task ID', () => {
|
||||||
|
const result = generateBranchName({ taskId: '2.7' });
|
||||||
|
expect(result).toMatch(/^task-2-7-/);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include description in branch name', () => {
|
||||||
|
const result = generateBranchName({
|
||||||
|
taskId: '2.7',
|
||||||
|
description: 'Add Feature'
|
||||||
|
});
|
||||||
|
expect(result).toContain('task-2-7');
|
||||||
|
expect(result).toContain('add-feature');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle custom pattern', () => {
|
||||||
|
const result = generateBranchName({
|
||||||
|
taskId: '2.7',
|
||||||
|
pattern: 'feature/{taskId}'
|
||||||
|
});
|
||||||
|
expect(result).toBe('feature-2-7');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should truncate long descriptions', () => {
|
||||||
|
const longDesc = 'a'.repeat(100);
|
||||||
|
const result = generateBranchName({
|
||||||
|
taskId: '2.7',
|
||||||
|
description: longDesc
|
||||||
|
});
|
||||||
|
expect(result.length).toBeLessThan(80);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
69
packages/tm-core/src/git/branch-name-generator.ts
Normal file
69
packages/tm-core/src/git/branch-name-generator.ts
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
/**
|
||||||
|
* Branch Name Generator - Generates valid git branch names from patterns
|
||||||
|
* @module branch-name-generator
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sanitizes a string to be a valid git branch name.
|
||||||
|
* Removes invalid characters, converts to lowercase, replaces spaces with hyphens.
|
||||||
|
*
|
||||||
|
* @param {string} name - Name to sanitize
|
||||||
|
* @returns {string} Sanitized branch name
|
||||||
|
*/
|
||||||
|
export function sanitizeBranchName(name: string): string {
|
||||||
|
if (!name || name.trim() === '') {
|
||||||
|
return 'branch';
|
||||||
|
}
|
||||||
|
|
||||||
|
return name
|
||||||
|
.toLowerCase()
|
||||||
|
.replace(/[^a-z0-9-_.\/]/g, '-') // Replace invalid chars with hyphens
|
||||||
|
.replace(/\//g, '-') // Replace slashes with hyphens
|
||||||
|
.replace(/-+/g, '-') // Remove consecutive hyphens
|
||||||
|
.replace(/^-+|-+$/g, ''); // Remove leading/trailing hyphens
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generates a branch name from a pattern and variables.
|
||||||
|
*
|
||||||
|
* @param {Object} options - Generation options
|
||||||
|
* @param {string} options.taskId - Task ID to include
|
||||||
|
* @param {string} [options.description] - Description to include
|
||||||
|
* @param {string} [options.pattern] - Custom pattern (default: 'task-{taskId}-{description}')
|
||||||
|
* @param {number} [options.maxLength=50] - Maximum branch name length
|
||||||
|
* @returns {string} Generated branch name
|
||||||
|
*/
|
||||||
|
export function generateBranchName(options: {
|
||||||
|
taskId: string;
|
||||||
|
description?: string;
|
||||||
|
pattern?: string;
|
||||||
|
maxLength?: number;
|
||||||
|
}): string {
|
||||||
|
const maxLength = options.maxLength || 50;
|
||||||
|
const pattern = options.pattern || 'task-{taskId}-{description}';
|
||||||
|
|
||||||
|
// Sanitize task ID (replace dots with hyphens)
|
||||||
|
const sanitizedTaskId = sanitizeBranchName(
|
||||||
|
options.taskId.replace(/\./g, '-')
|
||||||
|
);
|
||||||
|
|
||||||
|
// Sanitize description if provided
|
||||||
|
const sanitizedDescription = options.description
|
||||||
|
? sanitizeBranchName(options.description)
|
||||||
|
: sanitizeBranchName(Date.now().toString());
|
||||||
|
|
||||||
|
// Replace pattern variables
|
||||||
|
let branchName = pattern
|
||||||
|
.replace(/{taskId}/g, sanitizedTaskId)
|
||||||
|
.replace(/{description}/g, sanitizedDescription);
|
||||||
|
|
||||||
|
// Sanitize the final result
|
||||||
|
branchName = sanitizeBranchName(branchName);
|
||||||
|
|
||||||
|
// Truncate if too long
|
||||||
|
if (branchName.length > maxLength) {
|
||||||
|
branchName = branchName.substring(0, maxLength).replace(/-+$/, '');
|
||||||
|
}
|
||||||
|
|
||||||
|
return branchName;
|
||||||
|
}
|
||||||
319
packages/tm-core/src/git/commit-message-generator.test.ts
Normal file
319
packages/tm-core/src/git/commit-message-generator.test.ts
Normal file
@@ -0,0 +1,319 @@
|
|||||||
|
import { describe, it, expect, beforeEach } from 'vitest';
|
||||||
|
import { CommitMessageGenerator } from './commit-message-generator.js';
|
||||||
|
|
||||||
|
// Unit tests for CommitMessageGenerator: conventional-commit generation,
// validation, and parsing. Expected scopes (core/cli/test/docs/repo) are
// derived by the generator from the changed file paths.
describe('CommitMessageGenerator', () => {
	let generator: CommitMessageGenerator;

	// Fresh generator per test so no state leaks between cases.
	beforeEach(() => {
		generator = new CommitMessageGenerator();
	});

	describe('generateMessage', () => {
		it('should generate basic conventional commit message', () => {
			const message = generator.generateMessage({
				type: 'feat',
				description: 'add user authentication',
				changedFiles: ['packages/tm-core/src/auth/auth-manager.ts']
			});

			// tm-core paths map to the 'core' scope.
			expect(message).toContain('feat(core): add user authentication');
		});

		it('should include scope from changed files', () => {
			const message = generator.generateMessage({
				type: 'fix',
				description: 'resolve CLI argument parsing',
				changedFiles: ['packages/cli/src/commands/start.ts']
			});

			expect(message).toContain('fix(cli): resolve CLI argument parsing');
		});

		it('should include task metadata in commit body', () => {
			const message = generator.generateMessage({
				type: 'feat',
				description: 'implement feature',
				changedFiles: ['packages/tm-core/src/index.ts'],
				taskId: '5.3',
				phase: 'GREEN'
			});

			expect(message).toContain('Task: 5.3');
			expect(message).toContain('Phase: GREEN');
		});

		it('should include test results metadata', () => {
			const message = generator.generateMessage({
				type: 'test',
				description: 'add unit tests',
				changedFiles: ['packages/tm-core/src/auth/auth.test.ts'],
				testsPassing: 42,
				testsFailing: 0
			});

			expect(message).toContain('Tests: 42 passing');
		});

		it('should include failing test count when present', () => {
			const message = generator.generateMessage({
				type: 'fix',
				description: 'fix test failures',
				changedFiles: ['packages/tm-core/src/index.ts'],
				testsPassing: 40,
				testsFailing: 2
			});

			// Failing count is only appended when non-zero (see previous test).
			expect(message).toContain('Tests: 40 passing, 2 failing');
		});

		it('should include custom body text', () => {
			const message = generator.generateMessage({
				type: 'feat',
				description: 'add new feature',
				changedFiles: ['packages/tm-core/src/index.ts'],
				body: 'This is a detailed explanation\nof the changes made.'
			});

			expect(message).toContain('This is a detailed explanation');
			expect(message).toContain('of the changes made.');
		});

		it('should handle multiple changed files with different scopes', () => {
			const message = generator.generateMessage({
				type: 'refactor',
				description: 'reorganize code structure',
				changedFiles: [
					'packages/cli/src/index.ts',
					'packages/tm-core/src/index.ts'
				]
			});

			// Should use CLI scope (higher priority due to count or priority)
			expect(message).toMatch(/refactor\((cli|core)\):/);
		});

		it('should handle test files and detect test scope', () => {
			const message = generator.generateMessage({
				type: 'test',
				description: 'add integration tests',
				changedFiles: ['packages/tm-core/src/workflow/workflow.test.ts']
			});

			// *.test.ts files map to the 'test' scope even under tm-core.
			expect(message).toContain('test(test):');
		});

		it('should handle docs changes', () => {
			const message = generator.generateMessage({
				type: 'docs',
				description: 'update README',
				changedFiles: ['README.md', 'docs/guide.md']
			});

			expect(message).toContain('docs(docs):');
		});

		it('should omit scope if not detected', () => {
			// NOTE(review): despite the test name, an empty file list falls
			// back to the 'repo' scope rather than omitting the scope entirely.
			const message = generator.generateMessage({
				type: 'chore',
				description: 'update dependencies',
				changedFiles: []
			});

			expect(message).toContain('chore(repo): update dependencies');
		});

		it('should support manual scope override', () => {
			const message = generator.generateMessage({
				type: 'feat',
				description: 'add feature',
				changedFiles: ['packages/tm-core/src/index.ts'],
				scope: 'api'
			});

			// Manual scope wins over path-based detection ('core').
			expect(message).toContain('feat(api): add feature');
		});

		it('should handle breaking changes indicator', () => {
			const message = generator.generateMessage({
				type: 'feat',
				description: 'change API structure',
				changedFiles: ['packages/tm-core/src/index.ts'],
				breaking: true
			});

			// Breaking changes use the conventional '!' marker after the scope.
			expect(message).toContain('feat(core)!: change API structure');
		});

		it('should format complete message with all metadata', () => {
			const message = generator.generateMessage({
				type: 'feat',
				description: 'implement TDD workflow',
				changedFiles: ['packages/tm-core/src/workflow/orchestrator.ts'],
				body: 'Implemented complete RED-GREEN-COMMIT cycle with state persistence.',
				taskId: '4.1',
				phase: 'GREEN',
				testsPassing: 74,
				testsFailing: 0
			});

			expect(message).toContain('feat(core): implement TDD workflow');
			expect(message).toContain('Implemented complete RED-GREEN-COMMIT cycle');
			expect(message).toContain('Task: 4.1');
			expect(message).toContain('Phase: GREEN');
			expect(message).toContain('Tests: 74 passing');
		});
	});

	describe('validateConventionalCommit', () => {
		it('should validate correct conventional commit format', () => {
			const message = 'feat(core): add feature\n\nDetails here.';
			const result = generator.validateConventionalCommit(message);

			expect(result.isValid).toBe(true);
			expect(result.errors).toEqual([]);
		});

		it('should detect missing type', () => {
			const message = 'add feature';
			const result = generator.validateConventionalCommit(message);

			expect(result.isValid).toBe(false);
			expect(result.errors.length).toBeGreaterThan(0);
			expect(result.errors[0]).toContain('Invalid conventional commit format');
		});

		it('should detect invalid type', () => {
			const message = 'invalid(core): add feature';
			const result = generator.validateConventionalCommit(message);

			expect(result.isValid).toBe(false);
			expect(result.errors.length).toBeGreaterThan(0);
		});

		it('should detect missing description', () => {
			// A header with no text after the colon fails the format regex,
			// so the generic format error is reported (not a description error).
			const message = 'feat(core):';
			const result = generator.validateConventionalCommit(message);

			expect(result.isValid).toBe(false);
			expect(result.errors.length).toBeGreaterThan(0);
			expect(result.errors[0]).toContain('Invalid conventional commit format');
		});

		it('should accept valid types', () => {
			const validTypes = [
				'feat',
				'fix',
				'docs',
				'style',
				'refactor',
				'test',
				'chore'
			];

			for (const type of validTypes) {
				const message = `${type}(core): do something`;
				const result = generator.validateConventionalCommit(message);

				expect(result.isValid).toBe(true);
			}
		});

		it('should accept breaking change indicator', () => {
			const message = 'feat(core)!: breaking change';
			const result = generator.validateConventionalCommit(message);

			expect(result.isValid).toBe(true);
		});

		it('should accept message without scope', () => {
			const message = 'fix: resolve issue';
			const result = generator.validateConventionalCommit(message);

			expect(result.isValid).toBe(true);
		});
	});

	describe('parseCommitMessage', () => {
		it('should parse conventional commit message', () => {
			const message = 'feat(core): add feature\n\nDetailed explanation.';
			const parsed = generator.parseCommitMessage(message);

			expect(parsed.type).toBe('feat');
			expect(parsed.scope).toBe('core');
			expect(parsed.description).toBe('add feature');
			expect(parsed.body).toContain('Detailed explanation.');
			expect(parsed.breaking).toBe(false);
		});

		it('should parse breaking change indicator', () => {
			const message = 'feat(core)!: breaking change';
			const parsed = generator.parseCommitMessage(message);

			expect(parsed.type).toBe('feat');
			expect(parsed.breaking).toBe(true);
		});

		it('should parse message without scope', () => {
			const message = 'fix: resolve issue';
			const parsed = generator.parseCommitMessage(message);

			expect(parsed.type).toBe('fix');
			expect(parsed.scope).toBeUndefined();
			expect(parsed.description).toBe('resolve issue');
		});

		it('should handle multiline body', () => {
			const message = 'feat: add feature\n\nLine 1\nLine 2\nLine 3';
			const parsed = generator.parseCommitMessage(message);

			expect(parsed.body).toContain('Line 1');
			expect(parsed.body).toContain('Line 2');
			expect(parsed.body).toContain('Line 3');
		});
	});

	describe('edge cases', () => {
		it('should handle empty changed files list', () => {
			const message = generator.generateMessage({
				type: 'chore',
				description: 'general maintenance',
				changedFiles: []
			});

			expect(message).toContain('chore(repo):');
		});

		it('should handle very long description', () => {
			// No truncation is expected, even at 200 characters.
			const longDesc = 'a'.repeat(200);
			const message = generator.generateMessage({
				type: 'feat',
				description: longDesc,
				changedFiles: ['packages/tm-core/src/index.ts']
			});

			expect(message).toContain(longDesc);
		});

		it('should handle special characters in description', () => {
			const message = generator.generateMessage({
				type: 'fix',
				description: 'resolve issue with $special @characters #123',
				changedFiles: ['packages/tm-core/src/index.ts']
			});

			expect(message).toContain('$special @characters #123');
		});

		it('should handle zero passing tests', () => {
			const message = generator.generateMessage({
				type: 'test',
				description: 'add failing test',
				changedFiles: ['test.ts'],
				testsPassing: 0,
				testsFailing: 1
			});

			expect(message).toContain('Tests: 0 passing, 1 failing');
		});
	});
});
|
||||||
205
packages/tm-core/src/git/commit-message-generator.ts
Normal file
205
packages/tm-core/src/git/commit-message-generator.ts
Normal file
@@ -0,0 +1,205 @@
|
|||||||
|
/**
|
||||||
|
* CommitMessageGenerator - Generate conventional commit messages with metadata
|
||||||
|
*
|
||||||
|
* Combines TemplateEngine and ScopeDetector to create structured commit messages
|
||||||
|
* that follow conventional commits specification and include task metadata.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { TemplateEngine } from './template-engine.js';
|
||||||
|
import { ScopeDetector } from './scope-detector.js';
|
||||||
|
|
||||||
|
/** Options accepted by {@link CommitMessageGenerator.generateMessage}. */
export interface CommitMessageOptions {
	/** Conventional commit type, e.g. 'feat', 'fix', 'chore'. */
	type: string;
	/** One-line summary placed after `type(scope):`. */
	description: string;
	/** File paths used to auto-detect the scope when none is given. */
	changedFiles: string[];
	/** Manual scope override; skips auto-detection when provided. */
	scope?: string;
	/** Optional free-form commit body. */
	body?: string;
	/** When true, the `!` breaking-change marker is added to the header. */
	breaking?: boolean;
	/** Task identifier to embed in the commit metadata, e.g. '5.3'. */
	taskId?: string;
	/** TDD phase label to embed in the metadata, e.g. 'GREEN'. */
	phase?: string;
	/** Optional tag metadata. */
	tag?: string;
	/** Number of passing tests to report in the metadata. */
	testsPassing?: number;
	/** Number of failing tests to report in the metadata. */
	testsFailing?: number;
	/** Coverage percentage to report in the metadata. */
	coveragePercent?: number;
}

/** Result of validating a commit message against the conventional format. */
export interface ValidationResult {
	/** True when no validation errors were found. */
	isValid: boolean;
	/** Human-readable validation errors; empty when valid. */
	errors: string[];
}

/** Structured components extracted from a conventional commit message. */
export interface ParsedCommitMessage {
	/** Commit type from the header, e.g. 'feat'. */
	type: string;
	/** Scope from the header, if present. */
	scope?: string;
	/** True when the header carries the `!` breaking-change marker. */
	breaking: boolean;
	/** Header text after the colon. */
	description: string;
	/** Body text after the first blank line, if any. */
	body?: string;
}

// Commit types accepted by validateConventionalCommit (conventional-commits
// standard set plus 'revert').
const CONVENTIONAL_COMMIT_TYPES = [
	'feat',
	'fix',
	'docs',
	'style',
	'refactor',
	'perf',
	'test',
	'build',
	'ci',
	'chore',
	'revert'
];
|
||||||
|
|
||||||
|
export class CommitMessageGenerator {
|
||||||
|
private templateEngine: TemplateEngine;
|
||||||
|
private scopeDetector: ScopeDetector;
|
||||||
|
|
||||||
|
constructor(
|
||||||
|
customTemplates?: Record<string, string>,
|
||||||
|
customScopeMappings?: Record<string, string>,
|
||||||
|
customScopePriorities?: Record<string, number>
|
||||||
|
) {
|
||||||
|
this.templateEngine = new TemplateEngine(customTemplates);
|
||||||
|
this.scopeDetector = new ScopeDetector(
|
||||||
|
customScopeMappings,
|
||||||
|
customScopePriorities
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate a conventional commit message with metadata
|
||||||
|
*/
|
||||||
|
generateMessage(options: CommitMessageOptions): string {
|
||||||
|
const {
|
||||||
|
type,
|
||||||
|
description,
|
||||||
|
changedFiles,
|
||||||
|
scope: manualScope,
|
||||||
|
body,
|
||||||
|
breaking = false,
|
||||||
|
taskId,
|
||||||
|
phase,
|
||||||
|
tag,
|
||||||
|
testsPassing,
|
||||||
|
testsFailing,
|
||||||
|
coveragePercent
|
||||||
|
} = options;
|
||||||
|
|
||||||
|
// Determine scope (manual override or auto-detect)
|
||||||
|
const scope = manualScope ?? this.scopeDetector.detectScope(changedFiles);
|
||||||
|
|
||||||
|
// Build template variables
|
||||||
|
const variables = {
|
||||||
|
type,
|
||||||
|
scope,
|
||||||
|
breaking: breaking ? '!' : '',
|
||||||
|
description,
|
||||||
|
body,
|
||||||
|
taskId,
|
||||||
|
phase,
|
||||||
|
tag,
|
||||||
|
testsPassing,
|
||||||
|
testsFailing,
|
||||||
|
coveragePercent
|
||||||
|
};
|
||||||
|
|
||||||
|
// Generate message from template
|
||||||
|
return this.templateEngine.render('commitMessage', variables);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate that a commit message follows conventional commits format
|
||||||
|
*/
|
||||||
|
validateConventionalCommit(message: string): ValidationResult {
|
||||||
|
const errors: string[] = [];
|
||||||
|
|
||||||
|
// Parse first line (header)
|
||||||
|
const lines = message.split('\n');
|
||||||
|
const header = lines[0];
|
||||||
|
|
||||||
|
if (!header) {
|
||||||
|
errors.push('Missing commit message');
|
||||||
|
return { isValid: false, errors };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check format: type(scope)?: description
|
||||||
|
const headerRegex = /^(\w+)(?:\(([^)]+)\))?(!)?:\s*(.+)$/;
|
||||||
|
const match = header.match(headerRegex);
|
||||||
|
|
||||||
|
if (!match) {
|
||||||
|
errors.push(
|
||||||
|
'Invalid conventional commit format. Expected: type(scope): description'
|
||||||
|
);
|
||||||
|
return { isValid: false, errors };
|
||||||
|
}
|
||||||
|
|
||||||
|
const [, type, , , description] = match;
|
||||||
|
|
||||||
|
// Validate type
|
||||||
|
if (!CONVENTIONAL_COMMIT_TYPES.includes(type)) {
|
||||||
|
errors.push(
|
||||||
|
`Invalid commit type "${type}". Must be one of: ${CONVENTIONAL_COMMIT_TYPES.join(', ')}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate description
|
||||||
|
if (!description || description.trim().length === 0) {
|
||||||
|
errors.push('Missing description');
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
isValid: errors.length === 0,
|
||||||
|
errors
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse a conventional commit message into its components
|
||||||
|
*/
|
||||||
|
parseCommitMessage(message: string): ParsedCommitMessage {
|
||||||
|
const lines = message.split('\n');
|
||||||
|
const header = lines[0];
|
||||||
|
|
||||||
|
// Parse header: type(scope)!: description
|
||||||
|
const headerRegex = /^(\w+)(?:\(([^)]+)\))?(!)?:\s*(.+)$/;
|
||||||
|
const match = header.match(headerRegex);
|
||||||
|
|
||||||
|
if (!match) {
|
||||||
|
throw new Error('Invalid conventional commit format');
|
||||||
|
}
|
||||||
|
|
||||||
|
const [, type, scope, breaking, description] = match;
|
||||||
|
|
||||||
|
// Body is everything after the first blank line
|
||||||
|
const bodyStartIndex = lines.findIndex((line, i) => i > 0 && line === '');
|
||||||
|
const body =
|
||||||
|
bodyStartIndex !== -1
|
||||||
|
? lines
|
||||||
|
.slice(bodyStartIndex + 1)
|
||||||
|
.join('\n')
|
||||||
|
.trim()
|
||||||
|
: undefined;
|
||||||
|
|
||||||
|
return {
|
||||||
|
type,
|
||||||
|
scope,
|
||||||
|
breaking: breaking === '!',
|
||||||
|
description,
|
||||||
|
body
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the scope detector instance (for testing/customization)
|
||||||
|
*/
|
||||||
|
getScopeDetector(): ScopeDetector {
|
||||||
|
return this.scopeDetector;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the template engine instance (for testing/customization)
|
||||||
|
*/
|
||||||
|
getTemplateEngine(): TemplateEngine {
|
||||||
|
return this.templateEngine;
|
||||||
|
}
|
||||||
|
}
|
||||||
1211
packages/tm-core/src/git/git-adapter.test.ts
Normal file
1211
packages/tm-core/src/git/git-adapter.test.ts
Normal file
File diff suppressed because it is too large
Load Diff
780
packages/tm-core/src/git/git-adapter.ts
Normal file
780
packages/tm-core/src/git/git-adapter.ts
Normal file
@@ -0,0 +1,780 @@
|
|||||||
|
/**
|
||||||
|
* GitAdapter - Safe git operations wrapper with validation and safety checks.
|
||||||
|
* Handles all git operations (branching, committing, pushing) with built-in safety gates.
|
||||||
|
*
|
||||||
|
* @module git-adapter
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { simpleGit, type SimpleGit } from 'simple-git';
|
||||||
|
import fs from 'fs-extra';
|
||||||
|
import path from 'path';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* GitAdapter class for safe git operations
|
||||||
|
*/
|
||||||
|
export class GitAdapter {
|
||||||
|
// Normalized absolute path to the project this adapter operates on.
public projectPath: string;
// simple-git instance bound to projectPath.
public git: SimpleGit;

/**
 * Creates a new GitAdapter instance.
 *
 * @param projectPath - Absolute path to the project directory
 * @throws {Error} If projectPath is missing or not absolute
 *
 * @example
 * const git = new GitAdapter('/path/to/project');
 * await git.ensureGitRepository();
 */
constructor(projectPath: string) {
	// Reject missing or relative paths up front.
	if (!projectPath) {
		throw new Error('Project path is required');
	}
	if (!path.isAbsolute(projectPath)) {
		throw new Error('Project path must be an absolute path');
	}

	// Keep a normalized form and bind simple-git to it.
	const normalized = path.normalize(projectPath);
	this.projectPath = normalized;
	this.git = simpleGit(normalized);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if the current directory is a git repository.
|
||||||
|
* Looks for .git directory or file (worktree/submodule).
|
||||||
|
*
|
||||||
|
* @returns {Promise<boolean>} True if in a git repository
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const isRepo = await git.isGitRepository();
|
||||||
|
* if (!isRepo) {
|
||||||
|
* console.log('Not a git repository');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async isGitRepository(): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
// Check if .git exists (directory or file for submodules/worktrees)
|
||||||
|
const gitPath = path.join(this.projectPath, '.git');
|
||||||
|
|
||||||
|
if (await fs.pathExists(gitPath)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to find git root from subdirectory
|
||||||
|
try {
|
||||||
|
await this.git.revparse(['--git-dir']);
|
||||||
|
return true;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates that git is installed and accessible.
|
||||||
|
* Checks git binary availability and version.
|
||||||
|
*
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If git is not installed or not accessible
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.validateGitInstallation();
|
||||||
|
* console.log('Git is installed');
|
||||||
|
*/
|
||||||
|
async validateGitInstallation(): Promise<void> {
|
||||||
|
try {
|
||||||
|
await this.git.version();
|
||||||
|
} catch (error) {
|
||||||
|
const errorMessage =
|
||||||
|
error instanceof Error ? error.message : String(error);
|
||||||
|
throw new Error(
|
||||||
|
`Git is not installed or not accessible: ${errorMessage}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the git version information.
|
||||||
|
*
|
||||||
|
* @returns {Promise<{major: number, minor: number, patch: number, agent: string}>}
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const version = await git.getGitVersion();
|
||||||
|
* console.log(`Git version: ${version.major}.${version.minor}.${version.patch}`);
|
||||||
|
*/
|
||||||
|
async getGitVersion(): Promise<{
|
||||||
|
major: number;
|
||||||
|
minor: number;
|
||||||
|
patch: number;
|
||||||
|
agent: string;
|
||||||
|
}> {
|
||||||
|
const versionResult = await this.git.version();
|
||||||
|
return {
|
||||||
|
major: versionResult.major,
|
||||||
|
minor: versionResult.minor,
|
||||||
|
patch:
|
||||||
|
typeof versionResult.patch === 'string'
|
||||||
|
? parseInt(versionResult.patch)
|
||||||
|
: versionResult.patch || 0,
|
||||||
|
agent: versionResult.agent
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the repository root path.
|
||||||
|
* Works even when called from a subdirectory.
|
||||||
|
*
|
||||||
|
* @returns {Promise<string>} Absolute path to repository root
|
||||||
|
* @throws {Error} If not in a git repository
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const root = await git.getRepositoryRoot();
|
||||||
|
* console.log(`Repository root: ${root}`);
|
||||||
|
*/
|
||||||
|
async getRepositoryRoot(): Promise<string> {
|
||||||
|
try {
|
||||||
|
const result = await this.git.revparse(['--show-toplevel']);
|
||||||
|
return path.normalize(result.trim());
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error(`not a git repository: ${this.projectPath}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates the repository state.
|
||||||
|
* Checks for corruption and basic integrity.
|
||||||
|
*
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If repository is corrupted or invalid
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.validateRepository();
|
||||||
|
* console.log('Repository is valid');
|
||||||
|
*/
|
||||||
|
async validateRepository(): Promise<void> {
|
||||||
|
// Check if it's a git repository
|
||||||
|
const isRepo = await this.isGitRepository();
|
||||||
|
if (!isRepo) {
|
||||||
|
throw new Error(`not a git repository: ${this.projectPath}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to get repository status to verify it's not corrupted
|
||||||
|
try {
|
||||||
|
await this.git.status();
|
||||||
|
} catch (error) {
|
||||||
|
const errorMessage =
|
||||||
|
error instanceof Error ? error.message : String(error);
|
||||||
|
throw new Error(`Repository validation failed: ${errorMessage}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensures we're in a valid git repository before performing operations.
|
||||||
|
* Convenience method that throws descriptive errors.
|
||||||
|
*
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If not in a valid git repository
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.ensureGitRepository();
|
||||||
|
* // Safe to perform git operations after this
|
||||||
|
*/
|
||||||
|
async ensureGitRepository(): Promise<void> {
|
||||||
|
const isRepo = await this.isGitRepository();
|
||||||
|
if (!isRepo) {
|
||||||
|
throw new Error(
|
||||||
|
`not a git repository: ${this.projectPath}\n` +
|
||||||
|
`Please run this command from within a git repository, or initialize one with 'git init'.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if the working tree is clean (no uncommitted changes).
|
||||||
|
* A clean working tree has no staged, unstaged, or untracked files.
|
||||||
|
*
|
||||||
|
* @returns {Promise<boolean>} True if working tree is clean
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const isClean = await git.isWorkingTreeClean();
|
||||||
|
* if (!isClean) {
|
||||||
|
* console.log('Working tree has uncommitted changes');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async isWorkingTreeClean(): Promise<boolean> {
|
||||||
|
const status = await this.git.status();
|
||||||
|
return status.isClean();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the detailed status of the working tree.
|
||||||
|
* Returns raw status from simple-git with all file changes.
|
||||||
|
*
|
||||||
|
* @returns {Promise<import('simple-git').StatusResult>} Detailed status object
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const status = await git.getStatus();
|
||||||
|
* console.log('Modified files:', status.modified);
|
||||||
|
* console.log('Staged files:', status.staged);
|
||||||
|
*/
|
||||||
|
async getStatus(): Promise<import('simple-git').StatusResult> {
|
||||||
|
return await this.git.status();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if there are any uncommitted changes in the working tree.
|
||||||
|
* Includes staged, unstaged, and untracked files.
|
||||||
|
*
|
||||||
|
* @returns {Promise<boolean>} True if there are uncommitted changes
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const hasChanges = await git.hasUncommittedChanges();
|
||||||
|
* if (hasChanges) {
|
||||||
|
* console.log('Please commit your changes before proceeding');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async hasUncommittedChanges(): Promise<boolean> {
|
||||||
|
const status = await this.git.status();
|
||||||
|
return !status.isClean();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if there are any staged changes ready to commit.
|
||||||
|
*
|
||||||
|
* @returns {Promise<boolean>} True if there are staged changes
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const hasStaged = await git.hasStagedChanges();
|
||||||
|
* if (hasStaged) {
|
||||||
|
* console.log('Ready to commit');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async hasStagedChanges(): Promise<boolean> {
|
||||||
|
const status = await this.git.status();
|
||||||
|
return status.staged.length > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if there are any untracked files in the working tree.
|
||||||
|
*
|
||||||
|
* @returns {Promise<boolean>} True if there are untracked files
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const hasUntracked = await git.hasUntrackedFiles();
|
||||||
|
* if (hasUntracked) {
|
||||||
|
* console.log('You have untracked files');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async hasUntrackedFiles(): Promise<boolean> {
|
||||||
|
const status = await this.git.status();
|
||||||
|
return status.not_added.length > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets a summary of the working tree status with counts.
|
||||||
|
*
|
||||||
|
* @returns {Promise<{isClean: boolean, staged: number, modified: number, deleted: number, untracked: number, totalChanges: number}>}
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const summary = await git.getStatusSummary();
|
||||||
|
* console.log(`${summary.totalChanges} total changes`);
|
||||||
|
*/
|
||||||
|
async getStatusSummary(): Promise<{
|
||||||
|
isClean: boolean;
|
||||||
|
staged: number;
|
||||||
|
modified: number;
|
||||||
|
deleted: number;
|
||||||
|
untracked: number;
|
||||||
|
totalChanges: number;
|
||||||
|
}> {
|
||||||
|
const status = await this.git.status();
|
||||||
|
const staged = status.staged.length;
|
||||||
|
const modified = status.modified.length;
|
||||||
|
const deleted = status.deleted.length;
|
||||||
|
const untracked = status.not_added.length;
|
||||||
|
const totalChanges = staged + modified + deleted + untracked;
|
||||||
|
|
||||||
|
return {
|
||||||
|
isClean: status.isClean(),
|
||||||
|
staged,
|
||||||
|
modified,
|
||||||
|
deleted,
|
||||||
|
untracked,
|
||||||
|
totalChanges
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensures the working tree is clean before performing operations.
|
||||||
|
* Throws an error with details if there are uncommitted changes.
|
||||||
|
*
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If working tree is not clean
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.ensureCleanWorkingTree();
|
||||||
|
* // Safe to perform git operations that require clean state
|
||||||
|
*/
|
||||||
|
async ensureCleanWorkingTree(): Promise<void> {
|
||||||
|
const status = await this.git.status();
|
||||||
|
if (!status.isClean()) {
|
||||||
|
const summary = await this.getStatusSummary();
|
||||||
|
throw new Error(
|
||||||
|
`working tree is not clean: ${this.projectPath}\n` +
|
||||||
|
`Staged: ${summary.staged}, Modified: ${summary.modified}, ` +
|
||||||
|
`Deleted: ${summary.deleted}, Untracked: ${summary.untracked}\n` +
|
||||||
|
`Please commit or stash your changes before proceeding.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the name of the current branch.
|
||||||
|
*
|
||||||
|
* @returns {Promise<string>} Current branch name
|
||||||
|
* @throws {Error} If unable to determine current branch
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const branch = await git.getCurrentBranch();
|
||||||
|
* console.log(`Currently on: ${branch}`);
|
||||||
|
*/
|
||||||
|
async getCurrentBranch(): Promise<string> {
|
||||||
|
const status = await this.git.status();
|
||||||
|
return status.current || 'HEAD';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lists all local branches in the repository.
|
||||||
|
*
|
||||||
|
* @returns {Promise<string[]>} Array of branch names
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const branches = await git.listBranches();
|
||||||
|
* console.log('Available branches:', branches);
|
||||||
|
*/
|
||||||
|
async listBranches(): Promise<string[]> {
|
||||||
|
const branchSummary = await this.git.branchLocal();
|
||||||
|
return Object.keys(branchSummary.branches);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if a branch exists in the repository.
|
||||||
|
*
|
||||||
|
* @param {string} branchName - Name of branch to check
|
||||||
|
* @returns {Promise<boolean>} True if branch exists
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const exists = await git.branchExists('feature-branch');
|
||||||
|
* if (!exists) {
|
||||||
|
* console.log('Branch does not exist');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async branchExists(branchName: string): Promise<boolean> {
|
||||||
|
const branches = await this.listBranches();
|
||||||
|
return branches.includes(branchName);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new branch without checking it out.
|
||||||
|
*
|
||||||
|
* @param {string} branchName - Name for the new branch
|
||||||
|
* @param {Object} options - Branch creation options
|
||||||
|
* @param {boolean} options.checkout - Whether to checkout after creation
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If branch already exists or working tree is dirty (when checkout=true)
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.createBranch('feature-branch');
|
||||||
|
* await git.createBranch('feature-branch', { checkout: true });
|
||||||
|
*/
|
||||||
|
async createBranch(
|
||||||
|
branchName: string,
|
||||||
|
options: { checkout?: boolean } = {}
|
||||||
|
): Promise<void> {
|
||||||
|
// Check if branch already exists
|
||||||
|
const exists = await this.branchExists(branchName);
|
||||||
|
if (exists) {
|
||||||
|
throw new Error(`branch already exists: ${branchName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If checkout is requested, ensure working tree is clean
|
||||||
|
if (options.checkout) {
|
||||||
|
await this.ensureCleanWorkingTree();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the branch
|
||||||
|
await this.git.branch([branchName]);
|
||||||
|
|
||||||
|
// Checkout if requested
|
||||||
|
if (options.checkout) {
|
||||||
|
await this.git.checkout(branchName);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks out an existing branch.
|
||||||
|
*
|
||||||
|
* @param {string} branchName - Name of branch to checkout
|
||||||
|
* @param {Object} options - Checkout options
|
||||||
|
* @param {boolean} options.force - Force checkout even with uncommitted changes
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If branch doesn't exist or working tree is dirty (unless force=true)
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.checkoutBranch('feature-branch');
|
||||||
|
* await git.checkoutBranch('feature-branch', { force: true });
|
||||||
|
*/
|
||||||
|
async checkoutBranch(
|
||||||
|
branchName: string,
|
||||||
|
options: { force?: boolean } = {}
|
||||||
|
): Promise<void> {
|
||||||
|
// Check if branch exists
|
||||||
|
const exists = await this.branchExists(branchName);
|
||||||
|
if (!exists) {
|
||||||
|
throw new Error(`branch does not exist: ${branchName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure clean working tree unless force is specified
|
||||||
|
if (!options.force) {
|
||||||
|
await this.ensureCleanWorkingTree();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checkout the branch
|
||||||
|
const checkoutOptions = options.force ? ['-f', branchName] : [branchName];
|
||||||
|
await this.git.checkout(checkoutOptions);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new branch and checks it out.
|
||||||
|
* Convenience method combining createBranch and checkoutBranch.
|
||||||
|
*
|
||||||
|
* @param {string} branchName - Name for the new branch
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If branch already exists or working tree is dirty
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.createAndCheckoutBranch('new-feature');
|
||||||
|
*/
|
||||||
|
async createAndCheckoutBranch(branchName: string): Promise<void> {
|
||||||
|
// Ensure working tree is clean
|
||||||
|
await this.ensureCleanWorkingTree();
|
||||||
|
|
||||||
|
// Check if branch already exists
|
||||||
|
const exists = await this.branchExists(branchName);
|
||||||
|
if (exists) {
|
||||||
|
throw new Error(`branch already exists: ${branchName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create and checkout the branch
|
||||||
|
await this.git.checkoutLocalBranch(branchName);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Deletes a branch.
|
||||||
|
*
|
||||||
|
* @param {string} branchName - Name of branch to delete
|
||||||
|
* @param {Object} options - Delete options
|
||||||
|
* @param {boolean} options.force - Force delete even if unmerged
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If branch doesn't exist or is currently checked out
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.deleteBranch('old-feature');
|
||||||
|
* await git.deleteBranch('unmerged-feature', { force: true });
|
||||||
|
*/
|
||||||
|
async deleteBranch(
|
||||||
|
branchName: string,
|
||||||
|
options: { force?: boolean } = {}
|
||||||
|
): Promise<void> {
|
||||||
|
// Check if branch exists
|
||||||
|
const exists = await this.branchExists(branchName);
|
||||||
|
if (!exists) {
|
||||||
|
throw new Error(`branch does not exist: ${branchName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if trying to delete current branch
|
||||||
|
const current = await this.getCurrentBranch();
|
||||||
|
if (current === branchName) {
|
||||||
|
throw new Error(`cannot delete current branch: ${branchName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the branch
|
||||||
|
const deleteOptions = options.force
|
||||||
|
? ['-D', branchName]
|
||||||
|
: ['-d', branchName];
|
||||||
|
await this.git.branch(deleteOptions);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stages files for commit.
|
||||||
|
*
|
||||||
|
* @param {string[]} files - Array of file paths to stage
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.stageFiles(['file1.txt', 'file2.txt']);
|
||||||
|
* await git.stageFiles(['.']); // Stage all changes
|
||||||
|
*/
|
||||||
|
async stageFiles(files: string[]): Promise<void> {
|
||||||
|
await this.git.add(files);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Unstages files that were previously staged.
|
||||||
|
*
|
||||||
|
* @param {string[]} files - Array of file paths to unstage
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.unstageFiles(['file1.txt']);
|
||||||
|
*/
|
||||||
|
async unstageFiles(files: string[]): Promise<void> {
|
||||||
|
await this.git.reset(['HEAD', '--', ...files]);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a commit with optional metadata embedding.
|
||||||
|
*
|
||||||
|
* @param {string} message - Commit message
|
||||||
|
* @param {Object} options - Commit options
|
||||||
|
* @param {Object} options.metadata - Metadata to embed in commit message
|
||||||
|
* @param {boolean} options.allowEmpty - Allow empty commits
|
||||||
|
* @param {boolean} options.enforceNonDefaultBranch - Prevent commits on default branch
|
||||||
|
* @param {boolean} options.force - Force commit even on default branch
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If no staged changes (unless allowEmpty), or on default branch (unless force)
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.createCommit('Add feature');
|
||||||
|
* await git.createCommit('Add feature', {
|
||||||
|
* metadata: { taskId: '2.4', phase: 'implementation' }
|
||||||
|
* });
|
||||||
|
* await git.createCommit('Add feature', {
|
||||||
|
* enforceNonDefaultBranch: true
|
||||||
|
* });
|
||||||
|
*/
|
||||||
|
async createCommit(
|
||||||
|
message: string,
|
||||||
|
options: {
|
||||||
|
metadata?: Record<string, string>;
|
||||||
|
allowEmpty?: boolean;
|
||||||
|
enforceNonDefaultBranch?: boolean;
|
||||||
|
force?: boolean;
|
||||||
|
} = {}
|
||||||
|
): Promise<void> {
|
||||||
|
// Check if on default branch and enforcement is requested
|
||||||
|
if (options.enforceNonDefaultBranch && !options.force) {
|
||||||
|
const currentBranch = await this.getCurrentBranch();
|
||||||
|
const defaultBranches = ['main', 'master', 'develop'];
|
||||||
|
if (defaultBranches.includes(currentBranch)) {
|
||||||
|
throw new Error(
|
||||||
|
`cannot commit to default branch: ${currentBranch}\n` +
|
||||||
|
`Please create a feature branch or use force option.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for staged changes unless allowEmpty
|
||||||
|
if (!options.allowEmpty) {
|
||||||
|
const hasStaged = await this.hasStagedChanges();
|
||||||
|
if (!hasStaged) {
|
||||||
|
throw new Error('no staged changes to commit');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build commit arguments
|
||||||
|
const commitArgs: string[] = ['commit'];
|
||||||
|
|
||||||
|
// Add message
|
||||||
|
commitArgs.push('-m', message);
|
||||||
|
|
||||||
|
// Add metadata as separate commit message lines
|
||||||
|
if (options.metadata) {
|
||||||
|
commitArgs.push('-m', ''); // Empty line separator
|
||||||
|
for (const [key, value] of Object.entries(options.metadata)) {
|
||||||
|
commitArgs.push('-m', `[${key}:${value}]`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add flags
|
||||||
|
commitArgs.push('--no-gpg-sign');
|
||||||
|
if (options.allowEmpty) {
|
||||||
|
commitArgs.push('--allow-empty');
|
||||||
|
}
|
||||||
|
|
||||||
|
await this.git.raw(commitArgs);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the commit log history.
|
||||||
|
*
|
||||||
|
* @param {Object} options - Log options
|
||||||
|
* @param {number} options.maxCount - Maximum number of commits to return
|
||||||
|
* @returns {Promise<Array>} Array of commit objects
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const log = await git.getCommitLog();
|
||||||
|
* const recentLog = await git.getCommitLog({ maxCount: 10 });
|
||||||
|
*/
|
||||||
|
async getCommitLog(options: { maxCount?: number } = {}): Promise<any[]> {
|
||||||
|
const logOptions: any = {
|
||||||
|
format: {
|
||||||
|
hash: '%H',
|
||||||
|
date: '%ai',
|
||||||
|
message: '%B', // Full commit message including body
|
||||||
|
author_name: '%an',
|
||||||
|
author_email: '%ae'
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if (options.maxCount) {
|
||||||
|
logOptions.maxCount = options.maxCount;
|
||||||
|
}
|
||||||
|
|
||||||
|
const log = await this.git.log(logOptions);
|
||||||
|
return [...log.all];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the last commit.
|
||||||
|
*
|
||||||
|
* @returns {Promise<any>} Last commit object
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const lastCommit = await git.getLastCommit();
|
||||||
|
* console.log(lastCommit.message);
|
||||||
|
*/
|
||||||
|
async getLastCommit(): Promise<any> {
|
||||||
|
const log = await this.git.log({
|
||||||
|
maxCount: 1,
|
||||||
|
format: {
|
||||||
|
hash: '%H',
|
||||||
|
date: '%ai',
|
||||||
|
message: '%B', // Full commit message including body
|
||||||
|
author_name: '%an',
|
||||||
|
author_email: '%ae'
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return log.latest;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Detects the default branch for the repository.
|
||||||
|
* Returns the current branch name, assuming it's the default if it's main/master/develop.
|
||||||
|
*
|
||||||
|
* @returns {Promise<string>} Default branch name
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const defaultBranch = await git.getDefaultBranch();
|
||||||
|
* console.log(`Default branch: ${defaultBranch}`);
|
||||||
|
*/
|
||||||
|
async getDefaultBranch(): Promise<string> {
|
||||||
|
const currentBranch = await this.getCurrentBranch();
|
||||||
|
const defaultBranches = ['main', 'master', 'develop'];
|
||||||
|
|
||||||
|
if (defaultBranches.includes(currentBranch)) {
|
||||||
|
return currentBranch;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If not on a default branch, check which default branches exist
|
||||||
|
const branches = await this.listBranches();
|
||||||
|
for (const defaultBranch of defaultBranches) {
|
||||||
|
if (branches.includes(defaultBranch)) {
|
||||||
|
return defaultBranch;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback to main
|
||||||
|
return 'main';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if a given branch name is considered a default branch.
|
||||||
|
* Default branches are: main, master, develop.
|
||||||
|
*
|
||||||
|
* @param {string} branchName - Branch name to check
|
||||||
|
* @returns {Promise<boolean>} True if branch is a default branch
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const isDefault = await git.isDefaultBranch('main');
|
||||||
|
* if (isDefault) {
|
||||||
|
* console.log('This is a default branch');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async isDefaultBranch(branchName: string): Promise<boolean> {
|
||||||
|
const defaultBranches = ['main', 'master', 'develop'];
|
||||||
|
return defaultBranches.includes(branchName);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if currently on a default branch.
|
||||||
|
*
|
||||||
|
* @returns {Promise<boolean>} True if on a default branch
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const onDefault = await git.isOnDefaultBranch();
|
||||||
|
* if (onDefault) {
|
||||||
|
* console.log('Warning: You are on a default branch');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async isOnDefaultBranch(): Promise<boolean> {
|
||||||
|
const currentBranch = await this.getCurrentBranch();
|
||||||
|
return await this.isDefaultBranch(currentBranch);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensures the current branch is not a default branch.
|
||||||
|
* Throws an error if on a default branch.
|
||||||
|
*
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
* @throws {Error} If currently on a default branch
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await git.ensureNotOnDefaultBranch();
|
||||||
|
* // Safe to perform operations that shouldn't happen on default branches
|
||||||
|
*/
|
||||||
|
async ensureNotOnDefaultBranch(): Promise<void> {
|
||||||
|
const onDefault = await this.isOnDefaultBranch();
|
||||||
|
if (onDefault) {
|
||||||
|
const currentBranch = await this.getCurrentBranch();
|
||||||
|
throw new Error(
|
||||||
|
`currently on default branch: ${currentBranch}\n` +
|
||||||
|
`Please create a feature branch before proceeding.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if the repository has any remotes configured.
|
||||||
|
*
|
||||||
|
* @returns {Promise<boolean>} True if remotes exist
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const hasRemote = await git.hasRemote();
|
||||||
|
* if (!hasRemote) {
|
||||||
|
* console.log('No remotes configured');
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
async hasRemote(): Promise<boolean> {
|
||||||
|
const remotes = await this.git.getRemotes();
|
||||||
|
return remotes.length > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets all configured remotes.
|
||||||
|
*
|
||||||
|
* @returns {Promise<Array>} Array of remote objects
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const remotes = await git.getRemotes();
|
||||||
|
* console.log('Remotes:', remotes);
|
||||||
|
*/
|
||||||
|
async getRemotes(): Promise<any[]> {
|
||||||
|
return await this.git.getRemotes(true);
|
||||||
|
}
|
||||||
|
}
|
||||||
13
packages/tm-core/src/git/index.ts
Normal file
13
packages/tm-core/src/git/index.ts
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
/**
 * @fileoverview Git operations layer for the tm-core package
 * This file exports all git-related classes and interfaces
 */

// Export GitAdapter — wrapper around simple-git for branch/commit operations
export { GitAdapter } from './git-adapter.js';

// Export branch name utilities
export {
	generateBranchName,
	sanitizeBranchName
} from './branch-name-generator.js';
||||||
284
packages/tm-core/src/git/scope-detector.test.ts
Normal file
284
packages/tm-core/src/git/scope-detector.test.ts
Normal file
@@ -0,0 +1,284 @@
|
|||||||
|
import { describe, it, expect, beforeEach } from 'vitest';
|
||||||
|
import { ScopeDetector } from './scope-detector.js';
|
||||||
|
|
||||||
|
// Unit tests for ScopeDetector: scope resolution from changed file paths,
// custom mapping/priority overrides, and path-format edge cases.
describe('ScopeDetector', () => {
	let scopeDetector: ScopeDetector;

	beforeEach(() => {
		scopeDetector = new ScopeDetector();
	});

	describe('detectScope', () => {
		it('should detect cli scope from CLI file changes', () => {
			const files = ['packages/cli/src/commands/start.ts'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('cli');
		});

		it('should detect core scope from core package changes', () => {
			const files = ['packages/tm-core/src/workflow/orchestrator.ts'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('core');
		});

		it('should detect test scope from test file changes', () => {
			const files = ['packages/tm-core/src/workflow/orchestrator.test.ts'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('test');
		});

		it('should detect docs scope from documentation changes', () => {
			const files = ['README.md', 'docs/guide.md'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('docs');
		});

		it('should detect config scope from configuration changes', () => {
			const files = ['tsconfig.json'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('config');
		});

		it('should detect workflow scope from workflow files', () => {
			const files = ['packages/tm-core/src/workflow/types.ts'];
			const scope = scopeDetector.detectScope(files);

			// Files within packages get the package scope (more specific than feature scope)
			expect(scope).toBe('core');
		});

		it('should detect git scope from git adapter files', () => {
			const files = ['packages/tm-core/src/git/git-adapter.ts'];
			const scope = scopeDetector.detectScope(files);

			// Files within packages get the package scope (more specific than feature scope)
			expect(scope).toBe('core');
		});

		it('should detect storage scope from storage files', () => {
			const files = ['packages/tm-core/src/storage/state-manager.ts'];
			const scope = scopeDetector.detectScope(files);

			// Files within packages get the package scope (more specific than feature scope)
			expect(scope).toBe('core');
		});

		it('should use most relevant scope when multiple files', () => {
			const files = [
				'packages/cli/src/commands/start.ts',
				'packages/cli/src/commands/stop.ts',
				'packages/tm-core/src/types.ts'
			];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('cli');
		});

		it('should handle mixed scopes by choosing highest priority', () => {
			const files = [
				'README.md',
				'packages/tm-core/src/workflow/orchestrator.ts'
			];
			const scope = scopeDetector.detectScope(files);

			// Core is higher priority than docs
			expect(scope).toBe('core');
		});

		it('should handle empty file list gracefully', () => {
			const files: string[] = [];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('repo');
		});

		it('should detect mcp scope from MCP server files', () => {
			const files = ['packages/mcp-server/src/tools.ts'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('mcp');
		});

		it('should detect auth scope from authentication files', () => {
			const files = ['packages/tm-core/src/auth/auth-manager.ts'];
			const scope = scopeDetector.detectScope(files);

			// Files within packages get the package scope (more specific than feature scope)
			expect(scope).toBe('core');
		});

		it('should detect deps scope from dependency changes', () => {
			const files = ['pnpm-lock.yaml'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('deps');
		});
	});

	describe('detectScopeWithCustomRules', () => {
		it('should use custom scope mapping rules', () => {
			const customRules: Record<string, number> = {
				custom: 100
			};

			const customDetector = new ScopeDetector(
				{
					'custom/**': 'custom'
				},
				customRules
			);

			const files = ['custom/file.ts'];
			const scope = customDetector.detectScope(files);

			expect(scope).toBe('custom');
		});

		it('should override default priorities with custom priorities', () => {
			const customPriorities: Record<string, number> = {
				docs: 100, // Make docs highest priority
				core: 10
			};

			const customDetector = new ScopeDetector(undefined, customPriorities);

			const files = [
				'README.md',
				'packages/tm-core/src/workflow/orchestrator.ts'
			];
			const scope = customDetector.detectScope(files);

			expect(scope).toBe('docs');
		});
	});

	describe('getAllMatchingScopes', () => {
		it('should return all matching scopes for files', () => {
			const files = [
				'packages/cli/src/commands/start.ts',
				'packages/tm-core/src/workflow/orchestrator.ts',
				'README.md'
			];

			const scopes = scopeDetector.getAllMatchingScopes(files);

			expect(scopes).toContain('cli');
			expect(scopes).toContain('core');
			expect(scopes).toContain('docs');
			expect(scopes).toHaveLength(3);
		});

		it('should return unique scopes only', () => {
			const files = [
				'packages/cli/src/commands/start.ts',
				'packages/cli/src/commands/stop.ts'
			];

			const scopes = scopeDetector.getAllMatchingScopes(files);

			expect(scopes).toEqual(['cli']);
		});

		it('should return empty array for files with no matches', () => {
			const files = ['unknown/path/file.ts'];
			const scopes = scopeDetector.getAllMatchingScopes(files);

			expect(scopes).toEqual([]);
		});
	});

	describe('getScopePriority', () => {
		it('should return priority for known scope', () => {
			const priority = scopeDetector.getScopePriority('core');

			expect(priority).toBeGreaterThan(0);
		});

		it('should return 0 for unknown scope', () => {
			const priority = scopeDetector.getScopePriority('nonexistent');

			expect(priority).toBe(0);
		});

		it('should prioritize core > cli > test > docs', () => {
			const corePriority = scopeDetector.getScopePriority('core');
			const cliPriority = scopeDetector.getScopePriority('cli');
			const testPriority = scopeDetector.getScopePriority('test');
			const docsPriority = scopeDetector.getScopePriority('docs');

			expect(corePriority).toBeGreaterThan(cliPriority);
			expect(cliPriority).toBeGreaterThan(testPriority);
			expect(testPriority).toBeGreaterThan(docsPriority);
		});
	});

	describe('edge cases', () => {
		it('should handle Windows paths', () => {
			const files = ['packages\\cli\\src\\commands\\start.ts'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('cli');
		});

		it('should handle absolute paths', () => {
			const files = [
				'/home/user/project/packages/tm-core/src/workflow/orchestrator.ts'
			];
			const scope = scopeDetector.detectScope(files);

			// Absolute paths won't match package patterns
			expect(scope).toBe('workflow');
		});

		it('should handle paths with special characters', () => {
			const files = ['packages/tm-core/src/workflow/orchestrator@v2.ts'];
			const scope = scopeDetector.detectScope(files);

			// Files within packages get the package scope
			expect(scope).toBe('core');
		});

		it('should handle very long file paths', () => {
			const files = [
				'packages/tm-core/src/deeply/nested/directory/structure/with/many/levels/file.ts'
			];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('core');
		});

		it('should handle files in root directory', () => {
			const files = ['file.ts'];
			const scope = scopeDetector.detectScope(files);

			expect(scope).toBe('repo');
		});
	});

	describe('getMatchingScope', () => {
		it('should return matching scope for single file', () => {
			const scope = scopeDetector.getMatchingScope('packages/cli/src/index.ts');

			expect(scope).toBe('cli');
		});

		it('should return null for non-matching file', () => {
			const scope = scopeDetector.getMatchingScope('unknown/file.ts');

			expect(scope).toBeNull();
		});

		it('should match test files', () => {
			const scope = scopeDetector.getMatchingScope(
				'src/components/button.test.tsx'
			);

			expect(scope).toBe('test');
		});
	});
});
|
||||||
204
packages/tm-core/src/git/scope-detector.ts
Normal file
204
packages/tm-core/src/git/scope-detector.ts
Normal file
@@ -0,0 +1,204 @@
|
|||||||
|
/**
 * ScopeDetector - Intelligent scope detection from changed files
 *
 * Automatically determines conventional commit scopes based on file paths
 * using configurable pattern matching and priority-based resolution.
 */
|
||||||
|
|
||||||
|
export interface ScopeMapping {
|
||||||
|
[pattern: string]: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ScopePriority {
|
||||||
|
[scope: string]: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ordered from most specific to least specific
|
||||||
|
const DEFAULT_SCOPE_MAPPINGS: Array<[string, string]> = [
|
||||||
|
// Special file types (check first - most specific)
|
||||||
|
['**/*.test.*', 'test'],
|
||||||
|
['**/*.spec.*', 'test'],
|
||||||
|
['**/test/**', 'test'],
|
||||||
|
['**/tests/**', 'test'],
|
||||||
|
['**/__tests__/**', 'test'],
|
||||||
|
|
||||||
|
// Dependencies (specific files)
|
||||||
|
['**/package-lock.json', 'deps'],
|
||||||
|
['package-lock.json', 'deps'],
|
||||||
|
['**/pnpm-lock.yaml', 'deps'],
|
||||||
|
['pnpm-lock.yaml', 'deps'],
|
||||||
|
['**/yarn.lock', 'deps'],
|
||||||
|
['yarn.lock', 'deps'],
|
||||||
|
|
||||||
|
// Configuration files (before packages so root configs don't match package patterns)
|
||||||
|
['**/package.json', 'config'],
|
||||||
|
['package.json', 'config'],
|
||||||
|
['**/tsconfig*.json', 'config'],
|
||||||
|
['tsconfig*.json', 'config'],
|
||||||
|
['**/.eslintrc*', 'config'],
|
||||||
|
['.eslintrc*', 'config'],
|
||||||
|
['**/vite.config.*', 'config'],
|
||||||
|
['vite.config.*', 'config'],
|
||||||
|
['**/vitest.config.*', 'config'],
|
||||||
|
['vitest.config.*', 'config'],
|
||||||
|
|
||||||
|
// Package-level scopes (more specific than feature-level)
|
||||||
|
['packages/cli/**', 'cli'],
|
||||||
|
['packages/tm-core/**', 'core'],
|
||||||
|
['packages/mcp-server/**', 'mcp'],
|
||||||
|
|
||||||
|
// Feature-level scopes (within any package)
|
||||||
|
['**/workflow/**', 'workflow'],
|
||||||
|
['**/git/**', 'git'],
|
||||||
|
['**/storage/**', 'storage'],
|
||||||
|
['**/auth/**', 'auth'],
|
||||||
|
['**/config/**', 'config'],
|
||||||
|
|
||||||
|
// Documentation (least specific)
|
||||||
|
['**/*.md', 'docs'],
|
||||||
|
['**/docs/**', 'docs'],
|
||||||
|
['README*', 'docs'],
|
||||||
|
['CHANGELOG*', 'docs']
|
||||||
|
];
|
||||||
|
|
||||||
|
const DEFAULT_SCOPE_PRIORITIES: ScopePriority = {
|
||||||
|
core: 100,
|
||||||
|
cli: 90,
|
||||||
|
mcp: 85,
|
||||||
|
workflow: 80,
|
||||||
|
git: 75,
|
||||||
|
storage: 70,
|
||||||
|
auth: 65,
|
||||||
|
config: 60,
|
||||||
|
test: 50,
|
||||||
|
docs: 30,
|
||||||
|
deps: 20,
|
||||||
|
repo: 10
|
||||||
|
};
|
||||||
|
|
||||||
|
export class ScopeDetector {
|
||||||
|
private scopeMappings: Array<[string, string]>;
|
||||||
|
private scopePriorities: ScopePriority;
|
||||||
|
|
||||||
|
constructor(customMappings?: ScopeMapping, customPriorities?: ScopePriority) {
|
||||||
|
// Start with default mappings
|
||||||
|
this.scopeMappings = [...DEFAULT_SCOPE_MAPPINGS];
|
||||||
|
|
||||||
|
// Add custom mappings at the start (highest priority)
|
||||||
|
if (customMappings) {
|
||||||
|
const customEntries = Object.entries(customMappings);
|
||||||
|
this.scopeMappings = [...customEntries, ...this.scopeMappings];
|
||||||
|
}
|
||||||
|
|
||||||
|
this.scopePriorities = {
|
||||||
|
...DEFAULT_SCOPE_PRIORITIES,
|
||||||
|
...customPriorities
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Detect the most relevant scope from a list of changed files
|
||||||
|
* Returns the scope with the highest priority
|
||||||
|
*/
|
||||||
|
detectScope(files: string[]): string {
|
||||||
|
if (files.length === 0) {
|
||||||
|
return 'repo';
|
||||||
|
}
|
||||||
|
|
||||||
|
const scopeCounts = new Map<string, number>();
|
||||||
|
|
||||||
|
// Count occurrences of each scope
|
||||||
|
for (const file of files) {
|
||||||
|
const scope = this.getMatchingScope(file);
|
||||||
|
if (scope) {
|
||||||
|
scopeCounts.set(scope, (scopeCounts.get(scope) || 0) + 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no scopes matched, default to 'repo'
|
||||||
|
if (scopeCounts.size === 0) {
|
||||||
|
return 'repo';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find scope with highest priority (considering both priority and count)
|
||||||
|
let bestScope = 'repo';
|
||||||
|
let bestScore = 0;
|
||||||
|
|
||||||
|
for (const [scope, count] of scopeCounts) {
|
||||||
|
const priority = this.getScopePriority(scope);
|
||||||
|
// Score = priority * count (files in that scope)
|
||||||
|
const score = priority * count;
|
||||||
|
|
||||||
|
if (score > bestScore) {
|
||||||
|
bestScore = score;
|
||||||
|
bestScope = scope;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return bestScope;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all matching scopes for the given files
|
||||||
|
*/
|
||||||
|
getAllMatchingScopes(files: string[]): string[] {
|
||||||
|
const scopes = new Set<string>();
|
||||||
|
|
||||||
|
for (const file of files) {
|
||||||
|
const scope = this.getMatchingScope(file);
|
||||||
|
if (scope) {
|
||||||
|
scopes.add(scope);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Array.from(scopes);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the matching scope for a single file
|
||||||
|
* Returns the first matching scope (order matters!)
|
||||||
|
*/
|
||||||
|
getMatchingScope(file: string): string | null {
|
||||||
|
// Normalize path separators
|
||||||
|
const normalizedFile = file.replace(/\\/g, '/');
|
||||||
|
|
||||||
|
for (const [pattern, scope] of this.scopeMappings) {
|
||||||
|
if (this.matchesPattern(normalizedFile, pattern)) {
|
||||||
|
return scope;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the priority of a scope
|
||||||
|
*/
|
||||||
|
getScopePriority(scope: string): number {
|
||||||
|
return this.scopePriorities[scope] || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Match a file path against a glob-like pattern
|
||||||
|
* Supports:
|
||||||
|
* - ** for multi-level directory matching
|
||||||
|
* - * for single-level matching
|
||||||
|
*/
|
||||||
|
private matchesPattern(filePath: string, pattern: string): boolean {
|
||||||
|
// Replace ** first with a unique placeholder
|
||||||
|
let regexPattern = pattern.replace(/\*\*/g, '§GLOBSTAR§');
|
||||||
|
|
||||||
|
// Escape special regex characters (but not our placeholder or *)
|
||||||
|
regexPattern = regexPattern.replace(/[.+^${}()|[\]\\]/g, '\\$&');
|
||||||
|
|
||||||
|
// Replace single * with [^/]* (matches anything except /)
|
||||||
|
regexPattern = regexPattern.replace(/\*/g, '[^/]*');
|
||||||
|
|
||||||
|
// Replace placeholder with .* (matches anything including /)
|
||||||
|
regexPattern = regexPattern.replace(/§GLOBSTAR§/g, '.*');
|
||||||
|
|
||||||
|
const regex = new RegExp(`^${regexPattern}$`);
|
||||||
|
return regex.test(filePath);
|
||||||
|
}
|
||||||
|
}
|
||||||
277
packages/tm-core/src/git/template-engine.test.ts
Normal file
277
packages/tm-core/src/git/template-engine.test.ts
Normal file
@@ -0,0 +1,277 @@
|
|||||||
|
import { describe, it, expect, beforeEach } from 'vitest';
|
||||||
|
import { TemplateEngine } from './template-engine.js';
|
||||||
|
|
||||||
|
describe('TemplateEngine', () => {
|
||||||
|
let templateEngine: TemplateEngine;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
templateEngine = new TemplateEngine();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('constructor and initialization', () => {
|
||||||
|
it('should initialize with default templates', () => {
|
||||||
|
expect(templateEngine).toBeDefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should accept custom templates in constructor', () => {
|
||||||
|
const customTemplate = '{{type}}({{scope}}): {{description}}';
|
||||||
|
const engine = new TemplateEngine({ commitMessage: customTemplate });
|
||||||
|
|
||||||
|
const result = engine.render('commitMessage', {
|
||||||
|
type: 'feat',
|
||||||
|
scope: 'core',
|
||||||
|
description: 'add feature'
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result).toBe('feat(core): add feature');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('render', () => {
|
||||||
|
it('should render simple template with single variable', () => {
|
||||||
|
const template = 'Hello {{name}}';
|
||||||
|
const result = templateEngine.render('test', { name: 'World' }, template);
|
||||||
|
|
||||||
|
expect(result).toBe('Hello World');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should render template with multiple variables', () => {
|
||||||
|
const template = '{{type}}({{scope}}): {{description}}';
|
||||||
|
const result = templateEngine.render(
|
||||||
|
'test',
|
||||||
|
{
|
||||||
|
type: 'feat',
|
||||||
|
scope: 'api',
|
||||||
|
description: 'add endpoint'
|
||||||
|
},
|
||||||
|
template
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(result).toBe('feat(api): add endpoint');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle missing variables by leaving placeholder', () => {
|
||||||
|
const template = 'Hello {{name}} from {{location}}';
|
||||||
|
const result = templateEngine.render('test', { name: 'Alice' }, template);
|
||||||
|
|
||||||
|
expect(result).toBe('Hello Alice from {{location}}');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty variable values', () => {
|
||||||
|
const template = '{{prefix}}{{message}}';
|
||||||
|
const result = templateEngine.render(
|
||||||
|
'test',
|
||||||
|
{
|
||||||
|
prefix: '',
|
||||||
|
message: 'hello'
|
||||||
|
},
|
||||||
|
template
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(result).toBe('hello');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle numeric values', () => {
|
||||||
|
const template = 'Count: {{count}}';
|
||||||
|
const result = templateEngine.render('test', { count: 42 }, template);
|
||||||
|
|
||||||
|
expect(result).toBe('Count: 42');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle boolean values', () => {
|
||||||
|
const template = 'Active: {{active}}';
|
||||||
|
const result = templateEngine.render('test', { active: true }, template);
|
||||||
|
|
||||||
|
expect(result).toBe('Active: true');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('setTemplate', () => {
|
||||||
|
it('should set and use custom template', () => {
|
||||||
|
templateEngine.setTemplate('custom', 'Value: {{value}}');
|
||||||
|
const result = templateEngine.render('custom', { value: '123' });
|
||||||
|
|
||||||
|
expect(result).toBe('Value: 123');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should override existing template', () => {
|
||||||
|
templateEngine.setTemplate('commitMessage', 'Custom: {{msg}}');
|
||||||
|
const result = templateEngine.render('commitMessage', { msg: 'hello' });
|
||||||
|
|
||||||
|
expect(result).toBe('Custom: hello');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getTemplate', () => {
|
||||||
|
it('should return existing template', () => {
|
||||||
|
templateEngine.setTemplate('test', 'Template: {{value}}');
|
||||||
|
const template = templateEngine.getTemplate('test');
|
||||||
|
|
||||||
|
expect(template).toBe('Template: {{value}}');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined for non-existent template', () => {
|
||||||
|
const template = templateEngine.getTemplate('nonexistent');
|
||||||
|
|
||||||
|
expect(template).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('hasTemplate', () => {
|
||||||
|
it('should return true for existing template', () => {
|
||||||
|
templateEngine.setTemplate('test', 'Template');
|
||||||
|
|
||||||
|
expect(templateEngine.hasTemplate('test')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for non-existent template', () => {
|
||||||
|
expect(templateEngine.hasTemplate('nonexistent')).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('validateTemplate', () => {
|
||||||
|
it('should validate template with all required variables', () => {
|
||||||
|
const template = '{{type}}({{scope}}): {{description}}';
|
||||||
|
const requiredVars = ['type', 'scope', 'description'];
|
||||||
|
|
||||||
|
const result = templateEngine.validateTemplate(template, requiredVars);
|
||||||
|
|
||||||
|
expect(result.isValid).toBe(true);
|
||||||
|
expect(result.missingVars).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should detect missing required variables', () => {
|
||||||
|
const template = '{{type}}: {{description}}';
|
||||||
|
const requiredVars = ['type', 'scope', 'description'];
|
||||||
|
|
||||||
|
const result = templateEngine.validateTemplate(template, requiredVars);
|
||||||
|
|
||||||
|
expect(result.isValid).toBe(false);
|
||||||
|
expect(result.missingVars).toEqual(['scope']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should detect multiple missing variables', () => {
|
||||||
|
const template = '{{type}}';
|
||||||
|
const requiredVars = ['type', 'scope', 'description'];
|
||||||
|
|
||||||
|
const result = templateEngine.validateTemplate(template, requiredVars);
|
||||||
|
|
||||||
|
expect(result.isValid).toBe(false);
|
||||||
|
expect(result.missingVars).toEqual(['scope', 'description']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle optional variables in template', () => {
|
||||||
|
const template = '{{type}}({{scope}}): {{description}} [{{taskId}}]';
|
||||||
|
const requiredVars = ['type', 'scope', 'description'];
|
||||||
|
|
||||||
|
const result = templateEngine.validateTemplate(template, requiredVars);
|
||||||
|
|
||||||
|
expect(result.isValid).toBe(true);
|
||||||
|
expect(result.missingVars).toEqual([]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('extractVariables', () => {
|
||||||
|
it('should extract all variables from template', () => {
|
||||||
|
const template = '{{type}}({{scope}}): {{description}}';
|
||||||
|
const variables = templateEngine.extractVariables(template);
|
||||||
|
|
||||||
|
expect(variables).toEqual(['type', 'scope', 'description']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should extract unique variables only', () => {
|
||||||
|
const template = '{{name}} and {{name}} with {{other}}';
|
||||||
|
const variables = templateEngine.extractVariables(template);
|
||||||
|
|
||||||
|
expect(variables).toEqual(['name', 'other']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array for template without variables', () => {
|
||||||
|
const template = 'Static text with no variables';
|
||||||
|
const variables = templateEngine.extractVariables(template);
|
||||||
|
|
||||||
|
expect(variables).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle template with whitespace in placeholders', () => {
|
||||||
|
const template = '{{ type }} and {{ scope }}';
|
||||||
|
const variables = templateEngine.extractVariables(template);
|
||||||
|
|
||||||
|
expect(variables).toEqual(['type', 'scope']);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('edge cases', () => {
|
||||||
|
it('should handle empty template', () => {
|
||||||
|
const result = templateEngine.render('test', { name: 'value' }, '');
|
||||||
|
|
||||||
|
expect(result).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle template with no variables', () => {
|
||||||
|
const template = 'Static text';
|
||||||
|
const result = templateEngine.render('test', {}, template);
|
||||||
|
|
||||||
|
expect(result).toBe('Static text');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty variables object', () => {
|
||||||
|
const template = 'Hello {{name}}';
|
||||||
|
const result = templateEngine.render('test', {}, template);
|
||||||
|
|
||||||
|
expect(result).toBe('Hello {{name}}');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle special characters in values', () => {
|
||||||
|
const template = 'Value: {{value}}';
|
||||||
|
const result = templateEngine.render(
|
||||||
|
'test',
|
||||||
|
{
|
||||||
|
value: 'hello$world{test}'
|
||||||
|
},
|
||||||
|
template
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(result).toBe('Value: hello$world{test}');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiline templates', () => {
|
||||||
|
const template = '{{type}}: {{description}}\n\n{{body}}';
|
||||||
|
const result = templateEngine.render(
|
||||||
|
'test',
|
||||||
|
{
|
||||||
|
type: 'feat',
|
||||||
|
description: 'add feature',
|
||||||
|
body: 'Details here'
|
||||||
|
},
|
||||||
|
template
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(result).toBe('feat: add feature\n\nDetails here');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('default commit message template', () => {
|
||||||
|
it('should have default commit message template', () => {
|
||||||
|
const template = templateEngine.getTemplate('commitMessage');
|
||||||
|
|
||||||
|
expect(template).toBeDefined();
|
||||||
|
expect(template).toContain('{{type}}');
|
||||||
|
expect(template).toContain('{{description}}');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should render default commit message template', () => {
|
||||||
|
const result = templateEngine.render('commitMessage', {
|
||||||
|
type: 'feat',
|
||||||
|
scope: 'core',
|
||||||
|
description: 'implement feature',
|
||||||
|
body: 'Additional details',
|
||||||
|
taskId: '5.1'
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result).toContain('feat');
|
||||||
|
expect(result).toContain('core');
|
||||||
|
expect(result).toContain('implement feature');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
203
packages/tm-core/src/git/template-engine.ts
Normal file
203
packages/tm-core/src/git/template-engine.ts
Normal file
@@ -0,0 +1,203 @@
|
|||||||
|
/**
|
||||||
|
* TemplateEngine - Configurable template system for generating text from templates
|
||||||
|
*
|
||||||
|
* Supports:
|
||||||
|
* - Variable substitution using {{variableName}} syntax
|
||||||
|
* - Custom templates via constructor or setTemplate
|
||||||
|
* - Template validation with required variables
|
||||||
|
* - Variable extraction from templates
|
||||||
|
* - Multiple template storage and retrieval
|
||||||
|
*/
|
||||||
|
|
||||||
|
export interface TemplateValidationResult {
|
||||||
|
isValid: boolean;
|
||||||
|
missingVars: string[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TemplateVariables {
|
||||||
|
[key: string]: string | number | boolean | undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TemplateCollection {
|
||||||
|
[templateName: string]: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TemplateEngineOptions {
|
||||||
|
customTemplates?: TemplateCollection;
|
||||||
|
preservePlaceholders?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
const DEFAULT_TEMPLATES: TemplateCollection = {
|
||||||
|
commitMessage: `{{type}}{{#scope}}({{scope}}){{/scope}}{{#breaking}}!{{/breaking}}: {{description}}
|
||||||
|
|
||||||
|
{{#body}}{{body}}
|
||||||
|
|
||||||
|
{{/body}}{{#taskId}}Task: {{taskId}}{{/taskId}}{{#phase}}
|
||||||
|
Phase: {{phase}}{{/phase}}{{#testsPassing}}
|
||||||
|
Tests: {{testsPassing}} passing{{#testsFailing}}, {{testsFailing}} failing{{/testsFailing}}{{/testsPassing}}`
|
||||||
|
};
|
||||||
|
|
||||||
|
export class TemplateEngine {
|
||||||
|
private templates: TemplateCollection;
|
||||||
|
private preservePlaceholders: boolean;
|
||||||
|
|
||||||
|
constructor(
|
||||||
|
optionsOrTemplates: TemplateEngineOptions | TemplateCollection = {}
|
||||||
|
) {
|
||||||
|
// Backward compatibility: support old signature (TemplateCollection) and new signature (TemplateEngineOptions)
|
||||||
|
const isOptions =
|
||||||
|
'customTemplates' in optionsOrTemplates ||
|
||||||
|
'preservePlaceholders' in optionsOrTemplates;
|
||||||
|
const options: TemplateEngineOptions = isOptions
|
||||||
|
? (optionsOrTemplates as TemplateEngineOptions)
|
||||||
|
: { customTemplates: optionsOrTemplates as TemplateCollection };
|
||||||
|
|
||||||
|
this.templates = {
|
||||||
|
...DEFAULT_TEMPLATES,
|
||||||
|
...(options.customTemplates || {})
|
||||||
|
};
|
||||||
|
this.preservePlaceholders = options.preservePlaceholders ?? false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Render a template with provided variables
|
||||||
|
*/
|
||||||
|
render(
|
||||||
|
templateName: string,
|
||||||
|
variables: TemplateVariables,
|
||||||
|
inlineTemplate?: string
|
||||||
|
): string {
|
||||||
|
const template =
|
||||||
|
inlineTemplate !== undefined
|
||||||
|
? inlineTemplate
|
||||||
|
: this.templates[templateName];
|
||||||
|
|
||||||
|
if (template === undefined) {
|
||||||
|
throw new Error(`Template "${templateName}" not found`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return this.substituteVariables(template, variables);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set or update a template
|
||||||
|
*/
|
||||||
|
setTemplate(name: string, template: string): void {
|
||||||
|
this.templates[name] = template;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a template by name
|
||||||
|
*/
|
||||||
|
getTemplate(name: string): string | undefined {
|
||||||
|
return this.templates[name];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a template exists
|
||||||
|
*/
|
||||||
|
hasTemplate(name: string): boolean {
|
||||||
|
return name in this.templates;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate that a template contains all required variables
|
||||||
|
*/
|
||||||
|
validateTemplate(
|
||||||
|
template: string,
|
||||||
|
requiredVars: string[]
|
||||||
|
): TemplateValidationResult {
|
||||||
|
const templateVars = this.extractVariables(template);
|
||||||
|
const missingVars = requiredVars.filter(
|
||||||
|
(varName) => !templateVars.includes(varName)
|
||||||
|
);
|
||||||
|
|
||||||
|
return {
|
||||||
|
isValid: missingVars.length === 0,
|
||||||
|
missingVars
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract all variable names from a template
|
||||||
|
*/
|
||||||
|
extractVariables(template: string): string[] {
|
||||||
|
const regex = /\{\{\s*([^}#/\s]+)\s*\}\}/g;
|
||||||
|
const matches = template.matchAll(regex);
|
||||||
|
const variables = new Set<string>();
|
||||||
|
|
||||||
|
for (const match of matches) {
|
||||||
|
variables.add(match[1]);
|
||||||
|
}
|
||||||
|
|
||||||
|
return Array.from(variables);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Substitute variables in template
|
||||||
|
* Supports both {{variable}} and {{#variable}}...{{/variable}} (conditional blocks)
|
||||||
|
*/
|
||||||
|
private substituteVariables(
|
||||||
|
template: string,
|
||||||
|
variables: TemplateVariables
|
||||||
|
): string {
|
||||||
|
let result = template;
|
||||||
|
|
||||||
|
// Handle conditional blocks first ({{#var}}...{{/var}})
|
||||||
|
result = this.processConditionalBlocks(result, variables);
|
||||||
|
|
||||||
|
// Handle simple variable substitution ({{var}})
|
||||||
|
result = result.replace(/\{\{\s*([^}#/\s]+)\s*\}\}/g, (_, varName) => {
|
||||||
|
const value = variables[varName];
|
||||||
|
return value !== undefined && value !== null
|
||||||
|
? String(value)
|
||||||
|
: this.preservePlaceholders
|
||||||
|
? `{{${varName}}}`
|
||||||
|
: '';
|
||||||
|
});
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process conditional blocks in template
|
||||||
|
* {{#variable}}content{{/variable}} - shows content only if variable is truthy
|
||||||
|
* Processes innermost blocks first to handle nesting
|
||||||
|
*/
|
||||||
|
private processConditionalBlocks(
|
||||||
|
template: string,
|
||||||
|
variables: TemplateVariables
|
||||||
|
): string {
|
||||||
|
let result = template;
|
||||||
|
let hasChanges = true;
|
||||||
|
|
||||||
|
// Keep processing until no more conditional blocks are found
|
||||||
|
while (hasChanges) {
|
||||||
|
const before = result;
|
||||||
|
|
||||||
|
// Find and replace innermost conditional blocks (non-greedy match)
|
||||||
|
result = result.replace(
|
||||||
|
/\{\{#([^}]+)\}\}((?:(?!\{\{#).)*?)\{\{\/\1\}\}/gs,
|
||||||
|
(_, varName, content) => {
|
||||||
|
const value = variables[varName.trim()];
|
||||||
|
|
||||||
|
// Show content if variable is truthy (not undefined, null, false, or empty string)
|
||||||
|
if (
|
||||||
|
value !== undefined &&
|
||||||
|
value !== null &&
|
||||||
|
value !== false &&
|
||||||
|
value !== ''
|
||||||
|
) {
|
||||||
|
return content;
|
||||||
|
}
|
||||||
|
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
hasChanges = result !== before;
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -83,3 +83,35 @@ export {
|
|||||||
type ValidationErrorType,
|
type ValidationErrorType,
|
||||||
type DependencyIssue
|
type DependencyIssue
|
||||||
} from './services/index.js';
|
} from './services/index.js';
|
||||||
|
|
||||||
|
// Re-export Git adapter
|
||||||
|
export { GitAdapter } from './git/git-adapter.js';
|
||||||
|
export {
|
||||||
|
CommitMessageGenerator,
|
||||||
|
type CommitMessageOptions
|
||||||
|
} from './git/commit-message-generator.js';
|
||||||
|
|
||||||
|
// Re-export workflow orchestrator, state manager, activity logger, and types
|
||||||
|
export { WorkflowOrchestrator } from './workflow/workflow-orchestrator.js';
|
||||||
|
export { WorkflowStateManager } from './workflow/workflow-state-manager.js';
|
||||||
|
export { WorkflowActivityLogger } from './workflow/workflow-activity-logger.js';
|
||||||
|
export type {
|
||||||
|
WorkflowPhase,
|
||||||
|
TDDPhase,
|
||||||
|
WorkflowContext,
|
||||||
|
WorkflowState,
|
||||||
|
WorkflowEvent,
|
||||||
|
WorkflowEventData,
|
||||||
|
WorkflowEventListener,
|
||||||
|
SubtaskInfo,
|
||||||
|
TestResult,
|
||||||
|
WorkflowError
|
||||||
|
} from './workflow/types.js';
|
||||||
|
|
||||||
|
// Re-export workflow service
|
||||||
|
export { WorkflowService } from './services/workflow.service.js';
|
||||||
|
export type {
|
||||||
|
StartWorkflowOptions,
|
||||||
|
WorkflowStatus,
|
||||||
|
NextAction
|
||||||
|
} from './services/workflow.service.js';
|
||||||
|
|||||||
@@ -9,6 +9,17 @@ import type {
|
|||||||
StorageType
|
StorageType
|
||||||
} from '../types/index.js';
|
} from '../types/index.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Conventional Commit types allowed in workflow
|
||||||
|
*/
|
||||||
|
export type CommitType =
|
||||||
|
| 'feat'
|
||||||
|
| 'fix'
|
||||||
|
| 'refactor'
|
||||||
|
| 'test'
|
||||||
|
| 'docs'
|
||||||
|
| 'chore';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Model configuration for different AI roles
|
* Model configuration for different AI roles
|
||||||
*/
|
*/
|
||||||
@@ -45,9 +56,15 @@ export interface TaskSettings {
|
|||||||
defaultPriority: TaskPriority;
|
defaultPriority: TaskPriority;
|
||||||
/** Default complexity for analysis */
|
/** Default complexity for analysis */
|
||||||
defaultComplexity: TaskComplexity;
|
defaultComplexity: TaskComplexity;
|
||||||
/** Maximum number of subtasks per task */
|
/**
|
||||||
|
* Maximum number of subtasks per task
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
maxSubtasks: number;
|
maxSubtasks: number;
|
||||||
/** Maximum number of concurrent tasks */
|
/**
|
||||||
|
* Maximum number of concurrent tasks
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
maxConcurrentTasks: number;
|
maxConcurrentTasks: number;
|
||||||
/** Enable automatic task ID generation */
|
/** Enable automatic task ID generation */
|
||||||
autoGenerateIds: boolean;
|
autoGenerateIds: boolean;
|
||||||
@@ -69,7 +86,10 @@ export interface TagSettings {
|
|||||||
enableTags: boolean;
|
enableTags: boolean;
|
||||||
/** Default tag for new tasks */
|
/** Default tag for new tasks */
|
||||||
defaultTag: string;
|
defaultTag: string;
|
||||||
/** Maximum number of tags per task */
|
/**
|
||||||
|
* Maximum number of tags per task
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
maxTagsPerTask: number;
|
maxTagsPerTask: number;
|
||||||
/** Enable automatic tag creation from Git branches */
|
/** Enable automatic tag creation from Git branches */
|
||||||
autoCreateFromBranch: boolean;
|
autoCreateFromBranch: boolean;
|
||||||
@@ -114,7 +134,10 @@ export interface StorageSettings
|
|||||||
readonly apiConfigured?: boolean;
|
readonly apiConfigured?: boolean;
|
||||||
/** Enable automatic backups */
|
/** Enable automatic backups */
|
||||||
enableBackup: boolean;
|
enableBackup: boolean;
|
||||||
/** Maximum number of backups to retain */
|
/**
|
||||||
|
* Maximum number of backups to retain
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
maxBackups: number;
|
maxBackups: number;
|
||||||
/** Enable compression for storage */
|
/** Enable compression for storage */
|
||||||
enableCompression: boolean;
|
enableCompression: boolean;
|
||||||
@@ -128,15 +151,30 @@ export interface StorageSettings
|
|||||||
* Retry and resilience settings
|
* Retry and resilience settings
|
||||||
*/
|
*/
|
||||||
export interface RetrySettings {
|
export interface RetrySettings {
|
||||||
/** Number of retry attempts for failed operations */
|
/**
|
||||||
|
* Number of retry attempts for failed operations
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
retryAttempts: number;
|
retryAttempts: number;
|
||||||
/** Base delay between retries in milliseconds */
|
/**
|
||||||
|
* Base delay between retries in milliseconds
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
retryDelay: number;
|
retryDelay: number;
|
||||||
/** Maximum delay between retries in milliseconds */
|
/**
|
||||||
|
* Maximum delay between retries in milliseconds
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
maxRetryDelay: number;
|
maxRetryDelay: number;
|
||||||
/** Exponential backoff multiplier */
|
/**
|
||||||
|
* Exponential backoff multiplier
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
backoffMultiplier: number;
|
backoffMultiplier: number;
|
||||||
/** Request timeout in milliseconds */
|
/**
|
||||||
|
* Request timeout in milliseconds
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
requestTimeout: number;
|
requestTimeout: number;
|
||||||
/** Enable retry for network errors */
|
/** Enable retry for network errors */
|
||||||
retryOnNetworkError: boolean;
|
retryOnNetworkError: boolean;
|
||||||
@@ -160,9 +198,15 @@ export interface LoggingSettings {
|
|||||||
logPerformance: boolean;
|
logPerformance: boolean;
|
||||||
/** Enable error stack traces */
|
/** Enable error stack traces */
|
||||||
logStackTraces: boolean;
|
logStackTraces: boolean;
|
||||||
/** Maximum log file size in MB */
|
/**
|
||||||
|
* Maximum log file size in MB
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
maxFileSize: number;
|
maxFileSize: number;
|
||||||
/** Maximum number of log files to retain */
|
/**
|
||||||
|
* Maximum number of log files to retain
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
maxFiles: number;
|
maxFiles: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -174,11 +218,17 @@ export interface SecuritySettings {
|
|||||||
validateApiKeys: boolean;
|
validateApiKeys: boolean;
|
||||||
/** Enable request rate limiting */
|
/** Enable request rate limiting */
|
||||||
enableRateLimit: boolean;
|
enableRateLimit: boolean;
|
||||||
/** Maximum requests per minute */
|
/**
|
||||||
|
* Maximum requests per minute
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
maxRequestsPerMinute: number;
|
maxRequestsPerMinute: number;
|
||||||
/** Enable input sanitization */
|
/** Enable input sanitization */
|
||||||
sanitizeInputs: boolean;
|
sanitizeInputs: boolean;
|
||||||
/** Maximum prompt length in characters */
|
/**
|
||||||
|
* Maximum prompt length in characters
|
||||||
|
* @minimum 1
|
||||||
|
*/
|
||||||
maxPromptLength: number;
|
maxPromptLength: number;
|
||||||
/** Allowed file extensions for imports */
|
/** Allowed file extensions for imports */
|
||||||
allowedFileExtensions: string[];
|
allowedFileExtensions: string[];
|
||||||
@@ -186,6 +236,72 @@ export interface SecuritySettings {
|
|||||||
enableCors: boolean;
|
enableCors: boolean;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Workflow and autopilot TDD settings
|
||||||
|
*/
|
||||||
|
export interface WorkflowSettings {
|
||||||
|
/** Enable autopilot/TDD workflow features */
|
||||||
|
enableAutopilot: boolean;
|
||||||
|
/**
|
||||||
|
* Maximum retry attempts for phase validation
|
||||||
|
* @minimum 1
|
||||||
|
* @maximum 10
|
||||||
|
*/
|
||||||
|
maxPhaseAttempts: number;
|
||||||
|
/** Branch naming pattern for workflow branches */
|
||||||
|
branchPattern: string;
|
||||||
|
/** Require clean working tree before starting workflow */
|
||||||
|
requireCleanWorkingTree: boolean;
|
||||||
|
/** Automatically stage all changes during commit phase */
|
||||||
|
autoStageChanges: boolean;
|
||||||
|
/** Include co-author attribution in commits */
|
||||||
|
includeCoAuthor: boolean;
|
||||||
|
/** Co-author name for commit messages */
|
||||||
|
coAuthorName: string;
|
||||||
|
/** Co-author email for commit messages (defaults to taskmaster@tryhamster.com) */
|
||||||
|
coAuthorEmail: string;
|
||||||
|
/** Test result thresholds for phase validation */
|
||||||
|
testThresholds: {
|
||||||
|
/**
|
||||||
|
* Minimum test count for valid RED phase
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
|
minTests: number;
|
||||||
|
/**
|
||||||
|
* Maximum allowed failing tests in GREEN phase
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
|
maxFailuresInGreen: number;
|
||||||
|
};
|
||||||
|
/** Commit message template pattern */
|
||||||
|
commitMessageTemplate: string;
|
||||||
|
/** Conventional commit types allowed */
|
||||||
|
allowedCommitTypes: readonly CommitType[];
|
||||||
|
/**
|
||||||
|
* Default commit type for autopilot
|
||||||
|
* @validation Must be present in allowedCommitTypes array
|
||||||
|
*/
|
||||||
|
defaultCommitType: CommitType;
|
||||||
|
/**
|
||||||
|
* Timeout for workflow operations in milliseconds
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
|
operationTimeout: number;
|
||||||
|
/** Enable activity logging for workflow events */
|
||||||
|
enableActivityLogging: boolean;
|
||||||
|
/** Path to store workflow activity logs */
|
||||||
|
activityLogPath: string;
|
||||||
|
/** Enable automatic backup of workflow state */
|
||||||
|
enableStateBackup: boolean;
|
||||||
|
/**
|
||||||
|
* Maximum workflow state backups to retain
|
||||||
|
* @minimum 0
|
||||||
|
*/
|
||||||
|
maxStateBackups: number;
|
||||||
|
/** Abort workflow if validation fails after max attempts */
|
||||||
|
abortOnMaxAttempts: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Main configuration interface for Task Master core
|
* Main configuration interface for Task Master core
|
||||||
*/
|
*/
|
||||||
@@ -211,6 +327,9 @@ export interface IConfiguration {
|
|||||||
/** Tag and context settings */
|
/** Tag and context settings */
|
||||||
tags: TagSettings;
|
tags: TagSettings;
|
||||||
|
|
||||||
|
/** Workflow and autopilot settings */
|
||||||
|
workflow: WorkflowSettings;
|
||||||
|
|
||||||
/** Storage configuration */
|
/** Storage configuration */
|
||||||
storage: StorageSettings;
|
storage: StorageSettings;
|
||||||
|
|
||||||
@@ -414,6 +533,35 @@ export const DEFAULT_CONFIG_VALUES = {
|
|||||||
MAX_TAGS_PER_TASK: 10,
|
MAX_TAGS_PER_TASK: 10,
|
||||||
NAMING_CONVENTION: 'kebab-case' as const
|
NAMING_CONVENTION: 'kebab-case' as const
|
||||||
},
|
},
|
||||||
|
WORKFLOW: {
|
||||||
|
ENABLE_AUTOPILOT: true,
|
||||||
|
MAX_PHASE_ATTEMPTS: 3,
|
||||||
|
BRANCH_PATTERN: 'task-{taskId}',
|
||||||
|
REQUIRE_CLEAN_WORKING_TREE: true,
|
||||||
|
AUTO_STAGE_CHANGES: true,
|
||||||
|
INCLUDE_CO_AUTHOR: true,
|
||||||
|
CO_AUTHOR_NAME: 'TaskMaster AI',
|
||||||
|
CO_AUTHOR_EMAIL: 'taskmaster@tryhamster.com',
|
||||||
|
MIN_TESTS: 1,
|
||||||
|
MAX_FAILURES_IN_GREEN: 0,
|
||||||
|
COMMIT_MESSAGE_TEMPLATE:
|
||||||
|
'{type}({scope}): {description} (Task {taskId}.{subtaskIndex})',
|
||||||
|
ALLOWED_COMMIT_TYPES: [
|
||||||
|
'feat',
|
||||||
|
'fix',
|
||||||
|
'refactor',
|
||||||
|
'test',
|
||||||
|
'docs',
|
||||||
|
'chore'
|
||||||
|
] as const satisfies readonly CommitType[],
|
||||||
|
DEFAULT_COMMIT_TYPE: 'feat' as CommitType,
|
||||||
|
OPERATION_TIMEOUT: 60000,
|
||||||
|
ENABLE_ACTIVITY_LOGGING: true,
|
||||||
|
ACTIVITY_LOG_PATH: '.taskmaster/logs/workflow-activity.log',
|
||||||
|
ENABLE_STATE_BACKUP: true,
|
||||||
|
MAX_STATE_BACKUPS: 5,
|
||||||
|
ABORT_ON_MAX_ATTEMPTS: false
|
||||||
|
},
|
||||||
STORAGE: {
|
STORAGE: {
|
||||||
TYPE: 'auto' as const,
|
TYPE: 'auto' as const,
|
||||||
ENCODING: 'utf8' as BufferEncoding,
|
ENCODING: 'utf8' as BufferEncoding,
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ export { OrganizationService } from './organization.service.js';
|
|||||||
export { ExportService } from './export.service.js';
|
export { ExportService } from './export.service.js';
|
||||||
export { PreflightChecker } from './preflight-checker.service.js';
|
export { PreflightChecker } from './preflight-checker.service.js';
|
||||||
export { TaskLoaderService } from './task-loader.service.js';
|
export { TaskLoaderService } from './task-loader.service.js';
|
||||||
|
export { TestResultValidator } from './test-result-validator.js';
|
||||||
export type { Organization, Brief } from './organization.service.js';
|
export type { Organization, Brief } from './organization.service.js';
|
||||||
export type {
|
export type {
|
||||||
ExportTasksOptions,
|
ExportTasksOptions,
|
||||||
@@ -22,3 +23,11 @@ export type {
|
|||||||
ValidationErrorType,
|
ValidationErrorType,
|
||||||
DependencyIssue
|
DependencyIssue
|
||||||
} from './task-loader.service.js';
|
} from './task-loader.service.js';
|
||||||
|
export type {
|
||||||
|
TestResult,
|
||||||
|
TestPhase,
|
||||||
|
Coverage,
|
||||||
|
CoverageThresholds,
|
||||||
|
ValidationResult,
|
||||||
|
PhaseValidationOptions
|
||||||
|
} from './test-result-validator.types.js';
|
||||||
|
|||||||
456
packages/tm-core/src/services/test-result-validator.test.ts
Normal file
456
packages/tm-core/src/services/test-result-validator.test.ts
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { TestResultValidator } from './test-result-validator.js';
|
||||||
|
import type {
|
||||||
|
TestResult,
|
||||||
|
ValidationResult,
|
||||||
|
TestPhase
|
||||||
|
} from './test-result-validator.types.js';
|
||||||
|
|
||||||
|
describe('TestResultValidator - Input Validation', () => {
|
||||||
|
const validator = new TestResultValidator();
|
||||||
|
|
||||||
|
describe('Schema Validation', () => {
|
||||||
|
it('should validate a valid test result', () => {
|
||||||
|
const validResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 5,
|
||||||
|
failed: 5,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'RED'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validate(validResult);
|
||||||
|
expect(result.valid).toBe(true);
|
||||||
|
expect(result.errors).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject negative test counts', () => {
|
||||||
|
const invalidResult = {
|
||||||
|
total: -1,
|
||||||
|
passed: 0,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'RED'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validate(invalidResult as TestResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors.length).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject when totals do not match', () => {
|
||||||
|
const invalidResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 3,
|
||||||
|
failed: 3,
|
||||||
|
skipped: 3, // 3 + 3 + 3 = 9, not 10
|
||||||
|
phase: 'RED'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validate(invalidResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors).toContain(
|
||||||
|
'Total tests must equal passed + failed + skipped'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject missing required fields', () => {
|
||||||
|
const invalidResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 5
|
||||||
|
// missing failed, skipped, phase
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validate(invalidResult as TestResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors.length).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should accept optional coverage data', () => {
|
||||||
|
const resultWithCoverage: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN',
|
||||||
|
coverage: {
|
||||||
|
line: 85,
|
||||||
|
branch: 75,
|
||||||
|
function: 90,
|
||||||
|
statement: 85
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validate(resultWithCoverage);
|
||||||
|
expect(result.valid).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject invalid coverage percentages', () => {
|
||||||
|
const invalidResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN',
|
||||||
|
coverage: {
|
||||||
|
line: 150, // Invalid: > 100
|
||||||
|
branch: -10, // Invalid: < 0
|
||||||
|
function: 90,
|
||||||
|
statement: 85
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validate(invalidResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors.length).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject invalid phase values', () => {
|
||||||
|
const invalidResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 5,
|
||||||
|
failed: 5,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'INVALID_PHASE'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validate(invalidResult as TestResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors.length).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('TestResultValidator - RED Phase Validation', () => {
|
||||||
|
const validator = new TestResultValidator();
|
||||||
|
|
||||||
|
it('should pass validation when RED phase has failures', () => {
|
||||||
|
const redResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 5,
|
||||||
|
failed: 5,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'RED'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateRedPhase(redResult);
|
||||||
|
expect(result.valid).toBe(true);
|
||||||
|
expect(result.errors).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fail validation when RED phase has zero failures', () => {
|
||||||
|
const redResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'RED'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateRedPhase(redResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors).toContain(
|
||||||
|
'RED phase must have at least one failing test'
|
||||||
|
);
|
||||||
|
expect(result.suggestions).toContain(
|
||||||
|
'Write failing tests first to follow TDD workflow'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fail validation when RED phase has empty test suite', () => {
|
||||||
|
const emptyResult: TestResult = {
|
||||||
|
total: 0,
|
||||||
|
passed: 0,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'RED'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateRedPhase(emptyResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors).toContain('Cannot validate empty test suite');
|
||||||
|
expect(result.suggestions).toContain(
|
||||||
|
'Add at least one test to begin TDD cycle'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should propagate base validation errors', () => {
|
||||||
|
const invalidResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 3,
|
||||||
|
failed: 3,
|
||||||
|
skipped: 3, // Total mismatch
|
||||||
|
phase: 'RED'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateRedPhase(invalidResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors).toContain(
|
||||||
|
'Total tests must equal passed + failed + skipped'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('TestResultValidator - GREEN Phase Validation', () => {
|
||||||
|
const validator = new TestResultValidator();
|
||||||
|
|
||||||
|
it('should pass validation when GREEN phase has all tests passing', () => {
|
||||||
|
const greenResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateGreenPhase(greenResult);
|
||||||
|
expect(result.valid).toBe(true);
|
||||||
|
expect(result.errors).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fail validation when GREEN phase has failures', () => {
|
||||||
|
const greenResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 5,
|
||||||
|
failed: 5,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateGreenPhase(greenResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors).toContain('GREEN phase must have zero failures');
|
||||||
|
expect(result.suggestions).toContain(
|
||||||
|
'Fix implementation to make all tests pass'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fail validation when GREEN phase has no passing tests', () => {
|
||||||
|
const greenResult: TestResult = {
|
||||||
|
total: 5,
|
||||||
|
passed: 0,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 5,
|
||||||
|
phase: 'GREEN'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateGreenPhase(greenResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors).toContain(
|
||||||
|
'GREEN phase must have at least one passing test'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should warn when test count decreases', () => {
|
||||||
|
const greenResult: TestResult = {
|
||||||
|
total: 5,
|
||||||
|
passed: 5,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateGreenPhase(greenResult, 10);
|
||||||
|
expect(result.valid).toBe(true);
|
||||||
|
expect(result.warnings).toContain('Test count decreased from 10 to 5');
|
||||||
|
expect(result.suggestions).toContain(
|
||||||
|
'Verify that no tests were accidentally removed'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not warn when test count increases', () => {
|
||||||
|
const greenResult: TestResult = {
|
||||||
|
total: 15,
|
||||||
|
passed: 15,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateGreenPhase(greenResult, 10);
|
||||||
|
expect(result.valid).toBe(true);
|
||||||
|
expect(result.warnings || []).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should propagate base validation errors', () => {
|
||||||
|
const invalidResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 3,
|
||||||
|
failed: 3,
|
||||||
|
skipped: 3, // Total mismatch
|
||||||
|
phase: 'GREEN'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = validator.validateGreenPhase(invalidResult);
|
||||||
|
expect(result.valid).toBe(false);
|
||||||
|
expect(result.errors).toContain(
|
||||||
|
'Total tests must equal passed + failed + skipped'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('TestResultValidator - Coverage Threshold Validation', () => {
|
||||||
|
const validator = new TestResultValidator();
|
||||||
|
|
||||||
|
it('should pass validation when coverage meets thresholds', () => {
|
||||||
|
const result: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN',
|
||||||
|
coverage: {
|
||||||
|
line: 85,
|
||||||
|
branch: 80,
|
||||||
|
function: 90,
|
||||||
|
statement: 85
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const thresholds = {
|
||||||
|
line: 80,
|
||||||
|
branch: 75,
|
||||||
|
function: 85,
|
||||||
|
statement: 80
|
||||||
|
};
|
||||||
|
|
||||||
|
const validationResult = validator.validateCoverage(result, thresholds);
|
||||||
|
expect(validationResult.valid).toBe(true);
|
||||||
|
expect(validationResult.errors).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fail validation when line coverage is below threshold', () => {
|
||||||
|
const result: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN',
|
||||||
|
coverage: {
|
||||||
|
line: 70,
|
||||||
|
branch: 80,
|
||||||
|
function: 90,
|
||||||
|
statement: 85
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const thresholds = {
|
||||||
|
line: 80
|
||||||
|
};
|
||||||
|
|
||||||
|
const validationResult = validator.validateCoverage(result, thresholds);
|
||||||
|
expect(validationResult.valid).toBe(false);
|
||||||
|
expect(validationResult.errors[0]).toContain('line coverage (70% < 80%)');
|
||||||
|
expect(validationResult.suggestions).toContain(
|
||||||
|
'Add more tests to improve code coverage'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should fail validation when multiple coverage types are below threshold', () => {
|
||||||
|
const result: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN',
|
||||||
|
coverage: {
|
||||||
|
line: 70,
|
||||||
|
branch: 60,
|
||||||
|
function: 75,
|
||||||
|
statement: 65
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const thresholds = {
|
||||||
|
line: 80,
|
||||||
|
branch: 75,
|
||||||
|
function: 85,
|
||||||
|
statement: 80
|
||||||
|
};
|
||||||
|
|
||||||
|
const validationResult = validator.validateCoverage(result, thresholds);
|
||||||
|
expect(validationResult.valid).toBe(false);
|
||||||
|
expect(validationResult.errors[0]).toContain('line coverage (70% < 80%)');
|
||||||
|
expect(validationResult.errors[0]).toContain('branch coverage (60% < 75%)');
|
||||||
|
expect(validationResult.errors[0]).toContain(
|
||||||
|
'function coverage (75% < 85%)'
|
||||||
|
);
|
||||||
|
expect(validationResult.errors[0]).toContain(
|
||||||
|
'statement coverage (65% < 80%)'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should skip validation when no coverage data is provided', () => {
|
||||||
|
const result: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN'
|
||||||
|
};
|
||||||
|
|
||||||
|
const thresholds = {
|
||||||
|
line: 80,
|
||||||
|
branch: 75
|
||||||
|
};
|
||||||
|
|
||||||
|
const validationResult = validator.validateCoverage(result, thresholds);
|
||||||
|
expect(validationResult.valid).toBe(true);
|
||||||
|
expect(validationResult.errors).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should only validate specified threshold types', () => {
|
||||||
|
const result: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 10,
|
||||||
|
failed: 0,
|
||||||
|
skipped: 0,
|
||||||
|
phase: 'GREEN',
|
||||||
|
coverage: {
|
||||||
|
line: 70,
|
||||||
|
branch: 60,
|
||||||
|
function: 90,
|
||||||
|
statement: 85
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const thresholds = {
|
||||||
|
line: 80
|
||||||
|
// Only checking line coverage
|
||||||
|
};
|
||||||
|
|
||||||
|
const validationResult = validator.validateCoverage(result, thresholds);
|
||||||
|
expect(validationResult.valid).toBe(false);
|
||||||
|
expect(validationResult.errors[0]).toContain('line coverage');
|
||||||
|
expect(validationResult.errors[0]).not.toContain('branch coverage');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should propagate base validation errors', () => {
|
||||||
|
const invalidResult: TestResult = {
|
||||||
|
total: 10,
|
||||||
|
passed: 3,
|
||||||
|
failed: 3,
|
||||||
|
skipped: 3, // Total mismatch
|
||||||
|
phase: 'GREEN',
|
||||||
|
coverage: {
|
||||||
|
line: 90,
|
||||||
|
branch: 90,
|
||||||
|
function: 90,
|
||||||
|
statement: 90
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const thresholds = {
|
||||||
|
line: 80
|
||||||
|
};
|
||||||
|
|
||||||
|
const validationResult = validator.validateCoverage(
|
||||||
|
invalidResult,
|
||||||
|
thresholds
|
||||||
|
);
|
||||||
|
expect(validationResult.valid).toBe(false);
|
||||||
|
expect(validationResult.errors).toContain(
|
||||||
|
'Total tests must equal passed + failed + skipped'
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
268
packages/tm-core/src/services/test-result-validator.ts
Normal file
268
packages/tm-core/src/services/test-result-validator.ts
Normal file
@@ -0,0 +1,268 @@
|
|||||||
|
import { z } from 'zod';
|
||||||
|
import type {
|
||||||
|
TestResult,
|
||||||
|
ValidationResult,
|
||||||
|
CoverageThresholds,
|
||||||
|
PhaseValidationOptions
|
||||||
|
} from './test-result-validator.types.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Schema for coverage metrics validation
|
||||||
|
*/
|
||||||
|
const coverageSchema = z.object({
|
||||||
|
line: z.number().min(0).max(100),
|
||||||
|
branch: z.number().min(0).max(100),
|
||||||
|
function: z.number().min(0).max(100),
|
||||||
|
statement: z.number().min(0).max(100)
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Schema for test result validation
|
||||||
|
*/
|
||||||
|
const testResultSchema = z.object({
|
||||||
|
total: z.number().int().nonnegative(),
|
||||||
|
passed: z.number().int().nonnegative(),
|
||||||
|
failed: z.number().int().nonnegative(),
|
||||||
|
skipped: z.number().int().nonnegative(),
|
||||||
|
phase: z.enum(['RED', 'GREEN', 'REFACTOR']),
|
||||||
|
coverage: coverageSchema.optional()
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates test results according to TDD phase semantics
|
||||||
|
*/
|
||||||
|
export class TestResultValidator {
|
||||||
|
/**
|
||||||
|
* Validates a test result object
|
||||||
|
*/
|
||||||
|
validate(testResult: TestResult): ValidationResult {
|
||||||
|
const errors: string[] = [];
|
||||||
|
const warnings: string[] = [];
|
||||||
|
const suggestions: string[] = [];
|
||||||
|
|
||||||
|
// Schema validation
|
||||||
|
const parseResult = testResultSchema.safeParse(testResult);
|
||||||
|
if (!parseResult.success) {
|
||||||
|
const zodIssues = parseResult.error.issues || [];
|
||||||
|
errors.push(
|
||||||
|
...zodIssues.map((e) => {
|
||||||
|
const path = e.path.length > 0 ? `${e.path.join('.')}: ` : '';
|
||||||
|
return `${path}${e.message}`;
|
||||||
|
})
|
||||||
|
);
|
||||||
|
return { valid: false, errors, warnings, suggestions };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total validation
|
||||||
|
const sum = testResult.passed + testResult.failed + testResult.skipped;
|
||||||
|
if (sum !== testResult.total) {
|
||||||
|
errors.push('Total tests must equal passed + failed + skipped');
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there are validation errors, return early
|
||||||
|
if (errors.length > 0) {
|
||||||
|
return { valid: false, errors, warnings, suggestions };
|
||||||
|
}
|
||||||
|
|
||||||
|
return { valid: true, errors, warnings, suggestions };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates RED phase test results
|
||||||
|
* RED phase must have at least one failing test
|
||||||
|
*/
|
||||||
|
validateRedPhase(testResult: TestResult): ValidationResult {
|
||||||
|
const baseValidation = this.validate(testResult);
|
||||||
|
if (!baseValidation.valid) {
|
||||||
|
return baseValidation;
|
||||||
|
}
|
||||||
|
|
||||||
|
const errors: string[] = [];
|
||||||
|
const suggestions: string[] = [];
|
||||||
|
|
||||||
|
// RED phase must have failures
|
||||||
|
if (testResult.failed === 0) {
|
||||||
|
errors.push('RED phase must have at least one failing test');
|
||||||
|
suggestions.push('Write failing tests first to follow TDD workflow');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must have at least one test
|
||||||
|
if (testResult.total === 0) {
|
||||||
|
errors.push('Cannot validate empty test suite');
|
||||||
|
suggestions.push('Add at least one test to begin TDD cycle');
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
valid: errors.length === 0,
|
||||||
|
errors,
|
||||||
|
suggestions
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates GREEN phase test results
|
||||||
|
* GREEN phase must have zero failures
|
||||||
|
*/
|
||||||
|
validateGreenPhase(
|
||||||
|
testResult: TestResult,
|
||||||
|
previousTestCount?: number
|
||||||
|
): ValidationResult {
|
||||||
|
const baseValidation = this.validate(testResult);
|
||||||
|
if (!baseValidation.valid) {
|
||||||
|
return baseValidation;
|
||||||
|
}
|
||||||
|
|
||||||
|
const errors: string[] = [];
|
||||||
|
const warnings: string[] = [];
|
||||||
|
const suggestions: string[] = [];
|
||||||
|
|
||||||
|
// GREEN phase must have zero failures
|
||||||
|
if (testResult.failed > 0) {
|
||||||
|
errors.push('GREEN phase must have zero failures');
|
||||||
|
suggestions.push('Fix implementation to make all tests pass');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must have at least one passing test
|
||||||
|
if (testResult.passed === 0) {
|
||||||
|
errors.push('GREEN phase must have at least one passing test');
|
||||||
|
suggestions.push('Ensure tests exist and implementation makes them pass');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for test count regression
|
||||||
|
if (
|
||||||
|
previousTestCount !== undefined &&
|
||||||
|
testResult.total < previousTestCount
|
||||||
|
) {
|
||||||
|
warnings.push(
|
||||||
|
`Test count decreased from ${previousTestCount} to ${testResult.total}`
|
||||||
|
);
|
||||||
|
suggestions.push('Verify that no tests were accidentally removed');
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
valid: errors.length === 0,
|
||||||
|
errors,
|
||||||
|
warnings,
|
||||||
|
suggestions
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates coverage thresholds if provided
|
||||||
|
*/
|
||||||
|
validateCoverage(
|
||||||
|
testResult: TestResult,
|
||||||
|
thresholds: CoverageThresholds
|
||||||
|
): ValidationResult {
|
||||||
|
const baseValidation = this.validate(testResult);
|
||||||
|
if (!baseValidation.valid) {
|
||||||
|
return baseValidation;
|
||||||
|
}
|
||||||
|
|
||||||
|
const errors: string[] = [];
|
||||||
|
const suggestions: string[] = [];
|
||||||
|
|
||||||
|
// Skip validation if no coverage data
|
||||||
|
if (!testResult.coverage) {
|
||||||
|
return { valid: true, errors: [], suggestions: [] };
|
||||||
|
}
|
||||||
|
|
||||||
|
const coverage = testResult.coverage;
|
||||||
|
const gaps: string[] = [];
|
||||||
|
|
||||||
|
// Check each coverage type against threshold
|
||||||
|
if (thresholds.line !== undefined && coverage.line < thresholds.line) {
|
||||||
|
gaps.push(`line coverage (${coverage.line}% < ${thresholds.line}%)`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
thresholds.branch !== undefined &&
|
||||||
|
coverage.branch < thresholds.branch
|
||||||
|
) {
|
||||||
|
gaps.push(
|
||||||
|
`branch coverage (${coverage.branch}% < ${thresholds.branch}%)`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
thresholds.function !== undefined &&
|
||||||
|
coverage.function < thresholds.function
|
||||||
|
) {
|
||||||
|
gaps.push(
|
||||||
|
`function coverage (${coverage.function}% < ${thresholds.function}%)`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
thresholds.statement !== undefined &&
|
||||||
|
coverage.statement < thresholds.statement
|
||||||
|
) {
|
||||||
|
gaps.push(
|
||||||
|
`statement coverage (${coverage.statement}% < ${thresholds.statement}%)`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (gaps.length > 0) {
|
||||||
|
errors.push(`Coverage thresholds not met: ${gaps.join(', ')}`);
|
||||||
|
suggestions.push('Add more tests to improve code coverage');
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
valid: errors.length === 0,
|
||||||
|
errors,
|
||||||
|
suggestions
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates test results based on TDD phase
|
||||||
|
*/
|
||||||
|
validatePhase(
|
||||||
|
testResult: TestResult,
|
||||||
|
options?: PhaseValidationOptions
|
||||||
|
): ValidationResult {
|
||||||
|
const phase = options?.phase ?? testResult.phase;
|
||||||
|
|
||||||
|
// Phase-specific validation
|
||||||
|
let phaseResult: ValidationResult;
|
||||||
|
if (phase === 'RED') {
|
||||||
|
phaseResult = this.validateRedPhase(testResult);
|
||||||
|
} else if (phase === 'GREEN') {
|
||||||
|
phaseResult = this.validateGreenPhase(
|
||||||
|
testResult,
|
||||||
|
options?.previousTestCount
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
// REFACTOR phase uses same rules as GREEN
|
||||||
|
phaseResult = this.validateGreenPhase(
|
||||||
|
testResult,
|
||||||
|
options?.previousTestCount
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!phaseResult.valid) {
|
||||||
|
return phaseResult;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Coverage validation if thresholds provided
|
||||||
|
if (options?.coverageThresholds) {
|
||||||
|
const coverageResult = this.validateCoverage(
|
||||||
|
testResult,
|
||||||
|
options.coverageThresholds
|
||||||
|
);
|
||||||
|
|
||||||
|
// Merge results
|
||||||
|
return {
|
||||||
|
valid: coverageResult.valid,
|
||||||
|
errors: [...(phaseResult.errors || []), ...coverageResult.errors],
|
||||||
|
warnings: phaseResult.warnings,
|
||||||
|
suggestions: [
|
||||||
|
...(phaseResult.suggestions || []),
|
||||||
|
...(coverageResult.suggestions || [])
|
||||||
|
]
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return phaseResult;
|
||||||
|
}
|
||||||
|
}
|
||||||
55
packages/tm-core/src/services/test-result-validator.types.ts
Normal file
55
packages/tm-core/src/services/test-result-validator.types.ts
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
/**
|
||||||
|
* Test phase in TDD workflow
|
||||||
|
*/
|
||||||
|
export type TestPhase = 'RED' | 'GREEN' | 'REFACTOR';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Code coverage metrics
|
||||||
|
*/
|
||||||
|
export interface Coverage {
|
||||||
|
line: number;
|
||||||
|
branch: number;
|
||||||
|
function: number;
|
||||||
|
statement: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test result data structure
|
||||||
|
*/
|
||||||
|
export interface TestResult {
|
||||||
|
total: number;
|
||||||
|
passed: number;
|
||||||
|
failed: number;
|
||||||
|
skipped: number;
|
||||||
|
phase: TestPhase;
|
||||||
|
coverage?: Coverage;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Coverage threshold configuration
|
||||||
|
*/
|
||||||
|
export interface CoverageThresholds {
|
||||||
|
line?: number;
|
||||||
|
branch?: number;
|
||||||
|
function?: number;
|
||||||
|
statement?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validation result structure
|
||||||
|
*/
|
||||||
|
export interface ValidationResult {
|
||||||
|
valid: boolean;
|
||||||
|
errors: string[];
|
||||||
|
warnings?: string[];
|
||||||
|
suggestions?: string[];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Phase-specific validation options
|
||||||
|
*/
|
||||||
|
export interface PhaseValidationOptions {
|
||||||
|
phase: TestPhase;
|
||||||
|
coverageThresholds?: CoverageThresholds;
|
||||||
|
previousTestCount?: number;
|
||||||
|
}
|
||||||
494
packages/tm-core/src/services/workflow.service.ts
Normal file
494
packages/tm-core/src/services/workflow.service.ts
Normal file
@@ -0,0 +1,494 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview WorkflowService - High-level facade for TDD workflow operations
|
||||||
|
* Provides a simplified API for MCP tools while delegating to WorkflowOrchestrator
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { WorkflowOrchestrator } from '../workflow/workflow-orchestrator.js';
|
||||||
|
import { WorkflowStateManager } from '../workflow/workflow-state-manager.js';
|
||||||
|
import { WorkflowActivityLogger } from '../workflow/workflow-activity-logger.js';
|
||||||
|
import type {
|
||||||
|
WorkflowContext,
|
||||||
|
SubtaskInfo,
|
||||||
|
TestResult,
|
||||||
|
WorkflowPhase,
|
||||||
|
TDDPhase,
|
||||||
|
WorkflowState
|
||||||
|
} from '../workflow/types.js';
|
||||||
|
import { GitAdapter } from '../git/git-adapter.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options for starting a new workflow
|
||||||
|
*/
|
||||||
|
export interface StartWorkflowOptions {
|
||||||
|
taskId: string;
|
||||||
|
taskTitle: string;
|
||||||
|
subtasks: Array<{
|
||||||
|
id: string;
|
||||||
|
title: string;
|
||||||
|
status: string;
|
||||||
|
maxAttempts?: number;
|
||||||
|
}>;
|
||||||
|
maxAttempts?: number;
|
||||||
|
force?: boolean;
|
||||||
|
tag?: string; // Optional tag for branch naming
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Simplified workflow status for MCP responses
|
||||||
|
*/
|
||||||
|
export interface WorkflowStatus {
|
||||||
|
taskId: string;
|
||||||
|
phase: WorkflowPhase;
|
||||||
|
tddPhase?: TDDPhase;
|
||||||
|
branchName?: string;
|
||||||
|
currentSubtask?: {
|
||||||
|
id: string;
|
||||||
|
title: string;
|
||||||
|
attempts: number;
|
||||||
|
maxAttempts: number;
|
||||||
|
};
|
||||||
|
progress: {
|
||||||
|
completed: number;
|
||||||
|
total: number;
|
||||||
|
current: number;
|
||||||
|
percentage: number;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Next action recommendation for AI agent
|
||||||
|
*/
|
||||||
|
export interface NextAction {
|
||||||
|
action: string;
|
||||||
|
description: string;
|
||||||
|
nextSteps: string;
|
||||||
|
phase: WorkflowPhase;
|
||||||
|
tddPhase?: TDDPhase;
|
||||||
|
subtask?: {
|
||||||
|
id: string;
|
||||||
|
title: string;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* WorkflowService - Facade for workflow operations
|
||||||
|
* Manages WorkflowOrchestrator lifecycle and state persistence
|
||||||
|
*/
|
||||||
|
export class WorkflowService {
|
||||||
|
private readonly projectRoot: string;
|
||||||
|
private readonly stateManager: WorkflowStateManager;
|
||||||
|
private orchestrator?: WorkflowOrchestrator;
|
||||||
|
private activityLogger?: WorkflowActivityLogger;
|
||||||
|
|
||||||
|
constructor(projectRoot: string) {
|
||||||
|
this.projectRoot = projectRoot;
|
||||||
|
this.stateManager = new WorkflowStateManager(projectRoot);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if workflow state exists
|
||||||
|
*/
|
||||||
|
async hasWorkflow(): Promise<boolean> {
|
||||||
|
return await this.stateManager.exists();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Start a new TDD workflow
|
||||||
|
*/
|
||||||
|
async startWorkflow(options: StartWorkflowOptions): Promise<WorkflowStatus> {
|
||||||
|
const {
|
||||||
|
taskId,
|
||||||
|
taskTitle,
|
||||||
|
subtasks,
|
||||||
|
maxAttempts = 3,
|
||||||
|
force,
|
||||||
|
tag
|
||||||
|
} = options;
|
||||||
|
|
||||||
|
// Check for existing workflow
|
||||||
|
if ((await this.hasWorkflow()) && !force) {
|
||||||
|
throw new Error(
|
||||||
|
'Workflow already exists. Use force=true to override or resume existing workflow.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize git adapter and ensure clean state
|
||||||
|
const gitAdapter = new GitAdapter(this.projectRoot);
|
||||||
|
await gitAdapter.ensureGitRepository();
|
||||||
|
await gitAdapter.ensureCleanWorkingTree();
|
||||||
|
|
||||||
|
// Parse subtasks to WorkflowContext format
|
||||||
|
const workflowSubtasks: SubtaskInfo[] = subtasks.map((st) => ({
|
||||||
|
id: st.id,
|
||||||
|
title: st.title,
|
||||||
|
status: st.status === 'done' ? 'completed' : 'pending',
|
||||||
|
attempts: 0,
|
||||||
|
maxAttempts: st.maxAttempts || maxAttempts
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Find the first incomplete subtask to resume from
|
||||||
|
const firstIncompleteIndex = workflowSubtasks.findIndex(
|
||||||
|
(st) => st.status !== 'completed'
|
||||||
|
);
|
||||||
|
|
||||||
|
// If all subtasks are already completed, throw an error
|
||||||
|
if (firstIncompleteIndex === -1) {
|
||||||
|
throw new Error(
|
||||||
|
`All subtasks for task ${taskId} are already completed. Nothing to do.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create workflow context, starting from first incomplete subtask
|
||||||
|
const context: WorkflowContext = {
|
||||||
|
taskId,
|
||||||
|
subtasks: workflowSubtasks,
|
||||||
|
currentSubtaskIndex: firstIncompleteIndex,
|
||||||
|
errors: [],
|
||||||
|
metadata: {
|
||||||
|
startedAt: new Date().toISOString(),
|
||||||
|
taskTitle,
|
||||||
|
resumedFromSubtask:
|
||||||
|
firstIncompleteIndex > 0
|
||||||
|
? workflowSubtasks[firstIncompleteIndex].id
|
||||||
|
: undefined
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create orchestrator with auto-persistence
|
||||||
|
this.orchestrator = new WorkflowOrchestrator(context);
|
||||||
|
this.orchestrator.enableAutoPersist(async (state: WorkflowState) => {
|
||||||
|
await this.stateManager.save(state);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Initialize activity logger to track all workflow events
|
||||||
|
this.activityLogger = new WorkflowActivityLogger(
|
||||||
|
this.orchestrator,
|
||||||
|
this.stateManager.getActivityLogPath()
|
||||||
|
);
|
||||||
|
this.activityLogger.start();
|
||||||
|
|
||||||
|
// Transition through PREFLIGHT and BRANCH_SETUP phases
|
||||||
|
this.orchestrator.transition({ type: 'PREFLIGHT_COMPLETE' });
|
||||||
|
|
||||||
|
// Create git branch with descriptive name
|
||||||
|
const branchName = this.generateBranchName(taskId, taskTitle, tag);
|
||||||
|
|
||||||
|
// Check if we're already on the target branch
|
||||||
|
const currentBranch = await gitAdapter.getCurrentBranch();
|
||||||
|
if (currentBranch !== branchName) {
|
||||||
|
// Only create branch if we're not already on it
|
||||||
|
await gitAdapter.createAndCheckoutBranch(branchName);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transition to SUBTASK_LOOP with RED phase
|
||||||
|
this.orchestrator.transition({
|
||||||
|
type: 'BRANCH_CREATED',
|
||||||
|
branchName
|
||||||
|
});
|
||||||
|
|
||||||
|
return this.getStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Resume an existing workflow
|
||||||
|
*/
|
||||||
|
async resumeWorkflow(): Promise<WorkflowStatus> {
|
||||||
|
// Load state
|
||||||
|
const state = await this.stateManager.load();
|
||||||
|
|
||||||
|
// Create new orchestrator with loaded context
|
||||||
|
this.orchestrator = new WorkflowOrchestrator(state.context);
|
||||||
|
|
||||||
|
// Validate and restore state
|
||||||
|
if (!this.orchestrator.canResumeFromState(state)) {
|
||||||
|
throw new Error(
|
||||||
|
'Invalid workflow state. State may be corrupted. Consider starting a new workflow.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.orchestrator.restoreState(state);
|
||||||
|
|
||||||
|
// Re-enable auto-persistence
|
||||||
|
this.orchestrator.enableAutoPersist(async (newState: WorkflowState) => {
|
||||||
|
await this.stateManager.save(newState);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Initialize activity logger to continue tracking events
|
||||||
|
this.activityLogger = new WorkflowActivityLogger(
|
||||||
|
this.orchestrator,
|
||||||
|
this.stateManager.getActivityLogPath()
|
||||||
|
);
|
||||||
|
this.activityLogger.start();
|
||||||
|
|
||||||
|
return this.getStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Get current workflow status.
 *
 * @returns Snapshot of the workflow: task id, phases, branch name, the
 *   current subtask (if any) and overall progress.
 * @throws {Error} When no workflow has been started or resumed
 */
getStatus(): WorkflowStatus {
	if (!this.orchestrator) {
		throw new Error('No active workflow. Start or resume a workflow first.');
	}

	const context = this.orchestrator.getContext();
	const progress = this.orchestrator.getProgress();
	const currentSubtask = this.orchestrator.getCurrentSubtask();

	return {
		taskId: context.taskId,
		phase: this.orchestrator.getCurrentPhase(),
		tddPhase: this.orchestrator.getCurrentTDDPhase(),
		branchName: context.branchName,
		currentSubtask: currentSubtask
			? {
					id: currentSubtask.id,
					title: currentSubtask.title,
					attempts: currentSubtask.attempts,
					// `??` instead of `||`: only fall back to the default when
					// maxAttempts is null/undefined, not for every falsy value
					maxAttempts: currentSubtask.maxAttempts ?? 3
				}
			: undefined,
		progress
	};
}
|
||||||
|
|
||||||
|
/**
 * Get the workflow context (for accessing full state details).
 *
 * @returns The orchestrator's current context
 * @throws {Error} When no workflow has been started or resumed
 */
getContext(): WorkflowContext {
	const orchestrator = this.orchestrator;
	if (!orchestrator) {
		throw new Error('No active workflow. Start or resume a workflow first.');
	}
	return orchestrator.getContext();
}
|
||||||
|
|
||||||
|
/**
 * Get the next recommended action for an AI agent.
 *
 * Maps the workflow's current phase (and, inside the subtask loop, the
 * current TDD phase) to a concrete instruction describing what to do next.
 *
 * @returns The recommended action with human-readable guidance
 * @throws {Error} When no workflow has been started or resumed
 */
getNextAction(): NextAction {
	if (!this.orchestrator) {
		throw new Error('No active workflow. Start or resume a workflow first.');
	}

	const phase = this.orchestrator.getCurrentPhase();
	const tddPhase = this.orchestrator.getCurrentTDDPhase();
	const currentSubtask = this.orchestrator.getCurrentSubtask();

	// Terminal phase: nothing left to do but review and merge
	if (phase === 'COMPLETE') {
		return {
			action: 'workflow_complete',
			description: 'All subtasks completed',
			nextSteps:
				'All subtasks completed! Review the entire implementation and merge your branch when ready.',
			phase
		};
	}

	// All subtasks done; the workflow still needs a final verification pass
	if (phase === 'FINALIZE') {
		return {
			action: 'finalize_workflow',
			description: 'Finalize and complete the workflow',
			nextSteps:
				'All subtasks are complete! Use autopilot_finalize to verify no uncommitted changes remain and mark the workflow as complete.',
			phase
		};
	}

	// Anything other than an active subtask loop is not actionable here
	if (phase !== 'SUBTASK_LOOP' || !tddPhase || !currentSubtask) {
		return {
			action: 'unknown',
			description: 'Workflow is not in active state',
			nextSteps: 'Use autopilot_status to check workflow state.',
			phase
		};
	}

	// Shared shape for every TDD-phase-specific action below
	const baseAction = {
		phase,
		tddPhase,
		subtask: {
			id: currentSubtask.id,
			title: currentSubtask.title
		}
	};

	if (tddPhase === 'RED') {
		return {
			...baseAction,
			action: 'generate_test',
			description: 'Generate failing test for current subtask',
			nextSteps: `Write failing tests for subtask ${currentSubtask.id}: "${currentSubtask.title}". Create test file(s) that validate the expected behavior. Run tests and use autopilot_complete_phase with results. Note: If all tests pass (0 failures), the feature is already implemented and the subtask will be auto-completed.`
		};
	}

	if (tddPhase === 'GREEN') {
		return {
			...baseAction,
			action: 'implement_code',
			description: 'Implement feature to make tests pass',
			nextSteps: `Implement code to make tests pass for subtask ${currentSubtask.id}: "${currentSubtask.title}". Write the minimal code needed to pass all tests (GREEN phase), then use autopilot_complete_phase with test results.`
		};
	}

	if (tddPhase === 'COMMIT') {
		return {
			...baseAction,
			action: 'commit_changes',
			description: 'Commit RED-GREEN cycle changes',
			nextSteps: `Review and commit your changes for subtask ${currentSubtask.id}: "${currentSubtask.title}". Use autopilot_commit to create the commit and advance to the next subtask.`
		};
	}

	// Unrecognized TDD phase: fall back to a status check
	return {
		...baseAction,
		action: 'unknown',
		description: 'Unknown TDD phase',
		nextSteps: 'Use autopilot_status to check workflow state.'
	};
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Complete current TDD phase with test results
|
||||||
|
*/
|
||||||
|
async completePhase(testResults: TestResult): Promise<WorkflowStatus> {
|
||||||
|
if (!this.orchestrator) {
|
||||||
|
throw new Error('No active workflow. Start or resume a workflow first.');
|
||||||
|
}
|
||||||
|
|
||||||
|
const tddPhase = this.orchestrator.getCurrentTDDPhase();
|
||||||
|
|
||||||
|
if (!tddPhase) {
|
||||||
|
throw new Error('Not in active TDD phase');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transition based on current phase
|
||||||
|
switch (tddPhase) {
|
||||||
|
case 'RED':
|
||||||
|
this.orchestrator.transition({
|
||||||
|
type: 'RED_PHASE_COMPLETE',
|
||||||
|
testResults
|
||||||
|
});
|
||||||
|
break;
|
||||||
|
case 'GREEN':
|
||||||
|
this.orchestrator.transition({
|
||||||
|
type: 'GREEN_PHASE_COMPLETE',
|
||||||
|
testResults
|
||||||
|
});
|
||||||
|
break;
|
||||||
|
case 'COMMIT':
|
||||||
|
throw new Error(
|
||||||
|
'Cannot complete COMMIT phase with test results. Use commit() instead.'
|
||||||
|
);
|
||||||
|
default:
|
||||||
|
throw new Error(`Unknown TDD phase: ${tddPhase}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return this.getStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Commit current changes and advance workflow
|
||||||
|
*/
|
||||||
|
async commit(): Promise<WorkflowStatus> {
|
||||||
|
if (!this.orchestrator) {
|
||||||
|
throw new Error('No active workflow. Start or resume a workflow first.');
|
||||||
|
}
|
||||||
|
|
||||||
|
const tddPhase = this.orchestrator.getCurrentTDDPhase();
|
||||||
|
|
||||||
|
if (tddPhase !== 'COMMIT') {
|
||||||
|
throw new Error(
|
||||||
|
`Cannot commit in ${tddPhase} phase. Complete RED and GREEN phases first.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transition COMMIT phase complete
|
||||||
|
this.orchestrator.transition({
|
||||||
|
type: 'COMMIT_COMPLETE'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check if should advance to next subtask
|
||||||
|
const progress = this.orchestrator.getProgress();
|
||||||
|
if (progress.current < progress.total) {
|
||||||
|
this.orchestrator.transition({ type: 'SUBTASK_COMPLETE' });
|
||||||
|
} else {
|
||||||
|
// All subtasks complete
|
||||||
|
this.orchestrator.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
|
||||||
|
}
|
||||||
|
|
||||||
|
return this.getStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Finalize and complete the workflow
|
||||||
|
* Validates working tree is clean before marking complete
|
||||||
|
*/
|
||||||
|
async finalizeWorkflow(): Promise<WorkflowStatus> {
|
||||||
|
if (!this.orchestrator) {
|
||||||
|
throw new Error('No active workflow. Start or resume a workflow first.');
|
||||||
|
}
|
||||||
|
|
||||||
|
const phase = this.orchestrator.getCurrentPhase();
|
||||||
|
if (phase !== 'FINALIZE') {
|
||||||
|
throw new Error(
|
||||||
|
`Cannot finalize workflow in ${phase} phase. Complete all subtasks first.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check working tree is clean
|
||||||
|
const gitAdapter = new GitAdapter(this.projectRoot);
|
||||||
|
const statusSummary = await gitAdapter.getStatusSummary();
|
||||||
|
|
||||||
|
if (!statusSummary.isClean) {
|
||||||
|
throw new Error(
|
||||||
|
`Cannot finalize workflow: working tree has uncommitted changes.\n` +
|
||||||
|
`Staged: ${statusSummary.staged}, Modified: ${statusSummary.modified}, ` +
|
||||||
|
`Deleted: ${statusSummary.deleted}, Untracked: ${statusSummary.untracked}\n` +
|
||||||
|
`Please commit all changes before finalizing the workflow.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transition to COMPLETE
|
||||||
|
this.orchestrator.transition({ type: 'FINALIZE_COMPLETE' });
|
||||||
|
|
||||||
|
return this.getStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Abort current workflow
|
||||||
|
*/
|
||||||
|
async abortWorkflow(): Promise<void> {
|
||||||
|
if (this.orchestrator) {
|
||||||
|
this.orchestrator.transition({ type: 'ABORT' });
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete state file
|
||||||
|
await this.stateManager.delete();
|
||||||
|
|
||||||
|
this.orchestrator = undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate a descriptive git branch name
|
||||||
|
* Format: tag-name/task-id-task-title or task-id-task-title
|
||||||
|
*/
|
||||||
|
private generateBranchName(
|
||||||
|
taskId: string,
|
||||||
|
taskTitle: string,
|
||||||
|
tag?: string
|
||||||
|
): string {
|
||||||
|
// Sanitize task title for branch name
|
||||||
|
const sanitizedTitle = taskTitle
|
||||||
|
.toLowerCase()
|
||||||
|
.replace(/[^a-z0-9]+/g, '-') // Replace non-alphanumeric with dash
|
||||||
|
.replace(/^-+|-+$/g, '') // Remove leading/trailing dashes
|
||||||
|
.substring(0, 50); // Limit length
|
||||||
|
|
||||||
|
// Format task ID for branch name
|
||||||
|
const formattedTaskId = taskId.replace(/\./g, '-');
|
||||||
|
|
||||||
|
// Add tag prefix if tag is provided
|
||||||
|
const tagPrefix = tag ? `${tag}/` : '';
|
||||||
|
|
||||||
|
return `${tagPrefix}task-${formattedTaskId}-${sanitizedTitle}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
182
packages/tm-core/src/storage/activity-logger.ts
Normal file
182
packages/tm-core/src/storage/activity-logger.ts
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
/**
|
||||||
|
* Activity.jsonl append-only logging system for workflow tracking.
|
||||||
|
* Uses newline-delimited JSON (JSONL) format for structured event logging.
|
||||||
|
*
|
||||||
|
* @module activity-logger
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs-extra';
|
||||||
|
import path from 'path';
|
||||||
|
|
||||||
|
/**
 * Activity log entry structure.
 *
 * One JSONL record in activity.jsonl. Beyond the two required fields,
 * events may carry arbitrary event-specific payload properties.
 */
export interface ActivityEvent {
	// ISO-8601 timestamp; added automatically by logActivity()
	timestamp: string;
	// Free-form event discriminator, e.g. 'phase-start'
	type: string;
	// Arbitrary event-specific payload fields
	[key: string]: any;
}
|
||||||
|
|
||||||
|
/**
 * Filter criteria for activity log queries.
 *
 * All fields are optional; omitted criteria match every event. Fields beyond
 * these four are treated by filterActivityLog() as exact-match constraints.
 */
export interface ActivityFilter {
	// Exact match on the event's `type` field
	type?: string;
	// Inclusive lower bound on the ISO-8601 timestamp (string comparison)
	timestampFrom?: string;
	// Inclusive upper bound on the ISO-8601 timestamp (string comparison)
	timestampTo?: string;
	// Arbitrary per-event predicate; event is kept only when it returns true
	predicate?: (event: ActivityEvent) => boolean;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Appends an activity event to the log file.
|
||||||
|
* Uses atomic append operations to ensure data integrity.
|
||||||
|
*
|
||||||
|
* @param {string} activityPath - Path to the activity.jsonl file
|
||||||
|
* @param {Omit<ActivityEvent, 'timestamp'>} event - Event data to log (timestamp added automatically)
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* await logActivity('/path/to/activity.jsonl', {
|
||||||
|
* type: 'phase-start',
|
||||||
|
* phase: 'red'
|
||||||
|
* });
|
||||||
|
*/
|
||||||
|
export async function logActivity(
|
||||||
|
activityPath: string,
|
||||||
|
event: Omit<ActivityEvent, 'timestamp'>
|
||||||
|
): Promise<void> {
|
||||||
|
// Add timestamp to event
|
||||||
|
const logEntry = {
|
||||||
|
...event,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
} as ActivityEvent;
|
||||||
|
|
||||||
|
// Ensure directory exists
|
||||||
|
await fs.ensureDir(path.dirname(activityPath));
|
||||||
|
|
||||||
|
// Convert to JSONL format (single line with newline)
|
||||||
|
const line = JSON.stringify(logEntry) + '\n';
|
||||||
|
|
||||||
|
// Append to file atomically
|
||||||
|
// Using 'a' flag ensures atomic append on most systems
|
||||||
|
await fs.appendFile(activityPath, line, 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads and parses all events from an activity log file.
|
||||||
|
* Returns events in chronological order.
|
||||||
|
*
|
||||||
|
* @param {string} activityPath - Path to the activity.jsonl file
|
||||||
|
* @returns {Promise<ActivityEvent[]>} Array of activity events
|
||||||
|
* @throws {Error} If file contains invalid JSON
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const events = await readActivityLog('/path/to/activity.jsonl');
|
||||||
|
* console.log(`Found ${events.length} events`);
|
||||||
|
*/
|
||||||
|
export async function readActivityLog(
|
||||||
|
activityPath: string
|
||||||
|
): Promise<ActivityEvent[]> {
|
||||||
|
// Return empty array if file doesn't exist
|
||||||
|
if (!(await fs.pathExists(activityPath))) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read file content
|
||||||
|
const content = await fs.readFile(activityPath, 'utf-8');
|
||||||
|
|
||||||
|
// Parse JSONL (newline-delimited JSON)
|
||||||
|
const lines = content.trim().split('\n');
|
||||||
|
const events: ActivityEvent[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < lines.length; i++) {
|
||||||
|
const line = lines[i].trim();
|
||||||
|
|
||||||
|
// Skip empty lines
|
||||||
|
if (!line) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse JSON
|
||||||
|
try {
|
||||||
|
const event = JSON.parse(line);
|
||||||
|
events.push(event);
|
||||||
|
} catch (error) {
|
||||||
|
const errorMessage =
|
||||||
|
error instanceof Error ? error.message : String(error);
|
||||||
|
throw new Error(`Invalid JSON at line ${i + 1}: ${errorMessage}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return events;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Filters activity log events based on criteria.
|
||||||
|
* Supports filtering by event type, timestamp range, and custom predicates.
|
||||||
|
*
|
||||||
|
* @param {string} activityPath - Path to the activity.jsonl file
|
||||||
|
* @param {ActivityFilter} filter - Filter criteria
|
||||||
|
* @returns {Promise<ActivityEvent[]>} Filtered array of events
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* // Filter by event type
|
||||||
|
* const phaseEvents = await filterActivityLog('/path/to/activity.jsonl', {
|
||||||
|
* type: 'phase-start'
|
||||||
|
* });
|
||||||
|
*
|
||||||
|
* // Filter by timestamp range
|
||||||
|
* const recentEvents = await filterActivityLog('/path/to/activity.jsonl', {
|
||||||
|
* timestampFrom: '2024-01-15T10:00:00.000Z'
|
||||||
|
* });
|
||||||
|
*
|
||||||
|
* // Filter with custom predicate
|
||||||
|
* const failedTests = await filterActivityLog('/path/to/activity.jsonl', {
|
||||||
|
* predicate: (event) => event.type === 'test-run' && event.result === 'fail'
|
||||||
|
* });
|
||||||
|
*/
|
||||||
|
export async function filterActivityLog(
|
||||||
|
activityPath: string,
|
||||||
|
filter: ActivityFilter & Record<string, any>
|
||||||
|
): Promise<ActivityEvent[]> {
|
||||||
|
const events = await readActivityLog(activityPath);
|
||||||
|
|
||||||
|
return events.filter((event) => {
|
||||||
|
// Filter by type
|
||||||
|
if (filter.type && event.type !== filter.type) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter by timestamp range
|
||||||
|
if (filter.timestampFrom && event.timestamp < filter.timestampFrom) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (filter.timestampTo && event.timestamp > filter.timestampTo) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter by custom predicate
|
||||||
|
if (filter.predicate && !filter.predicate(event)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter by other fields (exact match)
|
||||||
|
for (const [key, value] of Object.entries(filter)) {
|
||||||
|
if (
|
||||||
|
key === 'type' ||
|
||||||
|
key === 'timestampFrom' ||
|
||||||
|
key === 'timestampTo' ||
|
||||||
|
key === 'predicate'
|
||||||
|
) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (event[key] !== value) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
});
|
||||||
|
}
|
||||||
@@ -8,6 +8,15 @@ export { FileStorage } from './file-storage/index.js';
|
|||||||
export { ApiStorage, type ApiStorageConfig } from './api-storage.js';
|
export { ApiStorage, type ApiStorageConfig } from './api-storage.js';
|
||||||
export { StorageFactory } from './storage-factory.js';
|
export { StorageFactory } from './storage-factory.js';
|
||||||
|
|
||||||
|
// Export activity logger
|
||||||
|
export {
|
||||||
|
logActivity,
|
||||||
|
readActivityLog,
|
||||||
|
filterActivityLog,
|
||||||
|
type ActivityEvent,
|
||||||
|
type ActivityFilter
|
||||||
|
} from './activity-logger.js';
|
||||||
|
|
||||||
// Export storage interface and types
|
// Export storage interface and types
|
||||||
export type {
|
export type {
|
||||||
IStorage,
|
IStorage,
|
||||||
|
|||||||
@@ -32,6 +32,21 @@ export {
|
|||||||
type GitHubRepoInfo
|
type GitHubRepoInfo
|
||||||
} from './git-utils.js';
|
} from './git-utils.js';
|
||||||
|
|
||||||
|
// Export path normalization utilities
|
||||||
|
export {
|
||||||
|
normalizeProjectPath,
|
||||||
|
denormalizeProjectPath,
|
||||||
|
isValidNormalizedPath
|
||||||
|
} from './path-normalizer.js';
|
||||||
|
|
||||||
|
// Export run ID generation utilities
|
||||||
|
export {
|
||||||
|
generateRunId,
|
||||||
|
isValidRunId,
|
||||||
|
parseRunId,
|
||||||
|
compareRunIds
|
||||||
|
} from './run-id-generator.js';
|
||||||
|
|
||||||
// Additional utility exports
|
// Additional utility exports
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
282
packages/tm-core/src/utils/path-normalizer.spec.ts
Normal file
282
packages/tm-core/src/utils/path-normalizer.spec.ts
Normal file
@@ -0,0 +1,282 @@
|
|||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import {
|
||||||
|
normalizeProjectPath,
|
||||||
|
denormalizeProjectPath,
|
||||||
|
isValidNormalizedPath
|
||||||
|
} from './path-normalizer.js';
|
||||||
|
|
||||||
|
// Vitest suite for the base64url path-normalizer utilities. Groups:
// encoding, decoding, validation, round-trip fidelity, and cross-platform
// consistency. The base64url alphabet referenced throughout is A-Z a-z 0-9 - _
describe('Path Normalizer (base64url encoding)', () => {
	describe('normalizeProjectPath', () => {
		it('should encode Unix paths to base64url', () => {
			const input = '/Users/test/projects/myapp';
			const normalized = normalizeProjectPath(input);

			// Should be valid base64url (only A-Z, a-z, 0-9, -, _)
			expect(/^[A-Za-z0-9_-]+$/.test(normalized)).toBe(true);
			// Should not contain slashes
			expect(normalized).not.toContain('/');
			expect(normalized).not.toContain('\\');
		});

		it('should encode Windows paths to base64url', () => {
			const input = 'C:\\Users\\test\\projects\\myapp';
			const normalized = normalizeProjectPath(input);

			// Should be valid base64url
			expect(/^[A-Za-z0-9_-]+$/.test(normalized)).toBe(true);
			expect(normalized).not.toContain('/');
			expect(normalized).not.toContain('\\');
		});

		it('should encode paths with hyphens (preserving them for round-trip)', () => {
			const input = '/projects/my-app';
			const normalized = normalizeProjectPath(input);

			// Should be valid base64url
			expect(/^[A-Za-z0-9_-]+$/.test(normalized)).toBe(true);
			// Hyphens in base64url are from encoding, not original path
			expect(isValidNormalizedPath(normalized)).toBe(true);
		});

		it('should encode paths with special characters', () => {
			const input = '/projects/myapp (v2)';
			const normalized = normalizeProjectPath(input);

			// Should be valid base64url
			expect(/^[A-Za-z0-9_-]+$/.test(normalized)).toBe(true);
		});

		it('should encode relative paths', () => {
			const input = './projects/app';
			const normalized = normalizeProjectPath(input);

			// Should be valid base64url
			expect(/^[A-Za-z0-9_-]+$/.test(normalized)).toBe(true);
		});

		it('should handle empty string', () => {
			// Empty input is passed through as empty output (not encoded)
			const input = '';
			const expected = '';
			expect(normalizeProjectPath(input)).toBe(expected);
		});

		it('should encode single directory', () => {
			const input = 'project';
			const normalized = normalizeProjectPath(input);

			// Should be valid base64url
			expect(/^[A-Za-z0-9_-]+$/.test(normalized)).toBe(true);
		});

		it('should encode paths with multiple consecutive slashes', () => {
			const input = '/Users//test///project';
			const normalized = normalizeProjectPath(input);

			// Should be valid base64url
			expect(/^[A-Za-z0-9_-]+$/.test(normalized)).toBe(true);
		});
	});

	describe('denormalizeProjectPath', () => {
		it('should decode base64url back to original path', () => {
			const original = '/Users/test/projects/myapp';
			const normalized = normalizeProjectPath(original);
			const denormalized = denormalizeProjectPath(normalized);

			expect(denormalized).toBe(original);
		});

		it('should decode base64url for Windows paths', () => {
			const original = 'C:\\Users\\test\\project';
			const normalized = normalizeProjectPath(original);
			const denormalized = denormalizeProjectPath(normalized);

			expect(denormalized).toBe(original);
		});

		it('should handle empty string', () => {
			const input = '';
			const expected = '';
			expect(denormalizeProjectPath(input)).toBe(expected);
		});

		it('should preserve hyphens in directory names (no longer a limitation!)', () => {
			const original = '/projects/my-app';
			const normalized = normalizeProjectPath(original);
			const denormalized = denormalizeProjectPath(normalized);

			// With base64url, hyphens are preserved correctly
			expect(denormalized).toBe(original);
		});

		it('should handle invalid base64url gracefully', () => {
			// Invalid base64url - should return the input as fallback
			const invalid = 'not@valid#base64url';
			const result = denormalizeProjectPath(invalid);

			// Should return input unchanged for backward compatibility
			expect(result).toBe(invalid);
		});
	});

	describe('isValidNormalizedPath', () => {
		it('should return true for valid base64url strings', () => {
			// Valid base64url characters: A-Z, a-z, 0-9, -, _
			expect(isValidNormalizedPath('VXNlcnMtdGVzdC1wcm9qZWN0')).toBe(true);
			expect(isValidNormalizedPath('abc123_-ABC')).toBe(true);
		});

		it('should return true for base64url with hyphens and underscores', () => {
			expect(isValidNormalizedPath('test-path_encoded')).toBe(true);
		});

		it('should return false for paths with slashes', () => {
			expect(isValidNormalizedPath('Users/test/project')).toBe(false);
		});

		it('should return false for paths with backslashes', () => {
			expect(isValidNormalizedPath('Users\\test\\project')).toBe(false);
		});

		it('should return true for empty string', () => {
			expect(isValidNormalizedPath('')).toBe(true);
		});

		it('should return false for strings with special characters not in base64url', () => {
			// Base64url only allows: A-Z, a-z, 0-9, -, _
			expect(isValidNormalizedPath('my-app (v2)')).toBe(false); // parentheses and spaces not allowed
			expect(isValidNormalizedPath('test@example')).toBe(false); // @ not allowed
			expect(isValidNormalizedPath('test+value')).toBe(false); // + not allowed
		});

		it('should validate normalized paths correctly', () => {
			const path = '/Users/test/my-app';
			const normalized = normalizeProjectPath(path);
			expect(isValidNormalizedPath(normalized)).toBe(true);
		});
	});

	describe('Round-trip conversion', () => {
		it('should perfectly preserve ALL Unix paths (including those with hyphens)', () => {
			const originalPaths = [
				'/Users/test/projects/myapp',
				'/root/deep/nested/path',
				'./relative/path',
				'/projects/my-app', // Now works correctly!
				'/path/with-multiple-hyphens/in-names'
			];

			for (const original of originalPaths) {
				const normalized = normalizeProjectPath(original);
				const denormalized = denormalizeProjectPath(normalized);

				// Perfect round-trip with base64url encoding
				expect(denormalized).toBe(original);
			}
		});

		it('should perfectly preserve Windows paths (including drive letters)', () => {
			const originalPaths = [
				'C:\\Users\\test\\project',
				'D:\\Projects\\my-app',
				'E:\\path\\with-hyphens\\test'
			];

			for (const original of originalPaths) {
				const normalized = normalizeProjectPath(original);
				const denormalized = denormalizeProjectPath(normalized);

				// Perfect round-trip - drive letters and colons preserved
				expect(denormalized).toBe(original);
			}
		});

		it('should preserve paths with special characters', () => {
			const originalPaths = [
				'/projects/my app (v2)',
				'/path/with spaces/test',
				'/path/with-dashes-and_underscores',
				'/path/with.dots.and-dashes'
			];

			for (const original of originalPaths) {
				const normalized = normalizeProjectPath(original);
				const denormalized = denormalizeProjectPath(normalized);

				// Perfect round-trip for all special characters
				expect(denormalized).toBe(original);
			}
		});

		it('should handle mixed slashes and preserve exact path structure', () => {
			const original = '/Users/test\\mixed/path';
			const normalized = normalizeProjectPath(original);
			const denormalized = denormalizeProjectPath(normalized);

			// Exact preservation of mixed slashes
			expect(denormalized).toBe(original);
		});

		it('should preserve multiple consecutive slashes', () => {
			const original = '/Users//test///project';
			const normalized = normalizeProjectPath(original);
			const denormalized = denormalizeProjectPath(normalized);

			// Exact preservation of all slashes
			expect(denormalized).toBe(original);
		});
	});

	describe('Cross-platform consistency', () => {
		it('should produce filesystem-safe normalized output for all platforms', () => {
			const unixPath = '/Users/test/project';
			const windowsPath = 'C:\\Users\\test\\project';

			const normalizedUnix = normalizeProjectPath(unixPath);
			const normalizedWindows = normalizeProjectPath(windowsPath);

			// Both should be valid base64url (no slashes or backslashes)
			expect(normalizedUnix).not.toContain('/');
			expect(normalizedUnix).not.toContain('\\');
			expect(normalizedWindows).not.toContain('/');
			expect(normalizedWindows).not.toContain('\\');

			// Both should be valid base64url format
			expect(isValidNormalizedPath(normalizedUnix)).toBe(true);
			expect(isValidNormalizedPath(normalizedWindows)).toBe(true);
		});

		it('should produce different normalized outputs for different paths', () => {
			// Unix and Windows paths are different, so should produce different encoded values
			const unixPath = '/Users/test/project';
			const windowsPath = 'C:\\Users\\test\\project';

			const normalizedUnix = normalizeProjectPath(unixPath);
			const normalizedWindows = normalizeProjectPath(windowsPath);

			// Different inputs should produce different outputs
			expect(normalizedUnix).not.toBe(normalizedWindows);

			// But both should denormalize back to their originals
			expect(denormalizeProjectPath(normalizedUnix)).toBe(unixPath);
			expect(denormalizeProjectPath(normalizedWindows)).toBe(windowsPath);
		});

		it('should handle Unicode characters in paths', () => {
			// UTF-8 bytes are encoded, so any Unicode path round-trips
			const unicodePaths = [
				'/Users/测试/project',
				'/Users/test/プロジェクト',
				'/Users/тест/project'
			];

			for (const original of unicodePaths) {
				const normalized = normalizeProjectPath(original);
				const denormalized = denormalizeProjectPath(normalized);

				// Perfect round-trip for Unicode
				expect(denormalized).toBe(original);
				expect(isValidNormalizedPath(normalized)).toBe(true);
			}
		});
	});
});
|
||||||
76
packages/tm-core/src/utils/path-normalizer.ts
Normal file
76
packages/tm-core/src/utils/path-normalizer.ts
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
/**
|
||||||
|
* Path normalization utilities for global storage system.
|
||||||
|
* Converts project paths to storage-safe directory names using base64url encoding.
|
||||||
|
*
|
||||||
|
* This provides a bijective (one-to-one) mapping that preserves all characters
|
||||||
|
* and supports perfect round-trip conversion between paths and storage names.
|
||||||
|
*
|
||||||
|
* @module path-normalizer
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalizes a project path to a storage-safe directory name using base64url encoding.
|
||||||
|
* This encoding is filesystem-safe (no slashes, backslashes, or special characters)
|
||||||
|
* and fully reversible, preserving hyphens and all other characters in paths.
|
||||||
|
*
|
||||||
|
* @param {string} projectPath - The project path to normalize
|
||||||
|
* @returns {string} The base64url-encoded path safe for use as a directory name
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* normalizeProjectPath('/Users/test/project') // returns base64url encoded string
|
||||||
|
* normalizeProjectPath('C:\\Users\\test') // returns base64url encoded string
|
||||||
|
* normalizeProjectPath('/projects/my-app') // returns base64url encoded string (hyphens preserved)
|
||||||
|
*/
|
||||||
|
export function normalizeProjectPath(projectPath: string): string {
|
||||||
|
if (!projectPath) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use base64url encoding: filesystem-safe and fully reversible
|
||||||
|
return Buffer.from(projectPath, 'utf-8').toString('base64url');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Denormalizes a storage directory name back to the original path.
|
||||||
|
* Decodes base64url-encoded paths with perfect fidelity.
|
||||||
|
*
|
||||||
|
* @param {string} normalizedPath - The base64url-encoded path to decode
|
||||||
|
* @returns {string} The original path with all characters preserved
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* denormalizeProjectPath(normalizeProjectPath('/Users/test/project')) // returns '/Users/test/project'
|
||||||
|
* denormalizeProjectPath(normalizeProjectPath('/projects/my-app')) // returns '/projects/my-app'
|
||||||
|
*/
|
||||||
|
export function denormalizeProjectPath(normalizedPath: string): string {
|
||||||
|
if (!normalizedPath) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate that input is valid base64url before attempting to decode
|
||||||
|
if (!isValidNormalizedPath(normalizedPath)) {
|
||||||
|
// Return original string for backward compatibility with non-base64url inputs
|
||||||
|
return normalizedPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
return Buffer.from(normalizedPath, 'base64url').toString('utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates whether a path is in normalized (base64url) format.
|
||||||
|
* Valid base64url strings contain only: A-Z, a-z, 0-9, -, _
|
||||||
|
*
|
||||||
|
* @param {string} path - The path to validate
|
||||||
|
* @returns {boolean} True if the path is in normalized base64url format
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* isValidNormalizedPath('VXNlcnMvdGVzdC9wcm9qZWN0') // returns true (valid base64url)
|
||||||
|
* isValidNormalizedPath('Users/test/project') // returns false (contains slashes)
|
||||||
|
*/
|
||||||
|
export function isValidNormalizedPath(path: string): boolean {
|
||||||
|
if (path === '') {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if path is valid base64url: only A-Z, a-z, 0-9, -, _
|
||||||
|
return /^[A-Za-z0-9_-]*$/.test(path);
|
||||||
|
}
|
||||||
266
packages/tm-core/src/utils/run-id-generator.spec.ts
Normal file
266
packages/tm-core/src/utils/run-id-generator.spec.ts
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
import { describe, it, expect } from 'vitest';
import {
	generateRunId,
	isValidRunId,
	parseRunId,
	compareRunIds
} from './run-id-generator.js';

// Unit tests for the run-id-generator utilities. Run IDs are ISO 8601 UTC
// timestamps with millisecond precision, so the suite exercises format,
// uniqueness, chronological ordering, and parse/compare round trips.
describe('Run ID Generator', () => {
	describe('generateRunId', () => {
		it('should generate a valid ISO 8601 timestamp-based ID', () => {
			const runId = generateRunId();

			// Should be in ISO 8601 format with milliseconds
			expect(runId).toMatch(/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/);
		});

		it('should generate unique IDs when called multiple times', () => {
			const id1 = generateRunId();
			const id2 = generateRunId();
			const id3 = generateRunId();

			expect(id1).not.toBe(id2);
			expect(id2).not.toBe(id3);
			expect(id1).not.toBe(id3);
		});

		it('should generate chronologically ordered IDs', () => {
			const id1 = generateRunId();
			// Small delay to ensure different timestamp
			const id2 = generateRunId();

			expect(id2 > id1).toBe(true);
		});

		it('should use current time by default', () => {
			const before = new Date().toISOString();
			const runId = generateRunId();
			const after = new Date().toISOString();

			expect(runId >= before).toBe(true);
			expect(runId <= after).toBe(true);
		});

		it('should accept custom Date object', () => {
			const customDate = new Date('2024-01-15T10:30:45.123Z');
			const runId = generateRunId(customDate);

			expect(runId).toBe('2024-01-15T10:30:45.123Z');
		});

		it('should handle date at year boundary', () => {
			const newYear = new Date('2025-01-01T00:00:00.000Z');
			const runId = generateRunId(newYear);

			expect(runId).toBe('2025-01-01T00:00:00.000Z');
		});

		it('should handle millisecond precision correctly', () => {
			const dateWithMs = new Date('2024-03-15T14:22:33.999Z');
			const runId = generateRunId(dateWithMs);

			expect(runId).toBe('2024-03-15T14:22:33.999Z');
		});
	});

	describe('isValidRunId', () => {
		it('should return true for valid ISO 8601 timestamp', () => {
			expect(isValidRunId('2024-01-15T10:30:45.123Z')).toBe(true);
		});

		it('should return true for generated run IDs', () => {
			const runId = generateRunId();
			expect(isValidRunId(runId)).toBe(true);
		});

		it('should return false for invalid format', () => {
			expect(isValidRunId('not-a-timestamp')).toBe(false);
			expect(isValidRunId('2024-01-15')).toBe(false);
			expect(isValidRunId('2024-01-15T10:30:45')).toBe(false); // missing Z
			expect(isValidRunId('2024-01-15 10:30:45.123Z')).toBe(false); // space instead of T
		});

		it('should return false for empty string', () => {
			expect(isValidRunId('')).toBe(false);
		});

		it('should return false for null or undefined', () => {
			expect(isValidRunId(null)).toBe(false);
			expect(isValidRunId(undefined)).toBe(false);
		});

		it('should return false for invalid dates', () => {
			// These match the format regex but are rejected by the
			// round-trip check (date.toISOString() !== input).
			expect(isValidRunId('2024-13-01T10:30:45.123Z')).toBe(false); // invalid month
			expect(isValidRunId('2024-01-32T10:30:45.123Z')).toBe(false); // invalid day
			expect(isValidRunId('2024-01-15T25:30:45.123Z')).toBe(false); // invalid hour
		});

		it('should return true for edge case valid dates', () => {
			expect(isValidRunId('2024-02-29T23:59:59.999Z')).toBe(true); // leap year
			expect(isValidRunId('2025-01-01T00:00:00.000Z')).toBe(true); // year boundary
		});

		it('should return false for missing milliseconds', () => {
			expect(isValidRunId('2024-01-15T10:30:45Z')).toBe(false);
		});

		it('should return false for non-UTC timezone', () => {
			expect(isValidRunId('2024-01-15T10:30:45.123+01:00')).toBe(false);
		});
	});

	describe('parseRunId', () => {
		it('should parse valid run ID to Date object', () => {
			const runId = '2024-01-15T10:30:45.123Z';
			const date = parseRunId(runId);

			expect(date).toBeInstanceOf(Date);
			expect(date?.toISOString()).toBe(runId);
		});

		it('should parse generated run ID', () => {
			const originalDate = new Date('2024-03-20T15:45:30.500Z');
			const runId = generateRunId(originalDate);
			const parsedDate = parseRunId(runId);

			expect(parsedDate?.getTime()).toBe(originalDate.getTime());
		});

		it('should return null for invalid run ID', () => {
			expect(parseRunId('invalid')).toBe(null);
			expect(parseRunId('')).toBe(null);
			expect(parseRunId(null)).toBe(null);
			expect(parseRunId(undefined)).toBe(null);
		});

		it('should handle edge case dates correctly', () => {
			const leapYear = '2024-02-29T12:00:00.000Z';
			const parsed = parseRunId(leapYear);

			expect(parsed?.toISOString()).toBe(leapYear);
		});
	});

	describe('compareRunIds', () => {
		it('should return negative when first ID is earlier', () => {
			const earlier = '2024-01-15T10:00:00.000Z';
			const later = '2024-01-15T11:00:00.000Z';

			expect(compareRunIds(earlier, later)).toBeLessThan(0);
		});

		it('should return positive when first ID is later', () => {
			const earlier = '2024-01-15T10:00:00.000Z';
			const later = '2024-01-15T11:00:00.000Z';

			expect(compareRunIds(later, earlier)).toBeGreaterThan(0);
		});

		it('should return zero when IDs are equal', () => {
			const runId = '2024-01-15T10:00:00.000Z';

			expect(compareRunIds(runId, runId)).toBe(0);
		});

		it('should handle millisecond differences', () => {
			const id1 = '2024-01-15T10:00:00.100Z';
			const id2 = '2024-01-15T10:00:00.200Z';

			expect(compareRunIds(id1, id2)).toBeLessThan(0);
			expect(compareRunIds(id2, id1)).toBeGreaterThan(0);
		});

		it('should handle cross-day comparisons', () => {
			const yesterday = '2024-01-14T23:59:59.999Z';
			const today = '2024-01-15T00:00:00.000Z';

			expect(compareRunIds(yesterday, today)).toBeLessThan(0);
		});

		it('should handle cross-year comparisons', () => {
			const lastYear = '2023-12-31T23:59:59.999Z';
			const thisYear = '2024-01-01T00:00:00.000Z';

			expect(compareRunIds(lastYear, thisYear)).toBeLessThan(0);
		});

		it('should throw error for invalid run IDs', () => {
			const valid = '2024-01-15T10:00:00.000Z';

			expect(() => compareRunIds('invalid', valid)).toThrow();
			expect(() => compareRunIds(valid, 'invalid')).toThrow();
			expect(() => compareRunIds('invalid', 'invalid')).toThrow();
		});
	});

	// Collision behavior depends on generateRunId's same-millisecond spin loop.
	describe('Collision detection', () => {
		it('should generate different IDs in rapid succession', () => {
			const ids = new Set();
			const count = 100;

			for (let i = 0; i < count; i++) {
				ids.add(generateRunId());
			}

			// All IDs should be unique
			expect(ids.size).toBe(count);
		});

		it('should handle high-frequency generation', () => {
			const ids = [];
			const iterations = 1000;

			for (let i = 0; i < iterations; i++) {
				ids.push(generateRunId());
			}

			// Check uniqueness
			const uniqueIds = new Set(ids);
			expect(uniqueIds.size).toBe(iterations);

			// Check chronological order
			for (let i = 1; i < ids.length; i++) {
				expect(compareRunIds(ids[i - 1], ids[i])).toBeLessThanOrEqual(0);
			}
		});
	});

	// ISO 8601 UTC strings sort lexicographically, so compareRunIds works
	// directly as an Array.sort comparator.
	describe('Chronological ordering', () => {
		it('should allow sorting run IDs chronologically', () => {
			const ids = [
				'2024-01-15T14:00:00.000Z',
				'2024-01-15T10:00:00.000Z',
				'2024-01-15T12:00:00.000Z',
				'2024-01-14T23:00:00.000Z',
				'2024-01-16T08:00:00.000Z'
			];

			const sorted = [...ids].sort(compareRunIds);

			expect(sorted).toEqual([
				'2024-01-14T23:00:00.000Z',
				'2024-01-15T10:00:00.000Z',
				'2024-01-15T12:00:00.000Z',
				'2024-01-15T14:00:00.000Z',
				'2024-01-16T08:00:00.000Z'
			]);
		});

		it('should handle reverse chronological sorting', () => {
			const ids = [
				'2024-01-15T10:00:00.000Z',
				'2024-01-15T14:00:00.000Z',
				'2024-01-15T12:00:00.000Z'
			];

			const sorted = [...ids].sort((a, b) => compareRunIds(b, a));

			expect(sorted).toEqual([
				'2024-01-15T14:00:00.000Z',
				'2024-01-15T12:00:00.000Z',
				'2024-01-15T10:00:00.000Z'
			]);
		});
	});
});
|
||||||
129
packages/tm-core/src/utils/run-id-generator.ts
Normal file
129
packages/tm-core/src/utils/run-id-generator.ts
Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
/**
|
||||||
|
* Run ID generation and validation utilities for the global storage system.
|
||||||
|
* Uses ISO 8601 timestamps with millisecond precision for unique, chronologically-ordered run IDs.
|
||||||
|
*
|
||||||
|
* @module run-id-generator
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Collision detection state
|
||||||
|
let lastTimestamp = 0;
|
||||||
|
let counter = 0;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generates a unique run ID using ISO 8601 timestamp format with millisecond precision.
|
||||||
|
* The ID is guaranteed to be chronologically sortable and URL-safe.
|
||||||
|
* Includes collision detection to ensure uniqueness even when called in rapid succession.
|
||||||
|
*
|
||||||
|
* @param {Date} [date=new Date()] - Optional date to use for the run ID. Defaults to current time.
|
||||||
|
* @returns {string} ISO 8601 formatted timestamp (e.g., '2024-01-15T10:30:45.123Z')
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* generateRunId() // returns '2024-01-15T10:30:45.123Z'
|
||||||
|
* generateRunId(new Date('2024-01-15T10:00:00.000Z')) // returns '2024-01-15T10:00:00.000Z'
|
||||||
|
*/
|
||||||
|
export function generateRunId(date: Date = new Date()): string {
|
||||||
|
const timestamp = date.getTime();
|
||||||
|
|
||||||
|
// Collision detection: if same millisecond, wait for next millisecond
|
||||||
|
if (timestamp === lastTimestamp) {
|
||||||
|
counter++;
|
||||||
|
// Wait for next millisecond to ensure uniqueness
|
||||||
|
let newTimestamp = timestamp;
|
||||||
|
while (newTimestamp === timestamp) {
|
||||||
|
newTimestamp = Date.now();
|
||||||
|
}
|
||||||
|
date = new Date(newTimestamp);
|
||||||
|
lastTimestamp = newTimestamp;
|
||||||
|
counter = 0;
|
||||||
|
} else {
|
||||||
|
lastTimestamp = timestamp;
|
||||||
|
counter = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
return date.toISOString();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates whether a string is a valid run ID.
|
||||||
|
* A valid run ID must be:
|
||||||
|
* - In ISO 8601 format with milliseconds
|
||||||
|
* - In UTC timezone (ends with 'Z')
|
||||||
|
* - A valid date when parsed
|
||||||
|
*
|
||||||
|
* @param {any} runId - The value to validate
|
||||||
|
* @returns {boolean} True if the value is a valid run ID
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* isValidRunId('2024-01-15T10:30:45.123Z') // returns true
|
||||||
|
* isValidRunId('invalid') // returns false
|
||||||
|
* isValidRunId('2024-01-15T10:30:45Z') // returns false (missing milliseconds)
|
||||||
|
*/
|
||||||
|
export function isValidRunId(runId: any): boolean {
|
||||||
|
if (!runId || typeof runId !== 'string') {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check format: YYYY-MM-DDTHH:mm:ss.sssZ
|
||||||
|
const isoFormatRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/;
|
||||||
|
if (!isoFormatRegex.test(runId)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate it's a real date
|
||||||
|
const date = new Date(runId);
|
||||||
|
if (isNaN(date.getTime())) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the parsed date matches the input (catches invalid dates like 2024-13-01)
|
||||||
|
return date.toISOString() === runId;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parses a run ID string into a Date object.
|
||||||
|
*
|
||||||
|
* @param {any} runId - The run ID to parse
|
||||||
|
* @returns {Date | null} Date object if valid, null if invalid
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* parseRunId('2024-01-15T10:30:45.123Z') // returns Date object
|
||||||
|
* parseRunId('invalid') // returns null
|
||||||
|
*/
|
||||||
|
export function parseRunId(runId: any): Date | null {
|
||||||
|
if (!isValidRunId(runId)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return new Date(runId);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compares two run IDs chronologically.
|
||||||
|
* Returns a negative number if id1 is earlier, positive if id1 is later, or 0 if equal.
|
||||||
|
* Can be used as a comparator function for Array.sort().
|
||||||
|
*
|
||||||
|
* @param {string} id1 - First run ID to compare
|
||||||
|
* @param {string} id2 - Second run ID to compare
|
||||||
|
* @returns {number} Negative if id1 < id2, positive if id1 > id2, zero if equal
|
||||||
|
* @throws {Error} If either run ID is invalid
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* compareRunIds('2024-01-15T10:00:00.000Z', '2024-01-15T11:00:00.000Z') // returns negative number
|
||||||
|
* ['2024-01-15T14:00:00.000Z', '2024-01-15T10:00:00.000Z'].sort(compareRunIds)
|
||||||
|
* // returns ['2024-01-15T10:00:00.000Z', '2024-01-15T14:00:00.000Z']
|
||||||
|
*/
|
||||||
|
export function compareRunIds(id1: string, id2: string): number {
|
||||||
|
if (!isValidRunId(id1)) {
|
||||||
|
throw new Error(`Invalid run ID: ${id1}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!isValidRunId(id2)) {
|
||||||
|
throw new Error(`Invalid run ID: ${id2}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// String comparison works for ISO 8601 timestamps
|
||||||
|
// because they are lexicographically sortable
|
||||||
|
if (id1 < id2) return -1;
|
||||||
|
if (id1 > id2) return 1;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
150
packages/tm-core/src/workflow/types.ts
Normal file
150
packages/tm-core/src/workflow/types.ts
Normal file
@@ -0,0 +1,150 @@
|
|||||||
|
/**
 * Workflow phase definitions.
 * Top-level phases of the TDD workflow state machine, in execution order.
 */
export type WorkflowPhase =
	| 'PREFLIGHT'
	| 'BRANCH_SETUP'
	| 'SUBTASK_LOOP'
	| 'FINALIZE'
	| 'COMPLETE';

/**
 * TDD cycle phases within subtask loop:
 * RED (write failing test) -> GREEN (make it pass) -> COMMIT (persist).
 */
export type TDDPhase = 'RED' | 'GREEN' | 'COMMIT';

/**
 * Workflow state context carried across all phase transitions.
 */
export interface WorkflowContext {
	// Identifier of the task this workflow run executes.
	taskId: string;
	// Ordered subtasks worked through during SUBTASK_LOOP.
	subtasks: SubtaskInfo[];
	// Index into `subtasks` of the subtask currently in progress.
	currentSubtaskIndex: number;
	// TDD phase of the current subtask; meaningful only during SUBTASK_LOOP.
	currentTDDPhase?: TDDPhase;
	// Git branch created during BRANCH_SETUP, if any.
	branchName?: string;
	// Errors accumulated so far (both recoverable and fatal).
	errors: WorkflowError[];
	// Free-form data attached by the orchestrator or adapters.
	metadata: Record<string, unknown>;
	// Results of the most recent test run, if one has happened.
	lastTestResults?: TestResult;
}

/**
 * Test result from a single test execution.
 * Counts are per-run; `phase` records which TDD phase the run belonged to.
 */
export interface TestResult {
	total: number;
	passed: number;
	failed: number;
	skipped: number;
	phase: 'RED' | 'GREEN';
}

/**
 * Subtask information tracked by the workflow.
 */
export interface SubtaskInfo {
	id: string;
	title: string;
	status: 'pending' | 'in-progress' | 'completed' | 'failed';
	// Number of attempts made so far.
	attempts: number;
	// Optional cap on attempts; semantics of exceeding it are enforced elsewhere.
	maxAttempts?: number;
}

/**
 * Workflow error information recorded in the context's `errors` list.
 */
export interface WorkflowError {
	// Phase in which the error occurred.
	phase: WorkflowPhase;
	message: string;
	timestamp: Date;
	// Whether the workflow may continue (e.g. via RETRY) after this error.
	recoverable: boolean;
}

/**
 * State machine state: the current phase plus its full context.
 */
export interface WorkflowState {
	phase: WorkflowPhase;
	context: WorkflowContext;
}

/**
 * State transition event types (discriminated union on `type`).
 */
export type WorkflowEvent =
	| { type: 'PREFLIGHT_COMPLETE' }
	| { type: 'BRANCH_CREATED'; branchName: string }
	| { type: 'SUBTASK_START'; subtaskId: string }
	| { type: 'RED_PHASE_COMPLETE'; testResults?: TestResult }
	| { type: 'GREEN_PHASE_COMPLETE'; testResults?: TestResult }
	| { type: 'COMMIT_COMPLETE' }
	| { type: 'SUBTASK_COMPLETE' }
	| { type: 'ALL_SUBTASKS_COMPLETE' }
	| { type: 'FINALIZE_COMPLETE' }
	| { type: 'ERROR'; error: WorkflowError }
	| { type: 'RETRY' }
	| { type: 'ABORT' };

/**
 * State transition definition: which event moves the machine from one
 * phase to another, with an optional guard predicate over the context.
 */
export interface StateTransition {
	from: WorkflowPhase;
	to: WorkflowPhase;
	event: WorkflowEvent['type'];
	// Transition is taken only if the guard (when present) returns true.
	guard?: (context: WorkflowContext) => boolean;
}

/**
 * State machine configuration: starting phase and the full transition table.
 */
export interface StateMachineConfig {
	initialPhase: WorkflowPhase;
	transitions: StateTransition[];
}

/**
 * Workflow event listener callback signature.
 */
export type WorkflowEventListener = (event: WorkflowEventData) => void;

/**
 * Comprehensive event data delivered to workflow event listeners.
 */
export interface WorkflowEventData {
	type: WorkflowEventType;
	timestamp: Date;
	// Workflow phase at emission time.
	phase: WorkflowPhase;
	// TDD phase at emission time, when inside SUBTASK_LOOP.
	tddPhase?: TDDPhase;
	subtaskId?: string;
	// Event-specific payload.
	data?: Record<string, unknown>;
}

/**
 * All possible workflow event types, namespaced as 'category:...:action'.
 */
export type WorkflowEventType =
	| 'workflow:started'
	| 'workflow:completed'
	| 'workflow:error'
	| 'workflow:resumed'
	| 'phase:entered'
	| 'phase:exited'
	| 'tdd:feature-already-implemented'
	| 'tdd:red:started'
	| 'tdd:red:completed'
	| 'tdd:green:started'
	| 'tdd:green:completed'
	| 'tdd:commit:started'
	| 'tdd:commit:completed'
	| 'subtask:started'
	| 'subtask:completed'
	| 'subtask:failed'
	| 'test:run'
	| 'test:passed'
	| 'test:failed'
	| 'git:branch:created'
	| 'git:commit:created'
	| 'error:occurred'
	| 'state:persisted'
	| 'progress:updated'
	| 'adapter:configured';
|
||||||
152
packages/tm-core/src/workflow/workflow-activity-logger.ts
Normal file
152
packages/tm-core/src/workflow/workflow-activity-logger.ts
Normal file
@@ -0,0 +1,152 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview WorkflowActivityLogger - Logs all workflow events to activity.jsonl
|
||||||
|
*
|
||||||
|
* Subscribes to all WorkflowOrchestrator events and persists them to a JSONL file
|
||||||
|
* for debugging, auditing, and workflow analysis.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { WorkflowOrchestrator } from './workflow-orchestrator.js';
|
||||||
|
import type { WorkflowEventData, WorkflowEventType } from './types.js';
|
||||||
|
import { logActivity, type ActivityEvent } from '../storage/activity-logger.js';
|
||||||
|
import { getLogger } from '../logger/index.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* All workflow event types that should be logged
|
||||||
|
*/
|
||||||
|
const WORKFLOW_EVENT_TYPES: WorkflowEventType[] = [
|
||||||
|
'workflow:started',
|
||||||
|
'workflow:completed',
|
||||||
|
'workflow:error',
|
||||||
|
'workflow:resumed',
|
||||||
|
'phase:entered',
|
||||||
|
'phase:exited',
|
||||||
|
'tdd:feature-already-implemented',
|
||||||
|
'tdd:red:started',
|
||||||
|
'tdd:red:completed',
|
||||||
|
'tdd:green:started',
|
||||||
|
'tdd:green:completed',
|
||||||
|
'tdd:commit:started',
|
||||||
|
'tdd:commit:completed',
|
||||||
|
'subtask:started',
|
||||||
|
'subtask:completed',
|
||||||
|
'subtask:failed',
|
||||||
|
'test:run',
|
||||||
|
'test:passed',
|
||||||
|
'test:failed',
|
||||||
|
'git:branch:created',
|
||||||
|
'git:commit:created',
|
||||||
|
'error:occurred',
|
||||||
|
'state:persisted',
|
||||||
|
'progress:updated',
|
||||||
|
'adapter:configured'
|
||||||
|
];
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logs all workflow events to an activity.jsonl file
|
||||||
|
*/
|
||||||
|
export class WorkflowActivityLogger {
|
||||||
|
private readonly activityLogPath: string;
|
||||||
|
private readonly orchestrator: WorkflowOrchestrator;
|
||||||
|
private readonly logger = getLogger('WorkflowActivityLogger');
|
||||||
|
private readonly listenerMap: Map<
|
||||||
|
WorkflowEventType,
|
||||||
|
(event: WorkflowEventData) => void
|
||||||
|
> = new Map();
|
||||||
|
private isActive = false;
|
||||||
|
|
||||||
|
constructor(orchestrator: WorkflowOrchestrator, activityLogPath: string) {
|
||||||
|
this.orchestrator = orchestrator;
|
||||||
|
this.activityLogPath = activityLogPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Start logging workflow events
|
||||||
|
*/
|
||||||
|
start(): void {
|
||||||
|
if (this.isActive) {
|
||||||
|
this.logger.warn('Activity logger is already active');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe to all workflow events, storing listener references for cleanup
|
||||||
|
WORKFLOW_EVENT_TYPES.forEach((eventType) => {
|
||||||
|
const listener = (event: WorkflowEventData) => this.logEvent(event);
|
||||||
|
this.listenerMap.set(eventType, listener);
|
||||||
|
this.orchestrator.on(eventType, listener);
|
||||||
|
});
|
||||||
|
|
||||||
|
this.isActive = true;
|
||||||
|
this.logger.debug(
|
||||||
|
`Activity logger started, logging to: ${this.activityLogPath}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stop logging workflow events and remove all listeners
|
||||||
|
*/
|
||||||
|
stop(): void {
|
||||||
|
if (!this.isActive) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove all registered listeners
|
||||||
|
this.listenerMap.forEach((listener, eventType) => {
|
||||||
|
this.orchestrator.off(eventType, listener);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Clear the listener map
|
||||||
|
this.listenerMap.clear();
|
||||||
|
|
||||||
|
this.isActive = false;
|
||||||
|
this.logger.debug('Activity logger stopped and listeners removed');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Log a workflow event to the activity log
|
||||||
|
*/
|
||||||
|
private async logEvent(event: WorkflowEventData): Promise<void> {
|
||||||
|
if (!this.isActive) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Convert timestamp to ISO string, handling both Date objects and string/number timestamps
|
||||||
|
const ts =
|
||||||
|
(event.timestamp as any) instanceof Date
|
||||||
|
? (event.timestamp as Date).toISOString()
|
||||||
|
: new Date(event.timestamp as any).toISOString();
|
||||||
|
|
||||||
|
// Convert WorkflowEventData to ActivityEvent format
|
||||||
|
const activityEvent: Omit<ActivityEvent, 'timestamp'> = {
|
||||||
|
type: event.type,
|
||||||
|
phase: event.phase,
|
||||||
|
tddPhase: event.tddPhase,
|
||||||
|
subtaskId: event.subtaskId,
|
||||||
|
// Event timestamp kept as ISO for readability; storage layer adds its own "timestamp"
|
||||||
|
eventTimestamp: ts,
|
||||||
|
...(event.data || {})
|
||||||
|
};
|
||||||
|
|
||||||
|
await logActivity(this.activityLogPath, activityEvent);
|
||||||
|
} catch (error: any) {
|
||||||
|
// Log errors but don't throw - we don't want activity logging to break the workflow
|
||||||
|
this.logger.error(
|
||||||
|
`Failed to log activity event ${event.type}: ${error.message}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the path to the activity log file
|
||||||
|
*/
|
||||||
|
getActivityLogPath(): string {
|
||||||
|
return this.activityLogPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if the logger is currently active
|
||||||
|
*/
|
||||||
|
isLogging(): boolean {
|
||||||
|
return this.isActive;
|
||||||
|
}
|
||||||
|
}
|
||||||
1535
packages/tm-core/src/workflow/workflow-orchestrator.test.ts
Normal file
1535
packages/tm-core/src/workflow/workflow-orchestrator.test.ts
Normal file
File diff suppressed because it is too large
Load Diff
698
packages/tm-core/src/workflow/workflow-orchestrator.ts
Normal file
698
packages/tm-core/src/workflow/workflow-orchestrator.ts
Normal file
@@ -0,0 +1,698 @@
|
|||||||
|
import type {
|
||||||
|
WorkflowPhase,
|
||||||
|
TDDPhase,
|
||||||
|
WorkflowContext,
|
||||||
|
WorkflowEvent,
|
||||||
|
WorkflowState,
|
||||||
|
StateTransition,
|
||||||
|
WorkflowEventType,
|
||||||
|
WorkflowEventData,
|
||||||
|
WorkflowEventListener,
|
||||||
|
SubtaskInfo
|
||||||
|
} from './types.js';
|
||||||
|
import type { TestResultValidator } from '../services/test-result-validator.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lightweight state machine for TDD workflow orchestration
|
||||||
|
*/
|
||||||
|
export class WorkflowOrchestrator {
|
||||||
|
private currentPhase: WorkflowPhase;
|
||||||
|
private context: WorkflowContext;
|
||||||
|
private readonly transitions: StateTransition[];
|
||||||
|
private readonly eventListeners: Map<
|
||||||
|
WorkflowEventType,
|
||||||
|
Set<WorkflowEventListener>
|
||||||
|
>;
|
||||||
|
private persistCallback?: (state: WorkflowState) => void | Promise<void>;
|
||||||
|
private autoPersistEnabled: boolean = false;
|
||||||
|
private readonly phaseGuards: Map<
|
||||||
|
WorkflowPhase,
|
||||||
|
(context: WorkflowContext) => boolean
|
||||||
|
>;
|
||||||
|
private aborted: boolean = false;
|
||||||
|
private testResultValidator?: TestResultValidator;
|
||||||
|
private gitOperationHook?: (operation: string, data?: unknown) => void;
|
||||||
|
private executeHook?: (command: string, context: WorkflowContext) => void;
|
||||||
|
|
||||||
|
constructor(initialContext: WorkflowContext) {
|
||||||
|
this.currentPhase = 'PREFLIGHT';
|
||||||
|
this.context = { ...initialContext };
|
||||||
|
this.transitions = this.defineTransitions();
|
||||||
|
this.eventListeners = new Map();
|
||||||
|
this.phaseGuards = new Map();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Define valid state transitions
|
||||||
|
*/
|
||||||
|
private defineTransitions(): StateTransition[] {
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
from: 'PREFLIGHT',
|
||||||
|
to: 'BRANCH_SETUP',
|
||||||
|
event: 'PREFLIGHT_COMPLETE'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
from: 'BRANCH_SETUP',
|
||||||
|
to: 'SUBTASK_LOOP',
|
||||||
|
event: 'BRANCH_CREATED'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
from: 'SUBTASK_LOOP',
|
||||||
|
to: 'FINALIZE',
|
||||||
|
event: 'ALL_SUBTASKS_COMPLETE'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
from: 'FINALIZE',
|
||||||
|
to: 'COMPLETE',
|
||||||
|
event: 'FINALIZE_COMPLETE'
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current workflow phase
|
||||||
|
*/
|
||||||
|
getCurrentPhase(): WorkflowPhase {
|
||||||
|
return this.currentPhase;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current TDD phase (only valid in SUBTASK_LOOP)
|
||||||
|
*/
|
||||||
|
getCurrentTDDPhase(): TDDPhase | undefined {
|
||||||
|
if (this.currentPhase === 'SUBTASK_LOOP') {
|
||||||
|
return this.context.currentTDDPhase || 'RED';
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get workflow context
|
||||||
|
*/
|
||||||
|
getContext(): WorkflowContext {
|
||||||
|
return { ...this.context };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Transition to next state based on event
|
||||||
|
*/
|
||||||
|
/**
 * Transition to next state based on event.
 *
 * Dispatch order is significant: (1) reject anything but ABORT once
 * aborted, (2) cross-phase events ERROR / ABORT / RETRY, (3) TDD-phase
 * events while in SUBTASK_LOOP, (4) the main transition table.
 * Every accepted event triggers a fire-and-forget auto-persist.
 *
 * @throws Error when the workflow is aborted or the event is not valid
 *   for the current phase.
 */
transition(event: WorkflowEvent): void {
  // Check if workflow is aborted
  if (this.aborted && event.type !== 'ABORT') {
    throw new Error('Workflow has been aborted');
  }

  // Handle special events that work across all phases
  if (event.type === 'ERROR') {
    this.handleError(event.error);
    // NOTE(review): `void` discards the promise — if the persist
    // callback rejects this becomes an unhandled rejection. Consider
    // `.catch()` here (applies to every triggerAutoPersist call below).
    void this.triggerAutoPersist();
    return;
  }

  if (event.type === 'ABORT') {
    this.aborted = true;
    void this.triggerAutoPersist();
    return;
  }

  if (event.type === 'RETRY') {
    this.handleRetry();
    void this.triggerAutoPersist();
    return;
  }

  // Handle TDD phase transitions within SUBTASK_LOOP
  // (ALL_SUBTASKS_COMPLETE is also routed through this path).
  if (this.currentPhase === 'SUBTASK_LOOP') {
    this.handleTDDPhaseTransition(event);
    void this.triggerAutoPersist();
    return;
  }

  // Handle main workflow phase transitions
  const validTransition = this.transitions.find(
    (t) => t.from === this.currentPhase && t.event === event.type
  );

  if (!validTransition) {
    throw new Error(
      `Invalid transition: ${event.type} from ${this.currentPhase}`
    );
  }

  // Execute transition
  this.executeTransition(validTransition, event);
  void this.triggerAutoPersist();
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle TDD phase transitions (RED -> GREEN -> COMMIT)
|
||||||
|
*/
|
||||||
|
/**
 * Handle TDD phase transitions (RED -> GREEN -> COMMIT) while the
 * machine is in SUBTASK_LOOP.
 *
 * RED requires test results and normally expects failures; if ALL
 * tests pass in RED the feature is treated as already implemented and
 * the subtask is completed immediately. GREEN requires zero failures.
 * COMMIT marks the subtask completed; SUBTASK_COMPLETE advances to the
 * next subtask (or re-enters transition() with ALL_SUBTASKS_COMPLETE).
 *
 * NOTE(review): the "feature already implemented" branch duplicates
 * most of the SUBTASK_COMPLETE advancement logic — candidates for a
 * shared private helper.
 *
 * @throws Error on an event that is invalid for the current TDD phase,
 *   on missing test results, or on GREEN results with failures.
 */
private handleTDDPhaseTransition(event: WorkflowEvent): void {
  const currentTDD = this.context.currentTDDPhase || 'RED';

  switch (event.type) {
    case 'RED_PHASE_COMPLETE':
      if (currentTDD !== 'RED') {
        throw new Error(
          'Invalid transition: RED_PHASE_COMPLETE from non-RED phase'
        );
      }

      // Validate test results are provided
      if (!event.testResults) {
        throw new Error('Test results required for RED phase transition');
      }

      // Store test results in context
      this.context.lastTestResults = event.testResults;

      // Special case: All tests passing in RED phase means feature already implemented
      if (event.testResults.failed === 0) {
        this.emit('tdd:red:completed');
        this.emit('tdd:feature-already-implemented', {
          subtaskId: this.getCurrentSubtaskId(),
          testResults: event.testResults
        });

        // Mark subtask as complete and move to next one
        const subtask =
          this.context.subtasks[this.context.currentSubtaskIndex];
        if (subtask) {
          subtask.status = 'completed';
        }

        this.emit('subtask:completed');
        this.context.currentSubtaskIndex++;

        // Emit progress update
        const progress = this.getProgress();
        this.emit('progress:updated', {
          completed: progress.completed,
          total: progress.total,
          percentage: progress.percentage
        });

        // Start next subtask or complete workflow
        if (this.context.currentSubtaskIndex < this.context.subtasks.length) {
          this.context.currentTDDPhase = 'RED';
          this.emit('tdd:red:started');
          this.emit('subtask:started');
        } else {
          // All subtasks complete, transition to FINALIZE
          // (re-enters transition(), which routes back into this switch).
          this.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
        }
        break;
      }

      // Normal RED phase: has failing tests, proceed to GREEN
      this.emit('tdd:red:completed');
      this.context.currentTDDPhase = 'GREEN';
      this.emit('tdd:green:started');
      break;

    case 'GREEN_PHASE_COMPLETE':
      if (currentTDD !== 'GREEN') {
        throw new Error(
          'Invalid transition: GREEN_PHASE_COMPLETE from non-GREEN phase'
        );
      }

      // Validate test results are provided
      if (!event.testResults) {
        throw new Error('Test results required for GREEN phase transition');
      }

      // Validate GREEN phase has no failures
      if (event.testResults.failed !== 0) {
        throw new Error('GREEN phase must have zero failures');
      }

      // Store test results in context
      this.context.lastTestResults = event.testResults;

      this.emit('tdd:green:completed');
      this.context.currentTDDPhase = 'COMMIT';
      this.emit('tdd:commit:started');
      break;

    case 'COMMIT_COMPLETE':
      if (currentTDD !== 'COMMIT') {
        throw new Error(
          'Invalid transition: COMMIT_COMPLETE from non-COMMIT phase'
        );
      }
      this.emit('tdd:commit:completed');
      // Mark current subtask as completed
      // (advancement to the next subtask happens on SUBTASK_COMPLETE).
      const currentSubtask =
        this.context.subtasks[this.context.currentSubtaskIndex];
      if (currentSubtask) {
        currentSubtask.status = 'completed';
      }
      break;

    case 'SUBTASK_COMPLETE':
      this.emit('subtask:completed');
      // Move to next subtask
      this.context.currentSubtaskIndex++;

      // Emit progress update
      const progress = this.getProgress();
      this.emit('progress:updated', {
        completed: progress.completed,
        total: progress.total,
        percentage: progress.percentage
      });

      if (this.context.currentSubtaskIndex < this.context.subtasks.length) {
        // Start next subtask with RED phase
        this.context.currentTDDPhase = 'RED';
        this.emit('tdd:red:started');
        this.emit('subtask:started');
      } else {
        // All subtasks complete, transition to FINALIZE
        this.transition({ type: 'ALL_SUBTASKS_COMPLETE' });
      }
      break;

    case 'ALL_SUBTASKS_COMPLETE':
      // Transition to FINALIZE phase
      this.emit('phase:exited');
      this.currentPhase = 'FINALIZE';
      this.context.currentTDDPhase = undefined;
      this.emit('phase:entered');
      // Note: Don't auto-transition to COMPLETE - requires explicit finalize call
      break;

    default:
      throw new Error(`Invalid transition: ${event.type} in SUBTASK_LOOP`);
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a state transition
|
||||||
|
*/
|
||||||
|
/**
 * Execute a state transition from the main transition table.
 *
 * Order of operations: transition-level guard, then phase-level guard,
 * then phase:exited, context update, phase change, phase:entered, and
 * finally TDD-loop initialization when entering SUBTASK_LOOP.
 *
 * @throws Error when either guard rejects the transition.
 */
private executeTransition(
  transition: StateTransition,
  event: WorkflowEvent
): void {
  // Check guard condition if present
  if (transition.guard && !transition.guard(this.context)) {
    throw new Error(
      `Guard condition failed for transition to ${transition.to}`
    );
  }

  // Check phase-specific guard if present (registered via addGuard()).
  const phaseGuard = this.phaseGuards.get(transition.to);
  if (phaseGuard && !phaseGuard(this.context)) {
    throw new Error('Guard condition failed');
  }

  // Emit phase exit event
  this.emit('phase:exited');

  // Update context based on event
  this.updateContext(event);

  // Transition to new phase
  this.currentPhase = transition.to;

  // Emit phase entry event
  this.emit('phase:entered');

  // Initialize TDD phase if entering SUBTASK_LOOP
  if (this.currentPhase === 'SUBTASK_LOOP') {
    this.context.currentTDDPhase = 'RED';
    this.emit('tdd:red:started');
    this.emit('subtask:started');
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update context based on event
|
||||||
|
*/
|
||||||
|
/**
 * Apply event-specific context updates during a main-phase transition.
 * BRANCH_CREATED records the branch name and notifies the git hook;
 * ERROR appends to the context error list. Other events are no-ops.
 */
private updateContext(event: WorkflowEvent): void {
  if (event.type === 'BRANCH_CREATED') {
    this.context.branchName = event.branchName;
    this.emit('git:branch:created', { branchName: event.branchName });

    // Notify the external git hook, when one is registered.
    this.gitOperationHook?.('branch:created', {
      branchName: event.branchName
    });
  } else if (event.type === 'ERROR') {
    this.context.errors.push(event.error);
    this.emit('error:occurred', { error: event.error });
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current state for serialization
|
||||||
|
*/
|
||||||
|
/** Serializable snapshot of the machine: current phase plus a shallow copy of the context. */
getState(): WorkflowState {
  const snapshot: WorkflowState = {
    phase: this.currentPhase,
    context: { ...this.context }
  };
  return snapshot;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restore state from checkpoint
|
||||||
|
*/
|
||||||
|
/**
 * Restore state from checkpoint: replaces phase and context wholesale,
 * then announces resumption with the recomputed progress.
 *
 * NOTE(review): the `aborted` flag is not reset here — confirm whether
 * a machine that was aborted before restore should remain aborted.
 */
restoreState(state: WorkflowState): void {
  this.currentPhase = state.phase;
  this.context = { ...state.context };

  // Emit workflow:resumed event
  this.emit('workflow:resumed', {
    phase: this.currentPhase,
    progress: this.getProgress()
  });
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add event listener
|
||||||
|
*/
|
||||||
|
on(eventType: WorkflowEventType, listener: WorkflowEventListener): void {
|
||||||
|
if (!this.eventListeners.has(eventType)) {
|
||||||
|
this.eventListeners.set(eventType, new Set());
|
||||||
|
}
|
||||||
|
this.eventListeners.get(eventType)!.add(listener);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove event listener
|
||||||
|
*/
|
||||||
|
off(eventType: WorkflowEventType, listener: WorkflowEventListener): void {
|
||||||
|
const listeners = this.eventListeners.get(eventType);
|
||||||
|
if (listeners) {
|
||||||
|
listeners.delete(listener);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Emit workflow event
|
||||||
|
*/
|
||||||
|
private emit(
|
||||||
|
eventType: WorkflowEventType,
|
||||||
|
data?: Record<string, unknown>
|
||||||
|
): void {
|
||||||
|
const eventData: WorkflowEventData = {
|
||||||
|
type: eventType,
|
||||||
|
timestamp: new Date(),
|
||||||
|
phase: this.currentPhase,
|
||||||
|
tddPhase: this.context.currentTDDPhase,
|
||||||
|
subtaskId: this.getCurrentSubtaskId(),
|
||||||
|
data: {
|
||||||
|
...data,
|
||||||
|
adapters: {
|
||||||
|
testValidator: !!this.testResultValidator,
|
||||||
|
gitHook: !!this.gitOperationHook,
|
||||||
|
executeHook: !!this.executeHook
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const listeners = this.eventListeners.get(eventType);
|
||||||
|
if (listeners) {
|
||||||
|
listeners.forEach((listener) => listener(eventData));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current subtask ID
|
||||||
|
*/
|
||||||
|
/** Id of the subtask at the current index, or undefined past the end of the list. */
private getCurrentSubtaskId(): string | undefined {
  return this.context.subtasks[this.context.currentSubtaskIndex]?.id;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register callback for state persistence
|
||||||
|
*/
|
||||||
|
onStatePersist(
|
||||||
|
callback: (state: WorkflowState) => void | Promise<void>
|
||||||
|
): void {
|
||||||
|
this.persistCallback = callback;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enable auto-persistence after each transition
|
||||||
|
*/
|
||||||
|
enableAutoPersist(
|
||||||
|
callback: (state: WorkflowState) => void | Promise<void>
|
||||||
|
): void {
|
||||||
|
this.persistCallback = callback;
|
||||||
|
this.autoPersistEnabled = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Disable auto-persistence
|
||||||
|
*/
|
||||||
|
disableAutoPersist(): void {
|
||||||
|
this.autoPersistEnabled = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manually persist current state
|
||||||
|
*/
|
||||||
|
async persistState(): Promise<void> {
|
||||||
|
if (this.persistCallback) {
|
||||||
|
await this.persistCallback(this.getState());
|
||||||
|
}
|
||||||
|
this.emit('state:persisted');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Trigger auto-persistence if enabled
|
||||||
|
*/
|
||||||
|
/**
 * Persist the current state if auto-persistence is enabled and a
 * callback is registered; otherwise a no-op.
 *
 * NOTE(review): callers invoke this as `void this.triggerAutoPersist()`
 * — a rejecting persist callback becomes an unhandled promise
 * rejection. Consider catching and reporting here.
 */
private async triggerAutoPersist(): Promise<void> {
  if (this.autoPersistEnabled && this.persistCallback) {
    await this.persistCallback(this.getState());
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a guard condition for a specific phase
|
||||||
|
*/
|
||||||
|
addGuard(
|
||||||
|
phase: WorkflowPhase,
|
||||||
|
guard: (context: WorkflowContext) => boolean
|
||||||
|
): void {
|
||||||
|
this.phaseGuards.set(phase, guard);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove a guard condition for a specific phase
|
||||||
|
*/
|
||||||
|
removeGuard(phase: WorkflowPhase): void {
|
||||||
|
this.phaseGuards.delete(phase);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current subtask being worked on
|
||||||
|
*/
|
||||||
|
/** Subtask currently being worked on, or undefined once the index is past the list. */
getCurrentSubtask(): SubtaskInfo | undefined {
  const { subtasks, currentSubtaskIndex } = this.context;
  return subtasks[currentSubtaskIndex];
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get workflow progress information
|
||||||
|
*/
|
||||||
|
/**
 * Workflow progress: completed/total subtask counts, the 1-based index
 * of the current subtask, and a rounded completion percentage (0 when
 * there are no subtasks).
 */
getProgress(): {
  completed: number;
  total: number;
  current: number;
  percentage: number;
} {
  const { subtasks, currentSubtaskIndex } = this.context;
  const total = subtasks.length;

  let completed = 0;
  for (const subtask of subtasks) {
    if (subtask.status === 'completed') {
      completed += 1;
    }
  }

  return {
    completed,
    total,
    current: currentSubtaskIndex + 1,
    percentage: total > 0 ? Math.round((completed / total) * 100) : 0
  };
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if can proceed to next subtask or phase
|
||||||
|
*/
|
||||||
|
/**
 * Whether the loop may advance: only inside SUBTASK_LOOP, and only
 * once the current subtask has been marked completed (post-COMMIT).
 */
canProceed(): boolean {
  return (
    this.currentPhase === 'SUBTASK_LOOP' &&
    this.getCurrentSubtask()?.status === 'completed'
  );
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Increment attempts for current subtask
|
||||||
|
*/
|
||||||
|
incrementAttempts(): void {
|
||||||
|
const currentSubtask = this.getCurrentSubtask();
|
||||||
|
if (currentSubtask) {
|
||||||
|
currentSubtask.attempts++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if current subtask has exceeded max attempts
|
||||||
|
*/
|
||||||
|
/**
 * True when the current subtask's attempts are strictly greater than
 * its maxAttempts. Subtasks without a maxAttempts limit (or no current
 * subtask at all) never count as exceeded.
 */
hasExceededMaxAttempts(): boolean {
  const subtask = this.getCurrentSubtask();
  if (!subtask?.maxAttempts) {
    return false;
  }
  return subtask.attempts > subtask.maxAttempts;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle error event
|
||||||
|
*/
|
||||||
|
private handleError(error: import('./types.js').WorkflowError): void {
|
||||||
|
this.context.errors.push(error);
|
||||||
|
this.emit('error:occurred', { error });
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle retry event
|
||||||
|
*/
|
||||||
|
private handleRetry(): void {
|
||||||
|
if (this.currentPhase === 'SUBTASK_LOOP') {
|
||||||
|
// Reset to RED phase to retry current subtask
|
||||||
|
this.context.currentTDDPhase = 'RED';
|
||||||
|
this.emit('tdd:red:started');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Retry current subtask (resets to RED phase)
|
||||||
|
*/
|
||||||
|
retryCurrentSubtask(): void {
|
||||||
|
if (this.currentPhase === 'SUBTASK_LOOP') {
|
||||||
|
this.context.currentTDDPhase = 'RED';
|
||||||
|
this.emit('tdd:red:started');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle max attempts exceeded for current subtask
|
||||||
|
*/
|
||||||
|
handleMaxAttemptsExceeded(): void {
|
||||||
|
const currentSubtask = this.getCurrentSubtask();
|
||||||
|
if (currentSubtask) {
|
||||||
|
currentSubtask.status = 'failed';
|
||||||
|
this.emit('subtask:failed', {
|
||||||
|
subtaskId: currentSubtask.id,
|
||||||
|
attempts: currentSubtask.attempts,
|
||||||
|
maxAttempts: currentSubtask.maxAttempts
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if workflow has been aborted
|
||||||
|
*/
|
||||||
|
/** Whether an ABORT event has permanently stopped this workflow. */
isAborted(): boolean {
  const { aborted } = this;
  return aborted;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate if a state can be resumed from
|
||||||
|
*/
|
||||||
|
/**
 * Validate that a checkpoint is structurally resumable: a known phase,
 * an object context, and the required context fields (taskId, subtasks
 * array, numeric currentSubtaskIndex, errors array).
 */
canResumeFromState(state: WorkflowState): boolean {
  const validPhases: WorkflowPhase[] = [
    'PREFLIGHT',
    'BRANCH_SETUP',
    'SUBTASK_LOOP',
    'FINALIZE',
    'COMPLETE'
  ];

  const ctx = state.context;

  return (
    validPhases.includes(state.phase) &&
    !!ctx &&
    typeof ctx === 'object' &&
    !!ctx.taskId &&
    Array.isArray(ctx.subtasks) &&
    typeof ctx.currentSubtaskIndex === 'number' &&
    Array.isArray(ctx.errors)
  );
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set TestResultValidator adapter
|
||||||
|
*/
|
||||||
|
setTestResultValidator(validator: TestResultValidator): void {
|
||||||
|
this.testResultValidator = validator;
|
||||||
|
this.emit('adapter:configured', { adapterType: 'test-validator' });
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if TestResultValidator is configured
|
||||||
|
*/
|
||||||
|
/** Whether a TestResultValidator adapter is currently installed. */
hasTestResultValidator(): boolean {
  return this.testResultValidator !== undefined;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove TestResultValidator adapter
|
||||||
|
*/
|
||||||
|
removeTestResultValidator(): void {
|
||||||
|
this.testResultValidator = undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register git operation hook
|
||||||
|
*/
|
||||||
|
onGitOperation(hook: (operation: string, data?: unknown) => void): void {
|
||||||
|
this.gitOperationHook = hook;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register execute command hook
|
||||||
|
*/
|
||||||
|
onExecute(hook: (command: string, context: WorkflowContext) => void): void {
|
||||||
|
this.executeHook = hook;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a command (triggers execute hook)
|
||||||
|
*/
|
||||||
|
executeCommand(command: string): void {
|
||||||
|
if (this.executeHook) {
|
||||||
|
this.executeHook(command, this.context);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
146
packages/tm-core/src/workflow/workflow-state-manager.spec.ts
Normal file
146
packages/tm-core/src/workflow/workflow-state-manager.spec.ts
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview Tests for WorkflowStateManager path sanitization
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { WorkflowStateManager } from './workflow-state-manager.js';
|
||||||
|
import os from 'node:os';
|
||||||
|
import path from 'node:path';
|
||||||
|
|
||||||
|
// NOTE(review): these tests assume POSIX-style absolute paths
// ('/Users/...', '/tmp/...'); confirm behavior on Windows where
// path.resolve produces drive-letter paths.
describe('WorkflowStateManager', () => {
	describe('getProjectIdentifier', () => {
		it('should sanitize paths like Claude Code', () => {
			const projectRoot =
				'/Volumes/Workspace/workspace/contrib/task-master/demos/nextjs-todo-tdd';
			const manager = new WorkflowStateManager(projectRoot);

			const sessionDir = manager.getSessionDir();
			const homeDir = os.homedir();

			// Expected structure: ~/.taskmaster/{project-id}/sessions/
			const expectedPath = path.join(
				homeDir,
				'.taskmaster',
				'-Volumes-Workspace-workspace-contrib-task-master-demos-nextjs-todo-tdd',
				'sessions'
			);

			expect(sessionDir).toBe(expectedPath);
		});

		it('should preserve case in paths', () => {
			const projectRoot = '/Users/Alice/Projects/MyApp';
			const manager = new WorkflowStateManager(projectRoot);

			const sessionDir = manager.getSessionDir();
			// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
			const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];

			// Case should be preserved
			expect(projectId).toContain('Users');
			expect(projectId).toContain('Alice');
			expect(projectId).toContain('Projects');
			expect(projectId).toContain('MyApp');
		});

		it('should handle paths with special characters', () => {
			const projectRoot = '/tmp/my-project_v2.0/test';
			const manager = new WorkflowStateManager(projectRoot);

			const sessionDir = manager.getSessionDir();
			// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
			const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];

			// Special chars should be replaced with dashes
			expect(projectId).toBe('-tmp-my-project-v2-0-test');
		});

		it('should create unique identifiers for different paths', () => {
			const project1 = '/Users/alice/task-master';
			const project2 = '/Users/bob/task-master';

			const manager1 = new WorkflowStateManager(project1);
			const manager2 = new WorkflowStateManager(project2);

			// Extract project IDs from: ~/.taskmaster/{project-id}/sessions/
			const id1 = manager1.getSessionDir().split(path.sep).slice(-2, -1)[0];
			const id2 = manager2.getSessionDir().split(path.sep).slice(-2, -1)[0];

			// Same basename but different full paths should be unique
			expect(id1).not.toBe(id2);
			expect(id1).toContain('alice');
			expect(id2).toContain('bob');
		});

		it('should collapse multiple dashes', () => {
			const projectRoot = '/path//with///multiple////slashes';
			const manager = new WorkflowStateManager(projectRoot);

			const sessionDir = manager.getSessionDir();
			// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
			const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];

			// Multiple dashes should be collapsed to single dash
			expect(projectId).not.toContain('--');
			expect(projectId).toBe('-path-with-multiple-slashes');
		});

		it('should not have trailing dashes', () => {
			const projectRoot = '/path/to/project';
			const manager = new WorkflowStateManager(projectRoot);

			const sessionDir = manager.getSessionDir();
			// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
			const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];

			// Should not end with dash
			expect(projectId).not.toMatch(/-$/);
		});

		it('should start with a dash like Claude Code', () => {
			const projectRoot = '/any/path';
			const manager = new WorkflowStateManager(projectRoot);

			const sessionDir = manager.getSessionDir();
			// Extract project ID from: ~/.taskmaster/{project-id}/sessions/
			const projectId = sessionDir.split(path.sep).slice(-2, -1)[0];

			// Should start with dash like Claude Code's pattern
			expect(projectId).toMatch(/^-/);
		});
	});

	describe('session paths', () => {
		it('should place sessions in global ~/.taskmaster/{project-id}/sessions/', () => {
			const projectRoot = '/some/project';
			const manager = new WorkflowStateManager(projectRoot);

			const sessionDir = manager.getSessionDir();
			const homeDir = os.homedir();

			// Should be: ~/.taskmaster/{project-id}/sessions/
			expect(sessionDir).toContain(path.join(homeDir, '.taskmaster'));
			// NOTE(review): forward-slash regex — would not match Windows
			// separators; the path.join assertion above is separator-safe.
			expect(sessionDir).toMatch(/\.taskmaster\/.*\/sessions$/);
		});

		it('should include workflow-state.json in session dir', () => {
			const projectRoot = '/some/project';
			const manager = new WorkflowStateManager(projectRoot);

			const statePath = manager.getStatePath();
			const sessionDir = manager.getSessionDir();

			expect(statePath).toBe(path.join(sessionDir, 'workflow-state.json'));
		});

		it('should include backups dir in session dir', () => {
			const projectRoot = '/some/project';
			const manager = new WorkflowStateManager(projectRoot);

			const backupDir = manager.getBackupDir();
			const sessionDir = manager.getSessionDir();

			expect(backupDir).toBe(path.join(sessionDir, 'backups'));
		});
	});
});
|
||||||
263
packages/tm-core/src/workflow/workflow-state-manager.ts
Normal file
263
packages/tm-core/src/workflow/workflow-state-manager.ts
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
/**
|
||||||
|
* @fileoverview WorkflowStateManager - Manages persistence of TDD workflow state
|
||||||
|
*
|
||||||
|
* Stores workflow state in global user directory (~/.taskmaster/{project-id}/sessions/)
|
||||||
|
* to avoid git conflicts and support multiple worktrees.
|
||||||
|
* Each project gets its own directory for organizing workflow-related data.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { promises as fs } from 'node:fs';
|
||||||
|
import path from 'node:path';
|
||||||
|
import os from 'node:os';
|
||||||
|
import type { WorkflowState } from './types.js';
|
||||||
|
import { getLogger } from '../logger/index.js';
|
||||||
|
|
||||||
|
/** Envelope written to disk by createBackup(): a state snapshot plus capture time. */
export interface WorkflowStateBackup {
	/** ISO-8601 time the backup was captured (new Date().toISOString()). */
	timestamp: string;
	/** The workflow state as it existed when the backup was taken. */
	state: WorkflowState;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages workflow state persistence with backup support
|
||||||
|
* Stores state in global user directory to avoid git noise
|
||||||
|
*/
|
||||||
|
export class WorkflowStateManager {
|
||||||
|
/** Absolute, resolved root of the project this manager serves. */
private readonly projectRoot: string;
/** Full path of workflow-state.json inside the session directory. */
private readonly statePath: string;
/** Directory holding timestamped workflow-state backups. */
private readonly backupDir: string;
/** Global per-project session directory: ~/.taskmaster/{project-id}/sessions. */
private readonly sessionDir: string;
// Presumably caps how many backups pruneBackups() retains — confirm
// against pruneBackups() (defined later in this file).
private maxBackups: number;
private readonly logger = getLogger('WorkflowStateManager');
|
||||||
|
|
||||||
|
/**
 * @param projectRoot root of the project; resolved to an absolute path
 *   and used to derive a stable per-project identifier
 * @param maxBackups how many backups to retain (default 5)
 */
constructor(projectRoot: string, maxBackups = 5) {
  this.projectRoot = path.resolve(projectRoot);
  this.maxBackups = maxBackups;

  // All state lives under the global user directory, keyed by a
  // sanitized project id: ~/.taskmaster/{project-id}/sessions/
  const homeDir = os.homedir();
  const projectId = this.getProjectIdentifier(this.projectRoot);

  this.sessionDir = path.join(homeDir, '.taskmaster', projectId, 'sessions');
  this.statePath = path.join(this.sessionDir, 'workflow-state.json');
  this.backupDir = path.join(this.sessionDir, 'backups');
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate a unique identifier for the project using full sanitized path
|
||||||
|
* Uses Claude Code's pattern: leading dash + full path with case preserved
|
||||||
|
* Example: /Volumes/Workspace/... -> -Volumes-Workspace-...
|
||||||
|
*/
|
||||||
|
/**
 * Derive a unique, filesystem-safe identifier from the full project
 * path, following Claude Code's pattern: a leading dash, case
 * preserved, runs of non-alphanumeric characters collapsed to single
 * dashes, and no trailing dash.
 * Example: /Volumes/Workspace/app -> -Volumes-Workspace-app
 */
private getProjectIdentifier(projectRoot: string): string {
  const absolutePath = path.resolve(projectRoot);

  const body = absolutePath
    .replace(/^\//, '') // drop the leading slash; the dash prefix replaces it
    .replace(/[^a-zA-Z0-9]+/g, '-') // each run of separators/punctuation -> one dash
    .replace(/-+/g, '-') // defensive: collapse any remaining dash runs
    .replace(/-+$/, ''); // no trailing dash

  return `-${body}`;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if workflow state exists
|
||||||
|
*/
|
||||||
|
async exists(): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
await fs.access(this.statePath);
|
||||||
|
return true;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load workflow state from disk
|
||||||
|
*/
|
||||||
|
async load(): Promise<WorkflowState> {
|
||||||
|
try {
|
||||||
|
const content = await fs.readFile(this.statePath, 'utf-8');
|
||||||
|
return JSON.parse(content) as WorkflowState;
|
||||||
|
} catch (error: any) {
|
||||||
|
if (error.code === 'ENOENT') {
|
||||||
|
throw new Error(`Workflow state file not found at ${this.statePath}`);
|
||||||
|
}
|
||||||
|
throw new Error(`Failed to load workflow state: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Save workflow state to disk
|
||||||
|
*/
|
||||||
|
async save(state: WorkflowState): Promise<void> {
|
||||||
|
try {
|
||||||
|
// Ensure session directory exists
|
||||||
|
await fs.mkdir(this.sessionDir, { recursive: true });
|
||||||
|
|
||||||
|
// Serialize and validate JSON
|
||||||
|
const jsonContent = JSON.stringify(state, null, 2);
|
||||||
|
|
||||||
|
// Validate that the JSON is well-formed by parsing it back
|
||||||
|
try {
|
||||||
|
JSON.parse(jsonContent);
|
||||||
|
} catch (parseError) {
|
||||||
|
this.logger.error('Generated invalid JSON:', jsonContent);
|
||||||
|
throw new Error('Failed to generate valid JSON from workflow state');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write state atomically with newline at end
|
||||||
|
const tempPath = `${this.statePath}.tmp`;
|
||||||
|
await fs.writeFile(tempPath, jsonContent + '\n', 'utf-8');
|
||||||
|
await fs.rename(tempPath, this.statePath);
|
||||||
|
|
||||||
|
this.logger.debug(`Saved workflow state (${jsonContent.length} bytes)`);
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(`Failed to save workflow state: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a backup of current state
|
||||||
|
*/
|
||||||
|
/**
 * Create a timestamped backup of the current state file, then prune
 * old backups down to maxBackups. No-op when no state file exists.
 *
 * NOTE(review): exists() followed by load() is a check-then-act race —
 * a concurrent delete() between the two turns the "missing file" no-op
 * into a thrown error. Confirm whether concurrent use is expected.
 *
 * @throws Error wrapping any load/write/prune failure.
 */
async createBackup(): Promise<void> {
  try {
    const exists = await this.exists();
    if (!exists) {
      return;
    }

    const state = await this.load();
    await fs.mkdir(this.backupDir, { recursive: true });

    // Colons/dots are not filesystem-safe in all environments.
    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
    const backupPath = path.join(
      this.backupDir,
      `workflow-state-${timestamp}.json`
    );

    const backup: WorkflowStateBackup = {
      timestamp: new Date().toISOString(),
      state
    };

    await fs.writeFile(backupPath, JSON.stringify(backup, null, 2), 'utf-8');

    // Clean up old backups
    await this.pruneBackups();
  } catch (error: any) {
    throw new Error(`Failed to create backup: ${error.message}`);
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete workflow state file
|
||||||
|
*/
|
||||||
|
async delete(): Promise<void> {
|
||||||
|
try {
|
||||||
|
await fs.unlink(this.statePath);
|
||||||
|
} catch (error: any) {
|
||||||
|
if (error.code !== 'ENOENT') {
|
||||||
|
throw new Error(`Failed to delete workflow state: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* List available backups
|
||||||
|
*/
|
||||||
|
async listBackups(): Promise<string[]> {
|
||||||
|
try {
|
||||||
|
const files = await fs.readdir(this.backupDir);
|
||||||
|
return files
|
||||||
|
.filter((f) => f.startsWith('workflow-state-') && f.endsWith('.json'))
|
||||||
|
.sort()
|
||||||
|
.reverse();
|
||||||
|
} catch (error: any) {
|
||||||
|
if (error.code === 'ENOENT') {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
throw new Error(`Failed to list backups: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restore from a backup
|
||||||
|
*/
|
||||||
|
async restoreBackup(backupFileName: string): Promise<void> {
|
||||||
|
try {
|
||||||
|
const backupPath = path.join(this.backupDir, backupFileName);
|
||||||
|
const content = await fs.readFile(backupPath, 'utf-8');
|
||||||
|
const backup: WorkflowStateBackup = JSON.parse(content);
|
||||||
|
|
||||||
|
await this.save(backup.state);
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(`Failed to restore backup: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Prune old backups to maintain max backup count.
 *
 * Deletes everything past the first `maxBackups` entries returned by
 * listBackups() — that list is newest-first, so the oldest backups are
 * removed. Failures are non-critical: they are logged as warnings and
 * never thrown, so a prune problem cannot fail the backup operation
 * that triggered it.
 */
private async pruneBackups(): Promise<void> {
  try {
    const backups = await this.listBackups();

    if (backups.length > this.maxBackups) {
      // Entries past maxBackups are the oldest and safe to remove.
      const toDelete = backups.slice(this.maxBackups);

      for (const backup of toDelete) {
        await fs.unlink(path.join(this.backupDir, backup));
      }
    }
  } catch (error: any) {
    // Non-critical error, log but don't throw
    this.logger.warn(`Failed to prune backups: ${error.message}`);
  }
}
|
||||||
|
|
||||||
|
/**
 * Get the path to the state file (for debugging/testing)
 *
 * @returns The state file path this manager reads/writes
 */
getStatePath(): string {
  return this.statePath;
}
|
||||||
|
|
||||||
|
/**
 * Get the path to the backup directory (for debugging/testing)
 *
 * @returns The directory where timestamped backups are stored
 */
getBackupDir(): string {
  return this.backupDir;
}
|
||||||
|
|
||||||
|
/**
 * Get the session directory path (for debugging/testing)
 *
 * @returns The session directory that holds the state file
 */
getSessionDir(): string {
  return this.sessionDir;
}
|
||||||
|
|
||||||
|
/**
 * Get the project root this manager is for
 *
 * @returns The project root path supplied to this manager
 */
getProjectRoot(): string {
  return this.projectRoot;
}
|
||||||
|
|
||||||
|
/**
 * Get the path to the activity log file
 * Activity log is stored next to workflow-state.json for correlation
 *
 * @returns Path of `activity.jsonl` inside the session directory
 */
getActivityLogPath(): string {
  return path.join(this.sessionDir, 'activity.jsonl');
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,401 @@
|
|||||||
|
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||||
|
import fs from 'fs-extra';
|
||||||
|
import path from 'path';
|
||||||
|
import os from 'os';
|
||||||
|
import {
|
||||||
|
logActivity,
|
||||||
|
readActivityLog,
|
||||||
|
filterActivityLog
|
||||||
|
} from '../../../src/storage/activity-logger.js';
|
||||||
|
|
||||||
|
describe('Activity Logger', () => {
|
||||||
|
// Shared per-test fixture: a fresh temp directory and the activity log
// path inside it, created before and removed after every test so tests
// never observe each other's log files.
let testDir: string;
let activityPath: string;

beforeEach(async () => {
  // Create a unique temporary test directory
  const prefix = path.join(os.tmpdir(), 'activity-test-');
  testDir = await fs.mkdtemp(prefix);
  activityPath = path.join(testDir, 'activity.jsonl');
});

afterEach(async () => {
  // Clean up test directory
  await fs.remove(testDir);
});
|
||||||
|
|
||||||
|
// Writing: each logActivity call should append exactly one JSON line
// (JSONL) to the log file, creating the file/directories as needed.
describe('logActivity', () => {
  it('should create activity log file on first write', async () => {
    await logActivity(activityPath, {
      type: 'phase-start',
      phase: 'red',
      data: {}
    });

    const exists = await fs.pathExists(activityPath);
    expect(exists).toBe(true);
  });

  it('should append event to log file', async () => {
    await logActivity(activityPath, {
      type: 'phase-start',
      phase: 'red'
    });

    const content = await fs.readFile(activityPath, 'utf-8');
    const lines = content.trim().split(/\r?\n/);

    // One call => one JSONL line.
    expect(lines.length).toBe(1);
  });

  it('should write valid JSONL format', async () => {
    await logActivity(activityPath, {
      type: 'test-run',
      result: 'pass'
    });

    const content = await fs.readFile(activityPath, 'utf-8');
    const line = content.trim();
    const parsed = JSON.parse(line);

    expect(parsed).toBeDefined();
    expect(parsed.type).toBe('test-run');
  });

  it('should include timestamp in log entry', async () => {
    // ISO-8601 strings compare chronologically as plain strings, so the
    // entry's timestamp must fall between these two bounds.
    const before = new Date().toISOString();
    await logActivity(activityPath, {
      type: 'phase-start',
      phase: 'red'
    });
    const after = new Date().toISOString();

    const logs = await readActivityLog(activityPath);
    expect(logs[0].timestamp).toBeDefined();
    expect(logs[0].timestamp >= before).toBe(true);
    expect(logs[0].timestamp <= after).toBe(true);
  });

  it('should append multiple events', async () => {
    await logActivity(activityPath, { type: 'event1' });
    await logActivity(activityPath, { type: 'event2' });
    await logActivity(activityPath, { type: 'event3' });

    const logs = await readActivityLog(activityPath);
    expect(logs.length).toBe(3);
    expect(logs[0].type).toBe('event1');
    expect(logs[1].type).toBe('event2');
    expect(logs[2].type).toBe('event3');
  });

  it('should preserve event data', async () => {
    const eventData = {
      type: 'git-commit',
      hash: 'abc123',
      message: 'test commit',
      files: ['file1.ts', 'file2.ts']
    };

    await logActivity(activityPath, eventData);

    // All top-level fields round-trip unchanged, including arrays.
    const logs = await readActivityLog(activityPath);
    expect(logs[0].type).toBe('git-commit');
    expect(logs[0].hash).toBe('abc123');
    expect(logs[0].message).toBe('test commit');
    expect(logs[0].files).toEqual(['file1.ts', 'file2.ts']);
  });

  it('should handle nested objects in event data', async () => {
    await logActivity(activityPath, {
      type: 'test-results',
      results: {
        passed: 10,
        failed: 2,
        details: { coverage: 85 }
      }
    });

    const logs = await readActivityLog(activityPath);
    expect(logs[0].results.details.coverage).toBe(85);
  });

  it('should handle special characters in event data', async () => {
    // Quotes and embedded newlines must survive JSON escaping.
    await logActivity(activityPath, {
      type: 'error',
      message: 'Error: "Something went wrong"\nLine 2'
    });

    const logs = await readActivityLog(activityPath);
    expect(logs[0].message).toBe('Error: "Something went wrong"\nLine 2');
  });

  it('should create parent directory if it does not exist', async () => {
    const nestedPath = path.join(testDir, 'nested', 'dir', 'activity.jsonl');

    await logActivity(nestedPath, { type: 'test' });

    const exists = await fs.pathExists(nestedPath);
    expect(exists).toBe(true);
  });
});
|
||||||
|
|
||||||
|
// Reading: readActivityLog parses the JSONL file back into an ordered
// array of events, tolerating blank lines and a missing file.
describe('readActivityLog', () => {
  it('should read all events from log', async () => {
    await logActivity(activityPath, { type: 'event1' });
    await logActivity(activityPath, { type: 'event2' });

    const logs = await readActivityLog(activityPath);

    expect(logs.length).toBe(2);
    expect(logs[0].type).toBe('event1');
    expect(logs[1].type).toBe('event2');
  });

  it('should return empty array for non-existent file', async () => {
    // No file written in this test — missing log is not an error.
    const logs = await readActivityLog(activityPath);
    expect(logs).toEqual([]);
  });

  it('should parse JSONL correctly', async () => {
    await logActivity(activityPath, { type: 'event1', data: 'test1' });
    await logActivity(activityPath, { type: 'event2', data: 'test2' });

    const logs = await readActivityLog(activityPath);

    expect(logs[0].data).toBe('test1');
    expect(logs[1].data).toBe('test2');
  });

  it('should handle empty lines', async () => {
    // File is written directly with a blank line between entries.
    await fs.writeFile(
      activityPath,
      '{"type":"event1"}\n\n{"type":"event2"}\n'
    );

    const logs = await readActivityLog(activityPath);

    expect(logs.length).toBe(2);
    expect(logs[0].type).toBe('event1');
    expect(logs[1].type).toBe('event2');
  });

  it('should throw error for invalid JSON line', async () => {
    await fs.writeFile(activityPath, '{"type":"event1"}\ninvalid json\n');

    await expect(readActivityLog(activityPath)).rejects.toThrow(
      /Invalid JSON/i
    );
  });

  it('should preserve chronological order', async () => {
    for (let i = 0; i < 10; i++) {
      await logActivity(activityPath, { type: 'event', index: i });
    }

    const logs = await readActivityLog(activityPath);

    // Events come back in exactly the order they were appended.
    for (let i = 0; i < 10; i++) {
      expect(logs[i].index).toBe(i);
    }
  });
});
|
||||||
|
|
||||||
|
// Filtering: filterActivityLog selects events by field equality, by a
// timestamp lower bound, or via an arbitrary predicate.
describe('filterActivityLog', () => {
  beforeEach(async () => {
    // Create sample log entries
    await logActivity(activityPath, { type: 'phase-start', phase: 'red' });
    await logActivity(activityPath, { type: 'test-run', result: 'fail' });
    await logActivity(activityPath, { type: 'phase-start', phase: 'green' });
    await logActivity(activityPath, { type: 'test-run', result: 'pass' });
    await logActivity(activityPath, { type: 'git-commit', hash: 'abc123' });
  });

  it('should filter by event type', async () => {
    const filtered = await filterActivityLog(activityPath, {
      type: 'phase-start'
    });

    expect(filtered.length).toBe(2);
    expect(filtered[0].type).toBe('phase-start');
    expect(filtered[1].type).toBe('phase-start');
  });

  it('should filter by multiple criteria', async () => {
    // Criteria combine with AND semantics.
    const filtered = await filterActivityLog(activityPath, {
      type: 'test-run',
      result: 'pass'
    });

    expect(filtered.length).toBe(1);
    expect(filtered[0].result).toBe('pass');
  });

  it('should return all events when no filter provided', async () => {
    const filtered = await filterActivityLog(activityPath, {});

    expect(filtered.length).toBe(5);
  });

  it('should filter by timestamp range', async () => {
    const logs = await readActivityLog(activityPath);
    const midpoint = logs[2].timestamp;

    const filtered = await filterActivityLog(activityPath, {
      timestampFrom: midpoint
    });

    // Should get events from midpoint onwards (inclusive)
    // Expect at least 3 events, may be more due to timestamp collisions
    expect(filtered.length).toBeGreaterThanOrEqual(3);
    expect(filtered.length).toBeLessThanOrEqual(5);
  });

  it('should filter by custom predicate', async () => {
    const filtered = await filterActivityLog(activityPath, {
      predicate: (event: any) => event.phase === 'red'
    });

    expect(filtered.length).toBe(1);
    expect(filtered[0].phase).toBe('red');
  });

  it('should return empty array for non-matching filter', async () => {
    const filtered = await filterActivityLog(activityPath, {
      type: 'non-existent'
    });

    expect(filtered).toEqual([]);
  });

  it('should handle nested property filters', async () => {
    await logActivity(activityPath, {
      type: 'test-results',
      results: { coverage: 85 }
    });

    // Nested fields are reached via a predicate, not equality criteria.
    const filtered = await filterActivityLog(activityPath, {
      predicate: (event: any) => event.results?.coverage > 80
    });

    expect(filtered.length).toBe(1);
    expect(filtered[0].results.coverage).toBe(85);
  });
});
|
||||||
|
|
||||||
|
// The logger is schema-agnostic: these cases document the event shapes
// the workflow emits and check each round-trips through the log intact.
describe('Event types', () => {
  it('should support phase-transition events', async () => {
    await logActivity(activityPath, {
      type: 'phase-transition',
      from: 'red',
      to: 'green'
    });

    const logs = await readActivityLog(activityPath);
    expect(logs[0].type).toBe('phase-transition');
    expect(logs[0].from).toBe('red');
    expect(logs[0].to).toBe('green');
  });

  it('should support test-run events', async () => {
    await logActivity(activityPath, {
      type: 'test-run',
      result: 'pass',
      testsRun: 50,
      testsPassed: 50,
      testsFailed: 0,
      coverage: 85.5
    });

    const logs = await readActivityLog(activityPath);
    expect(logs[0].testsRun).toBe(50);
    // Fractional numbers must survive serialization unchanged.
    expect(logs[0].coverage).toBe(85.5);
  });

  it('should support git-operation events', async () => {
    await logActivity(activityPath, {
      type: 'git-commit',
      hash: 'abc123def456',
      message: 'feat: add new feature',
      files: ['file1.ts', 'file2.ts']
    });

    const logs = await readActivityLog(activityPath);
    expect(logs[0].hash).toBe('abc123def456');
    expect(logs[0].files.length).toBe(2);
  });

  it('should support error events', async () => {
    await logActivity(activityPath, {
      type: 'error',
      phase: 'red',
      error: 'Test failed',
      stack: 'Error stack trace...'
    });

    const logs = await readActivityLog(activityPath);
    expect(logs[0].type).toBe('error');
    expect(logs[0].error).toBe('Test failed');
  });
});
|
||||||
|
|
||||||
|
// Concurrency: unawaited parallel appends must neither drop events nor
// interleave bytes within a line.
describe('Concurrency handling', () => {
  it('should handle rapid concurrent writes', async () => {
    // Fire all writes without awaiting in between.
    const writes: Promise<void>[] = [];
    for (let i = 0; i < 50; i++) {
      writes.push(logActivity(activityPath, { type: 'event', index: i }));
    }

    await Promise.all(writes);

    const logs = await readActivityLog(activityPath);
    expect(logs.length).toBe(50);
  });

  it('should maintain data integrity with concurrent writes', async () => {
    const writes: Promise<void>[] = [];
    for (let i = 0; i < 20; i++) {
      writes.push(
        logActivity(activityPath, {
          type: 'concurrent-test',
          id: i,
          data: `data-${i}`
        })
      );
    }

    await Promise.all(writes);

    const logs = await readActivityLog(activityPath);

    // All events should be present
    expect(logs.length).toBe(20);
    // Validate ids set — every id 0..19 exactly once, order not assumed.
    const ids = new Set(logs.map((l) => l.id));
    expect([...ids].sort((a, b) => a - b)).toEqual([...Array(20).keys()]);
    // Validate shape
    for (const log of logs) {
      expect(log.type).toBe('concurrent-test');
      expect(typeof log.id).toBe('number');
      expect(log.data).toMatch(/^data-\d+$/);
    }
  });
});
|
||||||
|
|
||||||
|
// Integrity: after many appends the file must still be line-for-line
// parseable JSONL (no partial or merged lines).
describe('File integrity', () => {
  it('should maintain valid JSONL after many operations', async () => {
    for (let i = 0; i < 100; i++) {
      await logActivity(activityPath, { type: 'test', iteration: i });
    }

    const content = await fs.readFile(activityPath, 'utf-8');
    const lines = content.trim().split(/\r?\n/);

    expect(lines.length).toBe(100);

    // All lines should be valid JSON
    for (const line of lines) {
      expect(() => JSON.parse(line)).not.toThrow();
    }
  });
});
|
||||||
|
});
|
||||||
@@ -22,12 +22,15 @@ export default defineConfig({
|
|||||||
provider: 'v8',
|
provider: 'v8',
|
||||||
reporter: ['text', 'json', 'html', 'lcov'],
|
reporter: ['text', 'json', 'html', 'lcov'],
|
||||||
exclude: [
|
exclude: [
|
||||||
'node_modules',
|
'node_modules/',
|
||||||
'dist',
|
'dist/',
|
||||||
'tests',
|
'tests/',
|
||||||
'**/*.test.ts',
|
'**/*.test.ts',
|
||||||
'**/*.spec.ts',
|
'**/*.spec.ts',
|
||||||
'**/*.d.ts',
|
'**/*.d.ts',
|
||||||
|
'**/mocks/**',
|
||||||
|
'**/fixtures/**',
|
||||||
|
'vitest.config.ts',
|
||||||
'src/index.ts'
|
'src/index.ts'
|
||||||
],
|
],
|
||||||
thresholds: {
|
thresholds: {
|
||||||
|
|||||||
@@ -18,6 +18,8 @@
|
|||||||
"@tm/core/*": ["./packages/tm-core/src/*"],
|
"@tm/core/*": ["./packages/tm-core/src/*"],
|
||||||
"@tm/cli": ["./apps/cli/src/index.ts"],
|
"@tm/cli": ["./apps/cli/src/index.ts"],
|
||||||
"@tm/cli/*": ["./apps/cli/src/*"],
|
"@tm/cli/*": ["./apps/cli/src/*"],
|
||||||
|
"@tm/mcp": ["./apps/mcp/src/index.ts"],
|
||||||
|
"@tm/mcp/*": ["./apps/mcp/src/*"],
|
||||||
"@tm/build-config": ["./packages/build-config/src/index.ts"],
|
"@tm/build-config": ["./packages/build-config/src/index.ts"],
|
||||||
"@tm/build-config/*": ["./packages/build-config/src/*"],
|
"@tm/build-config/*": ["./packages/build-config/src/*"],
|
||||||
"@tm/ai-sdk-provider-grok-cli": [
|
"@tm/ai-sdk-provider-grok-cli": [
|
||||||
|
|||||||
Reference in New Issue
Block a user