Release 0.43.0

This commit is contained in:
Ralph Khreish
2026-01-27 16:52:24 +01:00
committed by GitHub
50 changed files with 4495 additions and 143 deletions

View File

@@ -0,0 +1,10 @@
---
"task-master-ai": minor
---
Add MCPB bundle for single-click Claude Desktop installation
- Added `manifest.json` for MCP Bundle (MCPB) specification v0.3
- Added `.mcpbignore` to exclude development files from bundle
- Added `icon.png` (512x512) for Claude Desktop display
- Enables users to install the Task Master MCP server directly in Claude Desktop without manual configuration

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Add modifyJSON function for safer file updates
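The changeset does not show the implementation. A minimal sketch of the read-modify-write pattern the name suggests (illustrative only; the real `modifyJSON` in the codebase may have a different signature):
```typescript
// Hypothetical sketch of a modifyJSON-style helper: read the current file,
// apply a caller-supplied mutation, and replace the file atomically via
// temp file + rename so readers never see a half-written document.
import { promises as fs } from 'node:fs';

async function modifyJson<T>(
	filePath: string,
	mutate: (data: T) => T | Promise<T>
): Promise<T> {
	const current = JSON.parse(await fs.readFile(filePath, 'utf8')) as T;
	const next = await mutate(current);
	const tmpPath = `${filePath}.${process.pid}.tmp`;
	await fs.writeFile(tmpPath, JSON.stringify(next, null, 2) + '\n', 'utf8');
	await fs.rename(tmpPath, filePath); // atomic replace on POSIX filesystems
	return next;
}

// Example: bump a counter without hand-editing unrelated fields
// await modifyJson<{ runs?: number }>('state.json', (s) => ({ ...s, runs: (s.runs ?? 0) + 1 }));
```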

View File

@@ -0,0 +1,9 @@
---
"task-master-ai": minor
---
Add verbose output mode to loop command with `--verbose` flag
- New `-v, --verbose` flag shows Claude's work in real-time (thinking, tool calls) rather than waiting until the iteration completes
- New `--no-output` flag excludes full Claude output from iteration results to save memory
- Improved error handling with proper validation for incompatible options (verbose + sandbox)

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Add --no-banner to suppress the startup banner.

View File

@@ -0,0 +1,40 @@
---
"task-master-ai": minor
---
Add optional `metadata` field to tasks for storing user-defined custom data
Tasks and subtasks now support an optional `metadata` field that allows storing arbitrary JSON data such as:
- External IDs (GitHub issues, Jira tickets, Linear issues)
- Workflow data (sprints, story points, custom statuses)
- Integration data (sync timestamps, external system references)
- Custom tracking (UUIDs, version numbers, audit information)
Key features:
- **AI-Safe**: Metadata is preserved through all AI operations (update-task, expand, etc.) because AI schemas intentionally exclude this field (see the sketch after this list)
- **Flexible Schema**: Store any JSON-serializable data without schema changes
- **Backward Compatible**: The field is optional; existing tasks work without modification
- **Subtask Support**: Both tasks and subtasks can have their own metadata
- **MCP Tool Support**: Use `update_task` and `update_subtask` with the `metadata` parameter to update metadata (requires `TASK_MASTER_ALLOW_METADATA_UPDATES=true` in MCP server environment)
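A hypothetical sketch of how that AI-safe guarantee typically works: the schema used to validate AI responses simply has no `metadata` key, so the model cannot emit one, and the stored value is carried over on merge (the names below are illustrative, not the project's actual schemas):
```typescript
import { z } from 'zod';

interface Task {
	title: string;
	description: string;
	details?: string;
	metadata?: Record<string, unknown>;
}

// Schema for AI-generated task updates: `metadata` is deliberately absent,
// so a parsed AI response can never contain it.
const aiTaskUpdateSchema = z.object({
	title: z.string(),
	description: z.string(),
	details: z.string().optional()
});

function applyAiUpdate(existing: Task, aiResponse: unknown): Task {
	const update = aiTaskUpdateSchema.parse(aiResponse);
	// Merge validated AI fields; user-defined metadata passes through untouched.
	return { ...existing, ...update, metadata: existing.metadata };
}
```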
Example usage:
```json
{
"id": 1,
"title": "Implement authentication",
"metadata": {
"githubIssue": 42,
"sprint": "Q1-S3",
"storyPoints": 5
}
}
```
MCP metadata update example:
```javascript
// With TASK_MASTER_ALLOW_METADATA_UPDATES=true set in MCP env
update_task({
id: "1",
metadata: '{"githubIssue": 42, "sprint": "Q1-S3"}'
})
```

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env node
import { spawnSync } from 'node:child_process';
import {
	existsSync,
	readFileSync,
	readdirSync,
	unlinkSync,
	writeFileSync
} from 'node:fs';
import { dirname, join } from 'node:path';
import { fileURLToPath } from 'node:url';
import { findRootDir } from './utils.mjs';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const rootDir = findRootDir(__dirname);

// Read the root package.json version
const pkgPath = join(rootDir, 'package.json');
const manifestPath = join(rootDir, 'manifest.json');

let pkg;
try {
	pkg = JSON.parse(readFileSync(pkgPath, 'utf8'));
} catch (error) {
	console.error('Failed to read package.json:', error.message);
	process.exit(1);
}

let manifest;
try {
	manifest = JSON.parse(readFileSync(manifestPath, 'utf8'));
} catch (error) {
	console.error('Failed to read manifest.json:', error.message);
	process.exit(1);
}

// Sync manifest version if different
if (manifest.version !== pkg.version) {
	console.log(
		`Syncing manifest.json version: ${manifest.version} → ${pkg.version}`
	);
	manifest.version = pkg.version;
	try {
		writeFileSync(
			manifestPath,
			JSON.stringify(manifest, null, '\t') + '\n',
			'utf8'
		);
		console.log(`✅ Updated manifest.json version to ${pkg.version}`);
	} catch (error) {
		console.error('Failed to write manifest.json:', error.message);
		process.exit(1);
	}
} else {
	console.log(
		`✓ manifest.json version already matches package.json (${pkg.version})`
	);
}

// Remove old .mcpb files
const files = readdirSync(rootDir);
for (const file of files) {
	if (file.endsWith('.mcpb')) {
		const filePath = join(rootDir, file);
		console.log(`Removing old bundle: ${file}`);
		unlinkSync(filePath);
	}
}

// Generate new .mcpb bundle
const bundleName = 'taskmaster.mcpb';
console.log(`Generating ${bundleName} for version ${pkg.version}...`);

const result = spawnSync('npx', ['mcpb', 'pack', '.', bundleName], {
	cwd: rootDir,
	encoding: 'utf8',
	stdio: 'inherit'
});

if (result.status !== 0) {
	console.error('Failed to generate MCPB bundle');
	process.exit(1);
}

// Verify the new bundle was created
if (existsSync(join(rootDir, bundleName))) {
	console.log(`✅ Generated ${bundleName}`);
} else {
	console.error(`Expected bundle ${bundleName} was not created`);
	process.exit(1);
}

169
.github/workflows/forward-port.yml vendored Normal file
View File

@@ -0,0 +1,169 @@
name: Forward Port to Next
on:
push:
branches:
- main
concurrency:
group: forward-port
cancel-in-progress: false
permissions:
contents: write
pull-requests: write
jobs:
forward-port:
runs-on: ubuntu-latest
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_METRICS_WEBHOOK }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Check for existing PR
id: check-pr
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
EXISTING_PR=$(gh pr list --base next --head main --state open --json number --jq '.[0].number // empty')
if [ -n "$EXISTING_PR" ]; then
echo "existing_pr=$EXISTING_PR" >> $GITHUB_OUTPUT
echo "PR #$EXISTING_PR already exists for main → next"
else
echo "existing_pr=" >> $GITHUB_OUTPUT
echo "No existing PR found"
fi
- name: Check if main has changes not in next
id: check-diff
if: steps.check-pr.outputs.existing_pr == ''
run: |
git fetch origin next
DIFF_COUNT=$(git rev-list --count origin/next..origin/main)
echo "diff_count=$DIFF_COUNT" >> $GITHUB_OUTPUT
if [ "$DIFF_COUNT" -gt 0 ]; then
echo "Found $DIFF_COUNT commit(s) in main not in next"
else
echo "No new commits to forward port"
fi
- name: Check for merge conflicts
id: check-conflicts
if: steps.check-pr.outputs.existing_pr == '' && steps.check-diff.outputs.diff_count != '0'
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
# Try a test merge to detect conflicts
git checkout origin/next
if git merge --no-commit --no-ff origin/main 2>/dev/null; then
echo "has_conflicts=false" >> $GITHUB_OUTPUT
echo "No merge conflicts detected"
else
echo "has_conflicts=true" >> $GITHUB_OUTPUT
# Get list of conflicting files
CONFLICTING_FILES=$(git diff --name-only --diff-filter=U | head -10)
echo "conflicting_files<<EOF" >> $GITHUB_OUTPUT
echo "$CONFLICTING_FILES" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "Merge conflicts detected in: $CONFLICTING_FILES"
fi
git merge --abort 2>/dev/null || true
- name: Create forward-port PR
id: create-pr
if: steps.check-pr.outputs.existing_pr == '' && steps.check-diff.outputs.diff_count != '0'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Get the commits being forward-ported for the PR body
COMMITS=$(git log origin/next..origin/main --oneline --no-decorate | head -20)
COMMIT_COUNT=$(git rev-list --count origin/next..origin/main)
# Build conflict warning if needed
CONFLICT_WARNING=""
if [ "${{ steps.check-conflicts.outputs.has_conflicts }}" = "true" ]; then
CONFLICT_WARNING="
> [!WARNING]
> **Merge conflicts detected.** Manual resolution required.
>
> Conflicting files:
> \`\`\`
> ${{ steps.check-conflicts.outputs.conflicting_files }}
> \`\`\`
### How to resolve
\`\`\`bash
# Option 1: Resolve in a temporary branch (recommended)
git fetch origin
git checkout -b resolve-forward-port origin/next
git merge origin/main
# Fix conflicts in your editor, then:
git add .
git commit
git push origin resolve-forward-port
# Create PR from resolve-forward-port → next, then close this PR
# Option 2: Resolve directly on next
git checkout next
git pull origin next
git merge origin/main
# Fix conflicts, commit, and push
\`\`\`
"
fi
# Create PR body
BODY="## Forward Port: main → next
This PR forward-ports changes from \`main\` to \`next\` to ensure hotfixes and releases are included in the next development branch.
$CONFLICT_WARNING
### Commits ($COMMIT_COUNT total)
\`\`\`
$COMMITS
\`\`\`
$([ "$COMMIT_COUNT" -gt 20 ] && echo "... and $((COMMIT_COUNT - 20)) more")
---
*Auto-generated by forward-port workflow*"
# Create the PR
PR_URL=$(gh pr create \
--base next \
--head main \
--title "chore: forward port main to next" \
--label "forward-port" \
--label "automated" \
--body "$BODY")
PR_NUMBER=$(echo "$PR_URL" | grep -oE '[0-9]+$')
echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT
echo "pr_url=$PR_URL" >> $GITHUB_OUTPUT
# Add conflict label if needed
if [ "${{ steps.check-conflicts.outputs.has_conflicts }}" = "true" ]; then
gh pr edit "$PR_NUMBER" --add-label "has-conflicts"
fi
- name: Send Discord notification
if: steps.create-pr.outputs.pr_url != '' && env.DISCORD_WEBHOOK != ''
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ env.DISCORD_WEBHOOK }}
status: ${{ steps.check-conflicts.outputs.has_conflicts == 'true' && 'Warning' || 'Success' }}
title: "🔄 Forward Port PR Created"
description: |
**main → next**
${{ steps.check-conflicts.outputs.has_conflicts == 'true' && '⚠️ **Merge conflicts detected** - manual resolution required' || '✅ No conflicts - ready for review' }}
**Commits:** ${{ steps.check-diff.outputs.diff_count }}
**PR:** ${{ steps.create-pr.outputs.pr_url }}
color: ${{ steps.check-conflicts.outputs.has_conflicts == 'true' && '0xFFA500' || '0x58AFFF' }}
username: Task Master Bot
avatar_url: https://raw.githubusercontent.com/eyaltoledano/claude-task-master/main/images/logo.png

31
.mcpbignore Normal file
View File

@@ -0,0 +1,31 @@
# Exclude everything except manifest and icon
# This is an npx-based bundle - no source code needed
# All source code
*.js
*.ts
*.mjs
*.cjs
*.jsx
*.tsx
*.json
!manifest.json
# All directories
*/
# All other files
*.md
*.txt
*.yml
*.yaml
*.lock
*.log
.git*
.env*
.eslint*
.prettier*
.editorconfig
LICENSE*
Makefile
Dockerfile

View File

@@ -18,6 +18,55 @@
### Patch Changes
- [#1569](https://github.com/eyaltoledano/claude-task-master/pull/1569) [`4cfde1c`](https://github.com/eyaltoledano/claude-task-master/commit/4cfde1c3d54b94701e0fcfc8dbdedbc3bbaf4339) Thanks [@bjcoombs](https://github.com/bjcoombs)! - Improve concurrency safety by adopting modifyJson pattern in file-storage
- Refactor saveTasks, createTag, deleteTag, renameTag to use modifyJson for atomic read-modify-write operations
- This prevents lost updates when multiple processes concurrently modify tasks.json
- Complements the cross-process file locking added in PR #1566
- [#1566](https://github.com/eyaltoledano/claude-task-master/pull/1566) [`3cc6174`](https://github.com/eyaltoledano/claude-task-master/commit/3cc6174b471fc1ea7f12955095d0d35b4dc5904c) Thanks [@bjcoombs](https://github.com/bjcoombs)! - Fix race condition when multiple Claude Code windows write to tasks.json simultaneously
- Add cross-process file locking to prevent concurrent write collisions
- Implement atomic writes using temp file + rename pattern to prevent partial writes
- Re-read file inside lock to get current state, preventing lost updates from stale snapshots
- Add stale lock detection and automatic cleanup (10-second timeout)
- Export `withFileLock` and `withFileLockSync` utilities for use by other modules
This fix prevents data loss that could occur when multiple Task Master instances (e.g., multiple Claude Code windows) access the same tasks.json file concurrently. (A sketch of the locking pattern appears after this changelog excerpt.)
- [#1576](https://github.com/eyaltoledano/claude-task-master/pull/1576) [`097c8ed`](https://github.com/eyaltoledano/claude-task-master/commit/097c8edcb0ca065218e9b51758ad370ac7475f1a) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Improve loop command error handling and use dangerously-skip-permissions
- Add proper spawn error handling (ENOENT, EACCES) with actionable messages
- Return error info from checkSandboxAuth and runInteractiveAuth instead of silent failures
- Use --dangerously-skip-permissions for unattended loop execution
- Fix null exit code masking issue
- [#1577](https://github.com/eyaltoledano/claude-task-master/pull/1577) [`e762e4f`](https://github.com/eyaltoledano/claude-task-master/commit/e762e4f64608a77d248ac8ce5eeb218000b51907) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Make Docker sandbox mode opt-in for loop command
- Add `--sandbox` flag to `task-master loop` (default: use plain `claude -p`)
- Preserve progress.txt between runs (append instead of overwrite)
- Display execution mode in loop startup output
- [#1580](https://github.com/eyaltoledano/claude-task-master/pull/1580) [`940ab58`](https://github.com/eyaltoledano/claude-task-master/commit/940ab587e50cff43c3a2639bbbd210fdd577c3f1) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Update Codex CLI supported models to match current available models
- Remove deprecated models: gpt-5, gpt-5-codex, gpt-5.1
- Add gpt-5.2-codex as the current default model
- Add gpt-5.1-codex-mini for faster, cheaper option
- Keep gpt-5.1-codex-max and gpt-5.2
## 0.42.0-rc.0
### Minor Changes
- [#1533](https://github.com/eyaltoledano/claude-task-master/pull/1533) [`6c3a92c`](https://github.com/eyaltoledano/claude-task-master/commit/6c3a92c439d4573ff5046e3d251a4a26d85d0deb) Thanks [@bjcoombs](https://github.com/bjcoombs)! - Add --ready and --blocking filters to list command for identifying parallelizable tasks
- Add `--ready` filter to show only tasks with satisfied dependencies (ready to work on)
- Add `--blocking` filter to show only tasks that block other tasks
- Combine `--ready --blocking` to find high-impact tasks (ready AND blocking others)
- Add "Blocks" column to task table showing which tasks depend on each task
- Blocks field included in JSON output for programmatic access
- Add "Ready" column to `tags` command showing count of ready tasks per tag
- Add `--ready` filter to `tags` command to show only tags with available work
- Excludes deferred/blocked tasks from ready count (only actionable statuses)
- Add `--all-tags` option to list ready tasks across all tags (use with `--ready`)
- Tag column shown as first column when using `--all-tags` for easy scanning
### Patch Changes
- [#1569](https://github.com/eyaltoledano/claude-task-master/pull/1569) [`4cfde1c`](https://github.com/eyaltoledano/claude-task-master/commit/4cfde1c3d54b94701e0fcfc8dbdedbc3bbaf4339) Thanks [@bjcoombs](https://github.com/bjcoombs)! - Improve concurrency safety by adopting modifyJson pattern in file-storage
- Refactor saveTasks, createTag, deleteTag, renameTag to use modifyJson for atomic read-modify-write operations
- This prevents lost updates when multiple processes concurrently modify tasks.json
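The #1566 entry above describes cross-process locking plus atomic writes. A rough sketch of that flow, using the `proper-lockfile` package listed in the project's dependencies for the lock (the actual `withFileLock` export may have a different signature):
```typescript
import { promises as fs } from 'node:fs';
import lockfile from 'proper-lockfile';

// Acquire a cross-process lock around a critical section. Locks older than
// 10 seconds are treated as stale and cleaned up, matching the timeout
// described in the changelog entry.
async function withFileLock<T>(filePath: string, fn: () => Promise<T>): Promise<T> {
	const release = await lockfile.lock(filePath, {
		stale: 10_000,
		retries: { retries: 5, minTimeout: 100 }
	});
	try {
		return await fn();
	} finally {
		await release();
	}
}

async function saveTasks(
	filePath: string,
	mutate: (tasks: unknown) => unknown
): Promise<void> {
	await withFileLock(filePath, async () => {
		// Re-read inside the lock so the merge happens against the current
		// on-disk state, not a stale snapshot taken before locking.
		const current = JSON.parse(await fs.readFile(filePath, 'utf8'));
		const next = mutate(current);
		// Temp file + rename keeps the write atomic.
		const tmpPath = `${filePath}.tmp`;
		await fs.writeFile(tmpPath, JSON.stringify(next, null, 2), 'utf8');
		await fs.rename(tmpPath, filePath);
	});
}
```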

View File

@@ -91,6 +91,9 @@ describe('LoopCommand', () => {
getStorageType: vi.fn().mockReturnValue('local'),
getNext: vi.fn().mockResolvedValue({ id: '1', title: 'Test Task' }),
getCount: vi.fn().mockResolvedValue(0)
},
auth: {
getContext: vi.fn().mockReturnValue(null)
}
};

View File

@@ -5,6 +5,8 @@
import path from 'node:path';
import {
type LoopConfig,
type LoopIteration,
type LoopOutputCallbacks,
type LoopResult,
PRESET_NAMES,
type TmCore,
@@ -23,6 +25,8 @@ export interface LoopCommandOptions {
tag?: string;
project?: string;
sandbox?: boolean;
output?: boolean;
verbose?: boolean;
}
export class LoopCommand extends Command {
@@ -49,6 +53,11 @@ export class LoopCommand extends Command {
'Project root directory (auto-detected if not provided)'
)
.option('--sandbox', 'Run Claude in Docker sandbox mode')
.option(
'--no-output',
'Exclude full Claude output from iteration results'
)
.option('-v, --verbose', "Show Claude's work in real-time")
.action((options: LoopCommandOptions) => this.execute(options));
}
@@ -109,12 +118,21 @@ export class LoopCommand extends Command {
}
console.log();
// Auto-detect brief name from auth context (if available)
const briefName = this.tmCore.auth.getContext()?.briefName;
const config: Partial<LoopConfig> = {
iterations,
prompt,
progressFile,
tag: options.tag,
sandbox: options.sandbox
sandbox: options.sandbox,
// CLI defaults to including output (users typically want to see it)
// Domain defaults to false (library consumers opt-in explicitly)
includeOutput: options.output ?? true,
verbose: options.verbose ?? false,
brief: briefName,
callbacks: this.createOutputCallbacks()
};
const result = await this.tmCore.loop.run(config);
@@ -161,6 +179,47 @@ export class LoopCommand extends Command {
}
}
private createOutputCallbacks(): LoopOutputCallbacks {
return {
onIterationStart: (iteration: number, total: number) => {
console.log();
console.log(chalk.cyan(`━━━ Iteration ${iteration} of ${total} ━━━`));
},
onText: (text: string) => {
console.log(text);
},
onToolUse: (toolName: string) => {
console.log(chalk.dim(`${toolName}`));
},
onError: (message: string, severity?: 'warning' | 'error') => {
if (severity === 'warning') {
console.error(chalk.yellow(`[Loop Warning] ${message}`));
} else {
console.error(chalk.red(`[Loop Error] ${message}`));
}
},
onStderr: (iteration: number, text: string) => {
process.stderr.write(chalk.dim(`[Iteration ${iteration}] `) + text);
},
onOutput: (output: string) => {
console.log(output);
},
onIterationEnd: (iteration: LoopIteration) => {
const statusColor =
iteration.status === 'success'
? chalk.green
: iteration.status === 'error'
? chalk.red
: chalk.yellow;
console.log(
statusColor(
` Iteration ${iteration.iteration} completed: ${iteration.status}`
)
);
}
};
}
private displayResult(result: LoopResult): void {
console.log();
console.log(chalk.bold('Loop Complete'));
@@ -168,6 +227,9 @@ export class LoopCommand extends Command {
console.log(`Total iterations: ${result.totalIterations}`);
console.log(`Tasks completed: ${result.tasksCompleted}`);
console.log(`Final status: ${this.formatStatus(result.finalStatus)}`);
if (result.errorMessage) {
console.log(chalk.red(`Error: ${result.errorMessage}`));
}
}
private formatStatus(status: LoopResult['finalStatus']): string {

View File

@@ -8,17 +8,18 @@ description: "Tasks in Task Master follow a specific format designed to provide
Tasks in tasks.json have the following structure:
| Field | Description | Example |
| -------------- | ---------------------------------------------- | ------------------------------------------------------ |
| `id` | Unique identifier for the task. | `1` |
| `title` | Brief, descriptive title. | `"Initialize Repo"` |
| `description` | What the task involves. | `"Create a new repository, set up initial structure."` |
| `status` | Current state. | `"pending"`, `"done"`, `"deferred"` |
| Field | Description | Example |
| -------------- | ----------------------------------------------- | ------------------------------------------------------ |
| `id` | Unique identifier for the task. | `1` |
| `title` | Brief, descriptive title. | `"Initialize Repo"` |
| `description` | What the task involves. | `"Create a new repository, set up initial structure."` |
| `status` | Current state. | `"pending"`, `"done"`, `"deferred"` |
| `dependencies` | Prerequisite task IDs. ✅ Completed, ⏱️ Pending | `[1, 2]` |
| `priority` | Task importance. | `"high"`, `"medium"`, `"low"` |
| `details` | Implementation instructions. | `"Use GitHub client ID/secret, handle callback..."` |
| `testStrategy` | How to verify success. | `"Deploy and confirm 'Hello World' response."` |
| `subtasks` | Nested subtasks related to the main task. | `[{"id": 1, "title": "Configure OAuth", ...}]` |
| `priority` | Task importance. | `"high"`, `"medium"`, `"low"` |
| `details` | Implementation instructions. | `"Use GitHub client ID/secret, handle callback..."` |
| `testStrategy` | How to verify success. | `"Deploy and confirm 'Hello World' response."` |
| `subtasks` | Nested subtasks related to the main task. | `[{"id": 1, "title": "Configure OAuth", ...}]` |
| `metadata` | Optional user-defined data (see below). | `{"githubIssue": 42, "sprint": "Q1-S3"}` |
## Task File Format
@@ -38,6 +39,158 @@ Individual task files follow this format:
<verification approach>
```
## User-Defined Metadata Field
The `metadata` field allows you to store arbitrary custom data on tasks without requiring schema changes. This is useful for:
- **External IDs**: Link tasks to GitHub issues, Jira tickets, Linear issues, etc.
- **Workflow data**: Track sprints, story points, custom statuses
- **Integration data**: Store sync timestamps, external system references
- **Custom tracking**: UUIDs, version numbers, audit information
### Key Characteristics
<CardGroup cols={2}>
<Card title="Fully Optional" icon="toggle-off">
The field is optional. Existing tasks work without it.
</Card>
<Card title="AI-Safe" icon="shield">
AI operations preserve your metadata - it's never overwritten by AI.
</Card>
<Card title="Flexible Schema" icon="shapes">
Store any JSON-serializable data: strings, numbers, objects, arrays.
</Card>
<Card title="Subtask Support" icon="list-tree">
Both tasks and subtasks can have their own metadata.
</Card>
</CardGroup>
### Usage Examples
**GitHub Issue Linking**
```json
{
"id": 1,
"title": "Implement authentication",
"metadata": {
"githubIssue": 42,
"githubIssueUrl": "https://github.com/org/repo/issues/42"
}
}
```
**Sprint & Project Management**
```json
{
"id": 2,
"title": "Refactor API endpoints",
"metadata": {
"sprint": "Q1-S3",
"storyPoints": 5,
"epic": "API Modernization"
}
}
```
**External System Integration**
```json
{
"id": 3,
"title": "Fix login bug",
"metadata": {
"jira": {
"key": "PROJ-123",
"type": "bug",
"priority": "P1"
},
"importedAt": "2024-01-15T10:30:00Z",
"lastSyncedAt": "2024-01-20T14:00:00Z"
}
}
```
**Stable UUID Tracking**
```json
{
"id": 4,
"title": "Add user preferences",
"metadata": {
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"version": 2,
"createdBy": "import-script"
}
}
```
<Warning>
**Security Note**: Do not store secrets, API keys, or sensitive credentials in
the metadata field. Task data may be visible in logs, exports, or shared with
AI providers.
</Warning>
### Metadata Behavior
| Operation | Metadata Behavior |
| ---------------- | ------------------------------------------------------------ |
| `parse-prd` | New tasks are created without metadata |
| `update-task` | Existing metadata is preserved unless explicitly changed |
| `expand` | Parent task metadata is preserved; subtasks don't inherit it |
| `update-subtask` | Subtask metadata is preserved |
| Manual edit | You can add/modify metadata directly in tasks.json |
| MCP (with flag) | Use the `metadata` parameter to explicitly update metadata |
### Updating Metadata via MCP
The `update_task` and `update_subtask` MCP tools support a `metadata` parameter for updating task metadata. This feature is disabled by default for safety.
**To enable MCP metadata updates:**
Add `TASK_MASTER_ALLOW_METADATA_UPDATES=true` to your MCP server environment configuration in `.mcp.json`:
```json
{
"mcpServers": {
"task-master-ai": {
"command": "npx",
"args": ["-y", "task-master-ai"],
"env": {
"TASK_MASTER_ALLOW_METADATA_UPDATES": "true",
"ANTHROPIC_API_KEY": "your_key_here"
}
}
}
}
```
**Usage example:**
```javascript
// Update task metadata (merges with existing)
update_task({
id: "1",
projectRoot: "/path/to/project",
metadata: '{"githubIssue": 42, "sprint": "Q1-S3"}'
})
// Update only metadata (no prompt required)
update_task({
id: "1",
projectRoot: "/path/to/project",
metadata: '{"status": "reviewed"}'
})
```
<Note>
The `metadata` parameter accepts a JSON string. The new metadata is merged with existing metadata, allowing you to update specific fields without losing others.
</Note>
## Features in Detail
<AccordionGroup>
@@ -93,6 +246,7 @@ task-master expand --id=8
# or expand all tasks
task-master expand --all
```
</Accordion>
<Accordion title="Finding the Next Task">
@@ -130,11 +284,13 @@ The `show` command:
</Card>
<Card title="👀 Review Tasks" icon="magnifying-glass">
After parsing the PRD, review the tasks to ensure they make sense and have appropriate dependencies.
After parsing the PRD, review the tasks to ensure they make sense and have
appropriate dependencies.
</Card>
<Card title="📊 Analyze Complexity" icon="chart-line">
Use the complexity analysis feature to identify which tasks should be broken down further.
Use the complexity analysis feature to identify which tasks should be broken
down further.
</Card>
<Card title="⛓️ Follow Dependencies" icon="link">
@@ -142,7 +298,8 @@ The `show` command:
</Card>
<Card title="🔄 Update As You Go" icon="arrows-rotate">
If your implementation diverges from the plan, use the update command to keep future tasks aligned.
If your implementation diverges from the plan, use the update command to
keep future tasks aligned.
</Card>
<Card title="📦 Break Down Tasks" icon="boxes-stacked">
@@ -150,14 +307,17 @@ The `show` command:
</Card>
<Card title="🔄 Regenerate Files" icon="file-arrow-up">
After any updates to tasks.json, regenerate the task files to keep them in sync.
After any updates to tasks.json, regenerate the task files to keep them in
sync.
</Card>
<Card title="💬 Provide Context" icon="comment">
When asking the Cursor agent to help with a task, provide context about what you're trying to achieve.
When asking the Cursor agent to help with a task, provide context about what
you're trying to achieve.
</Card>
<Card title="✅ Validate Dependencies" icon="circle-check">
Periodically run the validate-dependencies command to check for invalid or circular dependencies.
Periodically run the validate-dependencies command to check for invalid or
circular dependencies.
</Card>
</CardGroup>

View File

@@ -442,3 +442,63 @@ export function withToolContext<TArgs extends { projectRoot?: string }>(
}
);
}
/**
* Validates and parses metadata string for MCP tools.
* Checks environment flag, validates JSON format, and ensures metadata is a plain object.
*
* @param metadataString - JSON string to parse and validate
* @param errorResponseFn - Function to create error response
* @returns Object with parsed metadata or error
*/
export function validateMcpMetadata(
metadataString: string | null | undefined,
errorResponseFn: (message: string) => ContentResult
): { parsedMetadata: Record<string, unknown> | null; error?: ContentResult } {
// Return null if no metadata provided
if (!metadataString) {
return { parsedMetadata: null };
}
// Check if metadata updates are allowed via environment variable
const allowMetadataUpdates =
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES === 'true';
if (!allowMetadataUpdates) {
return {
parsedMetadata: null,
error: errorResponseFn(
'Metadata updates are disabled. Set TASK_MASTER_ALLOW_METADATA_UPDATES=true in your MCP server environment to enable metadata modifications.'
)
};
}
// Parse and validate JSON
try {
const parsedMetadata = JSON.parse(metadataString);
// Ensure it's a plain object (not null, not array)
if (
typeof parsedMetadata !== 'object' ||
parsedMetadata === null ||
Array.isArray(parsedMetadata)
) {
return {
parsedMetadata: null,
error: errorResponseFn(
'Invalid metadata: must be a JSON object (not null or array)'
)
};
}
return { parsedMetadata };
} catch (parseError: unknown) {
const message =
parseError instanceof Error ? parseError.message : 'Unknown parse error';
return {
parsedMetadata: null,
error: errorResponseFn(
`Invalid metadata JSON: ${message}. Provide a valid JSON object string.`
)
};
}
}

View File

@@ -131,7 +131,7 @@ The Claude Code settings can be specified globally in the `claudeCode` section o
}
```
- For a full list of Cluaude Code settings, see the [Claude Code Settings documentation](https://docs.anthropic.com/en/docs/claude-code/settings).
- For a full list of Claude Code settings, see the [Claude Code Settings documentation](https://docs.anthropic.com/en/docs/claude-code/settings).
- For a full list of AI powered command names, see this file: `src/constants/commands.js`
### Why These Settings Matter

BIN
icon.png Normal file

Binary file not shown (512x512 PNG, 5.5 KiB).

89
manifest.json Normal file
View File

@@ -0,0 +1,89 @@
{
"manifest_version": "0.3",
"name": "Claude Task Master",
"version": "0.42.0",
"description": "AI-powered task management for structured development workflows. Parse PRDs, generate tasks with AI, track dependencies, and manage complexity.",
"author": {
"name": "Eyal Toledano",
"url": "https://github.com/eyaltoledano"
},
"repository": {
"type": "git",
"url": "https://github.com/eyaltoledano/claude-task-master"
},
"icon": "icon.png",
"server": {
"type": "node",
"entry_point": "dist/mcp-server.js",
"mcp_config": {
"command": "npx",
"args": ["-y", "task-master-ai"],
"env": {
"ANTHROPIC_API_KEY": "${user_config.anthropic_api_key}",
"PERPLEXITY_API_KEY": "${user_config.perplexity_api_key}",
"OPENAI_API_KEY": "${user_config.openai_api_key}",
"TASK_MASTER_TOOLS": "${user_config.task_master_tools}"
}
}
},
"user_config": {
"anthropic_api_key": {
"type": "string",
"title": "Anthropic API Key",
"description": "API key for Claude models - get from console.anthropic.com (recommended)",
"required": false,
"sensitive": true
},
"perplexity_api_key": {
"type": "string",
"title": "Perplexity API Key",
"description": "API key for research features - get from perplexity.ai (optional)",
"required": false,
"sensitive": true
},
"openai_api_key": {
"type": "string",
"title": "OpenAI API Key",
"description": "API key for GPT models - get from platform.openai.com (optional)",
"required": false,
"sensitive": true
},
"task_master_tools": {
"type": "string",
"title": "Tool Set",
"description": "Which tools to enable: core (7 tools), standard (15 tools), or all (44 tools)",
"required": false,
"default": "core"
}
},
"tools": [
{
"name": "get_tasks",
"description": "Get all tasks with optional status filter"
},
{
"name": "next_task",
"description": "Get the next recommended task to work on"
},
{
"name": "get_task",
"description": "Get details for a specific task by ID"
},
{
"name": "set_task_status",
"description": "Set the status of one or more tasks or subtasks"
},
{
"name": "update_subtask",
"description": "Update a subtask with implementation details"
},
{
"name": "parse_prd",
"description": "Parse a Product Requirements Document to generate initial tasks"
},
{
"name": "expand_task",
"description": "Expand a task into subtasks for detailed implementation"
}
]
}

View File

@@ -17,8 +17,9 @@ import { createLogWrapper } from '../../tools/utils.js';
* @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot.
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
* @param {string} args.id - Subtask ID in format "parent.sub".
* @param {string} args.prompt - Information to append to the subtask.
* @param {string} [args.prompt] - Information to append to the subtask. Required unless only updating metadata.
* @param {boolean} [args.research] - Whether to use research role.
* @param {Object} [args.metadata] - Parsed metadata object to merge into subtask metadata.
* @param {string} [args.projectRoot] - Project root path.
* @param {string} [args.tag] - Tag for the task (optional)
* @param {Object} log - Logger object.
@@ -27,8 +28,9 @@ import { createLogWrapper } from '../../tools/utils.js';
*/
export async function updateSubtaskByIdDirect(args, log, context = {}) {
const { session } = context;
// Destructure expected args, including projectRoot
const { tasksJsonPath, id, prompt, research, projectRoot, tag } = args;
// Destructure expected args, including projectRoot and metadata
const { tasksJsonPath, id, prompt, research, metadata, projectRoot, tag } =
args;
const logWrapper = createLogWrapper(log);
@@ -60,9 +62,10 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
};
}
if (!prompt) {
// At least prompt or metadata is required (validated in MCP tool layer)
if (!prompt && !metadata) {
const errorMessage =
'No prompt specified. Please provide the information to append.';
'No prompt or metadata specified. Please provide information to append or metadata to update.';
logWrapper.error(errorMessage);
return {
success: false,
@@ -77,7 +80,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
const useResearch = research === true;
log.info(
`Updating subtask with ID ${subtaskIdStr} with prompt "${prompt}" and research: ${useResearch}`
`Updating subtask with ID ${subtaskIdStr} with prompt "${prompt || '(metadata-only)'}" and research: ${useResearch}`
);
const wasSilent = isSilentMode();
@@ -98,7 +101,8 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
projectRoot,
tag,
commandName: 'update-subtask',
outputType: 'mcp'
outputType: 'mcp',
metadata
},
'json'
);

View File

@@ -18,9 +18,10 @@ import { findTasksPath } from '../utils/path-utils.js';
* @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot.
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
* @param {string} args.id - Task ID (or subtask ID like "1.2").
* @param {string} args.prompt - New information/context prompt.
* @param {string} [args.prompt] - New information/context prompt. Required unless only updating metadata.
* @param {boolean} [args.research] - Whether to use research role.
* @param {boolean} [args.append] - Whether to append timestamped information instead of full update.
* @param {Object} [args.metadata] - Parsed metadata object to merge into task metadata.
* @param {string} [args.projectRoot] - Project root path.
* @param {string} [args.tag] - Tag for the task (optional)
* @param {Object} log - Logger object.
@@ -29,9 +30,17 @@ import { findTasksPath } from '../utils/path-utils.js';
*/
export async function updateTaskByIdDirect(args, log, context = {}) {
const { session } = context;
// Destructure expected args, including projectRoot
const { tasksJsonPath, id, prompt, research, append, projectRoot, tag } =
args;
// Destructure expected args, including projectRoot and metadata
const {
tasksJsonPath,
id,
prompt,
research,
append,
metadata,
projectRoot,
tag
} = args;
const logWrapper = createLogWrapper(log);
@@ -51,9 +60,10 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
};
}
if (!prompt) {
// At least prompt or metadata is required (validated in MCP tool layer)
if (!prompt && !metadata) {
const errorMessage =
'No prompt specified. Please provide a prompt with new information for the task update.';
'No prompt or metadata specified. Please provide a prompt with new information or metadata for the task update.';
logWrapper.error(errorMessage);
return {
success: false,
@@ -95,7 +105,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
const useResearch = research === true;
logWrapper.info(
`Updating task with ID ${taskId} with prompt "${prompt}" and research: ${useResearch}`
`Updating task with ID ${taskId} with prompt "${prompt || '(metadata-only)'}" and research: ${useResearch}`
);
const wasSilent = isSilentMode();
@@ -116,7 +126,8 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
projectRoot,
tag,
commandName: 'update-task',
outputType: 'mcp'
outputType: 'mcp',
metadata
},
'json',
append || false

View File

@@ -7,7 +7,8 @@ import { TaskIdSchemaForMcp } from '@tm/core';
import {
createErrorResponse,
handleApiResult,
withNormalizedProjectRoot
withNormalizedProjectRoot,
validateMcpMetadata
} from '@tm/mcp';
import { z } from 'zod';
import { resolveTag } from '../../../scripts/modules/utils.js';
@@ -27,11 +28,22 @@ export function registerUpdateSubtaskTool(server) {
id: TaskIdSchemaForMcp.describe(
'ID of the subtask to update in format "parentId.subtaskId" (e.g., "5.2"). Parent ID is the ID of the task that contains the subtask.'
),
prompt: z.string().describe('Information to add to the subtask'),
prompt: z
.string()
.optional()
.describe(
'Information to add to the subtask. Required unless only updating metadata.'
),
research: z
.boolean()
.optional()
.describe('Use Perplexity AI for research-backed updates'),
metadata: z
.string()
.optional()
.describe(
'JSON string of metadata to merge into subtask metadata. Example: \'{"ticketId": "JIRA-456", "reviewed": true}\'. Requires TASK_MASTER_ALLOW_METADATA_UPDATES=true in MCP environment.'
),
file: z.string().optional().describe('Absolute path to the tasks file'),
projectRoot: z
.string()
@@ -65,12 +77,29 @@ export function registerUpdateSubtaskTool(server) {
);
}
// Validate metadata if provided
const validationResult = validateMcpMetadata(
args.metadata,
createErrorResponse
);
if (validationResult.error) {
return validationResult.error;
}
const parsedMetadata = validationResult.parsedMetadata;
// Validate that at least prompt or metadata is provided
if (!args.prompt && !parsedMetadata) {
return createErrorResponse(
'Either prompt or metadata must be provided for update-subtask'
);
}
const result = await updateSubtaskByIdDirect(
{
tasksJsonPath: tasksJsonPath,
id: args.id,
prompt: args.prompt,
research: args.research,
metadata: parsedMetadata,
projectRoot: args.projectRoot,
tag: resolvedTag
},

View File

@@ -6,7 +6,8 @@
import {
createErrorResponse,
handleApiResult,
withNormalizedProjectRoot
withNormalizedProjectRoot,
validateMcpMetadata
} from '@tm/mcp';
import { z } from 'zod';
import { resolveTag } from '../../../scripts/modules/utils.js';
@@ -30,7 +31,10 @@ export function registerUpdateTaskTool(server) {
),
prompt: z
.string()
.describe('New information or context to incorporate into the task'),
.optional()
.describe(
'New information or context to incorporate into the task. Required unless only updating metadata.'
),
research: z
.boolean()
.optional()
@@ -41,6 +45,12 @@ export function registerUpdateTaskTool(server) {
.describe(
'Append timestamped information to task details instead of full update'
),
metadata: z
.string()
.optional()
.describe(
'JSON string of metadata to merge into task metadata. Example: \'{"githubIssue": 42, "sprint": "Q1-S3"}\'. Requires TASK_MASTER_ALLOW_METADATA_UPDATES=true in MCP environment.'
),
file: z.string().optional().describe('Absolute path to the tasks file'),
projectRoot: z
.string()
@@ -76,7 +86,23 @@ export function registerUpdateTaskTool(server) {
);
}
// 3. Call Direct Function - Include projectRoot
// Validate metadata if provided
const validationResult = validateMcpMetadata(
args.metadata,
createErrorResponse
);
if (validationResult.error) {
return validationResult.error;
}
const parsedMetadata = validationResult.parsedMetadata;
// Validate that at least prompt or metadata is provided
if (!args.prompt && !parsedMetadata) {
return createErrorResponse(
'Either prompt or metadata must be provided for update-task'
);
}
// Call Direct Function - Include projectRoot and metadata
const result = await updateTaskByIdDirect(
{
tasksJsonPath: tasksJsonPath,
@@ -84,6 +110,7 @@ export function registerUpdateTaskTool(server) {
prompt: args.prompt,
research: args.research,
append: args.append,
metadata: parsedMetadata,
projectRoot: args.projectRoot,
tag: resolvedTag
},

890
package-lock.json generated
View File

@@ -82,6 +82,7 @@
"task-master-mcp": "dist/mcp-server.js"
},
"devDependencies": {
"@anthropic-ai/mcpb": "^2.1.2",
"@biomejs/biome": "^1.9.4",
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
@@ -1950,6 +1951,429 @@
"url": "https://opencollective.com/express"
}
},
"apps/extension/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"dev": true,
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"apps/extension/node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/mime": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"dev": true,
"license": "MIT",
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=4"
}
},
"apps/extension/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/negotiator": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/task-master-ai": {
"version": "0.42.0-rc.0",
"resolved": "https://registry.npmjs.org/task-master-ai/-/task-master-ai-0.42.0-rc.0.tgz",
"integrity": "sha512-CYHRFumTfCnFkmYPLjwp9V2+wTDmk/fSTEexbOIu4FBFgRxzeqMICVQYsaDukw1tYwBXh7MO1VTwD8Dp1Lnpvg==",
"dev": true,
"license": "MIT WITH Commons-Clause",
"workspaces": [
"apps/*",
"packages/*",
"."
],
"dependencies": {
"@ai-sdk/amazon-bedrock": "^3.0.23",
"@ai-sdk/anthropic": "^2.0.18",
"@ai-sdk/azure": "^2.0.89",
"@ai-sdk/google": "^2.0.16",
"@ai-sdk/google-vertex": "^3.0.86",
"@ai-sdk/groq": "^2.0.21",
"@ai-sdk/mistral": "^2.0.16",
"@ai-sdk/openai": "^2.0.34",
"@ai-sdk/openai-compatible": "^1.0.25",
"@ai-sdk/perplexity": "^2.0.10",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.10",
"@ai-sdk/xai": "^2.0.22",
"@aws-sdk/credential-providers": "^3.895.0",
"@inquirer/search": "^3.0.15",
"@openrouter/ai-sdk-provider": "^1.2.0",
"@sentry/node": "^10.27.0",
"@streamparser/json": "^0.0.22",
"@supabase/supabase-js": "^2.57.4",
"@types/turndown": "^5.0.6",
"ai": "^5.0.51",
"ai-sdk-provider-claude-code": "^2.2.4",
"ai-sdk-provider-codex-cli": "^0.7.0",
"ai-sdk-provider-gemini-cli": "^1.4.0",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",
"boxen": "^8.0.1",
"chalk": "5.6.2",
"cli-highlight": "^2.1.11",
"cli-progress": "^3.12.0",
"cli-table3": "^0.6.5",
"commander": "^12.1.0",
"cors": "^2.8.5",
"date-fns": "^4.1.0",
"dotenv": "^16.6.1",
"express": "^4.21.2",
"fastmcp": "^3.23.1",
"figlet": "^1.8.0",
"fs-extra": "^11.3.0",
"fuse.js": "^7.1.0",
"gpt-tokens": "^1.3.14",
"gradient-string": "^3.0.0",
"helmet": "^8.1.0",
"inquirer": "^12.5.0",
"jsonc-parser": "^3.3.1",
"jsonrepair": "^3.13.0",
"jsonwebtoken": "^9.0.2",
"lru-cache": "^10.2.0",
"marked": "^15.0.12",
"marked-terminal": "^7.3.0",
"ollama-ai-provider-v2": "^1.3.1",
"open": "^10.2.0",
"ora": "^8.2.0",
"proper-lockfile": "^4.1.2",
"simple-git": "^3.28.0",
"steno": "^4.0.2",
"terminal-link": "^5.0.0",
"turndown": "^7.2.2",
"undici": "^7.16.0",
"uuid": "^11.1.0",
"zod": "^4.1.12"
},
"bin": {
"task-master": "dist/task-master.js",
"task-master-ai": "dist/mcp-server.js",
"task-master-mcp": "dist/mcp-server.js"
},
"engines": {
"node": ">=20.0.0"
},
"optionalDependencies": {
"@anthropic-ai/claude-code": "^2.0.59",
"@biomejs/cli-linux-x64": "^1.9.4"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"dev": true,
"license": "MIT",
"dependencies": {
"mime-types": "~2.1.34",
"negotiator": "0.6.3"
},
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/ajv": {
"version": "8.17.1",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz",
"integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==",
"dev": true,
"license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.3",
"fast-uri": "^3.0.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/body-parser": {
"version": "1.20.4",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz",
"integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==",
"dev": true,
"license": "MIT",
"dependencies": {
"bytes": "~3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "~1.2.0",
"http-errors": "~2.0.1",
"iconv-lite": "~0.4.24",
"on-finished": "~2.4.1",
"qs": "~6.14.0",
"raw-body": "~2.5.3",
"type-is": "~1.6.18",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"safe-buffer": "5.2.1"
},
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/cookie-signature": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz",
"integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==",
"dev": true,
"license": "MIT"
},
"apps/extension/node_modules/task-master-ai/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "2.0.0"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/debug/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"dev": true,
"license": "MIT"
},
"apps/extension/node_modules/task-master-ai/node_modules/express": {
"version": "4.22.1",
"resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz",
"integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==",
"dev": true,
"license": "MIT",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "~1.20.3",
"content-disposition": "~0.5.4",
"content-type": "~1.0.4",
"cookie": "~0.7.1",
"cookie-signature": "~1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "~1.3.1",
"fresh": "~0.5.2",
"http-errors": "~2.0.0",
"merge-descriptors": "1.0.3",
"methods": "~1.1.2",
"on-finished": "~2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "~0.1.12",
"proxy-addr": "~2.0.7",
"qs": "~6.14.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "~0.19.0",
"serve-static": "~1.16.2",
"setprototypeof": "1.2.0",
"statuses": "~2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.10.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/finalhandler": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz",
"integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==",
"dev": true,
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"on-finished": "~2.4.1",
"parseurl": "~1.3.3",
"statuses": "~2.0.2",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
"dev": true,
"license": "MIT"
},
"apps/extension/node_modules/task-master-ai/node_modules/merge-descriptors": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
"integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
"dev": true,
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dev": true,
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/raw-body": {
"version": "2.5.3",
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz",
"integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==",
"dev": true,
"license": "MIT",
"dependencies": {
"bytes": "~3.1.2",
"http-errors": "~2.0.1",
"iconv-lite": "~0.4.24",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/send": {
"version": "0.19.2",
"resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz",
"integrity": "sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==",
"dev": true,
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "~0.5.2",
"http-errors": "~2.0.1",
"mime": "1.6.0",
"ms": "2.1.3",
"on-finished": "~2.4.1",
"range-parser": "~1.2.1",
"statuses": "~2.0.2"
},
"engines": {
"node": ">= 0.8.0"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/serve-static": {
"version": "1.16.3",
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz",
"integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==",
"dev": true,
"license": "MIT",
"dependencies": {
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
"send": "~0.19.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"dev": true,
"license": "MIT",
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
},
"engines": {
"node": ">= 0.6"
}
},
"apps/extension/node_modules/task-master-ai/node_modules/zod": {
"version": "4.3.6",
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz",
"integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
"dev": true,
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"apps/extension/node_modules/undici-types": {
"version": "6.21.0",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
@@ -2431,6 +2855,308 @@
"@img/sharp-win32-x64": "^0.33.5"
}
},
"node_modules/@anthropic-ai/mcpb": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/@anthropic-ai/mcpb/-/mcpb-2.1.2.tgz",
"integrity": "sha512-goRbBC8ySo7SWb7tRzr+tL6FxDc4JPTRCdgfD2omba7freofvjq5rom1lBnYHZHo6Mizs1jAHJeN53aZbDoy8A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/prompts": "^6.0.1",
"commander": "^13.1.0",
"fflate": "^0.8.2",
"galactus": "^1.0.0",
"ignore": "^7.0.5",
"node-forge": "^1.3.2",
"pretty-bytes": "^5.6.0",
"zod": "^3.25.67",
"zod-to-json-schema": "^3.24.6"
},
"bin": {
"mcpb": "dist/cli/cli.js"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/checkbox": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/checkbox/-/checkbox-3.0.1.tgz",
"integrity": "sha512-0hm2nrToWUdD6/UHnel/UKGdk1//ke5zGUpHIvk5ZWmaKezlGxZkOJXNSWsdxO/rEqTkbB3lNC2J6nBElV2aAQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/figures": "^1.0.6",
"@inquirer/type": "^2.0.0",
"ansi-escapes": "^4.3.2",
"yoctocolors-cjs": "^2.1.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/confirm": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-4.0.1.tgz",
"integrity": "sha512-46yL28o2NJ9doViqOy0VDcoTzng7rAb6yPQKU7VDLqkmbCaH4JqK4yk4XqlzNWy9PVC5pG1ZUXPBQv+VqnYs2w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/type": "^2.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/core": {
"version": "9.2.1",
"resolved": "https://registry.npmjs.org/@inquirer/core/-/core-9.2.1.tgz",
"integrity": "sha512-F2VBt7W/mwqEU4bL0RnHNZmC/OxzNx9cOYxHqnXX3MP6ruYvZUZAW9imgN9+h/uBT/oP8Gh888J2OZSbjSeWcg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/figures": "^1.0.6",
"@inquirer/type": "^2.0.0",
"@types/mute-stream": "^0.0.4",
"@types/node": "^22.5.5",
"@types/wrap-ansi": "^3.0.0",
"ansi-escapes": "^4.3.2",
"cli-width": "^4.1.0",
"mute-stream": "^1.0.0",
"signal-exit": "^4.1.0",
"strip-ansi": "^6.0.1",
"wrap-ansi": "^6.2.0",
"yoctocolors-cjs": "^2.1.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/editor": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/editor/-/editor-3.0.1.tgz",
"integrity": "sha512-VA96GPFaSOVudjKFraokEEmUQg/Lub6OXvbIEZU1SDCmBzRkHGhxoFAVaF30nyiB4m5cEbDgiI2QRacXZ2hw9Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/type": "^2.0.0",
"external-editor": "^3.1.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/expand": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/expand/-/expand-3.0.1.tgz",
"integrity": "sha512-ToG8d6RIbnVpbdPdiN7BCxZGiHOTomOX94C2FaT5KOHupV40tKEDozp12res6cMIfRKrXLJyexAZhWVHgbALSQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/type": "^2.0.0",
"yoctocolors-cjs": "^2.1.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/input": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/input/-/input-3.0.1.tgz",
"integrity": "sha512-BDuPBmpvi8eMCxqC5iacloWqv+5tQSJlUafYWUe31ow1BVXjW2a5qe3dh4X/Z25Wp22RwvcaLCc2siHobEOfzg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/type": "^2.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/number": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/number/-/number-2.0.1.tgz",
"integrity": "sha512-QpR8jPhRjSmlr/mD2cw3IR8HRO7lSVOnqUvQa8scv1Lsr3xoAMMworcYW3J13z3ppjBFBD2ef1Ci6AE5Qn8goQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/type": "^2.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/password": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/password/-/password-3.0.1.tgz",
"integrity": "sha512-haoeEPUisD1NeE2IanLOiFr4wcTXGWrBOyAyPZi1FfLJuXOzNmxCJPgUrGYKVh+Y8hfGJenIfz5Wb/DkE9KkMQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/type": "^2.0.0",
"ansi-escapes": "^4.3.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/prompts": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/prompts/-/prompts-6.0.1.tgz",
"integrity": "sha512-yl43JD/86CIj3Mz5mvvLJqAOfIup7ncxfJ0Btnl0/v5TouVUyeEdcpknfgc+yMevS/48oH9WAkkw93m7otLb/A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/checkbox": "^3.0.1",
"@inquirer/confirm": "^4.0.1",
"@inquirer/editor": "^3.0.1",
"@inquirer/expand": "^3.0.1",
"@inquirer/input": "^3.0.1",
"@inquirer/number": "^2.0.1",
"@inquirer/password": "^3.0.1",
"@inquirer/rawlist": "^3.0.1",
"@inquirer/search": "^2.0.1",
"@inquirer/select": "^3.0.1"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/rawlist": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/rawlist/-/rawlist-3.0.1.tgz",
"integrity": "sha512-VgRtFIwZInUzTiPLSfDXK5jLrnpkuSOh1ctfaoygKAdPqjcjKYmGh6sCY1pb0aGnCGsmhUxoqLDUAU0ud+lGXQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/type": "^2.0.0",
"yoctocolors-cjs": "^2.1.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/search": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/search/-/search-2.0.1.tgz",
"integrity": "sha512-r5hBKZk3g5MkIzLVoSgE4evypGqtOannnB3PKTG9NRZxyFRKcfzrdxXXPcoJQsxJPzvdSU2Rn7pB7lw0GCmGAg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/figures": "^1.0.6",
"@inquirer/type": "^2.0.0",
"yoctocolors-cjs": "^2.1.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/select": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@inquirer/select/-/select-3.0.1.tgz",
"integrity": "sha512-lUDGUxPhdWMkN/fHy1Lk7pF3nK1fh/gqeyWXmctefhxLYxlDsc7vsPBEpxrfVGDsVdyYJsiJoD4bJ1b623cV1Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@inquirer/core": "^9.2.1",
"@inquirer/figures": "^1.0.6",
"@inquirer/type": "^2.0.0",
"ansi-escapes": "^4.3.2",
"yoctocolors-cjs": "^2.1.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@inquirer/type": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@inquirer/type/-/type-2.0.0.tgz",
"integrity": "sha512-XvJRx+2KR3YXyYtPUUy+qd9i7p+GO9Ko6VIIpWlBrpWwXDv8WLFeHTxz35CfQFUiBMLXlGHhGzys7lqit9gWag==",
"dev": true,
"license": "MIT",
"dependencies": {
"mute-stream": "^1.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/@types/node": {
"version": "22.19.7",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.7.tgz",
"integrity": "sha512-MciR4AKGHWl7xwxkBa6xUGxQJ4VBOmPTF7sL+iGzuahOFaO0jHCsuEfS80pan1ef4gWId1oWOweIhrDEYLuaOw==",
"dev": true,
"license": "MIT",
"dependencies": {
"undici-types": "~6.21.0"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/commander": {
"version": "13.1.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-13.1.0.tgz",
"integrity": "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/mute-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-1.0.0.tgz",
"integrity": "sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==",
"dev": true,
"license": "ISC",
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/@anthropic-ai/mcpb/node_modules/undici-types": {
"version": "6.21.0",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
"dev": true,
"license": "MIT"
},
"node_modules/@anthropic-ai/mcpb/node_modules/zod": {
"version": "3.25.76",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
"integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
"dev": true,
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/@apm-js-collab/code-transformer": {
"version": "0.8.2",
"resolved": "https://registry.npmjs.org/@apm-js-collab/code-transformer/-/code-transformer-0.8.2.tgz",
@@ -13654,6 +14380,16 @@
"dev": true,
"license": "MIT"
},
"node_modules/@types/mute-stream": {
"version": "0.0.4",
"resolved": "https://registry.npmjs.org/@types/mute-stream/-/mute-stream-0.0.4.tgz",
"integrity": "sha512-CPM9nzrCPPJHQNA9keH9CVkVI+WR5kMa+7XEs5jcGQ0VoAGnLv242w8lIVgwAEfmE4oufJRaTc9PNLQl0ioAow==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/mysql": {
"version": "2.15.27",
"resolved": "https://registry.npmjs.org/@types/mysql/-/mysql-2.15.27.tgz",
@@ -13866,6 +14602,13 @@
"dev": true,
"license": "MIT"
},
"node_modules/@types/wrap-ansi": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@types/wrap-ansi/-/wrap-ansi-3.0.0.tgz",
"integrity": "sha512-ltIpx+kM7g/MLRZfkbL7EsCEjfzCcScLpkg37eXEtx5kmrAKBkTJwd1GIAjDSL8wTpM6Hzn5YO4pSb91BEwu1g==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/ws": {
"version": "8.18.1",
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz",
@@ -19640,6 +20383,54 @@
"resolved": "apps/extension",
"link": true
},
"node_modules/external-editor": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
"integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
"dev": true,
"license": "MIT",
"dependencies": {
"chardet": "^0.7.0",
"iconv-lite": "^0.4.24",
"tmp": "^0.0.33"
},
"engines": {
"node": ">=4"
}
},
"node_modules/external-editor/node_modules/chardet": {
"version": "0.7.0",
"resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
"integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
"dev": true,
"license": "MIT"
},
"node_modules/external-editor/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"dev": true,
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/external-editor/node_modules/tmp": {
"version": "0.0.33",
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
"integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
"dev": true,
"license": "MIT",
"dependencies": {
"os-tmpdir": "~1.0.2"
},
"engines": {
"node": ">=0.6.0"
}
},
"node_modules/extract-zip": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz",
@@ -20072,6 +20863,13 @@
}
}
},
"node_modules/fflate": {
"version": "0.8.2",
"resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz",
"integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==",
"dev": true,
"license": "MIT"
},
"node_modules/figlet": {
"version": "1.9.4",
"resolved": "https://registry.npmjs.org/figlet/-/figlet-1.9.4.tgz",
@@ -20228,6 +21026,35 @@
"flat": "cli.js"
}
},
"node_modules/flora-colossus": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/flora-colossus/-/flora-colossus-2.0.0.tgz",
"integrity": "sha512-dz4HxH6pOvbUzZpZ/yXhafjbR2I8cenK5xL0KtBFb7U2ADsR+OwXifnxZjij/pZWF775uSCMzWVd+jDik2H2IA==",
"dev": true,
"license": "MIT",
"dependencies": {
"debug": "^4.3.4",
"fs-extra": "^10.1.0"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/flora-colossus/node_modules/fs-extra": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
"integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/follow-redirects": {
"version": "1.15.11",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz",
@@ -20560,6 +21387,36 @@
"integrity": "sha512-Tt4kuxLXFKHy8KT40zwsUPUkg1CrsgY25FxA2U/j/0WgEDCk3ddc/zLTCCcbSHX9FcKtLuVaDGtGE/STWC+j3Q==",
"license": "BSD-3-Clause"
},
"node_modules/galactus": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/galactus/-/galactus-1.0.0.tgz",
"integrity": "sha512-R1fam6D4CyKQGNlvJne4dkNF+PvUUl7TAJInvTGa9fti9qAv95quQz29GXapA4d8Ec266mJJxFVh82M4GIIGDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"debug": "^4.3.4",
"flora-colossus": "^2.0.0",
"fs-extra": "^10.1.0"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/galactus/node_modules/fs-extra": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
"integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/gaxios": {
"version": "6.7.1",
"resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz",
@@ -28110,6 +28967,16 @@
}
}
},
"node_modules/node-forge": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.3.tgz",
"integrity": "sha512-rLvcdSyRCyouf6jcOIPe/BgwG/d7hKjzMKOas33/pHEr6gbq18IK9zV7DiPvzsz0oBJPme6qr6H6kGZuI9/DZg==",
"dev": true,
"license": "(BSD-3-Clause OR GPL-2.0)",
"engines": {
"node": ">= 6.13.0"
}
},
"node_modules/node-gyp-build": {
"version": "4.8.4",
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.8.4.tgz",
@@ -28668,6 +29535,16 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/os-tmpdir": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
"integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/outdent": {
"version": "0.5.0",
"resolved": "https://registry.npmjs.org/outdent/-/outdent-0.5.0.tgz",
@@ -29578,6 +30455,19 @@
"url": "https://github.com/prettier/prettier?sponsor=1"
}
},
"node_modules/pretty-bytes": {
"version": "5.6.0",
"resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz",
"integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/pretty-format": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",

View File

@@ -36,7 +36,7 @@
"postpack": "chmod +x dist/task-master.js dist/mcp-server.js",
"changeset": "changeset",
"changeset:validate": "node .github/scripts/validate-changesets.mjs",
"version": "changeset version",
"version": "changeset version && node ./.github/scripts/sync-manifest-version.mjs && npm i --package-lock-only",
"release": "node ./.github/scripts/release.mjs",
"publish-packages": "turbo run build lint test && changeset version && changeset publish",
"inspector": "npx @modelcontextprotocol/inspector node dist/mcp-server.js",
@@ -114,14 +114,14 @@
"ollama-ai-provider-v2": "^1.3.1",
"open": "^10.2.0",
"ora": "^8.2.0",
"proper-lockfile": "^4.1.2",
"simple-git": "^3.28.0",
"steno": "^4.0.2",
"terminal-link": "^5.0.0",
"turndown": "^7.2.2",
"undici": "^7.16.0",
"uuid": "^11.1.0",
"zod": "^4.1.12",
"proper-lockfile": "^4.1.2"
"zod": "^4.1.12"
},
"optionalDependencies": {
"@anthropic-ai/claude-code": "^2.0.59",
@@ -151,6 +151,7 @@
"whatwg-url": "^11.0.0"
},
"devDependencies": {
"@anthropic-ai/mcpb": "^2.1.2",
"@biomejs/biome": "^1.9.4",
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",

View File

@@ -12,6 +12,10 @@ export interface UpdateBridgeParams extends BaseBridgeParams {
prompt: string;
/** Whether to append or full update (default: false) */
appendMode?: boolean;
/** Whether to use research mode (default: false) */
useResearch?: boolean;
/** Metadata to merge into task (for metadata-only updates or alongside prompt) */
metadata?: Record<string, unknown>;
}
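
A minimal sketch of how the new fields on `UpdateBridgeParams` might be populated. The `Pick` keeps the example to the fields added in this diff; the import path is an assumption, and the remaining `BaseBridgeParams` fields (project root, task id, reporter, etc.) are omitted:

```ts
import type { UpdateBridgeParams } from './update-bridge.js'; // path is illustrative

// Only the fields introduced above; the rest of BaseBridgeParams is unchanged.
const updateFields: Pick<
  UpdateBridgeParams,
  'prompt' | 'appendMode' | 'useResearch' | 'metadata'
> = {
  prompt: 'Tighten the acceptance criteria for the login flow',
  appendMode: false, // full update rather than append
  useResearch: true, // forwarded to the remote update as research mode
  metadata: { jiraKey: 'PROJ-123', storyPoints: 3 } // merged into the task
};
```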
/**
@@ -45,6 +49,8 @@ export async function tryUpdateViaRemote(
projectRoot,
tag,
appendMode = false,
useResearch = false,
metadata,
isMCP = false,
outputFormat = 'text',
report
@@ -76,7 +82,9 @@ export async function tryUpdateViaRemote(
try {
// Call the API storage method which handles the remote update
await tmCore.tasks.updateWithPrompt(String(taskId), prompt, tag, {
mode
mode,
useResearch,
...(metadata && { metadata })
});
if (spinner) {

View File

@@ -156,6 +156,13 @@ export interface Task extends TaskImplementationMetadata {
recommendedSubtasks?: number;
expansionPrompt?: string;
complexityReasoning?: string;
/**
* User-defined metadata that survives all task operations.
* Use for external IDs, custom workflow data, integrations, etc.
* This field is preserved through AI operations, updates, and serialization.
*/
metadata?: Record<string, unknown>;
}
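
Since `metadata` is an open `Record<string, unknown>`, consumers narrow values before using them. A hypothetical helper sketch, assuming `Task` is imported from the core types (the path below is illustrative):

```ts
import type { Task } from './common/types/index.js'; // illustrative path

// Narrow an unknown metadata value to a string before use.
function getExternalId(task: Task): string | undefined {
  const value = task.metadata?.externalId;
  return typeof value === 'string' ? value : undefined;
}
```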
/**

View File

@@ -49,6 +49,9 @@ export type {
// Storage adapters - FileStorage for direct local file access
export { FileStorage } from './modules/storage/index.js';
// File operations - for atomic file modifications
export { FileOperations } from './modules/storage/adapters/file-storage/file-operations.js';
// Constants
export * from './common/constants/index.js';
@@ -152,7 +155,8 @@ export type {
LoopPreset,
LoopConfig,
LoopIteration,
LoopResult
LoopResult,
LoopOutputCallbacks
} from './modules/loop/index.js';
export { LoopDomain, PRESET_NAMES } from './modules/loop/index.js';

View File

@@ -15,7 +15,8 @@ export type {
LoopPreset,
LoopConfig,
LoopIteration,
LoopResult
LoopResult,
LoopOutputCallbacks
} from './types.js';
// Presets - content and helpers

View File

@@ -190,7 +190,11 @@ export class LoopDomain {
path.join(this.projectRoot, '.taskmaster', 'progress.txt'),
sleepSeconds: partial.sleepSeconds ?? 5,
tag: partial.tag,
sandbox: partial.sandbox ?? false
sandbox: partial.sandbox ?? false,
includeOutput: partial.includeOutput ?? false,
verbose: partial.verbose ?? false,
brief: partial.brief,
callbacks: partial.callbacks
};
}
}

View File

@@ -3,7 +3,7 @@
exports[`Preset Snapshots > default preset matches snapshot 1`] = `
"SETUP: If task-master command not found, run: npm i -g task-master-ai
TASK: Implement ONE task/subtask from the Task Master backlog.
TASK: Implement ONE task/subtask from the Taskmaster backlog.
PROCESS:
1. Run task-master next (or use MCP) to get the next available task/subtask.
@@ -26,7 +26,7 @@ IMPORTANT:
`;
exports[`Preset Snapshots > duplication preset matches snapshot 1`] = `
"# Task Master Loop - Duplication
"# Taskmaster Loop - Duplication
Find duplicated code and refactor into shared utilities. ONE refactor per session.
@@ -60,7 +60,7 @@ Find duplicated code and refactor into shared utilities. ONE refactor per sessio
`;
exports[`Preset Snapshots > entropy preset matches snapshot 1`] = `
"# Task Master Loop - Entropy (Code Smells)
"# Taskmaster Loop - Entropy (Code Smells)
Find code smells and clean them up. ONE cleanup per session.
@@ -102,7 +102,7 @@ Find code smells and clean them up. ONE cleanup per session.
`;
exports[`Preset Snapshots > linting preset matches snapshot 1`] = `
"# Task Master Loop - Linting
"# Taskmaster Loop - Linting
Fix lint errors and type errors one by one. ONE fix per session.
@@ -136,7 +136,7 @@ Fix lint errors and type errors one by one. ONE fix per session.
`;
exports[`Preset Snapshots > test-coverage preset matches snapshot 1`] = `
"# Task Master Loop - Test Coverage
"# Taskmaster Loop - Test Coverage
Find uncovered code and write meaningful tests. ONE test per session.

View File

@@ -1,10 +1,10 @@
/**
* Default preset for Task Master loop - general task completion
* Default preset for Taskmaster loop - general task completion
* Matches the structure of scripts/loop.sh prompt
*/
export const DEFAULT_PRESET = `SETUP: If task-master command not found, run: npm i -g task-master-ai
TASK: Implement ONE task/subtask from the Task Master backlog.
TASK: Implement ONE task/subtask from the Taskmaster backlog.
PROCESS:
1. Run task-master next (or use MCP) to get the next available task/subtask.

View File

@@ -1,7 +1,7 @@
/**
* Duplication preset for Task Master loop - code deduplication
* Duplication preset for Taskmaster loop - code deduplication
*/
export const DUPLICATION_PRESET = `# Task Master Loop - Duplication
export const DUPLICATION_PRESET = `# Taskmaster Loop - Duplication
Find duplicated code and refactor into shared utilities. ONE refactor per session.

View File

@@ -2,7 +2,7 @@
* @fileoverview Entropy (Code Smells) preset for loop module
*/
export const ENTROPY_PRESET = `# Task Master Loop - Entropy (Code Smells)
export const ENTROPY_PRESET = `# Taskmaster Loop - Entropy (Code Smells)
Find code smells and clean them up. ONE cleanup per session.

View File

@@ -1,7 +1,7 @@
/**
* Linting preset for Task Master loop - fix lint and type errors
* Linting preset for Taskmaster loop - fix lint and type errors
*/
export const LINTING_PRESET = `# Task Master Loop - Linting
export const LINTING_PRESET = `# Taskmaster Loop - Linting
Fix lint errors and type errors one by one. ONE fix per session.

View File

@@ -1,7 +1,7 @@
/**
* Test coverage preset for Task Master loop - writing meaningful tests
* Test coverage preset for Taskmaster loop - writing meaningful tests
*/
export const TEST_COVERAGE_PRESET = `# Task Master Loop - Test Coverage
export const TEST_COVERAGE_PRESET = `# Taskmaster Loop - Test Coverage
Find uncovered code and write meaningful tests. ONE test per session.

View File

@@ -394,7 +394,7 @@ describe('LoopService', () => {
// Uses appendFile instead of writeFile to preserve existing progress
expect(fsPromises.appendFile).toHaveBeenCalledWith(
'/test/progress.txt',
expect.stringContaining('# Task Master Loop Progress'),
expect.stringContaining('# Taskmaster Loop Progress'),
'utf-8'
);
});
@@ -619,12 +619,13 @@ describe('LoopService', () => {
expect(header).toContain('@/test/progress.txt');
});
it('should include tasks file reference', () => {
it('should NOT include tasks file reference (preset controls task source)', () => {
const header = buildContextHeader(
{ iterations: 1, progressFile: '/test/progress.txt' },
1
);
expect(header).toContain('@.taskmaster/tasks/tasks.json');
// tasks.json intentionally excluded - let preset control task source to avoid confusion
expect(header).not.toContain('tasks.json');
});
it('should include tag filter when provided', () => {

View File

@@ -2,13 +2,15 @@
* @fileoverview Loop Service - Orchestrates running Claude Code iterations (sandbox or CLI mode)
*/
import { spawnSync } from 'node:child_process';
import { spawn, spawnSync } from 'node:child_process';
import { appendFile, mkdir, readFile } from 'node:fs/promises';
import path from 'node:path';
import { getLogger } from '../../../common/logger/index.js';
import { PRESETS, isPreset as checkIsPreset } from '../presets/index.js';
import type {
LoopConfig,
LoopIteration,
LoopOutputCallbacks,
LoopPreset,
LoopResult
} from '../types.js';
@@ -19,6 +21,7 @@ export interface LoopServiceOptions {
export class LoopService {
private readonly projectRoot: string;
private readonly logger = getLogger('LoopService');
private _isRunning = false;
constructor(options: LoopServiceOptions) {
@@ -109,6 +112,20 @@ export class LoopService {
/** Run a loop with the given configuration */
async run(config: LoopConfig): Promise<LoopResult> {
// Validate incompatible options early - fail once, not per iteration
if (config.verbose && config.sandbox) {
const errorMsg =
'Verbose mode is not supported with sandbox mode. Use --verbose without --sandbox, or remove --verbose.';
this.reportError(config.callbacks, errorMsg);
return {
iterations: [],
totalIterations: 0,
tasksCompleted: 0,
finalStatus: 'error',
errorMessage: errorMsg
};
}
this._isRunning = true;
const iterations: LoopIteration[] = [];
let tasksCompleted = 0;
@@ -116,18 +133,23 @@ export class LoopService {
await this.initProgressFile(config);
for (let i = 1; i <= config.iterations && this._isRunning; i++) {
// Show iteration header
console.log();
console.log(`━━━ Iteration ${i} of ${config.iterations} ━━━`);
// Notify presentation layer of iteration start
config.callbacks?.onIterationStart?.(i, config.iterations);
const prompt = await this.buildPrompt(config, i);
const iteration = this.executeIteration(
const iteration = await this.executeIteration(
prompt,
i,
config.sandbox ?? false
config.sandbox ?? false,
config.includeOutput ?? false,
config.verbose ?? false,
config.callbacks
);
iterations.push(iteration);
// Notify presentation layer of iteration completion
config.callbacks?.onIterationEnd?.(iteration);
// Check for early exit conditions
if (iteration.status === 'complete') {
return this.finalize(
@@ -177,21 +199,41 @@ export class LoopService {
return result;
}
/**
* Report an error via callback if provided, otherwise log to the logger.
* Ensures errors are never silently swallowed when callbacks aren't configured.
*/
private reportError(
callbacks: LoopOutputCallbacks | undefined,
message: string,
severity: 'warning' | 'error' = 'error'
): void {
if (callbacks?.onError) {
callbacks.onError(message, severity);
} else if (severity === 'warning') {
this.logger.warn(message);
} else {
this.logger.error(message);
}
}
private async initProgressFile(config: LoopConfig): Promise<void> {
await mkdir(path.dirname(config.progressFile), { recursive: true });
const tagLine = config.tag ? `# Tag: ${config.tag}\n` : '';
const lines = [
'# Taskmaster Loop Progress',
`# Started: ${new Date().toISOString()}`,
...(config.brief ? [`# Brief: ${config.brief}`] : []),
`# Preset: ${config.prompt}`,
`# Max Iterations: ${config.iterations}`,
...(config.tag ? [`# Tag: ${config.tag}`] : []),
'',
'---',
''
];
// Append to existing progress file instead of overwriting
await appendFile(
config.progressFile,
`
# Task Master Loop Progress
# Started: ${new Date().toISOString()}
# Preset: ${config.prompt}
# Max Iterations: ${config.iterations}
${tagLine}
---
`,
'\n' + lines.join('\n') + '\n',
'utf-8'
);
}
@@ -230,7 +272,8 @@ ${tagLine}
private buildContextHeader(config: LoopConfig, iteration: number): string {
const tagInfo = config.tag ? ` (tag: ${config.tag})` : '';
return `@${config.progressFile} @.taskmaster/tasks/tasks.json @CLAUDE.md
// Note: tasks.json reference removed - let the preset control task source to avoid confusion
return `@${config.progressFile} @CLAUDE.md
Loop iteration ${iteration} of ${config.iterations}${tagInfo}`;
}
@@ -262,63 +305,56 @@ Loop iteration ${iteration} of ${config.iterations}${tagInfo}`;
return { status: 'success' };
}
private executeIteration(
private async executeIteration(
prompt: string,
iterationNum: number,
sandbox: boolean
): LoopIteration {
sandbox: boolean,
includeOutput = false,
verbose = false,
callbacks?: LoopOutputCallbacks
): Promise<LoopIteration> {
const startTime = Date.now();
// Use docker sandbox or plain claude based on config
const command = sandbox ? 'docker' : 'claude';
const args = sandbox
? ['sandbox', 'run', 'claude', '-p', prompt]
: ['-p', prompt, '--dangerously-skip-permissions'];
if (verbose) {
return this.executeVerboseIteration(
prompt,
iterationNum,
command,
sandbox,
includeOutput,
startTime,
callbacks
);
}
const args = this.buildCommandArgs(prompt, sandbox, false);
const result = spawnSync(command, args, {
cwd: this.projectRoot,
encoding: 'utf-8',
maxBuffer: 50 * 1024 * 1024, // 50MB buffer
maxBuffer: 50 * 1024 * 1024,
stdio: ['inherit', 'pipe', 'pipe']
});
// Check for spawn-level errors (command not found, permission denied, etc.)
if (result.error) {
const code = (result.error as NodeJS.ErrnoException).code;
let errorMessage: string;
if (code === 'ENOENT') {
errorMessage = sandbox
? 'Docker is not installed. Install Docker Desktop to use --sandbox mode.'
: 'Claude CLI is not installed. Install with: npm install -g @anthropic-ai/claude-code';
} else if (code === 'EACCES') {
errorMessage = `Permission denied executing '${command}'`;
} else {
errorMessage = `Failed to execute '${command}': ${result.error.message}`;
}
console.error(`[Loop Error] ${errorMessage}`);
return {
iteration: iterationNum,
status: 'error',
duration: Date.now() - startTime,
message: errorMessage
};
const errorMessage = this.formatCommandError(
result.error,
command,
sandbox
);
this.reportError(callbacks, errorMessage);
return this.createErrorIteration(iterationNum, startTime, errorMessage);
}
const output = (result.stdout || '') + (result.stderr || '');
if (output) {
callbacks?.onOutput?.(output);
}
// Print output to console (spawnSync with pipe captures but doesn't display)
if (output) console.log(output);
// Handle null status (spawn failed but no error object - shouldn't happen but be safe)
if (result.status === null) {
return {
iteration: iterationNum,
status: 'error',
duration: Date.now() - startTime,
message: 'Command terminated abnormally (no exit code)'
};
const errorMsg = 'Command terminated abnormally (no exit code)';
this.reportError(callbacks, errorMsg);
return this.createErrorIteration(iterationNum, startTime, errorMsg);
}
const { status, message } = this.parseCompletion(output, result.status);
@@ -326,7 +362,282 @@ Loop iteration ${iteration} of ${config.iterations}${tagInfo}`;
iteration: iterationNum,
status,
duration: Date.now() - startTime,
message,
...(includeOutput && { output })
};
}
/**
* Execute an iteration with verbose output (shows Claude's work in real-time).
* Uses Claude's stream-json format to display assistant messages as they arrive.
* @param prompt - The prompt to send to Claude
* @param iterationNum - Current iteration number (1-indexed)
* @param command - The command to execute ('claude' or 'docker')
* @param sandbox - Whether running in Docker sandbox mode
* @param includeOutput - Whether to include full output in the result
* @param startTime - Timestamp when iteration started (for duration calculation)
* @param callbacks - Optional callbacks for presentation layer output
* @returns Promise resolving to the iteration result
*/
private executeVerboseIteration(
prompt: string,
iterationNum: number,
command: string,
sandbox: boolean,
includeOutput: boolean,
startTime: number,
callbacks?: LoopOutputCallbacks
): Promise<LoopIteration> {
const args = this.buildCommandArgs(prompt, sandbox, true);
return new Promise((resolve) => {
// Prevent multiple resolutions from race conditions between error/close events
let isResolved = false;
const resolveOnce = (result: LoopIteration): void => {
if (!isResolved) {
isResolved = true;
resolve(result);
}
};
const child = spawn(command, args, {
cwd: this.projectRoot,
stdio: ['inherit', 'pipe', 'pipe']
});
// Track stdout completion to handle race between data and close events
let stdoutEnded = false;
let finalResult = '';
let buffer = '';
const processLine = (line: string): void => {
if (!line.startsWith('{')) return;
try {
const event = JSON.parse(line);
// Validate event structure before accessing properties
if (!this.isValidStreamEvent(event)) {
return;
}
this.handleStreamEvent(event, callbacks);
// Capture final result for includeOutput feature
if (event.type === 'result') {
finalResult = typeof event.result === 'string' ? event.result : '';
}
} catch (error) {
// Log malformed JSON for debugging (non-JSON lines like system output are expected)
if (line.trim().startsWith('{')) {
const parseError = `Failed to parse JSON event: ${error instanceof Error ? error.message : 'Unknown error'}. Line: ${line.substring(0, 100)}...`;
this.reportError(callbacks, parseError, 'warning');
}
}
};
// Handle null stdout (shouldn't happen with pipe, but be defensive)
if (!child.stdout) {
resolveOnce(
this.createErrorIteration(
iterationNum,
startTime,
'Failed to capture stdout from child process'
)
);
return;
}
child.stdout.on('data', (data: Buffer) => {
try {
const lines = this.processBufferedLines(
buffer,
data.toString('utf-8')
);
buffer = lines.remaining;
for (const line of lines.complete) {
processLine(line);
}
} catch (error) {
this.reportError(
callbacks,
`Failed to process stdout data: ${error instanceof Error ? error.message : 'Unknown error'}`,
'warning'
);
}
});
child.stdout.on('end', () => {
stdoutEnded = true;
// Process any remaining buffer when stdout ends
if (buffer) {
processLine(buffer);
buffer = '';
}
});
child.stderr?.on('data', (data: Buffer) => {
const stderrText = data.toString('utf-8');
callbacks?.onStderr?.(iterationNum, stderrText);
});
child.on('error', (error: NodeJS.ErrnoException) => {
const errorMessage = this.formatCommandError(error, command, sandbox);
this.reportError(callbacks, errorMessage);
// Cleanup: remove listeners and kill process if still running
child.stdout?.removeAllListeners();
child.stderr?.removeAllListeners();
if (!child.killed) {
try {
child.kill('SIGTERM');
} catch {
// Process may have already exited
}
}
resolveOnce(
this.createErrorIteration(iterationNum, startTime, errorMessage)
);
});
child.on('close', (exitCode: number | null) => {
// Process remaining buffer only if stdout hasn't already ended
if (!stdoutEnded && buffer) {
processLine(buffer);
}
if (exitCode === null) {
const errorMsg = 'Command terminated abnormally (no exit code)';
this.reportError(callbacks, errorMsg);
resolveOnce(
this.createErrorIteration(iterationNum, startTime, errorMsg)
);
return;
}
const { status, message } = this.parseCompletion(finalResult, exitCode);
resolveOnce({
iteration: iterationNum,
status,
duration: Date.now() - startTime,
message,
...(includeOutput && { output: finalResult })
});
});
});
}
/**
* Validate that a parsed JSON object has the expected stream event structure.
*/
private isValidStreamEvent(event: unknown): event is {
type: string;
message?: {
content?: Array<{ type: string; text?: string; name?: string }>;
};
result?: string;
} {
if (!event || typeof event !== 'object') {
return false;
}
const e = event as Record<string, unknown>;
if (!('type' in e) || typeof e.type !== 'string') {
return false;
}
// Validate message structure if present
if ('message' in e && e.message !== undefined) {
if (typeof e.message !== 'object' || e.message === null) {
return false;
}
const msg = e.message as Record<string, unknown>;
if ('content' in msg && !Array.isArray(msg.content)) {
return false;
}
}
return true;
}
private buildCommandArgs(
prompt: string,
sandbox: boolean,
verbose: boolean
): string[] {
if (sandbox) {
return ['sandbox', 'run', 'claude', '-p', prompt];
}
const args = ['-p', prompt, '--dangerously-skip-permissions'];
if (verbose) {
// Use stream-json format to show Claude's work in real-time
args.push('--output-format', 'stream-json', '--verbose');
}
return args;
}
private formatCommandError(
error: NodeJS.ErrnoException,
command: string,
sandbox: boolean
): string {
if (error.code === 'ENOENT') {
return sandbox
? 'Docker is not installed. Install Docker Desktop to use --sandbox mode.'
: 'Claude CLI is not installed. Install with: npm install -g @anthropic-ai/claude-code';
}
if (error.code === 'EACCES') {
return `Permission denied executing '${command}'`;
}
return `Failed to execute '${command}': ${error.message}`;
}
private createErrorIteration(
iterationNum: number,
startTime: number,
message: string
): LoopIteration {
return {
iteration: iterationNum,
status: 'error',
duration: Date.now() - startTime,
message
};
}
private handleStreamEvent(
event: {
type: string;
message?: {
content?: Array<{ type: string; text?: string; name?: string }>;
};
},
callbacks?: LoopOutputCallbacks
): void {
if (event.type !== 'assistant' || !event.message?.content) return;
for (const block of event.message.content) {
if (block.type === 'text' && block.text) {
callbacks?.onText?.(block.text);
} else if (block.type === 'tool_use' && block.name) {
callbacks?.onToolUse?.(block.name);
}
}
}
private processBufferedLines(
buffer: string,
newData: string
): { complete: string[]; remaining: string } {
const combined = buffer + newData;
const lines = combined.split('\n');
return {
complete: lines.slice(0, -1),
remaining: lines[lines.length - 1]
};
}
}
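
The verbose path above hinges on two small pieces: line-buffering the `stream-json` output (a chunk can end mid-line) and dispatching `assistant` events to callbacks. A self-contained sketch of that pattern, separate from the service itself (the event shape mirrors only the fields `handleStreamEvent` reads; the full stream-json schema is not assumed):

```ts
let buffer = '';

function onChunk(
  chunk: string,
  onText: (text: string) => void,
  onToolUse: (name: string) => void
): void {
  const lines = (buffer + chunk).split('\n');
  buffer = lines.pop() ?? ''; // keep the incomplete tail for the next chunk
  for (const line of lines) {
    if (!line.startsWith('{')) continue; // skip non-JSON noise
    try {
      const event = JSON.parse(line);
      if (event.type !== 'assistant' || !Array.isArray(event.message?.content)) continue;
      for (const block of event.message.content) {
        if (block.type === 'text' && block.text) onText(block.text);
        else if (block.type === 'tool_use' && block.name) onToolUse(block.name);
      }
    } catch {
      // partial or malformed JSON; the service reports this as a warning instead
    }
  }
}

// A JSON event split across two chunks is reassembled before parsing:
const log = (t: string) => process.stdout.write(t);
const logTool = (name: string) => console.log(`[tool] ${name}`);
onChunk('{"type":"assistant","message":{"content":[{"type":"te', log, logTool);
onChunk('xt","text":"Working on task 1"}]}}\n', log, logTool); // -> onText("Working on task 1")
```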

View File

@@ -12,6 +12,33 @@ export type LoopPreset =
| 'duplication'
| 'entropy';
/**
* Output callbacks for loop execution.
* These allow the caller (CLI/MCP) to handle presentation while
* the service stays focused on business logic.
*
* Callback modes:
* - `onIterationStart`, `onIterationEnd`, `onError`, `onStderr`: Called in both verbose and non-verbose modes
* - `onText`, `onToolUse`: Called only in VERBOSE mode (--verbose flag)
* - `onOutput`: Called only in NON-VERBOSE mode (default)
*/
export interface LoopOutputCallbacks {
/** Called at the start of each iteration (both modes) */
onIterationStart?: (iteration: number, total: number) => void;
/** Called when Claude outputs text (VERBOSE MODE ONLY) */
onText?: (text: string) => void;
/** Called when Claude invokes a tool (VERBOSE MODE ONLY) */
onToolUse?: (toolName: string) => void;
/** Called when an error occurs (both modes) */
onError?: (message: string, severity?: 'warning' | 'error') => void;
/** Called for stderr output (both modes) */
onStderr?: (iteration: number, text: string) => void;
/** Called when non-verbose iteration completes with output (NON-VERBOSE MODE ONLY) */
onOutput?: (output: string) => void;
/** Called at the end of each iteration with the result (both modes) */
onIterationEnd?: (iteration: LoopIteration) => void;
}
/**
* Configuration options for a loop execution
*/
@@ -28,6 +55,39 @@ export interface LoopConfig {
tag?: string;
/** Run Claude in Docker sandbox mode (default: false) */
sandbox?: boolean;
/**
* Include full Claude output in iteration results (default: false)
*
* When true: `LoopIteration.output` will contain full stdout+stderr text
* When false: `LoopIteration.output` will be undefined (saves memory)
*
* Can be combined with `verbose=true` to both display and capture output.
* Note: Output can be large (up to 50MB per iteration).
*/
includeOutput?: boolean;
/**
* Show Claude's work in real-time instead of just the result (default: false)
*
* When true: Output appears as Claude generates it (shows thinking, tool calls)
* When false: Output appears only after iteration completes
*
* Independent of `includeOutput` - controls display timing, not capture.
* Note: NOT compatible with `sandbox=true` (will return error).
*/
verbose?: boolean;
/**
* Brief title describing the current initiative/goal (optional)
*
* If provided, included in the progress file header to give Claude
* context about the bigger picture across iterations.
* Example: "Implement streaming output for loop command"
*/
brief?: string;
/**
* Output callbacks for presentation layer (CLI/MCP).
* If not provided, the service runs silently (no console output).
*/
callbacks?: LoopOutputCallbacks;
}
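
Putting the new options together: a sketch of a config with callbacks wired for a CLI. The types are the ones exported above; the package specifier is illustrative, and the required/optional split of the pre-existing `LoopConfig` fields (prompt, iterations, progressFile, sleepSeconds) is assumed from how the service uses them:

```ts
import type { LoopConfig, LoopOutputCallbacks } from 'task-master-ai'; // illustrative specifier

const callbacks: LoopOutputCallbacks = {
  onIterationStart: (i, total) => console.log(`━━━ Iteration ${i} of ${total} ━━━`),
  onText: (text) => process.stdout.write(text), // verbose mode only
  onToolUse: (name) => console.log(`[tool] ${name}`), // verbose mode only
  onOutput: (output) => console.log(output), // non-verbose mode only
  onStderr: (_iteration, text) => process.stderr.write(text),
  onError: (message, severity = 'error') => console.error(`[${severity}] ${message}`),
  onIterationEnd: (it) => console.log(`Iteration ${it.iteration} finished: ${it.status}`)
};

const config: LoopConfig = {
  prompt: 'default', // preset name or custom prompt
  iterations: 3,
  progressFile: '.taskmaster/progress.txt',
  sleepSeconds: 5,
  verbose: true, // stream Claude's work live; not valid together with sandbox: true
  includeOutput: false, // don't keep full output on each LoopIteration
  brief: 'Implement streaming output for loop command',
  callbacks
};
```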
/**
@@ -44,6 +104,15 @@ export interface LoopIteration {
message?: string;
/** Duration of this iteration in milliseconds */
duration?: number;
/**
* Full Claude output text
*
* ONLY present when `LoopConfig.includeOutput=true`.
* Contains concatenated stdout and stderr from Claude CLI execution.
* May include ANSI color codes and tool call output.
* Can be large - use `includeOutput=false` to save memory.
*/
output?: string;
}
/**
@@ -58,4 +127,6 @@ export interface LoopResult {
tasksCompleted: number;
/** Final status of the loop */
finalStatus: 'all_complete' | 'max_iterations' | 'blocked' | 'error';
/** Error message when finalStatus is 'error' (optional) */
errorMessage?: string;
}
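
And on the consuming side, a small helper that reports a `LoopResult`, using only the fields documented above (the import specifier is again illustrative):

```ts
import type { LoopResult } from 'task-master-ai'; // illustrative specifier

function summarizeLoop(result: LoopResult): void {
  if (result.finalStatus === 'error') {
    console.error(result.errorMessage ?? 'Loop failed without an error message');
    return;
  }
  console.log(
    `Finished (${result.finalStatus}): ${result.tasksCompleted} task(s) across ${result.totalIterations} iteration(s)`
  );
  for (const it of result.iterations) {
    // `output` is only populated when the loop ran with includeOutput: true
    const captured = it.output ? `${it.output.length} chars captured` : 'output not captured';
    console.log(`  #${it.iteration} ${it.status} in ${it.duration ?? 0}ms (${captured})`);
  }
}
```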

View File

@@ -283,6 +283,7 @@ export class FileStorage implements IStorage {
/**
* Normalize task IDs - keep Task IDs as strings, Subtask IDs as numbers
* Note: Uses spread operator to preserve all task properties including user-defined metadata
*/
private normalizeTaskIds(tasks: Task[]): Task[] {
return tasks.map((task) => ({
@@ -372,9 +373,37 @@ export class FileStorage implements IStorage {
throw new Error(`Task ${taskId} not found`);
}
const existingTask = tasks[taskIndex];
// Preserve subtask metadata when subtasks are updated
// AI operations don't include metadata in returned subtasks
let mergedSubtasks = updates.subtasks;
if (updates.subtasks && existingTask.subtasks) {
mergedSubtasks = updates.subtasks.map((updatedSubtask) => {
// Type-coerce IDs for comparison; fall back to title match if IDs don't match
const originalSubtask = existingTask.subtasks?.find(
(st) =>
String(st.id) === String(updatedSubtask.id) ||
(updatedSubtask.title && st.title === updatedSubtask.title)
);
// Merge metadata: preserve original and add/override with new
if (originalSubtask?.metadata || updatedSubtask.metadata) {
return {
...updatedSubtask,
metadata: {
...(originalSubtask?.metadata || {}),
...(updatedSubtask.metadata || {})
}
};
}
return updatedSubtask;
});
}
tasks[taskIndex] = {
...tasks[taskIndex],
...existingTask,
...updates,
...(mergedSubtasks && { subtasks: mergedSubtasks }),
id: String(taskId) // Keep consistent with normalizeTaskIds
};
await this.saveTasks(tasks, tag);
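
The preservation guarantee comes down to object spread: AI-generated updates never carry a `metadata` key, so spreading the existing task first leaves it untouched, and subtask metadata is merged with the update winning on conflicts. A plain-object sketch of the same pattern (not the FileStorage class itself):

```ts
const existingTask = {
  id: '1',
  title: 'Original Title',
  metadata: { jiraKey: 'PROJ-123', storyPoints: 3 }
};

// An AI update: fields to change, no metadata key at all.
const aiUpdate = { title: 'AI Updated Title', details: 'Generated details' };

const merged = { ...existingTask, ...aiUpdate };
console.log(merged.metadata); // { jiraKey: 'PROJ-123', storyPoints: 3 } -- preserved

// Subtask metadata merge: original first, update second, so the update wins on conflicts.
const updateMetadata = { storyPoints: 5 };
const mergedMetadata = { ...existingTask.metadata, ...updateMetadata };
console.log(mergedMetadata); // { jiraKey: 'PROJ-123', storyPoints: 5 }
```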

View File

@@ -0,0 +1,345 @@
/**
* @fileoverview Unit tests for TaskEntity metadata handling
*
* Tests the preservation of user-defined metadata through all TaskEntity operations
* including construction, serialization, and deserialization.
*/
import { describe, expect, it } from 'vitest';
import { TaskEntity } from './task.entity.js';
import type { Task } from '../../../common/types/index.js';
/**
* Creates a minimal valid task for testing
*/
function createMinimalTask(overrides: Partial<Task> = {}): Task {
return {
id: '1',
title: 'Test Task',
description: 'Test description',
status: 'pending',
priority: 'medium',
dependencies: [],
details: 'Task details',
testStrategy: 'Test strategy',
subtasks: [],
...overrides
};
}
describe('TaskEntity', () => {
describe('metadata property', () => {
it('should preserve metadata through constructor', () => {
const metadata = { uuid: '123', custom: 'value' };
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
it('should handle undefined metadata', () => {
const task = createMinimalTask();
// Explicitly not setting metadata
const entity = new TaskEntity(task);
expect(entity.metadata).toBeUndefined();
});
it('should handle empty metadata object', () => {
const task = createMinimalTask({ metadata: {} });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual({});
});
it('should preserve metadata with string values', () => {
const metadata = { externalId: 'EXT-123', source: 'jira' };
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
it('should preserve metadata with number values', () => {
const metadata = { priority: 5, score: 100 };
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
it('should preserve metadata with boolean values', () => {
const metadata = { isBlocking: true, reviewed: false };
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
it('should preserve metadata with nested objects', () => {
const metadata = {
jira: {
key: 'PROJ-123',
sprint: {
id: 5,
name: 'Sprint 5'
}
}
};
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
it('should preserve metadata with arrays', () => {
const metadata = {
labels: ['bug', 'high-priority'],
relatedIds: [1, 2, 3]
};
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
it('should preserve metadata with null values', () => {
const metadata = { deletedAt: null, archivedBy: null };
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
it('should preserve complex mixed metadata', () => {
const metadata = {
externalId: 'EXT-456',
score: 85,
isUrgent: true,
tags: ['frontend', 'refactor'],
integration: {
source: 'github',
issueNumber: 123,
labels: ['enhancement']
},
timestamps: {
importedAt: '2024-01-15T10:00:00Z',
lastSynced: null
}
};
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
expect(entity.metadata).toEqual(metadata);
});
});
describe('toJSON() with metadata', () => {
it('should include metadata in toJSON output', () => {
const metadata = { uuid: '123', custom: 'value' };
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
const json = entity.toJSON();
expect(json.metadata).toEqual(metadata);
});
it('should include undefined metadata in toJSON output', () => {
const task = createMinimalTask();
const entity = new TaskEntity(task);
const json = entity.toJSON();
expect(json.metadata).toBeUndefined();
});
it('should include empty metadata object in toJSON output', () => {
const task = createMinimalTask({ metadata: {} });
const entity = new TaskEntity(task);
const json = entity.toJSON();
expect(json.metadata).toEqual({});
});
it('should preserve nested metadata through toJSON', () => {
const metadata = {
integration: {
source: 'linear',
config: {
apiKey: 'redacted',
projectId: 'proj_123'
}
}
};
const task = createMinimalTask({ metadata });
const entity = new TaskEntity(task);
const json = entity.toJSON();
expect(json.metadata).toEqual(metadata);
});
});
describe('round-trip preservation', () => {
it('should preserve metadata through full round-trip', () => {
const originalMetadata = {
uuid: '550e8400-e29b-41d4-a716-446655440000',
externalSystem: 'jira',
customField: { nested: 'value' }
};
const originalTask = createMinimalTask({ metadata: originalMetadata });
// Task -> TaskEntity -> toJSON() -> TaskEntity -> toJSON()
const entity1 = new TaskEntity(originalTask);
const json1 = entity1.toJSON();
const entity2 = new TaskEntity(json1);
const json2 = entity2.toJSON();
expect(json2.metadata).toEqual(originalMetadata);
});
it('should preserve all task fields alongside metadata', () => {
const metadata = { custom: 'data' };
const task = createMinimalTask({
id: '42',
title: 'Important Task',
description: 'Do the thing',
status: 'in-progress',
priority: 'high',
dependencies: ['1', '2'],
details: 'Detailed info',
testStrategy: 'Unit tests',
tags: ['urgent'],
metadata
});
const entity = new TaskEntity(task);
const json = entity.toJSON();
expect(json.id).toBe('42');
expect(json.title).toBe('Important Task');
expect(json.description).toBe('Do the thing');
expect(json.status).toBe('in-progress');
expect(json.priority).toBe('high');
expect(json.dependencies).toEqual(['1', '2']);
expect(json.details).toBe('Detailed info');
expect(json.testStrategy).toBe('Unit tests');
expect(json.tags).toEqual(['urgent']);
expect(json.metadata).toEqual(metadata);
});
});
describe('fromObject() with metadata', () => {
it('should preserve metadata through fromObject', () => {
const metadata = { externalId: 'EXT-789' };
const task = createMinimalTask({ metadata });
const entity = TaskEntity.fromObject(task);
expect(entity.metadata).toEqual(metadata);
});
it('should handle undefined metadata in fromObject', () => {
const task = createMinimalTask();
const entity = TaskEntity.fromObject(task);
expect(entity.metadata).toBeUndefined();
});
});
describe('fromArray() with metadata', () => {
it('should preserve metadata on all tasks through fromArray', () => {
const task1 = createMinimalTask({
id: '1',
metadata: { source: 'import1' }
});
const task2 = createMinimalTask({
id: '2',
metadata: { source: 'import2' }
});
const task3 = createMinimalTask({ id: '3' }); // No metadata
const entities = TaskEntity.fromArray([task1, task2, task3]);
expect(entities).toHaveLength(3);
expect(entities[0].metadata).toEqual({ source: 'import1' });
expect(entities[1].metadata).toEqual({ source: 'import2' });
expect(entities[2].metadata).toBeUndefined();
});
it('should preserve different metadata structures across tasks', () => {
const tasks = [
createMinimalTask({ id: '1', metadata: { simple: 'value' } }),
createMinimalTask({
id: '2',
metadata: { nested: { deep: { value: 123 } } }
}),
createMinimalTask({ id: '3', metadata: { array: [1, 2, 3] } }),
createMinimalTask({ id: '4', metadata: {} })
];
const entities = TaskEntity.fromArray(tasks);
const jsons = entities.map((e) => e.toJSON());
expect(jsons[0].metadata).toEqual({ simple: 'value' });
expect(jsons[1].metadata).toEqual({ nested: { deep: { value: 123 } } });
expect(jsons[2].metadata).toEqual({ array: [1, 2, 3] });
expect(jsons[3].metadata).toEqual({});
});
});
describe('no corruption of other fields', () => {
it('should not affect other task fields when metadata is present', () => {
const taskWithMetadata = createMinimalTask({
id: '99',
title: 'Original Title',
metadata: { someKey: 'someValue' }
});
const entity = new TaskEntity(taskWithMetadata);
expect(entity.id).toBe('99');
expect(entity.title).toBe('Original Title');
expect(entity.status).toBe('pending');
expect(entity.priority).toBe('medium');
});
it('should not affect subtasks when metadata is present', () => {
const taskWithSubtasks = createMinimalTask({
metadata: { tracked: true },
subtasks: [
{
id: 1,
parentId: '1',
title: 'Subtask 1',
description: 'Subtask desc',
status: 'pending',
priority: 'low',
dependencies: [],
details: '',
testStrategy: ''
}
]
});
const entity = new TaskEntity(taskWithSubtasks);
expect(entity.subtasks).toHaveLength(1);
expect(entity.subtasks[0].title).toBe('Subtask 1');
expect(entity.metadata).toEqual({ tracked: true });
});
});
});

View File

@@ -36,6 +36,7 @@ export class TaskEntity implements Task {
recommendedSubtasks?: number;
expansionPrompt?: string;
complexityReasoning?: string;
metadata?: Record<string, unknown>;
constructor(data: Task | (Omit<Task, 'id'> & { id: number | string })) {
this.validate(data);
@@ -68,6 +69,7 @@ export class TaskEntity implements Task {
this.recommendedSubtasks = data.recommendedSubtasks;
this.expansionPrompt = data.expansionPrompt;
this.complexityReasoning = data.complexityReasoning;
this.metadata = data.metadata;
}
/**
@@ -255,7 +257,8 @@ export class TaskEntity implements Task {
complexity: this.complexity,
recommendedSubtasks: this.recommendedSubtasks,
expansionPrompt: this.expansionPrompt,
complexityReasoning: this.complexityReasoning
complexityReasoning: this.complexityReasoning,
metadata: this.metadata
};
}

View File

@@ -0,0 +1,481 @@
/**
* @fileoverview Integration tests for metadata preservation across AI operations
*
* Tests that user-defined metadata survives all AI operations including:
* - update-task: AI updates task fields but doesn't include metadata in response
* - expand-task: AI generates subtasks but parent task metadata is preserved
* - parse-prd: AI generates new tasks without metadata field
*
* Key insight: AI schemas (base-schemas.js) intentionally EXCLUDE the metadata field.
* This means AI responses never include metadata, and the spread operator in
* storage/service layers preserves existing metadata during updates.
*
* These tests simulate what happens when AI operations update tasks - the AI
* returns a task object without a metadata field, and we verify that the
* existing metadata is preserved through the storage layer.
*/
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import { FileStorage } from '../../../src/modules/storage/adapters/file-storage/file-storage.js';
import type { Task, Subtask } from '../../../src/common/types/index.js';
/**
* Creates a minimal valid task for testing
*/
function createTask(id: string, overrides: Partial<Task> = {}): Task {
return {
id,
title: `Task ${id}`,
description: `Description for task ${id}`,
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [],
...overrides
};
}
/**
* Creates a realistic metadata object like external integrations would produce
*/
function createRealisticMetadata(): Record<string, unknown> {
return {
uuid: '550e8400-e29b-41d4-a716-446655440000',
githubIssue: 42,
sprint: 'Q1-S3',
jira: {
key: 'PROJ-123',
type: 'story',
epic: 'EPIC-45'
},
importedAt: '2024-01-15T10:30:00Z',
source: 'github-sync',
labels: ['frontend', 'refactor', 'high-priority']
};
}
describe('AI Operation Metadata Preservation - Integration Tests', () => {
let tempDir: string;
let storage: FileStorage;
beforeEach(() => {
// Create a temp directory for each test
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-ai-test-'));
// Create .taskmaster/tasks directory structure
const taskmasterDir = path.join(tempDir, '.taskmaster', 'tasks');
fs.mkdirSync(taskmasterDir, { recursive: true });
storage = new FileStorage(tempDir);
});
afterEach(() => {
// Clean up temp directory
fs.rmSync(tempDir, { recursive: true, force: true });
});
describe('update-task operation simulation', () => {
it('should preserve metadata when AI returns task without metadata field', async () => {
// Setup: Task with user metadata
const originalMetadata = createRealisticMetadata();
const tasks: Task[] = [
createTask('1', {
title: 'Original Title',
description: 'Original description',
metadata: originalMetadata
})
];
await storage.saveTasks(tasks);
// Simulate AI response: AI updates title/description but doesn't include metadata
// This is the exact pattern from update-task-by-id.js
const aiGeneratedUpdate: Partial<Task> = {
title: 'AI Updated Title',
description: 'AI refined description with more detail',
details: 'AI generated implementation details',
testStrategy: 'AI suggested test approach'
// Note: NO metadata field - AI schemas don't include it
};
// Apply update through FileStorage (simulating what AI operations do)
await storage.updateTask('1', aiGeneratedUpdate);
// Verify: AI fields updated, metadata preserved
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].title).toBe('AI Updated Title');
expect(loadedTasks[0].description).toBe(
'AI refined description with more detail'
);
expect(loadedTasks[0].details).toBe(
'AI generated implementation details'
);
expect(loadedTasks[0].testStrategy).toBe('AI suggested test approach');
// Critical: metadata must be preserved
expect(loadedTasks[0].metadata).toEqual(originalMetadata);
});
it('should preserve metadata through multiple sequential AI updates', async () => {
const metadata = { externalId: 'EXT-999', version: 1 };
const tasks: Task[] = [createTask('1', { metadata })];
await storage.saveTasks(tasks);
// First AI update
await storage.updateTask('1', { title: 'First AI Update' });
// Second AI update
await storage.updateTask('1', {
description: 'Second AI Update adds details'
});
// Third AI update
await storage.updateTask('1', { priority: 'high' });
// Verify metadata survived all updates
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].title).toBe('First AI Update');
expect(loadedTasks[0].description).toBe('Second AI Update adds details');
expect(loadedTasks[0].priority).toBe('high');
expect(loadedTasks[0].metadata).toEqual(metadata);
});
it('should preserve realistic integration metadata during AI operations', async () => {
const realisticMetadata = createRealisticMetadata();
const tasks: Task[] = [
createTask('1', {
title: 'Sync from GitHub',
metadata: realisticMetadata
})
];
await storage.saveTasks(tasks);
// AI enriches the task
await storage.updateTask('1', {
title: 'Implement user authentication',
description: 'Set up JWT-based authentication system',
details: `
## Implementation Plan
1. Create auth middleware
2. Implement JWT token generation
3. Add refresh token logic
4. Set up protected routes
`.trim(),
testStrategy:
'Unit tests for JWT functions, integration tests for auth flow'
});
const loadedTasks = await storage.loadTasks();
// All AI updates applied
expect(loadedTasks[0].title).toBe('Implement user authentication');
expect(loadedTasks[0].details).toContain('Implementation Plan');
// Realistic metadata preserved with all its nested structure
expect(loadedTasks[0].metadata).toEqual(realisticMetadata);
expect(
(loadedTasks[0].metadata as Record<string, unknown>).githubIssue
).toBe(42);
expect(
(
(loadedTasks[0].metadata as Record<string, unknown>).jira as Record<
string,
unknown
>
).key
).toBe('PROJ-123');
});
});
describe('expand-task operation simulation', () => {
it('should preserve parent task metadata when adding AI-generated subtasks', async () => {
const parentMetadata = { tracked: true, source: 'import' };
const tasks: Task[] = [
createTask('1', {
metadata: parentMetadata,
subtasks: []
})
];
await storage.saveTasks(tasks);
// Simulate expand-task: AI generates subtasks (without metadata)
const aiGeneratedSubtasks: Subtask[] = [
{
id: 1,
parentId: '1',
title: 'AI Subtask 1',
description: 'First step generated by AI',
status: 'pending',
priority: 'medium',
dependencies: [],
details: 'Implementation details',
testStrategy: 'Test approach'
// No metadata - AI doesn't generate it
},
{
id: 2,
parentId: '1',
title: 'AI Subtask 2',
description: 'Second step generated by AI',
status: 'pending',
priority: 'medium',
dependencies: ['1'],
details: 'More details',
testStrategy: 'More tests'
}
];
// Apply subtasks update
await storage.updateTask('1', { subtasks: aiGeneratedSubtasks });
// Verify parent metadata preserved
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toEqual(parentMetadata);
expect(loadedTasks[0].subtasks).toHaveLength(2);
// Subtasks don't inherit parent metadata
expect(loadedTasks[0].subtasks[0].metadata).toBeUndefined();
expect(loadedTasks[0].subtasks[1].metadata).toBeUndefined();
});
it('should preserve subtask metadata when parent is updated', async () => {
const tasks: Task[] = [
createTask('1', {
metadata: { parentMeta: 'parent-value' },
subtasks: [
{
id: 1,
parentId: '1',
title: 'Subtask with metadata',
description: 'Has its own metadata',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
metadata: { subtaskMeta: 'subtask-value' }
}
]
})
];
await storage.saveTasks(tasks);
// AI updates parent task (not subtasks)
await storage.updateTask('1', {
title: 'Parent Updated by AI',
description: 'New description'
});
const loadedTasks = await storage.loadTasks();
// Parent metadata preserved
expect(loadedTasks[0].metadata).toEqual({ parentMeta: 'parent-value' });
// Subtask and its metadata preserved
expect(loadedTasks[0].subtasks[0].metadata).toEqual({
subtaskMeta: 'subtask-value'
});
});
});
describe('parse-prd operation simulation', () => {
it('should generate tasks without metadata field (as AI would)', async () => {
// Simulate parse-prd output: AI generates tasks without metadata
const aiGeneratedTasks: Task[] = [
{
id: '1',
title: 'Set up project structure',
description: 'Initialize the project with proper folder structure',
status: 'pending',
priority: 'high',
dependencies: [],
details: 'Create src/, tests/, docs/ directories',
testStrategy: 'Verify directories exist',
subtasks: []
// No metadata - AI doesn't generate it
},
{
id: '2',
title: 'Implement core functionality',
description: 'Build the main features',
status: 'pending',
priority: 'high',
dependencies: ['1'],
details: 'Implement main modules',
testStrategy: 'Unit tests for each module',
subtasks: []
}
];
await storage.saveTasks(aiGeneratedTasks);
// Verify tasks saved correctly without metadata
const loadedTasks = await storage.loadTasks();
expect(loadedTasks).toHaveLength(2);
expect(loadedTasks[0].metadata).toBeUndefined();
expect(loadedTasks[1].metadata).toBeUndefined();
// Later, user can add metadata
await storage.updateTask('1', {
metadata: { externalId: 'USER-ADDED-123' }
});
const updatedTasks = await storage.loadTasks();
expect(updatedTasks[0].metadata).toEqual({
externalId: 'USER-ADDED-123'
});
});
});
describe('update-subtask operation simulation', () => {
it('should preserve subtask metadata when appending info', async () => {
const tasks: Task[] = [
createTask('1', {
subtasks: [
{
id: 1,
parentId: '1',
title: 'Tracked subtask',
description: 'Has metadata from import',
status: 'pending',
priority: 'medium',
dependencies: [],
details: 'Initial details',
testStrategy: '',
metadata: { importedFrom: 'jira', ticketId: 'JIRA-456' }
}
]
})
];
await storage.saveTasks(tasks);
// Update subtask details (like update-subtask command does)
const updatedSubtask: Subtask = {
id: 1,
parentId: '1',
title: 'Tracked subtask',
description: 'Has metadata from import',
status: 'in-progress',
priority: 'medium',
dependencies: [],
details:
'Initial details\n\n<info added on 2024-01-20T10:00:00Z>\nImplementation notes from AI\n</info added on 2024-01-20T10:00:00Z>',
testStrategy: 'AI suggested tests',
metadata: { importedFrom: 'jira', ticketId: 'JIRA-456' }
};
await storage.updateTask('1', { subtasks: [updatedSubtask] });
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].subtasks[0].metadata).toEqual({
importedFrom: 'jira',
ticketId: 'JIRA-456'
});
expect(loadedTasks[0].subtasks[0].details).toContain(
'Implementation notes from AI'
);
});
});
describe('mixed AI and storage metadata coexistence', () => {
it('should preserve user metadata alongside AI-generated task fields', async () => {
const tasks: Task[] = [
createTask('1', {
// AI-generated fields
relevantFiles: [
{
path: 'src/auth.ts',
description: 'Auth module',
action: 'modify'
}
],
category: 'development',
skills: ['TypeScript', 'Security'],
acceptanceCriteria: ['Tests pass', 'Code reviewed'],
// User-defined metadata (from import)
metadata: {
externalId: 'JIRA-789',
storyPoints: 5,
sprint: 'Sprint 10'
}
})
];
await storage.saveTasks(tasks);
// AI updates the task (doesn't touch metadata)
await storage.updateTask('1', {
relevantFiles: [
{ path: 'src/auth.ts', description: 'Auth module', action: 'modify' },
{
path: 'src/middleware.ts',
description: 'Added middleware',
action: 'create'
}
],
skills: ['TypeScript', 'Security', 'JWT']
});
const loadedTasks = await storage.loadTasks();
// AI fields updated
expect(loadedTasks[0].relevantFiles).toHaveLength(2);
expect(loadedTasks[0].skills).toContain('JWT');
// User metadata preserved
expect(loadedTasks[0].metadata).toEqual({
externalId: 'JIRA-789',
storyPoints: 5,
sprint: 'Sprint 10'
});
});
});
describe('edge cases for AI operations', () => {
it('should handle task with only metadata being updated by AI', async () => {
// Task has ONLY metadata set (sparse task)
const tasks: Task[] = [
createTask('1', {
metadata: { sparse: true, tracking: 'minimal' }
})
];
await storage.saveTasks(tasks);
// AI fills in all the other fields
await storage.updateTask('1', {
title: 'AI Generated Title',
description: 'AI Generated Description',
details: 'AI Generated Details',
testStrategy: 'AI Generated Test Strategy',
priority: 'high'
});
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].title).toBe('AI Generated Title');
expect(loadedTasks[0].priority).toBe('high');
expect(loadedTasks[0].metadata).toEqual({
sparse: true,
tracking: 'minimal'
});
});
it('should preserve deeply nested metadata through AI operations', async () => {
const deepMetadata = {
integration: {
source: {
type: 'github',
repo: {
owner: 'org',
name: 'repo',
issue: {
number: 123,
labels: ['bug', 'priority-1']
}
}
}
}
};
const tasks: Task[] = [createTask('1', { metadata: deepMetadata })];
await storage.saveTasks(tasks);
// Multiple AI operations
await storage.updateTask('1', { title: 'Update 1' });
await storage.updateTask('1', { description: 'Update 2' });
await storage.updateTask('1', { status: 'in-progress' });
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toEqual(deepMetadata);
});
});
});

View File

@@ -0,0 +1,540 @@
/**
* @fileoverview Integration tests for MCP tool metadata updates
*
* Tests that metadata updates via update-task and update-subtask MCP tools
* work correctly with the TASK_MASTER_ALLOW_METADATA_UPDATES flag.
*
* These tests validate the metadata flow from MCP tool layer through
* direct functions to the legacy scripts and storage layer.
*
* NOTE: These tests focus on validation logic (JSON parsing, env flags, merge behavior)
* rather than full end-to-end MCP tool calls. End-to-end behavior is covered by:
* - FileStorage metadata tests (storage layer)
* - AI operation metadata preservation tests (full workflow)
 * - Direct function integration (exercised by the validation tests in this file)
*/
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import { validateMcpMetadata } from '@tm/mcp';
describe('MCP Tool Metadata Updates - Integration Tests', () => {
let tempDir: string;
let tasksJsonPath: string;
beforeEach(() => {
// Create a temp directory for each test
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-mcp-test-'));
// Create .taskmaster/tasks directory structure
const taskmasterDir = path.join(tempDir, '.taskmaster', 'tasks');
fs.mkdirSync(taskmasterDir, { recursive: true });
tasksJsonPath = path.join(taskmasterDir, 'tasks.json');
});
afterEach(() => {
// Clean up temp directory
fs.rmSync(tempDir, { recursive: true, force: true });
// Reset env vars
delete process.env.TASK_MASTER_ALLOW_METADATA_UPDATES;
});
describe('metadata JSON validation', () => {
it('should validate metadata is a valid JSON object', () => {
// Test valid JSON objects
const validMetadata = [
'{"key": "value"}',
'{"githubIssue": 42, "sprint": "Q1"}',
'{"nested": {"deep": true}}'
];
for (const meta of validMetadata) {
const parsed = JSON.parse(meta);
expect(typeof parsed).toBe('object');
expect(parsed).not.toBeNull();
expect(Array.isArray(parsed)).toBe(false);
}
});
it('should reject invalid metadata formats', () => {
const invalidMetadata = [
'"string"', // Just a string
'123', // Just a number
'true', // Just a boolean
'null', // Null
'[1, 2, 3]' // Array
];
for (const meta of invalidMetadata) {
const parsed = JSON.parse(meta);
const isValidObject =
typeof parsed === 'object' &&
parsed !== null &&
!Array.isArray(parsed);
expect(isValidObject).toBe(false);
}
});
it('should reject invalid JSON strings', () => {
const invalidJson = [
'{key: "value"}', // Missing quotes
"{'key': 'value'}", // Single quotes
'{"key": }' // Incomplete
];
for (const json of invalidJson) {
expect(() => JSON.parse(json)).toThrow();
}
});
});
describe('TASK_MASTER_ALLOW_METADATA_UPDATES flag', () => {
it('should block metadata updates when flag is not set', () => {
delete process.env.TASK_MASTER_ALLOW_METADATA_UPDATES;
const allowMetadataUpdates =
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES === 'true';
expect(allowMetadataUpdates).toBe(false);
});
it('should block metadata updates when flag is set to false', () => {
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES = 'false';
const allowMetadataUpdates =
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES === 'true';
expect(allowMetadataUpdates).toBe(false);
});
it('should allow metadata updates when flag is set to true', () => {
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES = 'true';
const allowMetadataUpdates =
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES === 'true';
expect(allowMetadataUpdates).toBe(true);
});
it('should be case-sensitive (TRUE should not work)', () => {
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES = 'TRUE';
const allowMetadataUpdates =
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES === 'true';
expect(allowMetadataUpdates).toBe(false);
});
});
describe('metadata merge logic', () => {
it('should merge new metadata with existing metadata', () => {
const existingMetadata = { githubIssue: 42, sprint: 'Q1' };
const newMetadata = { storyPoints: 5, reviewed: true };
const merged = {
...(existingMetadata || {}),
...(newMetadata || {})
};
expect(merged).toEqual({
githubIssue: 42,
sprint: 'Q1',
storyPoints: 5,
reviewed: true
});
});
it('should override existing keys with new values', () => {
const existingMetadata = { githubIssue: 42, sprint: 'Q1' };
const newMetadata = { sprint: 'Q2' }; // Override sprint
const merged = {
...(existingMetadata || {}),
...(newMetadata || {})
};
expect(merged).toEqual({
githubIssue: 42,
sprint: 'Q2' // Overridden
});
});
it('should handle empty existing metadata', () => {
const existingMetadata = undefined;
const newMetadata = { key: 'value' };
const merged = {
...(existingMetadata || {}),
...(newMetadata || {})
};
expect(merged).toEqual({ key: 'value' });
});
it('should handle empty new metadata', () => {
const existingMetadata = { key: 'value' };
const newMetadata = undefined;
const merged = {
...(existingMetadata || {}),
...(newMetadata || {})
};
expect(merged).toEqual({ key: 'value' });
});
it('should preserve nested objects in metadata', () => {
const existingMetadata = {
jira: { key: 'PROJ-123' },
other: 'data'
};
const newMetadata = {
jira: { key: 'PROJ-456', type: 'bug' } // Replace entire jira object
};
const merged = {
...(existingMetadata || {}),
...(newMetadata || {})
};
expect(merged).toEqual({
jira: { key: 'PROJ-456', type: 'bug' }, // Entire jira object replaced
other: 'data'
});
});
});
describe('metadata-only update detection', () => {
it('should detect metadata-only update when prompt is empty', () => {
const prompt: string = '';
const metadata = { key: 'value' };
const isMetadataOnly = metadata && (!prompt || prompt.trim() === '');
expect(isMetadataOnly).toBe(true);
});
it('should detect metadata-only update when prompt is whitespace', () => {
const prompt: string = ' ';
const metadata = { key: 'value' };
const isMetadataOnly = metadata && (!prompt || prompt.trim() === '');
expect(isMetadataOnly).toBe(true);
});
it('should not be metadata-only when prompt is provided', () => {
const prompt: string = 'Update task details';
const metadata = { key: 'value' };
const isMetadataOnly = metadata && (!prompt || prompt.trim() === '');
expect(isMetadataOnly).toBe(false);
});
it('should not be metadata-only when neither is provided', () => {
const prompt: string = '';
const metadata = null;
const isMetadataOnly = metadata && (!prompt || prompt.trim() === '');
expect(isMetadataOnly).toBeFalsy(); // metadata is null, so falsy
});
});
describe('tasks.json file format with metadata', () => {
it('should write and read metadata correctly in tasks.json', () => {
const tasksData = {
tasks: [
{
id: 1,
title: 'Test Task',
description: 'Description',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [],
metadata: {
githubIssue: 42,
sprint: 'Q1-S3',
storyPoints: 5
}
}
],
metadata: {
version: '1.0.0',
lastModified: new Date().toISOString(),
taskCount: 1,
completedCount: 0
}
};
// Write
fs.writeFileSync(tasksJsonPath, JSON.stringify(tasksData, null, 2));
// Read and verify
const rawContent = fs.readFileSync(tasksJsonPath, 'utf-8');
const parsed = JSON.parse(rawContent);
expect(parsed.tasks[0].metadata).toEqual({
githubIssue: 42,
sprint: 'Q1-S3',
storyPoints: 5
});
});
it('should write and read subtask metadata correctly', () => {
const tasksData = {
tasks: [
{
id: 1,
title: 'Parent Task',
description: 'Description',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [
{
id: 1,
parentId: 1,
title: 'Subtask',
description: 'Subtask description',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
metadata: {
linkedTicket: 'JIRA-456',
reviewed: true
}
}
]
}
],
metadata: {
version: '1.0.0',
lastModified: new Date().toISOString(),
taskCount: 1,
completedCount: 0
}
};
// Write
fs.writeFileSync(tasksJsonPath, JSON.stringify(tasksData, null, 2));
// Read and verify
const rawContent = fs.readFileSync(tasksJsonPath, 'utf-8');
const parsed = JSON.parse(rawContent);
expect(parsed.tasks[0].subtasks[0].metadata).toEqual({
linkedTicket: 'JIRA-456',
reviewed: true
});
});
});
describe('error message formatting', () => {
it('should provide clear error for disabled metadata updates', () => {
const errorMessage =
'Metadata updates are disabled. Set TASK_MASTER_ALLOW_METADATA_UPDATES=true in your MCP server environment to enable metadata modifications.';
expect(errorMessage).toContain('TASK_MASTER_ALLOW_METADATA_UPDATES');
expect(errorMessage).toContain('true');
expect(errorMessage).toContain('MCP server environment');
});
it('should provide clear error for invalid JSON', () => {
const invalidJson = '{key: value}';
const errorMessage = `Invalid metadata JSON: ${invalidJson}. Provide a valid JSON object string.`;
expect(errorMessage).toContain(invalidJson);
expect(errorMessage).toContain('valid JSON object');
});
it('should provide clear error for non-object JSON', () => {
const errorMessage =
'Invalid metadata: must be a JSON object (not null or array)';
expect(errorMessage).toContain('JSON object');
expect(errorMessage).toContain('not null or array');
});
});
});
/**
* Unit tests for the actual validateMcpMetadata function from @tm/mcp
* These tests verify the security gate behavior for MCP metadata updates.
*/
describe('validateMcpMetadata function', () => {
// Mock error response creator that matches the MCP ContentResult format
const mockCreateErrorResponse = (message: string) => ({
content: [{ type: 'text' as const, text: `Error: ${message}` }],
isError: true
});
// Helper to safely extract text from content
const getErrorText = (
error: { content: Array<{ type: string; text?: string }> } | undefined
): string => {
if (!error?.content?.[0]) return '';
const content = error.content[0];
return 'text' in content ? (content.text ?? '') : '';
};
afterEach(() => {
delete process.env.TASK_MASTER_ALLOW_METADATA_UPDATES;
});
describe('when metadataString is null/undefined', () => {
it('should return null parsedMetadata for undefined input', () => {
const result = validateMcpMetadata(undefined, mockCreateErrorResponse);
expect(result.parsedMetadata).toBeNull();
expect(result.error).toBeUndefined();
});
it('should return null parsedMetadata for null input', () => {
const result = validateMcpMetadata(null, mockCreateErrorResponse);
expect(result.parsedMetadata).toBeNull();
expect(result.error).toBeUndefined();
});
it('should return null parsedMetadata for empty string', () => {
const result = validateMcpMetadata('', mockCreateErrorResponse);
expect(result.parsedMetadata).toBeNull();
expect(result.error).toBeUndefined();
});
});
describe('when TASK_MASTER_ALLOW_METADATA_UPDATES is not set', () => {
beforeEach(() => {
delete process.env.TASK_MASTER_ALLOW_METADATA_UPDATES;
});
it('should return error when flag is not set', () => {
const result = validateMcpMetadata(
'{"key": "value"}',
mockCreateErrorResponse
);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
expect(getErrorText(result.error)).toContain(
'TASK_MASTER_ALLOW_METADATA_UPDATES'
);
});
it('should return error when flag is set to "false"', () => {
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES = 'false';
const result = validateMcpMetadata(
'{"key": "value"}',
mockCreateErrorResponse
);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
});
it('should return error when flag is "TRUE" (case sensitive)', () => {
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES = 'TRUE';
const result = validateMcpMetadata(
'{"key": "value"}',
mockCreateErrorResponse
);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
});
it('should return error when flag is "True" (case sensitive)', () => {
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES = 'True';
const result = validateMcpMetadata(
'{"key": "value"}',
mockCreateErrorResponse
);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
});
});
describe('when TASK_MASTER_ALLOW_METADATA_UPDATES is "true"', () => {
beforeEach(() => {
process.env.TASK_MASTER_ALLOW_METADATA_UPDATES = 'true';
});
it('should return parsed metadata for valid JSON object', () => {
const result = validateMcpMetadata(
'{"key": "value"}',
mockCreateErrorResponse
);
expect(result.parsedMetadata).toEqual({ key: 'value' });
expect(result.error).toBeUndefined();
});
it('should return parsed metadata for complex nested object', () => {
const complexMeta = {
githubIssue: 42,
sprint: 'Q1-S3',
nested: { deep: { value: true } },
array: [1, 2, 3]
};
const result = validateMcpMetadata(
JSON.stringify(complexMeta),
mockCreateErrorResponse
);
expect(result.parsedMetadata).toEqual(complexMeta);
expect(result.error).toBeUndefined();
});
it('should return parsed metadata for empty object', () => {
const result = validateMcpMetadata('{}', mockCreateErrorResponse);
expect(result.parsedMetadata).toEqual({});
expect(result.error).toBeUndefined();
});
it('should return error for invalid JSON string', () => {
const result = validateMcpMetadata(
'{key: "value"}',
mockCreateErrorResponse
);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
expect(getErrorText(result.error)).toContain('Invalid metadata JSON');
});
it('should return error for JSON array', () => {
const result = validateMcpMetadata('[1, 2, 3]', mockCreateErrorResponse);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
expect(getErrorText(result.error)).toContain(
'must be a JSON object (not null or array)'
);
});
it('should return error for JSON null', () => {
const result = validateMcpMetadata('null', mockCreateErrorResponse);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
expect(getErrorText(result.error)).toContain(
'must be a JSON object (not null or array)'
);
});
it('should return error for JSON string primitive', () => {
const result = validateMcpMetadata('"string"', mockCreateErrorResponse);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
expect(getErrorText(result.error)).toContain(
'must be a JSON object (not null or array)'
);
});
it('should return error for JSON number primitive', () => {
const result = validateMcpMetadata('123', mockCreateErrorResponse);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
expect(getErrorText(result.error)).toContain(
'must be a JSON object (not null or array)'
);
});
it('should return error for JSON boolean primitive', () => {
const result = validateMcpMetadata('true', mockCreateErrorResponse);
expect(result.error).toBeDefined();
expect(result.error?.isError).toBe(true);
expect(getErrorText(result.error)).toContain(
'must be a JSON object (not null or array)'
);
});
});
});

View File

@@ -0,0 +1,472 @@
/**
* @fileoverview Integration tests for FileStorage metadata preservation
*
* Tests that user-defined metadata survives all FileStorage CRUD operations
* including load, save, update, and append.
*/
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import { FileStorage } from '../../../src/modules/storage/adapters/file-storage/file-storage.js';
import type { Task } from '../../../src/common/types/index.js';
/**
* Creates a minimal valid task for testing
*/
function createTask(id: string, overrides: Partial<Task> = {}): Task {
return {
id,
title: `Task ${id}`,
description: `Description for task ${id}`,
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [],
...overrides
};
}
describe('FileStorage Metadata Preservation - Integration Tests', () => {
let tempDir: string;
let storage: FileStorage;
beforeEach(() => {
// Create a temp directory for each test
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'taskmaster-test-'));
// Create .taskmaster/tasks directory structure
const taskmasterDir = path.join(tempDir, '.taskmaster', 'tasks');
fs.mkdirSync(taskmasterDir, { recursive: true });
storage = new FileStorage(tempDir);
});
afterEach(() => {
// Clean up temp directory
fs.rmSync(tempDir, { recursive: true, force: true });
});
describe('saveTasks() and loadTasks() round-trip', () => {
it('should preserve metadata through save and load cycle', async () => {
const tasks: Task[] = [
createTask('1', {
metadata: {
externalId: 'JIRA-123',
source: 'import',
customField: { nested: 'value' }
}
}),
createTask('2', {
metadata: {
score: 85,
isUrgent: true
}
})
];
await storage.saveTasks(tasks);
const loadedTasks = await storage.loadTasks();
expect(loadedTasks).toHaveLength(2);
expect(loadedTasks[0].metadata).toEqual({
externalId: 'JIRA-123',
source: 'import',
customField: { nested: 'value' }
});
expect(loadedTasks[1].metadata).toEqual({
score: 85,
isUrgent: true
});
});
it('should preserve empty metadata object', async () => {
const tasks: Task[] = [createTask('1', { metadata: {} })];
await storage.saveTasks(tasks);
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toEqual({});
});
it('should handle tasks without metadata', async () => {
const tasks: Task[] = [createTask('1')]; // No metadata
await storage.saveTasks(tasks);
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toBeUndefined();
});
it('should preserve complex metadata with various types', async () => {
const complexMetadata = {
string: 'value',
number: 42,
float: 3.14,
boolean: true,
nullValue: null,
array: [1, 'two', { three: 3 }],
nested: {
deep: {
deeper: {
value: 'found'
}
}
}
};
const tasks: Task[] = [createTask('1', { metadata: complexMetadata })];
await storage.saveTasks(tasks);
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toEqual(complexMetadata);
});
it('should preserve metadata on subtasks', async () => {
const tasks: Task[] = [
createTask('1', {
metadata: { parentMeta: 'value' },
subtasks: [
{
id: 1,
parentId: '1',
title: 'Subtask 1',
description: 'Description',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
metadata: { subtaskMeta: 'subtask-value' }
}
]
})
];
await storage.saveTasks(tasks);
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toEqual({ parentMeta: 'value' });
expect(loadedTasks[0].subtasks[0].metadata).toEqual({
subtaskMeta: 'subtask-value'
});
});
});
describe('updateTask() metadata preservation', () => {
it('should preserve existing metadata when updating other fields', async () => {
const originalMetadata = { externalId: 'EXT-123', version: 1 };
const tasks: Task[] = [createTask('1', { metadata: originalMetadata })];
await storage.saveTasks(tasks);
// Update title only, not metadata
await storage.updateTask('1', { title: 'Updated Title' });
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].title).toBe('Updated Title');
expect(loadedTasks[0].metadata).toEqual(originalMetadata);
});
it('should allow updating metadata field directly', async () => {
const tasks: Task[] = [createTask('1', { metadata: { original: true } })];
await storage.saveTasks(tasks);
// Update metadata
await storage.updateTask('1', {
metadata: { original: true, updated: true, newField: 'value' }
});
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toEqual({
original: true,
updated: true,
newField: 'value'
});
});
it('should allow replacing metadata entirely', async () => {
const tasks: Task[] = [
createTask('1', { metadata: { oldField: 'old' } })
];
await storage.saveTasks(tasks);
// Replace metadata entirely
await storage.updateTask('1', { metadata: { newField: 'new' } });
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].metadata).toEqual({ newField: 'new' });
});
it('should preserve metadata when updating status', async () => {
const tasks: Task[] = [createTask('1', { metadata: { tracked: true } })];
await storage.saveTasks(tasks);
await storage.updateTask('1', { status: 'in-progress' });
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].status).toBe('in-progress');
expect(loadedTasks[0].metadata).toEqual({ tracked: true });
});
});
describe('appendTasks() metadata preservation', () => {
it('should preserve metadata on existing tasks when appending', async () => {
const existingTasks: Task[] = [
createTask('1', { metadata: { existing: true } })
];
await storage.saveTasks(existingTasks);
// Append new tasks
const newTasks: Task[] = [
createTask('2', { metadata: { newTask: true } })
];
await storage.appendTasks(newTasks);
const loadedTasks = await storage.loadTasks();
expect(loadedTasks).toHaveLength(2);
expect(loadedTasks.find((t) => t.id === '1')?.metadata).toEqual({
existing: true
});
expect(loadedTasks.find((t) => t.id === '2')?.metadata).toEqual({
newTask: true
});
});
});
describe('loadTask() single task metadata', () => {
it('should preserve metadata when loading single task', async () => {
const tasks: Task[] = [
createTask('1', { metadata: { specific: 'metadata' } }),
createTask('2', { metadata: { other: 'data' } })
];
await storage.saveTasks(tasks);
const task = await storage.loadTask('1');
expect(task).toBeDefined();
expect(task?.metadata).toEqual({ specific: 'metadata' });
});
});
describe('metadata alongside AI implementation metadata', () => {
it('should preserve both user metadata and AI metadata', async () => {
const tasks: Task[] = [
createTask('1', {
// AI implementation metadata
relevantFiles: [
{
path: 'src/test.ts',
description: 'Test file',
action: 'modify'
}
],
category: 'development',
skills: ['TypeScript'],
acceptanceCriteria: ['Tests pass'],
// User-defined metadata
metadata: {
externalId: 'JIRA-456',
importedAt: '2024-01-15T10:00:00Z'
}
})
];
await storage.saveTasks(tasks);
const loadedTasks = await storage.loadTasks();
// AI metadata preserved
expect(loadedTasks[0].relevantFiles).toHaveLength(1);
expect(loadedTasks[0].category).toBe('development');
expect(loadedTasks[0].skills).toEqual(['TypeScript']);
// User metadata preserved
expect(loadedTasks[0].metadata).toEqual({
externalId: 'JIRA-456',
importedAt: '2024-01-15T10:00:00Z'
});
});
});
describe('AI operation metadata preservation', () => {
it('should preserve metadata when updating task with AI-like partial update', async () => {
// Simulate existing task with user metadata
const tasks: Task[] = [
createTask('1', {
title: 'Original Title',
metadata: { externalId: 'JIRA-123', version: 1 }
})
];
await storage.saveTasks(tasks);
// Simulate AI update - only updates specific fields, no metadata field
// This mimics what happens when AI processes update-task
const aiUpdate: Partial<Task> = {
title: 'AI Updated Title',
description: 'AI generated description',
details: 'AI generated details'
// Note: no metadata field - AI schemas don't include it
};
await storage.updateTask('1', aiUpdate);
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].title).toBe('AI Updated Title');
expect(loadedTasks[0].description).toBe('AI generated description');
// User metadata must be preserved
expect(loadedTasks[0].metadata).toEqual({
externalId: 'JIRA-123',
version: 1
});
});
it('should preserve metadata when adding AI-generated subtasks', async () => {
const tasks: Task[] = [
createTask('1', {
metadata: { tracked: true, source: 'import' },
subtasks: []
})
];
await storage.saveTasks(tasks);
// Simulate expand-task adding subtasks
// Subtasks from AI don't have metadata field
const updatedTask: Partial<Task> = {
subtasks: [
{
id: 1,
parentId: '1',
title: 'AI Generated Subtask',
description: 'Description',
status: 'pending',
priority: 'medium',
dependencies: [],
details: 'Details',
testStrategy: 'Tests'
// No metadata - AI doesn't generate it
}
]
};
await storage.updateTask('1', updatedTask);
const loadedTasks = await storage.loadTasks();
// Parent task metadata preserved
expect(loadedTasks[0].metadata).toEqual({
tracked: true,
source: 'import'
});
// Subtask has no metadata (as expected from AI)
expect(loadedTasks[0].subtasks[0].metadata).toBeUndefined();
});
it('should handle multiple sequential AI updates preserving metadata', async () => {
const tasks: Task[] = [
createTask('1', {
metadata: { originalField: 'preserved' }
})
];
await storage.saveTasks(tasks);
// First AI update
await storage.updateTask('1', { title: 'First Update' });
// Second AI update
await storage.updateTask('1', { description: 'Second Update' });
// Third AI update
await storage.updateTask('1', { priority: 'high' });
const loadedTasks = await storage.loadTasks();
expect(loadedTasks[0].title).toBe('First Update');
expect(loadedTasks[0].description).toBe('Second Update');
expect(loadedTasks[0].priority).toBe('high');
// Metadata preserved through all updates
expect(loadedTasks[0].metadata).toEqual({ originalField: 'preserved' });
});
it('should preserve metadata when update object omits metadata field entirely', async () => {
// This is how AI operations work - they simply don't include metadata
const tasks: Task[] = [
createTask('1', {
metadata: { important: 'data' }
})
];
await storage.saveTasks(tasks);
// Update WITHOUT metadata field (AI schemas don't include it)
const updateWithoutMetadata: Partial<Task> = { title: 'Updated' };
await storage.updateTask('1', updateWithoutMetadata);
const loadedTasks = await storage.loadTasks();
// When metadata field is absent from updates, existing metadata is preserved
expect(loadedTasks[0].metadata).toEqual({ important: 'data' });
});
});
describe('file format verification', () => {
it('should write metadata to JSON file correctly', async () => {
const tasks: Task[] = [createTask('1', { metadata: { written: true } })];
await storage.saveTasks(tasks);
// Read raw file to verify format
const filePath = path.join(tempDir, '.taskmaster', 'tasks', 'tasks.json');
const rawContent = fs.readFileSync(filePath, 'utf-8');
const parsed = JSON.parse(rawContent);
expect(parsed.tasks[0].metadata).toEqual({ written: true });
});
it('should load metadata from pre-existing JSON file', async () => {
// Write a tasks.json file manually
const tasksDir = path.join(tempDir, '.taskmaster', 'tasks');
const filePath = path.join(tasksDir, 'tasks.json');
const fileContent = {
tasks: [
{
id: '1',
title: 'Pre-existing task',
description: 'Description',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [],
metadata: {
preExisting: true,
importedFrom: 'external-system'
}
}
],
metadata: {
version: '1.0.0',
lastModified: new Date().toISOString(),
taskCount: 1,
completedCount: 0
}
};
fs.writeFileSync(filePath, JSON.stringify(fileContent, null, 2));
// Load through FileStorage
const loadedTasks = await storage.loadTasks();
expect(loadedTasks).toHaveLength(1);
expect(loadedTasks[0].metadata).toEqual({
preExisting: true,
importedFrom: 'external-system'
});
});
});
});

View File

@@ -492,4 +492,157 @@ describe('Task Metadata Extraction - Integration Tests', () => {
expect(validCategories).toContain(task.category);
});
});
describe('User-Defined Metadata Field', () => {
it('should preserve user-defined metadata through JSON serialization', () => {
const taskWithMetadata: Task = {
id: '1',
title: 'Task with custom metadata',
description: 'Test description',
status: 'pending',
priority: 'high',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [],
metadata: {
externalId: 'JIRA-123',
source: 'import',
customField: { nested: 'value' }
}
};
const serialized = JSON.stringify(taskWithMetadata);
const deserialized: Task = JSON.parse(serialized);
expect(deserialized.metadata).toEqual(taskWithMetadata.metadata);
expect(deserialized.metadata?.externalId).toBe('JIRA-123');
expect(deserialized.metadata?.customField).toEqual({ nested: 'value' });
});
it('should preserve metadata on subtasks through JSON serialization', () => {
const taskWithSubtasks: Task = {
id: '1',
title: 'Parent task',
description: 'Test',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
metadata: { parentMeta: true },
subtasks: [
{
id: 1,
parentId: '1',
title: 'Subtask 1',
description: 'Test',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
metadata: { subtaskMeta: 'value1' }
}
]
};
const serialized = JSON.stringify(taskWithSubtasks);
const deserialized: Task = JSON.parse(serialized);
expect(deserialized.metadata).toEqual({ parentMeta: true });
expect(deserialized.subtasks[0].metadata).toEqual({
subtaskMeta: 'value1'
});
});
it('should handle empty metadata object', () => {
const task: Task = {
id: '1',
title: 'Task',
description: 'Test',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [],
metadata: {}
};
const serialized = JSON.stringify(task);
const deserialized: Task = JSON.parse(serialized);
expect(deserialized.metadata).toEqual({});
});
it('should handle complex metadata with various types', () => {
const task: Task = {
id: '1',
title: 'Task',
description: 'Test',
status: 'pending',
priority: 'medium',
dependencies: [],
details: '',
testStrategy: '',
subtasks: [],
metadata: {
string: 'value',
number: 42,
boolean: true,
nullValue: null,
array: [1, 2, 3],
nested: {
deep: {
value: 'found'
}
}
}
};
const serialized = JSON.stringify(task);
const deserialized: Task = JSON.parse(serialized);
expect(deserialized.metadata?.string).toBe('value');
expect(deserialized.metadata?.number).toBe(42);
expect(deserialized.metadata?.boolean).toBe(true);
expect(deserialized.metadata?.nullValue).toBeNull();
expect(deserialized.metadata?.array).toEqual([1, 2, 3]);
expect((deserialized.metadata?.nested as any)?.deep?.value).toBe('found');
});
it('should preserve metadata alongside AI implementation metadata', () => {
const task: Task = {
id: '1',
title: 'Task',
description: 'Test',
status: 'pending',
priority: 'medium',
dependencies: [],
details: 'Some details',
testStrategy: 'Unit tests',
subtasks: [],
// AI implementation metadata
relevantFiles: [
{ path: 'src/test.ts', description: 'Test file', action: 'modify' }
],
category: 'development',
skills: ['TypeScript'],
// User-defined metadata
metadata: {
externalId: 'EXT-456',
importedAt: '2024-01-15T10:00:00Z'
}
};
const serialized = JSON.stringify(task);
const deserialized: Task = JSON.parse(serialized);
// Both types of metadata should be preserved
expect(deserialized.relevantFiles).toHaveLength(1);
expect(deserialized.category).toBe('development');
expect(deserialized.metadata?.externalId).toBe('EXT-456');
});
});
});

View File

@@ -5249,6 +5249,15 @@ Examples:
return programInstance;
}
/**
* Load the TUI module (coming soon)
* @returns {Promise<object|null>} null - TUI not yet available
*/
async function loadTUI() {
// TUI is coming soon - return null for now
return null;
}
/**
* Launch the interactive TUI REPL
*/
@@ -5257,10 +5266,8 @@ async function launchREPL() {
const tui = await loadTUI();
if (!tui) {
// Fallback to help if TUI not available
console.log(
chalk.yellow('TUI mode not available. Install @tm/tui to enable.')
);
// TUI coming soon - show help for now
console.log(chalk.yellow('TUI mode coming soon!'));
console.log(chalk.dim('Showing help instead...\n'));
if (isConnectedToHamster()) {
displayHamsterHelp();
@@ -5398,7 +5405,8 @@ async function runCLI(argv = process.argv) {
// Display banner if not in a pipe (except for init/start/repl commands which have their own)
const isInitCommand = argv.includes('init');
const isREPLCommand = argv.includes('tui') || argv.includes('repl');
if (process.stdout.isTTY && !isInitCommand && !isREPLCommand) {
const noBanner = argv.includes('--no-banner');
if (process.stdout.isTTY && !isInitCommand && !isREPLCommand && !noBanner) {
displayBanner();
}
@@ -5435,7 +5443,9 @@ async function runCLI(argv = process.argv) {
// NOTE: getConfig() might be called during setupCLI->registerCommands if commands need config
// This means the ConfigurationError might be thrown here if configuration file is missing.
const programInstance = setupCLI();
await programInstance.parseAsync(argv);
// Filter out --no-banner since it's handled above and not a Commander option
const filteredArgv = argv.filter((arg) => arg !== '--no-banner');
await programInstance.parseAsync(filteredArgv);
// Check if migration has occurred and show FYI notice once
try {

View File

@@ -48,7 +48,13 @@ async function updateSubtaskById(
context = {},
outputFormat = context.mcpLog ? 'json' : 'text'
) {
const { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;
const {
session,
mcpLog,
projectRoot: providedProjectRoot,
tag,
metadata
} = context;
const logFn = mcpLog || consoleLog;
const isMCP = !!mcpLog;
@@ -71,10 +77,13 @@ async function updateSubtaskById(
if (!subtaskId || typeof subtaskId !== 'string') {
throw new Error('Subtask ID cannot be empty.');
}
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
// Allow metadata-only updates (no prompt required if metadata is provided)
if (
(!prompt || typeof prompt !== 'string' || prompt.trim() === '') &&
!metadata
) {
throw new Error(
'Prompt cannot be empty. Please provide context for the subtask update.'
'Prompt cannot be empty unless metadata is provided. Please provide context for the subtask update or metadata to merge.'
);
}
@@ -93,6 +102,7 @@ async function updateSubtaskById(
tag,
appendMode: true, // Subtask updates are always append mode
useResearch,
metadata,
isMCP,
outputFormat,
report
@@ -164,6 +174,30 @@ async function updateSubtaskById(
const subtask = parentTask.subtasks[subtaskIndex];
// --- Metadata-Only Update (Fast Path) ---
// If only metadata is provided (no prompt), skip AI and just update metadata
if (metadata && (!prompt || prompt.trim() === '')) {
report('info', `Metadata-only update for subtask ${subtaskId}`);
// Merge new metadata with existing
subtask.metadata = {
...(subtask.metadata || {}),
...metadata
};
parentTask.subtasks[subtaskIndex] = subtask;
writeJSON(tasksPath, data, projectRoot, tag);
report(
'success',
`Successfully updated metadata for subtask ${subtaskId}`
);
return {
updatedSubtask: subtask,
telemetryData: null,
tagInfo: { tag }
};
}
// --- End Metadata-Only Update ---
// --- Context Gathering ---
let gatheredContext = '';
try {
@@ -334,6 +368,14 @@ async function updateSubtaskById(
const updatedSubtask = parentTask.subtasks[subtaskIndex];
// Merge metadata if provided (preserve existing metadata)
if (metadata) {
updatedSubtask.metadata = {
...(updatedSubtask.metadata || {}),
...metadata
};
}
if (outputFormat === 'text' && getDebugFlag(session)) {
console.log(
'>>> DEBUG: Subtask details AFTER AI update:',

View File

@@ -58,7 +58,13 @@ async function updateTaskById(
outputFormat = 'text',
appendMode = false
) {
const { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;
const {
session,
mcpLog,
projectRoot: providedProjectRoot,
tag,
metadata
} = context;
const { report, isMCP } = createBridgeLogger(mcpLog, session);
try {
@@ -70,8 +76,15 @@ async function updateTaskById(
if (taskId === null || taskId === undefined || String(taskId).trim() === '')
throw new Error('Task ID cannot be empty.');
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '')
throw new Error('Prompt cannot be empty.');
// Allow metadata-only updates (prompt can be empty if metadata is provided)
if (
(!prompt || typeof prompt !== 'string' || prompt.trim() === '') &&
!metadata
) {
throw new Error(
'Prompt cannot be empty unless metadata is provided for update.'
);
}
// Determine project root first (needed for API key checks)
const projectRoot = providedProjectRoot || findProjectRoot();
@@ -99,6 +112,7 @@ async function updateTaskById(
tag,
appendMode,
useResearch,
metadata,
isMCP,
outputFormat,
report
@@ -166,6 +180,27 @@ async function updateTaskById(
}
// --- End Task Loading ---
// --- Metadata-Only Update (Fast Path) ---
// If only metadata is provided (no prompt), skip AI and just update metadata
if (metadata && (!prompt || prompt.trim() === '')) {
report('info', `Metadata-only update for task ${taskId}`);
// Merge new metadata with existing
taskToUpdate.metadata = {
...(taskToUpdate.metadata || {}),
...metadata
};
data.tasks[taskIndex] = taskToUpdate;
writeJSON(tasksPath, data, projectRoot, tag);
report('success', `Successfully updated metadata for task ${taskId}`);
return {
updatedTask: taskToUpdate,
telemetryData: null,
tagInfo: { tag }
};
}
// --- End Metadata-Only Update ---
// --- Context Gathering ---
let gatheredContext = '';
try {
@@ -385,6 +420,14 @@ async function updateTaskById(
}
}
// Merge metadata if provided
if (metadata) {
taskToUpdate.metadata = {
...(taskToUpdate.metadata || {}),
...metadata
};
}
// Write the updated task back to file
data.tasks[taskIndex] = taskToUpdate;
writeJSON(tasksPath, data, projectRoot, tag);
@@ -455,6 +498,14 @@ async function updateTaskById(
if (updatedTask.subtasks && Array.isArray(updatedTask.subtasks)) {
let currentSubtaskId = 1;
updatedTask.subtasks = updatedTask.subtasks.map((subtask) => {
// Find original subtask to preserve its metadata
// Use type-coerced ID matching (AI may return string IDs vs numeric)
// Also match by title as fallback (subtask titles are typically unique)
const originalSubtask = taskToUpdate.subtasks?.find(
(st) =>
String(st.id) === String(subtask.id) ||
(subtask.title && st.title === subtask.title)
);
// Fix AI-generated subtask IDs that might be strings or use parent ID as prefix
const correctedSubtask = {
...subtask,
@@ -472,7 +523,11 @@ async function updateTaskById(
)
: [],
status: subtask.status || 'pending',
testStrategy: subtask.testStrategy ?? null
testStrategy: subtask.testStrategy ?? null,
// Preserve subtask metadata from original (AI schema excludes metadata)
...(originalSubtask?.metadata && {
metadata: originalSubtask.metadata
})
};
currentSubtaskId++;
return correctedSubtask;
@@ -529,6 +584,17 @@ async function updateTaskById(
}
// --- End Task Validation/Correction ---
// --- Preserve and Merge Metadata ---
// AI responses don't include metadata (AI schema excludes it)
// Preserve existing metadata from original task and merge new metadata if provided
if (taskToUpdate.metadata || metadata) {
updatedTask.metadata = {
...(taskToUpdate.metadata || {}),
...(metadata || {})
};
}
// --- End Preserve and Merge Metadata ---
// --- Update Task Data (Keep existing) ---
data.tasks[taskIndex] = updatedTask;
// --- End Update Task Data ---

View File

@@ -14,6 +14,8 @@ import {
} from '../../src/constants/paths.js';
// Import specific config getters needed here
import { getDebugFlag, getLogLevel } from './config-manager.js';
// Import FileOperations from tm-core for atomic file modifications
import { FileOperations } from '@tm/core';
import * as gitUtils from './utils/git-utils.js';
// Global silent mode flag
@@ -973,9 +975,45 @@ function markMigrationForNotice(tasksJsonPath) {
}
}
// Shared FileOperations instance for modifyJSON
let _fileOps = null;
/**
* Gets or creates the shared FileOperations instance
* @returns {FileOperations} The shared FileOperations instance
*/
function getFileOps() {
if (!_fileOps) {
_fileOps = new FileOperations();
}
return _fileOps;
}
/**
* Atomically modifies a JSON file using a callback pattern.
* This is the safe way to update JSON files - it reads, modifies, and writes
* all within a single lock, preventing race conditions.
*
* Uses FileOperations from @tm/core for proper cross-process locking.
*
* @param {string} filepath - Path to the JSON file
* @param {Function} modifier - Async callback that receives current data and returns modified data.
* Signature: (currentData: Object) => Object | Promise<Object>
* @returns {Promise<void>}
*/
async function modifyJSON(filepath, modifier) {
const fileOps = getFileOps();
await fileOps.modifyJson(filepath, modifier);
}
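// Usage sketch (illustrative only; `tasksPath` and the task shape below are
// hypothetical, not taken from this module). The callback runs while the file
// lock is held, and the returned object is written back atomically:
//
//   await modifyJSON(tasksPath, (data) => {
//     const task = data.tasks.find((t) => String(t.id) === '1');
//     if (task) task.status = 'done';
//     return data; // written back under the same lock
//   });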
/**
* Writes and saves a JSON file. Handles tagged task lists properly.
* Uses cross-process file locking and atomic writes to prevent race conditions.
*
* @deprecated For new code, prefer modifyJSON() which provides atomic read-modify-write.
* This function is maintained for backwards compatibility but callers should migrate
* to modifyJSON() to prevent race conditions from stale reads.
*
* @param {string} filepath - Path to the JSON file
* @param {Object} data - Data to write (can be resolved tag data or raw tagged data)
* @param {string} projectRoot - Optional project root for tag context
@@ -1921,6 +1959,7 @@ export {
log,
readJSON,
writeJSON,
modifyJSON,
sanitizePrompt,
readComplexityReport,
findTaskInComplexityReport,

View File

@@ -10,6 +10,11 @@ import { z } from 'zod';
*
* Other providers (Anthropic, Google, etc.) safely ignore this constraint.
* See: https://platform.openai.com/docs/guides/structured-outputs
*
* NOTE: The `metadata` field (user-defined task metadata) is intentionally EXCLUDED
* from all AI schemas. This ensures AI operations cannot overwrite user metadata.
* When tasks are updated via AI, the spread operator preserves existing metadata
* since AI responses won't include a metadata field.
*/
export const TaskStatusSchema = z.enum([
'pending',

BIN
taskmaster.mcpb Normal file

Binary file not shown.

View File

@@ -51,3 +51,16 @@ if (process.env.SILENCE_CONSOLE === 'true') {
error: () => {}
};
}
// Clean up signal-exit listeners after all tests to prevent open handle warnings
// This is needed because packages like proper-lockfile register signal handlers
afterAll(async () => {
// Give any pending async operations time to complete
await new Promise((resolve) => setImmediate(resolve));
// Clean up any registered signal handlers from signal-exit
const listeners = ['SIGINT', 'SIGTERM', 'SIGHUP'];
for (const signal of listeners) {
process.removeAllListeners(signal);
}
});