Merge pull request #1546 from eyaltoledano/next (Release 0.41.0)

This commit is contained in:
Ralph Khreish
2025-12-28 09:45:03 +01:00
committed by GitHub
26 changed files with 807 additions and 319 deletions

View File

@@ -0,0 +1,8 @@
---
"task-master-ai": patch
---
Codex CLI: validate reasoning effort against model capabilities
- Add provider-level reasoning effort validation for OpenAI models
- Automatically cap unsupported effort levels (e.g., 'xhigh' on gpt-5.1 and gpt-5 becomes 'high')

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Improve CLI startup speed by 2x

View File

@@ -0,0 +1,9 @@
---
"task-master-ai": patch
---
Smarter project root detection with boundary markers
- Prevents Task Master from incorrectly detecting `.taskmaster` folders in your home directory when working inside a different project
- Now stops at project boundaries (`.git`, `package.json`, lock files) instead of searching all the way up to the filesystem root
- Adds support for monorepo markers (`lerna.json`, `nx.json`, `turbo.json`) and additional lock files (`bun.lockb`, `deno.lock`)

View File

@@ -0,0 +1,7 @@
---
"task-master-ai": patch
---
Improve JSON schemas for AI-related commands, making them more compatible with OpenAI models
- Fixes #1541 #1542

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Fixed Vertex AI authentication when using a service account and the Vertex location environment variable.

View File

@@ -26,20 +26,11 @@ jobs:
permissions:
contents: write
pull-requests: write
issues: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
ref: next
fetch-depth: 0 # Need full history to checkout specific commit
- name: Create docs update branch
id: create-branch
run: |
BRANCH_NAME="docs/auto-update-$(date +%Y%m%d-%H%M%S)"
git checkout -b $BRANCH_NAME
echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
- name: Run Claude Code to Update Documentation
uses: anthropics/claude-code-action@v1
@@ -79,47 +70,34 @@ jobs:
Only make changes if the documentation truly needs updating based on the code changes.
- name: Check if changes were made
id: check-changes
run: |
if git diff --quiet; then
echo "has_changes=false" >> $GITHUB_OUTPUT
else
echo "has_changes=true" >> $GITHUB_OUTPUT
git add -A
git config --local user.email "github-actions[bot]@users.noreply.github.com"
git config --local user.name "github-actions[bot]"
git commit -m "docs: auto-update documentation based on changes in next branch
- name: Create Pull Request
uses: peter-evans/create-pull-request@v8
with:
token: ${{ secrets.GITHUB_TOKEN }}
branch: docs/auto-update-${{ github.run_id }}
base: next
title: "docs: update documentation for recent changes"
commit-message: |
docs: auto-update documentation based on changes in next branch
This PR was automatically generated to update documentation based on recent changes.
Original commit: ${{ inputs.commit_message }}
Co-authored-by: Claude <claude-assistant@anthropic.com>"
fi
- name: Push changes and create PR
if: steps.check-changes.outputs.has_changes == 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
git push origin ${{ steps.create-branch.outputs.branch_name }}
Co-authored-by: Claude <claude-assistant@anthropic.com>
body: |
## 📚 Documentation Update
# Create PR using GitHub CLI
gh pr create \
--title "docs: update documentation for recent changes" \
--body "## 📚 Documentation Update
This PR automatically updates documentation based on recent changes merged to the \`next\` branch.
This PR automatically updates documentation based on recent changes merged to the `next` branch.
### Original Changes
**Commit:** ${{ inputs.commit_sha }}
**Message:** ${{ inputs.commit_message }}
### Changed Files in Original Commit
\`\`\`
```
${{ inputs.changed_files }}
\`\`\`
```
### Documentation Updates
This PR includes documentation updates to reflect the changes above. Please review to ensure:
@@ -129,8 +107,9 @@ jobs:
- [ ] Style is consistent with existing documentation
---
*This PR was automatically generated by Claude Code GitHub Action*" \
--base next \
--head ${{ steps.create-branch.outputs.branch_name }} \
--label "documentation" \
--label "automated"
*This PR was automatically generated by Claude Code GitHub Action*
labels: |
documentation
automated
author: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
delete-branch: true

View File

@@ -1,88 +1,187 @@
/**
* @fileoverview Check npm registry for package updates
* @fileoverview Check npm registry for package updates with caching
*
* Uses a simple file-based cache in the OS temp directory to avoid
* hitting npm on every CLI invocation. Cache expires after 1 hour.
*/
import fs from 'node:fs';
import https from 'https';
import os from 'node:os';
import path from 'node:path';
import { fetchChangelogHighlights } from './changelog.js';
import type { UpdateInfo } from './types.js';
import { compareVersions, getCurrentVersion } from './version.js';
// ============================================================================
// Cache Configuration
// ============================================================================

/** How long a cached npm lookup stays valid: one hour, in milliseconds. */
const CACHE_TTL_MS = 3_600_000;

/** Name of the cache file stored in the OS temp directory. */
const CACHE_FILENAME = 'taskmaster-update-cache.json';

/** Shape of the JSON payload persisted to the cache file. */
interface UpdateCache {
	timestamp: number;
	latestVersion: string;
	highlights?: string[];
}

// ============================================================================
// Cache Operations (Single Responsibility: cache I/O)
// ============================================================================

/**
 * Resolve the absolute path of the update cache file inside the OS temp dir.
 */
function getCachePath(): string {
	return path.join(os.tmpdir(), CACHE_FILENAME);
}
/**
 * Read cached update info if still valid.
 *
 * The parsed payload is shape-validated before being trusted: a corrupted or
 * hand-edited cache file must not leak a missing/NaN timestamp (which would
 * make the expiry comparison silently false) or a non-string version
 * downstream.
 *
 * @returns Cached data, or null if expired, missing, or invalid
 */
function readCache(): UpdateCache | null {
	try {
		const cachePath = getCachePath();
		if (!fs.existsSync(cachePath)) return null;

		const parsed: unknown = JSON.parse(fs.readFileSync(cachePath, 'utf-8'));

		// Basic shape validation - JSON.parse output is untrusted input
		if (
			typeof parsed !== 'object' ||
			parsed === null ||
			typeof (parsed as UpdateCache).timestamp !== 'number' ||
			typeof (parsed as UpdateCache).latestVersion !== 'string'
		) {
			return null;
		}

		const data = parsed as UpdateCache;
		const isExpired = Date.now() - data.timestamp > CACHE_TTL_MS;
		return isExpired ? null : data;
	} catch {
		// Unreadable file or malformed JSON - treat as a cache miss
		return null;
	}
}
/**
 * Persist the latest version (and optional changelog highlights) to the
 * cache file. Failures are swallowed: caching is best-effort and must never
 * block or crash the CLI.
 */
function writeCache(latestVersion: string, highlights?: string[]): void {
	const payload: UpdateCache = {
		timestamp: Date.now(),
		latestVersion,
		highlights
	};
	try {
		fs.writeFileSync(getCachePath(), JSON.stringify(payload, null, 2));
	} catch {
		// Cache write failures are non-critical - silently ignore
	}
}
// ============================================================================
// NPM Registry Operations (Single Responsibility: npm API)
// ============================================================================

/** Request timeout for npm registry */
const NPM_TIMEOUT_MS = 3000;

/**
 * Fetch the latest published version of task-master-ai from the npm registry.
 *
 * Never rejects: every failure mode (network error, timeout, non-200 status,
 * malformed JSON, response-stream error) resolves to null so callers can
 * degrade gracefully.
 *
 * @param currentVersion - Used only to build the User-Agent header
 * @returns Latest version string or null on failure
 */
function fetchLatestVersion(currentVersion: string): Promise<string | null> {
	return new Promise((resolve) => {
		const req = https.request(
			{
				hostname: 'registry.npmjs.org',
				path: '/task-master-ai',
				method: 'GET',
				headers: {
					// Abbreviated metadata document - much smaller than the full packument
					Accept: 'application/vnd.npm.install-v1+json',
					'User-Agent': `task-master-ai/${currentVersion}`
				}
			},
			(res) => {
				let data = '';
				// An 'error' on the response stream with no listener would throw
				// an uncaught exception and crash the process - resolve null instead
				res.on('error', () => resolve(null));
				res.on('data', (chunk) => (data += chunk));
				res.on('end', () => {
					try {
						if (res.statusCode !== 200) {
							resolve(null);
							return;
						}
						const npmData = JSON.parse(data);
						resolve(npmData['dist-tags']?.latest || null);
					} catch {
						resolve(null);
					}
				});
			}
		);
		req.on('error', () => resolve(null));
		req.setTimeout(NPM_TIMEOUT_MS, () => {
			// Socket-inactivity timeout: abort and report failure
			req.destroy();
			resolve(null);
		});
		req.end();
	});
}
// ============================================================================
// Public API
// ============================================================================

/**
 * Assemble an UpdateInfo result from the current and latest versions.
 * needsUpdate is true only when latestVersion compares strictly greater.
 */
function buildUpdateInfo(
	currentVersion: string,
	latestVersion: string,
	highlights?: string[]
): UpdateInfo {
	const needsUpdate = compareVersions(currentVersion, latestVersion) < 0;
	return { currentVersion, latestVersion, needsUpdate, highlights };
}
/**
 * Check for a newer version of task-master-ai.
 *
 * Uses a 1-hour file cache (see readCache/writeCache) to avoid hitting the
 * npm registry on every CLI invocation. On any registry failure this reports
 * "no update available" rather than surfacing an error.
 *
 * NOTE(review): this span contained an interleaved old/new diff; the body
 * below is the cache-first implementation the merge introduces.
 *
 * @param currentVersionOverride - Optional version to compare against
 *   (defaults to the installed package version)
 * @returns UpdateInfo describing whether an update is available
 */
export async function checkForUpdate(
	currentVersionOverride?: string
): Promise<UpdateInfo> {
	const currentVersion = currentVersionOverride || getCurrentVersion();

	// Return cached result if still valid
	const cached = readCache();
	if (cached) {
		return buildUpdateInfo(
			currentVersion,
			cached.latestVersion,
			cached.highlights
		);
	}

	// Fetch from npm registry; on failure report "no update available"
	const latestVersion = await fetchLatestVersion(currentVersion);
	if (!latestVersion) {
		return buildUpdateInfo(currentVersion, currentVersion);
	}

	// Fetch changelog highlights only when an update is actually available
	const needsUpdate = compareVersions(currentVersion, latestVersion) < 0;
	const highlights = needsUpdate
		? await fetchChangelogHighlights(latestVersion)
		: undefined;

	// Cache the successful lookup for the next invocation
	writeCache(latestVersion, highlights);

	return buildUpdateInfo(currentVersion, latestVersion, highlights);
}

View File

@@ -4,6 +4,10 @@ import rootConfig from '../../vitest.config';
/**
* CLI package Vitest configuration
* Extends root config with CLI-specific settings
*
* Integration tests (.test.ts) spawn CLI processes and need more time.
* The 30s timeout is reasonable now that auto-update network calls are skipped
* when TASKMASTER_SKIP_AUTO_UPDATE=1 or NODE_ENV=test.
*/
export default mergeConfig(
rootConfig,
@@ -15,7 +19,10 @@ export default mergeConfig(
'tests/**/*.spec.ts',
'src/**/*.test.ts',
'src/**/*.spec.ts'
]
],
// Integration tests spawn CLI processes - 30s is reasonable with optimized startup
testTimeout: 30000,
hookTimeout: 15000
}
})
);

View File

@@ -24,7 +24,8 @@
"dependencies": {
"@tm/core": "*",
"fastmcp": "^3.23.0",
"zod": "^4.1.11"
"zod": "^4.1.11",
"dotenv": "^16.6.1"
},
"devDependencies": {
"@biomejs/biome": "^1.9.4",

View File

@@ -2,6 +2,7 @@
* Shared utilities for MCP tools
*/
import dotenv from 'dotenv';
import fs from 'node:fs';
import path from 'node:path';
import {
@@ -395,6 +396,13 @@ export function withToolContext<TArgs extends { projectRoot?: string }>(
args: TArgs & { projectRoot: string },
context: Context<undefined>
) => {
// Load project .env if it exists (won't overwrite MCP-provided env vars)
// This ensures project-specific env vars are available to tool execution
const envPath = path.join(args.projectRoot, '.env');
if (fs.existsSync(envPath)) {
dotenv.config({ path: envPath });
}
// Create tmCore instance
const tmCore = await createTmCore({
projectPath: args.projectRoot,

1
package-lock.json generated
View File

@@ -1971,6 +1971,7 @@
"license": "MIT",
"dependencies": {
"@tm/core": "*",
"dotenv": "^16.6.1",
"fastmcp": "^3.23.0",
"zod": "^4.1.11"
},

View File

@@ -0,0 +1,268 @@
/**
* @fileoverview Integration tests for project root detection
*
* These tests verify real-world scenarios for project root detection,
* particularly edge cases around:
* - Empty directories (tm init scenario)
* - .taskmaster in home/parent directories that should be ignored
* - Monorepo detection
*/
import fs from 'node:fs';
import os from 'node:os';
import path from 'node:path';
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import { findProjectRoot } from './project-root-finder.js';
describe('findProjectRoot - Integration Tests', () => {
	// Fresh temp directory per test; every scenario builds its tree under here.
	let tempDir: string;

	beforeEach(() => {
		// Create a temporary directory structure for testing.
		// NOTE(review): os.tmpdir() may be a symlink (e.g. /var -> /private/var
		// on macOS); if findProjectRoot ever resolves real paths, the strict
		// path-equality assertions below could become fragile - confirm on macOS CI.
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'tm-integration-test-'));
	});

	afterEach(() => {
		// Clean up temp directory
		if (fs.existsSync(tempDir)) {
			fs.rmSync(tempDir, { recursive: true, force: true });
		}
	});

	describe('Empty directory scenarios (tm init)', () => {
		it('should find .taskmaster in immediate parent (depth=1) even without boundary marker', () => {
			// Scenario: User is in a subdirectory of a project that only has .taskmaster
			// (no .git or package.json). This is a valid use case for projects where
			// user ran `tm init` but not `git init`.
			//
			// Structure:
			//   /project/.taskmaster
			//   /project/src/          (start here)
			//
			// NOTE: For `tm init` command specifically, the command should use
			// process.cwd() directly instead of findProjectRoot() to avoid
			// finding a stray .taskmaster in a parent directory.
			const projectDir = tempDir;
			const srcDir = path.join(projectDir, 'src');
			fs.mkdirSync(path.join(projectDir, '.taskmaster'));
			fs.mkdirSync(srcDir);

			const result = findProjectRoot(srcDir);

			// Immediate parent (depth=1) is trusted even without boundary marker
			expect(result).toBe(projectDir);
		});

		it('should return startDir when running from completely empty directory', () => {
			// Scenario: User runs `tm init` in a brand new, completely empty directory.
			// No markers anywhere in the tree.
			//
			// Structure:
			//   /tmp/test/empty-project/   (empty)
			const emptyProjectDir = path.join(tempDir, 'empty-project');
			fs.mkdirSync(emptyProjectDir);

			const result = findProjectRoot(emptyProjectDir);

			// Should return the empty directory itself
			expect(result).toBe(emptyProjectDir);
		});

		it('should return startDir when no boundary markers exist but .taskmaster exists in distant parent', () => {
			// Scenario: Deep directory structure with .taskmaster only at top level.
			// No boundary markers (.git, package.json, etc.) anywhere.
			//
			// Structure:
			//   /tmp/home/.taskmaster             (should be IGNORED - too far up)
			//   /tmp/home/code/projects/my-app/   (empty - no markers)
			const homeDir = tempDir;
			const deepProjectDir = path.join(homeDir, 'code', 'projects', 'my-app');
			fs.mkdirSync(path.join(homeDir, '.taskmaster'));
			fs.mkdirSync(deepProjectDir, { recursive: true });

			const result = findProjectRoot(deepProjectDir);

			// Should return the deep directory, not home
			expect(result).toBe(deepProjectDir);
		});
	});

	describe('Project with boundary markers', () => {
		it('should find .taskmaster when at same level as .git', () => {
			// Scenario: Normal project setup with both .taskmaster and .git
			//
			// Structure:
			//   /tmp/project/.taskmaster
			//   /tmp/project/.git
			const projectDir = tempDir;
			fs.mkdirSync(path.join(projectDir, '.taskmaster'));
			fs.mkdirSync(path.join(projectDir, '.git'));

			const result = findProjectRoot(projectDir);

			expect(result).toBe(projectDir);
		});

		it('should find .taskmaster in parent when subdirectory has no markers', () => {
			// Scenario: Running from a subdirectory of an initialized project
			//
			// Structure:
			//   /tmp/project/.taskmaster
			//   /tmp/project/.git
			//   /tmp/project/src/components/   (no markers)
			const projectDir = tempDir;
			const srcDir = path.join(projectDir, 'src', 'components');
			fs.mkdirSync(path.join(projectDir, '.taskmaster'));
			fs.mkdirSync(path.join(projectDir, '.git'));
			fs.mkdirSync(srcDir, { recursive: true });

			const result = findProjectRoot(srcDir);

			expect(result).toBe(projectDir);
		});

		it('should stop at .git and NOT find .taskmaster beyond it', () => {
			// Scenario: Project has .git but no .taskmaster, parent has .taskmaster.
			// Should use project with .git, not parent with .taskmaster.
			//
			// Structure:
			//   /tmp/home/.taskmaster         (should be IGNORED)
			//   /tmp/home/my-project/.git     (boundary marker)
			const homeDir = tempDir;
			const projectDir = path.join(homeDir, 'my-project');
			fs.mkdirSync(path.join(homeDir, '.taskmaster'));
			fs.mkdirSync(path.join(projectDir, '.git'), { recursive: true });

			const result = findProjectRoot(projectDir);

			expect(result).toBe(projectDir);
		});

		it('should stop at package.json and NOT find .taskmaster beyond it', () => {
			// Scenario: JS project with package.json but no .taskmaster
			//
			// Structure:
			//   /tmp/home/.taskmaster                (should be IGNORED)
			//   /tmp/home/my-project/package.json    (boundary)
			const homeDir = tempDir;
			const projectDir = path.join(homeDir, 'my-project');
			fs.mkdirSync(path.join(homeDir, '.taskmaster'));
			fs.mkdirSync(projectDir);
			fs.writeFileSync(path.join(projectDir, 'package.json'), '{}');

			const result = findProjectRoot(projectDir);

			expect(result).toBe(projectDir);
		});
	});

	describe('Monorepo scenarios', () => {
		it('should find monorepo root .taskmaster from package subdirectory', () => {
			// Scenario: Monorepo with .taskmaster at root, packages without their own markers
			//
			// Structure:
			//   /tmp/monorepo/.taskmaster
			//   /tmp/monorepo/.git
			//   /tmp/monorepo/packages/my-package/src/
			const monorepoRoot = tempDir;
			const packageSrcDir = path.join(
				monorepoRoot,
				'packages',
				'my-package',
				'src'
			);
			fs.mkdirSync(path.join(monorepoRoot, '.taskmaster'));
			fs.mkdirSync(path.join(monorepoRoot, '.git'));
			fs.mkdirSync(packageSrcDir, { recursive: true });

			const result = findProjectRoot(packageSrcDir);

			expect(result).toBe(monorepoRoot);
		});

		it('should return package root when package has its own boundary marker', () => {
			// Scenario: Monorepo where individual package has its own package.json.
			// Package should be treated as its own project.
			//
			// Structure:
			//   /tmp/monorepo/.taskmaster
			//   /tmp/monorepo/packages/my-package/package.json   (boundary)
			const monorepoRoot = tempDir;
			const packageDir = path.join(monorepoRoot, 'packages', 'my-package');
			fs.mkdirSync(path.join(monorepoRoot, '.taskmaster'));
			fs.mkdirSync(packageDir, { recursive: true });
			fs.writeFileSync(path.join(packageDir, 'package.json'), '{}');

			const result = findProjectRoot(packageDir);

			// Package has its own boundary, so it should be returned
			expect(result).toBe(packageDir);
		});

		it('should return package root when package has its own .taskmaster', () => {
			// Scenario: Nested Task Master initialization
			//
			// Structure:
			//   /tmp/monorepo/.taskmaster
			//   /tmp/monorepo/packages/my-package/.taskmaster
			const monorepoRoot = tempDir;
			const packageDir = path.join(monorepoRoot, 'packages', 'my-package');
			fs.mkdirSync(path.join(monorepoRoot, '.taskmaster'));
			fs.mkdirSync(path.join(packageDir, '.taskmaster'), { recursive: true });

			const result = findProjectRoot(packageDir);

			// Package has its own .taskmaster, so it should be returned
			expect(result).toBe(packageDir);
		});
	});

	describe('Environment variable loading context', () => {
		it('should document that .env loading should use process.cwd(), not findProjectRoot()', () => {
			// IMPORTANT: For .env loading (e.g., TM_BASE_DOMAIN for auth),
			// the code should use process.cwd() directly, NOT findProjectRoot().
			//
			// findProjectRoot() is designed to find the .taskmaster directory
			// for task storage. It will traverse up to find .taskmaster in parent
			// directories when appropriate.
			//
			// For environment variables, we want to load from WHERE the user
			// is running the command, not where .taskmaster is located.
			//
			// This test documents this design decision and verifies findProjectRoot
			// behavior when .taskmaster is in immediate parent (depth=1).
			const projectDir = tempDir;
			const subDir = path.join(projectDir, 'subdir');
			fs.mkdirSync(path.join(projectDir, '.taskmaster'));
			fs.mkdirSync(subDir);

			const result = findProjectRoot(subDir);

			// findProjectRoot WILL find parent's .taskmaster (depth=1 is trusted).
			// This is correct for task storage - we want to use the parent's tasks.json.
			// For .env loading, callers should use process.cwd() instead.
			expect(result).toBe(projectDir);
		});
	});
});

View File

@@ -1,8 +1,3 @@
/**
* @fileoverview Project root detection utilities
* Provides functionality to locate project roots by searching for marker files/directories
*/
import fs from 'node:fs';
import path from 'node:path';
import {
@@ -11,9 +6,6 @@ import {
TASKMASTER_PROJECT_MARKERS
} from '../constants/paths.js';
/**
* Check if a marker file/directory exists at the given path
*/
function markerExists(dir: string, marker: string): boolean {
try {
return fs.existsSync(path.join(dir, marker));
@@ -22,170 +14,101 @@ function markerExists(dir: string, marker: string): boolean {
}
}
/**
 * True when at least one of the given marker files/directories exists in `dir`.
 */
function hasAnyMarker(dir: string, markers: readonly string[]): boolean {
	for (const marker of markers) {
		if (markerExists(dir, marker)) return true;
	}
	return false;
}
/**
* Find the project root directory by looking for project markers
* Traverses upwards from startDir until a project marker is found or filesystem root is reached
* Limited to 50 parent directory levels to prevent excessive traversal
* Find the project root by traversing upward from startDir looking for project markers.
*
* Strategy:
* 1. PASS 1: Search for .taskmaster markers, but STOP at project boundaries
* - If .taskmaster found, return that directory
* - If a project boundary (package.json, .git, lock files) is found WITHOUT .taskmaster,
* stop searching further up (prevents finding .taskmaster in home directory)
* 2. PASS 2: If no .taskmaster found, search for other project markers
*
* This ensures:
* - .taskmaster in a parent directory takes precedence (within project boundary)
* - .taskmaster outside the project boundary (e.g., home dir) is NOT returned
*
* @param startDir - Directory to start searching from (defaults to process.cwd())
* @returns Project root path (falls back to startDir if no markers found)
*
* @example
* ```typescript
* // In a monorepo structure:
* // /project/.taskmaster
* // /project/packages/my-package/.git
* // When called from /project/packages/my-package:
* const root = findProjectRoot(); // Returns /project (not /project/packages/my-package)
*
* // When .taskmaster is outside project boundary:
* // /home/user/.taskmaster (should be ignored!)
* // /home/user/code/myproject/package.json
* // When called from /home/user/code/myproject:
* const root = findProjectRoot(); // Returns /home/user/code/myproject (NOT /home/user)
* ```
* Search strategy prevents false matches from stray .taskmaster dirs (e.g., in home):
* 1. If startDir has .taskmaster or a boundary marker (.git, package.json), return immediately
* 2. Search parents for .taskmaster anchored by a boundary marker (or 1 level up without boundary)
* 3. Fall back to searching for other project markers (pyproject.toml, Cargo.toml, etc.)
* 4. If nothing found, return startDir (supports `tm init` in empty directories)
*/
export function findProjectRoot(startDir: string = process.cwd()): string {
let currentDir = path.resolve(startDir);
const rootDir = path.parse(currentDir).root;
const maxDepth = 50; // Reasonable limit to prevent infinite loops
const maxDepth = 50;
let depth = 0;
// Track if we've seen a project boundary - we'll stop searching for .taskmaster beyond it
let projectBoundaryDir: string | null = null;
// FIRST PASS: Search for Task Master markers, but respect project boundaries
// A project boundary is a directory containing .git, package.json, lock files, etc.
// If we find a boundary without .taskmaster, we stop searching further up
let searchDir = currentDir;
depth = 0;
// Check startDir first - if it has .taskmaster or a boundary marker, we're done
if (hasAnyMarker(currentDir, TASKMASTER_PROJECT_MARKERS)) {
return currentDir;
}
if (hasAnyMarker(currentDir, PROJECT_BOUNDARY_MARKERS)) {
return currentDir;
}
// Search parent directories for .taskmaster
let searchDir = path.dirname(currentDir);
depth = 1;
while (depth < maxDepth) {
// First, check for Task Master markers in this directory
for (const marker of TASKMASTER_PROJECT_MARKERS) {
if (markerExists(searchDir, marker)) {
// Found a Task Master marker - this is our project root
const hasTaskmaster = hasAnyMarker(searchDir, TASKMASTER_PROJECT_MARKERS);
const hasBoundary = hasAnyMarker(searchDir, PROJECT_BOUNDARY_MARKERS);
if (hasTaskmaster) {
// Accept .taskmaster if anchored by boundary or only 1 level up
if (hasBoundary || depth === 1) {
return searchDir;
}
// Distant .taskmaster without boundary is likely stray (e.g., home dir) - skip it
}
// Check if this directory is a project boundary
// (has markers like .git, package.json, lock files, etc.)
if (hasAnyMarker(searchDir, PROJECT_BOUNDARY_MARKERS)) {
// This is a project boundary - record it and STOP looking for .taskmaster
// beyond this point. The .taskmaster in home directory should NOT be found
// when the user is inside a different project.
if (hasBoundary && !hasTaskmaster) {
// Hit project boundary without .taskmaster - stop searching upward
projectBoundaryDir = searchDir;
break; // Stop Pass 1 - don't look for .taskmaster beyond this boundary
}
// If we're at root, stop after checking it
if (searchDir === rootDir) {
break;
}
// Move up one directory level
if (searchDir === rootDir) break;
const parentDir = path.dirname(searchDir);
// Safety check: if dirname returns the same path, we've hit the root
if (parentDir === searchDir) {
break;
}
if (parentDir === searchDir) break;
searchDir = parentDir;
depth++;
}
// SECOND PASS: No Task Master markers found within project boundary
// Now search for other project markers starting from the original directory
// If we found a project boundary in Pass 1, start from there (it will match immediately)
// No .taskmaster found - search for other project markers
currentDir = projectBoundaryDir || path.resolve(startDir);
depth = 0;
while (depth < maxDepth) {
for (const marker of OTHER_PROJECT_MARKERS) {
if (markerExists(currentDir, marker)) {
// Found another project marker - return this as project root
return currentDir;
}
if (hasAnyMarker(currentDir, OTHER_PROJECT_MARKERS)) {
return currentDir;
}
// If we're at root, stop after checking it
if (currentDir === rootDir) {
break;
}
if (currentDir === rootDir) break;
// Move up one directory level
const parentDir = path.dirname(currentDir);
// Safety check: if dirname returns the same path, we've hit the root
if (parentDir === currentDir) {
break;
}
if (parentDir === currentDir) break;
currentDir = parentDir;
depth++;
}
// Fallback to startDir if no project root found
// This handles empty repos or directories with no recognized project markers
// (e.g., a repo with just a .env file should still use that directory as root)
return path.resolve(startDir);
}
/**
 * Strip .taskmaster (and anything after it) from a path.
 * Prevents double .taskmaster paths when combining with constants that
 * already include .taskmaster.
 *
 * @param projectRoot - The project root path to normalize (null/undefined -> '')
 * @returns Normalized project root path
 *
 * @example
 * ```typescript
 * normalizeProjectRoot('/project/.taskmaster');       // '/project'
 * normalizeProjectRoot('/project');                   // '/project'
 * normalizeProjectRoot('/project/.taskmaster/tasks'); // '/project'
 * ```
 */
export function normalizeProjectRoot(
	projectRoot: string | null | undefined
): string {
	if (!projectRoot) return '';

	// projectRoot is narrowed to string here; no String() coercion needed
	const segments = projectRoot.split(path.sep);
	const taskmasterIndex = segments.findIndex((s) => s === '.taskmaster');
	if (taskmasterIndex !== -1) {
		// Keep everything before .taskmaster; '|| path.sep' covers the case
		// where .taskmaster sits directly under the filesystem root
		return segments.slice(0, taskmasterIndex).join(path.sep) || path.sep;
	}
	return projectRoot;
}

View File

@@ -359,10 +359,14 @@ function _getVertexConfiguration(projectRoot, session) {
`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`
);
const credentials = credentialsPath
? { keyFile: credentialsPath }
: undefined;
return {
projectId,
location,
...(credentialsPath && { credentials: { credentialsFromEnv: true } })
...(credentials && { credentials })
};
}

View File

@@ -5402,9 +5402,16 @@ async function runCLI(argv = process.argv) {
displayBanner();
}
// Check for updates BEFORE executing the command
// Check for updates BEFORE executing the command (skip entirely in test/CI mode)
const skipAutoUpdate =
process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' ||
process.env.CI ||
process.env.NODE_ENV === 'test';
const currentVersion = getTaskMasterVersion();
const updateInfo = await checkForUpdate(currentVersion);
const updateInfo = skipAutoUpdate
? { currentVersion, latestVersion: currentVersion, needsUpdate: false }
: await checkForUpdate(currentVersion);
if (updateInfo.needsUpdate) {
// Display the upgrade notification first

View File

@@ -719,11 +719,11 @@ function getVertexProjectId(explicitRoot = null) {
/**
* Gets the Google Cloud location for Vertex AI from configuration
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string} The location or default value of "us-central1"
* @returns {string} The location
*/
function getVertexLocation(explicitRoot = null) {
// Return value from config or default
return getGlobalConfig(explicitRoot).vertexLocation || 'us-central1';
return getGlobalConfig(explicitRoot).vertexLocation;
}
function getResponseLanguage(explicitRoot = null) {

View File

@@ -15,6 +15,32 @@ import {
import { log } from '../../scripts/modules/utils.js';
import { BaseAIProvider } from './base-provider.js';
/**
 * OpenAI model reasoning effort support.
 * Different models support different reasoning effort levels.
 * This is provider-specific logic that belongs here, not in the general model catalog.
 *
 * Keys are model IDs; values list the supported efforts in ascending order.
 *
 * See: https://platform.openai.com/docs/guides/reasoning
 */
const REASONING_EFFORT_SUPPORT = {
	// GPT-5.1 base does not support xhigh
	'gpt-5.1': ['none', 'low', 'medium', 'high'],
	// GPT-5.1 Codex Max supports full range
	'gpt-5.1-codex-max': ['none', 'low', 'medium', 'high', 'xhigh'],
	// GPT-5.2 supports full range
	'gpt-5.2': ['none', 'low', 'medium', 'high', 'xhigh'],
	// GPT-5.2 Pro only supports medium and above
	'gpt-5.2-pro': ['medium', 'high', 'xhigh'],
	// GPT-5 supports full range
	'gpt-5': ['none', 'low', 'medium', 'high', 'xhigh']
};

// Conservative default for models not explicitly listed above (no xhigh)
const DEFAULT_REASONING_EFFORTS = ['none', 'low', 'medium', 'high'];

// Canonical ordering for effort levels (lowest to highest); used to compare
// and cap requested efforts in _getValidatedReasoningEffort
const EFFORT_ORDER = ['none', 'low', 'medium', 'high', 'xhigh'];
export class CodexCliProvider extends BaseAIProvider {
constructor() {
super();
@@ -80,10 +106,54 @@ export class CodexCliProvider extends BaseAIProvider {
}
}
/**
* Gets a validated reasoningEffort for the model.
* If no effort is specified, returns the model's highest supported effort.
* If an unsupported effort is specified, caps it to the highest supported.
* @param {string} modelId - The model ID to check
* @param {string} [requestedEffort] - The requested reasoning effort (optional)
* @returns {string} The validated reasoning effort
*/
_getValidatedReasoningEffort(modelId, requestedEffort) {
// Get supported efforts for this model, or use defaults
const supportedEfforts =
REASONING_EFFORT_SUPPORT[modelId] || DEFAULT_REASONING_EFFORTS;
// Get the highest supported effort for this model
const highestSupported = supportedEfforts.reduce((highest, effort) => {
const currentIndex = EFFORT_ORDER.indexOf(effort);
const highestIndex = EFFORT_ORDER.indexOf(highest);
return currentIndex > highestIndex ? effort : highest;
}, supportedEfforts[0]);
// If no effort requested, use the model's highest supported
if (!requestedEffort) {
log(
'debug',
`No reasoning effort specified for ${modelId}. Using '${highestSupported}'.`
);
return highestSupported;
}
// If the requested effort is supported, use it
if (supportedEfforts.includes(requestedEffort)) {
return requestedEffort;
}
// Cap to the highest supported effort
log(
'warn',
`Reasoning effort '${requestedEffort}' not supported by ${modelId}. Using '${highestSupported}' instead.`
);
return highestSupported;
}
/**
* Creates a Codex CLI client instance
* @param {object} params
* @param {string} [params.commandName] - Command name for settings lookup
* @param {string} [params.modelId] - Model ID for capability validation
* @param {string} [params.apiKey] - Optional API key (injected as OPENAI_API_KEY for Codex CLI)
* @returns {Function}
*/
@@ -92,9 +162,16 @@ export class CodexCliProvider extends BaseAIProvider {
// Merge global + command-specific settings from config
const settings = getCodexCliSettingsForCommand(params.commandName) || {};
// Get validated reasoningEffort - always pass to override Codex CLI global config
const validatedReasoningEffort = this._getValidatedReasoningEffort(
params.modelId,
settings.reasoningEffort
);
// Inject API key only if explicitly provided; OAuth is the primary path
const defaultSettings = {
...settings,
reasoningEffort: validatedReasoningEffort,
...(params.apiKey
? { env: { ...(settings.env || {}), OPENAI_API_KEY: params.apiKey } }
: {})

View File

@@ -48,30 +48,71 @@ export class VertexAIProvider extends BaseAIProvider {
return 'GOOGLE_API_KEY';
}
/**
 * Whether an explicit API key must be present for this provider.
 * Returns false for Vertex AI: Service Account credentials
 * (GOOGLE_APPLICATION_CREDENTIALS) can be used instead of an API key.
 * @returns {boolean} Always false — an API key is optional here.
 */
isRequiredApiKey() {
return false;
}
/**
 * Whether some form of authentication must be supplied.
 * Returns true for Vertex AI: either an API key or Service Account
 * credentials is mandatory (API key alone is optional, auth is not).
 * @returns {boolean} Always true — authentication is required.
 */
isAuthenticationRequired() {
return true;
}
/**
* Validates that a credential value is present and non-empty.
* @private
* @param {string|object|null|undefined} value
* @returns {boolean}
*/
isValidCredential(value) {
if (!value) return false;
if (typeof value === 'string') {
return value.trim().length > 0;
}
return typeof value === 'object';
}
/**
* Validates Vertex AI-specific authentication parameters
* @param {object} params - Parameters to validate
* @throws {Error} If required parameters are missing
* @throws {VertexAuthError|VertexConfigError}
*/
validateAuth(params) {
const { apiKey, projectId, location, credentials } = params;
// Check for API key OR service account credentials
if (!apiKey && !credentials) {
const hasValidApiKey = this.isValidCredential(apiKey);
const hasValidCredentials = this.isValidCredential(credentials);
if (!hasValidApiKey && !hasValidCredentials) {
throw new VertexAuthError(
'Either Google API key (GOOGLE_API_KEY) or service account credentials (GOOGLE_APPLICATION_CREDENTIALS) is required for Vertex AI'
'Vertex AI requires authentication. Provide one of the following:\n' +
' • GOOGLE_API_KEY environment variable (typical for API-based auth), OR\n' +
' • GOOGLE_APPLICATION_CREDENTIALS pointing to a service account JSON file (recommended for production)'
);
}
// Project ID is required for Vertex AI
if (!projectId) {
if (
!projectId ||
(typeof projectId === 'string' && projectId.trim().length === 0)
) {
throw new VertexConfigError(
'Google Cloud project ID is required for Vertex AI. Set VERTEX_PROJECT_ID environment variable.'
);
}
// Location is required for Vertex AI
if (!location) {
if (
!location ||
(typeof location === 'string' && location.trim().length === 0)
) {
throw new VertexConfigError(
'Google Cloud location is required for Vertex AI. Set VERTEX_LOCATION environment variable (e.g., "us-central1").'
);
@@ -97,7 +138,11 @@ export class VertexAIProvider extends BaseAIProvider {
// Configure auth options - either API key or service account
const authOptions = {};
if (apiKey) {
authOptions.apiKey = apiKey;
// Vercel AI SDK expects googleAuthOptions even when using apiKey for some configurations
authOptions.googleAuthOptions = {
...credentials,
apiKey
};
} else if (credentials) {
authOptions.googleAuthOptions = credentials;
}
@@ -105,7 +150,7 @@ export class VertexAIProvider extends BaseAIProvider {
// Return Vertex AI client
return createVertex({
...authOptions,
projectId,
project: projectId,
location,
...(baseURL && { baseURL }),
...(fetchImpl && { fetch: fetchImpl })

View File

@@ -1,21 +1,25 @@
import { z } from 'zod';
// Schema that matches the inline AiTaskDataSchema from add-task.js
export const AddTaskResponseSchema = z.object({
title: z.string().describe('Clear, concise title for the task'),
description: z
.string()
.describe('A one or two sentence description of the task'),
details: z
.string()
.describe('In-depth implementation details, considerations, and guidance'),
testStrategy: z
.string()
.describe('Detailed approach for verifying task completion'),
dependencies: z
.array(z.number())
.nullable()
.describe(
'Array of task IDs that this task depends on (must be completed before this task can start)'
)
});
// AI response schema for the add-task command; matches the inline
// AiTaskDataSchema in add-task.js. .strict() adds
// "additionalProperties: false" to the generated JSON Schema, which
// OpenAI's Structured Outputs API requires on every object type;
// other providers ignore the constraint.
export const AddTaskResponseSchema = z
.object({
title: z.string().describe('Clear, concise title for the task'),
description: z
.string()
.describe('A one or two sentence description of the task'),
details: z
.string()
.describe(
'In-depth implementation details, considerations, and guidance'
),
testStrategy: z
.string()
.describe('Detailed approach for verifying task completion'),
dependencies: z
.array(z.number())
.nullable()
.describe(
'Array of task IDs that this task depends on (must be completed before this task can start)'
)
})
.strict();

View File

@@ -1,14 +1,18 @@
import { z } from 'zod';
export const ComplexityAnalysisItemSchema = z.object({
taskId: z.number().int().positive(),
taskTitle: z.string(),
complexityScore: z.number().min(1).max(10),
recommendedSubtasks: z.number().int().nonnegative(),
expansionPrompt: z.string(),
reasoning: z.string()
});
// One per-task complexity verdict from the analyze-complexity command.
// .strict() emits "additionalProperties: false" for OpenAI Structured Outputs.
export const ComplexityAnalysisItemSchema = z
.object({
taskId: z.number().int().positive(),
taskTitle: z.string(),
// 1 (trivial) … 10 (most complex)
complexityScore: z.number().min(1).max(10),
recommendedSubtasks: z.number().int().nonnegative(),
expansionPrompt: z.string(),
reasoning: z.string()
})
.strict();
export const ComplexityAnalysisResponseSchema = z.object({
complexityAnalysis: z.array(ComplexityAnalysisItemSchema)
});
// Top-level AI response wrapper: an array of per-task complexity items.
// .strict() keeps the generated JSON Schema OpenAI-compatible.
export const ComplexityAnalysisResponseSchema = z
.object({
complexityAnalysis: z.array(ComplexityAnalysisItemSchema)
})
.strict();

View File

@@ -1,6 +1,16 @@
import { z } from 'zod';
// Base schemas that will be reused across commands
/**
* Base schemas that will be reused across commands.
*
* IMPORTANT: All object schemas use .strict() to add "additionalProperties: false"
* to the generated JSON Schema. This is REQUIRED for OpenAI's Structured Outputs API,
* which mandates that every object type explicitly includes additionalProperties: false.
* Without .strict(), OpenAI API returns 400 Bad Request errors.
*
* Other providers (Anthropic, Google, etc.) safely ignore this constraint.
* See: https://platform.openai.com/docs/guides/structured-outputs
*/
export const TaskStatusSchema = z.enum([
'pending',
'in-progress',
@@ -10,26 +20,30 @@ export const TaskStatusSchema = z.enum([
'deferred'
]);
export const BaseTaskSchema = z.object({
id: z.number().int().positive(),
title: z.string().min(1).max(200),
description: z.string().min(1),
status: TaskStatusSchema,
dependencies: z.array(z.union([z.number().int(), z.string()])).default([]),
priority: z
.enum(['low', 'medium', 'high', 'critical'])
.nullable()
.default(null),
details: z.string().nullable().default(null),
testStrategy: z.string().nullable().default(null)
});
// Canonical task shape shared by the AI commands. .strict() adds
// "additionalProperties: false", required by OpenAI Structured Outputs.
export const BaseTaskSchema = z
.object({
id: z.number().int().positive(),
title: z.string().min(1).max(200),
description: z.string().min(1),
status: TaskStatusSchema,
// Dependency IDs may be numeric task IDs or string subtask IDs (e.g. "3.2")
dependencies: z.array(z.union([z.number().int(), z.string()])).default([]),
priority: z
.enum(['low', 'medium', 'high', 'critical'])
.nullable()
.default(null),
details: z.string().nullable().default(null),
testStrategy: z.string().nullable().default(null)
})
.strict();
export const SubtaskSchema = z.object({
id: z.number().int().positive(),
title: z.string().min(5).max(200),
description: z.string().min(10),
dependencies: z.array(z.number().int()).default([]),
details: z.string().min(20),
status: z.enum(['pending', 'done', 'completed']).default('pending'),
testStrategy: z.string().nullable().default(null)
});
// Subtask shape used by expand/update commands. Minimum lengths force the
// model to produce substantive content. .strict() keeps the JSON Schema
// OpenAI Structured Outputs-compatible.
export const SubtaskSchema = z
.object({
id: z.number().int().positive(),
title: z.string().min(5).max(200),
description: z.string().min(10),
dependencies: z.array(z.number().int()).default([]),
details: z.string().min(20),
status: z.enum(['pending', 'done', 'completed']).default('pending'),
testStrategy: z.string().nullable().default(null)
})
.strict();

View File

@@ -1,6 +1,8 @@
import { z } from 'zod';
import { SubtaskSchema } from './base-schemas.js';
export const ExpandTaskResponseSchema = z.object({
subtasks: z.array(SubtaskSchema)
});
// AI response for expand-task: the generated subtasks for one task.
// .strict() keeps the generated JSON Schema OpenAI-compatible.
export const ExpandTaskResponseSchema = z
.object({
subtasks: z.array(SubtaskSchema)
})
.strict();

View File

@@ -1,18 +1,22 @@
import { z } from 'zod';
// Schema for a single task from PRD parsing
const PRDSingleTaskSchema = z.object({
id: z.number().int().positive(),
title: z.string().min(1),
description: z.string().min(1),
details: z.string().nullable(),
testStrategy: z.string().nullable(),
priority: z.enum(['high', 'medium', 'low']).nullable(),
dependencies: z.array(z.number().int().positive()).nullable(),
status: z.string().nullable()
});
// Schema for a single task from PRD parsing. .strict() adds
// "additionalProperties: false" for OpenAI Structured Outputs compatibility.
const PRDSingleTaskSchema = z
.object({
id: z.number().int().positive(),
title: z.string().min(1),
description: z.string().min(1),
details: z.string().nullable(),
testStrategy: z.string().nullable(),
priority: z.enum(['high', 'medium', 'low']).nullable(),
dependencies: z.array(z.number().int().positive()).nullable(),
status: z.string().nullable()
})
.strict();
// Schema for the AI response - only expects tasks array since metadata is generated by the code
export const ParsePRDResponseSchema = z.object({
tasks: z.array(PRDSingleTaskSchema)
});
// AI response for parse-prd — only a tasks array; metadata is generated by
// the code, not the model. .strict() keeps the JSON Schema OpenAI-compatible.
export const ParsePRDResponseSchema = z
.object({
tasks: z.array(PRDSingleTaskSchema)
})
.strict();

View File

@@ -1,6 +1,8 @@
import { z } from 'zod';
import { SubtaskSchema } from './base-schemas.js';
export const UpdateSubtaskResponseSchema = z.object({
subtask: SubtaskSchema
});
// AI response for update-subtask: the single rewritten subtask.
// .strict() keeps the generated JSON Schema OpenAI-compatible.
export const UpdateSubtaskResponseSchema = z
.object({
subtask: SubtaskSchema
})
.strict();

View File

@@ -3,8 +3,10 @@ import { BaseTaskSchema, SubtaskSchema } from './base-schemas.js';
export const UpdatedTaskSchema = BaseTaskSchema.extend({
subtasks: z.array(SubtaskSchema).nullable().default(null)
});
}).strict();
export const UpdateTasksResponseSchema = z.object({
tasks: z.array(UpdatedTaskSchema)
});
// AI response for update-tasks: the batch of rewritten tasks.
// .strict() keeps the generated JSON Schema OpenAI-compatible.
export const UpdateTasksResponseSchema = z
.object({
tasks: z.array(UpdatedTaskSchema)
})
.strict();

View File

@@ -93,9 +93,12 @@ describe('initTaskMaster', () => {
});
test('should find project root from deeply nested subdirectory', () => {
// Arrange - Create .taskmaster directory in temp dir
// Arrange - Create .taskmaster directory and a boundary marker in temp dir
// The boundary marker (.git) anchors .taskmaster to prevent false matches
// from stray .taskmaster dirs (e.g., in home directory)
const taskMasterDir = path.join(tempDir, TASKMASTER_DIR);
fs.mkdirSync(taskMasterDir, { recursive: true });
fs.mkdirSync(path.join(tempDir, '.git'), { recursive: true });
// Create deeply nested subdirectory and change to it
const deepDir = path.join(tempDir, 'src', 'components', 'ui');