Compare commits


6 Commits

Author SHA1 Message Date
Ralph Khreish
e42e4d778c chore: fix format 2025-07-11 19:13:58 +03:00
Ralph Khreish
b8649b2f8d fix: mcp bug when expanding task 2025-07-11 19:12:54 +03:00
Ralph Khreish
21392a1117 fix: more regression bugs (#956)
* fix: more regression bugs

* chore: fix format

* chore: fix unit tests

* chore: fix format
2025-07-11 14:23:54 +02:00
Ben Vargas
3e61d26235 fix: resolve path resolution and context gathering errors across multiple commands (#954)
* fix: resolve path resolution issues in parse-prd and analyze-complexity commands

This commit fixes critical path resolution regressions where commands were requiring files they create to already exist.

## Changes Made:

### 1. parse-prd Command (Lines 808, 828-835, 919-921)
**Problem**: Command required tasks.json to exist before it could create it (catch-22)
**Root Cause**: Default value in option definition meant options.output was always set
**Fixes**:
- Removed default value from --output option definition (line 808)
- Modified initTaskMaster to only include tasksPath when explicitly specified
- Added null handling for output path with fallback to default location

### 2. analyze-complexity Command (Lines 1637-1640, 1673-1680, 1695-1696)
**Problem**: Command required complexity report file to exist before creating it
**Root Cause**: Default value in option definition meant options.output was always set
**Fixes**:
- Removed default value from --output option definition (lines 1637-1640)
- Modified initTaskMaster to only include complexityReportPath when explicitly specified
- Added null handling for report path with fallback to default location

## Technical Details:

The core issue was that Commander.js option definitions with default values always populate the options object, making conditional checks like `if (options.output)` always true. By removing default values from option definitions, we ensure paths are only included in initTaskMaster when users explicitly provide them.

This approach is cleaner than using boolean flags (true/false) to mark each path as required or optional: it omits the path entirely when it is not needed, letting initTaskMaster fall back to its default behavior.
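For illustration, a minimal, hypothetical Commander.js sketch of that behavior (simplified names and wiring, not the actual registerCommands code): with a default value the parsed options object always carries the path, without one the conditional only fires when the user passes the flag.

```js
// Hypothetical sketch only — not the real command registration.
import { Command } from 'commander';

const program = new Command();

program
  .command('parse-prd')
  // With a default value, options.output is ALWAYS populated, so a check like
  // `if (options.output)` cannot distinguish "user passed -o" from "default applied":
  //   .option('-o, --output <file>', 'Output file path', '.taskmaster/tasks/tasks.json')
  // Without a default, options.output is undefined unless -o/--output is given:
  .option('-o, --output <file>', 'Output file path')
  .action((options) => {
    const initOptions = {};
    // Only forward the path when the user explicitly provided one;
    // otherwise the downstream init uses its own default location.
    if (options.output) {
      initOptions.tasksPath = options.output;
    }
    console.log(initOptions);
  });

program.parse(process.argv);
```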

## Testing:
- parse-prd now works on fresh projects without existing tasks.json
- analyze-complexity creates report file without requiring it to exist
- Commands maintain backward compatibility when paths are explicitly provided

Fixes issues reported in PATH-FIXES.md and extends the solution to other affected commands.

* fix: update expand-task test to match context gathering fix

The test expected gatheredContext to be a string, but the actual
implementation returns an object with a context property. Updated the
ContextGatherer mock to return the correct shape and added the missing
FuzzyTaskSearch mock.
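
A trimmed-down sketch of the mock shape this describes (module paths shortened here; the full test diff appears below): ContextGatherer.gather() now resolves to an object whose context property holds the string, and FuzzyTaskSearch is stubbed out.

```js
// Hypothetical, shortened version of the Jest ESM mocks (real paths are deeper).
import { jest } from '@jest/globals';

jest.unstable_mockModule('scripts/modules/utils/contextGatherer.js', () => ({
  ContextGatherer: jest.fn().mockImplementation(() => ({
    // gather() resolves to an object; callers read contextResult.context
    gather: jest.fn().mockResolvedValue({
      context: 'Mock project context from files'
    })
  }))
}));

jest.unstable_mockModule('scripts/modules/utils/fuzzyTaskSearch.js', () => ({
  FuzzyTaskSearch: jest.fn().mockImplementation(() => ({
    findRelevantTasks: jest.fn().mockReturnValue([]),
    getTaskIds: jest.fn().mockReturnValue([])
  }))
}));
```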

---------

Co-authored-by: Ben Vargas <ben@example.com>
2025-07-11 05:46:28 +02:00
github-actions[bot]
dc5de53dcd docs: Auto-update and format models.md 2025-07-10 09:56:54 +00:00
Ralph Khreish
4312d3bd67 fix: models setup command not working (#952)
* fix: models command not working

* chore: re-order supported models to something that makes more sense

* chore: format
2025-07-10 11:56:41 +02:00
15 changed files with 604 additions and 489 deletions

View File

@@ -4,21 +4,15 @@
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | ---------------------------------------------- | --------- | ---------- | ----------- |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o1 | 0.489 | 15 | 60 |
| openai | o3 | 0.5 | 2 | 8 |
@@ -35,19 +29,21 @@
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| ollama | devstral:latest | | 0 | 0 |
| ollama | qwen3:latest | | 0 | 0 |
| ollama | qwen3:14b | | 0 | 0 |
| ollama | qwen3:32b | | 0 | 0 |
| ollama | mistral-small3.1:latest | | 0 | 0 |
| ollama | llama3.3:latest | | 0 | 0 |
| ollama | phi4:latest | | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| groq | whisper-large-v3 | — | 0.11 | 0 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
@@ -73,57 +69,16 @@
| openrouter | mistralai/devstral-small | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| groq | whisper-large-v3 | | 0.11 | 0 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
## Research Models
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| bedrock | us.deepseek.r1-v1:0 | — | 1.35 | 5.4 |
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar | — | 1 | 1 |
| perplexity | deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
## Fallback Models
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | ---------------------------------------------- | --------- | ---------- | ----------- |
| ollama | devstral:latest | | 0 | 0 |
| ollama | qwen3:latest | | 0 | 0 |
| ollama | qwen3:14b | | 0 | 0 |
| ollama | qwen3:32b | | 0 | 0 |
| ollama | mistral-small3.1:latest | | 0 | 0 |
| ollama | llama3.3:latest | | 0 | 0 |
| ollama | phi4:latest | | 0 | 0 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | | 2 | 10 |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
@@ -132,13 +87,52 @@
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
## Research Models
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar | — | 1 | 1 |
| perplexity | deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| bedrock | us.deepseek.r1-v1:0 | — | 1.35 | 5.4 |
## Fallback Models
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| ----------- | ---------------------------------------------- | --------- | ---------- | ----------- |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o3 | 0.5 | 2 | 8 |
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
@@ -147,18 +141,18 @@
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| ollama | devstral:latest | | 0 | 0 |
| ollama | qwen3:latest | | 0 | 0 |
| ollama | qwen3:14b | | 0 | 0 |
| ollama | qwen3:32b | | 0 | 0 |
| ollama | mistral-small3.1:latest | | 0 | 0 |
| ollama | llama3.3:latest | | 0 | 0 |
| ollama | phi4:latest | | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
@@ -182,15 +176,21 @@
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| ollama | devstral:latest | | 0 | 0 |
| ollama | qwen3:latest | | 0 | 0 |
| ollama | qwen3:14b | | 0 | 0 |
| ollama | qwen3:32b | | 0 | 0 |
| ollama | mistral-small3.1:latest | | 0 | 0 |
| ollama | llama3.3:latest | | 0 | 0 |
| ollama | phi4:latest | | 0 | 0 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |

View File

@@ -805,7 +805,7 @@ function registerCommands(programInstance) {
'-i, --input <file>',
'Path to the PRD file (alternative to positional argument)'
)
.option('-o, --output <file>', 'Output file path', TASKMASTER_TASKS_FILE)
.option('-o, --output <file>', 'Output file path')
.option(
'-n, --num-tasks <number>',
'Number of tasks to generate',
@@ -825,14 +825,18 @@ function registerCommands(programInstance) {
// Initialize TaskMaster
let taskMaster;
try {
taskMaster = initTaskMaster({
prdPath: file || options.input || true,
tasksPath: options.output || true
});
const initOptions = {
prdPath: file || options.input || true
};
// Only include tasksPath if output is explicitly specified
if (options.output) {
initOptions.tasksPath = options.output;
}
taskMaster = initTaskMaster(initOptions);
} catch (error) {
console.log(
boxen(
`${chalk.white.bold('Parse PRD Help')}\n\n${chalk.cyan('Usage:')}\n task-master parse-prd <prd-file.txt> [options]\n\n${chalk.cyan('Options:')}\n -i, --input <file> Path to the PRD file (alternative to positional argument)\n -o, --output <file> Output file path (default: "${TASKMASTER_TASKS_FILE}")\n -n, --num-tasks <number> Number of tasks to generate (default: 10)\n -f, --force Skip confirmation when overwriting existing tasks\n --append Append new tasks to existing tasks.json instead of overwriting\n -r, --research Use Perplexity AI for research-backed task generation\n\n${chalk.cyan('Example:')}\n task-master parse-prd requirements.txt --num-tasks 15\n task-master parse-prd --input=requirements.txt\n task-master parse-prd --force\n task-master parse-prd requirements_v2.txt --append\n task-master parse-prd requirements.txt --research\n\n${chalk.yellow('Note: This command will:')}\n 1. Look for a PRD file at ${TASKMASTER_DOCS_DIR}/PRD.md by default\n 2. Use the file specified by --input or positional argument if provided\n 3. Generate tasks from the PRD and either:\n - Overwrite any existing tasks.json file (default)\n - Append to existing tasks.json if --append is used`,
`${chalk.white.bold('Parse PRD Help')}\n\n${chalk.cyan('Usage:')}\n task-master parse-prd <prd-file.txt> [options]\n\n${chalk.cyan('Options:')}\n -i, --input <file> Path to the PRD file (alternative to positional argument)\n -o, --output <file> Output file path (default: .taskmaster/tasks/tasks.json)\n -n, --num-tasks <number> Number of tasks to generate (default: 10)\n -f, --force Skip confirmation when overwriting existing tasks\n --append Append new tasks to existing tasks.json instead of overwriting\n -r, --research Use Perplexity AI for research-backed task generation\n\n${chalk.cyan('Example:')}\n task-master parse-prd requirements.txt --num-tasks 15\n task-master parse-prd --input=requirements.txt\n task-master parse-prd --force\n task-master parse-prd requirements_v2.txt --append\n task-master parse-prd requirements.txt --research\n\n${chalk.yellow('Note: This command will:')}\n 1. Look for a PRD file at ${TASKMASTER_DOCS_DIR}/PRD.md by default\n 2. Use the file specified by --input or positional argument if provided\n 3. Generate tasks from the PRD and either:\n - Overwrite any existing tasks.json file (default)\n - Append to existing tasks.json if --append is used`,
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
)
);
@@ -912,18 +916,17 @@ function registerCommands(programInstance) {
}
spinner = ora('Parsing PRD and generating tasks...\n').start();
await parsePRD(
taskMaster.getPrdPath(),
taskMaster.getTasksPath(),
numTasks,
{
append: useAppend,
force: useForce,
research: research,
projectRoot: taskMaster.getProjectRoot(),
tag: tag
}
);
// Handle case where getTasksPath() returns null
const outputPath =
taskMaster.getTasksPath() ||
path.join(taskMaster.getProjectRoot(), TASKMASTER_TASKS_FILE);
await parsePRD(taskMaster.getPrdPath(), outputPath, numTasks, {
append: useAppend,
force: useForce,
research: research,
projectRoot: taskMaster.getProjectRoot(),
tag: tag
});
spinner.succeed('Tasks generated successfully!');
} catch (error) {
if (spinner) {
@@ -1497,10 +1500,16 @@ function registerCommands(programInstance) {
.option('--tag <tag>', 'Specify tag context for task operations')
.action(async (options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true,
complexityReportPath: options.report || false
});
const initOptions = {
tasksPath: options.file || true
};
// Only pass complexityReportPath if user provided a custom path
if (options.report && options.report !== COMPLEXITY_REPORT_FILE) {
initOptions.complexityReportPath = options.report;
}
const taskMaster = initTaskMaster(initOptions);
const statusFilter = options.status;
const withSubtasks = options.withSubtasks || false;
@@ -1631,11 +1640,7 @@ function registerCommands(programInstance) {
.description(
`Analyze tasks and generate expansion recommendations${chalk.reset('')}`
)
.option(
'-o, --output <file>',
'Output file path for the report',
COMPLEXITY_REPORT_FILE
)
.option('-o, --output <file>', 'Output file path for the report')
.option(
'-m, --model <model>',
'LLM model to use for analysis (defaults to configured model)'
@@ -1663,10 +1668,14 @@ function registerCommands(programInstance) {
.option('--tag <tag>', 'Specify tag context for task operations')
.action(async (options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true,
complexityReportPath: options.output || true
});
const initOptions = {
tasksPath: options.file || true // Tasks file is required to analyze
};
// Only include complexityReportPath if output is explicitly specified
if (options.output) {
initOptions.complexityReportPath = options.output;
}
const taskMaster = initTaskMaster(initOptions);
const tag = options.tag;
const modelOverride = options.model;
@@ -1681,11 +1690,13 @@ function registerCommands(programInstance) {
displayCurrentTagIndicator(targetTag);
// Tag-aware output file naming: master -> task-complexity-report.json, other tags -> task-complexity-report_tagname.json
const baseOutputPath = taskMaster.getComplexityReportPath();
const baseOutputPath =
taskMaster.getComplexityReportPath() ||
path.join(taskMaster.getProjectRoot(), COMPLEXITY_REPORT_FILE);
const outputPath =
options.output === COMPLEXITY_REPORT_FILE && targetTag !== 'master'
? baseOutputPath.replace('.json', `_${targetTag}.json`)
: baseOutputPath;
: options.output || baseOutputPath;
console.log(
chalk.blue(
@@ -1765,6 +1776,11 @@ function registerCommands(programInstance) {
)
.option('--tag <tag>', 'Specify tag context for task operations')
.action(async (prompt, options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true
});
// Parameter validation
if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
console.error(
@@ -2206,6 +2222,8 @@ ${result.result}
tasksPath: options.file || true
});
const projectRoot = taskMaster.getProjectRoot();
// Show current tag context
displayCurrentTagIndicator(
options.tag || getCurrentTag(taskMaster.getProjectRoot()) || 'master'
@@ -3455,8 +3473,11 @@ Examples:
.action(async (options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true
tasksPath: options.file || false
});
const projectRoot = taskMaster.getProjectRoot();
// Validate flags: cannot use multiple provider flags simultaneously
const providerFlags = [
options.openrouter,
@@ -3485,7 +3506,7 @@ Examples:
// Action 1: Run Interactive Setup
console.log(chalk.blue('Starting interactive model setup...')); // Added feedback
try {
await runInteractiveSetup(projectRoot);
await runInteractiveSetup(taskMaster.getProjectRoot());
// runInteractiveSetup logs its own completion/error messages
} catch (setupError) {
console.error(

View File

@@ -4,7 +4,10 @@ import chalk from 'chalk';
import { z } from 'zod';
import { fileURLToPath } from 'url';
import { log, findProjectRoot, resolveEnvVariable, isEmpty } from './utils.js';
import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js';
import {
LEGACY_CONFIG_FILE,
TASKMASTER_DIR
} from '../../src/constants/paths.js';
import { findConfigPath } from '../../src/utils/path-utils.js';
import {
VALIDATED_PROVIDERS,
@@ -99,17 +102,30 @@ function _loadAndValidateConfig(explicitRoot = null) {
if (rootToUse) {
configSource = `found root (${rootToUse})`;
} else {
// No root found, return defaults immediately
return defaults;
// No root found, use current working directory as fallback
// This prevents infinite loops during initialization
rootToUse = process.cwd();
configSource = `current directory (${rootToUse}) - no project markers found`;
}
}
// ---> End find project root logic <---
// --- Find configuration file using centralized path utility ---
const configPath = findConfigPath(null, { projectRoot: rootToUse });
// --- Find configuration file ---
let configPath = null;
let config = { ...defaults }; // Start with a deep copy of defaults
let configExists = false;
// During initialization (no project markers), skip config file search entirely
const hasProjectMarkers =
fs.existsSync(path.join(rootToUse, TASKMASTER_DIR)) ||
fs.existsSync(path.join(rootToUse, LEGACY_CONFIG_FILE));
if (hasProjectMarkers) {
// Only try to find config if we have project markers
// This prevents the repeated warnings during init
configPath = findConfigPath(null, { projectRoot: rootToUse });
}
if (configPath) {
configExists = true;
const isLegacy = configPath.endsWith(LEGACY_CONFIG_FILE);
@@ -199,11 +215,22 @@ function _loadAndValidateConfig(explicitRoot = null) {
)
);
} else {
console.warn(
chalk.yellow(
`Warning: Configuration file not found at derived root (${rootToUse}). Using defaults.`
)
// Don't warn about missing config during initialization
// Only warn if this looks like an existing project (has .taskmaster dir or legacy config marker)
const hasTaskmasterDir = fs.existsSync(
path.join(rootToUse, TASKMASTER_DIR)
);
const hasLegacyMarker = fs.existsSync(
path.join(rootToUse, LEGACY_CONFIG_FILE)
);
if (hasTaskmasterDir || hasLegacyMarker) {
console.warn(
chalk.yellow(
`Warning: Configuration file not found at derived root (${rootToUse}). Using defaults.`
)
);
}
}
// Keep config as defaults
config = { ...defaults };

View File

@@ -1,89 +1,4 @@
{
"bedrock": [
{
"id": "us.anthropic.claude-3-haiku-20240307-v1:0",
"swe_score": 0.4,
"cost_per_1m_tokens": {
"input": 0.25,
"output": 1.25
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "us.anthropic.claude-3-opus-20240229-v1:0",
"swe_score": 0.725,
"cost_per_1m_tokens": {
"input": 15,
"output": 75
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
"swe_score": 0.49,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
"swe_score": 0.49,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
"swe_score": 0.623,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 65536
},
{
"id": "us.anthropic.claude-3-5-haiku-20241022-v1:0",
"swe_score": 0.4,
"cost_per_1m_tokens": {
"input": 0.8,
"output": 4
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "us.anthropic.claude-opus-4-20250514-v1:0",
"swe_score": 0.725,
"cost_per_1m_tokens": {
"input": 15,
"output": 75
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-sonnet-4-20250514-v1:0",
"swe_score": 0.727,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.deepseek.r1-v1:0",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 1.35,
"output": 5.4
},
"allowed_roles": ["research"],
"max_tokens": 65536
}
],
"anthropic": [
{
"id": "claude-sonnet-4-20250514",
@@ -126,36 +41,60 @@
"max_tokens": 8192
}
],
"azure": [
"claude-code": [
{
"id": "gpt-4o",
"swe_score": 0.332,
"id": "opus",
"swe_score": 0.725,
"cost_per_1m_tokens": {
"input": 2.5,
"output": 10.0
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 16384
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32000
},
{
"id": "gpt-4o-mini",
"swe_score": 0.3,
"id": "sonnet",
"swe_score": 0.727,
"cost_per_1m_tokens": {
"input": 0.15,
"output": 0.6
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 16384
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 64000
}
],
"mcp": [
{
"id": "mcp-sampling",
"swe_score": null,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 100000
}
],
"gemini-cli": [
{
"id": "gemini-2.5-pro",
"swe_score": 0.72,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 65536
},
{
"id": "gpt-4-1",
"swe_score": 0,
"id": "gemini-2.5-flash",
"swe_score": 0.71,
"cost_per_1m_tokens": {
"input": 2.0,
"output": 10.0
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 16384
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 65536
}
],
"openai": [
@@ -320,6 +259,133 @@
"max_tokens": 1048000
}
],
"xai": [
{
"id": "grok-3",
"name": "Grok 3",
"swe_score": null,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072
},
{
"id": "grok-3-fast",
"name": "Grok 3 Fast",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 5,
"output": 25
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072
},
{
"id": "grok-4",
"name": "Grok 4",
"swe_score": null,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072
}
],
"groq": [
{
"id": "llama-3.3-70b-versatile",
"swe_score": 0.55,
"cost_per_1m_tokens": {
"input": 0.59,
"output": 0.79
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "llama-3.1-8b-instant",
"swe_score": 0.32,
"cost_per_1m_tokens": {
"input": 0.05,
"output": 0.08
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 131072
},
{
"id": "llama-4-scout",
"swe_score": 0.45,
"cost_per_1m_tokens": {
"input": 0.11,
"output": 0.34
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "llama-4-maverick",
"swe_score": 0.52,
"cost_per_1m_tokens": {
"input": 0.5,
"output": 0.77
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "mixtral-8x7b-32768",
"swe_score": 0.35,
"cost_per_1m_tokens": {
"input": 0.24,
"output": 0.24
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 32768
},
{
"id": "qwen-qwq-32b-preview",
"swe_score": 0.4,
"cost_per_1m_tokens": {
"input": 0.18,
"output": 0.18
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "deepseek-r1-distill-llama-70b",
"swe_score": 0.52,
"cost_per_1m_tokens": {
"input": 0.75,
"output": 0.99
},
"allowed_roles": ["main", "research"],
"max_tokens": 8192
},
{
"id": "gemma2-9b-it",
"swe_score": 0.3,
"cost_per_1m_tokens": {
"input": 0.2,
"output": 0.2
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 8192
},
{
"id": "whisper-large-v3",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0.11,
"output": 0
},
"allowed_roles": ["main"],
"max_tokens": 0
}
],
"perplexity": [
{
"id": "sonar-pro",
@@ -372,106 +438,6 @@
"max_tokens": 8700
}
],
"xai": [
{
"id": "grok-3",
"name": "Grok 3",
"swe_score": null,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072
},
{
"id": "grok-3-fast",
"name": "Grok 3 Fast",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 5,
"output": 25
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072
},
{
"id": "grok-4",
"name": "Grok 4",
"swe_score": null,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072
}
],
"ollama": [
{
"id": "devstral:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "qwen3:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "qwen3:14b",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "qwen3:32b",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "mistral-small3.1:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "llama3.3:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "phi4:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
}
],
"openrouter": [
{
"id": "google/gemini-2.5-flash-preview-05-20",
@@ -724,151 +690,185 @@
"max_tokens": 32768
}
],
"groq": [
"ollama": [
{
"id": "llama-3.3-70b-versatile",
"swe_score": 0.55,
"cost_per_1m_tokens": {
"input": 0.59,
"output": 0.79
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "llama-3.1-8b-instant",
"swe_score": 0.32,
"cost_per_1m_tokens": {
"input": 0.05,
"output": 0.08
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 131072
},
{
"id": "llama-4-scout",
"swe_score": 0.45,
"cost_per_1m_tokens": {
"input": 0.11,
"output": 0.34
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "llama-4-maverick",
"swe_score": 0.52,
"cost_per_1m_tokens": {
"input": 0.5,
"output": 0.77
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "mixtral-8x7b-32768",
"swe_score": 0.35,
"cost_per_1m_tokens": {
"input": 0.24,
"output": 0.24
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 32768
},
{
"id": "qwen-qwq-32b-preview",
"swe_score": 0.4,
"cost_per_1m_tokens": {
"input": 0.18,
"output": 0.18
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32768
},
{
"id": "deepseek-r1-distill-llama-70b",
"swe_score": 0.52,
"cost_per_1m_tokens": {
"input": 0.75,
"output": 0.99
},
"allowed_roles": ["main", "research"],
"max_tokens": 8192
},
{
"id": "gemma2-9b-it",
"swe_score": 0.3,
"cost_per_1m_tokens": {
"input": 0.2,
"output": 0.2
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 8192
},
{
"id": "whisper-large-v3",
"id": "devstral:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0.11,
"output": 0
},
"allowed_roles": ["main"],
"max_tokens": 0
}
],
"claude-code": [
{
"id": "opus",
"swe_score": 0.725,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32000
"allowed_roles": ["main", "fallback"]
},
{
"id": "sonnet",
"swe_score": 0.727,
"id": "qwen3:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 64000
}
],
"mcp": [
"allowed_roles": ["main", "fallback"]
},
{
"id": "mcp-sampling",
"swe_score": null,
"id": "qwen3:14b",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 100000
}
],
"gemini-cli": [
"allowed_roles": ["main", "fallback"]
},
{
"id": "gemini-2.5-pro",
"swe_score": 0.72,
"id": "qwen3:32b",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "mistral-small3.1:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "llama3.3:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "phi4:latest",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"]
}
],
"azure": [
{
"id": "gpt-4o",
"swe_score": 0.332,
"cost_per_1m_tokens": {
"input": 2.5,
"output": 10.0
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 16384
},
{
"id": "gpt-4o-mini",
"swe_score": 0.3,
"cost_per_1m_tokens": {
"input": 0.15,
"output": 0.6
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 16384
},
{
"id": "gpt-4-1",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 2.0,
"output": 10.0
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 16384
}
],
"bedrock": [
{
"id": "us.anthropic.claude-3-haiku-20240307-v1:0",
"swe_score": 0.4,
"cost_per_1m_tokens": {
"input": 0.25,
"output": 1.25
},
"allowed_roles": ["main", "fallback"]
},
{
"id": "us.anthropic.claude-3-opus-20240229-v1:0",
"swe_score": 0.725,
"cost_per_1m_tokens": {
"input": 15,
"output": 75
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
"swe_score": 0.49,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
"swe_score": 0.49,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
"swe_score": 0.623,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 65536
},
{
"id": "gemini-2.5-flash",
"swe_score": 0.71,
"id": "us.anthropic.claude-3-5-haiku-20241022-v1:0",
"swe_score": 0.4,
"cost_per_1m_tokens": {
"input": 0,
"output": 0
"input": 0.8,
"output": 4
},
"allowed_roles": ["main", "fallback", "research"],
"allowed_roles": ["main", "fallback"]
},
{
"id": "us.anthropic.claude-opus-4-20250514-v1:0",
"swe_score": 0.725,
"cost_per_1m_tokens": {
"input": 15,
"output": 75
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.anthropic.claude-sonnet-4-20250514-v1:0",
"swe_score": 0.727,
"cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "us.deepseek.r1-v1:0",
"swe_score": 0,
"cost_per_1m_tokens": {
"input": 1.35,
"output": 5.4
},
"allowed_roles": ["research"],
"max_tokens": 65536
}
]

View File

@@ -240,7 +240,7 @@ async function analyzeTaskComplexity(options, context = {}) {
tasks: relevantTaskIds,
format: 'research'
});
gatheredContext = contextResult;
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
reportLog(
@@ -406,11 +406,10 @@ async function analyzeTaskComplexity(options, context = {}) {
useResearch: useResearch
};
const variantKey = useResearch ? 'research' : 'default';
const { systemPrompt, userPrompt: prompt } = await promptManager.loadPrompt(
'analyze-complexity',
promptParams,
variantKey
'default'
);
let loadingIndicator = null;

View File

@@ -369,7 +369,7 @@ async function expandTask(
tasks: finalTaskIds,
format: 'research'
});
gatheredContext = contextResult;
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
logger.warn(`Could not gather context: ${contextError.message}`);
@@ -461,19 +461,44 @@ async function expandTask(
`${combinedAdditionalContext}\n\n# Project Context\n\n${gatheredContext}`.trim();
}
// Ensure expansionPrompt is a string (handle both string and object formats)
let expansionPromptText = undefined;
if (taskAnalysis?.expansionPrompt) {
if (typeof taskAnalysis.expansionPrompt === 'string') {
expansionPromptText = taskAnalysis.expansionPrompt;
} else if (
typeof taskAnalysis.expansionPrompt === 'object' &&
taskAnalysis.expansionPrompt.text
) {
expansionPromptText = taskAnalysis.expansionPrompt.text;
}
}
// Ensure gatheredContext is a string (handle both string and object formats)
let gatheredContextText = gatheredContext;
if (typeof gatheredContext === 'object' && gatheredContext !== null) {
if (gatheredContext.data) {
gatheredContextText = gatheredContext.data;
} else if (gatheredContext.text) {
gatheredContextText = gatheredContext.text;
} else {
gatheredContextText = JSON.stringify(gatheredContext);
}
}
const promptParams = {
task: task,
subtaskCount: finalSubtaskCount,
nextSubtaskId: nextSubtaskId,
additionalContext: additionalContext,
complexityReasoningContext: complexityReasoningContext,
gatheredContext: gatheredContext,
gatheredContext: gatheredContextText || '',
useResearch: useResearch,
expansionPrompt: taskAnalysis?.expansionPrompt || null
expansionPrompt: expansionPromptText || undefined
};
let variantKey = 'default';
if (taskAnalysis?.expansionPrompt) {
if (expansionPromptText) {
variantKey = 'complexity-report';
logger.info(
`Using expansion prompt from complexity report for task ${task.id}.`

View File

@@ -205,12 +205,10 @@ async function performResearch(
}
};
// Select variant based on detail level
const variantKey = detailLevel; // 'low', 'medium', or 'high'
// Load prompts - the research template handles detail level internally
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
'research',
promptParams,
variantKey
promptParams
);
// Count tokens for system and user prompts

View File

@@ -161,7 +161,7 @@ async function updateSubtaskById(
tasks: finalTaskIds,
format: 'research'
});
gatheredContext = contextResult;
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
report('warn', `Could not gather context: ${contextError.message}`);
@@ -214,7 +214,7 @@ async function updateSubtaskById(
title: parentTask.subtasks[subtaskIndex - 1].title,
status: parentTask.subtasks[subtaskIndex - 1].status
}
: null;
: undefined;
const nextSubtask =
subtaskIndex < parentTask.subtasks.length - 1
? {
@@ -222,7 +222,7 @@ async function updateSubtaskById(
title: parentTask.subtasks[subtaskIndex + 1].title,
status: parentTask.subtasks[subtaskIndex + 1].status
}
: null;
: undefined;
// Build prompts using PromptManager
const promptManager = getPromptManager();

View File

@@ -346,7 +346,7 @@ async function updateTaskById(
tasks: finalTaskIds,
format: 'research'
});
gatheredContext = contextResult;
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
report('warn', `Could not gather context: ${contextError.message}`);

View File

@@ -300,7 +300,7 @@ async function updateTasks(
tasks: finalTaskIds,
format: 'research'
});
gatheredContext = contextResult; // contextResult is a string
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
logFn(

View File

@@ -218,7 +218,16 @@ export function initTaskMaster(overrides = {}) {
);
}
// Remaining paths - only resolve if key exists in overrides
// Always set default paths first
// These can be overridden below if needed
paths.configPath = path.join(paths.projectRoot, TASKMASTER_CONFIG_FILE);
paths.statePath = path.join(
paths.taskMasterDir || path.join(paths.projectRoot, TASKMASTER_DIR),
'state.json'
);
paths.tasksPath = path.join(paths.projectRoot, TASKMASTER_TASKS_FILE);
// Handle overrides - only validate/resolve if explicitly provided
if ('configPath' in overrides) {
paths.configPath = resolvePath(
'config file',

View File

@@ -557,7 +557,10 @@ describe('getConfig Tests', () => {
// Assert
expect(config).toEqual(DEFAULT_CONFIG);
expect(mockFindProjectRoot).not.toHaveBeenCalled(); // Explicit root provided
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
// The implementation checks for .taskmaster directory first
expect(fsExistsSyncSpy).toHaveBeenCalledWith(
path.join(MOCK_PROJECT_ROOT, '.taskmaster')
);
expect(fsReadFileSyncSpy).not.toHaveBeenCalled(); // No read if file doesn't exist
expect(consoleWarnSpy).toHaveBeenCalledWith(
expect.stringContaining('not found at provided project root')

View File

@@ -184,7 +184,7 @@ jest.unstable_mockModule(
);
// Import the mocked modules
const { readJSON, writeJSON, log, CONFIG } = await import(
const { readJSON, writeJSON, log, CONFIG, findTaskById } = await import(
'../../../../../scripts/modules/utils.js'
);
@@ -265,6 +265,13 @@ describe('analyzeTaskComplexity', () => {
_rawTaggedData: sampleTasks
};
});
// Mock findTaskById to return the expected structure
findTaskById.mockImplementation((tasks, taskId) => {
const task = tasks?.find((t) => t.id === parseInt(taskId));
return { task: task || null, originalSubtaskCount: null };
});
generateTextService.mockResolvedValue(sampleApiResponse);
});

View File

@@ -131,7 +131,19 @@ jest.unstable_mockModule(
'../../../../../scripts/modules/utils/contextGatherer.js',
() => ({
ContextGatherer: jest.fn().mockImplementation(() => ({
gather: jest.fn().mockResolvedValue('Mock project context from files')
gather: jest.fn().mockResolvedValue({
context: 'Mock project context from files'
})
}))
})
);
jest.unstable_mockModule(
'../../../../../scripts/modules/utils/fuzzyTaskSearch.js',
() => ({
FuzzyTaskSearch: jest.fn().mockImplementation(() => ({
findRelevantTasks: jest.fn().mockReturnValue([]),
getTaskIds: jest.fn().mockReturnValue([])
}))
})
);

View File

@@ -248,7 +248,7 @@ describe('initTaskMaster', () => {
expect(taskMaster.getTasksPath()).toBeNull();
});
test('should return null when optional files not specified in overrides', () => {
test('should return default paths when optional files not specified in overrides', () => {
// Arrange - Remove all optional files
fs.unlinkSync(tasksPath);
fs.unlinkSync(configPath);
@@ -257,10 +257,16 @@ describe('initTaskMaster', () => {
// Act - Don't specify any optional paths
const taskMaster = initTaskMaster({});
// Assert
expect(taskMaster.getTasksPath()).toBeUndefined();
expect(taskMaster.getConfigPath()).toBeUndefined();
expect(taskMaster.getStatePath()).toBeUndefined();
// Assert - Should return absolute paths with default locations
expect(taskMaster.getTasksPath()).toBe(
path.join(tempDir, TASKMASTER_TASKS_FILE)
);
expect(taskMaster.getConfigPath()).toBe(
path.join(tempDir, TASKMASTER_CONFIG_FILE)
);
expect(taskMaster.getStatePath()).toBe(
path.join(tempDir, TASKMASTER_DIR, 'state.json')
);
});
});
@@ -415,11 +421,19 @@ describe('initTaskMaster', () => {
// Assert
expect(taskMaster.getProjectRoot()).toBe(tempDir);
expect(taskMaster.getTaskMasterDir()).toBe(taskMasterDir);
expect(taskMaster.getTasksPath()).toBeUndefined();
// Default paths are always set for tasks, config, and state
expect(taskMaster.getTasksPath()).toBe(
path.join(tempDir, TASKMASTER_TASKS_FILE)
);
expect(taskMaster.getConfigPath()).toBe(
path.join(tempDir, TASKMASTER_CONFIG_FILE)
);
expect(taskMaster.getStatePath()).toBe(
path.join(taskMasterDir, 'state.json')
);
// PRD and complexity report paths are undefined when not provided
expect(taskMaster.getPrdPath()).toBeUndefined();
expect(taskMaster.getComplexityReportPath()).toBeUndefined();
expect(taskMaster.getConfigPath()).toBeUndefined();
expect(taskMaster.getStatePath()).toBeUndefined();
});
});
});