Compare commits
9 Commits
fix/expand...v0.20.0
| Author | SHA1 | Date |
|---|---|---|
| | 458496e3b6 | |
| | fb92693d81 | |
| | f6ba4a36ee | |
| | baf9bd545a | |
| | fbea48d8ec | |
| | d0fe7dc25a | |
| | f380b8e86c | |
| | bd89061a1d | |
| | 7d5ebf05e3 | |
@@ -1,5 +0,0 @@
----
-"task-master-ai": patch
----
-
-Recover from `@anthropic-ai/claude-code` JSON truncation bug that caused Task Master to crash when handling large (>8 kB) structured responses. The CLI/SDK still truncates, but Task Master now detects the error, preserves buffered text, and returns a usable response instead of throwing.
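For context, a minimal sketch of the recovery idea described in that changeset. The helper name and error shape are assumptions for illustration, not the code in this PR:

```js
// Sketch only: parse a structured response, and if the buffered text looks
// truncated (large payload, parse fails), salvage it instead of throwing.
function parseStructuredResponse(buffered) {
	try {
		return { ok: true, value: JSON.parse(buffered) };
	} catch (err) {
		// Detect the truncation case: a large buffer that ends mid-JSON.
		const looksTruncated =
			buffered.length > 8 * 1024 && err instanceof SyntaxError;
		if (!looksTruncated) throw err;
		// Preserve what was received and return a usable (degraded) response.
		return { ok: false, truncated: true, raw: buffered };
	}
}
```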
@@ -1,5 +0,0 @@
----
-"task-master-ai": patch
----
-
-Update dependency ai-sdk-provider-gemini-cli to 0.0.4 to address a breaking change Google made to the Gemini CLI, and add 'api-key' support in addition to 'gemini-api-key' for better AI SDK compatibility.
@@ -1,9 +0,0 @@
----
-"task-master-ai": minor
----
-
-Add support for xAI Grok 4 model
-
-- Add grok-4 model to xAI provider with $3/$15 per 1M token pricing
-- Enable main, fallback, and research roles for grok-4
-- Max tokens set to 131,072 (matching other xAI models)
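For illustration, a model-registry entry matching these bullets might look roughly like the following; the field names are assumptions, not the project's actual schema:

```js
// Hypothetical shape of a supported-models entry for grok-4, based only on the
// changeset bullets above (roles, max tokens, and per-1M-token pricing).
const grok4 = {
	id: 'grok-4',
	provider: 'xai',
	allowed_roles: ['main', 'fallback', 'research'],
	max_tokens: 131072,
	cost_per_1m_tokens: { input: 3, output: 15 } // USD per 1M tokens
};
```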
@@ -1,8 +0,0 @@
----
-"task-master-ai": minor
----
-
-Add stricter validation and clearer feedback for task priority when adding new tasks
-
-- If a task priority is invalid, it defaults to medium
-- Task priority is now case-insensitive, so HIGH and high are treated as the same value
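A minimal sketch of the validation behavior this changeset describes (illustrative helper, not the PR's actual implementation):

```js
// Priorities are compared case-insensitively; anything unrecognized falls back
// to 'medium', matching the behavior described in the changeset.
const VALID_PRIORITIES = ['high', 'medium', 'low'];

function normalizePriority(priority, fallback = 'medium') {
	const normalized = String(priority ?? '').trim().toLowerCase();
	return VALID_PRIORITIES.includes(normalized) ? normalized : fallback;
}

// normalizePriority('HIGH') === 'high'; normalizePriority('urgent') === 'medium'
```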
@@ -1,5 +0,0 @@
----
-"task-master-ai": minor
----
-
-Add support for MCP Sampling as an AI provider; it requires no API key and uses the client's LLM provider
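Conceptually, an MCP sampling provider delegates generation to the connected client. A rough sketch, assuming a hypothetical `session` wrapper around the MCP `sampling/createMessage` request (not the provider implementation in this PR):

```js
// Ask the connected MCP client to run the completion on its own model, so no
// provider API key is needed on the Task Master side.
async function generateViaMcpSampling(session, prompt) {
	if (!session?.clientCapabilities?.sampling) {
		throw new Error('MCP client does not support sampling');
	}
	const result = await session.request('sampling/createMessage', {
		messages: [{ role: 'user', content: { type: 'text', text: prompt } }],
		maxTokens: 1000
	});
	return result.content; // text produced by the client's LLM
}
```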
@@ -1,5 +0,0 @@
----
-"task-master-ai": patch
----
-
-Unify and streamline profile system architecture for improved maintainability
@@ -1,5 +0,0 @@
----
-"task-master-ai": minor
----
-
-Added Groq provider support
@@ -1,21 +1,21 @@
 {
   "models": {
     "main": {
-      "provider": "groq",
-      "modelId": "llama-3.1-8b-instant",
-      "maxTokens": 131072,
+      "provider": "anthropic",
+      "modelId": "claude-3-7-sonnet-20250219",
+      "maxTokens": 120000,
       "temperature": 0.2
     },
     "research": {
-      "provider": "groq",
-      "modelId": "llama-3.3-70b-versatile",
-      "maxTokens": 32768,
+      "provider": "perplexity",
+      "modelId": "sonar",
+      "maxTokens": 8700,
       "temperature": 0.1
     },
     "fallback": {
       "provider": "anthropic",
-      "modelId": "claude-3-7-sonnet-20250219",
-      "maxTokens": 128000,
+      "modelId": "claude-3-5-sonnet-20241022",
+      "maxTokens": 8192,
       "temperature": 0.2
     }
   },
@@ -1,23 +0,0 @@
-# Task ID: 1
-# Title: Implement TTS Flag for Taskmaster Commands
-# Status: pending
-# Dependencies: 16 (Not found)
-# Priority: medium
-# Description: Add text-to-speech functionality to taskmaster commands with configurable voice options and audio output settings.
-# Details:
-Implement TTS functionality including:
-- Add --tts flag to all relevant taskmaster commands (list, show, generate, etc.)
-- Integrate with system TTS engines (Windows SAPI, macOS say command, Linux espeak/festival)
-- Create TTS configuration options in the configuration management system
-- Add voice selection options (male/female, different languages if available)
-- Implement audio output settings (volume, speed, pitch)
-- Add TTS-specific error handling for cases where TTS is unavailable
-- Create fallback behavior when TTS fails (silent failure or text output)
-- Support for reading task titles, descriptions, and status updates aloud
-- Add option to read entire task lists or individual task details
-- Implement TTS for command confirmations and error messages
-- Create TTS output formatting to make spoken text more natural (removing markdown, formatting numbers/dates appropriately)
-- Add configuration option to enable/disable TTS globally
-
-# Test Strategy:
-Test TTS functionality across different operating systems (Windows, macOS, Linux). Verify that the --tts flag works with all major commands. Test voice configuration options and ensure audio output settings are properly applied. Test error handling when TTS services are unavailable. Verify that text formatting for speech is natural and understandable. Test with various task content types including special characters, code snippets, and long descriptions. Ensure TTS can be disabled and enabled through configuration.
.taskmaster/tasks/tasks.json (new file, 5648 lines): file diff suppressed because one or more lines are too long
CHANGELOG.md (+50 lines)
@@ -1,5 +1,55 @@
 # task-master-ai
 
+## 0.20.0
+
+### Minor Changes
+
+- [#950](https://github.com/eyaltoledano/claude-task-master/pull/950) [`699e9ee`](https://github.com/eyaltoledano/claude-task-master/commit/699e9eefb5d687b256e9402d686bdd5e3a358b4a) Thanks [@ben-vargas](https://github.com/ben-vargas)! - Add support for xAI Grok 4 model
+- Add grok-4 model to xAI provider with $3/$15 per 1M token pricing
+- Enable main, fallback, and research roles for grok-4
+- Max tokens set to 131,072 (matching other xAI models)
+
+- [#946](https://github.com/eyaltoledano/claude-task-master/pull/946) [`5f009a5`](https://github.com/eyaltoledano/claude-task-master/commit/5f009a5e1fc10e37be26f5135df4b7f44a9c5320) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add stricter validation and clearer feedback for task priority when adding new tasks
+- If a task priority is invalid, it defaults to medium
+- Task priority is now case-insensitive, so HIGH and high are treated as the same value
+
+- [#863](https://github.com/eyaltoledano/claude-task-master/pull/863) [`b530657`](https://github.com/eyaltoledano/claude-task-master/commit/b53065713c8da0ae6f18eb2655397aa975004923) Thanks [@OrenMe](https://github.com/OrenMe)! - Add support for MCP Sampling as an AI provider; it requires no API key and uses the client's LLM provider
+
+- [#930](https://github.com/eyaltoledano/claude-task-master/pull/930) [`98d1c97`](https://github.com/eyaltoledano/claude-task-master/commit/98d1c974361a56ddbeb772b1272986b9d3913459) Thanks [@OmarElKadri](https://github.com/OmarElKadri)! - Added Groq provider support
+
+### Patch Changes
+
+- [#958](https://github.com/eyaltoledano/claude-task-master/pull/958) [`6c88a4a`](https://github.com/eyaltoledano/claude-task-master/commit/6c88a4a749083e3bd2d073a9240799771774495a) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Recover from `@anthropic-ai/claude-code` JSON truncation bug that caused Task Master to crash when handling large (>8 kB) structured responses. The CLI/SDK still truncates, but Task Master now detects the error, preserves buffered text, and returns a usable response instead of throwing.
+
+- [#958](https://github.com/eyaltoledano/claude-task-master/pull/958) [`3334e40`](https://github.com/eyaltoledano/claude-task-master/commit/3334e409ae659d5223bb136ae23fd22c5e219073) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Update dependency ai-sdk-provider-gemini-cli to 0.0.4 to address a breaking change Google made to the Gemini CLI, and add 'api-key' support in addition to 'gemini-api-key' for better AI SDK compatibility.
+
+- [#853](https://github.com/eyaltoledano/claude-task-master/pull/853) [`95c299d`](https://github.com/eyaltoledano/claude-task-master/commit/95c299df642bd8e6d75f8fa5110ac705bcc72edf) Thanks [@joedanz](https://github.com/joedanz)! - Unify and streamline profile system architecture for improved maintainability
+
+## 0.20.0-rc.0
+
+### Minor Changes
+
+- [#950](https://github.com/eyaltoledano/claude-task-master/pull/950) [`699e9ee`](https://github.com/eyaltoledano/claude-task-master/commit/699e9eefb5d687b256e9402d686bdd5e3a358b4a) Thanks [@ben-vargas](https://github.com/ben-vargas)! - Add support for xAI Grok 4 model
+- Add grok-4 model to xAI provider with $3/$15 per 1M token pricing
+- Enable main, fallback, and research roles for grok-4
+- Max tokens set to 131,072 (matching other xAI models)
+
+- [#946](https://github.com/eyaltoledano/claude-task-master/pull/946) [`5f009a5`](https://github.com/eyaltoledano/claude-task-master/commit/5f009a5e1fc10e37be26f5135df4b7f44a9c5320) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add stricter validation and clearer feedback for task priority when adding new tasks
+- If a task priority is invalid, it defaults to medium
+- Task priority is now case-insensitive, so HIGH and high are treated as the same value
+
+- [#863](https://github.com/eyaltoledano/claude-task-master/pull/863) [`b530657`](https://github.com/eyaltoledano/claude-task-master/commit/b53065713c8da0ae6f18eb2655397aa975004923) Thanks [@OrenMe](https://github.com/OrenMe)! - Add support for MCP Sampling as an AI provider; it requires no API key and uses the client's LLM provider
+
+- [#930](https://github.com/eyaltoledano/claude-task-master/pull/930) [`98d1c97`](https://github.com/eyaltoledano/claude-task-master/commit/98d1c974361a56ddbeb772b1272986b9d3913459) Thanks [@OmarElKadri](https://github.com/OmarElKadri)! - Added Groq provider support
+
+### Patch Changes
+
+- [#916](https://github.com/eyaltoledano/claude-task-master/pull/916) [`6c88a4a`](https://github.com/eyaltoledano/claude-task-master/commit/6c88a4a749083e3bd2d073a9240799771774495a) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Recover from `@anthropic-ai/claude-code` JSON truncation bug that caused Task Master to crash when handling large (>8 kB) structured responses. The CLI/SDK still truncates, but Task Master now detects the error, preserves buffered text, and returns a usable response instead of throwing.
+
+- [#916](https://github.com/eyaltoledano/claude-task-master/pull/916) [`3334e40`](https://github.com/eyaltoledano/claude-task-master/commit/3334e409ae659d5223bb136ae23fd22c5e219073) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Update dependency ai-sdk-provider-gemini-cli to 0.0.4 to address a breaking change Google made to the Gemini CLI, and add 'api-key' support in addition to 'gemini-api-key' for better AI SDK compatibility.
+
+- [#853](https://github.com/eyaltoledano/claude-task-master/pull/853) [`95c299d`](https://github.com/eyaltoledano/claude-task-master/commit/95c299df642bd8e6d75f8fa5110ac705bcc72edf) Thanks [@joedanz](https://github.com/joedanz)! - Unify and streamline profile system architecture for improved maintainability
+
 ## 0.19.0
 
 ### Minor Changes
@@ -4,30 +4,7 @@ Taskmaster uses two primary methods for configuration:
 
 1. **`.taskmaster/config.json` File (Recommended - New Structure)**
 
-   - This JSON file stores most configuration settings, including A5. **Usage Requirements**:
-8. **Troubleshooting**:
-   - "MCP provider requires session context" → Ensure running in MCP environment
-   - See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshootingust be running in an MCP context (session must be available)
-   - Session must provide `clientCapabilities.sampling` capability
-
-6. **Best Practices**:
-   - Always configure a non-MCP fallback provider
-   - Use `mcp` for main/research roles when in MCP environments
-   - Test sampling capability before production use
-
-7. **Setup Commands**:
-   ```bash
-   # Set MCP provider for main role
-   task-master models set-main --provider mcp --model claude-3-5-sonnet-20241022
-
-   # Set MCP provider for research role
-   task-master models set-research --provider mcp --model claude-3-opus-20240229
-
-   # Verify configuration
-   task-master models list
-   ```
-
-8. **Troubleshooting**:lections, parameters, logging levels, and project defaults.
+   - This JSON file stores most configuration settings, including AI model selections, parameters, logging levels, and project defaults.
    - **Location:** This file is created in the `.taskmaster/` directory when you run the `task-master models --setup` interactive setup or initialize a new project with `task-master init`.
    - **Migration:** Existing projects with `.taskmasterconfig` in the root will continue to work, but should be migrated to the new structure using `task-master migrate`.
    - **Management:** Use the `task-master models --setup` command (or `models` MCP tool) to interactively create and manage this file. You can also set specific models directly using `task-master models --set-<role>=<model_id>`, adding `--ollama` or `--openrouter` flags for custom models. Manual editing is possible but not recommended unless you understand the structure.
@@ -68,11 +45,12 @@ Taskmaster uses two primary methods for configuration:
     "azureBaseURL": "https://your-endpoint.azure.com/openai/deployments",
     "vertexProjectId": "your-gcp-project-id",
     "vertexLocation": "us-central1",
     "responseLanguage": "English"
     }
   }
   ```
 
+   > For MCP-specific setup and troubleshooting, see [Provider-Specific Configuration](#provider-specific-configuration).
 
 2. **Legacy `.taskmasterconfig` File (Backward Compatibility)**
 
@@ -198,8 +176,6 @@ node scripts/init.js
 
 ### MCP (Model Context Protocol) Provider
 
-The MCP provider enables Task Master to use MCP servers as AI providers. This is particularly useful when running Task Master within MCP-compatible development environments like Claude Desktop or Cursor.
-
 1. **Prerequisites**:
    - An active MCP session with sampling capability
    - MCP client with sampling support (e.g. VS Code)
@@ -238,12 +214,24 @@ The MCP provider enables Task Master to use MCP servers as AI providers. This is
    - Must be running in an MCP context (session must be available)
    - Session must provide `clientCapabilities.sampling` capability
 
-5. **Best Practices**:
+6. **Best Practices**:
    - Always configure a non-MCP fallback provider
    - Use `mcp` for main/research roles when in MCP environments
    - Test sampling capability before production use
 
-6. **Troubleshooting**:
+7. **Setup Commands**:
+   ```bash
+   # Set MCP provider for main role
+   task-master models set-main --provider mcp --model claude-3-5-sonnet-20241022
+
+   # Set MCP provider for research role
+   task-master models set-research --provider mcp --model claude-3-opus-20240229
+
+   # Verify configuration
+   task-master models list
+   ```
+
+8. **Troubleshooting**:
    - "MCP provider requires session context" → Ensure running in MCP environment
    - See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshooting
 
@@ -125,8 +125,7 @@ export async function addTaskDirect(args, log, context = {}) {
 			},
 			'json', // outputFormat
 			manualTaskData, // Pass the manual task data
-			false, // research flag is false for manual creation
-			projectRoot // Pass projectRoot
+			false // research flag is false for manual creation
 		);
 		newTaskId = result.newTaskId;
 		telemetryData = result.telemetryData;
@@ -1,6 +1,6 @@
 {
 	"name": "task-master-ai",
-	"version": "0.19.0",
+	"version": "0.20.0",
 	"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
 	"main": "index.js",
 	"type": "module",
@@ -561,16 +561,6 @@ async function addTask(
 		writeJSON(tasksPath, rawData, projectRoot, targetTag);
 		report('DEBUG: tasks.json written.', 'debug');
 
-		// Generate markdown task files
-		report('Generating task files...', 'info');
-		report('DEBUG: Calling generateTaskFiles...', 'debug');
-		// Pass mcpLog if available to generateTaskFiles
-		await generateTaskFiles(tasksPath, path.dirname(tasksPath), {
-			projectRoot,
-			tag: targetTag
-		});
-		report('DEBUG: generateTaskFiles finished.', 'debug');
-
 		// Show success message - only for text output (CLI)
 		if (outputFormat === 'text') {
 			const table = new Table({
@@ -461,19 +461,44 @@ async function expandTask(
 			`${combinedAdditionalContext}\n\n# Project Context\n\n${gatheredContext}`.trim();
 	}
 
+	// Ensure expansionPrompt is a string (handle both string and object formats)
+	let expansionPromptText = undefined;
+	if (taskAnalysis?.expansionPrompt) {
+		if (typeof taskAnalysis.expansionPrompt === 'string') {
+			expansionPromptText = taskAnalysis.expansionPrompt;
+		} else if (
+			typeof taskAnalysis.expansionPrompt === 'object' &&
+			taskAnalysis.expansionPrompt.text
+		) {
+			expansionPromptText = taskAnalysis.expansionPrompt.text;
+		}
+	}
+
+	// Ensure gatheredContext is a string (handle both string and object formats)
+	let gatheredContextText = gatheredContext;
+	if (typeof gatheredContext === 'object' && gatheredContext !== null) {
+		if (gatheredContext.data) {
+			gatheredContextText = gatheredContext.data;
+		} else if (gatheredContext.text) {
+			gatheredContextText = gatheredContext.text;
+		} else {
+			gatheredContextText = JSON.stringify(gatheredContext);
+		}
+	}
+
 	const promptParams = {
 		task: task,
 		subtaskCount: finalSubtaskCount,
 		nextSubtaskId: nextSubtaskId,
 		additionalContext: additionalContext,
 		complexityReasoningContext: complexityReasoningContext,
-		gatheredContext: gatheredContext,
+		gatheredContext: gatheredContextText || '',
 		useResearch: useResearch,
-		expansionPrompt: taskAnalysis?.expansionPrompt || undefined
+		expansionPrompt: expansionPromptText || undefined
 	};
 
 	let variantKey = 'default';
-	if (taskAnalysis?.expansionPrompt) {
+	if (expansionPromptText) {
 		variantKey = 'complexity-report';
 		logger.info(
 			`Using expansion prompt from complexity report for task ${task.id}.`