Compare commits

..

21 Commits

Author SHA1 Message Date
Ralph Khreish
d5db033fdc security: tighten permissions and add debug step
- Change issues permission from write to read (least privilege)
- Add debug step to show generated metrics files before parsing
- Helps troubleshoot empty/missing files without failing the job
- Updated file list to match current pr_created_metrics.md structure
2025-09-22 15:41:57 +02:00
Ralph Khreish
cd2da6a1ec chore: fix format 2025-09-22 15:31:04 +02:00
Ralph Khreish
1f9dfdd934 harden: use regex for robust markdown table parsing
- Replace brittle column index parsing with regex matching
- Escape special characters in metric names for safe regex
- More resilient to column drift and table format variations
- Prevents false matches from substring matching
2025-09-22 15:30:00 +02:00
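
A minimal sketch of the row-matching approach this commit describes, assuming the pipe-delimited summary table emitted by github/issue-metrics (the full implementation is in `.github/scripts/parse-metrics.mjs` further down; the function name and sample values here are illustrative):

```js
// Sketch only: escape the metric name, then anchor the match to a whole table row.
function matchMetricRow(line, metricName) {
  // Escape regex metacharacters so a name like "Time to close" is matched literally.
  const safeName = metricName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  // Matches "| <metric name> | <value> | ..." and captures the first value cell.
  const re = new RegExp(`^\\|\\s*${safeName}\\s*\\|\\s*([^|]+)\\|?`);
  const match = line.trim().match(re);
  return match ? match[1].trim() : null;
}

// matchMetricRow('| Time to first response | 3:42:10 |', 'Time to first response') -> '3:42:10'
```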
Ralph Khreish
aec5a80cfb polish: improve step names and logging clarity
- Rename step from 'Get dates for last week' to 'Get dates for last 14 days'
- Add consistent warning messages when metrics files are missing
- Use console.warn for missing file scenarios with [parse-metrics] prefix
- Improve debugging visibility for fallback behavior
2025-09-22 15:28:03 +02:00
Ralph Khreish
d38b470572 enhance: split PR created vs merged metrics for accuracy
- Add separate steps for PR created metrics and PR merged metrics
- Query PRs by merge date for more accurate merge timing data
- Parse both pr_created_metrics.md and pr_merged_metrics.md files
- Maintain backward compatibility with fallback to old pr_metrics.md
- Fixes timing discrepancy where merge time was faster than response time
2025-09-22 15:26:06 +02:00
Ralph Khreish
6c54a2821d fix: correct merged vs closed terminology for PR metrics
- Prefer 'Number of items merged' over 'Number of items closed' for accuracy
- Prefer 'Average time to merge' over 'Time to close' for PR timing
- Add safe fallbacks if action doesn't provide merged-specific metrics
- Fixes Discord report showing 'merged' but parsing 'closed' data
2025-09-22 15:25:05 +02:00
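
A condensed sketch of the preference-with-fallback this commit describes; the real logic lives inline in parse-metrics.mjs further down, and the function name and sample values here are only illustrative:

```js
// Sketch: use merged-specific metrics when the action provides them,
// otherwise fall back to the closed-based values it always emits.
function pickMergeMetrics({ merged, closed, avgTimeToMerge, timeToClose }) {
  return {
    prsMerged: merged || closed,
    prAvgMergeTime: avgTimeToMerge !== 'N/A' ? avgTimeToMerge : timeToClose
  };
}

// pickMergeMetrics({ merged: 0, closed: 12, avgTimeToMerge: 'N/A', timeToClose: '1 day, 4:00:00' })
// -> { prsMerged: 12, prAvgMergeTime: '1 day, 4:00:00' }
```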
Ralph Khreish
320cc6e6a0 harden: implement CodeRabbit bash and output file suggestions
- Add 'set -Eeuo pipefail' for strict bash error handling
- Make issue metrics output file explicit to avoid coupling to action defaults
- Prevents future breakage if github/issue-metrics changes default filenames
- Follows defensive programming best practices
2025-09-22 15:23:34 +02:00
Ralph Khreish
c86158c911 improve: implement CodeRabbit review suggestions
- Consolidate fs imports into single import statement
- Harden table parsing with better column detection and filtering
- Improve numeric extraction to handle commas and edge cases
- Add comprehensive error handling for file writes
- Add stdout logging for better debugging and transparency
- Add file existence logging with emojis for clarity
2025-09-22 15:21:14 +02:00
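
The comma handling called out above boils down to something like the sketch below (this mirrors `parseCountMetric` in the new script; sample inputs are illustrative):

```js
// Sketch: pull the first integer out of a table cell, tolerating thousands separators.
function extractCount(cell) {
  const numberMatch = String(cell).match(/[\d,]+/);
  if (!numberMatch) return 0;
  const n = parseInt(numberMatch[0].replace(/,/g, ''), 10);
  return Number.isNaN(n) ? 0 : n;
}

// extractCount('1,234') -> 1234
// extractCount('N/A')   -> 0
```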
Ralph Khreish
ce66b069e5 enhance: add Repobeats visual analytics to Discord report
- Include Repobeats chart URL in the weekly metrics message
- Provides visual complement to the text-based metrics
- Shows broader repository health and trends
2025-09-22 15:16:48 +02:00
Ralph Khreish
2071109258 final: extend to 14-day window and clean up debug output
- Increase window from 7 to 14 days for better sample sizes
- Remove debug output now that we understand the metrics
- Should give more representative averages for response/merge times
2025-09-22 15:09:38 +02:00
Ralph Khreish
dea5ddbebd debug: add raw file content output to understand metric discrepancies 2025-09-22 15:02:14 +02:00
Ralph Khreish
6011fe9cf1 fix: add Node.js setup for script execution
- Add actions/setup-node@v4 with Node 20
- Required to run .github/scripts/parse-metrics.mjs
- Following pattern from other workflows that use .mjs scripts
2025-09-22 14:52:40 +02:00
Ralph Khreish
604b3d6702 refactor: replace bash parsing with JavaScript script
- Create .github/scripts/parse-metrics.mjs for robust markdown parsing
- Replace complex bash/awk parsing with proper JavaScript logic
- Better error handling and debug output
- Should correctly parse time values and handle edge cases
2025-09-22 14:47:34 +02:00
Ralph Khreish
f29ac02ac5 debug: add temporary debug output to check markdown table format 2025-09-22 14:43:59 +02:00
Ralph Khreish
f8aaaabace fix: extract response time metrics from markdown tables
- Use direct grep for 'Time to first response' and 'Time to close' table rows
- Parse table columns with awk -F'|' to get actual time values
- Should now show response times instead of N/A
2025-09-22 14:43:43 +02:00
Ralph Khreish
4e4c73faf5 fix: correct markdown table parsing for metrics
- Use awk -F'|' to parse markdown table columns properly
- Extract actual numbers instead of table headers
- Should now show '7' instead of '| Total number of items created | 7 |'
2025-09-22 14:38:41 +02:00
Ralph Khreish
e3d2ac5a7c cleanup: revert to 7-day window and remove debug output
- Back to 7-day window now that parsing is fixed
- Remove debug output for cleaner workflow logs
- Workflow should now show real metrics reliably
2025-09-22 14:34:29 +02:00
Ralph Khreish
34b76ab2fa fix: correct regex patterns for metrics parsing
- Use 'Total number of items created' instead of 'issues/pull requests created'
- Use 'Number of items closed' instead of 'issues/pull requests closed'
- Patterns now match the actual output from github/issue-metrics@v3
2025-09-22 11:48:03 +02:00
Ralph Khreish
83d5b22ca9 fix: improve weekly metrics workflow with 14-day window and debug output
- Extend search window from 7 to 14 days for more reliable metrics
- Add debug output to troubleshoot empty metrics
- Add fallback values when no metrics files found
- Show date range in Discord message for clarity
2025-09-22 11:41:37 +02:00
Ralph Khreish
c395e93696 chore: remove pre-mode (get out of RC) 2025-09-20 01:11:50 +02:00
Ralph Khreish
a621ff05ea feat: update tm models defaults (#1225) 2025-09-20 01:07:33 +02:00
20 changed files with 251 additions and 337 deletions

View File

@@ -1,5 +0,0 @@
---
"task-master-ai": minor
---
Testing one more pre-release iteration

View File

@@ -1,5 +0,0 @@
---
"task-master-ai": minor
---
Test out the RC

View File

@@ -1,5 +0,0 @@
---
"@tm/cli": minor
---
testing this stuff out to see how the release candidate works with monorepo

View File

@@ -1,5 +1,5 @@
{
"mode": "pre",
"mode": "exit",
"tag": "rc",
"initialVersions": {
"task-master-ai": "0.26.0",

View File

@@ -2,7 +2,7 @@
"task-master-ai": minor
---
Add grok-cli as a provider. You can now use Grok models with Task Master by setting the `GROK_CLI_API_KEY` environment variable.
Add grok-cli as a provider with full codebase context support. You can now use Grok models (grok-2, grok-3, grok-4, etc.) with Task Master for AI operations that have access to your entire codebase context, enabling more informed task generation and PRD parsing.
## Setup Instructions
@@ -20,11 +20,17 @@ Add grok-cli as a provider. You can now use Grok models with Task Master by sett
task-master models --set-fallback grok-beta
```
## Available Models
- `grok-beta` - Latest Grok model
- `grok-vision-beta` - Grok with vision capabilities
## Key Features
- **Full codebase context**: Grok models can analyze your entire project when generating tasks or parsing PRDs
- **xAI model access**: Support for latest Grok models (grok-2, grok-3, grok-4, etc.)
- **Code-aware task generation**: Create more accurate and contextual tasks based on your actual codebase
- **Intelligent PRD parsing**: Parse requirements with understanding of your existing code structure
The Grok CLI provider integrates with xAI's Grok models and can also use the local Grok CLI configuration file (`~/.grok/user-settings.json`) if available.
## Available Models
- `grok-beta` - Latest Grok model with codebase context
- `grok-vision-beta` - Grok with vision capabilities and codebase context
The Grok CLI provider integrates with xAI's Grok models via grok-cli and can also use the local Grok CLI configuration file (`~/.grok/user-settings.json`) if available.
## Credits
Built using the [grok-cli](https://github.com/superagent-ai/grok-cli) by Superagent AI for seamless integration with xAI's Grok models.

View File

@@ -0,0 +1,8 @@
---
"task-master-ai": minor
---
Improve taskmaster ai provider defaults
- moving from main anthropic 3.7 to anthropic sonnet 4
- moving from fallback anthropic 3.5 to anthropic 3.7

.github/scripts/parse-metrics.mjs (vendored, new file, 157 additions)
View File

@@ -0,0 +1,157 @@
#!/usr/bin/env node
import { readFileSync, existsSync, writeFileSync } from 'fs';
function parseMetricsTable(content, metricName) {
const lines = content.split('\n');
for (let i = 0; i < lines.length; i++) {
const line = lines[i].trim();
// Match a markdown table row like: | Metric Name | value | ...
const safeName = metricName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
const re = new RegExp(`^\\|\\s*${safeName}\\s*\\|\\s*([^|]+)\\|?`);
const match = line.match(re);
if (match) {
return match[1].trim() || 'N/A';
}
}
return 'N/A';
}
function parseCountMetric(content, metricName) {
const result = parseMetricsTable(content, metricName);
// Extract number from string, handling commas and spaces
const numberMatch = result.toString().match(/[\d,]+/);
if (numberMatch) {
const number = parseInt(numberMatch[0].replace(/,/g, ''));
return isNaN(number) ? 0 : number;
}
return 0;
}
function main() {
const metrics = {
issues_created: 0,
issues_closed: 0,
prs_created: 0,
prs_merged: 0,
issue_avg_first_response: 'N/A',
issue_avg_time_to_close: 'N/A',
pr_avg_first_response: 'N/A',
pr_avg_merge_time: 'N/A'
};
// Parse issue metrics
if (existsSync('issue_metrics.md')) {
console.log('📄 Found issue_metrics.md, parsing...');
const issueContent = readFileSync('issue_metrics.md', 'utf8');
metrics.issues_created = parseCountMetric(
issueContent,
'Total number of items created'
);
metrics.issues_closed = parseCountMetric(
issueContent,
'Number of items closed'
);
metrics.issue_avg_first_response = parseMetricsTable(
issueContent,
'Time to first response'
);
metrics.issue_avg_time_to_close = parseMetricsTable(
issueContent,
'Time to close'
);
} else {
console.warn('[parse-metrics] issue_metrics.md not found; using defaults.');
}
// Parse PR created metrics
if (existsSync('pr_created_metrics.md')) {
console.log('📄 Found pr_created_metrics.md, parsing...');
const prCreatedContent = readFileSync('pr_created_metrics.md', 'utf8');
metrics.prs_created = parseCountMetric(
prCreatedContent,
'Total number of items created'
);
metrics.pr_avg_first_response = parseMetricsTable(
prCreatedContent,
'Time to first response'
);
} else {
console.warn(
'[parse-metrics] pr_created_metrics.md not found; using defaults.'
);
}
// Parse PR merged metrics (for more accurate merge data)
if (existsSync('pr_merged_metrics.md')) {
console.log('📄 Found pr_merged_metrics.md, parsing...');
const prMergedContent = readFileSync('pr_merged_metrics.md', 'utf8');
metrics.prs_merged = parseCountMetric(
prMergedContent,
'Total number of items created'
);
// For merged PRs, "Time to close" is actually time to merge
metrics.pr_avg_merge_time = parseMetricsTable(
prMergedContent,
'Time to close'
);
} else {
console.warn(
'[parse-metrics] pr_merged_metrics.md not found; falling back to pr_metrics.md.'
);
// Fallback: try old pr_metrics.md if it exists
if (existsSync('pr_metrics.md')) {
console.log('📄 Falling back to pr_metrics.md...');
const prContent = readFileSync('pr_metrics.md', 'utf8');
const mergedCount = parseCountMetric(prContent, 'Number of items merged');
metrics.prs_merged =
mergedCount || parseCountMetric(prContent, 'Number of items closed');
const maybeMergeTime = parseMetricsTable(
prContent,
'Average time to merge'
);
metrics.pr_avg_merge_time =
maybeMergeTime !== 'N/A'
? maybeMergeTime
: parseMetricsTable(prContent, 'Time to close');
} else {
console.warn('[parse-metrics] pr_metrics.md not found; using defaults.');
}
}
// Output for GitHub Actions
const output = Object.entries(metrics)
.map(([key, value]) => `${key}=${value}`)
.join('\n');
// Always output to stdout for debugging
console.log('\n=== FINAL METRICS ===');
Object.entries(metrics).forEach(([key, value]) => {
console.log(`${key}: ${value}`);
});
// Write to GITHUB_OUTPUT if in GitHub Actions
if (process.env.GITHUB_OUTPUT) {
try {
writeFileSync(process.env.GITHUB_OUTPUT, output + '\n', { flag: 'a' });
console.log(
`\nSuccessfully wrote metrics to ${process.env.GITHUB_OUTPUT}`
);
} catch (error) {
console.error(`Failed to write to GITHUB_OUTPUT: ${error.message}`);
process.exit(1);
}
} else {
console.log(
'\nNo GITHUB_OUTPUT environment variable found, skipping file write'
);
}
}
main();
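
For context, the parsing above keys on summary-table rows of the shape quoted in an earlier commit message ('| Total number of items created | 7 |'); the script then appends one key=value line per metric to $GITHUB_OUTPUT. The table and resulting output lines below are illustrative, not captured from a real run:

```
| Metric                        | Value           |
| ----------------------------- | --------------- |
| Time to first response        | 3:42:10         |
| Time to close                 | 1 day, 4:00:00  |
| Total number of items created | 7               |
| Number of items closed        | 5               |

issues_created=7
issues_closed=5
issue_avg_first_response=3:42:10
issue_avg_time_to_close=1 day, 4:00:00
```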

View File

@@ -8,7 +8,7 @@ on:
permissions:
contents: read
issues: write
issues: read
pull-requests: read
jobs:
@@ -17,15 +17,25 @@ jobs:
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_METRICS_WEBHOOK }}
steps:
- name: Get dates for last week
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Get dates for last 14 days
run: |
# Last 7 days
first_day=$(date -d "7 days ago" +%Y-%m-%d)
set -Eeuo pipefail
# Last 14 days
first_day=$(date -d "14 days ago" +%Y-%m-%d)
last_day=$(date +%Y-%m-%d)
echo "first_day=$first_day" >> $GITHUB_ENV
echo "last_day=$last_day" >> $GITHUB_ENV
echo "week_of=$(date -d '7 days ago' +'Week of %B %d, %Y')" >> $GITHUB_ENV
echo "date_range=Past 14 days ($first_day to $last_day)" >> $GITHUB_ENV
- name: Generate issue metrics
uses: github/issue-metrics@v3
@@ -34,40 +44,39 @@ jobs:
SEARCH_QUERY: "repo:${{ github.repository }} is:issue created:${{ env.first_day }}..${{ env.last_day }}"
HIDE_TIME_TO_ANSWER: true
HIDE_LABEL_METRICS: false
OUTPUT_FILE: issue_metrics.md
- name: Generate PR metrics
- name: Generate PR created metrics
uses: github/issue-metrics@v3
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SEARCH_QUERY: "repo:${{ github.repository }} is:pr created:${{ env.first_day }}..${{ env.last_day }}"
OUTPUT_FILE: pr_metrics.md
OUTPUT_FILE: pr_created_metrics.md
- name: Generate PR merged metrics
uses: github/issue-metrics@v3
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SEARCH_QUERY: "repo:${{ github.repository }} is:pr is:merged merged:${{ env.first_day }}..${{ env.last_day }}"
OUTPUT_FILE: pr_merged_metrics.md
- name: Debug generated metrics
run: |
set -Eeuo pipefail
echo "Listing markdown files in workspace:"
ls -la *.md || true
for f in issue_metrics.md pr_created_metrics.md pr_merged_metrics.md; do
if [ -f "$f" ]; then
echo "== $f (first 10 lines) =="
head -n 10 "$f"
else
echo "Missing $f"
fi
done
- name: Parse metrics
id: metrics
run: |
# Parse the metrics from the generated markdown files
if [ -f "issue_metrics.md" ]; then
# Extract key metrics using grep/awk
AVG_TIME_TO_FIRST_RESPONSE=$(grep -A 1 "Average time to first response" issue_metrics.md | tail -1 | xargs || echo "N/A")
AVG_TIME_TO_CLOSE=$(grep -A 1 "Average time to close" issue_metrics.md | tail -1 | xargs || echo "N/A")
NUM_ISSUES_CREATED=$(grep -oP '\d+(?= issues created)' issue_metrics.md || echo "0")
NUM_ISSUES_CLOSED=$(grep -oP '\d+(?= issues closed)' issue_metrics.md || echo "0")
fi
if [ -f "pr_metrics.md" ]; then
PR_AVG_TIME_TO_MERGE=$(grep -A 1 "Average time to close" pr_metrics.md | tail -1 | xargs || echo "N/A")
NUM_PRS_CREATED=$(grep -oP '\d+(?= pull requests created)' pr_metrics.md || echo "0")
NUM_PRS_MERGED=$(grep -oP '\d+(?= pull requests closed)' pr_metrics.md || echo "0")
fi
# Set outputs for Discord action
echo "issues_created=${NUM_ISSUES_CREATED:-0}" >> $GITHUB_OUTPUT
echo "issues_closed=${NUM_ISSUES_CLOSED:-0}" >> $GITHUB_OUTPUT
echo "prs_created=${NUM_PRS_CREATED:-0}" >> $GITHUB_OUTPUT
echo "prs_merged=${NUM_PRS_MERGED:-0}" >> $GITHUB_OUTPUT
echo "avg_first_response=${AVG_TIME_TO_FIRST_RESPONSE:-N/A}" >> $GITHUB_OUTPUT
echo "avg_time_to_close=${AVG_TIME_TO_CLOSE:-N/A}" >> $GITHUB_OUTPUT
echo "pr_avg_merge_time=${PR_AVG_TIME_TO_MERGE:-N/A}" >> $GITHUB_OUTPUT
run: node .github/scripts/parse-metrics.mjs
- name: Send to Discord
uses: sarisia/actions-status-discord@v1
@@ -78,19 +87,22 @@ jobs:
title: "📊 Weekly Metrics Report"
description: |
**${{ env.week_of }}**
*${{ env.date_range }}*
**🎯 Issues**
• Created: ${{ steps.metrics.outputs.issues_created }}
• Closed: ${{ steps.metrics.outputs.issues_closed }}
• Avg Response Time: ${{ steps.metrics.outputs.issue_avg_first_response }}
• Avg Time to Close: ${{ steps.metrics.outputs.issue_avg_time_to_close }}
**🔀 Pull Requests**
• Created: ${{ steps.metrics.outputs.prs_created }}
• Merged: ${{ steps.metrics.outputs.prs_merged }}
**⏱️ Response Times**
• First Response: ${{ steps.metrics.outputs.avg_first_response }}
• Time to Close: ${{ steps.metrics.outputs.avg_time_to_close }}
• PR Merge Time: ${{ steps.metrics.outputs.pr_avg_merge_time }}
• Avg Response Time: ${{ steps.metrics.outputs.pr_avg_first_response }}
• Avg Time to Merge: ${{ steps.metrics.outputs.pr_avg_merge_time }}
**📈 Visual Analytics**
https://repobeats.axiom.co/api/embed/b439f28f0ab5bd7a2da19505355693cd2c55bfd4.svg
color: 0x58AFFF
username: Task Master Metrics Bot
avatar_url: https://raw.githubusercontent.com/eyaltoledano/claude-task-master/main/images/logo.png

View File

@@ -1,9 +1,9 @@
{
"models": {
"main": {
"provider": "grok-cli",
"modelId": "grok-4-latest",
"maxTokens": 131072,
"provider": "anthropic",
"modelId": "claude-sonnet-4-20250514",
"maxTokens": 64000,
"temperature": 0.2
},
"research": {
@@ -14,8 +14,8 @@
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-sonnet-4-20250514",
"maxTokens": 64000,
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 120000,
"temperature": 0.2
}
},

View File

@@ -75,7 +75,6 @@ Taskmaster uses two primary methods for configuration:
- `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
- `OPENROUTER_API_KEY`: Your OpenRouter API key.
- `XAI_API_KEY`: Your X-AI API key.
- `GROK_CLI_API_KEY`: Your Grok API key from console.x.ai.
- **Optional Endpoint Overrides:**
- **Per-role `baseURL` in `.taskmasterconfig`:** You can add a `baseURL` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
- **Environment Variable Overrides (`<PROVIDER>_BASE_URL`):** For greater flexibility, especially with third-party services, you can set an environment variable like `OPENAI_BASE_URL` or `MISTRAL_BASE_URL`. This will override any `baseURL` set in the configuration file for that provider. This is the recommended way to connect to OpenAI-compatible APIs.
@@ -317,68 +316,4 @@ Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure c
- Confirm the model is deployed in your Azure OpenAI resource
- Verify the deployment name matches your configuration exactly (case-sensitive)
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
### Grok AI Configuration
Grok AI provides access to xAI's Grok models with enhanced reasoning capabilities and requires minimal configuration:
1. **Prerequisites**:
- An xAI account with API access
- Grok API key from [console.x.ai](https://console.x.ai)
2. **Authentication**:
- Set the `GROK_CLI_API_KEY` environment variable with your Grok API key
3. **Available Models**:
- `grok-beta`: Latest Grok model with advanced reasoning
- `grok-vision-beta`: Grok with vision capabilities for image analysis
4. **Configuration Example**:
```json
// In .taskmaster/config.json
{
"models": {
"main": {
"provider": "grok-cli",
"modelId": "grok-beta",
"maxTokens": 131072,
"temperature": 0.3
},
"research": {
"provider": "grok-cli",
"modelId": "grok-vision-beta",
"maxTokens": 131072,
"temperature": 0.1
}
}
}
```
5. **Environment Variables**:
```bash
# In .env file
GROK_CLI_API_KEY=your-grok-api-key-here
```
6. **Setup Commands**:
```bash
# Set Grok as your main model
task-master models --set-main grok-beta
# Set Grok as your research model
task-master models --set-research grok-beta
# Set Grok as your fallback model
task-master models --set-fallback grok-beta
```
7. **Integration Features**:
- **Local Configuration Support**: The Grok CLI provider can use your local Grok CLI configuration file (`~/.grok/user-settings.json`) if available
- **Full Token Capacity**: Supports Grok's full 131K token capacity for large context operations
- **Built on Grok CLI**: Uses the [grok-cli](https://github.com/superagent-ai/grok-cli) by Superagent AI for reliable integration
8. **Troubleshooting**:
- **API Key Issues**: Verify your `GROK_CLI_API_KEY` is correctly set and valid
- **Model Availability**: Ensure you have access to the specified Grok model variant
- **Rate Limits**: Grok models have generous rate limits, but large contexts may take longer to process
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.

View File

@@ -38,39 +38,6 @@ sidebarTitle: "CLI Commands"
```
</Accordion>
<Accordion title="Start Working on a Task">
```bash
# Start working on a specific task with Claude Code
task-master start <id>
# Start the next available task automatically
task-master start
# Show what would be executed without launching Claude Code
task-master start <id> --dry-run
# Force start even if another task is in-progress
task-master start <id> --force
# Don't automatically update task status to in-progress
task-master start <id> --no-status-update
# Specify project root directory
task-master start <id> --project /path/to/project
# Get results in JSON format
task-master start <id> --format json
```
The `start` command automatically launches Claude Code with comprehensive context about the task, including:
- Task details and requirements
- Implementation guidelines
- Related subtasks and dependencies
- Project-specific context
When no task ID is provided, it automatically finds and starts the next available task based on dependencies and status.
</Accordion>
<Accordion title="Show Specific Task">
```bash
# Show details of a specific task

View File

@@ -16,24 +16,6 @@ Alternatively you can use the CLI to show the next task
task-master next
```
### Quick Start with `task-master start`
For immediate task execution, you can use the new `start` command to automatically launch Claude Code with full task context:
```bash
# Start the next available task automatically
task-master start
# Or start a specific task
task-master start 1.2
```
This command will:
- Find the next available task (if no ID is provided)
- Update the task status to "in-progress"
- Launch Claude Code with comprehensive task context
- Provide all necessary implementation details and project context
## Discuss Task
When you know what task to work on next you can then start chatting with the agent to make sure it understands the plan of action.

View File

@@ -3,70 +3,4 @@ title: "What's New"
sidebarTitle: "What's New"
---
## Latest Features (January 2025)
### 🚀 New `task-master start` Command
**Automated Task Execution with Claude Code Integration**
The new `start` command revolutionizes your development workflow by automatically launching Claude Code with comprehensive task context:
```bash
# Start a specific task
task-master start 1.2
# Start the next available task automatically
task-master start
# Preview what would be executed without launching Claude Code
task-master start 1.2 --dry-run
```
**Key Features:**
- **Automatic Task Discovery** - When no ID is provided, finds the next available task based on dependencies and status
- **Rich Context Injection** - Provides Claude Code with task details, requirements, subtasks, and project context
- **Status Management** - Automatically updates task status to "in-progress" when starting
- **Flexible Options** - Support for dry-run, force mode, custom project paths, and JSON output
### 🤖 Grok AI Provider Support
**Enhanced AI Model Options**
Task Master now supports xAI's Grok models with full 131K token capacity:
```bash
# Configure Grok as your main model
task-master models --set-main grok-beta
# Use Grok with vision capabilities
task-master models --set-research grok-vision-beta
```
**Setup:**
1. Get your API key from [console.x.ai](https://console.x.ai)
2. Set `GROK_CLI_API_KEY` environment variable
3. Configure using `task-master models --setup`
**Available Models:**
- `grok-beta` - Latest Grok model with advanced reasoning
- `grok-vision-beta` - Grok with vision capabilities
### 📱 VS Code Extension "Start Task" Button
**Seamless VS Code Integration**
The Task Master VS Code extension now includes a "Start Task" button for one-click task execution:
- **Direct Integration** - Launch Claude Code directly from task cards in VS Code
- **No Terminal Switching** - Automatic terminal management and command execution
- **Full Context** - Same rich context injection as the CLI command
- **Visual Workflow** - Seamless transition from task planning to implementation
### 🔧 Technical Improvements
- **TypeScript Migration** - Core components now use TypeScript for better type safety
- **Model Configuration Updates** - Upgraded fallback model to Claude Sonnet 4
- **Token Capacity Fixes** - Grok models now properly support their full 131K token capacity
- **Enhanced Error Handling** - Improved error messages and debugging capabilities
An easy way to see the latest releases

View File

@@ -22,7 +22,6 @@ Taskmaster AI is an intelligent task management system designed for AI-assisted
![Kanban Board](assets/screenshots/kanban-board.png)
### 🤖 **AI-Powered Features**
- **One-Click Task Start** - Launch Claude Code directly from task cards with full context
- **Task Content Generation** - Regenerate task descriptions using AI
- **Smart Task Updates** - Append findings and progress notes automatically
- **MCP Integration** - Seamless connection to Taskmaster AI via Model Context Protocol
@@ -84,7 +83,6 @@ The extension automatically handles the Taskmaster MCP server connection:
| **View Kanban Board** | `Ctrl/Cmd + Shift + P` → "Taskmaster: Show Board" |
| **Change Task Status** | Drag task card to different column |
| **View Task Details** | Click on any task card |
| **Start Working on Task** | Click "Start Task" button to launch Claude Code automatically |
| **Edit Task Content** | Click task → Use edit buttons in details panel |
| **Add Subtasks** | Click the + button on parent task cards |
| **Use AI Features** | Open task details → Click AI action buttons |

File diff suppressed because one or more lines are too long

View File

@@ -5,6 +5,7 @@
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import { ConfigManager } from './config-manager.js';
import { DEFAULT_CONFIG_VALUES } from '../interfaces/configuration.interface.js';
import { ConfigLoader } from './services/config-loader.service.js';
import { ConfigMerger } from './services/config-merger.service.js';
import { RuntimeStateManager } from './services/runtime-state-manager.service.js';
@@ -69,8 +70,8 @@ describe('ConfigManager', () => {
({
loadState: vi.fn().mockResolvedValue({ activeTag: 'master' }),
saveState: vi.fn().mockResolvedValue(undefined),
getActiveTag: vi.fn().mockReturnValue('master'),
setActiveTag: vi.fn().mockResolvedValue(undefined),
getCurrentTag: vi.fn().mockReturnValue('master'),
setCurrentTag: vi.fn().mockResolvedValue(undefined),
getState: vi.fn().mockReturnValue({ activeTag: 'master' }),
updateMetadata: vi.fn().mockResolvedValue(undefined),
clearState: vi.fn().mockResolvedValue(undefined)
@@ -227,8 +228,8 @@ describe('ConfigManager', () => {
const models = manager.getModelConfig();
expect(models).toEqual({
main: 'claude-3-5-sonnet-20241022',
fallback: 'gpt-4o-mini'
main: DEFAULT_CONFIG_VALUES.MODELS.MAIN,
fallback: DEFAULT_CONFIG_VALUES.MODELS.FALLBACK
});
});
@@ -281,7 +282,7 @@ describe('ConfigManager', () => {
await manager.setActiveTag('feature-branch');
const stateManager = (manager as any).stateManager;
expect(stateManager.setActiveTag).toHaveBeenCalledWith('feature-branch');
expect(stateManager.setCurrentTag).toHaveBeenCalledWith('feature-branch');
});
});

View File

@@ -10,6 +10,7 @@ import type {
PartialConfiguration,
RuntimeStorageConfig
} from '../interfaces/configuration.interface.js';
import { DEFAULT_CONFIG_VALUES as DEFAULTS } from '../interfaces/configuration.interface.js';
import { ConfigLoader } from './services/config-loader.service.js';
import {
ConfigMerger,
@@ -167,8 +168,8 @@ export class ConfigManager {
getModelConfig() {
return (
this.config.models || {
main: 'claude-3-5-sonnet-20241022',
fallback: 'gpt-4o-mini'
main: DEFAULTS.MODELS.MAIN,
fallback: DEFAULTS.MODELS.FALLBACK
}
);
}

View File

@@ -399,8 +399,8 @@ export interface IConfigurationManager {
*/
export const DEFAULT_CONFIG_VALUES = {
MODELS: {
MAIN: 'claude-3-5-sonnet-20241022',
FALLBACK: 'gpt-4o-mini'
MAIN: 'claude-sonnet-4-20250514',
FALLBACK: 'claude-3-7-sonnet-20250219'
},
TASKS: {
DEFAULT_PRIORITY: 'medium' as TaskPriority,

View File

@@ -27,21 +27,21 @@ const DEFAULTS = {
models: {
main: {
provider: 'anthropic',
modelId: 'claude-3-7-sonnet-20250219',
modelId: 'claude-sonnet-4-20250514',
maxTokens: 64000,
temperature: 0.2
},
research: {
provider: 'perplexity',
modelId: 'sonar-pro',
modelId: 'sonar',
maxTokens: 8700,
temperature: 0.1
},
fallback: {
// No default fallback provider/model initially
provider: 'anthropic',
modelId: 'claude-3-5-sonnet',
maxTokens: 8192, // Default parameters if fallback IS configured
modelId: 'claude-3-7-sonnet-20250219',
maxTokens: 120000, // Default parameters if fallback IS configured
temperature: 0.2
}
},

View File

@@ -119,20 +119,20 @@ const DEFAULT_CONFIG = {
models: {
main: {
provider: 'anthropic',
modelId: 'claude-3-7-sonnet-20250219',
modelId: 'claude-sonnet-4-20250514',
maxTokens: 64000,
temperature: 0.2
},
research: {
provider: 'perplexity',
modelId: 'sonar-pro',
modelId: 'sonar',
maxTokens: 8700,
temperature: 0.1
},
fallback: {
provider: 'anthropic',
modelId: 'claude-3-5-sonnet',
maxTokens: 8192,
modelId: 'claude-3-7-sonnet-20250219',
maxTokens: 120000,
temperature: 0.2
}
},