Compare commits

...

10 Commits

Author SHA1 Message Date
Ralph Khreish
e4ae526337 Update .changeset/thick-squids-attend.md
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2025-07-24 16:45:07 +03:00
Ralph Khreish
3e50b53d56 Update .changeset/thick-squids-attend.md
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2025-07-24 15:41:18 +02:00
Ralph Khreish
9ef66c764e Update .changeset/thick-squids-attend.md
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2025-07-24 14:21:29 +02:00
Ralph Khreish
d364a60f14 chore: run format 2025-07-24 15:11:43 +03:00
Ralph Khreish
2e7c2c40c7 chore: fix git issue 2025-07-24 15:10:49 +03:00
Ralph Khreish
292bc3ff49 feat: fix CLI UI error when trying to display non-existent complexity report 2025-07-24 15:08:35 +03:00
github-actions[bot]
31b8407dbc chore: rc version bump 2025-07-23 16:29:49 +00:00
Ralph Khreish
2df4f13f65 chore: improve pre-release CI to be able to release more than one release candidate (#1036)
* chore: improve pre-release CI to be able to release more than one release candidate

* chore: implement requested changes from coderabbit

* chore: apply requested changes
2025-07-23 18:28:17 +02:00
github-actions[bot]
a37017e5a5 docs: Auto-update and format models.md 2025-07-23 16:03:40 +00:00
Ralph Khreish
fb7d588137 feat: improve config-manager max tokens for openrouter and kimi-k2 model (#1035) 2025-07-23 18:03:26 +02:00
10 changed files with 101 additions and 26 deletions

16
.changeset/pre.json Normal file
View File

@@ -0,0 +1,16 @@
{
"mode": "pre",
"tag": "rc",
"initialVersions": {
"task-master-ai": "0.21.0",
"extension": "0.20.0"
},
"changesets": [
"fix-gemini-cli-dependency",
"fresh-bugs-squashed",
"happy-sites-stay",
"orange-pots-add",
"quiet-rabbits-bathe",
"swift-otters-argue"
]
}

View File

@@ -0,0 +1,10 @@
---
"task-master-ai": patch
---
Fix max_tokens limits for OpenRouter and Groq models
- Add special handling in config-manager.js for custom OpenRouter models to use a conservative default of 32,768 max_tokens
- Update qwen/qwen-turbo model max_tokens from 1,000,000 to 32,768 to match OpenRouter's actual limits
- Fix moonshotai/kimi-k2-instruct max_tokens to 16,384 to match Groq's actual limit (fixes #1028)
- This prevents "maximum context length exceeded" errors when using OpenRouter models not in our supported models list

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---
Prompt to generate a complexity report when it is missing

View File

@@ -16,7 +16,7 @@ jobs:
- uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
cache: "npm"
- name: Cache node_modules
uses: actions/cache@v4
@@ -32,10 +32,13 @@ jobs:
run: npm ci
timeout-minutes: 2
- name: Enter RC mode
- name: Enter RC mode (if not already in RC mode)
run: |
npx changeset pre exit || true
npx changeset pre enter rc
# ensure we're in the right pre-mode (tag "rc")
if [ ! -f .changeset/pre.json ] \
|| [ "$(jq -r '.tag' .changeset/pre.json 2>/dev/null || echo '')" != "rc" ]; then
npx changeset pre enter rc
fi
- name: Version RC packages
run: npx changeset version
@@ -51,12 +54,9 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Exit RC mode
run: npx changeset pre exit
- name: Commit & Push changes
uses: actions-js/push@master
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}
message: 'chore: rc version bump'
message: "chore: rc version bump"

View File

@@ -1,5 +1,44 @@
# task-master-ai
## 0.22.0-rc.0
### Minor Changes
- [#1032](https://github.com/eyaltoledano/claude-task-master/pull/1032) [`4423119`](https://github.com/eyaltoledano/claude-task-master/commit/4423119a5ec53958c9dffa8bf564da8be7a2827d) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add comprehensive Kiro IDE integration with autonomous task management hooks
- **Kiro Profile**: Added full support for Kiro IDE with automatic installation of 7 Taskmaster agent hooks
- **Hook-Driven Workflow**: Introduced natural language automation hooks that eliminate manual task status updates
- **Automatic Hook Installation**: Hooks are now automatically copied to `.kiro/hooks/` when running `task-master rules add kiro`
- **Language-Agnostic Support**: All hooks support multiple programming languages (JS, Python, Go, Rust, Java, etc.)
- **Frontmatter Transformation**: Kiro rules use simplified `inclusion: always` format instead of Cursor's complex frontmatter
- **Special Rule**: Added `taskmaster_hooks_workflow.md` that guides AI assistants to prefer hook-driven completion
Key hooks included:
- Task Dependency Auto-Progression: Automatically starts tasks when dependencies complete
- Code Change Task Tracker: Updates task progress as you save files
- Test Success Task Completer: Marks tasks done when tests pass
- Daily Standup Assistant: Provides personalized task status summaries
- PR Readiness Checker: Validates task completion before creating pull requests
- Complexity Analyzer: Auto-expands complex tasks into manageable subtasks
- Git Commit Task Linker: Links commits to tasks for better traceability
This creates a truly autonomous development workflow where task management happens naturally as you code!
### Patch Changes
- [#1033](https://github.com/eyaltoledano/claude-task-master/pull/1033) [`7b90568`](https://github.com/eyaltoledano/claude-task-master/commit/7b9056832653464f934c91c22997077065d738c4) Thanks [@ben-vargas](https://github.com/ben-vargas)! - Fix compatibility with @google/gemini-cli-core v0.1.12+ by updating ai-sdk-provider-gemini-cli to v0.1.1.
- [#1038](https://github.com/eyaltoledano/claude-task-master/pull/1038) [`77cc5e4`](https://github.com/eyaltoledano/claude-task-master/commit/77cc5e4537397642f2664f61940a101433ee6fb4) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix 'expand --all' and 'show' commands to correctly handle tag contexts for complexity reports and task display.
- [#1025](https://github.com/eyaltoledano/claude-task-master/pull/1025) [`8781794`](https://github.com/eyaltoledano/claude-task-master/commit/8781794c56d454697fc92c88a3925982d6b81205) Thanks [@joedanz](https://github.com/joedanz)! - Clean up remaining automatic task file generation calls
- [#1035](https://github.com/eyaltoledano/claude-task-master/pull/1035) [`fb7d588`](https://github.com/eyaltoledano/claude-task-master/commit/fb7d588137e8c53b0d0f54bd1dd8d387648583ee) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix max_tokens limits for OpenRouter and Groq models
- Add special handling in config-manager.js for custom OpenRouter models to use a conservative default of 32,768 max_tokens
- Update qwen/qwen-turbo model max_tokens from 1,000,000 to 32,768 to match OpenRouter's actual limits
- Fix moonshotai/kimi-k2-instruct max_tokens to 16,384 to match Groq's actual limit (fixes #1028)
- This prevents "maximum context length exceeded" errors when using OpenRouter models not in our supported models list
- [#1027](https://github.com/eyaltoledano/claude-task-master/pull/1027) [`6ae66b2`](https://github.com/eyaltoledano/claude-task-master/commit/6ae66b2afbfe911340fa25e0236c3db83deaa7eb) Thanks [@andreswebs](https://github.com/andreswebs)! - Fix VSCode profile generation to use correct rule file names (using `.instructions.md` extension instead of `.md`) and front-matter properties (removing the unsupported `alwaysApply` property from instructions files' front-matter).
## 0.21.0
### Minor Changes

View File

@@ -1,4 +1,4 @@
# Available Models as of July 22, 2025
# Available Models as of July 23, 2025
## Main Models

View File

@@ -1,6 +1,6 @@
{
"name": "task-master-ai",
"version": "0.21.0",
"version": "0.22.0-rc.0",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",

View File

@@ -584,10 +584,21 @@ function getParametersForRole(role, explicitRoot = null) {
);
}
} else {
log(
'debug',
`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
);
// Special handling for custom OpenRouter models
if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
// Use a conservative default for OpenRouter models not in our list
const openrouterDefault = 32768;
effectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);
log(
'debug',
`Custom OpenRouter model ${modelId} detected. Using conservative max_tokens: ${effectiveMaxTokens}`
);
} else {
log(
'debug',
`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
);
}
}
} catch (lookupError) {
log(

View File

@@ -333,7 +333,7 @@
"output": 3.0
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 131072,
"max_tokens": 16384,
"supported": true
},
{
@@ -701,7 +701,7 @@
"output": 0.2
},
"allowed_roles": ["main", "fallback"],
"max_tokens": 1000000,
"max_tokens": 32768,
"supported": true
},
{

View File

@@ -9,6 +9,7 @@ import boxen from 'boxen';
import ora from 'ora';
import Table from 'cli-table3';
import gradient from 'gradient-string';
import readline from 'readline';
import {
log,
findTaskById,
@@ -1682,18 +1683,15 @@ async function displayComplexityReport(reportPath) {
)
);
const readline = require('readline').createInterface({
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
const answer = await new Promise((resolve) => {
readline.question(
chalk.cyan('Generate complexity report? (y/n): '),
resolve
);
rl.question(chalk.cyan('Generate complexity report? (y/n): '), resolve);
});
readline.close();
rl.close();
if (answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes') {
// Call the analyze-complexity command
@@ -1974,8 +1972,6 @@ async function confirmTaskOverwrite(tasksPath) {
)
);
// Use dynamic import to get the readline module
const readline = await import('readline');
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
@@ -2463,8 +2459,6 @@ async function displayMultipleTasksSummary(
)
);
// Use dynamic import for readline
const readline = await import('readline');
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout