Compare commits

3 Commits: fix/openro ... task-maste

| Author | SHA1 | Date |
|---|---|---|
|  | 2df4f13f65 |  |
|  | a37017e5a5 |  |
|  | fb7d588137 |  |
.changeset/quiet-rabbits-bathe.md (Normal file, 10 changed lines)

@@ -0,0 +1,10 @@
+---
+"task-master-ai": patch
+---
+
+Fix max_tokens limits for OpenRouter and Groq models
+
+- Add special handling in config-manager.js for custom OpenRouter models to use a conservative default of 32,768 max_tokens
+- Update qwen/qwen-turbo model max_tokens from 1,000,000 to 32,768 to match OpenRouter's actual limits
+- Fix moonshotai/kimi-k2-instruct max_tokens to 16,384 to match Groq's actual limit (fixes #1028)
+- This prevents "maximum context length exceeded" errors when using OpenRouter models not in our supported models list
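The clamping behaviour this changeset describes can be sketched in a few lines. Everything below is illustrative: the helper name, the limits table, and the provider string are assumptions made for the example, while the numeric ceilings (16,384 and 32,768) are the ones quoted in the changeset above.

```js
// Illustrative sketch of the max_tokens clamping described above — not the project's actual API.
const KNOWN_LIMITS = {
  'moonshotai/kimi-k2-instruct': 16384, // Groq's actual limit
  'qwen/qwen-turbo': 32768 // OpenRouter's actual limit
};
const OPENROUTER_FALLBACK = 32768; // conservative default for OpenRouter models not in the list

// Hypothetical helper: pick a max_tokens value the provider will accept.
function effectiveMaxTokens(provider, modelId, requested) {
  if (modelId in KNOWN_LIMITS) return Math.min(requested, KNOWN_LIMITS[modelId]);
  if (provider === 'openrouter') return Math.min(requested, OPENROUTER_FALLBACK);
  return requested; // other providers/models: keep the role default unchanged
}

console.log(effectiveMaxTokens('openrouter', 'some/custom-model', 120000)); // 32768
console.log(effectiveMaxTokens('groq', 'moonshotai/kimi-k2-instruct', 120000)); // 16384
```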
.github/workflows/pre-release.yml (vendored, 16 changed lines)

@@ -16,7 +16,7 @@ jobs:
 - uses: actions/setup-node@v4
   with:
     node-version: 20
-    cache: 'npm'
+    cache: "npm"

 - name: Cache node_modules
   uses: actions/cache@v4
@@ -32,10 +32,13 @@
   run: npm ci
   timeout-minutes: 2

-- name: Enter RC mode
+- name: Enter RC mode (if not already in RC mode)
   run: |
-    npx changeset pre exit || true
-    npx changeset pre enter rc
+    # ensure we’re in the right pre-mode (tag "rc")
+    if [ ! -f .changeset/pre.json ] \
+       || [ "$(jq -r '.tag' .changeset/pre.json 2>/dev/null || echo '')" != "rc" ]; then
+      npx changeset pre enter rc
+    fi

 - name: Version RC packages
   run: npx changeset version
@@ -51,12 +54,9 @@
     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     NPM_TOKEN: ${{ secrets.NPM_TOKEN }}

-- name: Exit RC mode
-  run: npx changeset pre exit
-
 - name: Commit & Push changes
   uses: actions-js/push@master
   with:
     github_token: ${{ secrets.GITHUB_TOKEN }}
     branch: ${{ github.ref }}
-    message: 'chore: rc version bump'
+    message: "chore: rc version bump"
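The reworked "Enter RC mode" step only (re)enters Changesets pre-release mode when `.changeset/pre.json` is missing or records a tag other than `rc`. A rough Node.js equivalent of that shell/jq guard, for illustration only (the workflow itself shells out to jq and npx):

```js
// Rough Node.js equivalent of the shell/jq guard above — illustrative only.
const fs = require('fs');

function needsRcEnter(preJsonPath = '.changeset/pre.json') {
  if (!fs.existsSync(preJsonPath)) return true; // not in any pre-release mode
  try {
    const pre = JSON.parse(fs.readFileSync(preJsonPath, 'utf8'));
    return pre.tag !== 'rc'; // in pre mode, but under a different tag
  } catch {
    return true; // unreadable file: behaves like the `|| echo ''` fallback in the shell version
  }
}

if (needsRcEnter()) {
  // The workflow runs `npx changeset pre enter rc` at this point.
  console.log('would enter RC pre-release mode');
}
```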
@@ -1,4 +1,4 @@
-# Available Models as of July 22, 2025
+# Available Models as of July 23, 2025

 ## Main Models

@@ -584,10 +584,21 @@ function getParametersForRole(role, explicitRoot = null) {
 			);
 		}
 	} else {
-		log(
-			'debug',
-			`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
-		);
+		// Special handling for custom OpenRouter models
+		if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
+			// Use a conservative default for OpenRouter models not in our list
+			const openrouterDefault = 32768;
+			effectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);
+			log(
+				'debug',
+				`Custom OpenRouter model ${modelId} detected. Using conservative max_tokens: ${effectiveMaxTokens}`
+			);
+		} else {
+			log(
+				'debug',
+				`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
+			);
+		}
 	}
 } catch (lookupError) {
 	log(
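As a quick illustration of the new branch's effect (example numbers, not values taken from the config): if the role default were 120,000 tokens, a custom OpenRouter model would now be clamped to the conservative ceiling.

```js
// Example only — shows the clamp performed by the new OpenRouter branch above.
const roleMaxTokens = 120000; // hypothetical role default
const openrouterDefault = 32768; // conservative ceiling used in the diff
const effectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);
console.log(effectiveMaxTokens); // 32768 — sent to OpenRouter instead of 120000
```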
@@ -333,7 +333,7 @@
 			"output": 3.0
 		},
 		"allowed_roles": ["main", "fallback"],
-		"max_tokens": 131072,
+		"max_tokens": 16384,
 		"supported": true
 	},
 	{

@@ -701,7 +701,7 @@
 			"output": 0.2
 		},
 		"allowed_roles": ["main", "fallback"],
-		"max_tokens": 1000000,
+		"max_tokens": 32768,
 		"supported": true
 	},
 	{