Compare commits

3 Commits

Author          SHA1         Message                                                                                Date
Ralph Khreish   598e64566a   chore: apply requested changes                                                         2025-07-23 19:19:37 +03:00
Ralph Khreish   f32f1ccaa7   chore: implement requested changes from coderabbit                                     2025-07-23 19:10:38 +03:00
Ralph Khreish   0b31bdf910   chore: improve pre-release CI to be able to release more than one release candidate   2025-07-23 19:01:33 +03:00
4 changed files with 7 additions and 28 deletions

View File

@@ -1,10 +0,0 @@
----
-"task-master-ai": patch
----
-
-Fix max_tokens limits for OpenRouter and Groq models
-
-- Add special handling in config-manager.js for custom OpenRouter models to use a conservative default of 32,768 max_tokens
-- Update qwen/qwen-turbo model max_tokens from 1,000,000 to 32,768 to match OpenRouter's actual limits
-- Fix moonshotai/kimi-k2-instruct max_tokens to 16,384 to match Groq's actual limit (fixes #1028)
-- This prevents "maximum context length exceeded" errors when using OpenRouter models not in our supported models list
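
The deleted changeset above describes the clamping behavior these commits back out. As a minimal standalone sketch of that clamp (the function and constant names here are illustrative, not from the repo; the real logic lived in config-manager.js, shown in the removed hunk below), it amounts to taking the minimum of the role default and a conservative cap:

// Sketch only: mirrors the clamp described in the changeset above.
// clampOpenRouterMaxTokens and OPENROUTER_DEFAULT_MAX_TOKENS are
// hypothetical names, not identifiers from the repository.
const OPENROUTER_DEFAULT_MAX_TOKENS = 32768; // conservative cap from the changeset

function clampOpenRouterMaxTokens(roleMaxTokens, isCustomOpenRouterModel) {
	// Custom OpenRouter models not in the supported-models list get capped
	// so requests stay within OpenRouter's actual context limits; known
	// models pass the role default through unchanged.
	return isCustomOpenRouterModel
		? Math.min(roleMaxTokens, OPENROUTER_DEFAULT_MAX_TOKENS)
		: roleMaxTokens;
}

// Example: a role default of 1,000,000 tokens is clamped to 32,768.
console.log(clampOpenRouterMaxTokens(1000000, true)); // 32768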

View File

@@ -1,4 +1,4 @@
-# Available Models as of July 23, 2025
+# Available Models as of July 22, 2025
 
 ## Main Models

View File

@@ -583,23 +583,12 @@ function getParametersForRole(role, explicitRoot = null) {
 				`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`
 			);
 		}
 	} else {
-		// Special handling for custom OpenRouter models
-		if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
-			// Use a conservative default for OpenRouter models not in our list
-			const openrouterDefault = 32768;
-			effectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);
-			log(
-				'debug',
-				`Custom OpenRouter model ${modelId} detected. Using conservative max_tokens: ${effectiveMaxTokens}`
-			);
-		} else {
 			log(
 				'debug',
 				`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
 			);
-		}
 	}
 } catch (lookupError) {
 	log(
 		'warn',

View File

@@ -333,7 +333,7 @@
 			"output": 3.0
 		},
 		"allowed_roles": ["main", "fallback"],
-		"max_tokens": 16384,
+		"max_tokens": 131072,
 		"supported": true
 	},
 	{
@@ -701,7 +701,7 @@
 			"output": 0.2
 		},
 		"allowed_roles": ["main", "fallback"],
-		"max_tokens": 32768,
+		"max_tokens": 1000000,
 		"supported": true
 	},
 	{