Compare commits


1 Commit

Author:  Ralph Khreish
SHA1:    1c76e9c5fd
Message: feat: improve config-manager max tokens for openrouter and kimi-k2 model
Date:    2025-07-23 18:53:30 +03:00
3 changed files with 27 additions and 6 deletions

Changed file 1/3: new changeset entry (added by this commit)

@@ -0,0 +1,10 @@
+---
+"task-master-ai": patch
+---
+
+Fix max_tokens limits for OpenRouter and Groq models
+
+- Add special handling in config-manager.js for custom OpenRouter models to use a conservative default of 32,768 max_tokens
+- Update qwen/qwen-turbo model max_tokens from 1,000,000 to 32,768 to match OpenRouter's actual limits
+- Fix moonshotai/kimi-k2-instruct max_tokens to 16,384 to match Groq's actual limit (fixes #1028)
+- This prevents "maximum context length exceeded" errors when using OpenRouter models not in our supported models list
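
The conservative-default rule these notes describe can be sketched in isolation. A minimal sketch, illustrative only: the function name, its parameters, and the bare 'openrouter' string are assumptions, not the repository's actual API; the real logic lives in getParametersForRole in config-manager.js, shown in the next file.

// Sketch of the changeset's clamping rule (illustrative names).
const OPENROUTER_DEFAULT_MAX_TOKENS = 32768;

function resolveEffectiveMaxTokens(providerName, isKnownModel, roleMaxTokens) {
	if (!isKnownModel && providerName === 'openrouter') {
		// Custom OpenRouter model not in the supported list: cap the role default.
		return Math.min(roleMaxTokens, OPENROUTER_DEFAULT_MAX_TOKENS);
	}
	// Everything else keeps the role default from config.
	return roleMaxTokens;
}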

Changed file 2/3: config-manager.js

@@ -584,10 +584,21 @@ function getParametersForRole(role, explicitRoot = null) {
 			);
 		}
 	} else {
-		log(
-			'debug',
-			`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
-		);
+		// Special handling for custom OpenRouter models
+		if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
+			// Use a conservative default for OpenRouter models not in our list
+			const openrouterDefault = 32768;
+			effectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);
+			log(
+				'debug',
+				`Custom OpenRouter model ${modelId} detected. Using conservative max_tokens: ${effectiveMaxTokens}`
+			);
+		} else {
+			log(
+				'debug',
+				`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
+			);
+		}
 	}
 } catch (lookupError) {
 	log(
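
To make the new branch's effect concrete, here are a couple of hypothetical values (invented for illustration) run through the same Math.min clamp:

// Hypothetical values run through the clamp used by the new branch.
const openrouterDefault = 32768;
console.log(Math.min(100000, openrouterDefault)); // 32768: an oversized role default is capped
console.log(Math.min(8192, openrouterDefault)); // 8192: defaults below the cap pass through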

Changed file 3/3: supported model definitions (JSON)

@@ -333,7 +333,7 @@
 				"output": 3.0
 			},
 			"allowed_roles": ["main", "fallback"],
-			"max_tokens": 131072,
+			"max_tokens": 16384,
 			"supported": true
 		},
 		{
@@ -701,7 +701,7 @@
 				"output": 0.2
 			},
 			"allowed_roles": ["main", "fallback"],
-			"max_tokens": 1000000,
+			"max_tokens": 32768,
 			"supported": true
 		},
 		{
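
Cross-referencing the hunks with the changeset entry: the first (131072 → 16384) matches the moonshotai/kimi-k2-instruct fix for Groq, and the second (1000000 → 32768) matches the qwen/qwen-turbo fix for OpenRouter. Abridged as data (model-to-limit mapping only; the surrounding JSON fields are omitted):

// The two corrected limits, cross-referenced with the changeset (abridged).
const updatedMaxTokens = {
	'moonshotai/kimi-k2-instruct': 16384, // was 131072; Groq's actual limit (#1028)
	'qwen/qwen-turbo': 32768 // was 1000000; OpenRouter's actual limit
};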