feat: add Claude Code provider support

Implements Claude Code as a new AI provider that uses the Claude Code CLI
without requiring API keys. This enables users to leverage Claude models
through their local Claude Code installation.

Key changes:
- Add complete AI SDK v1 implementation for Claude Code provider
  - Custom SDK with streaming/non-streaming support
  - Session management for conversation continuity
  - JSON extraction for object generation mode
  - Support for advanced settings (maxTurns, allowedTools, etc.)

- Integrate Claude Code into Task Master's provider system
  - Update ai-services-unified.js to handle keyless authentication
  - Add provider to supported-models.json with opus/sonnet models
  - Ensure correct maxTokens values are applied (opus: 32000, sonnet: 64000)

- Fix maxTokens configuration issue
  - Add max_tokens property to getAvailableModels() output
  - Update setModel() to properly handle claude-code models
  - Create update-config-tokens.js utility for init process

- Add comprehensive documentation
  - User guide with configuration examples
  - Advanced settings explanation and future integration options

The implementation maintains full backward compatibility with existing
providers while adding seamless Claude Code support to all Task Master
commands.
This commit is contained in:
Ben Vargas
2025-06-16 12:20:28 -06:00
committed by Ralph Khreish
parent 21d988691b
commit 5c726dc542
2 changed files with 62 additions and 0 deletions

View File

@@ -30,6 +30,7 @@ import {
convertAllRulesToProfileRules,
getRulesProfile
} from '../src/utils/rule-transformer.js';
import { updateConfigMaxTokens } from './modules/update-config-tokens.js';
import { execSync } from 'child_process';
import {
@@ -622,6 +623,14 @@ function createProjectStructure(
...replacements
}
);
// Update config.json with correct maxTokens values from supported-models.json
const configPath = path.join(targetDir, TASKMASTER_CONFIG_FILE);
if (updateConfigMaxTokens(configPath)) {
log('info', 'Updated config with correct maxTokens values');
} else {
log('warn', 'Could not update maxTokens in config');
}
// Copy .gitignore with GitTasks preference
try {

View File

@@ -0,0 +1,53 @@
/**
* update-config-tokens.js
* Updates config.json with correct maxTokens values from supported-models.json
*/
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { dirname } from 'path';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
* Updates the config file with correct maxTokens values from supported-models.json
* @param {string} configPath - Path to the config.json file to update
* @returns {boolean} True if successful, false otherwise
*/
/**
 * Updates the config file with correct maxTokens values from supported-models.json
 *
 * For each model role (main, research, fallback) present in the config, looks up
 * the configured model in supported-models.json (shipped alongside this module)
 * and, when a numeric `max_tokens` value exists for it, copies that value onto
 * the role's `maxTokens`. The config file is always rewritten (2-space indented
 * JSON), even if no role was changed.
 *
 * @param {string} configPath - Path to the config.json file to update
 * @returns {boolean} True if the config was read and written successfully, false otherwise
 */
export function updateConfigMaxTokens(configPath) {
  try {
    // supported-models.json lives next to this module
    const supportedModelsPath = path.join(__dirname, 'supported-models.json');
    const supportedModels = JSON.parse(
      fs.readFileSync(supportedModelsPath, 'utf-8')
    );

    const config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));

    // Update each role's maxTokens if the model exists in supported-models.json
    for (const role of ['main', 'research', 'fallback']) {
      const roleConfig = config.models?.[role];
      if (!roleConfig) continue;

      // Guard against a missing or malformed (non-array) provider entry so one
      // bad entry skips this role instead of throwing and aborting every role.
      const providerModels = supportedModels[roleConfig.provider];
      if (!Array.isArray(providerModels)) continue;

      const modelData = providerModels.find((m) => m.id === roleConfig.modelId);
      // Require a numeric value so a malformed entry can't corrupt the config
      if (typeof modelData?.max_tokens === 'number') {
        roleConfig.maxTokens = modelData.max_tokens;
      }
    }

    // Write back the updated config
    fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
    return true;
  } catch (error) {
    // Best-effort utility: report and signal failure rather than throwing,
    // since the init flow treats a false return as a non-fatal warning.
    console.error('Error updating config maxTokens:', error.message);
    return false;
  }
}