fix(cli): Fix interactive model setup (models --setup)
The interactive model setup triggered by `task-master models --setup` was previously attempting to call non-existent setter functions (`setMainModel`, etc.) in `config-manager.js`, leading to errors and preventing configuration updates.

This commit refactors the `--setup` logic within the `models` command handler in `scripts/modules/commands.js`. It now correctly:

- Loads the current configuration using `getConfig()`.
- Updates the appropriate sections of the loaded configuration object based on user selections from `inquirer`.
- Saves the modified configuration using the existing `writeConfig()` function from `config-manager.js`.
- Handles disabling the fallback model correctly.
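The fix reduces to a single load-modify-write cycle instead of per-role setter calls. Below is a minimal sketch of that flow, assuming (as the diff below shows) that `getConfig()` returns the parsed `.taskmasterconfig` object and `writeConfig()` persists it and returns a success flag; `applySelection` is a hypothetical helper named here only for illustration, not a function from the codebase:

```js
// Sketch of the load-modify-write flow this commit adopts
// (not a verbatim excerpt of commands.js).
import { getConfig, writeConfig } from './config-manager.js';

// Hypothetical helper: overwrite only provider/modelId for one role,
// preserving existing params such as maxTokens and temperature.
function applySelection(config, role, provider, modelId) {
	config.models[role] = {
		...config.models[role],
		provider,
		modelId
	};
}

const config = getConfig(); // 1. Load the current configuration once

// 2. Apply the user's selections per role (example values from the diff below)
applySelection(config, 'main', 'anthropic', 'claude-3-7-sonnet-20250219');
applySelection(config, 'research', 'perplexity', 'sonar-pro');

// 3. Persist a single time, only after all selections are applied
if (!writeConfig(config)) {
	console.error('Failed to save updated model configuration.');
}
```

Writing once at the end means a failure in any selection leaves the on-disk config untouched, which is the behavior the refactored handler below enforces via `setupSuccess` and `setupConfigModified`.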
@@ -1,30 +1,30 @@
 {
 	"models": {
 		"main": {
-			"provider": "google",
-			"modelId": "gemini-2.5-pro-latest",
+			"provider": "anthropic",
+			"modelId": "claude-3-7-sonnet-20250219",
 			"maxTokens": 256000,
 			"temperature": 0.2
 		},
 		"research": {
 			"provider": "perplexity",
 			"modelId": "sonar-pro",
 			"maxTokens": 8700,
 			"temperature": 0.1
 		},
 		"fallback": {
 			"provider": "anthropic",
-			"modelId": "claude-3-7-sonnet-20250219",
+			"modelId": "claude-3.5-sonnet-20240620",
 			"maxTokens": 128000,
 			"temperature": 0.2
 		}
 	},
 	"global": {
 		"logLevel": "info",
 		"debug": false,
 		"defaultSubtasks": 5,
 		"defaultPriority": "medium",
 		"projectName": "Task Master",
 		"ollamaBaseUrl": "http://localhost:11434/api"
 	}
 }
@@ -10,7 +10,6 @@ import boxen from 'boxen';
 import fs from 'fs';
 import https from 'https';
 import inquirer from 'inquirer';
 import ora from 'ora';
 import Table from 'cli-table3';
 
 import { log, readJSON } from './utils.js';
@@ -1775,69 +1774,124 @@ function registerCommands(programInstance) {
 			]);
 
 			let setupSuccess = true;
+			let setupConfigModified = false; // Track if config was changed during setup
+			const configToUpdate = getConfig(); // Load the current config
 
 			// Set Main Model
 			if (answers.mainModel) {
-				if (
-					!setMainModel(answers.mainModel.provider, answers.mainModel.id)
-				) {
-					console.error(chalk.red('Failed to set main model.'));
-					setupSuccess = false;
+				const modelData = findModelData(answers.mainModel.id); // Find full model data
+				if (modelData) {
+					configToUpdate.models.main = {
+						...configToUpdate.models.main, // Keep existing params
+						provider: modelData.provider,
+						modelId: modelData.id
+					};
+					console.log(
+						chalk.blue(
+							`Selected main model: ${modelData.provider} / ${modelData.id}`
+						)
+					);
+					setupConfigModified = true;
 				} else {
-					// Success message printed by setMainModel
+					console.error(
+						chalk.red(
+							`Error finding model data for main selection: ${answers.mainModel.id}`
+						)
+					);
+					setupSuccess = false;
 				}
 			}
 
 			// Set Research Model
 			if (answers.researchModel) {
-				if (
-					!setResearchModel(
-						answers.researchModel.provider,
-						answers.researchModel.id
-					)
-				) {
-					console.error(chalk.red('Failed to set research model.'));
-					setupSuccess = false;
+				const modelData = findModelData(answers.researchModel.id); // Find full model data
+				if (modelData) {
+					configToUpdate.models.research = {
+						...configToUpdate.models.research, // Keep existing params
+						provider: modelData.provider,
+						modelId: modelData.id
+					};
+					console.log(
+						chalk.blue(
+							`Selected research model: ${modelData.provider} / ${modelData.id}`
+						)
+					);
+					setupConfigModified = true;
 				} else {
-					// Success message printed by setResearchModel
+					console.error(
+						chalk.red(
+							`Error finding model data for research selection: ${answers.researchModel.id}`
+						)
+					);
+					setupSuccess = false;
 				}
 			}
 
 			// Set Fallback Model
 			if (answers.fallbackModel) {
-				if (
-					!setFallbackModel(
-						answers.fallbackModel.provider,
-						answers.fallbackModel.id
-					)
-				) {
-					console.error(chalk.red('Failed to set fallback model.'));
-					setupSuccess = false;
-				} else {
+				// User selected a specific fallback model
+				const modelData = findModelData(answers.fallbackModel.id); // Find full model data
+				if (modelData) {
+					configToUpdate.models.fallback = {
+						...configToUpdate.models.fallback, // Keep existing params
+						provider: modelData.provider,
+						modelId: modelData.id
+					};
 					console.log(
-						chalk.green(
-							`Fallback model set to: ${answers.fallbackModel.provider} / ${answers.fallbackModel.id}`
+						chalk.blue(
+							`Selected fallback model: ${modelData.provider} / ${modelData.id}`
 						)
 					);
+					setupConfigModified = true;
+				} else {
+					console.error(
+						chalk.red(
+							`Error finding model data for fallback selection: ${answers.fallbackModel.id}`
+						)
+					);
+					setupSuccess = false;
 				}
 			} else {
-				// User selected None - attempt to remove fallback from config
-				const config = readConfig();
-				if (config.models.fallback) {
-					delete config.models.fallback;
-					if (!writeConfig(config)) {
-						console.error(
-							chalk.red('Failed to remove fallback model configuration.')
-						);
-						setupSuccess = false;
-					} else {
-						console.log(chalk.green('Fallback model disabled.'));
-					}
-				}
+				// User selected None - ensure fallback is disabled
+				if (
+					configToUpdate.models.fallback?.provider ||
+					configToUpdate.models.fallback?.modelId
+				) {
+					// Only mark as modified if something was actually cleared
+					configToUpdate.models.fallback = {
+						...configToUpdate.models.fallback, // Keep existing params like maxTokens
+						provider: undefined, // Or null
+						modelId: undefined // Or null
+					};
+					console.log(chalk.blue('Fallback model disabled.'));
+					setupConfigModified = true;
+				}
 			}
 
-			if (setupSuccess) {
+			// Save the updated configuration if changes were made and no errors occurred
+			if (setupConfigModified && setupSuccess) {
+				if (!writeConfig(configToUpdate)) {
+					console.error(
+						chalk.red(
+							'Failed to save updated model configuration to .taskmasterconfig.'
+						)
+					);
+					setupSuccess = false;
+				}
+			} else if (!setupSuccess) {
+				console.error(
+					chalk.red(
+						'Errors occurred during model selection. Configuration not saved.'
+					)
+				);
+			}
+
+			if (setupSuccess && setupConfigModified) {
 				console.log(chalk.green.bold('\nModel setup complete!'));
+			} else if (setupSuccess && !setupConfigModified) {
+				console.log(
+					chalk.yellow('\nNo changes made to model configuration.')
+				);
 			}
 			return; // Exit after setup
 		}
 
@@ -1097,7 +1097,7 @@ const completion = await generateTextService({
 ### Details:
 
 
-## 22. Implement `openai.js` Provider Module using Vercel AI SDK [pending]
+## 22. Implement `openai.js` Provider Module using Vercel AI SDK [deferred]
 ### Dependencies: None
 ### Description: Create and implement the `openai.js` module within `src/ai-providers/`. This module should contain functions to interact with the OpenAI API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. (Optional, implement if OpenAI models are needed).
 ### Details:
@@ -1186,37 +1186,37 @@ function checkProviderCapability(provider, capability) {
 ```
 </info added on 2025-04-20T03:52:13.065Z>
 
-## 24. Implement `google.js` Provider Module using Vercel AI SDK [pending]
+## 24. Implement `google.js` Provider Module using Vercel AI SDK [deferred]
 ### Dependencies: None
 ### Description: Create and implement the `google.js` module within `src/ai-providers/`. This module should contain functions to interact with Google AI models (e.g., Gemini) using the **Vercel AI SDK (`@ai-sdk/google`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
 ### Details:
 
 
-## 25. Implement `ollama.js` Provider Module [pending]
+## 25. Implement `ollama.js` Provider Module [deferred]
 ### Dependencies: None
 ### Description: Create and implement the `ollama.js` module within `src/ai-providers/`. This module should contain functions to interact with local Ollama models using the **`ollama-ai-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.
 ### Details:
 
 
-## 26. Implement `mistral.js` Provider Module using Vercel AI SDK [pending]
+## 26. Implement `mistral.js` Provider Module using Vercel AI SDK [deferred]
 ### Dependencies: None
 ### Description: Create and implement the `mistral.js` module within `src/ai-providers/`. This module should contain functions to interact with Mistral AI models using the **Vercel AI SDK (`@ai-sdk/mistral`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
 ### Details:
 
 
-## 27. Implement `azure.js` Provider Module using Vercel AI SDK [pending]
+## 27. Implement `azure.js` Provider Module using Vercel AI SDK [deferred]
 ### Dependencies: None
 ### Description: Create and implement the `azure.js` module within `src/ai-providers/`. This module should contain functions to interact with Azure OpenAI models using the **Vercel AI SDK (`@ai-sdk/azure`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
 ### Details:
 
 
-## 28. Implement `openrouter.js` Provider Module [pending]
+## 28. Implement `openrouter.js` Provider Module [deferred]
 ### Dependencies: None
 ### Description: Create and implement the `openrouter.js` module within `src/ai-providers/`. This module should contain functions to interact with various models via OpenRouter using the **`@openrouter/ai-sdk-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.
 ### Details:
 
 
-## 29. Implement `xai.js` Provider Module using Vercel AI SDK [pending]
+## 29. Implement `xai.js` Provider Module using Vercel AI SDK [deferred]
 ### Dependencies: None
 ### Description: Create and implement the `xai.js` module within `src/ai-providers/`. This module should contain functions to interact with xAI models (e.g., Grok) using the **Vercel AI SDK (`@ai-sdk/xai`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
 ### Details:
@@ -1469,7 +1469,7 @@ The new AI architecture introduces a clear separation between sensitive credenti
 ### Details:
 
 
-## 34. Audit and Standardize Env Variable Access [pending]
+## 34. Audit and Standardize Env Variable Access [done]
 ### Dependencies: None
 ### Description: Audit the entire codebase (core modules, provider modules, utilities) to ensure all accesses to environment variables (API keys, configuration flags) consistently use a standardized resolution function (like `resolveEnvVariable` or a new utility) that checks `process.env` first and then `session.env` if available. Refactor any direct `process.env` access where `session.env` should also be considered.
 ### Details:
 
@@ -2991,7 +2991,7 @@
 	"title": "Implement `openai.js` Provider Module using Vercel AI SDK",
 	"description": "Create and implement the `openai.js` module within `src/ai-providers/`. This module should contain functions to interact with the OpenAI API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. (Optional, implement if OpenAI models are needed).",
 	"details": "",
-	"status": "pending",
+	"status": "deferred",
 	"dependencies": [],
 	"parentTaskId": 61
 },
@@ -3011,7 +3011,7 @@
 	"title": "Implement `google.js` Provider Module using Vercel AI SDK",
 	"description": "Create and implement the `google.js` module within `src/ai-providers/`. This module should contain functions to interact with Google AI models (e.g., Gemini) using the **Vercel AI SDK (`@ai-sdk/google`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.",
 	"details": "",
-	"status": "pending",
+	"status": "deferred",
 	"dependencies": [],
 	"parentTaskId": 61
 },
@@ -3020,7 +3020,7 @@
 	"title": "Implement `ollama.js` Provider Module",
 	"description": "Create and implement the `ollama.js` module within `src/ai-providers/`. This module should contain functions to interact with local Ollama models using the **`ollama-ai-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.",
 	"details": "",
-	"status": "pending",
+	"status": "deferred",
 	"dependencies": [],
 	"parentTaskId": 61
 },
@@ -3029,7 +3029,7 @@
 	"title": "Implement `mistral.js` Provider Module using Vercel AI SDK",
 	"description": "Create and implement the `mistral.js` module within `src/ai-providers/`. This module should contain functions to interact with Mistral AI models using the **Vercel AI SDK (`@ai-sdk/mistral`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.",
 	"details": "",
-	"status": "pending",
+	"status": "deferred",
 	"dependencies": [],
 	"parentTaskId": 61
 },
@@ -3038,7 +3038,7 @@
 	"title": "Implement `azure.js` Provider Module using Vercel AI SDK",
 	"description": "Create and implement the `azure.js` module within `src/ai-providers/`. This module should contain functions to interact with Azure OpenAI models using the **Vercel AI SDK (`@ai-sdk/azure`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.",
 	"details": "",
-	"status": "pending",
+	"status": "deferred",
 	"dependencies": [],
 	"parentTaskId": 61
 },
@@ -3047,7 +3047,7 @@
 	"title": "Implement `openrouter.js` Provider Module",
 	"description": "Create and implement the `openrouter.js` module within `src/ai-providers/`. This module should contain functions to interact with various models via OpenRouter using the **`@openrouter/ai-sdk-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.",
 	"details": "",
-	"status": "pending",
+	"status": "deferred",
 	"dependencies": [],
 	"parentTaskId": 61
 },
@@ -3056,7 +3056,7 @@
 	"title": "Implement `xai.js` Provider Module using Vercel AI SDK",
 	"description": "Create and implement the `xai.js` module within `src/ai-providers/`. This module should contain functions to interact with xAI models (e.g., Grok) using the **Vercel AI SDK (`@ai-sdk/xai`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.",
 	"details": "",
-	"status": "pending",
+	"status": "deferred",
 	"dependencies": [],
 	"parentTaskId": 61
 },
@@ -3107,7 +3107,7 @@
 	"title": "Audit and Standardize Env Variable Access",
 	"description": "Audit the entire codebase (core modules, provider modules, utilities) to ensure all accesses to environment variables (API keys, configuration flags) consistently use a standardized resolution function (like `resolveEnvVariable` or a new utility) that checks `process.env` first and then `session.env` if available. Refactor any direct `process.env` access where `session.env` should also be considered.",
 	"details": "\n\n<info added on 2025-04-20T03:50:25.632Z>\nThis audit should distinguish between two types of configuration:\n\n1. **Sensitive credentials (API keys)**: These should exclusively use the `resolveEnvVariable` pattern to check both `process.env` and `session.env`. Verify that no API keys are hardcoded or accessed through direct `process.env` references.\n\n2. **Application configuration**: All non-credential settings should be migrated to use the centralized `.taskmasterconfig` system via the `config-manager.js` getters. This includes:\n - Model selections and role assignments\n - Parameter settings (temperature, maxTokens, etc.)\n - Logging configuration\n - Default behaviors and fallbacks\n\nImplementation notes:\n- Create a comprehensive inventory of all environment variable accesses\n- Categorize each as either credential or application configuration\n- For credentials: standardize on `resolveEnvVariable` pattern\n- For app config: migrate to appropriate `config-manager.js` getter methods\n- Document any exceptions that require special handling\n- Add validation to prevent regression (e.g., ESLint rules against direct `process.env` access)\n\nThis separation ensures security best practices for credentials while centralizing application configuration for better maintainability.\n</info added on 2025-04-20T03:50:25.632Z>\n\n<info added on 2025-04-20T06:58:36.731Z>\n**Plan & Analysis (Added on 2023-05-15T14:32:18.421Z)**:\n\n**Goal:**\n1. **Standardize API Key Access**: Ensure all accesses to sensitive API keys (Anthropic, Perplexity, etc.) consistently use a standard function (like `resolveEnvVariable(key, session)`) that checks both `process.env` and `session.env`. Replace direct `process.env.API_KEY` access.\n2. **Centralize App Configuration**: Ensure all non-sensitive configuration values (model names, temperature, logging levels, max tokens, etc.) are accessed *only* through `scripts/modules/config-manager.js` getters. Eliminate direct `process.env` access for these.\n\n**Strategy: Inventory -> Analyze -> Target -> Refine**\n\n1. **Inventory (`process.env` Usage):** Performed grep search (`rg \"process\\.env\"`). Results indicate widespread usage across multiple files.\n2. **Analysis (Categorization of Usage):**\n * **API Keys (Credentials):** ANTHROPIC_API_KEY, PERPLEXITY_API_KEY, OPENAI_API_KEY, etc. found in `task-manager.js`, `ai-services.js`, `commands.js`, `dependency-manager.js`, `ai-client-utils.js`, test files. Needs replacement with `resolveEnvVariable(key, session)`.\n * **App Configuration:** PERPLEXITY_MODEL, TEMPERATURE, MAX_TOKENS, MODEL, DEBUG, LOG_LEVEL, DEFAULT_*, PROJECT_*, TASK_MASTER_PROJECT_ROOT found in `task-manager.js`, `ai-services.js`, `scripts/init.js`, `mcp-server/src/logger.js`, `mcp-server/src/tools/utils.js`, test files. Needs replacement with `config-manager.js` getters.\n * **System/Environment Info:** HOME, USERPROFILE, SHELL in `scripts/init.js`. Needs review (e.g., `os.homedir()` preference).\n * **Test Code/Setup:** Extensive usage in test files. Acceptable for mocking, but code under test must use standard methods. May require test adjustments.\n * **Helper Functions/Comments:** Definitions/comments about `resolveEnvVariable`. No action needed.\n3. **Target (High-Impact Areas & Initial Focus):**\n * High Impact: `task-manager.js` (~5800 lines), `ai-services.js` (~1500 lines).\n * Medium Impact: `commands.js`, Test Files.\n * Foundational: `ai-client-utils.js`, `config-manager.js`, `utils.js`.\n * **Initial Target Command:** `task-master analyze-complexity` for a focused, end-to-end refactoring exercise.\n\n4. **Refine (Plan for `analyze-complexity`):**\n a. **Trace Code Path:** Identify functions involved in `analyze-complexity`.\n b. **Refactor API Key Access:** Replace direct `process.env.PERPLEXITY_API_KEY` with `resolveEnvVariable(key, session)`.\n c. **Refactor App Config Access:** Replace direct `process.env` for model name, temp, tokens with `config-manager.js` getters.\n d. **Verify `resolveEnvVariable`:** Ensure robustness, especially handling potentially undefined `session`.\n e. **Test:** Verify command works locally and via MCP context (if possible). Update tests.\n\nThis piecemeal approach aims to establish the refactoring pattern before tackling the entire codebase.\n</info added on 2025-04-20T06:58:36.731Z>",
-	"status": "pending",
+	"status": "done",
 	"dependencies": [],
 	"parentTaskId": 61
 },
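Subtask 34 above describes a standard resolution function that checks `process.env` first and then `session.env`. A minimal sketch of that described pattern follows; the actual `resolveEnvVariable` implementation in the codebase may differ in signature and edge-case handling:

```js
// Sketch of the resolution order subtask 34 describes: process.env first,
// then session.env when a session object is present. Illustrative only.
function resolveEnvVariable(key, session = null) {
	if (process.env[key] !== undefined) {
		return process.env[key];
	}
	if (session?.env && session.env[key] !== undefined) {
		return session.env[key];
	}
	return undefined;
}

// Example: prefer a locally exported key, fall back to the MCP session's env.
// const apiKey = resolveEnvVariable('ANTHROPIC_API_KEY', session);
```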