diff --git a/.taskmasterconfig b/.taskmasterconfig
index a4ef94ef..a38f2bd8 100644
--- a/.taskmasterconfig
+++ b/.taskmasterconfig
@@ -1,31 +1,31 @@
 {
-  "models": {
-    "main": {
-      "provider": "anthropic",
-      "modelId": "claude-3-7-sonnet-20250219",
-      "maxTokens": 100000,
-      "temperature": 0.2
-    },
-    "research": {
-      "provider": "xai",
-      "modelId": "grok-3",
-      "maxTokens": 8700,
-      "temperature": 0.1
-    },
-    "fallback": {
-      "provider": "anthropic",
-      "modelId": "claude-3-5-sonnet-20241022",
-      "maxTokens": 120000,
-      "temperature": 0.2
-    }
-  },
-  "global": {
-    "logLevel": "info",
-    "debug": false,
-    "defaultSubtasks": 5,
-    "defaultPriority": "medium",
-    "projectName": "Taskmaster",
-    "ollamaBaseUrl": "http://localhost:11434/api",
-    "azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
-  }
-}
\ No newline at end of file
+	"models": {
+		"main": {
+			"provider": "anthropic",
+			"modelId": "claude-3-7-sonnet-20250219",
+			"maxTokens": 100000,
+			"temperature": 0.2
+		},
+		"research": {
+			"provider": "perplexity",
+			"modelId": "sonar-pro",
+			"maxTokens": 8700,
+			"temperature": 0.1
+		},
+		"fallback": {
+			"provider": "anthropic",
+			"modelId": "claude-3-5-sonnet-20241022",
+			"maxTokens": 120000,
+			"temperature": 0.2
+		}
+	},
+	"global": {
+		"logLevel": "info",
+		"debug": false,
+		"defaultSubtasks": 5,
+		"defaultPriority": "medium",
+		"projectName": "Taskmaster",
+		"ollamaBaseUrl": "http://localhost:11434/api",
+		"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
+	}
+}
diff --git a/scripts/modules/config-manager.js b/scripts/modules/config-manager.js
index 0a29fec4..64f98b13 100644
--- a/scripts/modules/config-manager.js
+++ b/scripts/modules/config-manager.js
@@ -424,12 +424,13 @@ function getParametersForRole(role, explicitRoot = null) {
 
 /**
  * Checks if the API key for a given provider is set in the environment.
- * Checks process.env first, then session.env if session is provided.
+ * Checks session.env first (if session is provided), then the .env file at projectRoot (if provided), then process.env.
  * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
  * @param {object|null} [session=null] - The MCP session object (optional).
+ * @param {string|null} [projectRoot=null] - The project root directory (optional, for .env file check).
  * @returns {boolean} True if the API key is set, false otherwise.
  */
-function isApiKeySet(providerName, session = null) {
+function isApiKeySet(providerName, session = null, projectRoot = null) {
 	// Define the expected environment variable name for each provider
 	if (providerName?.toLowerCase() === 'ollama') {
 		return true; // Indicate key status is effectively "OK"
@@ -454,7 +455,7 @@
 	}
 
 	const envVarName = keyMap[providerKey];
-	const apiKeyValue = resolveEnvVariable(envVarName, session);
+	const apiKeyValue = resolveEnvVariable(envVarName, session, projectRoot);
 
 	// Check if the key exists, is not empty, and is not a placeholder
 	return (
diff --git a/scripts/modules/task-manager/models.js b/scripts/modules/task-manager/models.js
index cb058e74..1ee63175 100644
--- a/scripts/modules/task-manager/models.js
+++ b/scripts/modules/task-manager/models.js
@@ -77,7 +77,7 @@ function fetchOpenRouterModels() {
  * @returns {Object} RESTful response with current model configuration
  */
 async function getModelConfiguration(options = {}) {
-	const { mcpLog, projectRoot } = options;
+	const { mcpLog, projectRoot, session } = options;
 
 	const report = (level, ...args) => {
 		if (mcpLog && typeof mcpLog[level] === 'function') {
@@ -125,12 +125,16 @@
 		const fallbackModelId = getFallbackModelId(projectRoot);
 
 		// Check API keys
-		const mainCliKeyOk = isApiKeySet(mainProvider);
+		const mainCliKeyOk = isApiKeySet(mainProvider, session, projectRoot);
 		const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider, projectRoot);
-		const researchCliKeyOk = isApiKeySet(researchProvider);
+		const researchCliKeyOk = isApiKeySet(
+			researchProvider,
+			session,
+			projectRoot
+		);
 		const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider, projectRoot);
 		const fallbackCliKeyOk = fallbackProvider
-			? isApiKeySet(fallbackProvider)
+			? isApiKeySet(fallbackProvider, session, projectRoot)
 			: true;
 		const fallbackMcpKeyOk = fallbackProvider
 			? getMcpApiKeyStatus(fallbackProvider, projectRoot)
@@ -523,7 +527,7 @@ async function getApiKeyStatusReport(options = {}) {
 	); // Ollama is not a provider, it's a service, doesn't need an api key usually
 	const statusReport = providersToCheck.map((provider) => {
 		// Use provided projectRoot for MCP status check
-		const cliOk = isApiKeySet(provider, session); // Pass session for CLI check too
+		const cliOk = isApiKeySet(provider, session, projectRoot); // Pass session and projectRoot for CLI check
 		const mcpOk = getMcpApiKeyStatus(provider, projectRoot);
 		return {
 			provider,
diff --git a/scripts/modules/utils.js b/scripts/modules/utils.js
index dd6f4eb6..9303ccf9 100644
--- a/scripts/modules/utils.js
+++ b/scripts/modules/utils.js
@@ -6,6 +6,7 @@
 import fs from 'fs';
 import path from 'path';
 import chalk from 'chalk';
+import dotenv from 'dotenv';
 
 // Import specific config getters needed here
 import { getLogLevel, getDebugFlag } from './config-manager.js';
@@ -14,16 +15,47 @@ let silentMode = false;
 
 // --- Environment Variable Resolution Utility ---
 /**
- * Resolves an environment variable by checking process.env first, then session.env.
- * @param {string} varName - The name of the environment variable.
- * @param {string|null} session - The MCP session object (optional).
+ * Resolves an environment variable's value.
+ * Precedence:
+ * 1. session.env (if session provided)
+ * 2. .env file at projectRoot (if projectRoot provided)
+ * 3. process.env
+ * @param {string} key - The environment variable key.
+ * @param {object|null} [session=null] - The MCP session object.
+ * @param {string|null} [projectRoot=null] - The project root directory (for .env fallback).
  * @returns {string|undefined} The value of the environment variable or undefined if not found.
  */
-function resolveEnvVariable(varName, session) {
-	// Ensure session and session.env exist before attempting access
-	const sessionValue =
-		session && session.env ? session.env[varName] : undefined;
-	return process.env[varName] ?? sessionValue;
-}
+function resolveEnvVariable(key, session = null, projectRoot = null) {
+	// 1. Check session.env
+	if (session?.env?.[key]) {
+		return session.env[key];
+	}
+
+	// 2. Read .env file at projectRoot
+	if (projectRoot) {
+		const envPath = path.join(projectRoot, '.env');
+		if (fs.existsSync(envPath)) {
+			try {
+				const envFileContent = fs.readFileSync(envPath, 'utf-8');
+				const parsedEnv = dotenv.parse(envFileContent); // Use dotenv to parse
+				if (parsedEnv && parsedEnv[key]) {
+					// console.log(`DEBUG: Found key ${key} in ${envPath}`); // Optional debug log
+					return parsedEnv[key];
+				}
+			} catch (error) {
+				// Log error but don't crash, just proceed as if key wasn't found in file
+				log('warn', `Could not read or parse ${envPath}: ${error.message}`);
+			}
+		}
+	}
+
+	// 3. Fallback: Check process.env
+	if (process.env[key]) {
+		return process.env[key];
+	}
+
+	// Not found anywhere
+	return undefined;
+}
 
 // --- Project Root Finding Utility ---
diff --git a/tasks/task_075.txt b/tasks/task_075.txt
new file mode 100644
index 00000000..80b79ea5
--- /dev/null
+++ b/tasks/task_075.txt
@@ -0,0 +1,11 @@
+# Task ID: 75
+# Title: Integrate Google Search Grounding for Research Role
+# Status: pending
+# Dependencies: None
+# Priority: medium
+# Description: Update the AI service layer to enable Google Search Grounding specifically when a Google model is used in the 'research' role.
+# Details:
+**Goal:** Conditionally enable Google Search Grounding based on the AI role.\n\n**Implementation Plan:**\n\n1. **Modify `ai-services-unified.js`:** Update `generateTextService`, `streamTextService`, and `generateObjectService`.\n2. **Conditional Logic:** Inside these functions, check if `providerName === 'google'` AND `role === 'research'`.\n3. **Construct `providerOptions`:** If the condition is met, create an options object:\n ```javascript\n let providerSpecificOptions = {};\n if (providerName === 'google' && role === 'research') {\n log('info', 'Enabling Google Search Grounding for research role.');\n providerSpecificOptions = {\n google: {\n useSearchGrounding: true,\n // Optional: Add dynamic retrieval for compatible models\n // dynamicRetrievalConfig: { mode: 'MODE_DYNAMIC' } \n }\n };\n }\n ```\n4. **Pass Options to SDK:** Pass `providerSpecificOptions` to the Vercel AI SDK functions (`generateText`, `streamText`, `generateObject`) via the `providerOptions` parameter:\n ```javascript\n const { text, ... } = await generateText({\n // ... other params\n providerOptions: providerSpecificOptions \n });\n ```\n5. **Update `supported-models.json`:** Ensure Google models intended for research (e.g., `gemini-1.5-pro-latest`, `gemini-1.5-flash-latest`) include `'research'` in their `allowed_roles` array.\n\n**Rationale:** This approach maintains the clear separation between 'main' and 'research' roles, ensuring grounding is only activated when explicitly requested via the `--research` flag or when the research model is invoked.
+
+# Test Strategy:
+1. Configure a Google model (e.g., gemini-1.5-flash-latest) as the 'research' model in `.taskmasterconfig`.\n2. Run a command with the `--research` flag (e.g., `task-master add-task --prompt='Latest news on AI SDK 4.2' --research`).\n3. Verify logs show 'Enabling Google Search Grounding'.\n4. Check if the task output incorporates recent information.\n5. Configure the same Google model as the 'main' model.\n6. Run a command *without* the `--research` flag.\n7. Verify logs *do not* show grounding being enabled.\n8. Add unit tests to `ai-services-unified.test.js` to verify the conditional logic for adding `providerOptions`. Ensure mocks correctly simulate different roles and providers.
diff --git a/tasks/tasks.json b/tasks/tasks.json
index 8352b1ec..d966c16a 100644
--- a/tasks/tasks.json
+++ b/tasks/tasks.json
@@ -3937,6 +3937,17 @@
 						"parentTaskId": 74
 					}
 				]
-			}
+			},
+			{
+				"id": 75,
+				"title": "Integrate Google Search Grounding for Research Role",
+				"description": "Update the AI service layer to enable Google Search Grounding specifically when a Google model is used in the 'research' role.",
+				"details": "**Goal:** Conditionally enable Google Search Grounding based on the AI role.\\n\\n**Implementation Plan:**\\n\\n1. **Modify `ai-services-unified.js`:** Update `generateTextService`, `streamTextService`, and `generateObjectService`.\\n2. **Conditional Logic:** Inside these functions, check if `providerName === 'google'` AND `role === 'research'`.\\n3. **Construct `providerOptions`:** If the condition is met, create an options object:\\n ```javascript\\n let providerSpecificOptions = {};\\n if (providerName === 'google' && role === 'research') {\\n log('info', 'Enabling Google Search Grounding for research role.');\\n providerSpecificOptions = {\\n google: {\\n useSearchGrounding: true,\\n // Optional: Add dynamic retrieval for compatible models\\n // dynamicRetrievalConfig: { mode: 'MODE_DYNAMIC' } \\n }\\n };\\n }\\n ```\\n4. **Pass Options to SDK:** Pass `providerSpecificOptions` to the Vercel AI SDK functions (`generateText`, `streamText`, `generateObject`) via the `providerOptions` parameter:\\n ```javascript\\n const { text, ... } = await generateText({\\n // ... other params\\n providerOptions: providerSpecificOptions \\n });\\n ```\\n5. **Update `supported-models.json`:** Ensure Google models intended for research (e.g., `gemini-1.5-pro-latest`, `gemini-1.5-flash-latest`) include `'research'` in their `allowed_roles` array.\\n\\n**Rationale:** This approach maintains the clear separation between 'main' and 'research' roles, ensuring grounding is only activated when explicitly requested via the `--research` flag or when the research model is invoked.",
+				"testStrategy": "1. Configure a Google model (e.g., gemini-1.5-flash-latest) as the 'research' model in `.taskmasterconfig`.\\n2. Run a command with the `--research` flag (e.g., `task-master add-task --prompt='Latest news on AI SDK 4.2' --research`).\\n3. Verify logs show 'Enabling Google Search Grounding'.\\n4. Check if the task output incorporates recent information.\\n5. Configure the same Google model as the 'main' model.\\n6. Run a command *without* the `--research` flag.\\n7. Verify logs *do not* show grounding being enabled.\\n8. Add unit tests to `ai-services-unified.test.js` to verify the conditional logic for adding `providerOptions`. Ensure mocks correctly simulate different roles and providers.",
+				"status": "pending",
+				"dependencies": [],
+				"priority": "medium",
+				"subtasks": []
+			}
 		]
 	}
\ No newline at end of file
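
Review note (not part of the patch): the sketch below exercises the new `resolveEnvVariable` precedence — session.env, then the `.env` file at `projectRoot`, then `process.env`. It assumes `resolveEnvVariable` is exported from `scripts/modules/utils.js`; the temp directory, key values, and import path are illustrative.

```javascript
// Illustrative sketch only — not part of the diff above.
import fs from 'fs';
import os from 'os';
import path from 'path';
import { resolveEnvVariable } from './scripts/modules/utils.js'; // assumed export/path

// Hypothetical project root containing a .env file.
const projectRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'tm-env-'));
fs.writeFileSync(path.join(projectRoot, '.env'), 'PERPLEXITY_API_KEY=from-dotenv\n');

process.env.PERPLEXITY_API_KEY = 'from-process-env';
const session = { env: { PERPLEXITY_API_KEY: 'from-session' } };

// 1. session.env wins when a session is provided.
console.log(resolveEnvVariable('PERPLEXITY_API_KEY', session, projectRoot)); // 'from-session'

// 2. Without a session, the .env file at projectRoot is read (via dotenv.parse).
console.log(resolveEnvVariable('PERPLEXITY_API_KEY', null, projectRoot)); // 'from-dotenv'

// 3. With neither session nor projectRoot, only process.env is consulted.
console.log(resolveEnvVariable('PERPLEXITY_API_KEY')); // 'from-process-env'
```

This is also why `isApiKeySet` and `getModelConfiguration` now thread `projectRoot` through: without it, keys that live only in the project's `.env` file would be reported as missing whenever no session is present.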
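For task 75, here is a minimal sketch of the conditional described in its implementation plan, roughly as it might sit inside `ai-services-unified.js`. The wrapper function name and parameters are hypothetical, and whether the SDK honors `useSearchGrounding` via `providerOptions` (as the plan proposes) rather than via the Google provider's model settings should be verified against the Vercel AI SDK documentation.

```javascript
// Sketch only — mirrors the plan in task 75; surrounding names are assumptions.
import { generateText } from 'ai';

// Stand-in for the project's logger.
const log = (level, ...args) => console.log(`[${level}]`, ...args);

async function generateResearchText({ model, providerName, role, systemPrompt, prompt }) {
	// Enable grounding only for Google models acting in the 'research' role.
	let providerSpecificOptions = {};
	if (providerName === 'google' && role === 'research') {
		log('info', 'Enabling Google Search Grounding for research role.');
		providerSpecificOptions = {
			google: {
				useSearchGrounding: true
				// Optional for compatible models:
				// dynamicRetrievalConfig: { mode: 'MODE_DYNAMIC' }
			}
		};
	}

	const { text } = await generateText({
		model, // e.g. a google('gemini-1.5-flash-latest') model instance
		system: systemPrompt,
		prompt,
		providerOptions: providerSpecificOptions
	});
	return text;
}
```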