fix(ai-services): add logic for API key checking in fallback sequence
.changeset/tall-mails-sing.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Ensures that the second fallback, which tries the research role when both the main and fallback roles fail, correctly checks whether the API key is set before attempting the call. Closes #421, #519.
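For orientation, here is a condensed sketch of the role loop this patch changes — not the verbatim source; `getProviderForRole` is a hypothetical stand-in for the per-role config getters (`getMainProvider`, etc.):

```js
// Condensed sketch of the fallback loop after this patch — not the verbatim source.
for (const currentRole of sequence) {
	const providerName = getProviderForRole(currentRole, effectiveProjectRoot); // stand-in
	// New in this patch: skip roles whose provider has no usable API key
	// (Ollama is exempt because it does not require a key).
	if (
		providerName?.toLowerCase() !== 'ollama' &&
		!isApiKeySet(providerName, session, effectiveProjectRoot)
	) {
		log('warn', `Skipping role '${currentRole}': API key not set or invalid.`);
		continue; // fall through to the next role (main -> fallback -> research)
	}
	// ...resolve key, build messages, and attempt the provider call...
}
```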
@@ -18,7 +18,8 @@ import {
	getUserId,
	MODEL_MAP,
	getDebugFlag,
-	getBaseUrlForRole
+	getBaseUrlForRole,
+	isApiKeySet
} from './config-manager.js';
import { log, resolveEnvVariable, isSilentMode } from './utils.js';

@@ -322,11 +323,8 @@ async function _unifiedServiceRunner(serviceType, params) {
	});
}

// Determine the effective project root (passed in or detected if needed by config getters)
-const { findProjectRoot: detectProjectRoot } = await import('./utils.js'); // Dynamically import if needed
+const { findProjectRoot: detectProjectRoot } = await import('./utils.js');
const effectiveProjectRoot = projectRoot || detectProjectRoot();

// Get userId from config - ensure effectiveProjectRoot is passed
const userId = getUserId(effectiveProjectRoot);

let sequence;
@@ -362,8 +360,6 @@ async function _unifiedServiceRunner(serviceType, params) {
try {
	log('info', `New AI service call with role: ${currentRole}`);

	// 1. Get Config: Provider, Model, Parameters for the current role
	// Pass effectiveProjectRoot to config getters
	if (currentRole === 'main') {
		providerName = getMainProvider(effectiveProjectRoot);
		modelId = getMainModelId(effectiveProjectRoot);
@@ -396,11 +392,24 @@ async function _unifiedServiceRunner(serviceType, params) {
	continue;
}

// Pass effectiveProjectRoot to getParametersForRole
// Check if API key is set for the current provider and role (excluding 'ollama')
if (providerName?.toLowerCase() !== 'ollama') {
	if (!isApiKeySet(providerName, session, effectiveProjectRoot)) {
		log(
			'warn',
			`Skipping role '${currentRole}' (Provider: ${providerName}): API key not set or invalid.`
		);
		lastError =
			lastError ||
			new Error(
				`API key for provider '${providerName}' (role: ${currentRole}) is not set.`
			);
		continue; // Skip to the next role in the sequence
	}
}

roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);

// 2. Get Provider Function Set
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
if (!providerFnSet) {
	log(
@@ -413,7 +422,6 @@ async function _unifiedServiceRunner(serviceType, params) {
	continue;
}

// Use the original service type to get the function
providerApiFn = providerFnSet[serviceType];
if (typeof providerApiFn !== 'function') {
	log(
@@ -427,16 +435,13 @@ async function _unifiedServiceRunner(serviceType, params) {
	);
	continue;
}

// 3. Resolve API Key (will throw if required and missing)
// Pass effectiveProjectRoot to _resolveApiKey
apiKey = _resolveApiKey(
	providerName?.toLowerCase(),
	session,
	effectiveProjectRoot
);

// 4. Construct Messages Array
const messages = [];
if (systemPrompt) {
	messages.push({ role: 'system', content: systemPrompt });
@@ -461,14 +466,11 @@ async function _unifiedServiceRunner(serviceType, params) {
// }

if (prompt) {
	// Ensure prompt exists before adding
	messages.push({ role: 'user', content: prompt });
} else {
	// Throw an error if the prompt is missing, as it's essential
	throw new Error('User prompt content is missing.');
}

// 5. Prepare call parameters (using messages array)
const callParams = {
	apiKey,
	modelId,
@@ -480,7 +482,6 @@ async function _unifiedServiceRunner(serviceType, params) {
	...restApiParams
};

// 6. Attempt the call with retries
providerResponse = await _attemptProviderCallWithRetries(
	providerApiFn,
	callParams,
@@ -489,8 +490,6 @@ async function _unifiedServiceRunner(serviceType, params) {
	currentRole
);

// --- Log Telemetry & Capture Data ---
// Use providerResponse which contains the usage data directly for text/object
if (userId && providerResponse && providerResponse.usage) {
	try {
		telemetryData = await logAiUsage({
@@ -512,26 +511,22 @@ async function _unifiedServiceRunner(serviceType, params) {
		`Cannot log telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data. (May be expected for streams)`
	);
}
// --- End Log Telemetry ---

// --- Extract the correct main result based on serviceType ---
let finalMainResult;
if (serviceType === 'generateText') {
	finalMainResult = providerResponse.text;
} else if (serviceType === 'generateObject') {
	finalMainResult = providerResponse.object;
} else if (serviceType === 'streamText') {
-	finalMainResult = providerResponse; // Return the whole stream object
+	finalMainResult = providerResponse;
} else {
	log(
		'error',
		`Unknown serviceType in _unifiedServiceRunner: ${serviceType}`
	);
-	finalMainResult = providerResponse; // Default to returning the whole object as fallback
+	finalMainResult = providerResponse;
}
// --- End Main Result Extraction ---

// Return a composite object including the extracted main result and telemetry data
return {
	mainResult: finalMainResult,
	telemetryData: telemetryData
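Callers consume this composite return value directly; a usage sketch (`generateTextService` is the exported wrapper exercised by the tests below):

```js
// Usage sketch of the composite result returned above.
const { mainResult, telemetryData } = await generateTextService({
	role: 'main',
	prompt: 'Summarize the open tasks',
	session // optional MCP session whose env supplies API keys
});
```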
@@ -564,9 +559,7 @@ async function _unifiedServiceRunner(serviceType, params) {
	}
}

// If loop completes, all roles failed
log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
// Throw a new error with the cleaner message from the last failure
throw new Error(lastCleanErrorMessage);
}
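The `isApiKeySet` implementation itself is not part of this diff. A minimal sketch consistent with the config-manager tests further down (key-map lookup, Ollama exemption, and rejection of empty or `..._KEY_HERE` placeholder values) — behavior inferred, not the actual source:

```js
// Sketch only — inferred from the config-manager tests below, not the real config-manager.js.
function isApiKeySet(providerName, session = null, projectRoot = null) {
	const provider = providerName?.toLowerCase();
	if (provider === 'ollama') return true; // Special case: no API key required
	const keyMap = {
		anthropic: 'ANTHROPIC_API_KEY',
		openai: 'OPENAI_API_KEY',
		perplexity: 'PERPLEXITY_API_KEY'
		// ...remaining providers omitted in this sketch
	};
	const envVarName = keyMap[provider];
	if (!envVarName) {
		log('warn', `Unknown provider name: ${providerName}`);
		return false;
	}
	const value = resolveEnvVariable(envVarName, session, projectRoot);
	// Reject missing, empty/whitespace, and placeholder values like 'YOUR_..._KEY_HERE'
	return Boolean(value && value.trim() && !value.trim().endsWith('KEY_HERE'));
}
```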
tasks/task_089.txt (new file, 23 lines)
@@ -0,0 +1,23 @@
# Task ID: 89
# Title: Task #89: Introduce Prioritize Command with Enhanced Priority Levels
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Implement a prioritize command with --up/--down/--priority/--id flags and shorthand equivalents (-u/-d/-p/-i). Add 'lowest' and 'highest' priority levels, updating CLI output accordingly.
# Details:
The new prioritize command should allow users to adjust task priorities using the specified flags. The --up and --down flags will modify the priority relative to the current level, while --priority sets an absolute priority. The --id flag specifies which task to prioritize. Shorthand equivalents (-u/-d/-p/-i) should be supported for user convenience.

The priority levels should now include 'lowest', 'low', 'medium', 'high', and 'highest'. The CLI output should be updated to reflect these new priority levels accurately.

Considerations:
- Ensure backward compatibility with existing commands and configurations.
- Update the help documentation to include the new command and its usage.
- Implement proper error handling for invalid priority levels or missing flags.

# Test Strategy:
To verify task completion, perform the following tests:
1. Test each flag (--up, --down, --priority, --id) individually and in combination to ensure they function as expected.
2. Verify that shorthand equivalents (-u, -d, -p, -i) work correctly.
3. Check that the new priority levels ('lowest' and 'highest') are recognized and displayed properly in CLI output.
4. Test error handling for invalid inputs (e.g., non-existent task IDs, invalid priority levels).
5. Ensure that the help command displays accurate information about the new prioritize command.
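A hypothetical sketch of the relative/absolute priority adjustment the task describes — `PRIORITY_LEVELS` and `adjustPriority` are illustrative names, not code from this commit:

```js
// Illustrative only — names and shapes here are assumptions, not repo code.
const PRIORITY_LEVELS = ['lowest', 'low', 'medium', 'high', 'highest'];

function adjustPriority(current, { up = 0, down = 0, priority } = {}) {
	if (priority !== undefined) {
		if (!PRIORITY_LEVELS.includes(priority)) {
			throw new Error(`Invalid priority level: ${priority}`);
		}
		return priority; // --priority sets an absolute level
	}
	// --up / --down move relative to the current level, clamped to the valid range
	const index = PRIORITY_LEVELS.indexOf(current);
	const next = Math.max(0, Math.min(PRIORITY_LEVELS.length - 1, index + up - down));
	return PRIORITY_LEVELS[next];
}

// adjustPriority('medium', { up: 1 })            -> 'high'
// adjustPriority('high', { priority: 'lowest' }) -> 'lowest'
```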
@@ -5302,6 +5302,17 @@
				"testStrategy": "Test with large sets of tasks to verify performance improvements."
			}
		]
	},
	{
		"id": 89,
		"title": "Task #89: Introduce Prioritize Command with Enhanced Priority Levels",
		"description": "Implement a prioritize command with --up/--down/--priority/--id flags and shorthand equivalents (-u/-d/-p/-i). Add 'lowest' and 'highest' priority levels, updating CLI output accordingly.",
		"details": "The new prioritize command should allow users to adjust task priorities using the specified flags. The --up and --down flags will modify the priority relative to the current level, while --priority sets an absolute priority. The --id flag specifies which task to prioritize. Shorthand equivalents (-u/-d/-p/-i) should be supported for user convenience.\n\nThe priority levels should now include 'lowest', 'low', 'medium', 'high', and 'highest'. The CLI output should be updated to reflect these new priority levels accurately.\n\nConsiderations:\n- Ensure backward compatibility with existing commands and configurations.\n- Update the help documentation to include the new command and its usage.\n- Implement proper error handling for invalid priority levels or missing flags.",
		"testStrategy": "To verify task completion, perform the following tests:\n1. Test each flag (--up, --down, --priority, --id) individually and in combination to ensure they function as expected.\n2. Verify that shorthand equivalents (-u, -d, -p, -i) work correctly.\n3. Check that the new priority levels ('lowest' and 'highest') are recognized and displayed properly in CLI output.\n4. Test error handling for invalid inputs (e.g., non-existent task IDs, invalid priority levels).\n5. Ensure that the help command displays accurate information about the new prioritize command.",
		"status": "pending",
		"dependencies": [],
		"priority": "medium",
		"subtasks": []
	}
]
}
@@ -10,6 +10,7 @@ const mockGetFallbackModelId = jest.fn();
const mockGetParametersForRole = jest.fn();
const mockGetUserId = jest.fn();
const mockGetDebugFlag = jest.fn();
const mockIsApiKeySet = jest.fn();

// --- Mock MODEL_MAP Data ---
// Provide a simplified structure sufficient for cost calculation tests
@@ -29,6 +30,12 @@ const mockModelMap = {
			id: 'test-research-model',
			cost_per_1m_tokens: { input: 1, output: 1, currency: 'USD' }
		}
	],
	openai: [
		{
			id: 'test-openai-model',
			cost_per_1m_tokens: { input: 2, output: 6, currency: 'USD' }
		}
	]
	// Add other providers/models if needed for specific tests
};
@@ -45,7 +52,8 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
	getUserId: mockGetUserId,
	getDebugFlag: mockGetDebugFlag,
	MODEL_MAP: mockModelMap,
-	getBaseUrlForRole: mockGetBaseUrlForRole
+	getBaseUrlForRole: mockGetBaseUrlForRole,
+	isApiKeySet: mockIsApiKeySet
}));

// Mock AI Provider Modules
@@ -67,7 +75,24 @@ jest.unstable_mockModule('../../src/ai-providers/perplexity.js', () => ({
	generatePerplexityObject: mockGeneratePerplexityObject
}));

// ... Mock other providers (google, openai, etc.) similarly ...
const mockGenerateOpenAIText = jest.fn();
const mockStreamOpenAIText = jest.fn();
const mockGenerateOpenAIObject = jest.fn();
jest.unstable_mockModule('../../src/ai-providers/openai.js', () => ({
	generateOpenAIText: mockGenerateOpenAIText,
	streamOpenAIText: mockStreamOpenAIText,
	generateOpenAIObject: mockGenerateOpenAIObject
}));

// Mock ollama provider (for special case testing - API key is optional)
const mockGenerateOllamaText = jest.fn();
const mockStreamOllamaText = jest.fn();
const mockGenerateOllamaObject = jest.fn();
jest.unstable_mockModule('../../src/ai-providers/ollama.js', () => ({
	generateOllamaText: mockGenerateOllamaText,
	streamOllamaText: mockStreamOllamaText,
	generateOllamaObject: mockGenerateOllamaObject
}));

// Mock utils logger, API key resolver, AND findProjectRoot
const mockLog = jest.fn();
@@ -112,6 +137,8 @@ describe('Unified AI Services', () => {
mockResolveEnvVariable.mockImplementation((key) => {
	if (key === 'ANTHROPIC_API_KEY') return 'mock-anthropic-key';
	if (key === 'PERPLEXITY_API_KEY') return 'mock-perplexity-key';
	if (key === 'OPENAI_API_KEY') return 'mock-openai-key';
	if (key === 'OLLAMA_API_KEY') return 'mock-ollama-key';
	return null;
});
@@ -119,6 +146,7 @@ describe('Unified AI Services', () => {
	mockFindProjectRoot.mockReturnValue(fakeProjectRoot);
	mockGetDebugFlag.mockReturnValue(false);
	mockGetUserId.mockReturnValue('test-user-id'); // Add default mock for getUserId
	mockIsApiKeySet.mockReturnValue(true); // Default to true for most tests
});

describe('generateTextService', () => {
@@ -333,13 +361,218 @@ describe('Unified AI Services', () => {
		expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(1);
	});

	// Add more tests for edge cases:
	// - Missing API keys (should throw from _resolveApiKey)
	// - Unsupported provider configured (should skip and log)
	// - Missing provider/model config for a role (should skip and log)
	// - Missing prompt
	// - Different initial roles (research, fallback)
	// - generateObjectService (mock schema, check object result)
	// - streamTextService (more complex to test, might need stream helpers)

	// New tests for API key checking and fallback sequence
	// These tests verify that:
	// 1. The system checks if API keys are set before trying to use a provider
	// 2. If a provider's API key is missing, it skips to the next provider in the fallback sequence
	// 3. The system throws an appropriate error if all providers' API keys are missing
	// 4. Ollama is a special case where API key is optional and not checked
	// 5. Session context is correctly used for API key checks

	test('should skip provider with missing API key and try next in fallback sequence', async () => {
		// Setup isApiKeySet to return false for anthropic but true for perplexity
		mockIsApiKeySet.mockImplementation((provider, session, root) => {
			if (provider === 'anthropic') return false; // Main provider has no key
			return true; // Other providers have keys
		});

		// Mock perplexity text response (since we'll skip anthropic)
		mockGeneratePerplexityText.mockResolvedValue({
			text: 'Perplexity response (skipped to research)',
			usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
		});

		const params = {
			role: 'main',
			prompt: 'Skip main provider test',
			session: { env: {} }
		};

		const result = await generateTextService(params);

		// Should have gotten the perplexity response
		expect(result.mainResult).toBe('Perplexity response (skipped to research)');

		// Should check API keys
		expect(mockIsApiKeySet).toHaveBeenCalledWith('anthropic', params.session, fakeProjectRoot);
		expect(mockIsApiKeySet).toHaveBeenCalledWith('perplexity', params.session, fakeProjectRoot);

		// Should log a warning
		expect(mockLog).toHaveBeenCalledWith(
			'warn',
			expect.stringContaining(`Skipping role 'main' (Provider: anthropic): API key not set or invalid.`)
		);

		// Should NOT call anthropic provider
		expect(mockGenerateAnthropicText).not.toHaveBeenCalled();

		// Should call perplexity provider
		expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1);
	});

	test('should skip multiple providers with missing API keys and use first available', async () => {
		// Setup: main and fallback providers have no keys, only research has a key
		// Define different providers for testing multiple skips
		mockGetFallbackProvider.mockReturnValue('openai'); // Different from main
		mockGetFallbackModelId.mockReturnValue('test-openai-model');

		// Mock isApiKeySet to return false for both main and fallback
		mockIsApiKeySet.mockImplementation((provider, session, root) => {
			if (provider === 'anthropic') return false; // Main provider has no key
			if (provider === 'openai') return false; // Fallback provider has no key
			return true; // Research provider (perplexity) has a key
		});

		// Mock perplexity text response (since we'll skip to research)
		mockGeneratePerplexityText.mockResolvedValue({
			text: 'Research response after skipping main and fallback',
			usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
		});

		const params = {
			role: 'main',
			prompt: 'Skip multiple providers test',
			session: { env: {} }
		};

		const result = await generateTextService(params);

		// Should have gotten the perplexity (research) response
		expect(result.mainResult).toBe('Research response after skipping main and fallback');

		// Should check API keys for all three roles
		expect(mockIsApiKeySet).toHaveBeenCalledWith('anthropic', params.session, fakeProjectRoot);
		expect(mockIsApiKeySet).toHaveBeenCalledWith('openai', params.session, fakeProjectRoot);
		expect(mockIsApiKeySet).toHaveBeenCalledWith('perplexity', params.session, fakeProjectRoot);

		// Should log warnings for both skipped providers
		expect(mockLog).toHaveBeenCalledWith(
			'warn',
			expect.stringContaining(`Skipping role 'main' (Provider: anthropic): API key not set or invalid.`)
		);
		expect(mockLog).toHaveBeenCalledWith(
			'warn',
			expect.stringContaining(`Skipping role 'fallback' (Provider: openai): API key not set or invalid.`)
		);

		// Should NOT call skipped providers
		expect(mockGenerateAnthropicText).not.toHaveBeenCalled();
		expect(mockGenerateOpenAIText).not.toHaveBeenCalled();

		// Should call perplexity provider
		expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1);
	});

	test('should throw error if all providers in sequence have missing API keys', async () => {
		// Mock all providers to have missing API keys
		mockIsApiKeySet.mockReturnValue(false);

		const params = {
			role: 'main',
			prompt: 'All API keys missing test',
			session: { env: {} }
		};

		// Should throw error since all providers would be skipped
		await expect(generateTextService(params)).rejects.toThrow(
			'AI service call failed for all configured roles'
		);

		// Should log warnings for all skipped providers
		expect(mockLog).toHaveBeenCalledWith(
			'warn',
			expect.stringContaining(`Skipping role 'main' (Provider: anthropic): API key not set or invalid.`)
		);
		expect(mockLog).toHaveBeenCalledWith(
			'warn',
			expect.stringContaining(`Skipping role 'fallback' (Provider: anthropic): API key not set or invalid.`)
		);
		expect(mockLog).toHaveBeenCalledWith(
			'warn',
			expect.stringContaining(`Skipping role 'research' (Provider: perplexity): API key not set or invalid.`)
		);

		// Should log final error
		expect(mockLog).toHaveBeenCalledWith(
			'error',
			expect.stringContaining('All roles in the sequence [main, fallback, research] failed.')
		);

		// Should NOT call any providers
		expect(mockGenerateAnthropicText).not.toHaveBeenCalled();
		expect(mockGeneratePerplexityText).not.toHaveBeenCalled();
	});

	test('should not check API key for Ollama provider and try to use it', async () => {
		// Setup: Set main provider to ollama
		mockGetMainProvider.mockReturnValue('ollama');
		mockGetMainModelId.mockReturnValue('llama3');

		// isApiKeySet shouldn't be consulted for Ollama. Set it to false up front:
		// if the code did check it, the role would be skipped and the call below would fail.
		mockIsApiKeySet.mockReturnValue(false);

		// Mock Ollama text generation to succeed
		mockGenerateOllamaText.mockResolvedValue({
			text: 'Ollama response (no API key required)',
			usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
		});

		const params = {
			role: 'main',
			prompt: 'Ollama special case test',
			session: { env: {} }
		};

		const result = await generateTextService(params);

		// Should have gotten the Ollama response
		expect(result.mainResult).toBe('Ollama response (no API key required)');

		// Should call Ollama provider despite isApiKeySet being mocked to false
		expect(mockGenerateOllamaText).toHaveBeenCalledTimes(1);
	});

	test('should correctly use the provided session for API key check', async () => {
		// Mock custom session object with env vars
		const customSession = { env: { ANTHROPIC_API_KEY: 'session-api-key' } };

		// Setup API key check to verify the session is passed correctly
		mockIsApiKeySet.mockImplementation((provider, session, root) => {
			// Only return true if the correct session was provided
			return session === customSession;
		});

		// Mock the anthropic response
		mockGenerateAnthropicText.mockResolvedValue({
			text: 'Anthropic response with session key',
			usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
		});

		const params = {
			role: 'main',
			prompt: 'Session API key test',
			session: customSession
		};

		const result = await generateTextService(params);

		// Should check API key with the custom session
		expect(mockIsApiKeySet).toHaveBeenCalledWith('anthropic', customSession, fakeProjectRoot);

		// Should have gotten the anthropic response
		expect(result.mainResult).toBe('Anthropic response with session key');
	});
});
});

tests/unit/config-manager.test.mjs (new file, 759 lines)
@@ -0,0 +1,759 @@
// @ts-check
/**
 * Module to test the config-manager.js functionality
 * This file uses ES module syntax (.mjs) to properly handle imports
 */

import fs from 'fs';
import path from 'path';
import { jest } from '@jest/globals';
import { fileURLToPath } from 'url';
import { sampleTasks } from '../fixtures/sample-tasks.js';

// Disable chalk's color detection which can cause fs.readFileSync calls
process.env.FORCE_COLOR = '0';

// --- Read REAL supported-models.json data BEFORE mocks ---
const __filename = fileURLToPath(import.meta.url); // Get current file path
const __dirname = path.dirname(__filename); // Get current directory
const realSupportedModelsPath = path.resolve(
	__dirname,
	'../../scripts/modules/supported-models.json'
);
let REAL_SUPPORTED_MODELS_CONTENT;
let REAL_SUPPORTED_MODELS_DATA;
try {
	REAL_SUPPORTED_MODELS_CONTENT = fs.readFileSync(
		realSupportedModelsPath,
		'utf-8'
	);
	REAL_SUPPORTED_MODELS_DATA = JSON.parse(REAL_SUPPORTED_MODELS_CONTENT);
} catch (err) {
	console.error(
		'FATAL TEST SETUP ERROR: Could not read or parse real supported-models.json',
		err
	);
	REAL_SUPPORTED_MODELS_CONTENT = '{}'; // Default to empty object on error
	REAL_SUPPORTED_MODELS_DATA = {};
	process.exit(1); // Exit if essential test data can't be loaded
}

// --- Define Mock Function Instances ---
const mockFindProjectRoot = jest.fn();
const mockLog = jest.fn();
const mockResolveEnvVariable = jest.fn();

// --- Mock fs functions directly instead of the whole module ---
const mockExistsSync = jest.fn();
const mockReadFileSync = jest.fn();
const mockWriteFileSync = jest.fn();

// Instead of mocking the entire fs module, mock just the functions we need
fs.existsSync = mockExistsSync;
fs.readFileSync = mockReadFileSync;
fs.writeFileSync = mockWriteFileSync;

// --- Test Data (Keep as is, ensure DEFAULT_CONFIG is accurate) ---
const MOCK_PROJECT_ROOT = '/mock/project';
const MOCK_CONFIG_PATH = path.join(MOCK_PROJECT_ROOT, '.taskmasterconfig');

// Updated DEFAULT_CONFIG reflecting the implementation
const DEFAULT_CONFIG = {
	models: {
		main: {
			provider: 'anthropic',
			modelId: 'claude-3-7-sonnet-20250219',
			maxTokens: 64000,
			temperature: 0.2
		},
		research: {
			provider: 'perplexity',
			modelId: 'sonar-pro',
			maxTokens: 8700,
			temperature: 0.1
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-5-sonnet',
			maxTokens: 64000,
			temperature: 0.2
		}
	},
	global: {
		logLevel: 'info',
		debug: false,
		defaultSubtasks: 5,
		defaultPriority: 'medium',
		projectName: 'Task Master',
		ollamaBaseUrl: 'http://localhost:11434/api'
	}
};

// Other test data (VALID_CUSTOM_CONFIG, PARTIAL_CONFIG, INVALID_PROVIDER_CONFIG)
const VALID_CUSTOM_CONFIG = {
	models: {
		main: {
			provider: 'openai',
			modelId: 'gpt-4o',
			maxTokens: 4096,
			temperature: 0.5
		},
		research: {
			provider: 'google',
			modelId: 'gemini-1.5-pro-latest',
			maxTokens: 8192,
			temperature: 0.3
		},
		fallback: {
			provider: 'anthropic',
			modelId: 'claude-3-opus-20240229',
			maxTokens: 100000,
			temperature: 0.4
		}
	},
	global: {
		logLevel: 'debug',
		defaultPriority: 'high',
		projectName: 'My Custom Project'
	}
};

const PARTIAL_CONFIG = {
	models: {
		main: { provider: 'openai', modelId: 'gpt-4-turbo' }
	},
	global: {
		projectName: 'Partial Project'
	}
};

const INVALID_PROVIDER_CONFIG = {
	models: {
		main: { provider: 'invalid-provider', modelId: 'some-model' },
		research: {
			provider: 'perplexity',
			modelId: 'llama-3-sonar-large-32k-online'
		}
	},
	global: {
		logLevel: 'warn'
	}
};

// Define spies globally to be restored in afterAll
let consoleErrorSpy;
let consoleWarnSpy;

beforeAll(() => {
	// Set up console spies
	consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
	consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
});

afterAll(() => {
	// Restore all spies
	jest.restoreAllMocks();
});

describe('Config Manager Module', () => {
	// Declare variables for imported module
	let configManager;

	// Reset mocks before each test for isolation
	beforeEach(async () => {
		// Clear all mock calls and reset implementations between tests
		jest.clearAllMocks();
		// Reset the external mock instances for utils
		mockFindProjectRoot.mockReset();
		mockLog.mockReset();
		mockResolveEnvVariable.mockReset();
		mockExistsSync.mockReset();
		mockReadFileSync.mockReset();
		mockWriteFileSync.mockReset();

		// --- Mock Dependencies BEFORE importing the module under test ---
		// Mock the 'utils.js' module using doMock (applied at runtime)
		jest.doMock('../../scripts/modules/utils.js', () => ({
			__esModule: true, // Indicate it's an ES module mock
			findProjectRoot: mockFindProjectRoot, // Use the mock function instance
			log: mockLog, // Use the mock function instance
			resolveEnvVariable: mockResolveEnvVariable // Use the mock function instance
		}));

		// Dynamically import the module under test AFTER mocking dependencies
		configManager = await import('../../scripts/modules/config-manager.js');

		// --- Default Mock Implementations ---
		mockFindProjectRoot.mockReturnValue(MOCK_PROJECT_ROOT); // Default for utils.findProjectRoot
		mockExistsSync.mockReturnValue(true); // Assume files exist by default

		// Default readFileSync: Return REAL models content, mocked config, or throw error
		mockReadFileSync.mockImplementation((filePath) => {
			const baseName = path.basename(filePath);
			if (baseName === 'supported-models.json') {
				// Return the REAL file content stringified
				return REAL_SUPPORTED_MODELS_CONTENT;
			} else if (filePath === MOCK_CONFIG_PATH) {
				// Still mock the .taskmasterconfig reads
				return JSON.stringify(DEFAULT_CONFIG); // Default behavior
			}
			// Throw for unexpected reads - helps catch errors
			throw new Error(`Unexpected fs.readFileSync call in test: ${filePath}`);
		});

		// Default writeFileSync: Do nothing, just allow calls
		mockWriteFileSync.mockImplementation(() => {});
	});

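	// Note: with ES modules, jest.doMock only affects modules imported AFTER the
	// mock is registered, which is why config-manager.js is imported dynamically
	// inside beforeEach rather than statically at the top of this file.
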
	// --- Validation Functions ---
	describe('Validation Functions', () => {
		// Tests for validateProvider and validateProviderModelCombination
		test('validateProvider should return true for valid providers', () => {
			expect(configManager.validateProvider('openai')).toBe(true);
			expect(configManager.validateProvider('anthropic')).toBe(true);
			expect(configManager.validateProvider('google')).toBe(true);
			expect(configManager.validateProvider('perplexity')).toBe(true);
			expect(configManager.validateProvider('ollama')).toBe(true);
			expect(configManager.validateProvider('openrouter')).toBe(true);
		});

		test('validateProvider should return false for invalid providers', () => {
			expect(configManager.validateProvider('invalid-provider')).toBe(false);
			expect(configManager.validateProvider('grok')).toBe(false); // Not in mock map
			expect(configManager.validateProvider('')).toBe(false);
			expect(configManager.validateProvider(null)).toBe(false);
		});

		test('validateProviderModelCombination should validate known good combinations', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			expect(
				configManager.validateProviderModelCombination('openai', 'gpt-4o')
			).toBe(true);
			expect(
				configManager.validateProviderModelCombination(
					'anthropic',
					'claude-3-5-sonnet-20241022'
				)
			).toBe(true);
		});

		test('validateProviderModelCombination should return false for known bad combinations', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			expect(
				configManager.validateProviderModelCombination(
					'openai',
					'claude-3-opus-20240229'
				)
			).toBe(false);
		});

		test('validateProviderModelCombination should return false for ollama/openrouter (empty lists in map)', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			expect(
				configManager.validateProviderModelCombination('ollama', 'any-model')
			).toBe(false);
			expect(
				configManager.validateProviderModelCombination('openrouter', 'any/model')
			).toBe(false);
		});

		test('validateProviderModelCombination should return true for providers not in map', () => {
			// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
			configManager.getConfig(MOCK_PROJECT_ROOT, true);
			// The implementation returns true if the provider isn't in the map
			expect(
				configManager.validateProviderModelCombination(
					'unknown-provider',
					'some-model'
				)
			).toBe(true);
		});
	});

	// --- getConfig Tests ---
	describe('getConfig Tests', () => {
		test('should return default config if .taskmasterconfig does not exist', () => {
			// Arrange
			mockExistsSync.mockReturnValue(false);
			// findProjectRoot mock is set in beforeEach

			// Act: Call getConfig with explicit root
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload

			// Assert
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(mockFindProjectRoot).not.toHaveBeenCalled(); // Explicit root provided
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
			expect(mockReadFileSync).not.toHaveBeenCalled(); // No read if file doesn't exist
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining('not found at provided project root')
			);
		});

		test.skip('should use findProjectRoot and return defaults if file not found', () => {
			// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
			// Arrange
			mockExistsSync.mockReturnValue(false);
			// findProjectRoot mock is set in beforeEach

			// Act: Call getConfig without explicit root
			const config = configManager.getConfig(null, true); // Force reload

			// Assert
			expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(mockReadFileSync).not.toHaveBeenCalled();
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining('not found at derived root')
			); // Adjusted expected warning
		});

		test('should read and merge valid config file with defaults', () => {
			// Arrange: Override readFileSync for this test
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(VALID_CUSTOM_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					// Provide necessary models for validation within getConfig
					return JSON.stringify({
						openai: [{ id: 'gpt-4o' }],
						google: [{ id: 'gemini-1.5-pro-latest' }],
						perplexity: [{ id: 'sonar-pro' }],
						anthropic: [
							{ id: 'claude-3-opus-20240229' },
							{ id: 'claude-3-5-sonnet' },
							{ id: 'claude-3-7-sonnet-20250219' }
						],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload

			// Assert: Construct expected merged config
			const expectedMergedConfig = {
				models: {
					main: {
						...DEFAULT_CONFIG.models.main,
						...VALID_CUSTOM_CONFIG.models.main
					},
					research: {
						...DEFAULT_CONFIG.models.research,
						...VALID_CUSTOM_CONFIG.models.research
					},
					fallback: {
						...DEFAULT_CONFIG.models.fallback,
						...VALID_CUSTOM_CONFIG.models.fallback
					}
				},
				global: { ...DEFAULT_CONFIG.global, ...VALID_CUSTOM_CONFIG.global }
			};
			expect(config).toEqual(expectedMergedConfig);
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
			expect(mockReadFileSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
		});

		test('should merge defaults for partial config file', () => {
			// Arrange
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH) return JSON.stringify(PARTIAL_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						openai: [{ id: 'gpt-4-turbo' }],
						perplexity: [{ id: 'sonar-pro' }],
						anthropic: [
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert: Construct expected merged config
			const expectedMergedConfig = {
				models: {
					main: { ...DEFAULT_CONFIG.models.main, ...PARTIAL_CONFIG.models.main },
					research: { ...DEFAULT_CONFIG.models.research },
					fallback: { ...DEFAULT_CONFIG.models.fallback }
				},
				global: { ...DEFAULT_CONFIG.global, ...PARTIAL_CONFIG.global }
			};
			expect(config).toEqual(expectedMergedConfig);
			expect(mockReadFileSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
		});

		test('should handle JSON parsing error and return defaults', () => {
			// Arrange
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH) return 'invalid json';
				// Mock models read needed for initial load before parse error
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						anthropic: [{ id: 'claude-3-7-sonnet-20250219' }],
						perplexity: [{ id: 'sonar-pro' }],
						fallback: [{ id: 'claude-3-5-sonnet' }],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining('Error reading or parsing')
			);
		});

		test('should handle file read error and return defaults', () => {
			// Arrange
			const readError = new Error('Permission denied');
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH) throw readError;
				// Mock models read needed for initial load before read error
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						anthropic: [{ id: 'claude-3-7-sonnet-20250219' }],
						perplexity: [{ id: 'sonar-pro' }],
						fallback: [{ id: 'claude-3-5-sonnet' }],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert
			expect(config).toEqual(DEFAULT_CONFIG);
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining(`Permission denied. Using default configuration.`)
			);
		});

		test('should validate provider and fallback to default if invalid', () => {
			// Arrange
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(INVALID_PROVIDER_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						perplexity: [{ id: 'llama-3-sonar-large-32k-online' }],
						anthropic: [
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);

			// Assert
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining(
					'Warning: Invalid main provider "invalid-provider"'
				)
			);
			const expectedMergedConfig = {
				models: {
					main: { ...DEFAULT_CONFIG.models.main },
					research: {
						...DEFAULT_CONFIG.models.research,
						...INVALID_PROVIDER_CONFIG.models.research
					},
					fallback: { ...DEFAULT_CONFIG.models.fallback }
				},
				global: { ...DEFAULT_CONFIG.global, ...INVALID_PROVIDER_CONFIG.global }
			};
			expect(config).toEqual(expectedMergedConfig);
		});
	});

	// --- writeConfig Tests ---
	describe('writeConfig', () => {
		test('should write valid config to file', () => {
			// Arrange (Default mocks are sufficient)
			// findProjectRoot mock set in beforeEach
			mockWriteFileSync.mockImplementation(() => {}); // Ensure it doesn't throw

			// Act
			const success = configManager.writeConfig(
				VALID_CUSTOM_CONFIG,
				MOCK_PROJECT_ROOT
			);

			// Assert
			expect(success).toBe(true);
			expect(mockWriteFileSync).toHaveBeenCalledWith(
				MOCK_CONFIG_PATH,
				JSON.stringify(VALID_CUSTOM_CONFIG, null, 2) // writeConfig stringifies
			);
			expect(consoleErrorSpy).not.toHaveBeenCalled();
		});

		test('should return false and log error if write fails', () => {
			// Arrange
			const mockWriteError = new Error('Disk full');
			mockWriteFileSync.mockImplementation(() => {
				throw mockWriteError;
			});
			// findProjectRoot mock set in beforeEach

			// Act
			const success = configManager.writeConfig(
				VALID_CUSTOM_CONFIG,
				MOCK_PROJECT_ROOT
			);

			// Assert
			expect(success).toBe(false);
			expect(mockWriteFileSync).toHaveBeenCalled();
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining(`Disk full`)
			);
		});

		test.skip('should return false if project root cannot be determined', () => {
			// TODO: Fix mock interaction or function logic, returns true unexpectedly in test
			// Arrange: Override mock for this specific test
			mockFindProjectRoot.mockReturnValue(null);

			// Act: Call without explicit root
			const success = configManager.writeConfig(VALID_CUSTOM_CONFIG);

			// Assert
			expect(success).toBe(false); // Function should return false if root is null
			expect(mockFindProjectRoot).toHaveBeenCalled();
			expect(mockWriteFileSync).not.toHaveBeenCalled();
			expect(consoleErrorSpy).toHaveBeenCalledWith(
				expect.stringContaining('Could not determine project root')
			);
		});
	});

	// --- Getter Functions ---
	describe('Getter Functions', () => {
		test('getMainProvider should return provider from config', () => {
			// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(VALID_CUSTOM_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					return JSON.stringify({
						openai: [{ id: 'gpt-4o' }],
						google: [{ id: 'gemini-1.5-pro-latest' }],
						anthropic: [
							{ id: 'claude-3-opus-20240229' },
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						perplexity: [{ id: 'sonar-pro' }],
						ollama: [],
						openrouter: []
					}); // Added perplexity
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const provider = configManager.getMainProvider(MOCK_PROJECT_ROOT);

			// Assert
			expect(provider).toBe(VALID_CUSTOM_CONFIG.models.main.provider);
		});

		test('getLogLevel should return logLevel from config', () => {
			// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
			mockReadFileSync.mockImplementation((filePath) => {
				if (filePath === MOCK_CONFIG_PATH)
					return JSON.stringify(VALID_CUSTOM_CONFIG);
				if (path.basename(filePath) === 'supported-models.json') {
					// Provide enough mock model data for validation within getConfig
					return JSON.stringify({
						openai: [{ id: 'gpt-4o' }],
						google: [{ id: 'gemini-1.5-pro-latest' }],
						anthropic: [
							{ id: 'claude-3-opus-20240229' },
							{ id: 'claude-3-7-sonnet-20250219' },
							{ id: 'claude-3-5-sonnet' }
						],
						perplexity: [{ id: 'sonar-pro' }],
						ollama: [],
						openrouter: []
					});
				}
				throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
			});
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach

			// Act
			const logLevel = configManager.getLogLevel(MOCK_PROJECT_ROOT);

			// Assert
			expect(logLevel).toBe(VALID_CUSTOM_CONFIG.global.logLevel);
		});

		// Add more tests for other getters (getResearchProvider, getProjectName, etc.)
	});

	// --- isConfigFilePresent Tests ---
	describe('isConfigFilePresent', () => {
		test('should return true if config file exists', () => {
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach
			expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(true);
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		});

		test('should return false if config file does not exist', () => {
			mockExistsSync.mockReturnValue(false);
			// findProjectRoot mock set in beforeEach
			expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(false);
			expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		});

		test.skip('should use findProjectRoot if explicitRoot is not provided', () => {
			// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
			mockExistsSync.mockReturnValue(true);
			// findProjectRoot mock set in beforeEach
			expect(configManager.isConfigFilePresent()).toBe(true);
			expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
		});
	});

	// --- getAllProviders Tests ---
	describe('getAllProviders', () => {
		test('should return list of providers from supported-models.json', () => {
			// Arrange: Ensure config is loaded with real data
			configManager.getConfig(null, true); // Force load using the mock that returns real data

			// Act
			const providers = configManager.getAllProviders();

			// Assert against the actual keys in the REAL loaded data
			const expectedProviders = Object.keys(REAL_SUPPORTED_MODELS_DATA);
			expect(providers).toEqual(expect.arrayContaining(expectedProviders));
			expect(providers.length).toBe(expectedProviders.length);
		});
	});

	// Add tests for getParametersForRole if needed

	// Note: Tests for setMainModel, setResearchModel were removed as the functions were removed in the implementation.
	// If similar setter functions exist, add tests for them following the writeConfig pattern.

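	// The isApiKeySet tests below exercise resolveEnvVariable in both contexts.
	// A sketch of the resolution order the mocks assume (not the actual utils.js):
	//   function resolveEnvVariable(key, session = null, projectRoot = null) {
	//     if (session?.env?.[key] !== undefined) return session.env[key]; // MCP context
	//     return process.env[key]; // CLI context (real code also reads .env under projectRoot)
	//   }
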
	// --- isApiKeySet Tests ---
	describe('isApiKeySet', () => {
		const mockSession = { env: {} }; // Mock session for MCP context

		// Test cases: [providerName, envVarName, keyValue, expectedResult, testName]
		const testCases = [
			// Valid Keys
			['anthropic', 'ANTHROPIC_API_KEY', 'sk-valid-key', true, 'valid Anthropic key'],
			['openai', 'OPENAI_API_KEY', 'sk-another-valid-key', true, 'valid OpenAI key'],
			['perplexity', 'PERPLEXITY_API_KEY', 'pplx-valid', true, 'valid Perplexity key'],
			['google', 'GOOGLE_API_KEY', 'google-valid-key', true, 'valid Google key'],
			['mistral', 'MISTRAL_API_KEY', 'mistral-valid-key', true, 'valid Mistral key'],
			['openrouter', 'OPENROUTER_API_KEY', 'or-valid-key', true, 'valid OpenRouter key'],
			['xai', 'XAI_API_KEY', 'xai-valid-key', true, 'valid XAI key'],
			['azure', 'AZURE_OPENAI_API_KEY', 'azure-valid-key', true, 'valid Azure key'],

			// Ollama (special case - no key needed)
			['ollama', 'OLLAMA_API_KEY', undefined, true, 'Ollama provider (no key needed)'], // OLLAMA_API_KEY might not be in keyMap

			// Invalid / Missing Keys
			['anthropic', 'ANTHROPIC_API_KEY', undefined, false, 'missing Anthropic key'],
			['anthropic', 'ANTHROPIC_API_KEY', null, false, 'null Anthropic key'],
			['openai', 'OPENAI_API_KEY', '', false, 'empty OpenAI key'],
			['perplexity', 'PERPLEXITY_API_KEY', ' ', false, 'whitespace Perplexity key'],

			// Placeholder Keys
			['google', 'GOOGLE_API_KEY', 'YOUR_GOOGLE_API_KEY_HERE', false, 'placeholder Google key (YOUR_..._HERE)'],
			['mistral', 'MISTRAL_API_KEY', 'MISTRAL_KEY_HERE', false, 'placeholder Mistral key (..._KEY_HERE)'],
			['openrouter', 'OPENROUTER_API_KEY', 'ENTER_OPENROUTER_KEY_HERE', false, 'placeholder OpenRouter key (general ...KEY_HERE)'],

			// Unknown provider
			['unknownprovider', 'UNKNOWN_KEY', 'any-key', false, 'unknown provider']
		];

		testCases.forEach(([providerName, envVarName, keyValue, expectedResult, testName]) => {
			test(`should return ${expectedResult} for ${testName} (CLI context)`, () => {
				// CLI context (resolveEnvVariable uses process.env or .env via projectRoot)
				mockResolveEnvVariable.mockImplementation((key) => {
					return key === envVarName ? keyValue : undefined;
				});
				expect(configManager.isApiKeySet(providerName, null, MOCK_PROJECT_ROOT)).toBe(expectedResult);
				if (providerName !== 'ollama' && providerName !== 'unknownprovider') {
					// Ollama and unknown providers don't try to resolve a key
					expect(mockResolveEnvVariable).toHaveBeenCalledWith(envVarName, null, MOCK_PROJECT_ROOT);
				}
			});

			test(`should return ${expectedResult} for ${testName} (MCP context)`, () => {
				// MCP context (resolveEnvVariable uses session.env)
				const mcpSession = { env: { [envVarName]: keyValue } };
				mockResolveEnvVariable.mockImplementation((key, sessionArg) => {
					return sessionArg && sessionArg.env ? sessionArg.env[key] : undefined;
				});
				expect(configManager.isApiKeySet(providerName, mcpSession, null)).toBe(expectedResult);
				if (providerName !== 'ollama' && providerName !== 'unknownprovider') {
					expect(mockResolveEnvVariable).toHaveBeenCalledWith(envVarName, mcpSession, null);
				}
			});
		});

		test('isApiKeySet should log a warning for an unknown provider', () => {
			mockLog.mockClear(); // Clear previous log calls
			configManager.isApiKeySet('nonexistentprovider');
			expect(mockLog).toHaveBeenCalledWith(
				'warn',
				expect.stringContaining('Unknown provider name: nonexistentprovider')
			);
		});

		test('isApiKeySet should handle provider names case-insensitively for keyMap lookup', () => {
			mockResolveEnvVariable.mockReturnValue('a-valid-key');
			expect(configManager.isApiKeySet('Anthropic', null, MOCK_PROJECT_ROOT)).toBe(true);
			expect(mockResolveEnvVariable).toHaveBeenCalledWith('ANTHROPIC_API_KEY', null, MOCK_PROJECT_ROOT);

			mockResolveEnvVariable.mockReturnValue('another-valid-key');
			expect(configManager.isApiKeySet('OPENAI', null, MOCK_PROJECT_ROOT)).toBe(true);
			expect(mockResolveEnvVariable).toHaveBeenCalledWith('OPENAI_API_KEY', null, MOCK_PROJECT_ROOT);
		});
	});
});