Revert "Release 0.13.0"

This commit is contained in:
Ralph Khreish
2025-05-03 14:38:33 +02:00
committed by GitHub
parent 8dace2186c
commit 6f5ddabc96
177 changed files with 13894 additions and 26358 deletions


@@ -0,0 +1,350 @@
/**
* ai-client-utils.test.js
* Tests for AI client utility functions
*/
import { jest } from '@jest/globals';
import {
getAnthropicClientForMCP,
getPerplexityClientForMCP,
getModelConfig,
getBestAvailableAIModel,
handleClaudeError
} from '../../mcp-server/src/core/utils/ai-client-utils.js';
// Mock the Anthropic constructor
jest.mock('@anthropic-ai/sdk', () => {
return {
Anthropic: jest.fn().mockImplementation(() => {
return {
messages: {
create: jest.fn().mockResolvedValue({})
}
};
})
};
});
// Mock the OpenAI dynamic import
jest.mock('openai', () => {
return {
default: jest.fn().mockImplementation(() => {
return {
chat: {
completions: {
create: jest.fn().mockResolvedValue({})
}
}
};
})
};
});
describe('AI Client Utilities', () => {
const originalEnv = process.env;
beforeEach(() => {
// Reset process.env before each test
process.env = { ...originalEnv };
// Clear all mocks
jest.clearAllMocks();
});
afterAll(() => {
// Restore process.env
process.env = originalEnv;
});
describe('getAnthropicClientForMCP', () => {
it('should initialize client with API key from session', () => {
// Setup
const session = {
env: {
ANTHROPIC_API_KEY: 'test-key-from-session'
}
};
const mockLog = { error: jest.fn() };
// Execute
const client = getAnthropicClientForMCP(session, mockLog);
// Verify
expect(client).toBeDefined();
expect(client.messages.create).toBeDefined();
expect(mockLog.error).not.toHaveBeenCalled();
});
it('should fall back to process.env when session key is missing', () => {
// Setup
process.env.ANTHROPIC_API_KEY = 'test-key-from-env';
const session = { env: {} };
const mockLog = { error: jest.fn() };
// Execute
const client = getAnthropicClientForMCP(session, mockLog);
// Verify
expect(client).toBeDefined();
expect(mockLog.error).not.toHaveBeenCalled();
});
it('should throw error when API key is missing', () => {
// Setup
delete process.env.ANTHROPIC_API_KEY;
const session = { env: {} };
const mockLog = { error: jest.fn() };
// Execute & Verify
expect(() => getAnthropicClientForMCP(session, mockLog)).toThrow();
expect(mockLog.error).toHaveBeenCalled();
});
});
describe('getPerplexityClientForMCP', () => {
it('should initialize client with API key from session', async () => {
// Setup
const session = {
env: {
PERPLEXITY_API_KEY: 'test-perplexity-key'
}
};
const mockLog = { error: jest.fn() };
// Execute
const client = await getPerplexityClientForMCP(session, mockLog);
// Verify
expect(client).toBeDefined();
expect(client.chat.completions.create).toBeDefined();
expect(mockLog.error).not.toHaveBeenCalled();
});
it('should throw error when API key is missing', async () => {
// Setup
delete process.env.PERPLEXITY_API_KEY;
const session = { env: {} };
const mockLog = { error: jest.fn() };
// Execute & Verify
await expect(
getPerplexityClientForMCP(session, mockLog)
).rejects.toThrow();
expect(mockLog.error).toHaveBeenCalled();
});
});
describe('getModelConfig', () => {
it('should get model config from session', () => {
// Setup
const session = {
env: {
MODEL: 'claude-3-opus',
MAX_TOKENS: '8000',
TEMPERATURE: '0.5'
}
};
// Execute
const config = getModelConfig(session);
// Verify
expect(config).toEqual({
model: 'claude-3-opus',
maxTokens: 8000,
temperature: 0.5
});
});
it('should use default values when session values are missing', () => {
// Setup
const session = {
env: {
// No values
}
};
// Execute
const config = getModelConfig(session);
// Verify
expect(config).toEqual({
model: 'claude-3-7-sonnet-20250219',
maxTokens: 64000,
temperature: 0.2
});
});
it('should allow custom defaults', () => {
// Setup
const session = { env: {} };
const customDefaults = {
model: 'custom-model',
maxTokens: 2000,
temperature: 0.3
};
// Execute
const config = getModelConfig(session, customDefaults);
// Verify
expect(config).toEqual(customDefaults);
});
});
describe('getBestAvailableAIModel', () => {
it('should return Perplexity for research when available', async () => {
// Setup
const session = {
env: {
PERPLEXITY_API_KEY: 'test-perplexity-key',
ANTHROPIC_API_KEY: 'test-anthropic-key'
}
};
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
// Execute
const result = await getBestAvailableAIModel(
session,
{ requiresResearch: true },
mockLog
);
// Verify
expect(result.type).toBe('perplexity');
expect(result.client).toBeDefined();
});
it('should return Claude when Perplexity is not available and Claude is not overloaded', async () => {
// Setup
const originalPerplexityKey = process.env.PERPLEXITY_API_KEY;
delete process.env.PERPLEXITY_API_KEY; // Make sure Perplexity is not available in process.env
const session = {
env: {
ANTHROPIC_API_KEY: 'test-anthropic-key'
// Purposely not including PERPLEXITY_API_KEY
}
};
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
try {
// Execute
const result = await getBestAvailableAIModel(
session,
{ requiresResearch: true },
mockLog
);
// Verify
// The implementation prioritizes Perplexity for research capability,
// so when research is required but Perplexity is unavailable, Claude is used instead
expect(result.type).toBe('claude');
expect(result.client).toBeDefined();
expect(mockLog.warn).toHaveBeenCalled(); // Warning about using Claude instead of Perplexity
} finally {
// Restore original env variables
if (originalPerplexityKey) {
process.env.PERPLEXITY_API_KEY = originalPerplexityKey;
}
}
});
it('should fall back to Claude as last resort when overloaded', async () => {
// Setup
const session = {
env: {
ANTHROPIC_API_KEY: 'test-anthropic-key'
}
};
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
// Execute
const result = await getBestAvailableAIModel(
session,
{ claudeOverloaded: true },
mockLog
);
// Verify
expect(result.type).toBe('claude');
expect(result.client).toBeDefined();
expect(mockLog.warn).toHaveBeenCalled(); // Warning about Claude overloaded
});
it('should throw error when no models are available', async () => {
// Setup
delete process.env.ANTHROPIC_API_KEY;
delete process.env.PERPLEXITY_API_KEY;
const session = { env: {} };
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
// Execute & Verify
await expect(
getBestAvailableAIModel(session, {}, mockLog)
).rejects.toThrow();
});
});
describe('handleClaudeError', () => {
it('should handle overloaded error', () => {
// Setup
const error = {
type: 'error',
error: {
type: 'overloaded_error',
message: 'Claude is overloaded'
}
};
// Execute
const message = handleClaudeError(error);
// Verify
expect(message).toContain('overloaded');
});
it('should handle rate limit error', () => {
// Setup
const error = {
type: 'error',
error: {
type: 'rate_limit_error',
message: 'Rate limit exceeded'
}
};
// Execute
const message = handleClaudeError(error);
// Verify
expect(message).toContain('rate limit');
});
it('should handle timeout error', () => {
// Setup
const error = {
message: 'Request timed out after 60 seconds'
};
// Execute
const message = handleClaudeError(error);
// Verify
expect(message).toContain('timed out');
});
it('should handle generic errors', () => {
// Setup
const error = {
message: 'Something went wrong'
};
// Execute
const message = handleClaudeError(error);
// Verify
expect(message).toContain('Error communicating with Claude');
});
});
});
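For orientation, the selection order these tests pin down can be summarized in a short sketch. This is a reconstruction inferred from the assertions above, not the module's actual source; the warning wording and the exact option handling are assumptions.

// Inferred selection logic (sketch): Perplexity is preferred for research,
// Claude is used otherwise, with warnings when it substitutes for Perplexity
// or is known to be overloaded; no available key at all is an error.
async function getBestAvailableAIModelSketch(session, options = {}, log) {
  const { requiresResearch = false, claudeOverloaded = false } = options;
  const perplexityKey =
    session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY;
  const anthropicKey =
    session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY;
  if (requiresResearch && perplexityKey) {
    const client = await getPerplexityClientForMCP(session, log);
    return { type: 'perplexity', client };
  }
  if (anthropicKey) {
    if (requiresResearch) {
      log.warn('Perplexity unavailable for research, falling back to Claude');
    }
    if (claudeOverloaded) {
      log.warn('Claude is overloaded, using it as a last resort');
    }
    return { type: 'claude', client: getAnthropicClientForMCP(session, log) };
  }
  throw new Error('No AI models available, set ANTHROPIC_API_KEY or PERPLEXITY_API_KEY');
}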


@@ -1,289 +0,0 @@
import { jest } from '@jest/globals';
// Mock config-manager
const mockGetMainProvider = jest.fn();
const mockGetMainModelId = jest.fn();
const mockGetResearchProvider = jest.fn();
const mockGetResearchModelId = jest.fn();
const mockGetFallbackProvider = jest.fn();
const mockGetFallbackModelId = jest.fn();
const mockGetParametersForRole = jest.fn();
jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
getMainProvider: mockGetMainProvider,
getMainModelId: mockGetMainModelId,
getResearchProvider: mockGetResearchProvider,
getResearchModelId: mockGetResearchModelId,
getFallbackProvider: mockGetFallbackProvider,
getFallbackModelId: mockGetFallbackModelId,
getParametersForRole: mockGetParametersForRole
}));
// Mock AI Provider Modules
const mockGenerateAnthropicText = jest.fn();
const mockStreamAnthropicText = jest.fn();
const mockGenerateAnthropicObject = jest.fn();
jest.unstable_mockModule('../../src/ai-providers/anthropic.js', () => ({
generateAnthropicText: mockGenerateAnthropicText,
streamAnthropicText: mockStreamAnthropicText,
generateAnthropicObject: mockGenerateAnthropicObject
}));
const mockGeneratePerplexityText = jest.fn();
const mockStreamPerplexityText = jest.fn();
const mockGeneratePerplexityObject = jest.fn();
jest.unstable_mockModule('../../src/ai-providers/perplexity.js', () => ({
generatePerplexityText: mockGeneratePerplexityText,
streamPerplexityText: mockStreamPerplexityText,
generatePerplexityObject: mockGeneratePerplexityObject
}));
// ... Mock other providers (google, openai, etc.) similarly ...
// Mock utils logger, API key resolver, AND findProjectRoot
const mockLog = jest.fn();
const mockResolveEnvVariable = jest.fn();
const mockFindProjectRoot = jest.fn();
jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
log: mockLog,
resolveEnvVariable: mockResolveEnvVariable,
findProjectRoot: mockFindProjectRoot
}));
// Import the module to test (AFTER mocks)
const { generateTextService } = await import(
'../../scripts/modules/ai-services-unified.js'
);
describe('Unified AI Services', () => {
const fakeProjectRoot = '/fake/project/root'; // Define for reuse
beforeEach(() => {
// Clear mocks before each test
jest.clearAllMocks(); // Clears all mocks
// Set default mock behaviors
mockGetMainProvider.mockReturnValue('anthropic');
mockGetMainModelId.mockReturnValue('test-main-model');
mockGetResearchProvider.mockReturnValue('perplexity');
mockGetResearchModelId.mockReturnValue('test-research-model');
mockGetFallbackProvider.mockReturnValue('anthropic');
mockGetFallbackModelId.mockReturnValue('test-fallback-model');
mockGetParametersForRole.mockImplementation((role) => {
if (role === 'main') return { maxTokens: 100, temperature: 0.5 };
if (role === 'research') return { maxTokens: 200, temperature: 0.3 };
if (role === 'fallback') return { maxTokens: 150, temperature: 0.6 };
return { maxTokens: 100, temperature: 0.5 }; // Default
});
mockResolveEnvVariable.mockImplementation((key) => {
if (key === 'ANTHROPIC_API_KEY') return 'mock-anthropic-key';
if (key === 'PERPLEXITY_API_KEY') return 'mock-perplexity-key';
return null;
});
// Set a default behavior for the new mock
mockFindProjectRoot.mockReturnValue(fakeProjectRoot);
});
describe('generateTextService', () => {
test('should use main provider/model and succeed', async () => {
mockGenerateAnthropicText.mockResolvedValue('Main provider response');
const params = {
role: 'main',
session: { env: {} },
systemPrompt: 'System',
prompt: 'Test'
};
const result = await generateTextService(params);
expect(result).toBe('Main provider response');
expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot);
expect(mockGetMainModelId).toHaveBeenCalledWith(fakeProjectRoot);
expect(mockGetParametersForRole).toHaveBeenCalledWith(
'main',
fakeProjectRoot
);
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
'ANTHROPIC_API_KEY',
params.session,
fakeProjectRoot
);
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(1);
expect(mockGenerateAnthropicText).toHaveBeenCalledWith({
apiKey: 'mock-anthropic-key',
modelId: 'test-main-model',
maxTokens: 100,
temperature: 0.5,
messages: [
{ role: 'system', content: 'System' },
{ role: 'user', content: 'Test' }
]
});
expect(mockGeneratePerplexityText).not.toHaveBeenCalled();
});
test('should fall back to fallback provider if main fails', async () => {
const mainError = new Error('Main provider failed');
mockGenerateAnthropicText
.mockRejectedValueOnce(mainError)
.mockResolvedValueOnce('Fallback provider response');
const explicitRoot = '/explicit/test/root';
const params = {
role: 'main',
prompt: 'Fallback test',
projectRoot: explicitRoot
};
const result = await generateTextService(params);
expect(result).toBe('Fallback provider response');
expect(mockGetMainProvider).toHaveBeenCalledWith(explicitRoot);
expect(mockGetFallbackProvider).toHaveBeenCalledWith(explicitRoot);
expect(mockGetParametersForRole).toHaveBeenCalledWith(
'main',
explicitRoot
);
expect(mockGetParametersForRole).toHaveBeenCalledWith(
'fallback',
explicitRoot
);
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
'ANTHROPIC_API_KEY',
undefined,
explicitRoot
);
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2);
expect(mockGeneratePerplexityText).not.toHaveBeenCalled();
expect(mockLog).toHaveBeenCalledWith(
'error',
expect.stringContaining('Service call failed for role main')
);
expect(mockLog).toHaveBeenCalledWith(
'info',
expect.stringContaining('New AI service call with role: fallback')
);
});
test('should fall back to research provider if main and fallback fail', async () => {
const mainError = new Error('Main failed');
const fallbackError = new Error('Fallback failed');
mockGenerateAnthropicText
.mockRejectedValueOnce(mainError)
.mockRejectedValueOnce(fallbackError);
mockGeneratePerplexityText.mockResolvedValue(
'Research provider response'
);
const params = { role: 'main', prompt: 'Research fallback test' };
const result = await generateTextService(params);
expect(result).toBe('Research provider response');
expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot);
expect(mockGetFallbackProvider).toHaveBeenCalledWith(fakeProjectRoot);
expect(mockGetResearchProvider).toHaveBeenCalledWith(fakeProjectRoot);
expect(mockGetParametersForRole).toHaveBeenCalledWith(
'main',
fakeProjectRoot
);
expect(mockGetParametersForRole).toHaveBeenCalledWith(
'fallback',
fakeProjectRoot
);
expect(mockGetParametersForRole).toHaveBeenCalledWith(
'research',
fakeProjectRoot
);
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
'ANTHROPIC_API_KEY',
undefined,
fakeProjectRoot
);
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
'PERPLEXITY_API_KEY',
undefined,
fakeProjectRoot
);
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2);
expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1);
expect(mockLog).toHaveBeenCalledWith(
'error',
expect.stringContaining('Service call failed for role fallback')
);
expect(mockLog).toHaveBeenCalledWith(
'info',
expect.stringContaining('New AI service call with role: research')
);
});
test('should throw error if all providers in sequence fail', async () => {
mockGenerateAnthropicText.mockRejectedValue(
new Error('Anthropic failed')
);
mockGeneratePerplexityText.mockRejectedValue(
new Error('Perplexity failed')
);
const params = { role: 'main', prompt: 'All fail test' };
await expect(generateTextService(params)).rejects.toThrow(
'Perplexity failed' // Error from the last attempt (research)
);
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); // main, fallback
expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1); // research
});
test('should handle retryable errors correctly', async () => {
const retryableError = new Error('Rate limit');
mockGenerateAnthropicText
.mockRejectedValueOnce(retryableError) // Fails once
.mockResolvedValue('Success after retry'); // Succeeds on retry
const params = { role: 'main', prompt: 'Retry success test' };
const result = await generateTextService(params);
expect(result).toBe('Success after retry');
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); // Initial + 1 retry
expect(mockLog).toHaveBeenCalledWith(
'info',
expect.stringContaining('Retryable error detected. Retrying')
);
});
test('should use default project root or handle null if findProjectRoot returns null', async () => {
mockFindProjectRoot.mockReturnValue(null); // Simulate not finding root
mockGenerateAnthropicText.mockResolvedValue('Response with no root');
const params = { role: 'main', prompt: 'No root test' }; // No explicit root passed
await generateTextService(params);
expect(mockGetMainProvider).toHaveBeenCalledWith(null);
expect(mockGetParametersForRole).toHaveBeenCalledWith('main', null);
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
'ANTHROPIC_API_KEY',
undefined,
null
);
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(1);
});
// Add more tests for edge cases:
// - Missing API keys (should throw from _resolveApiKey); see the example sketch after this file
// - Unsupported provider configured (should skip and log)
// - Missing provider/model config for a role (should skip and log)
// - Missing prompt
// - Different initial roles (research, fallback)
// - generateObjectService (mock schema, check object result)
// - streamTextService (more complex to test, might need stream helpers)
});
});
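The first TODO item above can be sketched directly against the mocks this file already defines. The following is an illustrative example only; it assumes generateTextService rejects once resolveEnvVariable returns null for every provider in the main, fallback, research sequence, which the real implementation may handle differently.

test('should throw if no API key can be resolved (example sketch)', async () => {
  // Assumption: the key-resolution step throws when resolveEnvVariable yields null
  mockResolveEnvVariable.mockReturnValue(null);
  const params = { role: 'main', prompt: 'No key test' };
  await expect(generateTextService(params)).rejects.toThrow();
  // No provider call should be attempted without a resolved key
  expect(mockGenerateAnthropicText).not.toHaveBeenCalled();
  expect(mockGeneratePerplexityText).not.toHaveBeenCalled();
});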


@@ -0,0 +1,373 @@
/**
* AI Services module tests
*/
import { jest } from '@jest/globals';
import { parseSubtasksFromText } from '../../scripts/modules/ai-services.js';
// Create a mock log function we can check later
const mockLog = jest.fn();
// Mock dependencies
jest.mock('@anthropic-ai/sdk', () => {
const mockCreate = jest.fn().mockResolvedValue({
content: [{ text: 'AI response' }]
});
const mockAnthropicInstance = {
messages: {
create: mockCreate
}
};
const mockAnthropicConstructor = jest
.fn()
.mockImplementation(() => mockAnthropicInstance);
return {
Anthropic: mockAnthropicConstructor
};
});
// Use jest.fn() directly for OpenAI mock
const mockOpenAIInstance = {
chat: {
completions: {
create: jest.fn().mockResolvedValue({
choices: [{ message: { content: 'Perplexity response' } }]
})
}
}
};
const mockOpenAI = jest.fn().mockImplementation(() => mockOpenAIInstance);
jest.mock('openai', () => {
return { default: mockOpenAI };
});
jest.mock('dotenv', () => ({
config: jest.fn()
}));
jest.mock('../../scripts/modules/utils.js', () => ({
CONFIG: {
model: 'claude-3-sonnet-20240229',
temperature: 0.7,
maxTokens: 4000
},
log: mockLog,
sanitizePrompt: jest.fn((text) => text)
}));
jest.mock('../../scripts/modules/ui.js', () => ({
startLoadingIndicator: jest.fn().mockReturnValue('mockLoader'),
stopLoadingIndicator: jest.fn()
}));
// Mock anthropic global object
global.anthropic = {
messages: {
create: jest.fn().mockResolvedValue({
content: [
{
text: '[{"id": 1, "title": "Test", "description": "Test", "dependencies": [], "details": "Test"}]'
}
]
})
}
};
// Mock process.env
const originalEnv = process.env;
// Import Anthropic for testing constructor arguments
import { Anthropic } from '@anthropic-ai/sdk';
describe('AI Services Module', () => {
beforeEach(() => {
jest.clearAllMocks();
process.env = { ...originalEnv };
process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
process.env.PERPLEXITY_API_KEY = 'test-perplexity-key';
});
afterEach(() => {
process.env = originalEnv;
});
describe('parseSubtasksFromText function', () => {
test('should parse subtasks from JSON text', () => {
const text = `Here's your list of subtasks:
[
{
"id": 1,
"title": "Implement database schema",
"description": "Design and implement the database schema for user data",
"dependencies": [],
"details": "Create tables for users, preferences, and settings"
},
{
"id": 2,
"title": "Create API endpoints",
"description": "Develop RESTful API endpoints for user operations",
"dependencies": [],
"details": "Implement CRUD operations for user management"
}
]
These subtasks will help you implement the parent task efficiently.`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
id: 1,
title: 'Implement database schema',
description: 'Design and implement the database schema for user data',
status: 'pending',
dependencies: [],
details: 'Create tables for users, preferences, and settings',
parentTaskId: 5
});
expect(result[1]).toEqual({
id: 2,
title: 'Create API endpoints',
description: 'Develop RESTful API endpoints for user operations',
status: 'pending',
dependencies: [],
details: 'Implement CRUD operations for user management',
parentTaskId: 5
});
});
test('should handle subtasks with dependencies', () => {
const text = `
[
{
"id": 1,
"title": "Setup React environment",
"description": "Initialize React app with necessary dependencies",
"dependencies": [],
"details": "Use Create React App or Vite to set up a new project"
},
{
"id": 2,
"title": "Create component structure",
"description": "Design and implement component hierarchy",
"dependencies": [1],
"details": "Organize components by feature and reusability"
}
]`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result).toHaveLength(2);
expect(result[0].dependencies).toEqual([]);
expect(result[1].dependencies).toEqual([1]);
});
test('should handle complex dependency lists', () => {
const text = `
[
{
"id": 1,
"title": "Setup database",
"description": "Initialize database structure",
"dependencies": [],
"details": "Set up PostgreSQL database"
},
{
"id": 2,
"title": "Create models",
"description": "Implement data models",
"dependencies": [1],
"details": "Define Prisma models"
},
{
"id": 3,
"title": "Implement controllers",
"description": "Create API controllers",
"dependencies": [1, 2],
"details": "Build controllers for all endpoints"
}
]`;
const result = parseSubtasksFromText(text, 1, 3, 5);
expect(result).toHaveLength(3);
expect(result[2].dependencies).toEqual([1, 2]);
});
test('should throw an error for empty text', () => {
const emptyText = '';
expect(() => parseSubtasksFromText(emptyText, 1, 2, 5)).toThrow(
'Empty text provided, cannot parse subtasks'
);
});
test('should normalize subtask IDs', () => {
const text = `
[
{
"id": 10,
"title": "First task with incorrect ID",
"description": "First description",
"dependencies": [],
"details": "First details"
},
{
"id": 20,
"title": "Second task with incorrect ID",
"description": "Second description",
"dependencies": [],
"details": "Second details"
}
]`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result).toHaveLength(2);
expect(result[0].id).toBe(1); // Should normalize to starting ID
expect(result[1].id).toBe(2); // Should normalize to starting ID + 1
});
test('should convert string dependencies to numbers', () => {
const text = `
[
{
"id": 1,
"title": "First task",
"description": "First description",
"dependencies": [],
"details": "First details"
},
{
"id": 2,
"title": "Second task",
"description": "Second description",
"dependencies": ["1"],
"details": "Second details"
}
]`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result[1].dependencies).toEqual([1]);
expect(typeof result[1].dependencies[0]).toBe('number');
});
test('should throw an error for invalid JSON', () => {
const text = `This is not valid JSON and cannot be parsed`;
expect(() => parseSubtasksFromText(text, 1, 2, 5)).toThrow(
'Could not locate valid JSON array in the response'
);
});
});
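// Sketch for orientation (inferred from the assertions above, not the actual module):
// parseSubtasksFromText locates the first JSON array in the response text,
// renumbers ids sequentially from startId, coerces dependency ids to numbers,
// and stamps status/parentTaskId. The expectedCount argument is accepted but
// its validation is omitted here; treat every detail below as an assumption.
function parseSubtasksFromTextSketch(text, startId, expectedCount, parentTaskId) {
  if (!text || !text.trim()) {
    throw new Error('Empty text provided, cannot parse subtasks');
  }
  const start = text.indexOf('[');
  const end = text.lastIndexOf(']');
  if (start === -1 || end === -1) {
    throw new Error('Could not locate valid JSON array in the response');
  }
  const raw = JSON.parse(text.slice(start, end + 1));
  return raw.map((subtask, index) => ({
    ...subtask,
    id: startId + index, // normalize: startId, startId + 1, ...
    status: 'pending',
    dependencies: (subtask.dependencies || []).map(Number), // "1" -> 1
    parentTaskId
  }));
}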
describe('handleClaudeError function', () => {
// Import the function directly for testing
let handleClaudeError;
beforeAll(async () => {
// Dynamic import to get the actual function
const module = await import('../../scripts/modules/ai-services.js');
handleClaudeError = module.handleClaudeError;
});
test('should handle overloaded_error type', () => {
const error = {
type: 'error',
error: {
type: 'overloaded_error',
message: 'Claude is experiencing high volume'
}
};
// Mock process.env to include PERPLEXITY_API_KEY
const originalEnv = process.env;
process.env = { ...originalEnv, PERPLEXITY_API_KEY: 'test-key' };
const result = handleClaudeError(error);
// Restore original env
process.env = originalEnv;
expect(result).toContain('Claude is currently overloaded');
expect(result).toContain('fall back to Perplexity AI');
});
test('should handle rate_limit_error type', () => {
const error = {
type: 'error',
error: {
type: 'rate_limit_error',
message: 'Rate limit exceeded'
}
};
const result = handleClaudeError(error);
expect(result).toContain('exceeded the rate limit');
});
test('should handle invalid_request_error type', () => {
const error = {
type: 'error',
error: {
type: 'invalid_request_error',
message: 'Invalid request parameters'
}
};
const result = handleClaudeError(error);
expect(result).toContain('issue with the request format');
});
test('should handle timeout errors', () => {
const error = {
message: 'Request timed out after 60000ms'
};
const result = handleClaudeError(error);
expect(result).toContain('timed out');
});
test('should handle network errors', () => {
const error = {
message: 'Network error occurred'
};
const result = handleClaudeError(error);
expect(result).toContain('network error');
});
test('should handle generic errors', () => {
const error = {
message: 'Something unexpected happened'
};
const result = handleClaudeError(error);
expect(result).toContain('Error communicating with Claude');
expect(result).toContain('Something unexpected happened');
});
});
describe('Anthropic client configuration', () => {
test('should include output-128k beta header in client configuration', async () => {
// Read the file content to verify the change is present
const fs = await import('fs');
const path = await import('path');
const filePath = path.resolve('./scripts/modules/ai-services.js');
const fileContent = fs.readFileSync(filePath, 'utf8');
// Check if the beta header is in the file
expect(fileContent).toContain(
"'anthropic-beta': 'output-128k-2025-02-19'"
);
});
});
});
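The last test above asserts the beta header by scanning the source file. The client construction it implies would look roughly like the following; defaultHeaders is a real option on the @anthropic-ai/sdk constructor, but the surrounding wiring here is an assumption.

// Assumed shape of the client setup in scripts/modules/ai-services.js:
const anthropicSketch = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
  defaultHeaders: {
    'anthropic-beta': 'output-128k-2025-02-19' // the header the test checks for
  }
});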


@@ -155,19 +155,19 @@ describe('Commands Module', () => {
 const program = setupCLI();
 const version = program._version();
 expect(mockReadFileSync).not.toHaveBeenCalled();
-expect(version).toBe('unknown');
+expect(version).toBe('1.5.0');
 });
 test('should use default version when package.json reading throws an error', () => {
 mockExistsSync.mockReturnValue(true);
 mockReadFileSync.mockImplementation(() => {
-throw new Error('Read error');
+throw new Error('Invalid JSON');
 });
 const program = setupCLI();
 const version = program._version();
 expect(mockReadFileSync).toHaveBeenCalled();
-expect(version).toBe('unknown');
+expect(version).toBe('1.5.0');
 });
});
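Read as a revert, the changed assertions say the CLI's version fallback moved back from 'unknown' to '1.5.0'. A minimal sketch of the logic under test, assuming a package.json lookup via the mocked fs helpers (the path and function name are hypothetical):

function resolveCliVersionSketch() {
  try {
    const packagePath = path.join(__dirname, '../../package.json'); // assumed location
    if (!fs.existsSync(packagePath)) {
      return '1.5.0'; // default when package.json is absent (readFileSync never called)
    }
    return JSON.parse(fs.readFileSync(packagePath, 'utf8')).version;
  } catch (error) {
    return '1.5.0'; // default when reading or parsing fails
  }
}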


@@ -1,670 +0,0 @@
import fs from 'fs';
import path from 'path';
import { jest } from '@jest/globals';
import { fileURLToPath } from 'url';
// --- Read REAL supported-models.json data BEFORE mocks ---
const __filename = fileURLToPath(import.meta.url); // Get current file path
const __dirname = path.dirname(__filename); // Get current directory
const realSupportedModelsPath = path.resolve(
__dirname,
'../../scripts/modules/supported-models.json'
);
let REAL_SUPPORTED_MODELS_CONTENT;
let REAL_SUPPORTED_MODELS_DATA;
try {
REAL_SUPPORTED_MODELS_CONTENT = fs.readFileSync(
realSupportedModelsPath,
'utf-8'
);
REAL_SUPPORTED_MODELS_DATA = JSON.parse(REAL_SUPPORTED_MODELS_CONTENT);
} catch (err) {
console.error(
'FATAL TEST SETUP ERROR: Could not read or parse real supported-models.json',
err
);
REAL_SUPPORTED_MODELS_CONTENT = '{}'; // Default to empty object on error
REAL_SUPPORTED_MODELS_DATA = {};
process.exit(1); // Exit if essential test data can't be loaded
}
// --- Define Mock Function Instances ---
const mockFindProjectRoot = jest.fn();
const mockLog = jest.fn();
// --- Mock Dependencies BEFORE importing the module under test ---
// Mock the entire 'fs' module
jest.mock('fs');
// Mock the 'utils.js' module using a factory function
jest.mock('../../scripts/modules/utils.js', () => ({
__esModule: true, // Indicate it's an ES module mock
findProjectRoot: mockFindProjectRoot, // Use the mock function instance
log: mockLog, // Use the mock function instance
// Include other necessary exports from utils if config-manager uses them directly
resolveEnvVariable: jest.fn() // Example if needed
}));
// DO NOT MOCK 'chalk'
// --- Import the module under test AFTER mocks are defined ---
import * as configManager from '../../scripts/modules/config-manager.js';
// Import the mocked 'fs' module to allow spying on its functions
import fsMocked from 'fs';
// --- Test Data (Keep as is, ensure DEFAULT_CONFIG is accurate) ---
const MOCK_PROJECT_ROOT = '/mock/project';
const MOCK_CONFIG_PATH = path.join(MOCK_PROJECT_ROOT, '.taskmasterconfig');
// Updated DEFAULT_CONFIG reflecting the implementation
const DEFAULT_CONFIG = {
models: {
main: {
provider: 'anthropic',
modelId: 'claude-3-7-sonnet-20250219',
maxTokens: 64000,
temperature: 0.2
},
research: {
provider: 'perplexity',
modelId: 'sonar-pro',
maxTokens: 8700,
temperature: 0.1
},
fallback: {
provider: 'anthropic',
modelId: 'claude-3-5-sonnet',
maxTokens: 64000,
temperature: 0.2
}
},
global: {
logLevel: 'info',
debug: false,
defaultSubtasks: 5,
defaultPriority: 'medium',
projectName: 'Task Master',
ollamaBaseUrl: 'http://localhost:11434/api'
}
};
// Other test data (VALID_CUSTOM_CONFIG, PARTIAL_CONFIG, INVALID_PROVIDER_CONFIG)
const VALID_CUSTOM_CONFIG = {
models: {
main: {
provider: 'openai',
modelId: 'gpt-4o',
maxTokens: 4096,
temperature: 0.5
},
research: {
provider: 'google',
modelId: 'gemini-1.5-pro-latest',
maxTokens: 8192,
temperature: 0.3
},
fallback: {
provider: 'anthropic',
modelId: 'claude-3-opus-20240229',
maxTokens: 100000,
temperature: 0.4
}
},
global: {
logLevel: 'debug',
defaultPriority: 'high',
projectName: 'My Custom Project'
}
};
const PARTIAL_CONFIG = {
models: {
main: { provider: 'openai', modelId: 'gpt-4-turbo' }
},
global: {
projectName: 'Partial Project'
}
};
const INVALID_PROVIDER_CONFIG = {
models: {
main: { provider: 'invalid-provider', modelId: 'some-model' },
research: {
provider: 'perplexity',
modelId: 'llama-3-sonar-large-32k-online'
}
},
global: {
logLevel: 'warn'
}
};
// Define spies globally to be restored in afterAll
let consoleErrorSpy;
let consoleWarnSpy;
let fsReadFileSyncSpy;
let fsWriteFileSyncSpy;
let fsExistsSyncSpy;
beforeAll(() => {
// Set up console spies
consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
});
afterAll(() => {
// Restore all spies
jest.restoreAllMocks();
});
// Reset mocks before each test for isolation
beforeEach(() => {
// Clear all mock calls and reset implementations between tests
jest.clearAllMocks();
// Reset the external mock instances for utils
mockFindProjectRoot.mockReset();
mockLog.mockReset();
// --- Set up spies ON the imported 'fs' mock ---
fsExistsSyncSpy = jest.spyOn(fsMocked, 'existsSync');
fsReadFileSyncSpy = jest.spyOn(fsMocked, 'readFileSync');
fsWriteFileSyncSpy = jest.spyOn(fsMocked, 'writeFileSync');
// --- Default Mock Implementations ---
mockFindProjectRoot.mockReturnValue(MOCK_PROJECT_ROOT); // Default for utils.findProjectRoot
fsExistsSyncSpy.mockReturnValue(true); // Assume files exist by default
// Default readFileSync: Return REAL models content, mocked config, or throw error
fsReadFileSyncSpy.mockImplementation((filePath) => {
const baseName = path.basename(filePath);
if (baseName === 'supported-models.json') {
// Return the REAL file content stringified
return REAL_SUPPORTED_MODELS_CONTENT;
} else if (filePath === MOCK_CONFIG_PATH) {
// Still mock the .taskmasterconfig reads
return JSON.stringify(DEFAULT_CONFIG); // Default behavior
}
// Throw for unexpected reads - helps catch errors
throw new Error(`Unexpected fs.readFileSync call in test: ${filePath}`);
});
// Default writeFileSync: Do nothing, just allow calls
fsWriteFileSyncSpy.mockImplementation(() => {});
});
// --- Validation Functions ---
describe('Validation Functions', () => {
// Tests for validateProvider and validateProviderModelCombination
test('validateProvider should return true for valid providers', () => {
expect(configManager.validateProvider('openai')).toBe(true);
expect(configManager.validateProvider('anthropic')).toBe(true);
expect(configManager.validateProvider('google')).toBe(true);
expect(configManager.validateProvider('perplexity')).toBe(true);
expect(configManager.validateProvider('ollama')).toBe(true);
expect(configManager.validateProvider('openrouter')).toBe(true);
});
test('validateProvider should return false for invalid providers', () => {
expect(configManager.validateProvider('invalid-provider')).toBe(false);
expect(configManager.validateProvider('grok')).toBe(false); // Not in supported-models.json
expect(configManager.validateProvider('')).toBe(false);
expect(configManager.validateProvider(null)).toBe(false);
});
test('validateProviderModelCombination should validate known good combinations', () => {
// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
configManager.getConfig(MOCK_PROJECT_ROOT, true);
expect(
configManager.validateProviderModelCombination('openai', 'gpt-4o')
).toBe(true);
expect(
configManager.validateProviderModelCombination(
'anthropic',
'claude-3-5-sonnet-20241022'
)
).toBe(true);
});
test('validateProviderModelCombination should return false for known bad combinations', () => {
// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
configManager.getConfig(MOCK_PROJECT_ROOT, true);
expect(
configManager.validateProviderModelCombination(
'openai',
'claude-3-opus-20240229'
)
).toBe(false);
});
test('validateProviderModelCombination should return false for ollama/openrouter (empty lists in map)', () => {
// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
configManager.getConfig(MOCK_PROJECT_ROOT, true);
expect(
configManager.validateProviderModelCombination('ollama', 'any-model')
).toBe(false);
expect(
configManager.validateProviderModelCombination('openrouter', 'any/model')
).toBe(false);
});
test('validateProviderModelCombination should return true for providers not in map', () => {
// Re-load config to ensure MODEL_MAP is populated from mock (now real data)
configManager.getConfig(MOCK_PROJECT_ROOT, true);
// The implementation returns true if the provider isn't in the map
expect(
configManager.validateProviderModelCombination(
'unknown-provider',
'some-model'
)
).toBe(true);
});
});
// --- getConfig Tests ---
describe('getConfig Tests', () => {
test('should return default config if .taskmasterconfig does not exist', () => {
// Arrange
fsExistsSyncSpy.mockReturnValue(false);
// findProjectRoot mock is set in beforeEach
// Act: Call getConfig with explicit root
const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload
// Assert
expect(config).toEqual(DEFAULT_CONFIG);
expect(mockFindProjectRoot).not.toHaveBeenCalled(); // Explicit root provided
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
expect(fsReadFileSyncSpy).not.toHaveBeenCalled(); // No read if file doesn't exist
expect(consoleWarnSpy).toHaveBeenCalledWith(
expect.stringContaining('not found at provided project root')
);
});
test.skip('should use findProjectRoot and return defaults if file not found', () => {
// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
// Arrange
fsExistsSyncSpy.mockReturnValue(false);
// findProjectRoot mock is set in beforeEach
// Act: Call getConfig without explicit root
const config = configManager.getConfig(null, true); // Force reload
// Assert
expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
expect(config).toEqual(DEFAULT_CONFIG);
expect(fsReadFileSyncSpy).not.toHaveBeenCalled();
expect(consoleWarnSpy).toHaveBeenCalledWith(
expect.stringContaining('not found at derived root')
); // Adjusted expected warning
});
test('should read and merge valid config file with defaults', () => {
// Arrange: Override readFileSync for this test
fsReadFileSyncSpy.mockImplementation((filePath) => {
if (filePath === MOCK_CONFIG_PATH)
return JSON.stringify(VALID_CUSTOM_CONFIG);
if (path.basename(filePath) === 'supported-models.json') {
// Provide necessary models for validation within getConfig
return JSON.stringify({
openai: [{ id: 'gpt-4o' }],
google: [{ id: 'gemini-1.5-pro-latest' }],
perplexity: [{ id: 'sonar-pro' }],
anthropic: [
{ id: 'claude-3-opus-20240229' },
{ id: 'claude-3-5-sonnet' },
{ id: 'claude-3-7-sonnet-20250219' }
],
ollama: [],
openrouter: []
});
}
throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
});
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
// Act
const config = configManager.getConfig(MOCK_PROJECT_ROOT, true); // Force reload
// Assert: Construct expected merged config
const expectedMergedConfig = {
models: {
main: {
...DEFAULT_CONFIG.models.main,
...VALID_CUSTOM_CONFIG.models.main
},
research: {
...DEFAULT_CONFIG.models.research,
...VALID_CUSTOM_CONFIG.models.research
},
fallback: {
...DEFAULT_CONFIG.models.fallback,
...VALID_CUSTOM_CONFIG.models.fallback
}
},
global: { ...DEFAULT_CONFIG.global, ...VALID_CUSTOM_CONFIG.global }
};
expect(config).toEqual(expectedMergedConfig);
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
});
test('should merge defaults for partial config file', () => {
// Arrange
fsReadFileSyncSpy.mockImplementation((filePath) => {
if (filePath === MOCK_CONFIG_PATH) return JSON.stringify(PARTIAL_CONFIG);
if (path.basename(filePath) === 'supported-models.json') {
return JSON.stringify({
openai: [{ id: 'gpt-4-turbo' }],
perplexity: [{ id: 'sonar-pro' }],
anthropic: [
{ id: 'claude-3-7-sonnet-20250219' },
{ id: 'claude-3-5-sonnet' }
],
ollama: [],
openrouter: []
});
}
throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
});
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
// Act
const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
// Assert: Construct expected merged config
const expectedMergedConfig = {
models: {
main: { ...DEFAULT_CONFIG.models.main, ...PARTIAL_CONFIG.models.main },
research: { ...DEFAULT_CONFIG.models.research },
fallback: { ...DEFAULT_CONFIG.models.fallback }
},
global: { ...DEFAULT_CONFIG.global, ...PARTIAL_CONFIG.global }
};
expect(config).toEqual(expectedMergedConfig);
expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
});
test('should handle JSON parsing error and return defaults', () => {
// Arrange
fsReadFileSyncSpy.mockImplementation((filePath) => {
if (filePath === MOCK_CONFIG_PATH) return 'invalid json';
// Mock models read needed for initial load before parse error
if (path.basename(filePath) === 'supported-models.json') {
return JSON.stringify({
anthropic: [
{ id: 'claude-3-7-sonnet-20250219' },
{ id: 'claude-3-5-sonnet' }
],
perplexity: [{ id: 'sonar-pro' }],
ollama: [],
openrouter: []
});
}
throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
});
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
// Act
const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
// Assert
expect(config).toEqual(DEFAULT_CONFIG);
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining('Error reading or parsing')
);
});
test('should handle file read error and return defaults', () => {
// Arrange
const readError = new Error('Permission denied');
fsReadFileSyncSpy.mockImplementation((filePath) => {
if (filePath === MOCK_CONFIG_PATH) throw readError;
// Mock models read needed for initial load before read error
if (path.basename(filePath) === 'supported-models.json') {
return JSON.stringify({
anthropic: [
{ id: 'claude-3-7-sonnet-20250219' },
{ id: 'claude-3-5-sonnet' }
],
perplexity: [{ id: 'sonar-pro' }],
ollama: [],
openrouter: []
});
}
throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
});
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
// Act
const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
// Assert
expect(config).toEqual(DEFAULT_CONFIG);
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining(`Permission denied. Using default configuration.`)
);
});
test('should validate provider and fallback to default if invalid', () => {
// Arrange
fsReadFileSyncSpy.mockImplementation((filePath) => {
if (filePath === MOCK_CONFIG_PATH)
return JSON.stringify(INVALID_PROVIDER_CONFIG);
if (path.basename(filePath) === 'supported-models.json') {
return JSON.stringify({
perplexity: [{ id: 'llama-3-sonar-large-32k-online' }],
anthropic: [
{ id: 'claude-3-7-sonnet-20250219' },
{ id: 'claude-3-5-sonnet' }
],
ollama: [],
openrouter: []
});
}
throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
});
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
// Act
const config = configManager.getConfig(MOCK_PROJECT_ROOT, true);
// Assert
expect(consoleWarnSpy).toHaveBeenCalledWith(
expect.stringContaining(
'Warning: Invalid main provider "invalid-provider"'
)
);
const expectedMergedConfig = {
models: {
main: { ...DEFAULT_CONFIG.models.main },
research: {
...DEFAULT_CONFIG.models.research,
...INVALID_PROVIDER_CONFIG.models.research
},
fallback: { ...DEFAULT_CONFIG.models.fallback }
},
global: { ...DEFAULT_CONFIG.global, ...INVALID_PROVIDER_CONFIG.global }
};
expect(config).toEqual(expectedMergedConfig);
});
});
// --- writeConfig Tests ---
describe('writeConfig', () => {
test('should write valid config to file', () => {
// Arrange (Default mocks are sufficient)
// findProjectRoot mock set in beforeEach
fsWriteFileSyncSpy.mockImplementation(() => {}); // Ensure it doesn't throw
// Act
const success = configManager.writeConfig(
VALID_CUSTOM_CONFIG,
MOCK_PROJECT_ROOT
);
// Assert
expect(success).toBe(true);
expect(fsWriteFileSyncSpy).toHaveBeenCalledWith(
MOCK_CONFIG_PATH,
JSON.stringify(VALID_CUSTOM_CONFIG, null, 2) // writeConfig stringifies
);
expect(consoleErrorSpy).not.toHaveBeenCalled();
});
test('should return false and log error if write fails', () => {
// Arrange
const mockWriteError = new Error('Disk full');
fsWriteFileSyncSpy.mockImplementation(() => {
throw mockWriteError;
});
// findProjectRoot mock set in beforeEach
// Act
const success = configManager.writeConfig(
VALID_CUSTOM_CONFIG,
MOCK_PROJECT_ROOT
);
// Assert
expect(success).toBe(false);
expect(fsWriteFileSyncSpy).toHaveBeenCalled();
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining(`Disk full`)
);
});
test.skip('should return false if project root cannot be determined', () => {
// TODO: Fix mock interaction or function logic, returns true unexpectedly in test
// Arrange: Override mock for this specific test
mockFindProjectRoot.mockReturnValue(null);
// Act: Call without explicit root
const success = configManager.writeConfig(VALID_CUSTOM_CONFIG);
// Assert
expect(success).toBe(false); // Function should return false if root is null
expect(mockFindProjectRoot).toHaveBeenCalled();
expect(fsWriteFileSyncSpy).not.toHaveBeenCalled();
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining('Could not determine project root')
);
});
});
// --- Getter Functions ---
describe('Getter Functions', () => {
test('getMainProvider should return provider from config', () => {
// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
fsReadFileSyncSpy.mockImplementation((filePath) => {
if (filePath === MOCK_CONFIG_PATH)
return JSON.stringify(VALID_CUSTOM_CONFIG);
if (path.basename(filePath) === 'supported-models.json') {
return JSON.stringify({
openai: [{ id: 'gpt-4o' }],
google: [{ id: 'gemini-1.5-pro-latest' }],
anthropic: [
{ id: 'claude-3-opus-20240229' },
{ id: 'claude-3-7-sonnet-20250219' },
{ id: 'claude-3-5-sonnet' }
],
perplexity: [{ id: 'sonar-pro' }],
ollama: [],
openrouter: []
}); // Added perplexity
}
throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
});
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
// Act
const provider = configManager.getMainProvider(MOCK_PROJECT_ROOT);
// Assert
expect(provider).toBe(VALID_CUSTOM_CONFIG.models.main.provider);
});
test('getLogLevel should return logLevel from config', () => {
// Arrange: Set up readFileSync to return VALID_CUSTOM_CONFIG
fsReadFileSyncSpy.mockImplementation((filePath) => {
if (filePath === MOCK_CONFIG_PATH)
return JSON.stringify(VALID_CUSTOM_CONFIG);
if (path.basename(filePath) === 'supported-models.json') {
// Provide enough mock model data for validation within getConfig
return JSON.stringify({
openai: [{ id: 'gpt-4o' }],
google: [{ id: 'gemini-1.5-pro-latest' }],
anthropic: [
{ id: 'claude-3-opus-20240229' },
{ id: 'claude-3-7-sonnet-20250219' },
{ id: 'claude-3-5-sonnet' }
],
perplexity: [{ id: 'sonar-pro' }],
ollama: [],
openrouter: []
});
}
throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
});
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
// Act
const logLevel = configManager.getLogLevel(MOCK_PROJECT_ROOT);
// Assert
expect(logLevel).toBe(VALID_CUSTOM_CONFIG.global.logLevel);
});
// Add more tests for other getters (getResearchProvider, getProjectName, etc.)
});
// --- isConfigFilePresent Tests ---
describe('isConfigFilePresent', () => {
test('should return true if config file exists', () => {
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(true);
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
});
test('should return false if config file does not exist', () => {
fsExistsSyncSpy.mockReturnValue(false);
// findProjectRoot mock set in beforeEach
expect(configManager.isConfigFilePresent(MOCK_PROJECT_ROOT)).toBe(false);
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
});
test.skip('should use findProjectRoot if explicitRoot is not provided', () => {
// TODO: Fix mock interaction, findProjectRoot isn't being registered as called
fsExistsSyncSpy.mockReturnValue(true);
// findProjectRoot mock set in beforeEach
expect(configManager.isConfigFilePresent()).toBe(true);
expect(mockFindProjectRoot).toHaveBeenCalled(); // Should be called now
});
});
// --- getAllProviders Tests ---
describe('getAllProviders', () => {
test('should return list of providers from supported-models.json', () => {
// Arrange: Ensure config is loaded with real data
configManager.getConfig(null, true); // Force load using the mock that returns real data
// Act
const providers = configManager.getAllProviders();
// Assert
// Assert against the actual keys in the REAL loaded data
const expectedProviders = Object.keys(REAL_SUPPORTED_MODELS_DATA);
expect(providers).toEqual(expect.arrayContaining(expectedProviders));
expect(providers.length).toBe(expectedProviders.length);
});
});
// Add tests for getParametersForRole if needed
// Note: Tests for setMainModel, setResearchModel were removed as the functions were removed in the implementation.
// If similar setter functions exist, add tests for them following the writeConfig pattern.
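For orientation, the expectedMergedConfig objects built in these tests all follow the same pattern: a shallow per-role merge of the user file over the defaults, a shallow merge of the global section, and a reset to the default role config when a provider fails validation. A sketch under those assumptions (the provider list and warning wording are inferred, not copied from the implementation):

// Assumed provider list, derived from the keys of supported-models.json
const validProvidersSketch = ['anthropic', 'perplexity', 'openai', 'google', 'ollama', 'openrouter'];

function mergeWithDefaultsSketch(userConfig, defaults) {
  const merged = {
    models: {
      main: { ...defaults.models.main, ...(userConfig.models?.main || {}) },
      research: { ...defaults.models.research, ...(userConfig.models?.research || {}) },
      fallback: { ...defaults.models.fallback, ...(userConfig.models?.fallback || {}) }
    },
    global: { ...defaults.global, ...(userConfig.global || {}) }
  };
  // Invalid providers fall back to the default role configuration, with a warning
  for (const role of ['main', 'research', 'fallback']) {
    if (!validProvidersSketch.includes(merged.models[role].provider)) {
      console.warn(`Warning: Invalid ${role} provider "${merged.models[role].provider}"`);
      merged.models[role] = { ...defaults.models[role] };
    }
  }
  return merged;
}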


@@ -1,182 +0,0 @@
import { jest } from '@jest/globals';
import fs from 'fs';
import path from 'path';
import os from 'os';
// Mock external modules
jest.mock('child_process', () => ({
execSync: jest.fn()
}));
// Mock console methods
jest.mock('console', () => ({
log: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
clear: jest.fn()
}));
describe('Roo Integration', () => {
let tempDir;
beforeEach(() => {
jest.clearAllMocks();
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('.roomodes')) {
return 'Existing roomodes content';
}
if (filePath.toString().includes('-rules')) {
return 'Existing mode rules content';
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation(() => false);
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
});
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
// Test function that simulates the createProjectStructure behavior for Roo files
function mockCreateRooStructure() {
// Create main .roo directory
fs.mkdirSync(path.join(tempDir, '.roo'), { recursive: true });
// Create rules directory
fs.mkdirSync(path.join(tempDir, '.roo', 'rules'), { recursive: true });
// Create mode-specific rule directories
const rooModes = ['architect', 'ask', 'boomerang', 'code', 'debug', 'test'];
for (const mode of rooModes) {
fs.mkdirSync(path.join(tempDir, '.roo', `rules-${mode}`), {
recursive: true
});
fs.writeFileSync(
path.join(tempDir, '.roo', `rules-${mode}`, `${mode}-rules`),
`Content for ${mode} rules`
);
}
// Create additional directories
fs.mkdirSync(path.join(tempDir, '.roo', 'config'), { recursive: true });
fs.mkdirSync(path.join(tempDir, '.roo', 'templates'), { recursive: true });
fs.mkdirSync(path.join(tempDir, '.roo', 'logs'), { recursive: true });
// Copy .roomodes file
fs.writeFileSync(path.join(tempDir, '.roomodes'), 'Roomodes file content');
}
test('creates all required .roo directories', () => {
// Act
mockCreateRooStructure();
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(path.join(tempDir, '.roo'), {
recursive: true
});
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules'),
{ recursive: true }
);
// Verify all mode directories are created
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-architect'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-ask'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-boomerang'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-code'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-debug'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-test'),
{ recursive: true }
);
});
test('creates rule files for all modes', () => {
// Act
mockCreateRooStructure();
// Assert - check all rule files are created
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-architect', 'architect-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-ask', 'ask-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-boomerang', 'boomerang-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-code', 'code-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-debug', 'debug-rules'),
expect.any(String)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'rules-test', 'test-rules'),
expect.any(String)
);
});
test('creates .roomodes file in project root', () => {
// Act
mockCreateRooStructure();
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.roomodes'),
expect.any(String)
);
});
test('creates additional required Roo directories', () => {
// Act
mockCreateRooStructure();
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'config'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'templates'),
{ recursive: true }
);
expect(fs.mkdirSync).toHaveBeenCalledWith(
path.join(tempDir, '.roo', 'logs'),
{ recursive: true }
);
});
});


@@ -1,112 +0,0 @@
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { dirname } from 'path';
import { convertCursorRuleToRooRule } from '../../scripts/modules/rule-transformer.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
describe('Rule Transformer', () => {
const testDir = path.join(__dirname, 'temp-test-dir');
beforeAll(() => {
// Create test directory
if (!fs.existsSync(testDir)) {
fs.mkdirSync(testDir, { recursive: true });
}
});
afterAll(() => {
// Clean up test directory
if (fs.existsSync(testDir)) {
fs.rmSync(testDir, { recursive: true, force: true });
}
});
it('should correctly convert basic terms', () => {
// Create a test Cursor rule file with basic terms
const testCursorRule = path.join(testDir, 'basic-terms.mdc');
const testContent = `---
description: Test Cursor rule for basic terms
globs: **/*
alwaysApply: true
---
This is a Cursor rule that references cursor.so and uses the word Cursor multiple times.
Also has references to .mdc files.`;
fs.writeFileSync(testCursorRule, testContent);
// Convert it
const testRooRule = path.join(testDir, 'basic-terms.md');
convertCursorRuleToRooRule(testCursorRule, testRooRule);
// Read the converted file
const convertedContent = fs.readFileSync(testRooRule, 'utf8');
// Verify transformations
expect(convertedContent).toContain('Roo Code');
expect(convertedContent).toContain('roocode.com');
expect(convertedContent).toContain('.md');
expect(convertedContent).not.toContain('cursor.so');
expect(convertedContent).not.toContain('Cursor rule');
});
it('should correctly convert tool references', () => {
// Create a test Cursor rule file with tool references
const testCursorRule = path.join(testDir, 'tool-refs.mdc');
const testContent = `---
description: Test Cursor rule for tool references
globs: **/*
alwaysApply: true
---
- Use the search tool to find code
- The edit_file tool lets you modify files
- run_command executes terminal commands
- use_mcp connects to external services`;
fs.writeFileSync(testCursorRule, testContent);
// Convert it
const testRooRule = path.join(testDir, 'tool-refs.md');
convertCursorRuleToRooRule(testCursorRule, testRooRule);
// Read the converted file
const convertedContent = fs.readFileSync(testRooRule, 'utf8');
// Verify transformations
expect(convertedContent).toContain('search_files tool');
expect(convertedContent).toContain('apply_diff tool');
expect(convertedContent).toContain('execute_command');
expect(convertedContent).toContain('use_mcp_tool');
});
it('should correctly update file references', () => {
// Create a test Cursor rule file with file references
const testCursorRule = path.join(testDir, 'file-refs.mdc');
const testContent = `---
description: Test Cursor rule for file references
globs: **/*
alwaysApply: true
---
This references [dev_workflow.mdc](mdc:.cursor/rules/dev_workflow.mdc) and
[taskmaster.mdc](mdc:.cursor/rules/taskmaster.mdc).`;
fs.writeFileSync(testCursorRule, testContent);
// Convert it
const testRooRule = path.join(testDir, 'file-refs.md');
convertCursorRuleToRooRule(testCursorRule, testRooRule);
// Read the converted file
const convertedContent = fs.readFileSync(testRooRule, 'utf8');
// Verify transformations
expect(convertedContent).toContain('(mdc:.roo/rules/dev_workflow.md)');
expect(convertedContent).toContain('(mdc:.roo/rules/taskmaster.md)');
expect(convertedContent).not.toContain('(mdc:.cursor/rules/');
});
});
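The three tests above pin down a small set of string rewrites. A reconstruction of convertCursorRuleToRooRule under those assertions (the regex choices and replacement order are assumptions, not the module's actual code):

function convertCursorRuleToRooRuleSketch(sourcePath, targetPath) {
  let content = fs.readFileSync(sourcePath, 'utf8');
  content = content
    .replace(/cursor\.so/g, 'roocode.com') // product URL
    .replace(/Cursor/g, 'Roo Code') // product name
    .replace(/\.cursor\/rules/g, '.roo/rules') // rule directory in links
    .replace(/\.mdc\b/g, '.md') // rule file extension
    .replace(/\bsearch tool\b/g, 'search_files tool')
    .replace(/\bedit_file tool\b/g, 'apply_diff tool')
    .replace(/\brun_command\b/g, 'execute_command')
    .replace(/\buse_mcp\b/g, 'use_mcp_tool');
  fs.writeFileSync(targetPath, content);
}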


@@ -8,52 +8,43 @@ import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
 describe('Task Finder', () => {
 describe('findTaskById function', () => {
 test('should find a task by numeric ID', () => {
-const result = findTaskById(sampleTasks.tasks, 2);
-expect(result.task).toBeDefined();
-expect(result.task.id).toBe(2);
-expect(result.task.title).toBe('Create Core Functionality');
-expect(result.originalSubtaskCount).toBeNull();
+const task = findTaskById(sampleTasks.tasks, 2);
+expect(task).toBeDefined();
+expect(task.id).toBe(2);
+expect(task.title).toBe('Create Core Functionality');
 });
 test('should find a task by string ID', () => {
-const result = findTaskById(sampleTasks.tasks, '2');
-expect(result.task).toBeDefined();
-expect(result.task.id).toBe(2);
-expect(result.originalSubtaskCount).toBeNull();
+const task = findTaskById(sampleTasks.tasks, '2');
+expect(task).toBeDefined();
+expect(task.id).toBe(2);
 });
 test('should find a subtask using dot notation', () => {
-const result = findTaskById(sampleTasks.tasks, '3.1');
-expect(result.task).toBeDefined();
-expect(result.task.id).toBe(1);
-expect(result.task.title).toBe('Create Header Component');
-expect(result.task.isSubtask).toBe(true);
-expect(result.task.parentTask.id).toBe(3);
-expect(result.originalSubtaskCount).toBeNull();
+const subtask = findTaskById(sampleTasks.tasks, '3.1');
+expect(subtask).toBeDefined();
+expect(subtask.id).toBe(1);
+expect(subtask.title).toBe('Create Header Component');
 });
 test('should return null for non-existent task ID', () => {
-const result = findTaskById(sampleTasks.tasks, 99);
-expect(result.task).toBeNull();
-expect(result.originalSubtaskCount).toBeNull();
+const task = findTaskById(sampleTasks.tasks, 99);
+expect(task).toBeNull();
 });
 test('should return null for non-existent subtask ID', () => {
-const result = findTaskById(sampleTasks.tasks, '3.99');
-expect(result.task).toBeNull();
-expect(result.originalSubtaskCount).toBeNull();
+const subtask = findTaskById(sampleTasks.tasks, '3.99');
+expect(subtask).toBeNull();
 });
 test('should return null for non-existent parent task ID in subtask notation', () => {
-const result = findTaskById(sampleTasks.tasks, '99.1');
-expect(result.task).toBeNull();
-expect(result.originalSubtaskCount).toBeNull();
+const subtask = findTaskById(sampleTasks.tasks, '99.1');
+expect(subtask).toBeNull();
 });
 test('should return null when tasks array is empty', () => {
-const result = findTaskById(emptySampleTasks.tasks, 1);
-expect(result.task).toBeNull();
-expect(result.originalSubtaskCount).toBeNull();
+const task = findTaskById(emptySampleTasks.tasks, 1);
+expect(task).toBeNull();
 });
 });
 });
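The reverted contract is simpler than the 0.13.0 one: findTaskById returns the task or subtask object directly, or null, instead of a { task, originalSubtaskCount } wrapper. A sketch of that contract, inferred from the assertions (the subtask lookup details are assumptions):

function findTaskByIdSketch(tasks, taskId) {
  const id = String(taskId);
  if (id.includes('.')) {
    const [parentId, subtaskId] = id.split('.').map(Number);
    const parent = tasks.find((t) => t.id === parentId);
    return parent?.subtasks?.find((st) => st.id === subtaskId) || null;
  }
  return tasks.find((t) => t.id === Number(id)) || null;
}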

File diff suppressed because it is too large.


@@ -5,6 +5,7 @@
 import { jest } from '@jest/globals';
 import fs from 'fs';
 import path from 'path';
+import chalk from 'chalk';
 // Import the actual module to test
 import {
@@ -18,14 +19,21 @@ import {
 taskExists,
 formatTaskId,
 findCycles,
+CONFIG,
+LOG_LEVELS,
 findTaskById,
 toKebabCase
 } from '../../scripts/modules/utils.js';
-// Mock config-manager to provide config values
-const mockGetLogLevel = jest.fn(() => 'info'); // Default log level for tests
-jest.mock('../../scripts/modules/config-manager.js', () => ({
-getLogLevel: mockGetLogLevel
-// Mock other getters if needed by utils.js functions under test
+// Skip the import of detectCamelCaseFlags as we'll implement our own version for testing
+// Mock chalk functions
+jest.mock('chalk', () => ({
+gray: jest.fn((text) => `gray:${text}`),
+blue: jest.fn((text) => `blue:${text}`),
+yellow: jest.fn((text) => `yellow:${text}`),
+red: jest.fn((text) => `red:${text}`),
+green: jest.fn((text) => `green:${text}`)
 }));
 // Test implementation of detectCamelCaseFlags
@@ -121,27 +129,23 @@ describe('Utils Module', () => {
 });
 });
-describe.skip('log function', () => {
-// const originalConsoleLog = console.log; // Keep original for potential restore if needed
+describe('log function', () => {
+// Save original console.log
+const originalConsoleLog = console.log;
 beforeEach(() => {
 // Mock console.log for each test
-// console.log = jest.fn(); // REMOVE console.log spy
-mockGetLogLevel.mockClear(); // Clear mock calls
+console.log = jest.fn();
 });
 afterEach(() => {
 // Restore original console.log after each test
-// console.log = originalConsoleLog; // REMOVE console.log restore
+console.log = originalConsoleLog;
 });
-test('should log messages according to log level from config-manager', () => {
-// Test with info level (default from mock)
-mockGetLogLevel.mockReturnValue('info');
-// Spy on console.log JUST for this test to verify calls
-const consoleSpy = jest
-.spyOn(console, 'log')
-.mockImplementation(() => {});
+test('should log messages according to log level', () => {
+// Test with info level (1)
+CONFIG.logLevel = 'info';
 log('debug', 'Debug message');
 log('info', 'Info message');
@@ -149,47 +153,36 @@ describe('Utils Module', () => {
 log('error', 'Error message');
 // Debug should not be logged (level 0 < 1)
-expect(consoleSpy).not.toHaveBeenCalledWith(
+expect(console.log).not.toHaveBeenCalledWith(
 expect.stringContaining('Debug message')
 );
 // Info and above should be logged
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('Info message')
 );
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('Warning message')
 );
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('Error message')
 );
 // Verify the formatting includes text prefixes
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('[INFO]')
 );
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('[WARN]')
 );
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('[ERROR]')
 );
-// Verify getLogLevel was called by log function
-expect(mockGetLogLevel).toHaveBeenCalled();
-// Restore spy for this test
-consoleSpy.mockRestore();
 });
 test('should not log messages below the configured log level', () => {
-// Set log level to error via mock
-mockGetLogLevel.mockReturnValue('error');
-// Spy on console.log JUST for this test
-const consoleSpy = jest
-.spyOn(console, 'log')
-.mockImplementation(() => {});
+// Set log level to error (3)
+CONFIG.logLevel = 'error';
 log('debug', 'Debug message');
 log('info', 'Info message');
@@ -197,44 +190,30 @@ describe('Utils Module', () => {
 log('error', 'Error message');
 // Only error should be logged
-expect(consoleSpy).not.toHaveBeenCalledWith(
+expect(console.log).not.toHaveBeenCalledWith(
 expect.stringContaining('Debug message')
 );
-expect(consoleSpy).not.toHaveBeenCalledWith(
+expect(console.log).not.toHaveBeenCalledWith(
 expect.stringContaining('Info message')
 );
-expect(consoleSpy).not.toHaveBeenCalledWith(
+expect(console.log).not.toHaveBeenCalledWith(
 expect.stringContaining('Warning message')
 );
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('Error message')
 );
-// Verify getLogLevel was called
-expect(mockGetLogLevel).toHaveBeenCalled();
-// Restore spy for this test
-consoleSpy.mockRestore();
 });
 test('should join multiple arguments into a single message', () => {
-mockGetLogLevel.mockReturnValue('info');
-// Spy on console.log JUST for this test
-const consoleSpy = jest
-.spyOn(console, 'log')
-.mockImplementation(() => {});
+CONFIG.logLevel = 'info';
 log('info', 'Message', 'with', 'multiple', 'parts');
-expect(consoleSpy).toHaveBeenCalledWith(
+expect(console.log).toHaveBeenCalledWith(
 expect.stringContaining('Message with multiple parts')
 );
-// Restore spy for this test
-consoleSpy.mockRestore();
 });
 });
-describe.skip('readJSON function', () => {
+describe('readJSON function', () => {
 test('should read and parse a valid JSON file', () => {
 const testData = { key: 'value', nested: { prop: true } };
 fsReadFileSyncSpy.mockReturnValue(JSON.stringify(testData));
@@ -280,7 +259,7 @@ describe('Utils Module', () => {
 });
 });
-describe.skip('writeJSON function', () => {
+describe('writeJSON function', () => {
 test('should write JSON data to a file', () => {
 const testData = { key: 'value', nested: { prop: true } };