feat: Migrate Task Master to generateObject for structured AI responses (#1262)

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: Ben Vargas <ben@example.com>
Ralph Khreish committed (via GitHub) 2025-10-02 16:23:34 +02:00
parent c7418c4594 · commit 738ec51c04
34 changed files with 862 additions and 1034 deletions

View File

@@ -330,7 +330,7 @@ describe('Complex Cross-Tag Scenarios', () => {
describe('Large Task Set Performance', () => {
it('should handle large task sets efficiently', () => {
-// Create a large task set (100 tasks)
+// Create a large task set (50 tasks)
const largeTaskSet = {
master: {
tasks: [],
@@ -348,8 +348,8 @@ describe('Complex Cross-Tag Scenarios', () => {
}
};
-// Add 50 tasks to master with dependencies
-for (let i = 1; i <= 50; i++) {
+// Add 25 tasks to master with dependencies
+for (let i = 1; i <= 25; i++) {
largeTaskSet.master.tasks.push({
id: i,
title: `Task ${i}`,
@@ -359,8 +359,8 @@ describe('Complex Cross-Tag Scenarios', () => {
});
}
-// Add 50 tasks to in-progress
-for (let i = 51; i <= 100; i++) {
+// Add 25 tasks to in-progress (ensure no ID conflict with master)
+for (let i = 26; i <= 50; i++) {
largeTaskSet['in-progress'].tasks.push({
id: i,
title: `Task ${i}`,
@@ -371,21 +371,32 @@ describe('Complex Cross-Tag Scenarios', () => {
}
fs.writeFileSync(tasksPath, JSON.stringify(largeTaskSet, null, 2));
-// Should complete within reasonable time
-const timeout = process.env.CI ? 12000 : 8000;
-const startTime = Date.now();
+// Execute move; correctness is validated below (no timing assertion)
execSync(
-`node ${binPath} move --from=50 --from-tag=master --to-tag=in-progress --with-dependencies`,
+`node ${binPath} move --from=25 --from-tag=master --to-tag=in-progress --with-dependencies`,
{ stdio: 'pipe' }
);
-const endTime = Date.now();
-expect(endTime - startTime).toBeLessThan(timeout);
// Verify the move was successful
const tasksAfter = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
-expect(
-	tasksAfter['in-progress'].tasks.find((t) => t.id === 50)
-).toBeDefined();
+// Verify all tasks in the dependency chain were moved
+for (let i = 1; i <= 25; i++) {
+	expect(tasksAfter.master.tasks.find((t) => t.id === i)).toBeUndefined();
+	expect(
+		tasksAfter['in-progress'].tasks.find((t) => t.id === i)
+	).toBeDefined();
+}
+// Verify in-progress still has its original tasks (26-50)
+for (let i = 26; i <= 50; i++) {
+	expect(
+		tasksAfter['in-progress'].tasks.find((t) => t.id === i)
+	).toBeDefined();
+}
+// Final count check
+expect(tasksAfter['in-progress'].tasks).toHaveLength(50); // 25 moved + 25 original
});
});
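The hunk above trades a wall-clock assertion for state assertions. A condensed contrast of the two styles, as a sketch (cmd stands in for the move invocation above; this is not the test verbatim):

// Removed style: bounds elapsed time, which flakes under CI load
const start = Date.now();
execSync(cmd, { stdio: 'pipe' });
expect(Date.now() - start).toBeLessThan(process.env.CI ? 12000 : 8000);

// Replacement style: asserts observable state, which is deterministic
execSync(cmd, { stdio: 'pipe' });
const after = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
expect(after['in-progress'].tasks).toHaveLength(50); // 25 moved + 25 original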

View File

@@ -1,5 +1,6 @@
import { jest } from '@jest/globals';
import { PromptManager } from '../../../scripts/modules/prompt-manager.js';
+import { ExpandTaskResponseSchema } from '../../../src/schemas/expand-task.js';
+import { SubtaskSchema } from '../../../src/schemas/base-schemas.js';
describe('expand-task prompt template', () => {
let promptManager;
@@ -74,30 +75,25 @@ describe('expand-task prompt template', () => {
expect(userPrompt).toContain(`Current details: ${testTask.details}`);
// Also includes the expansion prompt
expect(userPrompt).toContain('Expansion Guidance:');
expect(userPrompt).toContain(params.expansionPrompt);
expect(userPrompt).toContain(params.complexityReasoningContext);
});
-test('all variants request JSON format with subtasks array', () => {
-	const variants = ['default', 'research', 'complexity-report'];
+test('ExpandTaskResponseSchema defines required subtask fields', () => {
+	// Test the schema definition directly instead of weak substring matching
+	const schema = ExpandTaskResponseSchema;
+	const subtasksSchema = schema.shape.subtasks;
+	const subtaskSchema = subtasksSchema.element;
-	variants.forEach((variant) => {
-		const params =
-			variant === 'complexity-report'
-				? { ...baseParams, expansionPrompt: 'test' }
-				: baseParams;
-		const { systemPrompt, userPrompt } = promptManager.loadPrompt(
-			'expand-task',
-			params,
-			variant
-		);
-		const combined = systemPrompt + userPrompt;
-		expect(combined.toLowerCase()).toContain('subtasks');
-		expect(combined).toContain('JSON');
-	});
+	// Verify the schema has the required fields
+	expect(subtaskSchema).toBe(SubtaskSchema);
+	expect(SubtaskSchema.shape).toHaveProperty('id');
+	expect(SubtaskSchema.shape).toHaveProperty('title');
+	expect(SubtaskSchema.shape).toHaveProperty('description');
+	expect(SubtaskSchema.shape).toHaveProperty('dependencies');
+	expect(SubtaskSchema.shape).toHaveProperty('details');
+	expect(SubtaskSchema.shape).toHaveProperty('status');
+	expect(SubtaskSchema.shape).toHaveProperty('testStrategy');
});
test('complexity-report variant fails without task context regression test', () => {
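For context, a minimal sketch of what the two imported schemas could look like. The shapes are inferred from the assertions above, not copied from src/schemas; the test walks them via Zod's ZodObject.shape and ZodArray.element:

import { z } from 'zod';

// Hypothetical reconstruction of base-schemas.js
const SubtaskSchema = z.object({
	id: z.number().int(),
	title: z.string(),
	description: z.string(),
	dependencies: z.array(z.number().int()),
	details: z.string(),
	status: z.string(),
	testStrategy: z.string()
});

// Hypothetical reconstruction of expand-task.js
const ExpandTaskResponseSchema = z.object({
	subtasks: z.array(SubtaskSchema)
});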

View File

@@ -0,0 +1,55 @@
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const promptsDir = path.join(__dirname, '../../../src/prompts');
describe('Prompt Migration Validation', () => {
const bannedPhrases = [
'Respond ONLY with',
'Return only the',
'valid JSON',
'Do not include any explanatory text',
'Do not include any explanation',
'code block markers'
];
// Map banned phrases to contexts where they're allowed
const allowedContexts = {
'respond only with': ['Use markdown formatting for better readability'],
'return only the': ['Use markdown formatting for better readability']
};
test('prompts should not contain JSON formatting instructions', () => {
const promptFiles = fs
.readdirSync(promptsDir)
.filter((file) => file.endsWith('.json') && !file.includes('schema'))
// Exclude update-subtask.json as it returns plain strings, not JSON
.filter((file) => file !== 'update-subtask.json');
promptFiles.forEach((file) => {
const content = fs.readFileSync(path.join(promptsDir, file), 'utf8');
bannedPhrases.forEach((phrase) => {
const lowerContent = content.toLowerCase();
const lowerPhrase = phrase.toLowerCase();
if (lowerContent.includes(lowerPhrase)) {
// Check if this phrase is allowed in its context
const allowedInContext = allowedContexts[lowerPhrase];
const isAllowed =
allowedInContext &&
allowedInContext.some((context) =>
lowerContent.includes(context.toLowerCase())
);
// NOTE: Jest's toBe() does not take a failure-message argument,
// so throw with context instead of passing a second parameter
if (!isAllowed) {
	throw new Error(
		`File ${file} contains banned phrase "${phrase}" without allowed context`
	);
}
}
});
});
});
});
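The banned-phrase list encodes the point of the migration: once responses come from generateObject validated against a schema, prompts no longer need JSON-policing boilerplate. A hypothetical before/after for a prompt string (illustrative only, not taken from a real file in src/prompts):

// Before (generateText era): the prompt itself policed the output shape
const before =
	'Generate the subtasks. Respond ONLY with valid JSON. ' +
	'Do not include any explanatory text or code block markers.';

// After (generateObject era): the schema guarantees structure,
// so the prompt can focus on content
const after = 'Generate the subtasks described in the task above.';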

View File

@@ -50,7 +50,7 @@ jest.unstable_mockModule(
() => ({
generateObjectService: jest.fn().mockResolvedValue({
mainResult: {
-tasks: []
+complexityAnalysis: []
},
telemetryData: {
timestamp: new Date().toISOString(),
@@ -307,10 +307,15 @@ describe('analyzeTaskComplexity', () => {
return { task: task || null, originalSubtaskCount: null };
});
generateTextService.mockResolvedValue(sampleApiResponse);
+generateObjectService.mockResolvedValue({
+	mainResult: {
+		complexityAnalysis: JSON.parse(sampleApiResponse.mainResult).tasks
+	},
+	telemetryData: sampleApiResponse.telemetryData
+});
});
-test('should call generateTextService with the correct parameters', async () => {
+test('should call generateObjectService with the correct parameters', async () => {
// Arrange
const options = {
file: 'tasks/tasks.json',
@@ -338,7 +343,7 @@ describe('analyzeTaskComplexity', () => {
'/mock/project/root',
undefined
);
-expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
+expect(generateObjectService).toHaveBeenCalledWith(expect.any(Object));
expect(mockWriteFileSync).toHaveBeenCalledWith(
expect.stringContaining('task-complexity-report.json'),
expect.stringContaining('"thresholdScore": 5'),
@@ -369,7 +374,7 @@ describe('analyzeTaskComplexity', () => {
});
// Assert
-expect(generateTextService).toHaveBeenCalledWith(
+expect(generateObjectService).toHaveBeenCalledWith(
expect.objectContaining({
role: 'research' // This should be present when research is true
})
@@ -454,7 +459,7 @@ describe('analyzeTaskComplexity', () => {
// Assert
// Check if the prompt sent to AI doesn't include the completed task (id: 3)
-expect(generateTextService).toHaveBeenCalledWith(
+expect(generateObjectService).toHaveBeenCalledWith(
expect.objectContaining({
prompt: expect.not.stringContaining('"id": 3')
})
@@ -471,7 +476,7 @@ describe('analyzeTaskComplexity', () => {
};
// Force API error
-generateTextService.mockRejectedValueOnce(new Error('API Error'));
+generateObjectService.mockRejectedValueOnce(new Error('API Error'));
const mockMcpLog = {
info: jest.fn(),
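The mock churn in this suite reduces to one call-site pattern. A sketch of the migration, with parameter names taken from the assertions in this diff and ComplexityAnalysisSchema as a hypothetical stand-in for the real schema:

// Before: the text service returned prose that callers parsed by hand
async function analyzeBefore(deps, promptParts) {
	const res = await deps.generateTextService({ role: 'main', ...promptParts });
	return JSON.parse(res.mainResult).tasks; // fragile: throws on malformed output
}

// After: the object service validates against a schema and returns structured data
async function analyzeAfter(deps, promptParts, ComplexityAnalysisSchema) {
	const res = await deps.generateObjectService({
		role: 'main',
		...promptParts,
		schema: ComplexityAnalysisSchema, // hypothetical name
		objectName: 'complexityAnalysis',
		commandName: 'analyze-complexity'
	});
	return res.mainResult.complexityAnalysis; // already parsed and validated
}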

View File

@@ -196,9 +196,62 @@ jest.unstable_mockModule(
currency: 'USD'
}
}),
-generateObjectService: jest.fn().mockResolvedValue({
-	mainResult: {
-		object: {
+generateObjectService: jest.fn().mockImplementation((params) => {
+	const commandName = params?.commandName || 'default';
+	if (commandName === 'analyze-complexity') {
+		// Check if this is for a specific tag test by looking at the prompt
+		const isFeatureTag =
+			params?.prompt?.includes('feature') || params?.role === 'feature';
+		const isMasterTag =
+			params?.prompt?.includes('master') || params?.role === 'master';
+		let taskTitle = 'Test Task';
+		if (isFeatureTag) {
+			taskTitle = 'Feature Task 1';
+		} else if (isMasterTag) {
+			taskTitle = 'Master Task 1';
+		}
+		return Promise.resolve({
+			mainResult: {
+				complexityAnalysis: [
+					{
+						taskId: 1,
+						taskTitle: taskTitle,
+						complexityScore: 7,
+						recommendedSubtasks: 4,
+						expansionPrompt: 'Break down this task',
+						reasoning: 'This task is moderately complex'
+					},
+					{
+						taskId: 2,
+						taskTitle: 'Task 2',
+						complexityScore: 5,
+						recommendedSubtasks: 3,
+						expansionPrompt: 'Break down this task with a focus on task 2.',
+						reasoning:
+							'Automatically added due to missing analysis in AI response.'
+					}
+				]
+			},
+			telemetryData: {
+				timestamp: new Date().toISOString(),
+				commandName: 'analyze-complexity',
+				modelUsed: 'claude-3-5-sonnet',
+				providerName: 'anthropic',
+				inputTokens: 1000,
+				outputTokens: 500,
+				totalTokens: 1500,
+				totalCost: 0.012414,
+				currency: 'USD'
+			}
+		});
+	}
+	// Default response for expand-task and others
+	return Promise.resolve({
+		mainResult: {
subtasks: [
{
id: 1,
@@ -210,19 +263,19 @@ jest.unstable_mockModule(
testStrategy: 'Test strategy'
}
]
},
telemetryData: {
	timestamp: new Date().toISOString(),
	commandName: 'expand-task',
	modelUsed: 'claude-3-5-sonnet',
	providerName: 'anthropic',
	inputTokens: 1000,
	outputTokens: 500,
	totalTokens: 1500,
	totalCost: 0.012414,
	currency: 'USD'
}
});
})
})
);
@@ -421,9 +474,8 @@ const { readJSON, writeJSON, getTagAwareFilePath } = await import(
'../../../../../scripts/modules/utils.js'
);
-const { generateTextService, streamTextService } = await import(
-	'../../../../../scripts/modules/ai-services-unified.js'
-);
+const { generateTextService, generateObjectService, streamTextService } =
+	await import('../../../../../scripts/modules/ai-services-unified.js');
// Import the modules under test
const { default: analyzeTaskComplexity } = await import(
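Why mockImplementation replaces the flat mockResolvedValue above: one mocked generateObjectService now backs several commands, so the mock routes on params.commandName. The pattern, reduced to a sketch under the same Jest ESM mocking this suite uses:

import { jest } from '@jest/globals';

const generateObjectService = jest.fn().mockImplementation((params) => {
	// Route on the calling command so each code path gets a fitting payload
	if (params?.commandName === 'analyze-complexity') {
		return Promise.resolve({
			mainResult: { complexityAnalysis: [] },
			telemetryData: {}
		});
	}
	// Default: the expand-task shape
	return Promise.resolve({
		mainResult: { subtasks: [] },
		telemetryData: {}
	});
});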

View File

@@ -65,8 +65,8 @@ jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
jest.unstable_mockModule(
'../../../../../scripts/modules/ai-services-unified.js',
() => ({
-generateTextService: jest.fn().mockResolvedValue({
-	mainResult: JSON.stringify({
+generateObjectService: jest.fn().mockResolvedValue({
+	mainResult: {
subtasks: [
{
id: 1,
@@ -101,7 +101,7 @@ jest.unstable_mockModule(
testStrategy: 'UI tests and visual regression testing'
}
]
-}),
+},
telemetryData: {
timestamp: new Date().toISOString(),
userId: '1234567890',
@@ -213,7 +213,7 @@ const {
findProjectRoot
} = await import('../../../../../scripts/modules/utils.js');
-const { generateTextService } = await import(
+const { generateObjectService } = await import(
'../../../../../scripts/modules/ai-services-unified.js'
);
@@ -373,7 +373,7 @@ describe('expandTask', () => {
'/mock/project/root',
undefined
);
-expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
+expect(generateObjectService).toHaveBeenCalledWith(expect.any(Object));
expect(writeJSON).toHaveBeenCalledWith(
tasksPath,
expect.objectContaining({
@@ -458,7 +458,7 @@ describe('expandTask', () => {
);
// Assert
-expect(generateTextService).toHaveBeenCalledWith(
+expect(generateObjectService).toHaveBeenCalledWith(
expect.objectContaining({
role: 'research',
commandName: expect.any(String)
@@ -496,7 +496,7 @@ describe('expandTask', () => {
telemetryData: expect.any(Object)
})
);
-expect(generateTextService).toHaveBeenCalled();
+expect(generateObjectService).toHaveBeenCalled();
});
});
@@ -660,25 +660,38 @@ describe('expandTask', () => {
// Act
await expandTask(tasksPath, taskId, 3, false, '', context, false);
-// Assert - Should append to existing subtasks with proper ID increments
-expect(writeJSON).toHaveBeenCalledWith(
-	tasksPath,
+// Assert - Verify generateObjectService was called correctly
+expect(generateObjectService).toHaveBeenCalledWith(
	expect.objectContaining({
-		tasks: expect.arrayContaining([
-			expect.objectContaining({
-				id: 4,
-				subtasks: expect.arrayContaining([
-					// Should contain both existing and new subtasks
-					expect.any(Object),
-					expect.any(Object),
-					expect.any(Object),
-					expect.any(Object) // 1 existing + 3 new = 4 total
-				])
-			})
-		])
-	}),
-	'/mock/project/root',
-	undefined
+		role: 'main',
+		commandName: 'expand-task',
+		objectName: 'subtasks'
})
);
+// Assert - Verify data was written with appended subtasks
+expect(writeJSON).toHaveBeenCalled();
+const writeCall = writeJSON.mock.calls[0];
+const savedData = writeCall[1]; // Second argument is the data
+const task4 = savedData.tasks.find((t) => t.id === 4);
+// Should have 4 subtasks total (1 existing + 3 new)
+expect(task4.subtasks).toHaveLength(4);
+// Verify existing subtask is preserved at index 0
+expect(task4.subtasks[0]).toEqual(
+	expect.objectContaining({
+		id: 1,
+		title: 'Existing subtask'
+	})
+);
+// Verify new subtasks were appended (they start with id=1 from AI)
+expect(task4.subtasks[1]).toEqual(
+	expect.objectContaining({
+		id: 1,
+		title: 'Set up project structure'
+	})
+);
});
});
@@ -743,8 +756,8 @@ describe('expandTask', () => {
// Act
await expandTask(tasksPath, taskId, undefined, false, '', context, false);
-// Assert - generateTextService called with systemPrompt for 5 subtasks
-const callArg = generateTextService.mock.calls[0][0];
+// Assert - generateObjectService called with systemPrompt for 5 subtasks
+const callArg = generateObjectService.mock.calls[0][0];
expect(callArg.systemPrompt).toContain('Generate exactly 5 subtasks');
// Assert - Should use complexity-report variant with expansion prompt
@@ -831,7 +844,9 @@ describe('expandTask', () => {
projectRoot: '/mock/project/root'
};
-generateTextService.mockRejectedValueOnce(new Error('AI service error'));
+generateObjectService.mockRejectedValueOnce(
+	new Error('AI service error')
+);
// Act & Assert
await expect(
@@ -841,6 +856,54 @@ describe('expandTask', () => {
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle missing mainResult from AI response', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Mock AI service returning response without mainResult
generateObjectService.mockResolvedValueOnce({
telemetryData: { inputTokens: 100, outputTokens: 50 }
// Missing mainResult
});
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('AI response did not include a valid subtasks array.');
expect(writeJSON).not.toHaveBeenCalled();
});
test('should handle invalid subtasks array from AI response', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
const taskId = '2';
const context = {
mcpLog: createMcpLogMock(),
projectRoot: '/mock/project/root'
};
// Mock AI service returning response with invalid subtasks
generateObjectService.mockResolvedValueOnce({
mainResult: {
subtasks: 'not-an-array' // Invalid: should be an array
},
telemetryData: { inputTokens: 100, outputTokens: 50 }
});
// Act & Assert
await expect(
expandTask(tasksPath, taskId, 3, false, '', context, false)
).rejects.toThrow('AI response did not include a valid subtasks array.');
expect(writeJSON).not.toHaveBeenCalled();
});
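Both failure tests pin the same production guard. A minimal sketch of the check they imply, reconstructed from the shared error message rather than the actual expand-task source:

// Hypothetical guard inside expandTask, inferred from the two tests above
async function fetchSubtasks(generateObjectService, serviceParams) {
	const aiResponse = await generateObjectService(serviceParams);
	const subtasks = aiResponse?.mainResult?.subtasks;
	if (!Array.isArray(subtasks)) {
		throw new Error('AI response did not include a valid subtasks array.');
	}
	return subtasks;
}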
test('should handle file read errors', async () => {
// Arrange
const tasksPath = 'tasks/tasks.json';
@@ -941,7 +1004,7 @@ describe('expandTask', () => {
await expandTask(tasksPath, taskId, 3, false, '', context, false);
// Assert - Should work with empty context (but may include project context)
-expect(generateTextService).toHaveBeenCalledWith(
+expect(generateObjectService).toHaveBeenCalledWith(
expect.objectContaining({
prompt: expect.stringMatching(/.*/) // Just ensure prompt exists
})
@@ -1074,7 +1137,7 @@ describe('expandTask', () => {
// Assert - Should complete successfully
expect(result).toBeDefined();
-expect(generateTextService).toHaveBeenCalled();
+expect(generateObjectService).toHaveBeenCalled();
});
test('should use dynamic prompting when numSubtasks is 0', async () => {
@@ -1095,11 +1158,11 @@ describe('expandTask', () => {
// Act
await expandTask(tasksPath, taskId, 0, false, '', context, false);
-// Assert - Verify generateTextService was called
-expect(generateTextService).toHaveBeenCalled();
+// Assert - Verify generateObjectService was called
+expect(generateObjectService).toHaveBeenCalled();
// Get the call arguments to verify the system prompt
-const callArgs = generateTextService.mock.calls[0][0];
+const callArgs = generateObjectService.mock.calls[0][0];
expect(callArgs.systemPrompt).toContain(
'an appropriate number of specific subtasks'
);
@@ -1122,11 +1185,11 @@ describe('expandTask', () => {
// Act
await expandTask(tasksPath, taskId, 5, false, '', context, false);
-// Assert - Verify generateTextService was called
-expect(generateTextService).toHaveBeenCalled();
+// Assert - Verify generateObjectService was called
+expect(generateObjectService).toHaveBeenCalled();
// Get the call arguments to verify the system prompt
-const callArgs = generateTextService.mock.calls[0][0];
+const callArgs = generateObjectService.mock.calls[0][0];
expect(callArgs.systemPrompt).toContain('5 specific subtasks');
});
@@ -1151,8 +1214,8 @@ describe('expandTask', () => {
await expandTask(tasksPath, taskId, -3, false, '', context, false);
// Assert - Should use default value instead of negative
-expect(generateTextService).toHaveBeenCalled();
-const callArgs = generateTextService.mock.calls[0][0];
+expect(generateObjectService).toHaveBeenCalled();
+const callArgs = generateObjectService.mock.calls[0][0];
expect(callArgs.systemPrompt).toContain('4 specific subtasks');
});
@@ -1177,8 +1240,8 @@ describe('expandTask', () => {
await expandTask(tasksPath, taskId, undefined, false, '', context, false);
// Assert - Should use default value
-expect(generateTextService).toHaveBeenCalled();
-const callArgs = generateTextService.mock.calls[0][0];
+expect(generateObjectService).toHaveBeenCalled();
+const callArgs = generateObjectService.mock.calls[0][0];
expect(callArgs.systemPrompt).toContain('6 specific subtasks');
});
@@ -1203,8 +1266,8 @@ describe('expandTask', () => {
await expandTask(tasksPath, taskId, null, false, '', context, false);
// Assert - Should use default value
-expect(generateTextService).toHaveBeenCalled();
-const callArgs = generateTextService.mock.calls[0][0];
+expect(generateObjectService).toHaveBeenCalled();
+const callArgs = generateObjectService.mock.calls[0][0];
expect(callArgs.systemPrompt).toContain('7 specific subtasks');
});
});

View File

@@ -43,7 +43,23 @@ jest.unstable_mockModule(
() => ({
generateTextService: jest
.fn()
-	.mockResolvedValue({ mainResult: { content: '{}' }, telemetryData: {} })
+	.mockResolvedValue({ mainResult: { content: '{}' }, telemetryData: {} }),
+generateObjectService: jest.fn().mockResolvedValue({
+	mainResult: {
+		task: {
+			id: 1,
+			title: 'Updated Task',
+			description: 'Updated description',
+			status: 'pending',
+			dependencies: [],
+			priority: 'medium',
+			details: null,
+			testStrategy: null,
+			subtasks: []
+		}
+	},
+	telemetryData: {}
+})
})
);
@@ -120,3 +136,206 @@ describe('updateTaskById validation', () => {
expect(log).toHaveBeenCalled();
});
});
describe('updateTaskById success path with generateObjectService', () => {
let fs;
let generateObjectService;
beforeEach(async () => {
jest.clearAllMocks();
jest.spyOn(process, 'exit').mockImplementation(() => {
throw new Error('process.exit called');
});
fs = await import('fs');
const aiServices = await import(
'../../../../../scripts/modules/ai-services-unified.js'
);
generateObjectService = aiServices.generateObjectService;
});
test('successfully updates task with all fields from generateObjectService', async () => {
fs.existsSync.mockReturnValue(true);
readJSON.mockReturnValue({
tag: 'master',
tasks: [
{
id: 1,
title: 'Original Task',
description: 'Original description',
status: 'pending',
dependencies: [],
priority: 'low',
details: null,
testStrategy: null,
subtasks: []
}
]
});
const updatedTaskData = {
id: 1,
title: 'Updated Task',
description: 'Updated description',
status: 'pending',
dependencies: [2],
priority: 'high',
details: 'New implementation details',
testStrategy: 'Unit tests required',
subtasks: [
{
id: 1,
title: 'Subtask 1',
description: 'First subtask',
status: 'pending',
dependencies: []
}
]
};
generateObjectService.mockResolvedValue({
mainResult: {
task: updatedTaskData
},
telemetryData: {
model: 'claude-3-5-sonnet-20241022',
inputTokens: 100,
outputTokens: 200
}
});
const result = await updateTaskById(
'tasks/tasks.json',
1,
'Update task with new requirements',
false,
{ tag: 'master' },
'json'
);
// Verify generateObjectService was called (not generateTextService)
expect(generateObjectService).toHaveBeenCalled();
const callArgs = generateObjectService.mock.calls[0][0];
// Verify correct arguments were passed
expect(callArgs).toMatchObject({
role: 'main',
commandName: 'update-task',
objectName: 'task'
});
expect(callArgs.schema).toBeDefined();
expect(callArgs.systemPrompt).toContain(
'update a software development task'
);
expect(callArgs.prompt).toContain('Update task with new requirements');
// Verify the returned task contains all expected fields
expect(result).toEqual({
updatedTask: expect.objectContaining({
id: 1,
title: 'Updated Task',
description: 'Updated description',
status: 'pending',
dependencies: [2],
priority: 'high',
details: 'New implementation details',
testStrategy: 'Unit tests required',
subtasks: expect.arrayContaining([
expect.objectContaining({
id: 1,
title: 'Subtask 1',
description: 'First subtask',
status: 'pending'
})
])
}),
telemetryData: expect.objectContaining({
model: 'claude-3-5-sonnet-20241022',
inputTokens: 100,
outputTokens: 200
}),
tagInfo: undefined
});
});
test('handles generateObjectService with malformed mainResult', async () => {
fs.existsSync.mockReturnValue(true);
readJSON.mockReturnValue({
tag: 'master',
tasks: [
{
id: 1,
title: 'Task',
description: 'Description',
status: 'pending',
dependencies: [],
priority: 'medium',
details: null,
testStrategy: null,
subtasks: []
}
]
});
generateObjectService.mockResolvedValue({
mainResult: {
task: null // Malformed: task is null
},
telemetryData: {}
});
await expect(
updateTaskById(
'tasks/tasks.json',
1,
'Update task',
false,
{ tag: 'master' },
'json'
)
).rejects.toThrow('Received invalid task object from AI');
});
test('handles generateObjectService with missing required fields', async () => {
fs.existsSync.mockReturnValue(true);
readJSON.mockReturnValue({
tag: 'master',
tasks: [
{
id: 1,
title: 'Task',
description: 'Description',
status: 'pending',
dependencies: [],
priority: 'medium',
details: null,
testStrategy: null,
subtasks: []
}
]
});
generateObjectService.mockResolvedValue({
mainResult: {
task: {
id: 1,
// Missing title and description
status: 'pending',
dependencies: [],
priority: 'medium'
}
},
telemetryData: {}
});
await expect(
updateTaskById(
'tasks/tasks.json',
1,
'Update task',
false,
{ tag: 'master' },
'json'
)
).rejects.toThrow('Updated task missing required fields');
});
});
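As with expand-task, the two failure tests imply a two-step guard inside updateTaskById. A sketch reconstructed from the asserted error messages (not the actual source):

// Hypothetical validation inside updateTaskById
function validateUpdatedTask(aiResponse) {
	const updatedTask = aiResponse?.mainResult?.task;
	if (!updatedTask || typeof updatedTask !== 'object') {
		throw new Error('Received invalid task object from AI');
	}
	if (!updatedTask.title || !updatedTask.description) {
		throw new Error('Updated task missing required fields');
	}
	return updatedTask;
}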

View File

@@ -30,6 +30,12 @@ jest.unstable_mockModule(
generateTextService: jest.fn().mockResolvedValue({
mainResult: '[]', // mainResult is the text string directly
telemetryData: {}
-})
+}),
+generateObjectService: jest.fn().mockResolvedValue({
+	mainResult: {
+		tasks: [] // generateObject returns structured data
+	},
+	telemetryData: {}
+})
})
);
@@ -84,7 +90,7 @@ const { readJSON, writeJSON, log } = await import(
'../../../../../scripts/modules/utils.js'
);
-const { generateTextService } = await import(
+const { generateObjectService } = await import(
'../../../../../scripts/modules/ai-services-unified.js'
);
@@ -154,7 +160,9 @@ describe('updateTasks', () => {
];
const mockApiResponse = {
-	mainResult: JSON.stringify(mockUpdatedTasks), // mainResult is the JSON string directly
+	mainResult: {
+		tasks: mockUpdatedTasks // generateObject returns structured data
+	},
telemetryData: {}
};
@@ -164,7 +172,7 @@ describe('updateTasks', () => {
tag: 'master',
_rawTaggedData: mockInitialTasks
});
-generateTextService.mockResolvedValue(mockApiResponse);
+generateObjectService.mockResolvedValue(mockApiResponse);
// Act
const result = await updateTasks(
@@ -185,7 +193,7 @@ describe('updateTasks', () => {
);
// 2. AI Service called with correct args
-expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
+expect(generateObjectService).toHaveBeenCalledWith(expect.any(Object));
// 3. Write JSON called with correctly merged tasks
expect(writeJSON).toHaveBeenCalledWith(
@@ -252,7 +260,7 @@ describe('updateTasks', () => {
'/mock/path',
'master'
);
-expect(generateTextService).not.toHaveBeenCalled();
+expect(generateObjectService).not.toHaveBeenCalled();
expect(writeJSON).not.toHaveBeenCalled();
expect(log).toHaveBeenCalledWith(
'info',
@@ -327,8 +335,10 @@ describe('updateTasks', () => {
_rawTaggedData: mockTaggedData
});
-generateTextService.mockResolvedValue({
-	mainResult: JSON.stringify(mockUpdatedTasks),
+generateObjectService.mockResolvedValue({
+	mainResult: {
+		tasks: mockUpdatedTasks
+	},
telemetryData: { commandName: 'update-tasks', totalCost: 0.05 }
});
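One structural consequence runs through every suite here: generateObject wants an object at the response root, so payloads that used to be bare JSON arrays are now wrapped under a named key (tasks, subtasks, complexityAnalysis, task) matching the objectName parameter seen above. A Zod sketch of that wrapping, with an illustrative field list:

import { z } from 'zod';

// Hypothetical: the response root is an object, never a bare array
const UpdateTasksResponseSchema = z.object({
	tasks: z.array(
		z.object({ id: z.number().int(), title: z.string() }).passthrough()
	)
});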