Adjusts Claude API calls to use streaming responses instead of awaiting a single message.
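The change is the same at every call site: instead of awaiting one message object and reading message.content[0].text, the call passes stream: true and accumulates text deltas as they arrive, driving a simple progress indicator in the meantime. A minimal sketch of the pattern (the anthropic client, CONFIG values, and prompt variable are stand-ins for the repo's own setup; the event shape matches the Anthropic messages API as used in the diff below):

// Minimal sketch of the streaming pattern this commit applies.
// Assumes `anthropic` is an instantiated @anthropic-ai/sdk client and
// `prompt` is whatever user content the call site builds.
const stream = await anthropic.messages.create({
  model: CONFIG.model,
  max_tokens: CONFIG.maxTokens,
  temperature: CONFIG.temperature,
  messages: [{ role: 'user', content: prompt }],
  stream: true // returns an async iterable of events instead of one message
});

let responseText = '';
for await (const chunk of stream) {
  // Text arrives incrementally in content_block_delta events
  if (chunk.type === 'content_block_delta' && chunk.delta.text) {
    responseText += chunk.delta.text;
  }
}
// responseText now holds the full completion, equivalent to the old
// message.content[0].text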
@@ -1,6 +1,6 @@
 {
   "name": "task-master-ai",
-  "version": "0.9.19",
+  "version": "0.9.22",
   "description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
   "main": "index.js",
   "type": "module",
@@ -123,9 +123,11 @@ Important: Your response must be valid JSON only, with no additional explanation
 async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens, systemPrompt) {
   const loadingIndicator = startLoadingIndicator('Generating tasks from PRD...');
   let responseText = '';
+  let streamingInterval = null;
 
   try {
-    const message = await anthropic.messages.create({
+    // Use streaming for handling large responses
+    const stream = await anthropic.messages.create({
       model: CONFIG.model,
       max_tokens: maxTokens,
       temperature: CONFIG.temperature,
@@ -135,14 +137,34 @@ async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens,
           role: 'user',
           content: `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks:\n\n${prdContent}`
         }
-      ]
+      ],
+      stream: true
     });
 
-    responseText = message.content[0].text;
+    // Update loading indicator to show streaming progress
+    let dotCount = 0;
+    const readline = await import('readline');
+    streamingInterval = setInterval(() => {
+      readline.cursorTo(process.stdout, 0);
+      process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
+      dotCount = (dotCount + 1) % 4;
+    }, 500);
+
+    // Process the stream
+    for await (const chunk of stream) {
+      if (chunk.type === 'content_block_delta' && chunk.delta.text) {
+        responseText += chunk.delta.text;
+      }
+    }
+
+    if (streamingInterval) clearInterval(streamingInterval);
     stopLoadingIndicator(loadingIndicator);
+
+    log('info', "Completed streaming response from Claude API!");
+
     return processClaudeResponse(responseText, numTasks, 0, prdContent, prdPath);
   } catch (error) {
+    if (streamingInterval) clearInterval(streamingInterval);
     stopLoadingIndicator(loadingIndicator);
     throw error;
   }
@@ -224,6 +246,8 @@ async function generateSubtasks(task, numSubtasks, nextSubtaskId, additionalCont
   log('info', `Generating ${numSubtasks} subtasks for task ${task.id}: ${task.title}`);
 
   const loadingIndicator = startLoadingIndicator(`Generating subtasks for task ${task.id}...`);
+  let streamingInterval = null;
+  let responseText = '';
 
   const systemPrompt = `You are an AI assistant helping with task breakdown for software development.
 You need to break down a high-level task into ${numSubtasks} specific subtasks that can be implemented one by one.
@@ -269,22 +293,49 @@ Return exactly ${numSubtasks} subtasks with the following JSON structure:
 
 Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
 
-  const message = await anthropic.messages.create({
-    model: CONFIG.model,
-    max_tokens: CONFIG.maxTokens,
-    temperature: CONFIG.temperature,
-    system: systemPrompt,
-    messages: [
-      {
-        role: 'user',
-        content: userPrompt
-      }
-    ]
-  });
-
-  stopLoadingIndicator(loadingIndicator);
-
-  return parseSubtasksFromText(message.content[0].text, nextSubtaskId, numSubtasks, task.id);
+  try {
+    // Update loading indicator to show streaming progress
+    let dotCount = 0;
+    const readline = await import('readline');
+    streamingInterval = setInterval(() => {
+      readline.cursorTo(process.stdout, 0);
+      process.stdout.write(`Generating subtasks for task ${task.id}${'.'.repeat(dotCount)}`);
+      dotCount = (dotCount + 1) % 4;
+    }, 500);
+
+    // Use streaming API call
+    const stream = await anthropic.messages.create({
+      model: CONFIG.model,
+      max_tokens: CONFIG.maxTokens,
+      temperature: CONFIG.temperature,
+      system: systemPrompt,
+      messages: [
+        {
+          role: 'user',
+          content: userPrompt
+        }
+      ],
+      stream: true
+    });
+
+    // Process the stream
+    for await (const chunk of stream) {
+      if (chunk.type === 'content_block_delta' && chunk.delta.text) {
+        responseText += chunk.delta.text;
+      }
+    }
+
+    if (streamingInterval) clearInterval(streamingInterval);
+    stopLoadingIndicator(loadingIndicator);
+
+    log('info', `Completed generating subtasks for task ${task.id}`);
+
+    return parseSubtasksFromText(responseText, nextSubtaskId, numSubtasks, task.id);
+  } catch (error) {
+    if (streamingInterval) clearInterval(streamingInterval);
+    stopLoadingIndicator(loadingIndicator);
+    throw error;
+  }
 } catch (error) {
   log('error', `Error generating subtasks: ${error.message}`);
   throw error;
@@ -339,6 +390,8 @@ ${additionalContext || "No additional context provided."}
 
   // Now generate subtasks with Claude
   const loadingIndicator = startLoadingIndicator(`Generating research-backed subtasks for task ${task.id}...`);
+  let streamingInterval = null;
+  let responseText = '';
 
   const systemPrompt = `You are an AI assistant helping with task breakdown for software development.
 You need to break down a high-level task into ${numSubtasks} specific subtasks that can be implemented one by one.
@@ -350,7 +403,7 @@ Subtasks should:
 1. Be specific and actionable implementation steps
 2. Follow a logical sequence
 3. Each handle a distinct part of the parent task
-4. Include clear guidance on implementation approach, referencing the research where relevant
+4. Include clear guidance on implementation approach
 5. Have appropriate dependency chains between subtasks
 6. Collectively cover all aspects of the parent task
 
@@ -362,8 +415,7 @@ For each subtask, provide:
 
 Each subtask should be implementable in a focused coding session.`;
 
-  const userPrompt = `Please break down this task into ${numSubtasks} specific, actionable subtasks,
-using the research findings to inform your breakdown:
+  const userPrompt = `Please break down this task into ${numSubtasks} specific, well-researched, actionable subtasks:
 
 Task ID: ${task.id}
 Title: ${task.title}
@@ -377,31 +429,58 @@ Return exactly ${numSubtasks} subtasks with the following JSON structure:
   {
     "id": ${nextSubtaskId},
     "title": "First subtask title",
-    "description": "Detailed description",
+    "description": "Detailed description incorporating research",
     "dependencies": [],
-    "details": "Implementation details"
+    "details": "Implementation details with best practices"
   },
   ...more subtasks...
 ]
 
 Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
 
-  const message = await anthropic.messages.create({
-    model: CONFIG.model,
-    max_tokens: CONFIG.maxTokens,
-    temperature: CONFIG.temperature,
-    system: systemPrompt,
-    messages: [
-      {
-        role: 'user',
-        content: userPrompt
-      }
-    ]
-  });
-
-  stopLoadingIndicator(loadingIndicator);
-
-  return parseSubtasksFromText(message.content[0].text, nextSubtaskId, numSubtasks, task.id);
+  try {
+    // Update loading indicator to show streaming progress
+    let dotCount = 0;
+    const readline = await import('readline');
+    streamingInterval = setInterval(() => {
+      readline.cursorTo(process.stdout, 0);
+      process.stdout.write(`Generating research-backed subtasks for task ${task.id}${'.'.repeat(dotCount)}`);
+      dotCount = (dotCount + 1) % 4;
+    }, 500);
+
+    // Use streaming API call
+    const stream = await anthropic.messages.create({
+      model: CONFIG.model,
+      max_tokens: CONFIG.maxTokens,
+      temperature: CONFIG.temperature,
+      system: systemPrompt,
+      messages: [
+        {
+          role: 'user',
+          content: userPrompt
+        }
+      ],
+      stream: true
+    });
+
+    // Process the stream
+    for await (const chunk of stream) {
+      if (chunk.type === 'content_block_delta' && chunk.delta.text) {
+        responseText += chunk.delta.text;
+      }
+    }
+
+    if (streamingInterval) clearInterval(streamingInterval);
+    stopLoadingIndicator(loadingIndicator);
+
+    log('info', `Completed generating research-backed subtasks for task ${task.id}`);
+
+    return parseSubtasksFromText(responseText, nextSubtaskId, numSubtasks, task.id);
+  } catch (error) {
+    if (streamingInterval) clearInterval(streamingInterval);
+    stopLoadingIndicator(loadingIndicator);
+    throw error;
+  }
 } catch (error) {
   log('error', `Error generating research-backed subtasks: ${error.message}`);
   throw error;
@@ -243,38 +243,65 @@ Return only the updated tasks as a valid JSON array.`
     const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
     updatedTasks = JSON.parse(jsonText);
   } else {
-    // Call Claude to update the tasks
-    const message = await anthropic.messages.create({
-      model: CONFIG.model,
-      max_tokens: CONFIG.maxTokens,
-      temperature: CONFIG.temperature,
-      system: systemPrompt,
-      messages: [
-        {
-          role: 'user',
-          content: `Here are the tasks to update:
+    // Call Claude to update the tasks with streaming enabled
+    let responseText = '';
+    let streamingInterval = null;
+
+    try {
+      // Update loading indicator to show streaming progress
+      let dotCount = 0;
+      const readline = await import('readline');
+      streamingInterval = setInterval(() => {
+        readline.cursorTo(process.stdout, 0);
+        process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
+        dotCount = (dotCount + 1) % 4;
+      }, 500);
+
+      // Use streaming API call
+      const stream = await anthropic.messages.create({
+        model: CONFIG.model,
+        max_tokens: CONFIG.maxTokens,
+        temperature: CONFIG.temperature,
+        system: systemPrompt,
+        messages: [
+          {
+            role: 'user',
+            content: `Here are the tasks to update:
 ${taskData}
 
 Please update these tasks based on the following new context:
 ${prompt}
 
 Return only the updated tasks as a valid JSON array.`
-        }
-      ]
-    });
-
-    const responseText = message.content[0].text;
+          }
+        ],
+        stream: true
+      });
+
+      // Process the stream
+      for await (const chunk of stream) {
+        if (chunk.type === 'content_block_delta' && chunk.delta.text) {
+          responseText += chunk.delta.text;
+        }
+      }
+
+      if (streamingInterval) clearInterval(streamingInterval);
+      log('info', "Completed streaming response from Claude API!");
 
       // Extract JSON from response
       const jsonStart = responseText.indexOf('[');
       const jsonEnd = responseText.lastIndexOf(']');
 
       if (jsonStart === -1 || jsonEnd === -1) {
         throw new Error("Could not find valid JSON array in Claude's response");
       }
 
       const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
       updatedTasks = JSON.parse(jsonText);
+    } catch (error) {
+      if (streamingInterval) clearInterval(streamingInterval);
+      throw error;
+    }
   }
 
   // Replace the tasks in the original data