refactor: simplify package structure by eliminating templates directory

This commit is contained in:
Eyal Toledano
2025-03-04 16:50:42 -05:00
parent fdd8c5cf3b
commit 290163f53f
7 changed files with 203 additions and 99 deletions

13
assets/env.example Normal file
View File

@@ -0,0 +1,13 @@
# Required
ANTHROPIC_API_KEY=your-api-key-here # Format: sk-ant-api03-...
# Optional - defaults shown
MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229
MAX_TOKENS=4000 # Maximum tokens for model responses
TEMPERATURE=0.7 # Temperature for model responses (0.0-1.0)
DEBUG=false # Enable debug logging (true/false)
LOG_LEVEL=info # Log level (debug, info, warn, error)
DEFAULT_SUBTASKS=3 # Default number of subtasks when expanding
DEFAULT_PRIORITY=medium # Default priority for generated tasks (high, medium, low)
PROJECT_NAME={{projectName}} # Project name for tasks.json metadata
PROJECT_VERSION={{projectVersion}} # Project version for tasks.json metadata

47
assets/example_prd.txt Normal file
View File

@@ -0,0 +1,47 @@
<context>
# Overview
[Provide a high-level overview of your product here. Explain what problem it solves, who it's for, and why it's valuable.]
# Core Features
[List and describe the main features of your product. For each feature, include:
- What it does
- Why it's important
- How it works at a high level]
# User Experience
[Describe the user journey and experience. Include:
- User personas
- Key user flows
- UI/UX considerations]
# Technical Architecture
[Outline the technical implementation details:
- System components
- Data models
- APIs and integrations
- Infrastructure requirements]
# Development Roadmap
[Break down the development process into phases:
- MVP requirements
- Future enhancements
- Timeline estimates]
# Success Metrics
[Define how success will be measured:
- Key performance indicators
- User adoption metrics
- Business goals]
# Risks and Mitigations
[Identify potential risks and how they'll be addressed:
- Technical challenges
- Market risks
- Resource constraints]
# Appendix
[Include any additional information:
- Research findings
- Competitive analysis
- Technical specifications]
</context>

29
assets/gitignore Normal file
View File

@@ -0,0 +1,29 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
dev-debug.log
# Dependency directories
node_modules/
# Environment variables
.env
# Editor directories and files
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
# OS specific
.DS_Store
# Task files
tasks.json
tasks/

View File

@@ -1,6 +1,6 @@
{
"name": "claude-task-master",
"version": "1.2.6",
"version": "1.3.0",
"description": "A task management system for AI-driven development with Claude",
"main": "index.js",
"type": "module",
@@ -44,7 +44,8 @@
"files": [
"scripts/init.js",
"scripts/dev.js",
"templates/**",
"assets/**",
".cursor/**",
"README.md",
"index.js"
]

View File

@@ -72,9 +72,39 @@ function ensureDirectoryExists(dirPath) {
// Function to copy a file from the package to the target directory
function copyTemplateFile(templateName, targetPath, replacements = {}) {
// Get the template content from the templates directory
const templatePath = path.join(__dirname, '..', 'templates', templateName);
let content = fs.readFileSync(templatePath, 'utf8');
// Get the file content from the appropriate source directory
let sourcePath;
// Map template names to their actual source paths
switch(templateName) {
case 'dev.js':
sourcePath = path.join(__dirname, 'dev.js');
break;
case 'scripts_README.md':
sourcePath = path.join(__dirname, 'README.md');
break;
case 'dev_workflow.mdc':
sourcePath = path.join(__dirname, '..', '.cursor', 'rules', 'dev_workflow.mdc');
break;
case 'README.md':
sourcePath = path.join(__dirname, '..', 'README.md');
break;
default:
// For other files like env.example, gitignore, etc. that don't have direct equivalents
sourcePath = path.join(__dirname, '..', 'assets', templateName);
}
// Check if the source file exists
if (!fs.existsSync(sourcePath)) {
// Fall back to templates directory for files that might not have been moved yet
sourcePath = path.join(__dirname, '..', 'assets', templateName);
if (!fs.existsSync(sourcePath)) {
log('error', `Source file not found: ${sourcePath}`);
return;
}
}
let content = fs.readFileSync(sourcePath, 'utf8');
// Replace placeholders with actual values
Object.entries(replacements).forEach(([key, value]) => {

View File

@@ -56,44 +56,11 @@ function ensureExecutable(filePath) {
return true;
}
// Function to sync template files.
//
// Deprecated: the templates/ directory has been eliminated; files are now
// consumed directly from their source locations (scripts/, assets/,
// .cursor/rules/), so there is nothing left to copy. Kept as a stub so the
// existing call site in preparePackage() keeps working unchanged.
//
// Returns {boolean} — always true, signalling "nothing failed to sync".
function syncTemplateFiles() {
  // We no longer need to sync files since we're using them directly
  log('info', 'Template syncing has been deprecated - using source files directly');
  return true;
}
// Main function to prepare the package
@@ -101,19 +68,17 @@ function preparePackage() {
const rootDir = path.join(__dirname, '..');
log('info', `Preparing package in ${rootDir}`);
// Sync template files to ensure templates have the latest versions
log('info', 'Syncing template files...');
if (!syncTemplateFiles()) {
log('warn', 'Some template files could not be synced. Continuing with preparation...');
}
// Check for required files
const requiredFiles = [
'package.json',
'README.md',
'index.js',
'scripts/init.js',
'scripts/dev.js'
'scripts/dev.js',
'assets/env.example',
'assets/gitignore',
'assets/example_prd.txt',
'.cursor/rules/dev_workflow.mdc'
];
let allFilesExist = true;
@@ -148,38 +113,6 @@ function preparePackage() {
log('warn', 'Some scripts could not be made executable. This may cause issues.');
}
// Check templates directory
const templatesDir = path.join(rootDir, 'templates');
if (!fileExists(templatesDir)) {
log('error', 'Templates directory does not exist');
process.exit(1);
}
// Check template files
const requiredTemplates = [
'README.md',
'env.example',
'gitignore',
'dev_workflow.mdc',
'dev.js',
'scripts_README.md',
'example_prd.txt'
];
let allTemplatesExist = true;
for (const template of requiredTemplates) {
const templatePath = path.join(templatesDir, template);
if (!fileExists(templatePath)) {
log('error', `Required template ${template} does not exist`);
allTemplatesExist = false;
}
}
if (!allTemplatesExist) {
log('error', 'Some required templates are missing. Package preparation failed.');
process.exit(1);
}
// Run npm pack to test package creation
try {
log('info', 'Running npm pack to test package creation...');

View File

@@ -172,7 +172,7 @@ async function callClaude(prdContent, prdPath, numTasks, retryCount = 0) {
]
}`
let systemPrompt = "You are a helpful assistant that generates tasks from a PRD using the following template: " + TASKS_JSON_TEMPLATE + "ONLY RETURN THE JSON, NOTHING ELSE.";
let systemPrompt = "You are a helpful assistant that generates tasks from a PRD using the below json template. You don't worry much about non-task related content, nor do you worry about tasks that don't particularly add value to an mvp. Things like implementing security enhancements, documentation, expansive testing etc are nice to have. The most important is to turn the PRD into a task list that fully materializes the product enough so it can go to market. The JSON template goes as follows -- make sure to only return the json, nothing else: " + TASKS_JSON_TEMPLATE + "ONLY RETURN THE JSON, NOTHING ELSE.";
// Add instruction about the number of tasks if specified
if (numTasks) {
@@ -275,25 +275,21 @@ async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens,
let fullResponse = '';
let streamComplete = false;
let streamError = null;
let streamingInterval = null; // Initialize streamingInterval here
try {
const stream = await anthropic.messages.create({
max_tokens: maxTokens,
model: CONFIG.model,
temperature: CONFIG.temperature,
messages: [
{
role: "user",
content: prdContent
}
],
messages: [{ role: "user", content: prdContent }],
system: systemPrompt,
stream: true
});
// Update loading indicator to show streaming progress
let dotCount = 0;
const streamingInterval = setInterval(() => {
streamingInterval = setInterval(() => {
readline.cursorTo(process.stdout, 0);
process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
dotCount = (dotCount + 1) % 4;
@@ -316,7 +312,7 @@ async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens,
return processClaudeResponse(fullResponse, numTasks, 0, prdContent, prdPath);
} catch (error) {
clearInterval(streamingInterval);
if (streamingInterval) clearInterval(streamingInterval); // Safely clear interval
stopLoadingIndicator(loadingIndicator);
log('error', "Error during streaming response:", error);
throw error;
@@ -432,17 +428,72 @@ async function updateTasks(tasksPath, fromId, prompt) {
log('error', "Invalid or missing tasks.json.");
process.exit(1);
}
log('info', `Updating tasks from ID >= ${fromId} with prompt: ${prompt}`);
// In real usage, you'd feed data.tasks + prompt to an LLM. We'll just do a naive approach:
data.tasks.forEach(task => {
if (task.id >= fromId && task.status !== "done") {
task.description += ` [UPDATED: ${prompt}]`;
const tasksToUpdate = data.tasks.filter(task => task.id >= fromId && task.status !== "done");
const systemPrompt = "You are a helpful assistant that updates tasks based on provided insights. Return only the updated tasks as a JSON array.";
const userPrompt = `Update these tasks based on the following insight: ${prompt}\nTasks: ${JSON.stringify(tasksToUpdate, null, 2)}`;
// Start loading indicator
const loadingIndicator = startLoadingIndicator("Waiting for Claude to update tasks...");
let fullResponse = '';
let streamingInterval = null;
try {
const stream = await anthropic.messages.create({
max_tokens: CONFIG.maxTokens,
model: CONFIG.model,
temperature: CONFIG.temperature,
messages: [{ role: "user", content: userPrompt }],
system: systemPrompt,
stream: true
});
// Update loading indicator to show streaming progress
let dotCount = 0;
streamingInterval = setInterval(() => {
readline.cursorTo(process.stdout, 0);
process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
dotCount = (dotCount + 1) % 4;
}, 500);
// Process the stream
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
fullResponse += chunk.delta.text;
}
}
clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);
log('info', "Completed streaming response from Claude API!");
log('debug', `Streaming response length: ${fullResponse.length} characters`);
try {
const updatedTasks = JSON.parse(fullResponse);
data.tasks = data.tasks.map(task => {
const updatedTask = updatedTasks.find(t => t.id === task.id);
return updatedTask || task;
});
writeJSON(tasksPath, data);
log('info', "Tasks updated successfully.");
} catch (parseError) {
log('error', "Failed to parse Claude's response as JSON:", parseError);
log('debug', "Response content:", fullResponse);
process.exit(1);
}
} catch (error) {
if (streamingInterval) clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);
log('error', "Error during streaming response:", error);
process.exit(1);
}
}
//