Mirror of https://github.com/AutoMaker-Org/automaker.git, synced 2026-01-30 06:12:03 +00:00
Merge pull request #178 from AutoMaker-Org/feature/shared-packages
Feature/shared packages
.github/actions/setup-project/action.yml (vendored, 5 changed lines)

@@ -52,6 +52,11 @@ runs:
          @rollup/rollup-linux-x64-gnu@4.53.3 \
          @tailwindcss/oxide-linux-x64-gnu@4.1.17

    - name: Build shared packages
      shell: bash
      # Build shared packages (types, utils, platform, etc.) before apps can use them
      run: npm run build:packages

    - name: Rebuild native modules (root)
      if: inputs.rebuild-node-pty-path == ''
      shell: bash
.github/workflows/test.yml (vendored, 5 changed lines)

@@ -23,6 +23,11 @@ jobs:
          check-lockfile: "true"
          rebuild-node-pty-path: "apps/server"

      - name: Run package tests
        run: npm run test:packages
        env:
          NODE_ENV: test

      - name: Run server tests with coverage
        run: npm run test:server:coverage
        env:
@@ -2,6 +2,8 @@
  "name": "@automaker/server",
  "version": "0.1.0",
  "description": "Backend server for Automaker - provides API for both web and Electron modes",
  "author": "AutoMaker Team",
  "license": "SEE LICENSE IN LICENSE",
  "private": true,
  "type": "module",
  "main": "dist/index.js",
@@ -19,6 +21,13 @@
  },
  "dependencies": {
    "@anthropic-ai/claude-agent-sdk": "^0.1.72",
    "@automaker/dependency-resolver": "^1.0.0",
    "@automaker/git-utils": "^1.0.0",
    "@automaker/model-resolver": "^1.0.0",
    "@automaker/platform": "^1.0.0",
    "@automaker/prompts": "^1.0.0",
    "@automaker/types": "^1.0.0",
    "@automaker/utils": "^1.0.0",
    "cors": "^2.8.5",
    "dotenv": "^17.2.3",
    "express": "^5.2.1",
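The practical effect of these new dependencies is that server modules can swap deep relative imports for the shared workspace packages. A minimal sketch of the pattern, reusing only names that appear in the hunks below (the helper function itself is invented for illustration):

```ts
// Before this change, server modules reached into their own lib/ folder:
//   import { createLogger } from "../../lib/logger.js";
// After it, the same helpers come from the shared workspace packages:
import { createLogger } from "@automaker/utils";
import { validatePath, PathNotAllowedError } from "@automaker/platform";

const logger = createLogger("Example");

export function readProjectPath(requestedPath: string): string {
  try {
    // validatePath enforces ALLOWED_ROOT_DIRECTORY, as the old ./lib/security.js did
    return validatePath(requestedPath);
  } catch (error) {
    if (error instanceof PathNotAllowedError) {
      logger.error(`Rejected path outside allowed root: ${requestedPath}`, error);
    }
    throw error;
  }
}
```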
@@ -14,7 +14,7 @@ import { createServer } from "http";
import dotenv from "dotenv";

import { createEventEmitter, type EventEmitter } from "./lib/events.js";
import { initAllowedPaths } from "./lib/security.js";
import { initAllowedPaths } from "@automaker/platform";
import { authMiddleware, getAuthStatus } from "./lib/auth.js";
import { createFsRoutes } from "./routes/fs/index.js";
import { createHealthRoutes } from "./routes/health/index.js";
@@ -5,121 +5,9 @@
 * app specifications to ensure consistency across the application.
 */

/**
 * TypeScript interface for structured spec output
 */
export interface SpecOutput {
  project_name: string;
  overview: string;
  technology_stack: string[];
  core_capabilities: string[];
  implemented_features: Array<{
    name: string;
    description: string;
    file_locations?: string[];
  }>;
  additional_requirements?: string[];
  development_guidelines?: string[];
  implementation_roadmap?: Array<{
    phase: string;
    status: "completed" | "in_progress" | "pending";
    description: string;
  }>;
}

/**
 * JSON Schema for structured spec output
 * Used with Claude's structured output feature for reliable parsing
 */
export const specOutputSchema = {
  type: "object",
  properties: {
    project_name: {
      type: "string",
      description: "The name of the project",
    },
    overview: {
      type: "string",
      description:
        "A comprehensive description of what the project does, its purpose, and key goals",
    },
    technology_stack: {
      type: "array",
      items: { type: "string" },
      description:
        "List of all technologies, frameworks, libraries, and tools used",
    },
    core_capabilities: {
      type: "array",
      items: { type: "string" },
      description: "List of main features and capabilities the project provides",
    },
    implemented_features: {
      type: "array",
      items: {
        type: "object",
        properties: {
          name: {
            type: "string",
            description: "Name of the implemented feature",
          },
          description: {
            type: "string",
            description: "Description of what the feature does",
          },
          file_locations: {
            type: "array",
            items: { type: "string" },
            description: "File paths where this feature is implemented",
          },
        },
        required: ["name", "description"],
      },
      description: "Features that have been implemented based on code analysis",
    },
    additional_requirements: {
      type: "array",
      items: { type: "string" },
      description: "Any additional requirements or constraints",
    },
    development_guidelines: {
      type: "array",
      items: { type: "string" },
      description: "Development standards and practices",
    },
    implementation_roadmap: {
      type: "array",
      items: {
        type: "object",
        properties: {
          phase: {
            type: "string",
            description: "Name of the implementation phase",
          },
          status: {
            type: "string",
            enum: ["completed", "in_progress", "pending"],
            description: "Current status of this phase",
          },
          description: {
            type: "string",
            description: "Description of what this phase involves",
          },
        },
        required: ["phase", "status", "description"],
      },
      description: "Phases or roadmap items for implementation",
    },
  },
  required: [
    "project_name",
    "overview",
    "technology_stack",
    "core_capabilities",
    "implemented_features",
  ],
  additionalProperties: false,
};
// Import and re-export spec types from shared package
export type { SpecOutput } from "@automaker/types";
export { specOutputSchema } from "@automaker/types";

/**
 * Escape special XML characters

@@ -136,7 +24,7 @@ function escapeXml(str: string): string {
/**
 * Convert structured spec output to XML format
 */
export function specToXml(spec: SpecOutput): string {
export function specToXml(spec: import("@automaker/types").SpecOutput): string {
  const indent = " ";

  let xml = `<?xml version="1.0" encoding="UTF-8"?>
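For readers skimming the hunk above: the interface removed here still describes the shape that `specToXml` consumes, now via `@automaker/types`. A hedged usage sketch (the object literal and the import path of `specToXml` are illustrative assumptions):

```ts
import type { SpecOutput } from "@automaker/types";
import { specToXml } from "./app-spec-format.js"; // path assumed for illustration

const spec: SpecOutput = {
  project_name: "Automaker",
  overview: "Backend server plus web/Electron frontends for automated feature work.",
  technology_stack: ["TypeScript", "Express", "Claude Agent SDK"],
  core_capabilities: ["Spec generation", "Feature tracking"],
  implemented_features: [
    { name: "Spec regeneration", description: "Rebuilds the app spec from code analysis" },
  ],
};

// Serializes the structured spec into the XML document started above
const xml: string = specToXml(spec);
```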
@@ -1,456 +1,25 @@
/**
 * Enhancement Prompts Library - AI-powered text enhancement for task descriptions
 * Enhancement Prompts - Re-exported from @automaker/prompts
 *
 * Provides prompt templates and utilities for enhancing user-written task descriptions:
 * - Improve: Transform vague requests into clear, actionable tasks
 * - Technical: Add implementation details and technical specifications
 * - Simplify: Make verbose descriptions concise and focused
 * - Acceptance: Add testable acceptance criteria
 *
 * Uses chain-of-thought prompting with few-shot examples for consistent results.
 * This file now re-exports enhancement prompts from the shared @automaker/prompts package
 * to maintain backward compatibility with existing imports in the server codebase.
 */

/**
 * Available enhancement modes for transforming task descriptions
 */
export type EnhancementMode = "improve" | "technical" | "simplify" | "acceptance";

/**
 * Example input/output pair for few-shot learning
 */
export interface EnhancementExample {
  input: string;
  output: string;
}

/**
 * System prompt for the "improve" enhancement mode.
 * Transforms vague or unclear requests into clear, actionable task descriptions.
 */
export const IMPROVE_SYSTEM_PROMPT = `You are an expert at transforming vague, unclear, or incomplete task descriptions into clear, actionable specifications.

Your task is to take a user's rough description and improve it by:

1. ANALYZE the input:
- Identify the core intent behind the request
- Note any ambiguities or missing details
- Determine what success would look like

2. CLARIFY the scope:
- Define clear boundaries for the task
- Identify implicit requirements
- Add relevant context that may be assumed

3. STRUCTURE the output:
- Write a clear, actionable title
- Provide a concise description of what needs to be done
- Break down into specific sub-tasks if appropriate

4. ENHANCE with details:
- Add specific, measurable outcomes where possible
- Include edge cases to consider
- Note any dependencies or prerequisites

Output ONLY the improved task description. Do not include explanations, markdown formatting, or meta-commentary about your changes.`;

/**
 * System prompt for the "technical" enhancement mode.
 * Adds implementation details and technical specifications.
 */
export const TECHNICAL_SYSTEM_PROMPT = `You are a senior software engineer skilled at adding technical depth to feature descriptions.

Your task is to enhance a task description with technical implementation details:

1. ANALYZE the requirement:
- Understand the functional goal
- Identify the technical domain (frontend, backend, database, etc.)
- Consider the likely tech stack based on context

2. ADD technical specifications:
- Suggest specific technologies, libraries, or patterns
- Define API contracts or data structures if relevant
- Note performance considerations
- Identify security implications

3. OUTLINE implementation approach:
- Break down into technical sub-tasks
- Suggest file structure or component organization
- Note integration points with existing systems

4. CONSIDER edge cases:
- Error handling requirements
- Loading and empty states
- Boundary conditions

Output ONLY the enhanced technical description. Keep it concise but comprehensive. Do not include explanations about your reasoning.`;

/**
 * System prompt for the "simplify" enhancement mode.
 * Makes verbose descriptions concise and focused.
 */
export const SIMPLIFY_SYSTEM_PROMPT = `You are an expert editor who excels at making verbose text concise without losing meaning.

Your task is to simplify a task description while preserving essential information:

1. IDENTIFY the core message:
- Extract the primary goal or requirement
- Note truly essential details
- Separate nice-to-have from must-have information

2. ELIMINATE redundancy:
- Remove repeated information
- Cut unnecessary qualifiers and hedging language
- Remove filler words and phrases

3. CONSOLIDATE related points:
- Merge overlapping requirements
- Group related items together
- Use concise language

4. PRESERVE critical details:
- Keep specific technical requirements
- Retain important constraints
- Maintain actionable specifics

Output ONLY the simplified description. Aim for 30-50% reduction in length while keeping all essential information. Do not explain your changes.`;

/**
 * System prompt for the "acceptance" enhancement mode.
 * Adds testable acceptance criteria to task descriptions.
 */
export const ACCEPTANCE_SYSTEM_PROMPT = `You are a QA specialist skilled at defining testable acceptance criteria for software features.

Your task is to enhance a task description by adding clear acceptance criteria:

1. UNDERSTAND the feature:
- Identify all user-facing behaviors
- Note system state changes
- Consider different user roles or scenarios

2. DEFINE acceptance criteria using Given-When-Then format:
- Given: The initial context or preconditions
- When: The action or trigger
- Then: The expected outcome

3. COVER key scenarios:
- Happy path (successful completion)
- Edge cases (boundary conditions)
- Error scenarios (what should NOT happen)
- Performance requirements if relevant

4. MAKE criteria testable:
- Use specific, measurable outcomes
- Avoid vague terms like "quickly" or "easily"
- Include specific values where applicable

Output the original description followed by a clear "Acceptance Criteria:" section with numbered, testable criteria. Do not include explanations about your process.`;

/**
 * Few-shot examples for the "improve" enhancement mode
 */
export const IMPROVE_EXAMPLES: EnhancementExample[] = [
  {
    input: "add dark mode",
    output: `Add Dark Mode Theme Support

Implement a dark mode option that allows users to switch between light and dark themes throughout the application.

Requirements:
- Add a theme toggle switch in the settings area
- Store user's theme preference persistently
- Apply dark theme colors to all UI components
- Respect system preference as default if user hasn't set a preference
- Ensure smooth transition when switching themes`,
  },
  {
    input: "fix the login bug",
    output: `Fix Login Authentication Flow

Investigate and resolve issues preventing users from successfully logging in to the application.

Requirements:
- Identify the specific failure point in the login process
- Ensure proper credential validation
- Handle error states with clear user feedback
- Verify session token is correctly stored after successful login
- Test with various credential combinations (valid, invalid, edge cases)`,
  },
  {
    input: "make it faster",
    output: `Improve Application Performance

Optimize application loading and response times to enhance user experience.

Requirements:
- Profile current performance to identify bottlenecks
- Implement code splitting and lazy loading for routes/components
- Optimize database queries and add appropriate indexes
- Add caching for frequently accessed data
- Set performance budget targets (e.g., LCP < 2.5s, FID < 100ms)
- Measure and document improvements`,
  },
];

/**
 * Few-shot examples for the "technical" enhancement mode
 */
export const TECHNICAL_EXAMPLES: EnhancementExample[] = [
  {
    input: "Add user profile page",
    output: `Add User Profile Page

Create a dedicated profile page for viewing and editing user information.

Technical Implementation:
- Frontend: React component at /profile route with form validation
- API Endpoint: GET/PUT /api/users/:id for fetching and updating profile
- Data Model: Extend User schema with profile fields (avatar, bio, preferences)
- File Upload: Integrate cloud storage for avatar images with client-side resizing
- State Management: Use optimistic updates for better UX
- Validation: Server-side validation matching client-side rules

Components:
- ProfileHeader (avatar, name, edit button)
- ProfileForm (editable fields with validation)
- AvatarUploader (drag-drop with preview)

Security: Ensure users can only edit their own profile (auth middleware)`,
  },
  {
    input: "Add search functionality",
    output: `Add Search Functionality

Implement full-text search across application content.

Technical Implementation:
- Search Engine: Use Elasticsearch or PostgreSQL full-text search
- API: GET /api/search?q={query}&type={type}&page={page}
- Indexing: Create search index with relevant fields, update on content changes
- Frontend: Debounced search input (300ms) with typeahead suggestions
- Results: Paginated results with highlighted matching text

Architecture:
- SearchService class handling query building and execution
- SearchIndex worker for background indexing
- SearchResults component with filtering and sorting

Performance:
- Implement search result caching (Redis, 5-minute TTL)
- Limit results per page (20 items)
- Add query complexity limits to prevent abuse`,
  },
];

/**
 * Few-shot examples for the "simplify" enhancement mode
 */
export const SIMPLIFY_EXAMPLES: EnhancementExample[] = [
  {
    input: `We need to implement a feature that would allow our users to be able to export their data in various different formats. The formats we are thinking about supporting include CSV which is commonly used for spreadsheet applications, and also JSON which is good for developers, and possibly PDF for people who want a printable version. The user should be able to go somewhere in the UI and select which format they want and then download their data in that format. We should probably also show some kind of progress indicator while the export is happening since it might take a while for large datasets.`,
    output: `Add Data Export Feature

Allow users to export their data in CSV, JSON, or PDF format.

Requirements:
- Add export button/menu in the UI
- Support three formats: CSV (spreadsheets), JSON (developers), PDF (printing)
- Show progress indicator during export
- Handle large datasets gracefully`,
  },
  {
    input: `The current notification system is not working very well and users are complaining about it. Sometimes they don't receive notifications at all, and other times they receive too many notifications and feel overwhelmed. We also have had reports that the notifications are not appearing correctly on mobile devices. Additionally, some users have requested the ability to customize which types of notifications they receive. We should look into all of these issues and make the notification system work better overall.`,
    output: `Fix and Improve Notification System

Address reliability issues and add user controls for notifications.

Issues to fix:
- Missing notifications (delivery reliability)
- Notification overload (implement batching/digest)
- Mobile display problems

Enhancements:
- Add notification preferences (per-type controls)
- Test across devices and platforms`,
  },
];

/**
 * Few-shot examples for the "acceptance" enhancement mode
 */
export const ACCEPTANCE_EXAMPLES: EnhancementExample[] = [
  {
    input: "Add password reset functionality",
    output: `Add Password Reset Functionality

Allow users to reset their password via email when they forget it.

Acceptance Criteria:

1. Given a user is on the login page, when they click "Forgot Password", then they should see a password reset form requesting their email.

2. Given a user enters a valid registered email, when they submit the form, then they should receive a password reset email within 2 minutes.

3. Given a user clicks the reset link in their email, when the link is less than 1 hour old, then they should see a form to enter a new password.

4. Given a user enters a new valid password (min 8 chars, 1 number, 1 special char), when they submit, then their password should be updated and they should be redirected to login.

5. Given a user enters an unregistered email, when they submit the reset form, then they should see the same success message (security: don't reveal which emails exist).

6. Given a reset link older than 1 hour, when a user clicks it, then they should see an expiration message with option to request a new link.`,
  },
  {
    input: "Shopping cart checkout",
    output: `Shopping Cart Checkout

Implement the checkout flow for purchasing items in the shopping cart.

Acceptance Criteria:

1. Given a user has items in their cart, when they click "Checkout", then they should see an order summary with item details and total price.

2. Given a user is on the checkout page, when they enter valid shipping information, then the form should validate in real-time and show estimated delivery date.

3. Given valid shipping info is entered, when the user proceeds to payment, then they should see available payment methods (credit card, PayPal).

4. Given valid payment details are entered, when the user confirms the order, then the payment should be processed and order confirmation displayed within 5 seconds.

5. Given a successful order, when confirmation is shown, then the user should receive an email receipt and their cart should be emptied.

6. Given a payment failure, when the error occurs, then the user should see a clear error message and their cart should remain intact.

7. Given the user closes the browser during checkout, when they return, then their cart contents should still be available.`,
  },
];

/**
 * Map of enhancement modes to their system prompts
 */
const SYSTEM_PROMPTS: Record<EnhancementMode, string> = {
  improve: IMPROVE_SYSTEM_PROMPT,
  technical: TECHNICAL_SYSTEM_PROMPT,
  simplify: SIMPLIFY_SYSTEM_PROMPT,
  acceptance: ACCEPTANCE_SYSTEM_PROMPT,
};

/**
 * Map of enhancement modes to their few-shot examples
 */
const EXAMPLES: Record<EnhancementMode, EnhancementExample[]> = {
  improve: IMPROVE_EXAMPLES,
  technical: TECHNICAL_EXAMPLES,
  simplify: SIMPLIFY_EXAMPLES,
  acceptance: ACCEPTANCE_EXAMPLES,
};

/**
 * Enhancement prompt configuration returned by getEnhancementPrompt
 */
export interface EnhancementPromptConfig {
  /** System prompt for the enhancement mode */
  systemPrompt: string;
  /** Description of what this mode does */
  description: string;
}

/**
 * Descriptions for each enhancement mode
 */
const MODE_DESCRIPTIONS: Record<EnhancementMode, string> = {
  improve: "Transform vague requests into clear, actionable task descriptions",
  technical: "Add implementation details and technical specifications",
  simplify: "Make verbose descriptions concise and focused",
  acceptance: "Add testable acceptance criteria to task descriptions",
};

/**
 * Get the enhancement prompt configuration for a given mode
 *
 * @param mode - The enhancement mode (falls back to 'improve' if invalid)
 * @returns The enhancement prompt configuration
 */
export function getEnhancementPrompt(mode: string): EnhancementPromptConfig {
  const normalizedMode = mode.toLowerCase() as EnhancementMode;
  const validMode = normalizedMode in SYSTEM_PROMPTS ? normalizedMode : "improve";

  return {
    systemPrompt: SYSTEM_PROMPTS[validMode],
    description: MODE_DESCRIPTIONS[validMode],
  };
}

/**
 * Get the system prompt for a specific enhancement mode
 *
 * @param mode - The enhancement mode to get the prompt for
 * @returns The system prompt string
 */
export function getSystemPrompt(mode: EnhancementMode): string {
  return SYSTEM_PROMPTS[mode];
}

/**
 * Get the few-shot examples for a specific enhancement mode
 *
 * @param mode - The enhancement mode to get examples for
 * @returns Array of input/output example pairs
 */
export function getExamples(mode: EnhancementMode): EnhancementExample[] {
  return EXAMPLES[mode];
}

/**
 * Build a user prompt for enhancement with optional few-shot examples
 *
 * @param mode - The enhancement mode
 * @param text - The text to enhance
 * @param includeExamples - Whether to include few-shot examples (default: true)
 * @returns The formatted user prompt string
 */
export function buildUserPrompt(
  mode: EnhancementMode,
  text: string,
  includeExamples: boolean = true
): string {
  const examples = includeExamples ? getExamples(mode) : [];

  if (examples.length === 0) {
    return `Please enhance the following task description:\n\n${text}`;
  }

  // Build few-shot examples section
  const examplesSection = examples
    .map(
      (example, index) =>
        `Example ${index + 1}:\nInput: ${example.input}\nOutput: ${example.output}`
    )
    .join("\n\n---\n\n");

  return `Here are some examples of how to enhance task descriptions:

${examplesSection}

---

Now, please enhance the following task description:

${text}`;
}

/**
 * Check if a mode is a valid enhancement mode
 *
 * @param mode - The mode to check
 * @returns True if the mode is valid
 */
export function isValidEnhancementMode(mode: string): mode is EnhancementMode {
  return mode in SYSTEM_PROMPTS;
}

/**
 * Get all available enhancement modes
 *
 * @returns Array of available enhancement mode names
 */
export function getAvailableEnhancementModes(): EnhancementMode[] {
  return Object.keys(SYSTEM_PROMPTS) as EnhancementMode[];
}
export {
  IMPROVE_SYSTEM_PROMPT,
  TECHNICAL_SYSTEM_PROMPT,
  SIMPLIFY_SYSTEM_PROMPT,
  ACCEPTANCE_SYSTEM_PROMPT,
  IMPROVE_EXAMPLES,
  TECHNICAL_EXAMPLES,
  SIMPLIFY_EXAMPLES,
  ACCEPTANCE_EXAMPLES,
  getEnhancementPrompt,
  getSystemPrompt,
  getExamples,
  buildUserPrompt,
  isValidEnhancementMode,
  getAvailableEnhancementModes,
} from '@automaker/prompts';

export type { EnhancementMode, EnhancementExample } from '@automaker/prompts';
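Since this file now only re-exports from `@automaker/prompts`, existing call sites keep compiling unchanged. A small sketch of how the helpers above are typically combined, based purely on the signatures shown in the removed code (the wrapper function is illustrative):

```ts
import {
  getEnhancementPrompt,
  buildUserPrompt,
  isValidEnhancementMode,
} from "./enhancement-prompts.js"; // same module; now backed by @automaker/prompts

function prepareEnhancement(rawMode: string, text: string) {
  // Fall back to "improve" for unknown modes, mirroring getEnhancementPrompt's behavior
  const mode = isValidEnhancementMode(rawMode) ? rawMode : "improve";
  const { systemPrompt, description } = getEnhancementPrompt(mode);
  const userPrompt = buildUserPrompt(mode, text); // few-shot examples included by default
  return { systemPrompt, userPrompt, description };
}
```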
@@ -2,31 +2,10 @@
 * Event emitter for streaming events to WebSocket clients
 */

export type EventType =
  | "agent:stream"
  | "auto-mode:event"
  | "auto-mode:started"
  | "auto-mode:stopped"
  | "auto-mode:idle"
  | "auto-mode:error"
  | "feature:started"
  | "feature:completed"
  | "feature:stopped"
  | "feature:error"
  | "feature:progress"
  | "feature:tool-use"
  | "feature:follow-up-started"
  | "feature:follow-up-completed"
  | "feature:verified"
  | "feature:committed"
  | "project:analysis-started"
  | "project:analysis-progress"
  | "project:analysis-completed"
  | "project:analysis-error"
  | "suggestions:event"
  | "spec-regeneration:event";
import type { EventType, EventCallback } from "@automaker/types";

export type EventCallback = (type: EventType, payload: unknown) => void;
// Re-export event types from shared package
export type { EventType, EventCallback };

export interface EventEmitter {
  emit: (type: EventType, payload: unknown) => void;
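The event names themselves now live in `@automaker/types`; this module keeps re-exporting them so existing subscribers compile unchanged. A hedged sketch of a callback that narrows on the event type (the console forwarding is illustrative; the real WebSocket delivery is not part of this diff):

```ts
import type { EventType, EventCallback } from "@automaker/types";

// A callback matching the re-exported EventCallback shape:
// (type: EventType, payload: unknown) => void
const forwardFeatureEvents: EventCallback = (type: EventType, payload: unknown) => {
  if (type === "feature:completed" || type === "feature:error") {
    // payload is deliberately untyped; consumers validate it themselves
    console.log(`feature event ${type}`, payload);
  }
};
```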
@@ -12,11 +12,8 @@
 */

import type { Options } from "@anthropic-ai/claude-agent-sdk";
import {
  resolveModelString,
  DEFAULT_MODELS,
  CLAUDE_MODEL_MAP,
} from "./model-resolver.js";
import { resolveModelString } from "@automaker/model-resolver";
import { DEFAULT_MODELS, CLAUDE_MODEL_MAP } from "@automaker/types";

/**
 * Tool presets for different use cases
@@ -1,168 +1,23 @@
/**
 * Secure File System Adapter
 *
 * All file I/O operations must go through this adapter to enforce
 * ALLOWED_ROOT_DIRECTORY restrictions at the actual access point,
 * not just at the API layer. This provides defense-in-depth security.
 * Re-export secure file system utilities from @automaker/platform
 * This file exists for backward compatibility with existing imports
 */

import fs from "fs/promises";
import type { Dirent } from "fs";
import path from "path";
import { validatePath } from "./security.js";
import { secureFs } from "@automaker/platform";

/**
 * Wrapper around fs.access that validates path first
 */
export async function access(filePath: string, mode?: number): Promise<void> {
  const validatedPath = validatePath(filePath);
  return fs.access(validatedPath, mode);
}

/**
 * Wrapper around fs.readFile that validates path first
 */
export async function readFile(
  filePath: string,
  encoding?: BufferEncoding
): Promise<string | Buffer> {
  const validatedPath = validatePath(filePath);
  if (encoding) {
    return fs.readFile(validatedPath, encoding);
  }
  return fs.readFile(validatedPath);
}

/**
 * Wrapper around fs.writeFile that validates path first
 */
export async function writeFile(
  filePath: string,
  data: string | Buffer,
  encoding?: BufferEncoding
): Promise<void> {
  const validatedPath = validatePath(filePath);
  return fs.writeFile(validatedPath, data, encoding);
}

/**
 * Wrapper around fs.mkdir that validates path first
 */
export async function mkdir(
  dirPath: string,
  options?: { recursive?: boolean; mode?: number }
): Promise<string | undefined> {
  const validatedPath = validatePath(dirPath);
  return fs.mkdir(validatedPath, options);
}

/**
 * Wrapper around fs.readdir that validates path first
 */
export async function readdir(
  dirPath: string,
  options?: { withFileTypes?: false; encoding?: BufferEncoding }
): Promise<string[]>;
export async function readdir(
  dirPath: string,
  options: { withFileTypes: true; encoding?: BufferEncoding }
): Promise<Dirent[]>;
export async function readdir(
  dirPath: string,
  options?: { withFileTypes?: boolean; encoding?: BufferEncoding }
): Promise<string[] | Dirent[]> {
  const validatedPath = validatePath(dirPath);
  if (options?.withFileTypes === true) {
    return fs.readdir(validatedPath, { withFileTypes: true });
  }
  return fs.readdir(validatedPath);
}

/**
 * Wrapper around fs.stat that validates path first
 */
export async function stat(filePath: string): Promise<any> {
  const validatedPath = validatePath(filePath);
  return fs.stat(validatedPath);
}

/**
 * Wrapper around fs.rm that validates path first
 */
export async function rm(
  filePath: string,
  options?: { recursive?: boolean; force?: boolean }
): Promise<void> {
  const validatedPath = validatePath(filePath);
  return fs.rm(validatedPath, options);
}

/**
 * Wrapper around fs.unlink that validates path first
 */
export async function unlink(filePath: string): Promise<void> {
  const validatedPath = validatePath(filePath);
  return fs.unlink(validatedPath);
}

/**
 * Wrapper around fs.copyFile that validates both paths first
 */
export async function copyFile(
  src: string,
  dest: string,
  mode?: number
): Promise<void> {
  const validatedSrc = validatePath(src);
  const validatedDest = validatePath(dest);
  return fs.copyFile(validatedSrc, validatedDest, mode);
}

/**
 * Wrapper around fs.appendFile that validates path first
 */
export async function appendFile(
  filePath: string,
  data: string | Buffer,
  encoding?: BufferEncoding
): Promise<void> {
  const validatedPath = validatePath(filePath);
  return fs.appendFile(validatedPath, data, encoding);
}

/**
 * Wrapper around fs.rename that validates both paths first
 */
export async function rename(
  oldPath: string,
  newPath: string
): Promise<void> {
  const validatedOldPath = validatePath(oldPath);
  const validatedNewPath = validatePath(newPath);
  return fs.rename(validatedOldPath, validatedNewPath);
}

/**
 * Wrapper around fs.lstat that validates path first
 * Returns file stats without following symbolic links
 */
export async function lstat(filePath: string): Promise<any> {
  const validatedPath = validatePath(filePath);
  return fs.lstat(validatedPath);
}

/**
 * Wrapper around path.join that returns resolved path
 * Does NOT validate - use this for path construction, then pass to other operations
 */
export function joinPath(...pathSegments: string[]): string {
  return path.join(...pathSegments);
}

/**
 * Wrapper around path.resolve that returns resolved path
 * Does NOT validate - use this for path construction, then pass to other operations
 */
export function resolvePath(...pathSegments: string[]): string {
  return path.resolve(...pathSegments);
}
export const {
  access,
  readFile,
  writeFile,
  mkdir,
  readdir,
  stat,
  rm,
  unlink,
  copyFile,
  appendFile,
  rename,
  lstat,
  joinPath,
  resolvePath,
} = secureFs;
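Callers keep importing the same wrapper names; only the implementations now come from `@automaker/platform`'s `secureFs` object. A usage sketch based on the wrapper signatures shown above (the module path and file names are illustrative assumptions):

```ts
import { readFile, writeFile, joinPath } from "./secure-fs.js"; // re-exported from @automaker/platform

async function backupNotes(projectDir: string): Promise<void> {
  // joinPath only builds the path; validation happens inside readFile/writeFile
  const source = joinPath(projectDir, "notes.md");
  const target = joinPath(projectDir, "notes.backup.md");

  const contents = await readFile(source, "utf-8"); // rejects paths outside ALLOWED_ROOT_DIRECTORY
  await writeFile(target, contents, "utf-8");
}
```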
@@ -5,7 +5,7 @@
 */

import type { Request, Response, NextFunction } from "express";
import { validatePath, PathNotAllowedError } from "../lib/security.js";
import { validatePath, PathNotAllowedError } from "@automaker/platform";

/**
 * Creates a middleware that validates specified path parameters in req.body
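The middleware factory described by the comment above can be sketched roughly as follows. The factory name, body handling, and response shape are assumptions for illustration; only `validatePath` and `PathNotAllowedError` come from this diff:

```ts
import type { Request, Response, NextFunction } from "express";
import { validatePath, PathNotAllowedError } from "@automaker/platform";

// Hypothetical factory: validates the named body fields before the route handler runs
export function createPathValidationMiddleware(...fields: string[]) {
  return (req: Request, res: Response, next: NextFunction): void => {
    try {
      for (const field of fields) {
        const value = req.body?.[field];
        if (typeof value === "string") {
          req.body[field] = validatePath(value); // rewrite with the validated path
        }
      }
      next();
    } catch (error) {
      if (error instanceof PathNotAllowedError) {
        res.status(400).json({ success: false, error: error.message });
        return;
      }
      next(error);
    }
  };
}
```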
@@ -2,7 +2,7 @@
 * Common utilities for agent routes
 */

import { createLogger } from "../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import {
  getErrorMessage as getErrorMessageShared,
  createLogError,

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import { AgentService } from "../../../services/agent-service.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { getErrorMessage, logError } from "../common.js";

const logger = createLogger("Agent");

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import { AgentService } from "../../../services/agent-service.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { getErrorMessage, logError } from "../common.js";

const logger = createLogger("Agent");

@@ -2,7 +2,7 @@
 * Common utilities and state management for spec regeneration
 */

import { createLogger } from "../../lib/logger.js";
import { createLogger } from "@automaker/utils";

const logger = createLogger("SpecRegeneration");

@@ -5,11 +5,11 @@
import { query } from "@anthropic-ai/claude-agent-sdk";
import fs from "fs/promises";
import type { EventEmitter } from "../../lib/events.js";
import { createLogger } from "../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { createFeatureGenerationOptions } from "../../lib/sdk-options.js";
import { logAuthStatus } from "./common.js";
import { parseAndCreateFeatures } from "./parse-and-create-features.js";
import { getAppSpecPath } from "../../lib/automaker-paths.js";
import { getAppSpecPath } from "@automaker/platform";

const logger = createLogger("SpecRegeneration");

@@ -12,11 +12,11 @@ import {
  getStructuredSpecPromptInstruction,
  type SpecOutput,
} from "../../lib/app-spec-format.js";
import { createLogger } from "../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { createSpecGenerationOptions } from "../../lib/sdk-options.js";
import { logAuthStatus } from "./common.js";
import { generateFeaturesFromSpec } from "./generate-features-from-spec.js";
import { ensureAutomakerDir, getAppSpecPath } from "../../lib/automaker-paths.js";
import { ensureAutomakerDir, getAppSpecPath } from "@automaker/platform";

const logger = createLogger("SpecRegeneration");

@@ -5,8 +5,8 @@
import path from "path";
import fs from "fs/promises";
import type { EventEmitter } from "../../lib/events.js";
import { createLogger } from "../../lib/logger.js";
import { getFeaturesDir } from "../../lib/automaker-paths.js";
import { createLogger } from "@automaker/utils";
import { getFeaturesDir } from "@automaker/platform";

const logger = createLogger("SpecRegeneration");

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { EventEmitter } from "../../../lib/events.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import {
  getSpecRegenerationStatus,
  setRunningState,

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { EventEmitter } from "../../../lib/events.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import {
  getSpecRegenerationStatus,
  setRunningState,

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { EventEmitter } from "../../../lib/events.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import {
  getSpecRegenerationStatus,
  setRunningState,

@@ -2,7 +2,7 @@
 * Common utilities for auto-mode routes
 */

import { createLogger } from "../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import {
  getErrorMessage as getErrorMessageShared,
  createLogError,

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { AutoModeService } from "../../../services/auto-mode-service.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { getErrorMessage, logError } from "../common.js";

const logger = createLogger("AutoMode");

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { AutoModeService } from "../../../services/auto-mode-service.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { getErrorMessage, logError } from "../common.js";

const logger = createLogger("AutoMode");

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { AutoModeService } from "../../../services/auto-mode-service.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { getErrorMessage, logError } from "../common.js";

const logger = createLogger("AutoMode");

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { AutoModeService } from "../../../services/auto-mode-service.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { getErrorMessage, logError } from "../common.js";

const logger = createLogger("AutoMode");

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import type { AutoModeService } from "../../../services/auto-mode-service.js";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { getErrorMessage, logError } from "../common.js";

const logger = createLogger("AutoMode");
@@ -2,368 +2,24 @@
 * Common utilities shared across all route modules
 */

import { createLogger } from "../lib/logger.js";
import fs from "fs/promises";
import path from "path";
import { exec } from "child_process";
import { promisify } from "util";
import { createLogger } from "@automaker/utils";

// Re-export git utilities from shared package
export {
  BINARY_EXTENSIONS,
  GIT_STATUS_MAP,
  type FileStatus,
  isGitRepo,
  parseGitStatus,
  generateSyntheticDiffForNewFile,
  appendUntrackedFileDiffs,
  listAllFilesInDirectory,
  generateDiffsForNonGitDirectory,
  getGitRepositoryDiffs,
} from "@automaker/git-utils";

type Logger = ReturnType<typeof createLogger>;

const execAsync = promisify(exec);
const logger = createLogger("Common");

// Max file size for generating synthetic diffs (1MB)
const MAX_SYNTHETIC_DIFF_SIZE = 1024 * 1024;

// Binary file extensions to skip
const BINARY_EXTENSIONS = new Set([
  ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".ico", ".webp", ".svg",
  ".pdf", ".doc", ".docx", ".xls", ".xlsx", ".ppt", ".pptx",
  ".zip", ".tar", ".gz", ".rar", ".7z",
  ".exe", ".dll", ".so", ".dylib",
  ".mp3", ".mp4", ".wav", ".avi", ".mov", ".mkv",
  ".ttf", ".otf", ".woff", ".woff2", ".eot",
  ".db", ".sqlite", ".sqlite3",
  ".pyc", ".pyo", ".class", ".o", ".obj",
]);

// Status map for git status codes
// Git porcelain format uses XY where X=staging area, Y=working tree
const GIT_STATUS_MAP: Record<string, string> = {
  M: "Modified",
  A: "Added",
  D: "Deleted",
  R: "Renamed",
  C: "Copied",
  U: "Updated",
  "?": "Untracked",
  "!": "Ignored",
  " ": "Unmodified",
};

/**
 * Get a readable status text from git status codes
 * Handles both single character and XY format status codes
 */
function getStatusText(indexStatus: string, workTreeStatus: string): string {
  // Untracked files
  if (indexStatus === "?" && workTreeStatus === "?") {
    return "Untracked";
  }

  // Ignored files
  if (indexStatus === "!" && workTreeStatus === "!") {
    return "Ignored";
  }

  // Prioritize staging area status, then working tree
  const primaryStatus = indexStatus !== " " && indexStatus !== "?" ? indexStatus : workTreeStatus;

  // Handle combined statuses
  if (indexStatus !== " " && indexStatus !== "?" && workTreeStatus !== " " && workTreeStatus !== "?") {
    // Both staging and working tree have changes
    const indexText = GIT_STATUS_MAP[indexStatus] || "Changed";
    const workText = GIT_STATUS_MAP[workTreeStatus] || "Changed";
    if (indexText === workText) {
      return indexText;
    }
    return `${indexText} (staged), ${workText} (unstaged)`;
  }

  return GIT_STATUS_MAP[primaryStatus] || "Changed";
}

/**
 * File status interface for git status results
 */
export interface FileStatus {
  status: string;
  path: string;
  statusText: string;
}

/**
 * Check if a file is likely binary based on extension
 */
function isBinaryFile(filePath: string): boolean {
  const ext = path.extname(filePath).toLowerCase();
  return BINARY_EXTENSIONS.has(ext);
}

/**
 * Check if a path is a git repository
 */
export async function isGitRepo(repoPath: string): Promise<boolean> {
  try {
    await execAsync("git rev-parse --is-inside-work-tree", { cwd: repoPath });
    return true;
  } catch {
    return false;
  }
}

/**
 * Parse the output of `git status --porcelain` into FileStatus array
 * Git porcelain format: XY PATH where X=staging area status, Y=working tree status
 * For renamed files: XY ORIG_PATH -> NEW_PATH
 */
export function parseGitStatus(statusOutput: string): FileStatus[] {
  return statusOutput
    .split("\n")
    .filter(Boolean)
    .map((line) => {
      // Git porcelain format uses two status characters: XY
      // X = status in staging area (index)
      // Y = status in working tree
      const indexStatus = line[0] || " ";
      const workTreeStatus = line[1] || " ";

      // File path starts at position 3 (after "XY ")
      let filePath = line.slice(3);

      // Handle renamed files (format: "R old_path -> new_path")
      if (indexStatus === "R" || workTreeStatus === "R") {
        const arrowIndex = filePath.indexOf(" -> ");
        if (arrowIndex !== -1) {
          filePath = filePath.slice(arrowIndex + 4); // Use new path
        }
      }

      // Determine the primary status character for backwards compatibility
      // Prioritize staging area status, then working tree
      let primaryStatus: string;
      if (indexStatus === "?" && workTreeStatus === "?") {
        primaryStatus = "?"; // Untracked
      } else if (indexStatus !== " " && indexStatus !== "?") {
        primaryStatus = indexStatus; // Staged change
      } else {
        primaryStatus = workTreeStatus; // Working tree change
      }

      return {
        status: primaryStatus,
        path: filePath,
        statusText: getStatusText(indexStatus, workTreeStatus),
      };
    });
}

/**
 * Generate a synthetic unified diff for an untracked (new) file
 * This is needed because `git diff HEAD` doesn't include untracked files
 */
export async function generateSyntheticDiffForNewFile(
  basePath: string,
  relativePath: string
): Promise<string> {
  const fullPath = path.join(basePath, relativePath);

  try {
    // Check if it's a binary file
    if (isBinaryFile(relativePath)) {
      return `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
Binary file ${relativePath} added
`;
    }

    // Get file stats to check size
    const stats = await fs.stat(fullPath);
    if (stats.size > MAX_SYNTHETIC_DIFF_SIZE) {
      const sizeKB = Math.round(stats.size / 1024);
      return `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
--- /dev/null
+++ b/${relativePath}
@@ -0,0 +1 @@
+[File too large to display: ${sizeKB}KB]
`;
    }

    // Read file content
    const content = await fs.readFile(fullPath, "utf-8");
    const hasTrailingNewline = content.endsWith("\n");
    const lines = content.split("\n");

    // Remove trailing empty line if the file ends with newline
    if (lines.length > 0 && lines.at(-1) === "") {
      lines.pop();
    }

    // Generate diff format
    const lineCount = lines.length;
    const addedLines = lines.map(line => `+${line}`).join("\n");

    let diff = `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
--- /dev/null
+++ b/${relativePath}
@@ -0,0 +1,${lineCount} @@
${addedLines}`;

    // Add "No newline at end of file" indicator if needed
    if (!hasTrailingNewline && content.length > 0) {
      diff += "\n\\ No newline at end of file";
    }

    return diff + "\n";
  } catch (error) {
    // Log the error for debugging
    logger.error(`Failed to generate synthetic diff for ${fullPath}:`, error);
    // Return a placeholder diff
    return `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
--- /dev/null
+++ b/${relativePath}
@@ -0,0 +1 @@
+[Unable to read file content]
`;
  }
}

/**
 * Generate synthetic diffs for all untracked files and combine with existing diff
 */
export async function appendUntrackedFileDiffs(
  basePath: string,
  existingDiff: string,
  files: Array<{ status: string; path: string }>
): Promise<string> {
  // Find untracked files (status "?")
  const untrackedFiles = files.filter(f => f.status === "?");

  if (untrackedFiles.length === 0) {
    return existingDiff;
  }

  // Generate synthetic diffs for each untracked file
  const syntheticDiffs = await Promise.all(
    untrackedFiles.map(f => generateSyntheticDiffForNewFile(basePath, f.path))
  );

  // Combine existing diff with synthetic diffs
  const combinedDiff = existingDiff + syntheticDiffs.join("");

  return combinedDiff;
}

/**
 * List all files in a directory recursively (for non-git repositories)
 * Excludes hidden files/folders and common build artifacts
 */
export async function listAllFilesInDirectory(
  basePath: string,
  relativePath: string = ""
): Promise<string[]> {
  const files: string[] = [];
  const fullPath = path.join(basePath, relativePath);

  // Directories to skip
  const skipDirs = new Set([
    "node_modules", ".git", ".automaker", "dist", "build",
    ".next", ".nuxt", "__pycache__", ".cache", "coverage"
  ]);

  try {
    const entries = await fs.readdir(fullPath, { withFileTypes: true });

    for (const entry of entries) {
      // Skip hidden files/folders (except we want to allow some)
      if (entry.name.startsWith(".") && entry.name !== ".env") {
        continue;
      }

      const entryRelPath = relativePath ? `${relativePath}/${entry.name}` : entry.name;

      if (entry.isDirectory()) {
        if (!skipDirs.has(entry.name)) {
          const subFiles = await listAllFilesInDirectory(basePath, entryRelPath);
          files.push(...subFiles);
        }
      } else if (entry.isFile()) {
        files.push(entryRelPath);
      }
    }
  } catch (error) {
    // Log the error to help diagnose file system issues
    logger.error(`Error reading directory ${fullPath}:`, error);
  }

  return files;
}

/**
 * Generate diffs for all files in a non-git directory
 * Treats all files as "new" files
 */
export async function generateDiffsForNonGitDirectory(
  basePath: string
): Promise<{ diff: string; files: FileStatus[] }> {
  const allFiles = await listAllFilesInDirectory(basePath);

  const files: FileStatus[] = allFiles.map(filePath => ({
    status: "?",
    path: filePath,
    statusText: "New",
  }));

  // Generate synthetic diffs for all files
  const syntheticDiffs = await Promise.all(
    files.map(f => generateSyntheticDiffForNewFile(basePath, f.path))
  );

  return {
    diff: syntheticDiffs.join(""),
    files,
  };
}

/**
 * Get git repository diffs for a given path
 * Handles both git repos and non-git directories
 */
export async function getGitRepositoryDiffs(
  repoPath: string
): Promise<{ diff: string; files: FileStatus[]; hasChanges: boolean }> {
  // Check if it's a git repository
  const isRepo = await isGitRepo(repoPath);

  if (!isRepo) {
    // Not a git repo - list all files and treat them as new
    const result = await generateDiffsForNonGitDirectory(repoPath);
    return {
      diff: result.diff,
      files: result.files,
      hasChanges: result.files.length > 0,
    };
  }

  // Get git diff and status
  const { stdout: diff } = await execAsync("git diff HEAD", {
    cwd: repoPath,
    maxBuffer: 10 * 1024 * 1024,
  });
  const { stdout: status } = await execAsync("git status --porcelain", {
    cwd: repoPath,
  });

  const files = parseGitStatus(status);

  // Generate synthetic diffs for untracked (new) files
  const combinedDiff = await appendUntrackedFileDiffs(repoPath, diff, files);

  return {
    diff: combinedDiff,
    files,
    hasChanges: files.length > 0,
  };
}

/**
 * Get error message from error object
 */
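The parsing rules in the removed code above are easiest to see with a concrete porcelain sample. The expected values follow directly from `parseGitStatus` and `getStatusText` as written; the import now resolves through the shared package:

```ts
import { parseGitStatus } from "@automaker/git-utils";

const porcelain = [
  "M  src/index.ts",               // staged modification
  " M src/server.ts",              // unstaged modification
  "?? notes.md",                   // untracked file
  "R  old-name.ts -> new-name.ts", // rename: only the new path is kept
].join("\n");

const files = parseGitStatus(porcelain);
// files[0] -> { status: "M", path: "src/index.ts", statusText: "Modified" }
// files[2] -> { status: "?", path: "notes.md", statusText: "Untracked" }
// files[3] -> { status: "R", path: "new-name.ts", statusText: "Renamed" }
```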
@@ -7,14 +7,15 @@

import type { Request, Response } from "express";
import { query } from "@anthropic-ai/claude-agent-sdk";
import { createLogger } from "../../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import { resolveModelString } from "@automaker/model-resolver";
import { CLAUDE_MODEL_MAP } from "@automaker/types";
import {
  getSystemPrompt,
  buildUserPrompt,
  isValidEnhancementMode,
  type EnhancementMode,
} from "../../../lib/enhancement-prompts.js";
import { resolveModelString, CLAUDE_MODEL_MAP } from "../../../lib/model-resolver.js";

const logger = createLogger("EnhancePrompt");
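A rough sketch of how a handler can guard the requested mode before building prompts, using only the helpers imported in the hunk above. The factory name and request/response shape are assumptions, and the actual Claude Agent SDK call is deliberately omitted because its options are not part of this diff:

```ts
import type { Request, Response } from "express";
import {
  getSystemPrompt,
  buildUserPrompt,
  isValidEnhancementMode,
} from "../../../lib/enhancement-prompts.js";

// Hypothetical handler factory, illustrative only
export function createEnhancePromptSketchHandler() {
  return (req: Request, res: Response): void => {
    const { mode, text } = req.body as { mode?: string; text?: string };

    if (!text || !mode || !isValidEnhancementMode(mode)) {
      res.status(400).json({ success: false, error: "a valid mode and text are required" });
      return;
    }

    // The real route passes these prompts to the SDK's query(); omitted here.
    const systemPrompt = getSystemPrompt(mode);
    const userPrompt = buildUserPrompt(mode, text);
    res.json({ success: true, systemPrompt, userPrompt });
  };
}
```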
@@ -2,7 +2,7 @@
 * Common utilities for features routes
 */

import { createLogger } from "../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import {
  getErrorMessage as getErrorMessageShared,
  createLogError,

@@ -3,10 +3,8 @@
 */

import type { Request, Response } from "express";
import {
  FeatureLoader,
  type Feature,
} from "../../../services/feature-loader.js";
import { FeatureLoader } from "../../../services/feature-loader.js";
import type { Feature } from "@automaker/types";
import { getErrorMessage, logError } from "../common.js";

export function createCreateHandler(featureLoader: FeatureLoader) {

@@ -18,9 +16,7 @@ export function createCreateHandler(featureLoader: FeatureLoader) {
    };

    if (!projectPath || !feature) {
      res
        .status(400)
        .json({
      res.status(400).json({
        success: false,
        error: "projectPath and feature are required",
      });

@@ -6,8 +6,8 @@

import type { Request, Response } from "express";
import { query } from "@anthropic-ai/claude-agent-sdk";
import { createLogger } from "../../../lib/logger.js";
import { CLAUDE_MODEL_MAP } from "../../../lib/model-resolver.js";
import { createLogger } from "@automaker/utils";
import { CLAUDE_MODEL_MAP } from "@automaker/model-resolver";

const logger = createLogger("GenerateTitle");

@@ -3,10 +3,8 @@
 */

import type { Request, Response } from "express";
import {
  FeatureLoader,
  type Feature,
} from "../../../services/feature-loader.js";
import { FeatureLoader } from "../../../services/feature-loader.js";
import type { Feature } from "@automaker/types";
import { getErrorMessage, logError } from "../common.js";

export function createUpdateHandler(featureLoader: FeatureLoader) {

@@ -2,7 +2,7 @@
 * Common utilities for fs routes
 */

import { createLogger } from "../../lib/logger.js";
import { createLogger } from "@automaker/utils";
import {
  getErrorMessage as getErrorMessageShared,
  createLogError,

@@ -10,7 +10,7 @@ import {
  getAllowedRootDirectory,
  isPathAllowed,
  PathNotAllowedError,
} from "../../../lib/security.js";
} from "@automaker/platform";
import { getErrorMessage, logError } from "../common.js";

export function createBrowseHandler() {

@@ -6,7 +6,7 @@ import type { Request, Response } from "express";
import fs from "fs/promises";
import path from "path";
import { getErrorMessage, logError } from "../common.js";
import { getBoardDir } from "../../../lib/automaker-paths.js";
import { getBoardDir } from "@automaker/platform";

export function createDeleteBoardBackgroundHandler() {
  return async (req: Request, res: Response): Promise<void> => {

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import fs from "fs/promises";
import { validatePath, PathNotAllowedError } from "../../../lib/security.js";
import { validatePath, PathNotAllowedError } from "@automaker/platform";
import { getErrorMessage, logError } from "../common.js";

export function createDeleteHandler() {

@@ -5,7 +5,7 @@
import type { Request, Response } from "express";
import fs from "fs/promises";
import path from "path";
import { isPathAllowed, PathNotAllowedError } from "../../../lib/security.js";
import { isPathAllowed, PathNotAllowedError } from "@automaker/platform";
import { getErrorMessage, logError } from "../common.js";

export function createExistsHandler() {

@@ -6,7 +6,7 @@
import type { Request, Response } from "express";
import fs from "fs/promises";
import path from "path";
import { isPathAllowed, PathNotAllowedError } from "../../../lib/security.js";
import { isPathAllowed, PathNotAllowedError } from "@automaker/platform";
import { getErrorMessage, logError } from "../common.js";

export function createMkdirHandler() {

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import fs from "fs/promises";
import { validatePath, PathNotAllowedError } from "../../../lib/security.js";
import { validatePath, PathNotAllowedError } from "@automaker/platform";
import { getErrorMessage, logError } from "../common.js";

// Optional files that are expected to not exist in new projects

@@ -4,7 +4,7 @@

import type { Request, Response } from "express";
import fs from "fs/promises";
import { validatePath, PathNotAllowedError } from "../../../lib/security.js";
import { validatePath, PathNotAllowedError } from "@automaker/platform";
import { getErrorMessage, logError } from "../common.js";
|
||||
|
||||
export function createReaddirHandler() {
|
||||
|
||||
@@ -6,7 +6,7 @@ import type { Request, Response } from "express";
|
||||
import fs from "fs/promises";
|
||||
import path from "path";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
import { getBoardDir } from "../../../lib/automaker-paths.js";
|
||||
import { getBoardDir } from "@automaker/platform";
|
||||
|
||||
export function createSaveBoardBackgroundHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
|
||||
@@ -6,7 +6,7 @@ import type { Request, Response } from "express";
|
||||
import fs from "fs/promises";
|
||||
import path from "path";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
import { getImagesDir } from "../../../lib/automaker-paths.js";
|
||||
import { getImagesDir } from "@automaker/platform";
|
||||
|
||||
export function createSaveImageHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
import type { Request, Response } from "express";
|
||||
import fs from "fs/promises";
|
||||
import { validatePath, PathNotAllowedError } from "../../../lib/security.js";
|
||||
import { validatePath, PathNotAllowedError } from "@automaker/platform";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
|
||||
export function createStatHandler() {
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
import type { Request, Response } from "express";
|
||||
import fs from "fs/promises";
|
||||
import path from "path";
|
||||
import { isPathAllowed } from "../../../lib/security.js";
|
||||
import { isPathAllowed } from "@automaker/platform";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
|
||||
export function createValidatePathHandler() {
|
||||
|
||||
@@ -5,9 +5,9 @@
|
||||
import type { Request, Response } from "express";
|
||||
import fs from "fs/promises";
|
||||
import path from "path";
|
||||
import { validatePath, PathNotAllowedError } from "../../../lib/security.js";
|
||||
import { validatePath, PathNotAllowedError } from "@automaker/platform";
|
||||
import { mkdirSafe } from "@automaker/utils";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
import { mkdirSafe } from "../../../lib/fs-utils.js";
|
||||
|
||||
export function createWriteHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for git routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for health routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for models routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for running-agents routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for sessions routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* Re-exports error handling helpers from the parent routes module.
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities and state for setup routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import path from "path";
|
||||
import fs from "fs/promises";
|
||||
import {
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
import type { Request, Response } from "express";
|
||||
import { createLogger } from "../../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import path from "path";
|
||||
import fs from "fs/promises";
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ import {
|
||||
getErrorMessage,
|
||||
logError,
|
||||
} from "../common.js";
|
||||
import { createLogger } from "../../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
|
||||
const logger = createLogger("Setup");
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
import type { Request, Response } from "express";
|
||||
import { query } from "@anthropic-ai/claude-agent-sdk";
|
||||
import { createLogger } from "../../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import { getApiKey } from "../common.js";
|
||||
|
||||
const logger = createLogger("Setup");
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities and state for suggestions routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
import { query } from "@anthropic-ai/claude-agent-sdk";
|
||||
import type { EventEmitter } from "../../lib/events.js";
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import { createSuggestionsOptions } from "../../lib/sdk-options.js";
|
||||
|
||||
const logger = createLogger("Suggestions");
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
import type { Request, Response } from "express";
|
||||
import type { EventEmitter } from "../../../lib/events.js";
|
||||
import { createLogger } from "../../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getSuggestionsStatus,
|
||||
setRunningState,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for templates routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -6,7 +6,7 @@ import type { Request, Response } from "express";
|
||||
import { spawn } from "child_process";
|
||||
import path from "path";
|
||||
import fs from "fs/promises";
|
||||
import { isPathAllowed, PathNotAllowedError } from "../../../lib/security.js";
|
||||
import { isPathAllowed } from "@automaker/platform";
|
||||
import { logger, getErrorMessage, logError } from "../common.js";
|
||||
|
||||
export function createCloneHandler() {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities and state for terminal routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import type { Request, Response, NextFunction } from "express";
|
||||
import { getTerminalService } from "../../services/terminal-service.js";
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
import type { Request, Response } from "express";
|
||||
import { getTerminalService } from "../../../services/terminal-service.js";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
import { createLogger } from "../../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
|
||||
const logger = createLogger("Terminal");
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for workspace routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import {
|
||||
getErrorMessage as getErrorMessageShared,
|
||||
createLogError,
|
||||
|
||||
@@ -8,7 +8,7 @@ import path from "path";
|
||||
import {
|
||||
getAllowedRootDirectory,
|
||||
getDataDirectory,
|
||||
} from "../../../lib/security.js";
|
||||
} from "@automaker/platform";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
|
||||
export function createConfigHandler() {
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
import type { Request, Response } from "express";
|
||||
import fs from "fs/promises";
|
||||
import path from "path";
|
||||
import { getAllowedRootDirectory } from "../../../lib/security.js";
|
||||
import { getAllowedRootDirectory } from "@automaker/platform";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
|
||||
export function createDirectoriesHandler() {
|
||||
@@ -35,7 +35,9 @@ export function createDirectoriesHandler() {
|
||||
}
|
||||
|
||||
// Read directory contents
|
||||
const entries = await fs.readdir(resolvedWorkspaceDir, { withFileTypes: true });
|
||||
const entries = await fs.readdir(resolvedWorkspaceDir, {
|
||||
withFileTypes: true,
|
||||
});
|
||||
|
||||
// Filter to directories only and map to result format
|
||||
const directories = entries
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Common utilities for worktree routes
|
||||
*/
|
||||
|
||||
import { createLogger } from "../../lib/logger.js";
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import { exec } from "child_process";
|
||||
import { promisify } from "util";
|
||||
import path from "path";
|
||||
|
||||
@@ -10,7 +10,7 @@ import path from "path";
|
||||
import {
|
||||
getBranchTrackingPath,
|
||||
ensureAutomakerDir,
|
||||
} from "../../../lib/automaker-paths.js";
|
||||
} from "@automaker/platform";
|
||||
|
||||
export interface TrackedBranch {
|
||||
name: string;
|
||||
|
||||
@@ -5,7 +5,8 @@
|
||||
import type { Request, Response } from "express";
|
||||
import { exec } from "child_process";
|
||||
import { promisify } from "util";
|
||||
import { isGitRepo, getErrorMessage, logError } from "../common.js";
|
||||
import { isGitRepo } from "@automaker/git-utils";
|
||||
import { getErrorMessage, logError } from "../common.js";
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
|
||||
@@ -9,7 +9,8 @@ import type { Request, Response } from "express";
|
||||
import { exec } from "child_process";
|
||||
import { promisify } from "util";
|
||||
import { existsSync } from "fs";
|
||||
import { isGitRepo, getErrorMessage, logError, normalizePath } from "../common.js";
|
||||
import { isGitRepo } from "@automaker/git-utils";
|
||||
import { getErrorMessage, logError, normalizePath } from "../common.js";
|
||||
import { readAllWorktreeMetadata, type WorktreePRInfo } from "../../../lib/worktree-metadata.js";
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
*/
|
||||
|
||||
import type { Request, Response } from "express";
|
||||
import { getAutomakerDir } from "../../../lib/automaker-paths.js";
|
||||
import { getAutomakerDir } from "@automaker/platform";
|
||||
|
||||
export function createMigrateHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
|
||||
@@ -3,17 +3,18 @@
|
||||
* Manages conversation sessions and streams responses via WebSocket
|
||||
*/
|
||||
|
||||
import { AbortError } from "@anthropic-ai/claude-agent-sdk";
|
||||
import path from "path";
|
||||
import * as secureFs from "../lib/secure-fs.js";
|
||||
import type { EventEmitter } from "../lib/events.js";
|
||||
import type { ExecuteOptions } from "@automaker/types";
|
||||
import {
|
||||
readImageAsBase64,
|
||||
buildPromptWithImages,
|
||||
isAbortError,
|
||||
} from "@automaker/utils";
|
||||
import { ProviderFactory } from "../providers/provider-factory.js";
|
||||
import type { ExecuteOptions } from "../providers/types.js";
|
||||
import { readImageAsBase64 } from "../lib/image-handler.js";
|
||||
import { buildPromptWithImages } from "../lib/prompt-builder.js";
|
||||
import { createChatOptions } from "../lib/sdk-options.js";
|
||||
import { isAbortError } from "../lib/error-handler.js";
|
||||
import { isPathAllowed, PathNotAllowedError } from "../lib/security.js";
|
||||
import { isPathAllowed, PathNotAllowedError } from "@automaker/platform";
|
||||
|
||||
interface Message {
|
||||
id: string;
|
||||
@@ -87,7 +88,9 @@ export class AgentService {
|
||||
|
||||
// Validate that the working directory is allowed
|
||||
if (!isPathAllowed(resolvedWorkingDirectory)) {
|
||||
throw new PathNotAllowedError(effectiveWorkingDirectory);
|
||||
throw new Error(
|
||||
`Working directory ${effectiveWorkingDirectory} is not allowed`
|
||||
);
|
||||
}
|
||||
|
||||
this.sessions.set(sessionId, {
|
||||
@@ -401,7 +404,7 @@ export class AgentService {
|
||||
const sessionFile = path.join(this.stateDir, `${sessionId}.json`);
|
||||
|
||||
try {
|
||||
const data = await secureFs.readFile(sessionFile, "utf-8") as string;
|
||||
const data = (await secureFs.readFile(sessionFile, "utf-8")) as string;
|
||||
return JSON.parse(data);
|
||||
} catch {
|
||||
return [];
|
||||
@@ -425,7 +428,10 @@ export class AgentService {
|
||||
|
||||
async loadMetadata(): Promise<Record<string, SessionMetadata>> {
|
||||
try {
|
||||
const data = await secureFs.readFile(this.metadataFile, "utf-8") as string;
|
||||
const data = (await secureFs.readFile(
|
||||
this.metadataFile,
|
||||
"utf-8"
|
||||
)) as string;
|
||||
return JSON.parse(data);
|
||||
} catch {
|
||||
return {};
|
||||
@@ -472,7 +478,8 @@ export class AgentService {
|
||||
const metadata = await this.loadMetadata();
|
||||
|
||||
// Determine the effective working directory
|
||||
const effectiveWorkingDirectory = workingDirectory || projectPath || process.cwd();
|
||||
const effectiveWorkingDirectory =
|
||||
workingDirectory || projectPath || process.cwd();
|
||||
const resolvedWorkingDirectory = path.resolve(effectiveWorkingDirectory);
|
||||
|
||||
// Validate that the working directory is allowed
|
||||
|
||||
@@ -10,37 +10,47 @@
|
||||
*/
|
||||
|
||||
import { ProviderFactory } from "../providers/provider-factory.js";
|
||||
import type { ExecuteOptions } from "../providers/types.js";
|
||||
import type { ExecuteOptions, Feature } from "@automaker/types";
|
||||
import {
|
||||
buildPromptWithImages,
|
||||
isAbortError,
|
||||
classifyError,
|
||||
} from "@automaker/utils";
|
||||
import { resolveModelString, DEFAULT_MODELS } from "@automaker/model-resolver";
|
||||
import {
|
||||
resolveDependencies,
|
||||
areDependenciesSatisfied,
|
||||
} from "@automaker/dependency-resolver";
|
||||
import {
|
||||
getFeatureDir,
|
||||
getAutomakerDir,
|
||||
getFeaturesDir,
|
||||
getContextDir,
|
||||
} from "@automaker/platform";
|
||||
import { exec } from "child_process";
|
||||
import { promisify } from "util";
|
||||
import path from "path";
|
||||
import * as secureFs from "../lib/secure-fs.js";
|
||||
import type { EventEmitter } from "../lib/events.js";
|
||||
import { buildPromptWithImages } from "../lib/prompt-builder.js";
|
||||
import { resolveModelString, DEFAULT_MODELS } from "../lib/model-resolver.js";
|
||||
import { createAutoModeOptions } from "../lib/sdk-options.js";
|
||||
import { isAbortError, classifyError } from "../lib/error-handler.js";
|
||||
import { resolveDependencies, areDependenciesSatisfied } from "../lib/dependency-resolver.js";
|
||||
import type { Feature } from "./feature-loader.js";
|
||||
import { FeatureLoader } from "./feature-loader.js";
|
||||
import { getFeatureDir, getAutomakerDir, getFeaturesDir, getContextDir } from "../lib/automaker-paths.js";
|
||||
import { isPathAllowed, PathNotAllowedError } from "../lib/security.js";
|
||||
import { isPathAllowed, PathNotAllowedError } from "@automaker/platform";
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
// Planning mode types for spec-driven development
|
||||
type PlanningMode = 'skip' | 'lite' | 'spec' | 'full';
|
||||
type PlanningMode = "skip" | "lite" | "spec" | "full";
|
||||
|
||||
interface ParsedTask {
|
||||
id: string; // e.g., "T001"
|
||||
description: string; // e.g., "Create user model"
|
||||
filePath?: string; // e.g., "src/models/user.ts"
|
||||
phase?: string; // e.g., "Phase 1: Foundation" (for full mode)
|
||||
status: 'pending' | 'in_progress' | 'completed' | 'failed';
|
||||
status: "pending" | "in_progress" | "completed" | "failed";
|
||||
}
|
||||
|
||||
interface PlanSpec {
|
||||
status: 'pending' | 'generating' | 'generated' | 'approved' | 'rejected';
|
||||
status: "pending" | "generating" | "generated" | "approved" | "rejected";
|
||||
content?: string;
|
||||
version: number;
|
||||
generatedAt?: string;
|
||||
@@ -205,7 +215,7 @@ When approved, execute tasks SEQUENTIALLY by phase. For each task:
|
||||
After completing all tasks in a phase, output:
|
||||
"[PHASE_COMPLETE] Phase N complete"
|
||||
|
||||
This allows real-time progress tracking during implementation.`
|
||||
This allows real-time progress tracking during implementation.`,
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -236,7 +246,7 @@ function parseTasksFromSpec(specContent: string): ParsedTask[] {
|
||||
}
|
||||
|
||||
const tasksContent = tasksBlockMatch[1];
|
||||
const lines = tasksContent.split('\n');
|
||||
const lines = tasksContent.split("\n");
|
||||
|
||||
let currentPhase: string | undefined;
|
||||
|
||||
@@ -251,7 +261,7 @@ function parseTasksFromSpec(specContent: string): ParsedTask[] {
|
||||
}
|
||||
|
||||
// Check for task line
|
||||
if (trimmedLine.startsWith('- [ ]')) {
|
||||
if (trimmedLine.startsWith("- [ ]")) {
|
||||
const parsed = parseTaskLine(trimmedLine, currentPhase);
|
||||
if (parsed) {
|
||||
tasks.push(parsed);
|
||||
@@ -268,7 +278,9 @@ function parseTasksFromSpec(specContent: string): ParsedTask[] {
|
||||
*/
|
||||
function parseTaskLine(line: string, currentPhase?: string): ParsedTask | null {
|
||||
// Match pattern: - [ ] T###: Description | File: path
|
||||
const taskMatch = line.match(/- \[ \] (T\d{3}):\s*([^|]+)(?:\|\s*File:\s*(.+))?$/);
|
||||
const taskMatch = line.match(
|
||||
/- \[ \] (T\d{3}):\s*([^|]+)(?:\|\s*File:\s*(.+))?$/
|
||||
);
|
||||
if (!taskMatch) {
|
||||
// Try simpler pattern without file
|
||||
const simpleMatch = line.match(/- \[ \] (T\d{3}):\s*(.+)$/);
|
||||
@@ -277,7 +289,7 @@ function parseTaskLine(line: string, currentPhase?: string): ParsedTask | null {
|
||||
id: simpleMatch[1],
|
||||
description: simpleMatch[2].trim(),
|
||||
phase: currentPhase,
|
||||
status: 'pending',
|
||||
status: "pending",
|
||||
};
|
||||
}
|
||||
return null;
|
||||
@@ -288,7 +300,7 @@ function parseTaskLine(line: string, currentPhase?: string): ParsedTask | null {
|
||||
description: taskMatch[2].trim(),
|
||||
filePath: taskMatch[3]?.trim(),
|
||||
phase: currentPhase,
|
||||
status: 'pending',
|
||||
status: "pending",
|
||||
};
|
||||
}
|
||||
|
||||
@@ -318,7 +330,11 @@ interface AutoLoopState {
|
||||
}
|
||||
|
||||
interface PendingApproval {
|
||||
resolve: (result: { approved: boolean; editedPlan?: string; feedback?: string }) => void;
|
||||
resolve: (result: {
|
||||
approved: boolean;
|
||||
editedPlan?: string;
|
||||
feedback?: string;
|
||||
}) => void;
|
||||
reject: (error: Error) => void;
|
||||
featureId: string;
|
||||
projectPath: string;
|
||||
@@ -576,7 +592,9 @@ export class AutoModeService {
|
||||
// Continuation prompt is used when recovering from a plan approval
|
||||
// The plan was already approved, so skip the planning phase
|
||||
prompt = options.continuationPrompt;
|
||||
console.log(`[AutoMode] Using continuation prompt for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Using continuation prompt for feature ${featureId}`
|
||||
);
|
||||
} else {
|
||||
// Normal flow: build prompt with planning phase
|
||||
const featurePrompt = this.buildFeaturePrompt(feature);
|
||||
@@ -584,11 +602,11 @@ export class AutoModeService {
|
||||
prompt = planningPrefix + featurePrompt;
|
||||
|
||||
// Emit planning mode info
|
||||
if (feature.planningMode && feature.planningMode !== 'skip') {
|
||||
this.emitAutoModeEvent('planning_started', {
|
||||
if (feature.planningMode && feature.planningMode !== "skip") {
|
||||
this.emitAutoModeEvent("planning_started", {
|
||||
featureId: feature.id,
|
||||
mode: feature.planningMode,
|
||||
message: `Starting ${feature.planningMode} planning phase`
|
||||
message: `Starting ${feature.planningMode} planning phase`,
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -658,8 +676,12 @@ export class AutoModeService {
|
||||
});
|
||||
}
|
||||
} finally {
|
||||
console.log(`[AutoMode] Feature ${featureId} execution ended, cleaning up runningFeatures`);
|
||||
console.log(`[AutoMode] Pending approvals at cleanup: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`);
|
||||
console.log(
|
||||
`[AutoMode] Feature ${featureId} execution ended, cleaning up runningFeatures`
|
||||
);
|
||||
console.log(
|
||||
`[AutoMode] Pending approvals at cleanup: ${Array.from(this.pendingApprovals.keys()).join(", ") || "none"}`
|
||||
);
|
||||
this.runningFeatures.delete(featureId);
|
||||
}
|
||||
}
|
||||
@@ -706,7 +728,7 @@ export class AutoModeService {
|
||||
|
||||
if (hasContext) {
|
||||
// Load previous context and continue
|
||||
const context = await secureFs.readFile(contextPath, "utf-8") as string;
|
||||
const context = (await secureFs.readFile(contextPath, "utf-8")) as string;
|
||||
return this.executeFeatureWithContext(
|
||||
projectPath,
|
||||
featureId,
|
||||
@@ -766,7 +788,10 @@ export class AutoModeService {
|
||||
const contextPath = path.join(featureDir, "agent-output.md");
|
||||
let previousContext = "";
|
||||
try {
|
||||
previousContext = await secureFs.readFile(contextPath, "utf-8") as string;
|
||||
previousContext = (await secureFs.readFile(
|
||||
contextPath,
|
||||
"utf-8"
|
||||
)) as string;
|
||||
} catch {
|
||||
// No previous context
|
||||
}
|
||||
@@ -883,7 +908,10 @@ Address the follow-up instructions above. Review the previous work and make the
|
||||
const featurePath = path.join(featureDirForSave, "feature.json");
|
||||
|
||||
try {
|
||||
await secureFs.writeFile(featurePath, JSON.stringify(feature, null, 2));
|
||||
await secureFs.writeFile(
|
||||
featurePath,
|
||||
JSON.stringify(feature, null, 2)
|
||||
);
|
||||
} catch (error) {
|
||||
console.error(`[AutoMode] Failed to save feature.json:`, error);
|
||||
}
|
||||
@@ -903,7 +931,7 @@ Address the follow-up instructions above. Review the previous work and make the
|
||||
model,
|
||||
{
|
||||
projectPath,
|
||||
planningMode: 'skip', // Follow-ups don't require approval
|
||||
planningMode: "skip", // Follow-ups don't require approval
|
||||
previousContent: previousContext || undefined,
|
||||
systemPrompt: contextFiles || undefined,
|
||||
}
|
||||
@@ -1130,7 +1158,7 @@ Address the follow-up instructions above. Review the previous work and make the
|
||||
for (const file of textFiles) {
|
||||
// Use path.join for cross-platform path construction
|
||||
const filePath = path.join(contextDir, file);
|
||||
const content = await secureFs.readFile(filePath, "utf-8") as string;
|
||||
const content = (await secureFs.readFile(filePath, "utf-8")) as string;
|
||||
contents.push(`## ${file}\n\n${content}`);
|
||||
}
|
||||
|
||||
@@ -1249,7 +1277,6 @@ Format your response as a structured markdown document.`;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get current status
|
||||
*/
|
||||
@@ -1290,8 +1317,12 @@ Format your response as a structured markdown document.`;
|
||||
featureId: string,
|
||||
projectPath: string
|
||||
): Promise<{ approved: boolean; editedPlan?: string; feedback?: string }> {
|
||||
console.log(`[AutoMode] Registering pending approval for feature ${featureId}`);
|
||||
console.log(`[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`);
|
||||
console.log(
|
||||
`[AutoMode] Registering pending approval for feature ${featureId}`
|
||||
);
|
||||
console.log(
|
||||
`[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(", ") || "none"}`
|
||||
);
|
||||
return new Promise((resolve, reject) => {
|
||||
this.pendingApprovals.set(featureId, {
|
||||
resolve,
|
||||
@@ -1299,7 +1330,9 @@ Format your response as a structured markdown document.`;
|
||||
featureId,
|
||||
projectPath,
|
||||
});
|
||||
console.log(`[AutoMode] Pending approval registered for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Pending approval registered for feature ${featureId}`
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1314,61 +1347,89 @@ Format your response as a structured markdown document.`;
|
||||
feedback?: string,
|
||||
projectPathFromClient?: string
|
||||
): Promise<{ success: boolean; error?: string }> {
|
||||
console.log(`[AutoMode] resolvePlanApproval called for feature ${featureId}, approved=${approved}`);
|
||||
console.log(`[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`);
|
||||
console.log(
|
||||
`[AutoMode] resolvePlanApproval called for feature ${featureId}, approved=${approved}`
|
||||
);
|
||||
console.log(
|
||||
`[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(", ") || "none"}`
|
||||
);
|
||||
const pending = this.pendingApprovals.get(featureId);
|
||||
|
||||
if (!pending) {
|
||||
console.log(`[AutoMode] No pending approval in Map for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] No pending approval in Map for feature ${featureId}`
|
||||
);
|
||||
|
||||
// RECOVERY: If no pending approval but we have projectPath from client,
|
||||
// check if feature's planSpec.status is 'generated' and handle recovery
|
||||
if (projectPathFromClient) {
|
||||
console.log(`[AutoMode] Attempting recovery with projectPath: ${projectPathFromClient}`);
|
||||
const feature = await this.loadFeature(projectPathFromClient, featureId);
|
||||
console.log(
|
||||
`[AutoMode] Attempting recovery with projectPath: ${projectPathFromClient}`
|
||||
);
|
||||
const feature = await this.loadFeature(
|
||||
projectPathFromClient,
|
||||
featureId
|
||||
);
|
||||
|
||||
if (feature?.planSpec?.status === 'generated') {
|
||||
console.log(`[AutoMode] Feature ${featureId} has planSpec.status='generated', performing recovery`);
|
||||
if (feature?.planSpec?.status === "generated") {
|
||||
console.log(
|
||||
`[AutoMode] Feature ${featureId} has planSpec.status='generated', performing recovery`
|
||||
);
|
||||
|
||||
if (approved) {
|
||||
// Update planSpec to approved
|
||||
await this.updateFeaturePlanSpec(projectPathFromClient, featureId, {
|
||||
status: 'approved',
|
||||
status: "approved",
|
||||
approvedAt: new Date().toISOString(),
|
||||
reviewedByUser: true,
|
||||
content: editedPlan || feature.planSpec.content,
|
||||
});
|
||||
|
||||
// Build continuation prompt and re-run the feature
|
||||
const planContent = editedPlan || feature.planSpec.content || '';
|
||||
const planContent = editedPlan || feature.planSpec.content || "";
|
||||
let continuationPrompt = `The plan/specification has been approved. `;
|
||||
if (feedback) {
|
||||
continuationPrompt += `\n\nUser feedback: ${feedback}\n\n`;
|
||||
}
|
||||
continuationPrompt += `Now proceed with the implementation as specified in the plan:\n\n${planContent}\n\nImplement the feature now.`;
|
||||
|
||||
console.log(`[AutoMode] Starting recovery execution for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Starting recovery execution for feature ${featureId}`
|
||||
);
|
||||
|
||||
// Start feature execution with the continuation prompt (async, don't await)
|
||||
// Pass undefined for providedWorktreePath, use options for continuation prompt
|
||||
this.executeFeature(projectPathFromClient, featureId, true, false, undefined, {
|
||||
this.executeFeature(
|
||||
projectPathFromClient,
|
||||
featureId,
|
||||
true,
|
||||
false,
|
||||
undefined,
|
||||
{
|
||||
continuationPrompt,
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error(`[AutoMode] Recovery execution failed for feature ${featureId}:`, error);
|
||||
}
|
||||
).catch((error) => {
|
||||
console.error(
|
||||
`[AutoMode] Recovery execution failed for feature ${featureId}:`,
|
||||
error
|
||||
);
|
||||
});
|
||||
|
||||
return { success: true };
|
||||
} else {
|
||||
// Rejected - update status and emit event
|
||||
await this.updateFeaturePlanSpec(projectPathFromClient, featureId, {
|
||||
status: 'rejected',
|
||||
status: "rejected",
|
||||
reviewedByUser: true,
|
||||
});
|
||||
|
||||
await this.updateFeatureStatus(projectPathFromClient, featureId, 'backlog');
|
||||
await this.updateFeatureStatus(
|
||||
projectPathFromClient,
|
||||
featureId,
|
||||
"backlog"
|
||||
);
|
||||
|
||||
this.emitAutoModeEvent('plan_rejected', {
|
||||
this.emitAutoModeEvent("plan_rejected", {
|
||||
featureId,
|
||||
projectPath: projectPathFromClient,
|
||||
feedback,
|
||||
@@ -1379,16 +1440,23 @@ Format your response as a structured markdown document.`;
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`[AutoMode] ERROR: No pending approval found for feature ${featureId} and recovery not possible`);
|
||||
return { success: false, error: `No pending approval for feature ${featureId}` };
|
||||
console.log(
|
||||
`[AutoMode] ERROR: No pending approval found for feature ${featureId} and recovery not possible`
|
||||
);
|
||||
return {
|
||||
success: false,
|
||||
error: `No pending approval for feature ${featureId}`,
|
||||
};
|
||||
}
|
||||
console.log(`[AutoMode] Found pending approval for feature ${featureId}, proceeding...`);
|
||||
console.log(
|
||||
`[AutoMode] Found pending approval for feature ${featureId}, proceeding...`
|
||||
);
|
||||
|
||||
const { projectPath } = pending;
|
||||
|
||||
// Update feature's planSpec status
|
||||
await this.updateFeaturePlanSpec(projectPath, featureId, {
|
||||
status: approved ? 'approved' : 'rejected',
|
||||
status: approved ? "approved" : "rejected",
|
||||
approvedAt: approved ? new Date().toISOString() : undefined,
|
||||
reviewedByUser: true,
|
||||
content: editedPlan, // Update content if user provided an edited version
|
||||
@@ -1397,7 +1465,7 @@ Format your response as a structured markdown document.`;
|
||||
// If rejected with feedback, we can store it for the user to see
|
||||
if (!approved && feedback) {
|
||||
// Emit event so client knows the rejection reason
|
||||
this.emitAutoModeEvent('plan_rejected', {
|
||||
this.emitAutoModeEvent("plan_rejected", {
|
||||
featureId,
|
||||
projectPath,
|
||||
feedback,
|
||||
@@ -1415,15 +1483,25 @@ Format your response as a structured markdown document.`;
|
||||
* Cancel a pending plan approval (e.g., when feature is stopped).
|
||||
*/
|
||||
cancelPlanApproval(featureId: string): void {
|
||||
console.log(`[AutoMode] cancelPlanApproval called for feature ${featureId}`);
|
||||
console.log(`[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`);
|
||||
console.log(
|
||||
`[AutoMode] cancelPlanApproval called for feature ${featureId}`
|
||||
);
|
||||
console.log(
|
||||
`[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(", ") || "none"}`
|
||||
);
|
||||
const pending = this.pendingApprovals.get(featureId);
|
||||
if (pending) {
|
||||
console.log(`[AutoMode] Found and cancelling pending approval for feature ${featureId}`);
|
||||
pending.reject(new Error('Plan approval cancelled - feature was stopped'));
|
||||
console.log(
|
||||
`[AutoMode] Found and cancelling pending approval for feature ${featureId}`
|
||||
);
|
||||
pending.reject(
|
||||
new Error("Plan approval cancelled - feature was stopped")
|
||||
);
|
||||
this.pendingApprovals.delete(featureId);
|
||||
} else {
|
||||
console.log(`[AutoMode] No pending approval to cancel for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] No pending approval to cancel for feature ${featureId}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1436,7 +1514,6 @@ Format your response as a structured markdown document.`;
|
||||
|
||||
// Private helpers
|
||||
|
||||
|
||||
/**
|
||||
* Find an existing worktree for a given branch by checking git worktree list
|
||||
*/
|
||||
@@ -1498,7 +1575,7 @@ Format your response as a structured markdown document.`;
|
||||
const featurePath = path.join(featureDir, "feature.json");
|
||||
|
||||
try {
|
||||
const data = await secureFs.readFile(featurePath, "utf-8") as string;
|
||||
const data = (await secureFs.readFile(featurePath, "utf-8")) as string;
|
||||
return JSON.parse(data);
|
||||
} catch {
|
||||
return null;
|
||||
@@ -1515,7 +1592,7 @@ Format your response as a structured markdown document.`;
|
||||
const featurePath = path.join(featureDir, "feature.json");
|
||||
|
||||
try {
|
||||
const data = await secureFs.readFile(featurePath, "utf-8") as string;
|
||||
const data = (await secureFs.readFile(featurePath, "utf-8")) as string;
|
||||
const feature = JSON.parse(data);
|
||||
feature.status = status;
|
||||
feature.updatedAt = new Date().toISOString();
|
||||
@@ -1550,13 +1627,13 @@ Format your response as a structured markdown document.`;
|
||||
);
|
||||
|
||||
try {
|
||||
const data = await secureFs.readFile(featurePath, "utf-8") as string;
|
||||
const data = (await secureFs.readFile(featurePath, "utf-8")) as string;
|
||||
const feature = JSON.parse(data);
|
||||
|
||||
// Initialize planSpec if it doesn't exist
|
||||
if (!feature.planSpec) {
|
||||
feature.planSpec = {
|
||||
status: 'pending',
|
||||
status: "pending",
|
||||
version: 1,
|
||||
reviewedByUser: false,
|
||||
};
|
||||
@@ -1573,7 +1650,10 @@ Format your response as a structured markdown document.`;
|
||||
feature.updatedAt = new Date().toISOString();
|
||||
await secureFs.writeFile(featurePath, JSON.stringify(feature, null, 2));
|
||||
} catch (error) {
|
||||
console.error(`[AutoMode] Failed to update planSpec for ${featureId}:`, error);
|
||||
console.error(
|
||||
`[AutoMode] Failed to update planSpec for ${featureId}:`,
|
||||
error
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1582,7 +1662,9 @@ Format your response as a structured markdown document.`;
|
||||
const featuresDir = getFeaturesDir(projectPath);
|
||||
|
||||
try {
|
||||
const entries = await secureFs.readdir(featuresDir, { withFileTypes: true });
|
||||
const entries = await secureFs.readdir(featuresDir, {
|
||||
withFileTypes: true,
|
||||
});
|
||||
const allFeatures: Feature[] = [];
|
||||
const pendingFeatures: Feature[] = [];
|
||||
|
||||
@@ -1595,7 +1677,10 @@ Format your response as a structured markdown document.`;
|
||||
"feature.json"
|
||||
);
|
||||
try {
|
||||
const data = await secureFs.readFile(featurePath, "utf-8") as string;
|
||||
const data = (await secureFs.readFile(
|
||||
featurePath,
|
||||
"utf-8"
|
||||
)) as string;
|
||||
const feature = JSON.parse(data);
|
||||
allFeatures.push(feature);
|
||||
|
||||
@@ -1617,7 +1702,7 @@ Format your response as a structured markdown document.`;
|
||||
const { orderedFeatures } = resolveDependencies(pendingFeatures);
|
||||
|
||||
// Filter to only features with satisfied dependencies
|
||||
const readyFeatures = orderedFeatures.filter(feature =>
|
||||
const readyFeatures = orderedFeatures.filter((feature: Feature) =>
|
||||
areDependenciesSatisfied(feature, allFeatures)
|
||||
);
|
||||
|
||||
@@ -1649,24 +1734,25 @@ Format your response as a structured markdown document.`;
|
||||
* Get the planning prompt prefix based on feature's planning mode
|
||||
*/
|
||||
private getPlanningPromptPrefix(feature: Feature): string {
|
||||
const mode = feature.planningMode || 'skip';
|
||||
const mode = feature.planningMode || "skip";
|
||||
|
||||
if (mode === 'skip') {
|
||||
return ''; // No planning phase
|
||||
if (mode === "skip") {
|
||||
return ""; // No planning phase
|
||||
}
|
||||
|
||||
// For lite mode, use the approval variant if requirePlanApproval is true
|
||||
let promptKey: string = mode;
|
||||
if (mode === 'lite' && feature.requirePlanApproval === true) {
|
||||
promptKey = 'lite_with_approval';
|
||||
if (mode === "lite" && feature.requirePlanApproval === true) {
|
||||
promptKey = "lite_with_approval";
|
||||
}
|
||||
|
||||
const planningPrompt = PLANNING_PROMPTS[promptKey as keyof typeof PLANNING_PROMPTS];
|
||||
const planningPrompt =
|
||||
PLANNING_PROMPTS[promptKey as keyof typeof PLANNING_PROMPTS];
|
||||
if (!planningPrompt) {
|
||||
return '';
|
||||
return "";
|
||||
}
|
||||
|
||||
return planningPrompt + '\n\n---\n\n## Feature Request\n\n';
|
||||
return planningPrompt + "\n\n---\n\n## Feature Request\n\n";
|
||||
}
|
||||
|
||||
private buildFeaturePrompt(feature: Feature): string {
|
||||
@@ -1760,17 +1846,18 @@ This helps parse your summary correctly in the output logs.`;
|
||||
}
|
||||
): Promise<void> {
|
||||
const finalProjectPath = options?.projectPath || projectPath;
|
||||
const planningMode = options?.planningMode || 'skip';
|
||||
const planningMode = options?.planningMode || "skip";
|
||||
const previousContent = options?.previousContent;
|
||||
|
||||
// Check if this planning mode can generate a spec/plan that needs approval
|
||||
// - spec and full always generate specs
|
||||
// - lite only generates approval-ready content when requirePlanApproval is true
|
||||
const planningModeRequiresApproval =
|
||||
planningMode === 'spec' ||
|
||||
planningMode === 'full' ||
|
||||
(planningMode === 'lite' && options?.requirePlanApproval === true);
|
||||
const requiresApproval = planningModeRequiresApproval && options?.requirePlanApproval === true;
|
||||
planningMode === "spec" ||
|
||||
planningMode === "full" ||
|
||||
(planningMode === "lite" && options?.requirePlanApproval === true);
|
||||
const requiresApproval =
|
||||
planningModeRequiresApproval && options?.requirePlanApproval === true;
|
||||
|
||||
// CI/CD Mock Mode: Return early with mock response when AUTOMAKER_MOCK_AGENT is set
|
||||
// This prevents actual API calls during automated testing
|
||||
@@ -1953,11 +2040,15 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
scheduleWrite();
|
||||
|
||||
// Check for [SPEC_GENERATED] marker in planning modes (spec or full)
|
||||
if (planningModeRequiresApproval && !specDetected && responseText.includes('[SPEC_GENERATED]')) {
|
||||
if (
|
||||
planningModeRequiresApproval &&
|
||||
!specDetected &&
|
||||
responseText.includes("[SPEC_GENERATED]")
|
||||
) {
|
||||
specDetected = true;
|
||||
|
||||
// Extract plan content (everything before the marker)
|
||||
const markerIndex = responseText.indexOf('[SPEC_GENERATED]');
|
||||
const markerIndex = responseText.indexOf("[SPEC_GENERATED]");
|
||||
const planContent = responseText.substring(0, markerIndex).trim();
|
||||
|
||||
// Parse tasks from the generated spec (for spec and full modes)
|
||||
@@ -1965,14 +2056,18 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
let parsedTasks = parseTasksFromSpec(planContent);
|
||||
const tasksTotal = parsedTasks.length;
|
||||
|
||||
console.log(`[AutoMode] Parsed ${tasksTotal} tasks from spec for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Parsed ${tasksTotal} tasks from spec for feature ${featureId}`
|
||||
);
|
||||
if (parsedTasks.length > 0) {
|
||||
console.log(`[AutoMode] Tasks: ${parsedTasks.map(t => t.id).join(', ')}`);
|
||||
console.log(
|
||||
`[AutoMode] Tasks: ${parsedTasks.map((t) => t.id).join(", ")}`
|
||||
);
|
||||
}
|
||||
|
||||
// Update planSpec status to 'generated' and save content with parsed tasks
|
||||
await this.updateFeaturePlanSpec(projectPath, featureId, {
|
||||
status: 'generated',
|
||||
status: "generated",
|
||||
content: planContent,
|
||||
version: 1,
|
||||
generatedAt: new Date().toISOString(),
|
||||
@@ -1996,13 +2091,18 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
let planApproved = false;
|
||||
|
||||
while (!planApproved) {
|
||||
console.log(`[AutoMode] Spec v${planVersion} generated for feature ${featureId}, waiting for approval`);
|
||||
console.log(
|
||||
`[AutoMode] Spec v${planVersion} generated for feature ${featureId}, waiting for approval`
|
||||
);
|
||||
|
||||
// CRITICAL: Register pending approval BEFORE emitting event
|
||||
const approvalPromise = this.waitForPlanApproval(featureId, projectPath);
|
||||
const approvalPromise = this.waitForPlanApproval(
|
||||
featureId,
|
||||
projectPath
|
||||
);
|
||||
|
||||
// Emit plan_approval_required event
|
||||
this.emitAutoModeEvent('plan_approval_required', {
|
||||
this.emitAutoModeEvent("plan_approval_required", {
|
||||
featureId,
|
||||
projectPath,
|
||||
planContent: currentPlanContent,
|
||||
@@ -2016,15 +2116,21 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
|
||||
if (approvalResult.approved) {
|
||||
// User approved the plan
|
||||
console.log(`[AutoMode] Plan v${planVersion} approved for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Plan v${planVersion} approved for feature ${featureId}`
|
||||
);
|
||||
planApproved = true;
|
||||
|
||||
// If user provided edits, use the edited version
|
||||
if (approvalResult.editedPlan) {
|
||||
approvedPlanContent = approvalResult.editedPlan;
|
||||
await this.updateFeaturePlanSpec(projectPath, featureId, {
|
||||
await this.updateFeaturePlanSpec(
|
||||
projectPath,
|
||||
featureId,
|
||||
{
|
||||
content: approvalResult.editedPlan,
|
||||
});
|
||||
}
|
||||
);
|
||||
} else {
|
||||
approvedPlanContent = currentPlanContent;
|
||||
}
|
||||
@@ -2033,30 +2139,37 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
userFeedback = approvalResult.feedback;
|
||||
|
||||
// Emit approval event
|
||||
this.emitAutoModeEvent('plan_approved', {
|
||||
this.emitAutoModeEvent("plan_approved", {
|
||||
featureId,
|
||||
projectPath,
|
||||
hasEdits: !!approvalResult.editedPlan,
|
||||
planVersion,
|
||||
});
|
||||
|
||||
} else {
|
||||
// User rejected - check if they provided feedback for revision
|
||||
const hasFeedback = approvalResult.feedback && approvalResult.feedback.trim().length > 0;
|
||||
const hasEdits = approvalResult.editedPlan && approvalResult.editedPlan.trim().length > 0;
|
||||
const hasFeedback =
|
||||
approvalResult.feedback &&
|
||||
approvalResult.feedback.trim().length > 0;
|
||||
const hasEdits =
|
||||
approvalResult.editedPlan &&
|
||||
approvalResult.editedPlan.trim().length > 0;
|
||||
|
||||
if (!hasFeedback && !hasEdits) {
|
||||
// No feedback or edits = explicit cancel
|
||||
console.log(`[AutoMode] Plan rejected without feedback for feature ${featureId}, cancelling`);
|
||||
throw new Error('Plan cancelled by user');
|
||||
console.log(
|
||||
`[AutoMode] Plan rejected without feedback for feature ${featureId}, cancelling`
|
||||
);
|
||||
throw new Error("Plan cancelled by user");
|
||||
}
|
||||
|
||||
// User wants revisions - regenerate the plan
|
||||
console.log(`[AutoMode] Plan v${planVersion} rejected with feedback for feature ${featureId}, regenerating...`);
|
||||
console.log(
|
||||
`[AutoMode] Plan v${planVersion} rejected with feedback for feature ${featureId}, regenerating...`
|
||||
);
|
||||
planVersion++;
|
||||
|
||||
// Emit revision event
|
||||
this.emitAutoModeEvent('plan_revision_requested', {
|
||||
this.emitAutoModeEvent("plan_revision_requested", {
|
||||
featureId,
|
||||
projectPath,
|
||||
feedback: approvalResult.feedback,
|
||||
@@ -2071,7 +2184,7 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
${hasEdits ? approvalResult.editedPlan : currentPlanContent}
|
||||
|
||||
## User Feedback
|
||||
${approvalResult.feedback || 'Please revise the plan based on the edits above.'}
|
||||
${approvalResult.feedback || "Please revise the plan based on the edits above."}
|
||||
|
||||
## Instructions
|
||||
Please regenerate the specification incorporating the user's feedback.
|
||||
@@ -2082,7 +2195,7 @@ After generating the revised spec, output:
|
||||
|
||||
// Update status to regenerating
|
||||
await this.updateFeaturePlanSpec(projectPath, featureId, {
|
||||
status: 'generating',
|
||||
status: "generating",
|
||||
version: planVersion,
|
||||
});
|
||||
|
||||
@@ -2109,27 +2222,38 @@ After generating the revised spec, output:
|
||||
}
|
||||
}
|
||||
} else if (msg.type === "error") {
|
||||
throw new Error(msg.error || "Error during plan revision");
|
||||
} else if (msg.type === "result" && msg.subtype === "success") {
|
||||
throw new Error(
|
||||
msg.error || "Error during plan revision"
|
||||
);
|
||||
} else if (
|
||||
msg.type === "result" &&
|
||||
msg.subtype === "success"
|
||||
) {
|
||||
revisionText += msg.result || "";
|
||||
}
|
||||
}
|
||||
|
||||
// Extract new plan content
|
||||
const markerIndex = revisionText.indexOf('[SPEC_GENERATED]');
|
||||
const markerIndex =
|
||||
revisionText.indexOf("[SPEC_GENERATED]");
|
||||
if (markerIndex > 0) {
|
||||
currentPlanContent = revisionText.substring(0, markerIndex).trim();
|
||||
currentPlanContent = revisionText
|
||||
.substring(0, markerIndex)
|
||||
.trim();
|
||||
} else {
|
||||
currentPlanContent = revisionText.trim();
|
||||
}
|
||||
|
||||
// Re-parse tasks from revised plan
|
||||
const revisedTasks = parseTasksFromSpec(currentPlanContent);
|
||||
console.log(`[AutoMode] Revised plan has ${revisedTasks.length} tasks`);
|
||||
const revisedTasks =
|
||||
parseTasksFromSpec(currentPlanContent);
|
||||
console.log(
|
||||
`[AutoMode] Revised plan has ${revisedTasks.length} tasks`
|
||||
);
|
||||
|
||||
// Update planSpec with revised content
|
||||
await this.updateFeaturePlanSpec(projectPath, featureId, {
|
||||
status: 'generated',
|
||||
status: "generated",
|
||||
content: currentPlanContent,
|
||||
version: planVersion,
|
||||
tasks: revisedTasks,
|
||||
@@ -2142,21 +2266,23 @@ After generating the revised spec, output:
|
||||
|
||||
responseText += revisionText;
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
if ((error as Error).message.includes('cancelled')) {
|
||||
if ((error as Error).message.includes("cancelled")) {
|
||||
throw error;
|
||||
}
|
||||
throw new Error(`Plan approval failed: ${(error as Error).message}`);
|
||||
throw new Error(
|
||||
`Plan approval failed: ${(error as Error).message}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// Auto-approve: requirePlanApproval is false, just continue without pausing
|
||||
console.log(`[AutoMode] Spec generated for feature ${featureId}, auto-approving (requirePlanApproval=false)`);
|
||||
console.log(
|
||||
`[AutoMode] Spec generated for feature ${featureId}, auto-approving (requirePlanApproval=false)`
|
||||
);
|
||||
|
||||
// Emit info event for frontend
|
||||
this.emitAutoModeEvent('plan_auto_approved', {
|
||||
this.emitAutoModeEvent("plan_auto_approved", {
|
||||
featureId,
|
||||
projectPath,
|
||||
planContent,
|
||||
@@ -2168,11 +2294,13 @@ After generating the revised spec, output:
|
||||
|
||||
// CRITICAL: After approval, we need to make a second call to continue implementation
|
||||
// The agent is waiting for "approved" - we need to send it and continue
|
||||
console.log(`[AutoMode] Making continuation call after plan approval for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Making continuation call after plan approval for feature ${featureId}`
|
||||
);
|
||||
|
||||
// Update planSpec status to approved (handles both manual and auto-approval paths)
|
||||
await this.updateFeaturePlanSpec(projectPath, featureId, {
|
||||
status: 'approved',
|
||||
status: "approved",
|
||||
approvedAt: new Date().toISOString(),
|
||||
reviewedByUser: requiresApproval,
|
||||
});
|
||||
@@ -2183,19 +2311,27 @@ After generating the revised spec, output:
|
||||
// ========================================
|
||||
|
||||
if (parsedTasks.length > 0) {
|
||||
console.log(`[AutoMode] Starting multi-agent execution: ${parsedTasks.length} tasks for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Starting multi-agent execution: ${parsedTasks.length} tasks for feature ${featureId}`
|
||||
);
|
||||
|
||||
// Execute each task with a separate agent
|
||||
for (let taskIndex = 0; taskIndex < parsedTasks.length; taskIndex++) {
|
||||
for (
|
||||
let taskIndex = 0;
|
||||
taskIndex < parsedTasks.length;
|
||||
taskIndex++
|
||||
) {
|
||||
const task = parsedTasks[taskIndex];
|
||||
|
||||
// Check for abort
|
||||
if (abortController.signal.aborted) {
|
||||
throw new Error('Feature execution aborted');
|
||||
throw new Error("Feature execution aborted");
|
||||
}
|
||||
|
||||
// Emit task started
|
||||
console.log(`[AutoMode] Starting task ${task.id}: ${task.description}`);
|
||||
console.log(
|
||||
`[AutoMode] Starting task ${task.id}: ${task.description}`
|
||||
);
|
||||
this.emitAutoModeEvent("auto_mode_task_started", {
|
||||
featureId,
|
||||
projectPath,
|
||||
@@ -2211,7 +2347,13 @@ After generating the revised spec, output:
|
||||
});
|
||||
|
||||
// Build focused prompt for this specific task
|
||||
const taskPrompt = this.buildTaskPrompt(task, parsedTasks, taskIndex, approvedPlanContent, userFeedback);
|
||||
const taskPrompt = this.buildTaskPrompt(
|
||||
task,
|
||||
parsedTasks,
|
||||
taskIndex,
|
||||
approvedPlanContent,
|
||||
userFeedback
|
||||
);
|
||||
|
||||
// Execute task with dedicated agent
|
||||
const taskStream = provider.executeQuery({
|
||||
@@ -2245,15 +2387,22 @@ After generating the revised spec, output:
|
||||
}
|
||||
}
|
||||
} else if (msg.type === "error") {
|
||||
throw new Error(msg.error || `Error during task ${task.id}`);
|
||||
} else if (msg.type === "result" && msg.subtype === "success") {
|
||||
throw new Error(
|
||||
msg.error || `Error during task ${task.id}`
|
||||
);
|
||||
} else if (
|
||||
msg.type === "result" &&
|
||||
msg.subtype === "success"
|
||||
) {
|
||||
taskOutput += msg.result || "";
|
||||
responseText += msg.result || "";
|
||||
}
|
||||
}
|
||||
|
||||
// Emit task completed
|
||||
console.log(`[AutoMode] Task ${task.id} completed for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Task ${task.id} completed for feature ${featureId}`
|
||||
);
|
||||
this.emitAutoModeEvent("auto_mode_task_complete", {
|
||||
featureId,
|
||||
projectPath,
|
||||
@@ -2284,13 +2433,17 @@ After generating the revised spec, output:
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`[AutoMode] All ${parsedTasks.length} tasks completed for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] All ${parsedTasks.length} tasks completed for feature ${featureId}`
|
||||
);
|
||||
} else {
|
||||
// No parsed tasks - fall back to single-agent execution
|
||||
console.log(`[AutoMode] No parsed tasks, using single-agent execution for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] No parsed tasks, using single-agent execution for feature ${featureId}`
|
||||
);
|
||||
|
||||
const continuationPrompt = `The plan/specification has been approved. Now implement it.
|
||||
${userFeedback ? `\n## User Feedback\n${userFeedback}\n` : ''}
|
||||
${userFeedback ? `\n## User Feedback\n${userFeedback}\n` : ""}
|
||||
## Approved Plan
|
||||
|
||||
${approvedPlanContent}
|
||||
@@ -2326,14 +2479,21 @@ Implement all the changes described in the plan above.`;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === "error") {
|
||||
throw new Error(msg.error || "Unknown error during implementation");
|
||||
} else if (msg.type === "result" && msg.subtype === "success") {
|
||||
throw new Error(
|
||||
msg.error || "Unknown error during implementation"
|
||||
);
|
||||
} else if (
|
||||
msg.type === "result" &&
|
||||
msg.subtype === "success"
|
||||
) {
|
||||
responseText += msg.result || "";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`[AutoMode] Implementation completed for feature ${featureId}`);
|
||||
console.log(
|
||||
`[AutoMode] Implementation completed for feature ${featureId}`
|
||||
);
|
||||
// Exit the original stream loop since continuation is done
|
||||
break streamLoop;
|
||||
}
|
||||
@@ -2410,9 +2570,16 @@ ${context}
## Instructions
Review the previous work and continue the implementation. If the feature appears complete, verify it works correctly.`;

return this.executeFeature(projectPath, featureId, useWorktrees, false, undefined, {
return this.executeFeature(
projectPath,
featureId,
useWorktrees,
false,
undefined,
{
continuationPrompt: prompt,
});
}
);
}

/**
@@ -2437,8 +2604,8 @@ You are executing a specific task as part of a larger feature implementation.

**Task ID:** ${task.id}
**Description:** ${task.description}
${task.filePath ? `**Primary File:** ${task.filePath}` : ''}
${task.phase ? `**Phase:** ${task.phase}` : ''}
${task.filePath ? `**Primary File:** ${task.filePath}` : ""}
${task.phase ? `**Phase:** ${task.phase}` : ""}

## Context

@@ -2447,7 +2614,7 @@ ${task.phase ? `**Phase:** ${task.phase}` : ''}
// Show what's already done
if (completedTasks.length > 0) {
prompt += `### Already Completed (${completedTasks.length} tasks)
${completedTasks.map(t => `- [x] ${t.id}: ${t.description}`).join('\n')}
${completedTasks.map((t) => `- [x] ${t.id}: ${t.description}`).join("\n")}

`;
}
@@ -2455,8 +2622,11 @@ ${completedTasks.map(t => `- [x] ${t.id}: ${t.description}`).join('\n')}
// Show remaining tasks
if (remainingTasks.length > 0) {
prompt += `### Coming Up Next (${remainingTasks.length} tasks remaining)
${remainingTasks.slice(0, 3).map(t => `- [ ] ${t.id}: ${t.description}`).join('\n')}
${remainingTasks.length > 3 ? `... and ${remainingTasks.length - 3} more tasks` : ''}
${remainingTasks
.slice(0, 3)
.map((t) => `- [ ] ${t.id}: ${t.description}`)
.join("\n")}
${remainingTasks.length > 3 ? `... and ${remainingTasks.length - 3} more tasks` : ""}

`;
}

@@ -4,49 +4,20 @@
*/

import path from "path";
import type { Feature } from "@automaker/types";
import { createLogger } from "@automaker/utils";
import * as secureFs from "../lib/secure-fs.js";
import {
getFeaturesDir,
getFeatureDir,
getFeatureImagesDir,
ensureAutomakerDir,
} from "../lib/automaker-paths.js";
} from "@automaker/platform";

export interface Feature {
id: string;
title?: string;
titleGenerating?: boolean;
category: string;
description: string;
steps?: string[];
passes?: boolean;
priority?: number;
status?: string;
dependencies?: string[];
spec?: string;
model?: string;
imagePaths?: Array<string | { path: string; [key: string]: unknown }>;
// Branch info - worktree path is derived at runtime from branchName
branchName?: string; // Name of the feature branch (undefined = use current worktree)
skipTests?: boolean;
thinkingLevel?: string;
planningMode?: 'skip' | 'lite' | 'spec' | 'full';
requirePlanApproval?: boolean;
planSpec?: {
status: 'pending' | 'generating' | 'generated' | 'approved' | 'rejected';
content?: string;
version: number;
generatedAt?: string;
approvedAt?: string;
reviewedByUser: boolean;
tasksCompleted?: number;
tasksTotal?: number;
};
error?: string;
summary?: string;
startedAt?: string;
[key: string]: unknown; // Keep catch-all for extensibility
}
const logger = createLogger("FeatureLoader");

// Re-export Feature type for convenience
export type { Feature };

export class FeatureLoader {
/**
|
||||
@@ -68,8 +39,12 @@ export class FeatureLoader {
|
||||
*/
|
||||
private async deleteOrphanedImages(
|
||||
projectPath: string,
|
||||
oldPaths: Array<string | { path: string; [key: string]: unknown }> | undefined,
|
||||
newPaths: Array<string | { path: string; [key: string]: unknown }> | undefined
|
||||
oldPaths:
|
||||
| Array<string | { path: string; [key: string]: unknown }>
|
||||
| undefined,
|
||||
newPaths:
|
||||
| Array<string | { path: string; [key: string]: unknown }>
|
||||
| undefined
|
||||
): Promise<void> {
|
||||
if (!oldPaths || oldPaths.length === 0) {
|
||||
return;
|
||||
@@ -92,7 +67,7 @@ export class FeatureLoader {
|
||||
console.log(`[FeatureLoader] Deleted orphaned image: ${oldPath}`);
|
||||
} catch (error) {
|
||||
// Ignore errors when deleting (file may already be gone)
|
||||
console.warn(
|
||||
logger.warn(
|
||||
`[FeatureLoader] Failed to delete image: ${oldPath}`,
|
||||
error
|
||||
);
|
||||
@@ -118,8 +93,9 @@ export class FeatureLoader {
|
||||
const featureImagesDir = this.getFeatureImagesDir(projectPath, featureId);
|
||||
await secureFs.mkdir(featureImagesDir, { recursive: true });
|
||||
|
||||
const updatedPaths: Array<string | { path: string; [key: string]: unknown }> =
|
||||
[];
|
||||
const updatedPaths: Array<
|
||||
string | { path: string; [key: string]: unknown }
|
||||
> = [];
|
||||
|
||||
for (const imagePath of imagePaths) {
|
||||
try {
|
||||
@@ -141,7 +117,7 @@ export class FeatureLoader {
|
||||
try {
|
||||
await secureFs.access(fullOriginalPath);
|
||||
} catch {
|
||||
console.warn(
|
||||
logger.warn(
|
||||
`[FeatureLoader] Image not found, skipping: ${fullOriginalPath}`
|
||||
);
|
||||
continue;
|
||||
@@ -171,9 +147,10 @@ export class FeatureLoader {
|
||||
updatedPaths.push({ ...imagePath, path: newPath });
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`[FeatureLoader] Failed to migrate image:`, error);
|
||||
// Keep original path if migration fails
|
||||
updatedPaths.push(imagePath);
|
||||
logger.error(`Failed to migrate image:`, error);
|
||||
// Rethrow error to let caller decide how to handle it
|
||||
// Keeping original path could lead to broken references
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,14 +168,20 @@ export class FeatureLoader {
|
||||
* Get the path to a feature's feature.json file
|
||||
*/
|
||||
getFeatureJsonPath(projectPath: string, featureId: string): string {
|
||||
return path.join(this.getFeatureDir(projectPath, featureId), "feature.json");
|
||||
return path.join(
|
||||
this.getFeatureDir(projectPath, featureId),
|
||||
"feature.json"
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the path to a feature's agent-output.md file
|
||||
*/
|
||||
getAgentOutputPath(projectPath: string, featureId: string): string {
|
||||
return path.join(this.getFeatureDir(projectPath, featureId), "agent-output.md");
|
||||
return path.join(
|
||||
this.getFeatureDir(projectPath, featureId),
|
||||
"agent-output.md"
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -223,7 +206,9 @@ export class FeatureLoader {
|
||||
}
|
||||
|
||||
// Read all feature directories
|
||||
const entries = await secureFs.readdir(featuresDir, { withFileTypes: true }) as any[];
|
||||
const entries = (await secureFs.readdir(featuresDir, {
|
||||
withFileTypes: true,
|
||||
})) as any[];
|
||||
const featureDirs = entries.filter((entry) => entry.isDirectory());
|
||||
|
||||
// Load each feature
|
||||
@@ -233,11 +218,14 @@ export class FeatureLoader {
|
||||
const featureJsonPath = this.getFeatureJsonPath(projectPath, featureId);
|
||||
|
||||
try {
|
||||
const content = await secureFs.readFile(featureJsonPath, "utf-8") as string;
|
||||
const content = (await secureFs.readFile(
|
||||
featureJsonPath,
|
||||
"utf-8"
|
||||
)) as string;
|
||||
const feature = JSON.parse(content);
|
||||
|
||||
if (!feature.id) {
|
||||
console.warn(
|
||||
logger.warn(
|
||||
`[FeatureLoader] Feature ${featureId} missing required 'id' field, skipping`
|
||||
);
|
||||
continue;
|
||||
@@ -248,11 +236,11 @@ export class FeatureLoader {
|
||||
if ((error as NodeJS.ErrnoException).code === "ENOENT") {
|
||||
continue;
|
||||
} else if (error instanceof SyntaxError) {
|
||||
console.warn(
|
||||
logger.warn(
|
||||
`[FeatureLoader] Failed to parse feature.json for ${featureId}: ${error.message}`
|
||||
);
|
||||
} else {
|
||||
console.error(
|
||||
logger.error(
|
||||
`[FeatureLoader] Failed to load feature ${featureId}:`,
|
||||
(error as Error).message
|
||||
);
|
||||
@@ -269,7 +257,7 @@ export class FeatureLoader {
|
||||
|
||||
return features;
|
||||
} catch (error) {
|
||||
console.error("[FeatureLoader] Failed to get all features:", error);
|
||||
logger.error("Failed to get all features:", error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
@@ -280,13 +268,16 @@ export class FeatureLoader {
|
||||
async get(projectPath: string, featureId: string): Promise<Feature | null> {
|
||||
try {
|
||||
const featureJsonPath = this.getFeatureJsonPath(projectPath, featureId);
|
||||
const content = await secureFs.readFile(featureJsonPath, "utf-8") as string;
|
||||
const content = (await secureFs.readFile(
|
||||
featureJsonPath,
|
||||
"utf-8"
|
||||
)) as string;
|
||||
return JSON.parse(content);
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code === "ENOENT") {
|
||||
return null;
|
||||
}
|
||||
console.error(
|
||||
logger.error(
|
||||
`[FeatureLoader] Failed to get feature ${featureId}:`,
|
||||
error
|
||||
);
|
||||
@@ -334,7 +325,7 @@ export class FeatureLoader {
|
||||
"utf-8"
|
||||
);
|
||||
|
||||
console.log(`[FeatureLoader] Created feature ${featureId}`);
|
||||
logger.info(`Created feature ${featureId}`);
|
||||
return feature;
|
||||
}
|
||||
|
||||
@@ -386,7 +377,7 @@ export class FeatureLoader {
|
||||
"utf-8"
|
||||
);
|
||||
|
||||
console.log(`[FeatureLoader] Updated feature ${featureId}`);
|
||||
logger.info(`Updated feature ${featureId}`);
|
||||
return updatedFeature;
|
||||
}
|
||||
|
||||
@@ -400,7 +391,7 @@ export class FeatureLoader {
|
||||
console.log(`[FeatureLoader] Deleted feature ${featureId}`);
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error(
|
||||
logger.error(
|
||||
`[FeatureLoader] Failed to delete feature ${featureId}:`,
|
||||
error
|
||||
);
|
||||
@@ -417,13 +408,16 @@ export class FeatureLoader {
|
||||
): Promise<string | null> {
|
||||
try {
|
||||
const agentOutputPath = this.getAgentOutputPath(projectPath, featureId);
|
||||
const content = await secureFs.readFile(agentOutputPath, "utf-8") as string;
|
||||
const content = (await secureFs.readFile(
|
||||
agentOutputPath,
|
||||
"utf-8"
|
||||
)) as string;
|
||||
return content;
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code === "ENOENT") {
|
||||
return null;
|
||||
}
|
||||
console.error(
|
||||
logger.error(
|
||||
`[FeatureLoader] Failed to get agent output for ${featureId}:`,
|
||||
error
|
||||
);
|
||||
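
For illustration, a minimal sketch of how the shared logger used throughout this file might replace the removed console calls (assuming createLogger from @automaker/utils applies the scoped "[FeatureLoader]" prefix, as the test expectations later in this diff suggest):

import { createLogger } from "@automaker/utils";

const logger = createLogger("FeatureLoader");

function reportFeatureEvent(featureId: string, error?: unknown): void {
  // The logger adds the "[FeatureLoader]" prefix, so call sites pass only the message.
  if (error) {
    logger.error(`Failed to load feature ${featureId}:`, error);
  } else {
    logger.info(`Created feature ${featureId}`);
  }
}
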
@@ -7,15 +7,16 @@
|
||||
* - Per-project settings ({projectPath}/.automaker/settings.json)
|
||||
*/
|
||||
|
||||
import { createLogger } from "@automaker/utils";
|
||||
import * as secureFs from "../lib/secure-fs.js";
|
||||
import { createLogger } from "../lib/logger.js";
|
||||
|
||||
import {
|
||||
getGlobalSettingsPath,
|
||||
getCredentialsPath,
|
||||
getProjectSettingsPath,
|
||||
ensureDataDir,
|
||||
ensureAutomakerDir,
|
||||
} from "../lib/automaker-paths.js";
|
||||
} from "@automaker/platform";
|
||||
import type {
|
||||
GlobalSettings,
|
||||
Credentials,
|
||||
@@ -64,7 +65,7 @@ async function atomicWriteJson(filePath: string, data: unknown): Promise<void> {
|
||||
*/
|
||||
async function readJsonFile<T>(filePath: string, defaultValue: T): Promise<T> {
|
||||
try {
|
||||
const content = await secureFs.readFile(filePath, "utf-8") as string;
|
||||
const content = (await secureFs.readFile(filePath, "utf-8")) as string;
|
||||
return JSON.parse(content) as T;
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code === "ENOENT") {
|
||||
@@ -231,9 +232,7 @@ export class SettingsService {
|
||||
* @param updates - Partial Credentials (usually just apiKeys)
|
||||
* @returns Promise resolving to complete updated Credentials object
|
||||
*/
|
||||
async updateCredentials(
|
||||
updates: Partial<Credentials>
|
||||
): Promise<Credentials> {
|
||||
async updateCredentials(updates: Partial<Credentials>): Promise<Credentials> {
|
||||
await ensureDataDir(this.dataDir);
|
||||
const credentialsPath = getCredentialsPath(this.dataDir);
|
||||
|
||||
@@ -495,10 +494,14 @@ export class SettingsService {
|
||||
if (appState.apiKeys) {
|
||||
const apiKeys = appState.apiKeys as {
|
||||
anthropic?: string;
|
||||
google?: string;
|
||||
openai?: string;
|
||||
};
|
||||
await this.updateCredentials({
|
||||
apiKeys: {
|
||||
anthropic: apiKeys.anthropic || "",
|
||||
google: apiKeys.google || "",
|
||||
openai: apiKeys.openai || "",
|
||||
},
|
||||
});
|
||||
migratedCredentials = true;
|
||||
@@ -548,8 +551,7 @@ export class SettingsService {
|
||||
// Get theme from project object
|
||||
const project = projects.find((p) => p.path === projectPath);
|
||||
if (project?.theme) {
|
||||
projectSettings.theme =
|
||||
project.theme as ProjectSettings["theme"];
|
||||
projectSettings.theme = project.theme as ProjectSettings["theme"];
|
||||
}
|
||||
|
||||
if (boardBackgroundByProject?.[projectPath]) {
|
||||
@@ -571,7 +573,9 @@ export class SettingsService {
|
||||
migratedProjectCount++;
|
||||
}
|
||||
} catch (e) {
|
||||
errors.push(`Failed to migrate project settings for ${projectPath}: ${e}`);
|
||||
errors.push(
|
||||
`Failed to migrate project settings for ${projectPath}: ${e}`
|
||||
);
|
||||
}
|
||||
}
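
As a sketch only, the settings read path shown above reduces to a pattern like the following; the @automaker/platform path helpers and the secureFs wrapper are assumed to behave as in the hunks above:

import * as secureFs from "../lib/secure-fs.js";
import { getGlobalSettingsPath, ensureDataDir } from "@automaker/platform";

async function readJsonOrDefault<T>(filePath: string, defaultValue: T): Promise<T> {
  try {
    const content = (await secureFs.readFile(filePath, "utf-8")) as string;
    return JSON.parse(content) as T;
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code === "ENOENT") {
      return defaultValue; // a missing file falls back to defaults
    }
    throw error;
  }
}

// Hypothetical usage, assuming the path helpers take the data directory
// the same way getCredentialsPath(this.dataDir) does above:
// await ensureDataDir(dataDir);
// const settings = await readJsonOrDefault(getGlobalSettingsPath(dataDir), DEFAULT_GLOBAL_SETTINGS);
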
@@ -1,422 +1,35 @@
|
||||
/**
|
||||
* Settings Types - Shared types for file-based settings storage
|
||||
* Settings Types - Re-exported from @automaker/types
|
||||
*
|
||||
* Defines the structure for global settings, credentials, and per-project settings
|
||||
* that are persisted to disk in JSON format. These types are used by both the server
|
||||
* (for file I/O via SettingsService) and the UI (for state management and sync).
|
||||
* This file now re-exports settings types from the shared @automaker/types package
|
||||
* to maintain backward compatibility with existing imports in the server codebase.
|
||||
*/
|
||||
|
||||
/**
|
||||
* ThemeMode - Available color themes for the UI
|
||||
*
|
||||
* Includes system theme and multiple color schemes:
|
||||
* - System: Respects OS dark/light mode preference
|
||||
* - Light/Dark: Basic light and dark variants
|
||||
* - Color Schemes: Retro, Dracula, Nord, Monokai, Tokyo Night, Solarized, Gruvbox,
|
||||
* Catppuccin, OneDark, Synthwave, Red, Cream, Sunset, Gray
|
||||
*/
|
||||
export type ThemeMode =
|
||||
| "light"
|
||||
| "dark"
|
||||
| "system"
|
||||
| "retro"
|
||||
| "dracula"
|
||||
| "nord"
|
||||
| "monokai"
|
||||
| "tokyonight"
|
||||
| "solarized"
|
||||
| "gruvbox"
|
||||
| "catppuccin"
|
||||
| "onedark"
|
||||
| "synthwave"
|
||||
| "red"
|
||||
| "cream"
|
||||
| "sunset"
|
||||
| "gray";
|
||||
export type {
|
||||
ThemeMode,
|
||||
KanbanCardDetailLevel,
|
||||
AgentModel,
|
||||
PlanningMode,
|
||||
ThinkingLevel,
|
||||
ModelProvider,
|
||||
KeyboardShortcuts,
|
||||
AIProfile,
|
||||
ProjectRef,
|
||||
TrashedProjectRef,
|
||||
ChatSessionRef,
|
||||
GlobalSettings,
|
||||
Credentials,
|
||||
BoardBackgroundSettings,
|
||||
WorktreeInfo,
|
||||
ProjectSettings,
|
||||
} from "@automaker/types";
|
||||
|
||||
/** KanbanCardDetailLevel - Controls how much information is displayed on kanban cards */
|
||||
export type KanbanCardDetailLevel = "minimal" | "standard" | "detailed";
|
||||
|
||||
/** AgentModel - Available Claude models for feature generation and planning */
|
||||
export type AgentModel = "opus" | "sonnet" | "haiku";
|
||||
|
||||
/** PlanningMode - Planning levels for feature generation workflows */
|
||||
export type PlanningMode = "skip" | "lite" | "spec" | "full";
|
||||
|
||||
/** ThinkingLevel - Extended thinking levels for Claude models (reasoning intensity) */
|
||||
export type ThinkingLevel = "none" | "low" | "medium" | "high" | "ultrathink";
|
||||
|
||||
/** ModelProvider - AI model provider for credentials and API key management */
|
||||
export type ModelProvider = "claude";
|
||||
|
||||
/**
|
||||
* KeyboardShortcuts - User-configurable keyboard bindings for common actions
|
||||
*
|
||||
* Each property maps an action to a keyboard shortcut string
|
||||
* (e.g., "Ctrl+K", "Alt+N", "Shift+P")
|
||||
*/
|
||||
export interface KeyboardShortcuts {
|
||||
/** Open board view */
|
||||
board: string;
|
||||
/** Open agent panel */
|
||||
agent: string;
|
||||
/** Open feature spec editor */
|
||||
spec: string;
|
||||
/** Open context files panel */
|
||||
context: string;
|
||||
/** Open settings */
|
||||
settings: string;
|
||||
/** Open AI profiles */
|
||||
profiles: string;
|
||||
/** Open terminal */
|
||||
terminal: string;
|
||||
/** Toggle sidebar visibility */
|
||||
toggleSidebar: string;
|
||||
/** Add new feature */
|
||||
addFeature: string;
|
||||
/** Add context file */
|
||||
addContextFile: string;
|
||||
/** Start next feature generation */
|
||||
startNext: string;
|
||||
/** Create new chat session */
|
||||
newSession: string;
|
||||
/** Open project picker */
|
||||
openProject: string;
|
||||
/** Open project picker (alternate) */
|
||||
projectPicker: string;
|
||||
/** Cycle to previous project */
|
||||
cyclePrevProject: string;
|
||||
/** Cycle to next project */
|
||||
cycleNextProject: string;
|
||||
/** Add new AI profile */
|
||||
addProfile: string;
|
||||
/** Split terminal right */
|
||||
splitTerminalRight: string;
|
||||
/** Split terminal down */
|
||||
splitTerminalDown: string;
|
||||
/** Close current terminal */
|
||||
closeTerminal: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* AIProfile - Configuration for an AI model with specific parameters
|
||||
*
|
||||
* Profiles can be built-in defaults or user-created. They define which model to use,
|
||||
* thinking level, and other parameters for feature generation tasks.
|
||||
*/
|
||||
export interface AIProfile {
|
||||
/** Unique identifier for the profile */
|
||||
id: string;
|
||||
/** Display name for the profile */
|
||||
name: string;
|
||||
/** User-friendly description */
|
||||
description: string;
|
||||
/** Which Claude model to use (opus, sonnet, haiku) */
|
||||
model: AgentModel;
|
||||
/** Extended thinking level for reasoning-based tasks */
|
||||
thinkingLevel: ThinkingLevel;
|
||||
/** Provider (currently only "claude") */
|
||||
provider: ModelProvider;
|
||||
/** Whether this is a built-in default profile */
|
||||
isBuiltIn: boolean;
|
||||
/** Optional icon identifier or emoji */
|
||||
icon?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* ProjectRef - Minimal reference to a project stored in global settings
|
||||
*
|
||||
* Used for the projects list and project history. Full project data is loaded separately.
|
||||
*/
|
||||
export interface ProjectRef {
|
||||
/** Unique identifier */
|
||||
id: string;
|
||||
/** Display name */
|
||||
name: string;
|
||||
/** Absolute filesystem path to project directory */
|
||||
path: string;
|
||||
/** ISO timestamp of last time project was opened */
|
||||
lastOpened?: string;
|
||||
/** Project-specific theme override (or undefined to use global) */
|
||||
theme?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* TrashedProjectRef - Reference to a project in the trash/recycle bin
|
||||
*
|
||||
* Extends ProjectRef with deletion metadata. User can permanently delete or restore.
|
||||
*/
|
||||
export interface TrashedProjectRef extends ProjectRef {
|
||||
/** ISO timestamp when project was moved to trash */
|
||||
trashedAt: string;
|
||||
/** Whether project folder was deleted from disk */
|
||||
deletedFromDisk?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* ChatSessionRef - Minimal reference to a chat session
|
||||
*
|
||||
* Used for session lists and history. Full session content is stored separately.
|
||||
*/
|
||||
export interface ChatSessionRef {
|
||||
/** Unique session identifier */
|
||||
id: string;
|
||||
/** User-given or AI-generated title */
|
||||
title: string;
|
||||
/** Project that session belongs to */
|
||||
projectId: string;
|
||||
/** ISO timestamp of creation */
|
||||
createdAt: string;
|
||||
/** ISO timestamp of last message */
|
||||
updatedAt: string;
|
||||
/** Whether session is archived */
|
||||
archived: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* GlobalSettings - User preferences and state stored globally in {DATA_DIR}/settings.json
|
||||
*
|
||||
* This is the main settings file that persists user preferences across sessions.
|
||||
* Includes theme, UI state, feature defaults, keyboard shortcuts, AI profiles, and projects.
|
||||
* Format: JSON with version field for migration support.
|
||||
*/
|
||||
export interface GlobalSettings {
|
||||
/** Version number for schema migration */
|
||||
version: number;
|
||||
|
||||
// Theme Configuration
|
||||
/** Currently selected theme */
|
||||
theme: ThemeMode;
|
||||
|
||||
// UI State Preferences
|
||||
/** Whether sidebar is currently open */
|
||||
sidebarOpen: boolean;
|
||||
/** Whether chat history panel is open */
|
||||
chatHistoryOpen: boolean;
|
||||
/** How much detail to show on kanban cards */
|
||||
kanbanCardDetailLevel: KanbanCardDetailLevel;
|
||||
|
||||
// Feature Generation Defaults
|
||||
/** Max features to generate concurrently */
|
||||
maxConcurrency: number;
|
||||
/** Default: skip tests during feature generation */
|
||||
defaultSkipTests: boolean;
|
||||
/** Default: enable dependency blocking */
|
||||
enableDependencyBlocking: boolean;
|
||||
/** Default: use git worktrees for feature branches */
|
||||
useWorktrees: boolean;
|
||||
/** Default: only show AI profiles (hide other settings) */
|
||||
showProfilesOnly: boolean;
|
||||
/** Default: planning approach (skip/lite/spec/full) */
|
||||
defaultPlanningMode: PlanningMode;
|
||||
/** Default: require manual approval before generating */
|
||||
defaultRequirePlanApproval: boolean;
|
||||
/** ID of currently selected AI profile (null = use built-in) */
|
||||
defaultAIProfileId: string | null;
|
||||
|
||||
// Audio Preferences
|
||||
/** Mute completion notification sound */
|
||||
muteDoneSound: boolean;
|
||||
|
||||
// AI Model Selection
|
||||
/** Which model to use for feature name/description enhancement */
|
||||
enhancementModel: AgentModel;
|
||||
|
||||
// Input Configuration
|
||||
/** User's keyboard shortcut bindings */
|
||||
keyboardShortcuts: KeyboardShortcuts;
|
||||
|
||||
// AI Profiles
|
||||
/** User-created AI profiles */
|
||||
aiProfiles: AIProfile[];
|
||||
|
||||
// Project Management
|
||||
/** List of active projects */
|
||||
projects: ProjectRef[];
|
||||
/** Projects in trash/recycle bin */
|
||||
trashedProjects: TrashedProjectRef[];
|
||||
/** History of recently opened project IDs */
|
||||
projectHistory: string[];
|
||||
/** Current position in project history for navigation */
|
||||
projectHistoryIndex: number;
|
||||
|
||||
// File Browser and UI Preferences
|
||||
/** Last directory opened in file picker */
|
||||
lastProjectDir?: string;
|
||||
/** Recently accessed folders for quick access */
|
||||
recentFolders: string[];
|
||||
/** Whether worktree panel is collapsed in current view */
|
||||
worktreePanelCollapsed: boolean;
|
||||
|
||||
// Session Tracking
|
||||
/** Maps project path -> last selected session ID in that project */
|
||||
lastSelectedSessionByProject: Record<string, string>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Credentials - API keys stored in {DATA_DIR}/credentials.json
|
||||
*
|
||||
* Sensitive data stored separately from general settings.
|
||||
* Keys should never be exposed in UI or logs.
|
||||
*/
|
||||
export interface Credentials {
|
||||
/** Version number for schema migration */
|
||||
version: number;
|
||||
/** API keys for various providers */
|
||||
apiKeys: {
|
||||
/** Anthropic Claude API key */
|
||||
anthropic: string;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* BoardBackgroundSettings - Kanban board appearance customization
|
||||
*
|
||||
* Controls background images, opacity, borders, and visual effects for the board.
|
||||
*/
|
||||
export interface BoardBackgroundSettings {
|
||||
/** Path to background image file (null = no image) */
|
||||
imagePath: string | null;
|
||||
/** Version/timestamp of image for cache busting */
|
||||
imageVersion?: number;
|
||||
/** Opacity of cards (0-1) */
|
||||
cardOpacity: number;
|
||||
/** Opacity of columns (0-1) */
|
||||
columnOpacity: number;
|
||||
/** Show border around columns */
|
||||
columnBorderEnabled: boolean;
|
||||
/** Apply glassmorphism effect to cards */
|
||||
cardGlassmorphism: boolean;
|
||||
/** Show border around cards */
|
||||
cardBorderEnabled: boolean;
|
||||
/** Opacity of card borders (0-1) */
|
||||
cardBorderOpacity: number;
|
||||
/** Hide scrollbar in board view */
|
||||
hideScrollbar: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* WorktreeInfo - Information about a git worktree
|
||||
*
|
||||
* Tracks worktree location, branch, and dirty state for project management.
|
||||
*/
|
||||
export interface WorktreeInfo {
|
||||
/** Absolute path to worktree directory */
|
||||
path: string;
|
||||
/** Branch checked out in this worktree */
|
||||
branch: string;
|
||||
/** Whether this is the main worktree */
|
||||
isMain: boolean;
|
||||
/** Whether worktree has uncommitted changes */
|
||||
hasChanges?: boolean;
|
||||
/** Number of files with changes */
|
||||
changedFilesCount?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* ProjectSettings - Project-specific overrides stored in {projectPath}/.automaker/settings.json
|
||||
*
|
||||
* Allows per-project customization without affecting global settings.
|
||||
* All fields are optional - missing values fall back to global settings.
|
||||
*/
|
||||
export interface ProjectSettings {
|
||||
/** Version number for schema migration */
|
||||
version: number;
|
||||
|
||||
// Theme Configuration (project-specific override)
|
||||
/** Project theme (undefined = use global setting) */
|
||||
theme?: ThemeMode;
|
||||
|
||||
// Worktree Management
|
||||
/** Project-specific worktree preference override */
|
||||
useWorktrees?: boolean;
|
||||
/** Current worktree being used in this project */
|
||||
currentWorktree?: { path: string | null; branch: string };
|
||||
/** List of worktrees available in this project */
|
||||
worktrees?: WorktreeInfo[];
|
||||
|
||||
// Board Customization
|
||||
/** Project-specific board background settings */
|
||||
boardBackground?: BoardBackgroundSettings;
|
||||
|
||||
// Session Tracking
|
||||
/** Last chat session selected in this project */
|
||||
lastSelectedSessionId?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Default values and constants
|
||||
*/
|
||||
|
||||
/** Default keyboard shortcut bindings */
|
||||
export const DEFAULT_KEYBOARD_SHORTCUTS: KeyboardShortcuts = {
|
||||
board: "K",
|
||||
agent: "A",
|
||||
spec: "D",
|
||||
context: "C",
|
||||
settings: "S",
|
||||
profiles: "M",
|
||||
terminal: "T",
|
||||
toggleSidebar: "`",
|
||||
addFeature: "N",
|
||||
addContextFile: "N",
|
||||
startNext: "G",
|
||||
newSession: "N",
|
||||
openProject: "O",
|
||||
projectPicker: "P",
|
||||
cyclePrevProject: "Q",
|
||||
cycleNextProject: "E",
|
||||
addProfile: "N",
|
||||
splitTerminalRight: "Alt+D",
|
||||
splitTerminalDown: "Alt+S",
|
||||
closeTerminal: "Alt+W",
|
||||
};
|
||||
|
||||
/** Default global settings used when no settings file exists */
|
||||
export const DEFAULT_GLOBAL_SETTINGS: GlobalSettings = {
|
||||
version: 1,
|
||||
theme: "dark",
|
||||
sidebarOpen: true,
|
||||
chatHistoryOpen: false,
|
||||
kanbanCardDetailLevel: "standard",
|
||||
maxConcurrency: 3,
|
||||
defaultSkipTests: true,
|
||||
enableDependencyBlocking: true,
|
||||
useWorktrees: false,
|
||||
showProfilesOnly: false,
|
||||
defaultPlanningMode: "skip",
|
||||
defaultRequirePlanApproval: false,
|
||||
defaultAIProfileId: null,
|
||||
muteDoneSound: false,
|
||||
enhancementModel: "sonnet",
|
||||
keyboardShortcuts: DEFAULT_KEYBOARD_SHORTCUTS,
|
||||
aiProfiles: [],
|
||||
projects: [],
|
||||
trashedProjects: [],
|
||||
projectHistory: [],
|
||||
projectHistoryIndex: -1,
|
||||
lastProjectDir: undefined,
|
||||
recentFolders: [],
|
||||
worktreePanelCollapsed: false,
|
||||
lastSelectedSessionByProject: {},
|
||||
};
|
||||
|
||||
/** Default credentials (empty strings - user must provide API keys) */
|
||||
export const DEFAULT_CREDENTIALS: Credentials = {
|
||||
version: 1,
|
||||
apiKeys: {
|
||||
anthropic: "",
|
||||
},
|
||||
};
|
||||
|
||||
/** Default project settings (empty - all settings are optional and fall back to global) */
|
||||
export const DEFAULT_PROJECT_SETTINGS: ProjectSettings = {
|
||||
version: 1,
|
||||
};
|
||||
|
||||
/** Current version of the global settings schema */
|
||||
export const SETTINGS_VERSION = 1;
|
||||
/** Current version of the credentials schema */
|
||||
export const CREDENTIALS_VERSION = 1;
|
||||
/** Current version of the project settings schema */
|
||||
export const PROJECT_SETTINGS_VERSION = 1;
|
||||
export {
|
||||
DEFAULT_KEYBOARD_SHORTCUTS,
|
||||
DEFAULT_GLOBAL_SETTINGS,
|
||||
DEFAULT_CREDENTIALS,
|
||||
DEFAULT_PROJECT_SETTINGS,
|
||||
SETTINGS_VERSION,
|
||||
CREDENTIALS_VERSION,
|
||||
PROJECT_SETTINGS_VERSION,
|
||||
} from "@automaker/types";
|
apps/server/tests/fixtures/messages.ts
vendored
2
apps/server/tests/fixtures/messages.ts
vendored
@@ -6,7 +6,7 @@ import type {
|
||||
ConversationMessage,
|
||||
ProviderMessage,
|
||||
ContentBlock,
|
||||
} from "../../src/providers/types.js";
|
||||
} from "@automaker/types";
|
||||
|
||||
export const conversationHistoryFixture: ConversationMessage[] = [
|
||||
{
|
||||
|
||||
@@ -17,7 +17,7 @@ import {
|
||||
getCredentialsPath,
|
||||
getProjectSettingsPath,
|
||||
ensureDataDir,
|
||||
} from "@/lib/automaker-paths.js";
|
||||
} from "@automaker/platform";
|
||||
|
||||
describe("automaker-paths.ts", () => {
|
||||
const projectPath = path.join("/test", "project");
|
||||
|
||||
@@ -4,7 +4,7 @@ import {
|
||||
normalizeContentBlocks,
|
||||
formatHistoryAsText,
|
||||
convertHistoryToMessages,
|
||||
} from "@/lib/conversation-utils.js";
|
||||
} from "@automaker/utils";
|
||||
import { conversationHistoryFixture } from "../../fixtures/messages.js";
|
||||
|
||||
describe("conversation-utils.ts", () => {
|
||||
|
||||
@@ -4,8 +4,8 @@ import {
|
||||
areDependenciesSatisfied,
|
||||
getBlockingDependencies,
|
||||
type DependencyResolutionResult,
|
||||
} from "@/lib/dependency-resolver.js";
|
||||
import type { Feature } from "@/services/feature-loader.js";
|
||||
} from "@automaker/dependency-resolver";
|
||||
import type { Feature } from "@automaker/types";
|
||||
|
||||
// Helper to create test features
|
||||
function createFeature(
|
||||
|
||||
@@ -6,7 +6,7 @@ import {
|
||||
classifyError,
|
||||
getUserFriendlyErrorMessage,
|
||||
type ErrorType,
|
||||
} from "@/lib/error-handler.js";
|
||||
} from "@automaker/utils";
|
||||
|
||||
describe("error-handler.ts", () => {
|
||||
describe("isAbortError", () => {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { mkdirSafe, existsSafe } from "@/lib/fs-utils.js";
|
||||
import { mkdirSafe, existsSafe } from "@automaker/utils";
|
||||
import fs from "fs/promises";
|
||||
import path from "path";
|
||||
import os from "os";
|
||||
|
||||
@@ -4,7 +4,7 @@ import {
|
||||
readImageAsBase64,
|
||||
convertImagesToContentBlocks,
|
||||
formatImagePathsForPrompt,
|
||||
} from "@/lib/image-handler.js";
|
||||
} from "@automaker/utils";
|
||||
import { pngBase64Fixture } from "../../fixtures/images.js";
|
||||
import * as fs from "fs/promises";
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ import {
|
||||
createLogger,
|
||||
getLogLevel,
|
||||
setLogLevel,
|
||||
} from "@/lib/logger.js";
|
||||
} from "@automaker/utils";
|
||||
|
||||
describe("logger.ts", () => {
|
||||
let consoleSpy: {
|
||||
|
||||
@@ -4,7 +4,7 @@ import {
|
||||
getEffectiveModel,
|
||||
CLAUDE_MODEL_MAP,
|
||||
DEFAULT_MODELS,
|
||||
} from "@/lib/model-resolver.js";
|
||||
} from "@automaker/model-resolver";
|
||||
|
||||
describe("model-resolver.ts", () => {
|
||||
let consoleSpy: any;
|
||||
|
||||
@@ -1,17 +1,24 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { buildPromptWithImages } from "@/lib/prompt-builder.js";
|
||||
import * as imageHandler from "@/lib/image-handler.js";
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import * as utils from "@automaker/utils";
|
||||
import * as fs from "fs/promises";
|
||||
|
||||
vi.mock("@/lib/image-handler.js");
|
||||
// Mock fs module for the image-handler's readFile calls
|
||||
vi.mock("fs/promises");
|
||||
|
||||
describe("prompt-builder.ts", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
// Setup default mock for fs.readFile to return a valid image buffer
|
||||
vi.mocked(fs.readFile).mockResolvedValue(Buffer.from("fake-image-data"));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe("buildPromptWithImages", () => {
|
||||
it("should return plain text when no images provided", async () => {
|
||||
const result = await buildPromptWithImages("Hello world");
|
||||
const result = await utils.buildPromptWithImages("Hello world");
|
||||
|
||||
expect(result).toEqual({
|
||||
content: "Hello world",
|
||||
@@ -20,7 +27,7 @@ describe("prompt-builder.ts", () => {
|
||||
});
|
||||
|
||||
it("should return plain text when imagePaths is empty array", async () => {
|
||||
const result = await buildPromptWithImages("Hello world", []);
|
||||
const result = await utils.buildPromptWithImages("Hello world", []);
|
||||
|
||||
expect(result).toEqual({
|
||||
content: "Hello world",
|
||||
@@ -29,44 +36,26 @@ describe("prompt-builder.ts", () => {
|
||||
});
|
||||
|
||||
it("should build content blocks with single image", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "base64data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Describe this image", [
|
||||
const result = await utils.buildPromptWithImages("Describe this image", [
|
||||
"/test.png",
|
||||
]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
expect(Array.isArray(result.content)).toBe(true);
|
||||
const content = result.content as Array<any>;
|
||||
const content = result.content as Array<{ type: string; text?: string }>;
|
||||
expect(content).toHaveLength(2);
|
||||
expect(content[0]).toEqual({ type: "text", text: "Describe this image" });
|
||||
expect(content[1].type).toBe("image");
|
||||
});
|
||||
|
||||
it("should build content blocks with multiple images", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data1" },
|
||||
},
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/jpeg", data: "data2" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Analyze these", [
|
||||
const result = await utils.buildPromptWithImages("Analyze these", [
|
||||
"/a.png",
|
||||
"/b.jpg",
|
||||
]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
const content = result.content as Array<any>;
|
||||
const content = result.content as Array<{ type: string }>;
|
||||
expect(content).toHaveLength(3); // 1 text + 2 images
|
||||
expect(content[0].type).toBe("text");
|
||||
expect(content[1].type).toBe("image");
|
||||
@@ -74,124 +63,67 @@ describe("prompt-builder.ts", () => {
|
||||
});
|
||||
|
||||
it("should include image paths in text when requested", async () => {
|
||||
vi.mocked(imageHandler.formatImagePathsForPrompt).mockReturnValue(
|
||||
"\n\nAttached images:\n- /test.png"
|
||||
);
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages(
|
||||
const result = await utils.buildPromptWithImages(
|
||||
"Base prompt",
|
||||
["/test.png"],
|
||||
undefined,
|
||||
true
|
||||
);
|
||||
|
||||
expect(imageHandler.formatImagePathsForPrompt).toHaveBeenCalledWith([
|
||||
"/test.png",
|
||||
]);
|
||||
const content = result.content as Array<any>;
|
||||
const content = result.content as Array<{ type: string; text?: string }>;
|
||||
expect(content[0].text).toContain("Base prompt");
|
||||
expect(content[0].text).toContain("Attached images:");
|
||||
expect(content[0].text).toContain("/test.png");
|
||||
});
|
||||
|
||||
it("should not include image paths by default", async () => {
|
||||
vi.mocked(imageHandler.formatImagePathsForPrompt).mockReturnValue(
|
||||
"\n\nAttached images:\n- /test.png"
|
||||
);
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
const result = await utils.buildPromptWithImages("Base prompt", ["/test.png"]);
|
||||
|
||||
const result = await buildPromptWithImages("Base prompt", ["/test.png"]);
|
||||
|
||||
expect(imageHandler.formatImagePathsForPrompt).not.toHaveBeenCalled();
|
||||
const content = result.content as Array<any>;
|
||||
const content = result.content as Array<{ type: string; text?: string }>;
|
||||
expect(content[0].text).toBe("Base prompt");
|
||||
});
|
||||
|
||||
it("should pass workDir to convertImagesToContentBlocks", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
await buildPromptWithImages("Test", ["/test.png"], "/work/dir");
|
||||
|
||||
expect(imageHandler.convertImagesToContentBlocks).toHaveBeenCalledWith(
|
||||
["/test.png"],
|
||||
"/work/dir"
|
||||
);
|
||||
expect(content[0].text).not.toContain("Attached");
|
||||
});
|
||||
|
||||
it("should handle empty text content", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("", ["/test.png"]);
|
||||
const result = await utils.buildPromptWithImages("", ["/test.png"]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
// When text is empty/whitespace, should only have image blocks
|
||||
const content = result.content as Array<any>;
|
||||
const content = result.content as Array<{ type: string }>;
|
||||
expect(content.every((block) => block.type === "image")).toBe(true);
|
||||
});
|
||||
|
||||
it("should trim text content before checking if empty", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
const result = await utils.buildPromptWithImages(" ", ["/test.png"]);
|
||||
|
||||
const result = await buildPromptWithImages(" ", ["/test.png"]);
|
||||
|
||||
const content = result.content as Array<any>;
|
||||
const content = result.content as Array<{ type: string }>;
|
||||
// Whitespace-only text should be excluded
|
||||
expect(content.every((block) => block.type === "image")).toBe(true);
|
||||
});
|
||||
|
||||
it("should return text when only one block and it's text", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([]);
|
||||
// Make readFile reject to simulate image load failure
|
||||
vi.mocked(fs.readFile).mockRejectedValue(new Error("File not found"));
|
||||
|
||||
const result = await buildPromptWithImages("Just text", ["/missing.png"]);
|
||||
const result = await utils.buildPromptWithImages("Just text", ["/missing.png"]);
|
||||
|
||||
// If no images are successfully loaded, should return just the text
|
||||
expect(result.content).toBe("Just text");
|
||||
expect(result.hasImages).toBe(true); // Still true because images were requested
|
||||
});
|
||||
|
||||
it("should handle workDir with relative paths", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
await buildPromptWithImages(
|
||||
it("should pass workDir for path resolution", async () => {
|
||||
// The function should use workDir to resolve relative paths
|
||||
const result = await utils.buildPromptWithImages(
|
||||
"Test",
|
||||
["relative.png"],
|
||||
"/absolute/work/dir"
|
||||
"/work/dir"
|
||||
);
|
||||
|
||||
expect(imageHandler.convertImagesToContentBlocks).toHaveBeenCalledWith(
|
||||
["relative.png"],
|
||||
"/absolute/work/dir"
|
||||
);
|
||||
// Verify it tried to read the file (with resolved path including workDir)
|
||||
expect(fs.readFile).toHaveBeenCalled();
|
||||
// The path should be resolved using workDir
|
||||
const readCall = vi.mocked(fs.readFile).mock.calls[0][0];
|
||||
expect(readCall).toContain("relative.png");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -13,35 +13,34 @@ describe("security.ts", () => {
|
||||
describe("initAllowedPaths", () => {
|
||||
it("should load ALLOWED_ROOT_DIRECTORY if set", async () => {
|
||||
process.env.ALLOWED_ROOT_DIRECTORY = "/projects";
|
||||
process.env.DATA_DIR = "";
|
||||
delete process.env.DATA_DIR;
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths, getAllowedRootDirectory } =
|
||||
await import("@/lib/security.js");
|
||||
const { initAllowedPaths, getAllowedPaths } =
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/projects"));
|
||||
expect(getAllowedRootDirectory()).toBe(path.resolve("/projects"));
|
||||
});
|
||||
|
||||
it("should always include DATA_DIR if set", async () => {
|
||||
it("should include DATA_DIR if set", async () => {
|
||||
delete process.env.ALLOWED_ROOT_DIRECTORY;
|
||||
process.env.DATA_DIR = "/data/dir";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/data/dir"));
|
||||
});
|
||||
|
||||
it("should handle both ALLOWED_ROOT_DIRECTORY and DATA_DIR", async () => {
|
||||
it("should include both ALLOWED_ROOT_DIRECTORY and DATA_DIR if both set", async () => {
|
||||
process.env.ALLOWED_ROOT_DIRECTORY = "/projects";
|
||||
process.env.DATA_DIR = "/data";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
@@ -49,6 +48,18 @@ describe("security.ts", () => {
|
||||
expect(allowed).toContain(path.resolve("/data"));
|
||||
expect(allowed).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("should return empty array when no paths configured", async () => {
|
||||
delete process.env.ALLOWED_ROOT_DIRECTORY;
|
||||
delete process.env.DATA_DIR;
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } =
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPathAllowed", () => {
|
||||
@@ -57,7 +68,7 @@ describe("security.ts", () => {
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
// Paths within allowed directory should be allowed
|
||||
@@ -75,7 +86,7 @@ describe("security.ts", () => {
|
||||
delete process.env.ALLOWED_ROOT_DIRECTORY;
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
// All paths should be allowed when no restrictions are configured
|
||||
@@ -91,7 +102,7 @@ describe("security.ts", () => {
|
||||
delete process.env.ALLOWED_ROOT_DIRECTORY;
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
// DATA_DIR should be allowed
|
||||
@@ -111,7 +122,7 @@ describe("security.ts", () => {
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const result = validatePath("/allowed/file.txt");
|
||||
@@ -123,7 +134,7 @@ describe("security.ts", () => {
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
// Disallowed paths should throw PathNotAllowedError
|
||||
@@ -135,7 +146,7 @@ describe("security.ts", () => {
|
||||
delete process.env.ALLOWED_ROOT_DIRECTORY;
|
||||
|
||||
const { initAllowedPaths, validatePath } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
// All paths are allowed when no restrictions configured
|
||||
@@ -151,7 +162,7 @@ describe("security.ts", () => {
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const result = validatePath("./file.txt");
|
||||
@@ -165,7 +176,7 @@ describe("security.ts", () => {
|
||||
process.env.DATA_DIR = "/data";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const result = getAllowedPaths();
|
||||
@@ -180,7 +191,7 @@ describe("security.ts", () => {
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
await import("@automaker/platform");
|
||||
initAllowedPaths();
|
||||
|
||||
const result = getAllowedPaths();
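
For orientation, a sketch of the path-allowlist API these tests exercise after the move to @automaker/platform (behaviour inferred from the assertions above; exact signatures are assumed):

import { initAllowedPaths, getAllowedPaths, isPathAllowed, validatePath } from "@automaker/platform";

process.env.ALLOWED_ROOT_DIRECTORY = "/projects";
process.env.DATA_DIR = "/data";

initAllowedPaths(); // reads ALLOWED_ROOT_DIRECTORY and DATA_DIR

console.log(getAllowedPaths());                       // resolved "/projects" and "/data"
console.log(isPathAllowed("/projects/app/index.ts")); // true
validatePath("/etc/passwd");                          // throws PathNotAllowedError while restrictions are configured
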
@@ -6,7 +6,7 @@ import type {
|
||||
ProviderMessage,
|
||||
InstallationStatus,
|
||||
ModelDefinition,
|
||||
} from "@/providers/types.js";
|
||||
} from "@automaker/types";
|
||||
|
||||
// Concrete implementation for testing the abstract class
|
||||
class TestProvider extends BaseProvider {
|
||||
|
||||
@@ -2,14 +2,14 @@ import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { AgentService } from "@/services/agent-service.js";
|
||||
import { ProviderFactory } from "@/providers/provider-factory.js";
|
||||
import * as fs from "fs/promises";
|
||||
import * as imageHandler from "@/lib/image-handler.js";
|
||||
import * as promptBuilder from "@/lib/prompt-builder.js";
|
||||
import * as imageHandler from "@automaker/utils";
|
||||
import * as promptBuilder from "@automaker/utils";
|
||||
import { collectAsyncGenerator } from "../../utils/helpers.js";
|
||||
|
||||
vi.mock("fs/promises");
|
||||
vi.mock("@/providers/provider-factory.js");
|
||||
vi.mock("@/lib/image-handler.js");
|
||||
vi.mock("@/lib/prompt-builder.js");
|
||||
vi.mock("@automaker/utils");
|
||||
vi.mock("@automaker/utils");
|
||||
|
||||
describe("agent-service.ts", () => {
|
||||
let service: AgentService;
|
||||
|
||||
@@ -144,6 +144,7 @@ describe("feature-loader.ts", () => {
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe("feature-2");
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
"[FeatureLoader]",
|
||||
expect.stringContaining("missing required 'id' field")
|
||||
);
|
||||
|
||||
@@ -189,7 +190,10 @@ describe("feature-loader.ts", () => {
|
||||
const result = await loader.getAll(testProjectPath);
|
||||
|
||||
expect(result).toEqual([]);
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
"[FeatureLoader]",
|
||||
expect.stringContaining("Failed to parse feature.json")
|
||||
);
|
||||
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
@@ -362,6 +366,11 @@ describe("feature-loader.ts", () => {
|
||||
const result = await loader.delete(testProjectPath, "feature-123");
|
||||
|
||||
expect(result).toBe(false);
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
"[FeatureLoader]",
|
||||
expect.stringContaining("Failed to delete feature"),
|
||||
expect.objectContaining({ message: "Permission denied" })
|
||||
);
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -17,10 +17,12 @@ export default defineConfig({
"src/routes/**", // Routes are better tested with integration tests
],
thresholds: {
lines: 65,
// Increased thresholds to ensure better code quality
// Current coverage: 64% stmts, 56% branches, 78% funcs, 64% lines
lines: 60,
functions: 75,
branches: 58,
statements: 65,
branches: 55,
statements: 60,
},
},
include: ["tests/**/*.test.ts", "tests/**/*.spec.ts"],
@@ -32,6 +34,13 @@ export default defineConfig({
resolve: {
alias: {
"@": path.resolve(__dirname, "./src"),
// Resolve shared packages to source files for proper mocking in tests
"@automaker/utils": path.resolve(__dirname, "../../libs/utils/src/index.ts"),
"@automaker/platform": path.resolve(__dirname, "../../libs/platform/src/index.ts"),
"@automaker/types": path.resolve(__dirname, "../../libs/types/src/index.ts"),
"@automaker/model-resolver": path.resolve(__dirname, "../../libs/model-resolver/src/index.ts"),
"@automaker/dependency-resolver": path.resolve(__dirname, "../../libs/dependency-resolver/src/index.ts"),
"@automaker/git-utils": path.resolve(__dirname, "../../libs/git-utils/src/index.ts"),
},
},
});

@@ -7,12 +7,9 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/AutoMaker-Org/automaker.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Cody Seibert",
|
||||
"email": "webdevcody@gmail.com"
|
||||
},
|
||||
"author": "AutoMaker Team",
|
||||
"license": "SEE LICENSE IN LICENSE",
|
||||
"private": true,
|
||||
"license": "Unlicense",
|
||||
"main": "dist-electron/main.js",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
@@ -38,6 +35,8 @@
|
||||
"dev:electron:wsl:gpu": "cross-env MESA_D3D12_DEFAULT_ADAPTER_NAME=NVIDIA vite"
|
||||
},
|
||||
"dependencies": {
|
||||
"@automaker/dependency-resolver": "^1.0.0",
|
||||
"@automaker/types": "^1.0.0",
|
||||
"@codemirror/lang-xml": "^6.1.0",
|
||||
"@codemirror/theme-one-dark": "^6.1.3",
|
||||
"@dnd-kit/core": "^6.3.1",
|
||||
|
||||
@@ -16,8 +16,20 @@ const __dirname = dirname(__filename);
|
||||
|
||||
const APP_DIR = join(__dirname, '..');
|
||||
const SERVER_DIR = join(APP_DIR, '..', 'server');
|
||||
const LIBS_DIR = join(APP_DIR, '..', '..', 'libs');
|
||||
const BUNDLE_DIR = join(APP_DIR, 'server-bundle');
|
||||
|
||||
// Local workspace packages that need to be bundled
|
||||
const LOCAL_PACKAGES = [
|
||||
'@automaker/types',
|
||||
'@automaker/utils',
|
||||
'@automaker/prompts',
|
||||
'@automaker/platform',
|
||||
'@automaker/model-resolver',
|
||||
'@automaker/dependency-resolver',
|
||||
'@automaker/git-utils'
|
||||
];
|
||||
|
||||
console.log('🔧 Preparing server for Electron bundling...\n');
|
||||
|
||||
// Step 1: Clean up previous bundle
|
||||
@@ -35,16 +47,55 @@ execSync('npm run build', { cwd: SERVER_DIR, stdio: 'inherit' });
|
||||
console.log('📋 Copying server dist...');
|
||||
cpSync(join(SERVER_DIR, 'dist'), join(BUNDLE_DIR, 'dist'), { recursive: true });
|
||||
|
||||
// Step 4: Create a minimal package.json for the server
|
||||
// Step 4: Copy local workspace packages
|
||||
console.log('📦 Copying local workspace packages...');
|
||||
const bundleLibsDir = join(BUNDLE_DIR, 'libs');
|
||||
mkdirSync(bundleLibsDir, { recursive: true });
|
||||
|
||||
for (const pkgName of LOCAL_PACKAGES) {
|
||||
const pkgDir = pkgName.replace('@automaker/', '');
|
||||
const srcDir = join(LIBS_DIR, pkgDir);
|
||||
const destDir = join(bundleLibsDir, pkgDir);
|
||||
|
||||
if (!existsSync(srcDir)) {
|
||||
console.warn(`⚠️ Warning: Package ${pkgName} not found at ${srcDir}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
mkdirSync(destDir, { recursive: true });
|
||||
|
||||
// Copy dist folder
|
||||
if (existsSync(join(srcDir, 'dist'))) {
|
||||
cpSync(join(srcDir, 'dist'), join(destDir, 'dist'), { recursive: true });
|
||||
}
|
||||
|
||||
// Copy package.json
|
||||
if (existsSync(join(srcDir, 'package.json'))) {
|
||||
cpSync(join(srcDir, 'package.json'), join(destDir, 'package.json'));
|
||||
}
|
||||
|
||||
console.log(` ✓ ${pkgName}`);
|
||||
}
|
||||
|
||||
// Step 5: Create a minimal package.json for the server
|
||||
console.log('📝 Creating server package.json...');
|
||||
const serverPkg = JSON.parse(readFileSync(join(SERVER_DIR, 'package.json'), 'utf-8'));
|
||||
|
||||
// Replace local package versions with file: references
|
||||
const dependencies = { ...serverPkg.dependencies };
|
||||
for (const pkgName of LOCAL_PACKAGES) {
|
||||
if (dependencies[pkgName]) {
|
||||
const pkgDir = pkgName.replace('@automaker/', '');
|
||||
dependencies[pkgName] = `file:libs/${pkgDir}`;
|
||||
}
|
||||
}
|
||||
|
||||
const bundlePkg = {
|
||||
name: '@automaker/server-bundle',
|
||||
version: serverPkg.version,
|
||||
type: 'module',
|
||||
main: 'dist/index.js',
|
||||
dependencies: serverPkg.dependencies
|
||||
dependencies
|
||||
};
|
||||
|
||||
writeFileSync(
|
||||
@@ -52,7 +103,7 @@ writeFileSync(
|
||||
JSON.stringify(bundlePkg, null, 2)
|
||||
);
|
||||
|
||||
// Step 5: Install production dependencies
|
||||
// Step 6: Install production dependencies
|
||||
console.log('📥 Installing server production dependencies...');
|
||||
execSync('npm install --omit=dev', {
|
||||
cwd: BUNDLE_DIR,
|
||||
@@ -64,7 +115,7 @@ execSync('npm install --omit=dev', {
|
||||
}
|
||||
});
|
||||
|
||||
// Step 6: Rebuild native modules for current architecture
|
||||
// Step 7: Rebuild native modules for current architecture
|
||||
// This is critical for modules like node-pty that have native bindings
|
||||
console.log('🔨 Rebuilding native modules for current architecture...');
|
||||
try {
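
The net effect of steps 4 and 5 above is a generated server-bundle/package.json whose workspace dependencies point at the copied libs; roughly (a sketch, with third-party versions elided):

// Approximate shape of the generated manifest
const generatedBundleManifest = {
  name: '@automaker/server-bundle',
  type: 'module',
  main: 'dist/index.js',
  dependencies: {
    '@automaker/types': 'file:libs/types',
    '@automaker/utils': 'file:libs/utils',
    '@automaker/platform': 'file:libs/platform',
    // ...remaining @automaker/* packages rewritten the same way;
    // third-party dependencies (express, cors, dotenv, ...) are kept as-is
  }
};
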
@@ -4,14 +4,7 @@ import { cn } from "@/lib/utils";
|
||||
import { ImageIcon, X, Loader2 } from "lucide-react";
|
||||
import { Textarea } from "@/components/ui/textarea";
|
||||
import { getElectronAPI } from "@/lib/electron";
|
||||
import { useAppStore } from "@/store/app-store";
|
||||
|
||||
export interface FeatureImagePath {
|
||||
id: string;
|
||||
path: string; // Path to the temp file
|
||||
filename: string;
|
||||
mimeType: string;
|
||||
}
|
||||
import { useAppStore, type FeatureImagePath } from "@/store/app-store";
|
||||
|
||||
// Map to store preview data by image ID (persisted across component re-mounts)
|
||||
export type ImagePreviewMap = Map<string, string>;
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
|
||||
import * as React from "react";
|
||||
import { useEffect, useCallback, useRef } from "react";
|
||||
import React, { useEffect, useCallback, useRef } from "react";
|
||||
import { Button, buttonVariants } from "./button";
|
||||
import { cn } from "@/lib/utils";
|
||||
import type { VariantProps } from "class-variance-authority";
|
||||
|
||||
@@ -10,7 +10,7 @@ import { useAppStore, Feature } from "@/store/app-store";
|
||||
import { getElectronAPI } from "@/lib/electron";
|
||||
import type { AutoModeEvent } from "@/types/electron";
|
||||
import { pathsEqual } from "@/lib/utils";
|
||||
import { getBlockingDependencies } from "@/lib/dependency-resolver";
|
||||
import { getBlockingDependencies } from "@automaker/dependency-resolver";
|
||||
import { BoardBackgroundModal } from "@/components/dialogs/board-background-modal";
|
||||
import { RefreshCw } from "lucide-react";
|
||||
import { useAutoMode } from "@/hooks/use-auto-mode";
|
||||
|
||||
@@ -8,7 +8,7 @@ import {
|
||||
TooltipTrigger,
|
||||
} from "@/components/ui/tooltip";
|
||||
import { AlertCircle, Lock, Hand, Sparkles } from "lucide-react";
|
||||
import { getBlockingDependencies } from "@/lib/dependency-resolver";
|
||||
import { getBlockingDependencies } from "@automaker/dependency-resolver";
|
||||
|
||||
interface CardBadgeProps {
|
||||
children: React.ReactNode;
|
||||
|
||||
@@ -12,7 +12,7 @@ import { getElectronAPI } from "@/lib/electron";
|
||||
import { toast } from "sonner";
|
||||
import { useAutoMode } from "@/hooks/use-auto-mode";
|
||||
import { truncateDescription } from "@/lib/utils";
|
||||
import { getBlockingDependencies } from "@/lib/dependency-resolver";
|
||||
import { getBlockingDependencies } from "@automaker/dependency-resolver";
|
||||
|
||||
interface UseBoardActionsProps {
|
||||
currentProject: { path: string; id: string } | null;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { useMemo, useCallback } from "react";
|
||||
import { Feature, useAppStore } from "@/store/app-store";
|
||||
import { resolveDependencies, getBlockingDependencies } from "@/lib/dependency-resolver";
|
||||
import { resolveDependencies, getBlockingDependencies } from "@automaker/dependency-resolver";
|
||||
|
||||
type ColumnId = Feature["status"];
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { AgentModel, ThinkingLevel } from "@/store/app-store";
|
||||
import type { AgentModel, ThinkingLevel } from "@/store/app-store";
|
||||
import {
|
||||
Brain,
|
||||
Zap,
|
||||
|
||||
@@ -6,48 +6,12 @@
|
||||
* - AUTOMAKER_MODEL_DEFAULT: Fallback model for all operations
|
||||
*/
|
||||
|
||||
/**
|
||||
* Claude model aliases for convenience
|
||||
*/
|
||||
export const CLAUDE_MODEL_MAP: Record<string, string> = {
|
||||
haiku: "claude-haiku-4-5",
|
||||
sonnet: "claude-sonnet-4-20250514",
|
||||
opus: "claude-opus-4-5-20251101",
|
||||
} as const;
|
||||
// Import shared model constants and types
|
||||
import { CLAUDE_MODEL_MAP, DEFAULT_MODELS } from "@automaker/types";
|
||||
import { resolveModelString } from "@automaker/model-resolver";
|
||||
|
||||
/**
|
||||
* Default models per use case
|
||||
*/
|
||||
export const DEFAULT_MODELS = {
|
||||
chat: "claude-opus-4-5-20251101",
|
||||
default: "claude-opus-4-5-20251101",
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* Resolve a model alias to a full model string
|
||||
*/
|
||||
export function resolveModelString(
|
||||
modelKey?: string,
|
||||
defaultModel: string = DEFAULT_MODELS.default
|
||||
): string {
|
||||
if (!modelKey) {
|
||||
return defaultModel;
|
||||
}
|
||||
|
||||
// Full Claude model string - pass through
|
||||
if (modelKey.includes("claude-")) {
|
||||
return modelKey;
|
||||
}
|
||||
|
||||
// Check alias map
|
||||
const resolved = CLAUDE_MODEL_MAP[modelKey];
|
||||
if (resolved) {
|
||||
return resolved;
|
||||
}
|
||||
|
||||
// Unknown key - use default
|
||||
return defaultModel;
|
||||
}
|
||||
// Re-export for backward compatibility
|
||||
export { CLAUDE_MODEL_MAP, DEFAULT_MODELS, resolveModelString };
|
||||
|
||||
/**
* Get the model for chat operations
@@ -64,13 +28,13 @@ export function getChatModel(explicitModel?: string): string {
}

const envModel =
process.env.AUTOMAKER_MODEL_CHAT || process.env.AUTOMAKER_MODEL_DEFAULT;
import.meta.env.AUTOMAKER_MODEL_CHAT || import.meta.env.AUTOMAKER_MODEL_DEFAULT;

if (envModel) {
return resolveModelString(envModel);
}

return DEFAULT_MODELS.chat;
return DEFAULT_MODELS.claude;
}

/**
@@ -91,4 +55,3 @@ export const CHAT_MAX_TURNS = 1000;
* Default max turns for chat
*/
export const CHAT_MAX_TURNS = 1000;

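Assuming the shared @automaker/model-resolver keeps the resolution rules that were removed above (alias lookup, pass-through for full claude-* ids, fallback to a default), usage in this config looks roughly like:

import { resolveModelString } from "@automaker/model-resolver";
import { CLAUDE_MODEL_MAP, DEFAULT_MODELS } from "@automaker/types";

resolveModelString("sonnet");                         // alias -> CLAUDE_MODEL_MAP.sonnet
resolveModelString("claude-opus-4-5-20251101");       // full ids pass through unchanged
resolveModelString(undefined, DEFAULT_MODELS.claude); // falls back to the provided default, as getChatModel does above
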
Some files were not shown because too many files have changed in this diff.