Merge main into massive-terminal-upgrade

Resolves merge conflicts:
- apps/server/src/routes/terminal/common.ts: Keep randomBytes import, use @automaker/utils for createLogger
- apps/ui/eslint.config.mjs: Use main's explicit globals list with XMLHttpRequest and MediaQueryListEvent additions
- apps/ui/src/components/views/terminal-view.tsx: Keep our terminal improvements (killAllSessions, beforeunload, better error handling)
- apps/ui/src/config/terminal-themes.ts: Keep our search highlight colors for all themes
- apps/ui/src/store/app-store.ts: Keep our terminal settings persistence improvements (merge function)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
SuperComboGamer committed 2025-12-21 20:27:44 -05:00
393 changed files with 32473 additions and 17974 deletions


@@ -0,0 +1,188 @@
# @automaker/dependency-resolver
Feature dependency resolution using topological sorting.
## Overview
This package provides dependency resolution for AutoMaker features using Kahn's algorithm with priority-aware ordering. It ensures features are executed in the correct order based on their dependencies.
## Installation
```bash
npm install @automaker/dependency-resolver
```
## Exports
### Resolve Dependencies
Order features based on dependencies and priorities.
```typescript
import { resolveDependencies } from '@automaker/dependency-resolver';
import type { Feature } from '@automaker/types';
const features: Feature[] = [
  {
    id: 'database',
    category: 'backend',
    description: 'Setup database',
    priority: 1,
  },
  {
    id: 'auth',
    category: 'backend',
    description: 'Add authentication',
    dependencies: ['database'],
    priority: 2,
  },
  {
    id: 'api',
    category: 'backend',
    description: 'Create API endpoints',
    dependencies: ['auth'],
    priority: 3,
  },
];

const result = resolveDependencies(features);
console.log(result.orderedFeatures.map((f) => f.id));
// ['database', 'auth', 'api']

if (result.circularDependencies.length > 0) {
  console.error('Circular dependency detected!');
  console.error('Cycles:', result.circularDependencies);
}
```
### Check Dependencies Satisfied
Check if a feature's dependencies are satisfied.
```typescript
import { areDependenciesSatisfied } from '@automaker/dependency-resolver';
const allFeatures: Feature[] = [
  { id: 'database', status: 'completed', ... },
  { id: 'auth', status: 'pending', dependencies: ['database'], ... },
];

const authFeature = allFeatures.find((f) => f.id === 'auth')!;
if (areDependenciesSatisfied(authFeature, allFeatures)) {
  console.log('Auth feature is ready to execute');
} else {
  console.log('Waiting for dependencies');
}
```
### Get Blocking Dependencies
Get list of incomplete dependencies blocking a feature.
```typescript
import { getBlockingDependencies } from '@automaker/dependency-resolver';
const blocking = getBlockingDependencies(feature, allFeatures);
if (blocking.length > 0) {
  console.log(`Feature blocked by: ${blocking.join(', ')}`);
} else {
  console.log('No blocking dependencies');
}
```
## Usage Example
```typescript
import {
resolveDependencies,
areDependenciesSatisfied,
getBlockingDependencies,
} from '@automaker/dependency-resolver';
import type { Feature } from '@automaker/types';
async function executeFeatures(features: Feature[]) {
  // Resolve dependency order
  const { orderedFeatures, circularDependencies } = resolveDependencies(features);
  if (circularDependencies.length > 0) {
    const cycles = circularDependencies.map((cycle) => cycle.join(' → ')).join('; ');
    throw new Error(`Circular dependencies: ${cycles}`);
  }

  // Execute in order
  for (const feature of orderedFeatures) {
    // Check if dependencies are satisfied
    if (!areDependenciesSatisfied(feature, features)) {
      const blocking = getBlockingDependencies(feature, features);
      console.log(`Skipping ${feature.id}, blocked by: ${blocking.join(', ')}`);
      continue;
    }

    // Execute feature (executeFeature is assumed to be provided by the caller)
    console.log(`Executing: ${feature.id}`);
    await executeFeature(feature);

    // Mark as completed
    feature.status = 'completed';
  }
}
```
## Algorithm
### Topological Sort (Kahn's Algorithm)
1. Calculate in-degree for each feature (number of dependencies)
2. Start with features that have no dependencies (in-degree = 0)
3. Process features in priority order
4. Remove processed features from dependency graph
5. Repeat until all features are processed or a cycle is detected (see the sketch below)
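In code, the ordering phase looks roughly like the following condensed sketch (the helper name `orderFeatures` is illustrative; the published `resolveDependencies` additionally reports missing, blocked, and cyclic features):

```typescript
import type { Feature } from '@automaker/types';

// Priority-aware Kahn's algorithm, ordering phase only
function orderFeatures(features: Feature[]): Feature[] {
  const byId = new Map<string, Feature>(features.map((f) => [f.id, f]));
  const inDegree = new Map<string, number>(features.map((f) => [f.id, 0]));
  const dependents = new Map<string, string[]>(features.map((f) => [f.id, []]));

  // Build the graph; dependencies on unknown features never block ordering
  for (const feature of features) {
    for (const depId of feature.dependencies ?? []) {
      if (!byId.has(depId)) continue;
      dependents.get(depId)!.push(feature.id);
      inDegree.set(feature.id, inDegree.get(feature.id)! + 1);
    }
  }

  const byPriority = (a: Feature, b: Feature) => (a.priority ?? 2) - (b.priority ?? 2);
  const queue = features.filter((f) => inDegree.get(f.id) === 0).sort(byPriority);
  const ordered: Feature[] = [];

  while (queue.length > 0) {
    const current = queue.shift()!; // lowest priority number first
    ordered.push(current);
    for (const dependentId of dependents.get(current.id)!) {
      const degree = inDegree.get(dependentId)! - 1;
      inDegree.set(dependentId, degree);
      if (degree === 0) {
        queue.push(byId.get(dependentId)!);
        queue.sort(byPriority); // keep the queue in priority order
      }
    }
  }

  return ordered; // any feature missing here is part of a cycle
}
```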
### Priority Handling
- Features with lower priority numbers execute first
- When multiple features have same in-degree, priority determines order
- Features without an explicit priority default to priority 2 (medium); see the example below
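For example (the `category` and `description` values are placeholders):

```typescript
const ids = resolveDependencies([
  { id: 'low', category: 'demo', description: 'Low priority', priority: 3 },
  { id: 'high', category: 'demo', description: 'High priority', priority: 1 },
  { id: 'default', category: 'demo', description: 'No priority set' }, // treated as priority 2
]).orderedFeatures.map((f) => f.id);
// ids -> ['high', 'default', 'low']
```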
### Cycle Detection
- Detects circular dependencies via depth-first search
- Reports each cycle as a group of feature IDs in `circularDependencies`
- Cyclic features are still appended to the end of `orderedFeatures`, so execution never loops forever
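A minimal illustration of the cycle reporting described above:

```typescript
const { orderedFeatures, circularDependencies } = resolveDependencies([
  { id: 'a', category: 'demo', description: 'A', dependencies: ['b'] },
  { id: 'b', category: 'demo', description: 'B', dependencies: ['a'] },
]);
// circularDependencies -> [['a', 'b']] (one group per detected cycle)
// orderedFeatures still contains both features; cyclic features are appended at the end
```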
## Return Types
### DependencyResolutionResult
```typescript
interface DependencyResolutionResult {
  orderedFeatures: Feature[];                 // Features in dependency-aware execution order
  circularDependencies: string[][];           // Groups of feature IDs forming cycles
  missingDependencies: Map<string, string[]>; // featureId -> dependency IDs that don't exist
  blockedFeatures: Map<string, string[]>;     // featureId -> incomplete dependency IDs
}
```
## Edge Cases
### Missing Dependencies
Dependencies on features that don't exist never block topological ordering; they are reported in `missingDependencies` instead. Note that `areDependenciesSatisfied` treats a missing dependency as unsatisfied.
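For example:

```typescript
const result = resolveDependencies([
  { id: 'a', category: 'demo', description: 'A', dependencies: ['ghost'] },
]);
result.orderedFeatures.map((f) => f.id); // ['a'] - still ordered
result.missingDependencies.get('a');     // ['ghost']
```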
### Self-Dependencies
Features depending on themselves are detected as cycles.
### Empty Dependencies Array
Treated the same as having no dependencies; the feature is ready immediately.
## Dependencies
- `@automaker/types` - Feature type definition
## Used By
- `@automaker/server` - Auto-mode feature execution
- `@automaker/ui` - Board view feature ordering


@@ -0,0 +1,36 @@
{
"name": "@automaker/dependency-resolver",
"version": "1.0.0",
"description": "Feature dependency resolution for AutoMaker",
"type": "module",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"exports": {
".": {
"types": "./dist/index.d.ts",
"import": "./dist/index.js",
"default": "./dist/index.js"
}
},
"scripts": {
"build": "tsc",
"watch": "tsc --watch",
"test": "vitest run",
"test:watch": "vitest"
},
"keywords": [
"automaker",
"dependency",
"resolver"
],
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
"dependencies": {
"@automaker/types": "^1.0.0"
},
"devDependencies": {
"@types/node": "^22.10.5",
"typescript": "^5.7.3",
"vitest": "^4.0.16"
}
}


@@ -0,0 +1,11 @@
/**
* @automaker/dependency-resolver
* Feature dependency resolution for AutoMaker
*/
export {
resolveDependencies,
areDependenciesSatisfied,
getBlockingDependencies,
type DependencyResolutionResult,
} from './resolver.js';


@@ -0,0 +1,211 @@
/**
* Dependency Resolution Utility
*
* Provides topological sorting and dependency analysis for features.
* Uses a modified Kahn's algorithm that respects both dependencies and priorities.
*/
import type { Feature } from '@automaker/types';
export interface DependencyResolutionResult {
orderedFeatures: Feature[]; // Features in dependency-aware order
circularDependencies: string[][]; // Groups of IDs forming cycles
missingDependencies: Map<string, string[]>; // featureId -> missing dep IDs
blockedFeatures: Map<string, string[]>; // featureId -> blocking dep IDs (incomplete dependencies)
}
/**
* Resolves feature dependencies using topological sort with priority-aware ordering.
*
* Algorithm:
* 1. Build dependency graph and detect missing/blocked dependencies
* 2. Apply Kahn's algorithm for topological sort
* 3. Within same dependency level, sort by priority (1=high, 2=medium, 3=low)
* 4. Detect circular dependencies for features that can't be ordered
*
* @param features - Array of features to order
* @returns Resolution result with ordered features and dependency metadata
*/
export function resolveDependencies(features: Feature[]): DependencyResolutionResult {
const featureMap = new Map<string, Feature>(features.map((f) => [f.id, f]));
const inDegree = new Map<string, number>();
const adjacencyList = new Map<string, string[]>(); // dependencyId -> [dependentIds]
const missingDependencies = new Map<string, string[]>();
const blockedFeatures = new Map<string, string[]>();
// Initialize graph structures
for (const feature of features) {
inDegree.set(feature.id, 0);
adjacencyList.set(feature.id, []);
}
// Build dependency graph and detect missing/blocked dependencies
for (const feature of features) {
const deps = feature.dependencies || [];
for (const depId of deps) {
if (!featureMap.has(depId)) {
// Missing dependency - track it
if (!missingDependencies.has(feature.id)) {
missingDependencies.set(feature.id, []);
}
missingDependencies.get(feature.id)!.push(depId);
} else {
// Valid dependency - add edge to graph
adjacencyList.get(depId)!.push(feature.id);
inDegree.set(feature.id, (inDegree.get(feature.id) || 0) + 1);
// Check if dependency is incomplete (blocking)
const depFeature = featureMap.get(depId)!;
if (depFeature.status !== 'completed' && depFeature.status !== 'verified') {
if (!blockedFeatures.has(feature.id)) {
blockedFeatures.set(feature.id, []);
}
blockedFeatures.get(feature.id)!.push(depId);
}
}
}
}
// Kahn's algorithm with priority-aware selection
const queue: Feature[] = [];
const orderedFeatures: Feature[] = [];
// Helper to sort features by priority (lower number = higher priority)
const sortByPriority = (a: Feature, b: Feature) => (a.priority ?? 2) - (b.priority ?? 2);
// Start with features that have no dependencies (in-degree 0)
for (const [id, degree] of inDegree) {
if (degree === 0) {
queue.push(featureMap.get(id)!);
}
}
// Sort initial queue by priority
queue.sort(sortByPriority);
// Process features in topological order
while (queue.length > 0) {
// Take highest priority feature from queue
const current = queue.shift()!;
orderedFeatures.push(current);
// Process features that depend on this one
for (const dependentId of adjacencyList.get(current.id) || []) {
const currentDegree = inDegree.get(dependentId);
if (currentDegree === undefined) {
throw new Error(`In-degree not initialized for feature ${dependentId}`);
}
const newDegree = currentDegree - 1;
inDegree.set(dependentId, newDegree);
if (newDegree === 0) {
queue.push(featureMap.get(dependentId)!);
// Re-sort queue to maintain priority order
queue.sort(sortByPriority);
}
}
}
// Detect circular dependencies (features not in output = part of cycle)
const circularDependencies: string[][] = [];
const processedIds = new Set(orderedFeatures.map((f) => f.id));
if (orderedFeatures.length < features.length) {
// Find cycles using DFS
const remaining = features.filter((f) => !processedIds.has(f.id));
const cycles = detectCycles(remaining, featureMap);
circularDependencies.push(...cycles);
// Add remaining features at end (part of cycles)
orderedFeatures.push(...remaining);
}
return {
orderedFeatures,
circularDependencies,
missingDependencies,
blockedFeatures,
};
}
/**
* Detects circular dependencies using depth-first search
*
* @param features - Features that couldn't be topologically sorted (potential cycles)
* @param featureMap - Map of all features by ID
* @returns Array of cycles, where each cycle is an array of feature IDs
*/
function detectCycles(features: Feature[], featureMap: Map<string, Feature>): string[][] {
const cycles: string[][] = [];
const visited = new Set<string>();
const recursionStack = new Set<string>();
const currentPath: string[] = [];
function dfs(featureId: string): boolean {
visited.add(featureId);
recursionStack.add(featureId);
currentPath.push(featureId);
const feature = featureMap.get(featureId);
if (feature) {
for (const depId of feature.dependencies || []) {
if (!visited.has(depId)) {
if (dfs(depId)) return true;
} else if (recursionStack.has(depId)) {
// Found cycle - extract it
const cycleStart = currentPath.indexOf(depId);
cycles.push(currentPath.slice(cycleStart));
return true;
}
}
}
currentPath.pop();
recursionStack.delete(featureId);
return false;
}
for (const feature of features) {
if (!visited.has(feature.id)) {
dfs(feature.id);
}
}
return cycles;
}
/**
* Checks if a feature's dependencies are satisfied (all complete or verified)
*
* @param feature - Feature to check
* @param allFeatures - All features in the project
* @returns true if all dependencies are satisfied, false otherwise
*/
export function areDependenciesSatisfied(feature: Feature, allFeatures: Feature[]): boolean {
if (!feature.dependencies || feature.dependencies.length === 0) {
return true; // No dependencies = always ready
}
return feature.dependencies.every((depId: string) => {
const dep = allFeatures.find((f) => f.id === depId);
return dep && (dep.status === 'completed' || dep.status === 'verified');
});
}
/**
* Gets the blocking dependencies for a feature (dependencies that are incomplete)
*
* @param feature - Feature to check
* @param allFeatures - All features in the project
* @returns Array of feature IDs that are blocking this feature
*/
export function getBlockingDependencies(feature: Feature, allFeatures: Feature[]): string[] {
if (!feature.dependencies || feature.dependencies.length === 0) {
return [];
}
return feature.dependencies.filter((depId: string) => {
const dep = allFeatures.find((f) => f.id === depId);
return dep && dep.status !== 'completed' && dep.status !== 'verified';
});
}


@@ -0,0 +1,351 @@
import { describe, it, expect } from 'vitest';
import {
resolveDependencies,
areDependenciesSatisfied,
getBlockingDependencies,
} from '../src/resolver';
import type { Feature } from '@automaker/types';
// Helper to create test features
function createFeature(
id: string,
options: {
dependencies?: string[];
status?: string;
priority?: number;
} = {}
): Feature {
return {
id,
category: 'test',
description: `Feature ${id}`,
dependencies: options.dependencies,
status: options.status || 'pending',
priority: options.priority,
};
}
describe('resolver.ts', () => {
describe('resolveDependencies', () => {
it('should handle features with no dependencies', () => {
const features = [createFeature('A'), createFeature('B'), createFeature('C')];
const result = resolveDependencies(features);
expect(result.orderedFeatures).toHaveLength(3);
expect(result.circularDependencies).toEqual([]);
expect(result.missingDependencies.size).toBe(0);
expect(result.blockedFeatures.size).toBe(0);
});
it('should order features with linear dependencies', () => {
const features = [
createFeature('C', { dependencies: ['B'] }),
createFeature('A'),
createFeature('B', { dependencies: ['A'] }),
];
const result = resolveDependencies(features);
const ids = result.orderedFeatures.map((f) => f.id);
expect(ids.indexOf('A')).toBeLessThan(ids.indexOf('B'));
expect(ids.indexOf('B')).toBeLessThan(ids.indexOf('C'));
expect(result.circularDependencies).toEqual([]);
});
it('should respect priority within same dependency level', () => {
const features = [
createFeature('Low', { priority: 3 }),
createFeature('High', { priority: 1 }),
createFeature('Medium', { priority: 2 }),
];
const result = resolveDependencies(features);
const ids = result.orderedFeatures.map((f) => f.id);
expect(ids).toEqual(['High', 'Medium', 'Low']);
});
it('should use default priority 2 when not specified', () => {
const features = [
createFeature('NoPriority'),
createFeature('HighPriority', { priority: 1 }),
createFeature('LowPriority', { priority: 3 }),
];
const result = resolveDependencies(features);
const ids = result.orderedFeatures.map((f) => f.id);
expect(ids.indexOf('HighPriority')).toBeLessThan(ids.indexOf('NoPriority'));
expect(ids.indexOf('NoPriority')).toBeLessThan(ids.indexOf('LowPriority'));
});
it('should respect dependencies over priority', () => {
const features = [
createFeature('B', { dependencies: ['A'], priority: 1 }), // High priority but depends on A
createFeature('A', { priority: 3 }), // Low priority but no dependencies
];
const result = resolveDependencies(features);
const ids = result.orderedFeatures.map((f) => f.id);
expect(ids.indexOf('A')).toBeLessThan(ids.indexOf('B'));
});
it('should detect circular dependencies (simple cycle)', () => {
const features = [
createFeature('A', { dependencies: ['B'] }),
createFeature('B', { dependencies: ['A'] }),
];
const result = resolveDependencies(features);
expect(result.circularDependencies).toHaveLength(1);
expect(result.circularDependencies[0]).toContain('A');
expect(result.circularDependencies[0]).toContain('B');
expect(result.orderedFeatures).toHaveLength(2); // All features still included
});
it('should detect circular dependencies (3-way cycle)', () => {
const features = [
createFeature('A', { dependencies: ['C'] }),
createFeature('B', { dependencies: ['A'] }),
createFeature('C', { dependencies: ['B'] }),
];
const result = resolveDependencies(features);
expect(result.circularDependencies.length).toBeGreaterThan(0);
const allCycleIds = result.circularDependencies.flat();
expect(allCycleIds).toContain('A');
expect(allCycleIds).toContain('B');
expect(allCycleIds).toContain('C');
});
it('should detect missing dependencies', () => {
const features = [createFeature('A', { dependencies: ['NonExistent'] }), createFeature('B')];
const result = resolveDependencies(features);
expect(result.missingDependencies.has('A')).toBe(true);
expect(result.missingDependencies.get('A')).toContain('NonExistent');
});
it('should detect blocked features (incomplete dependencies)', () => {
const features = [
createFeature('A', { status: 'pending' }),
createFeature('B', { dependencies: ['A'], status: 'pending' }),
];
const result = resolveDependencies(features);
expect(result.blockedFeatures.has('B')).toBe(true);
expect(result.blockedFeatures.get('B')).toContain('A');
});
it('should not mark features as blocked if dependencies are completed', () => {
const features = [
createFeature('A', { status: 'completed' }),
createFeature('B', { dependencies: ['A'], status: 'pending' }),
];
const result = resolveDependencies(features);
expect(result.blockedFeatures.has('B')).toBe(false);
});
it('should not mark features as blocked if dependencies are verified', () => {
const features = [
createFeature('A', { status: 'verified' }),
createFeature('B', { dependencies: ['A'], status: 'pending' }),
];
const result = resolveDependencies(features);
expect(result.blockedFeatures.has('B')).toBe(false);
});
it('should handle complex dependency graph', () => {
const features = [
createFeature('E', { dependencies: ['C', 'D'] }),
createFeature('D', { dependencies: ['B'] }),
createFeature('C', { dependencies: ['A', 'B'] }),
createFeature('B'),
createFeature('A'),
];
const result = resolveDependencies(features);
const ids = result.orderedFeatures.map((f) => f.id);
// A and B have no dependencies - can be first or second
expect(ids.indexOf('A')).toBeLessThan(ids.indexOf('C'));
expect(ids.indexOf('B')).toBeLessThan(ids.indexOf('C'));
expect(ids.indexOf('B')).toBeLessThan(ids.indexOf('D'));
// C depends on A and B
expect(ids.indexOf('C')).toBeLessThan(ids.indexOf('E'));
// D depends on B
expect(ids.indexOf('D')).toBeLessThan(ids.indexOf('E'));
expect(result.circularDependencies).toEqual([]);
});
it('should handle multiple missing dependencies', () => {
const features = [createFeature('A', { dependencies: ['X', 'Y', 'Z'] })];
const result = resolveDependencies(features);
expect(result.missingDependencies.get('A')).toEqual(['X', 'Y', 'Z']);
});
it('should handle empty feature list', () => {
const result = resolveDependencies([]);
expect(result.orderedFeatures).toEqual([]);
expect(result.circularDependencies).toEqual([]);
expect(result.missingDependencies.size).toBe(0);
expect(result.blockedFeatures.size).toBe(0);
});
it('should handle features with both missing and existing dependencies', () => {
const features = [
createFeature('A'),
createFeature('B', { dependencies: ['A', 'NonExistent'] }),
];
const result = resolveDependencies(features);
expect(result.missingDependencies.get('B')).toContain('NonExistent');
const ids = result.orderedFeatures.map((f) => f.id);
expect(ids.indexOf('A')).toBeLessThan(ids.indexOf('B'));
});
});
describe('areDependenciesSatisfied', () => {
it('should return true for feature with no dependencies', () => {
const feature = createFeature('A');
const allFeatures = [feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(true);
});
it('should return true for feature with empty dependencies array', () => {
const feature = createFeature('A', { dependencies: [] });
const allFeatures = [feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(true);
});
it('should return true when all dependencies are completed', () => {
const dep = createFeature('Dep', { status: 'completed' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(true);
});
it('should return true when all dependencies are verified', () => {
const dep = createFeature('Dep', { status: 'verified' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(true);
});
it('should return false when any dependency is pending', () => {
const dep = createFeature('Dep', { status: 'pending' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(false);
});
it('should return false when any dependency is running', () => {
const dep = createFeature('Dep', { status: 'running' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(false);
});
it('should return false when dependency is missing', () => {
const feature = createFeature('A', { dependencies: ['NonExistent'] });
const allFeatures = [feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(false);
});
it('should check all dependencies', () => {
const dep1 = createFeature('Dep1', { status: 'completed' });
const dep2 = createFeature('Dep2', { status: 'pending' });
const feature = createFeature('A', { dependencies: ['Dep1', 'Dep2'] });
const allFeatures = [dep1, dep2, feature];
expect(areDependenciesSatisfied(feature, allFeatures)).toBe(false);
});
});
describe('getBlockingDependencies', () => {
it('should return empty array for feature with no dependencies', () => {
const feature = createFeature('A');
const allFeatures = [feature];
expect(getBlockingDependencies(feature, allFeatures)).toEqual([]);
});
it('should return empty array when all dependencies are completed', () => {
const dep = createFeature('Dep', { status: 'completed' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(getBlockingDependencies(feature, allFeatures)).toEqual([]);
});
it('should return empty array when all dependencies are verified', () => {
const dep = createFeature('Dep', { status: 'verified' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(getBlockingDependencies(feature, allFeatures)).toEqual([]);
});
it('should return pending dependencies', () => {
const dep = createFeature('Dep', { status: 'pending' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(getBlockingDependencies(feature, allFeatures)).toEqual(['Dep']);
});
it('should return running dependencies', () => {
const dep = createFeature('Dep', { status: 'running' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(getBlockingDependencies(feature, allFeatures)).toEqual(['Dep']);
});
it('should return failed dependencies', () => {
const dep = createFeature('Dep', { status: 'failed' });
const feature = createFeature('A', { dependencies: ['Dep'] });
const allFeatures = [dep, feature];
expect(getBlockingDependencies(feature, allFeatures)).toEqual(['Dep']);
});
it('should return all incomplete dependencies', () => {
const dep1 = createFeature('Dep1', { status: 'pending' });
const dep2 = createFeature('Dep2', { status: 'completed' });
const dep3 = createFeature('Dep3', { status: 'running' });
const feature = createFeature('A', { dependencies: ['Dep1', 'Dep2', 'Dep3'] });
const allFeatures = [dep1, dep2, dep3, feature];
const blocking = getBlockingDependencies(feature, allFeatures);
expect(blocking).toContain('Dep1');
expect(blocking).toContain('Dep3');
expect(blocking).not.toContain('Dep2');
});
});
});


@@ -0,0 +1,9 @@
{
"extends": "../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}


@@ -0,0 +1,21 @@
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true,
environment: 'node',
include: ['tests/**/*.test.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'json', 'html'],
include: ['src/**/*.ts'],
exclude: ['src/**/*.d.ts', 'src/index.ts'],
thresholds: {
lines: 90,
functions: 100,
branches: 85,
statements: 90,
},
},
},
});

libs/git-utils/README.md (new file, 276 lines added)

@@ -0,0 +1,276 @@
# @automaker/git-utils
Git operations and utilities for AutoMaker.
## Overview
This package provides git-related utilities including repository detection, status parsing, and diff generation for both tracked and untracked files.
## Installation
```bash
npm install @automaker/git-utils
```
## Exports
### Repository Detection
Check if a path is a git repository.
```typescript
import { isGitRepo } from '@automaker/git-utils';
const isRepo = await isGitRepo('/project/path');
if (isRepo) {
console.log('This is a git repository');
}
```
### Status Parsing
Parse git status output into structured data.
```typescript
import { parseGitStatus } from '@automaker/git-utils';
import type { FileStatus } from '@automaker/git-utils';
import { exec } from 'child_process';
import { promisify } from 'util';

const execAsync = promisify(exec);
const statusOutput = await execAsync('git status --porcelain', { cwd: '/project/path' });
const files: FileStatus[] = parseGitStatus(statusOutput.stdout);
files.forEach((file) => {
  console.log(`${file.statusText}: ${file.path}`);
  // Example: "Modified: src/index.ts"
  // Example: "Untracked: new-file.ts"
});
```
### Diff Generation
Generate diffs including untracked files.
```typescript
import {
generateSyntheticDiffForNewFile,
appendUntrackedFileDiffs,
getGitRepositoryDiffs,
} from '@automaker/git-utils';
// Generate diff for single untracked file
const diff = await generateSyntheticDiffForNewFile('/project/path', 'src/new-file.ts');
// Get complete repository diffs (tracked + untracked)
const result = await getGitRepositoryDiffs('/project/path');
console.log(result.diff); // Combined diff string
console.log(result.files); // Array of FileStatus
console.log(result.hasChanges); // Boolean
```
### Non-Git Directory Support
Handle non-git directories by treating all files as new.
```typescript
import { listAllFilesInDirectory, generateDiffsForNonGitDirectory } from '@automaker/git-utils';
// List all files (excluding build artifacts)
const files = await listAllFilesInDirectory('/project/path');
// Generate diffs for non-git directory
const result = await generateDiffsForNonGitDirectory('/project/path');
console.log(result.diff); // Synthetic diffs for all files
console.log(result.files); // All files as "New" status
```
## Types
### FileStatus
```typescript
interface FileStatus {
  status: string;     // Git status code (M/A/D/R/C/U/?/!)
  path: string;       // File path relative to repo root
  statusText: string; // Human-readable status
}
```
### Status Codes
- `M` - Modified
- `A` - Added
- `D` - Deleted
- `R` - Renamed
- `C` - Copied
- `U` - Updated
- `?` - Untracked
- `!` - Ignored
- ` ` - Unmodified
### Status Text Examples
- `"Modified"` - File has changes
- `"Added"` - New file in staging
- `"Deleted"` - File removed
- `"Renamed"` - File renamed
- `"Untracked"` - New file not in git
- `"Modified (staged), Modified (unstaged)"` - Changes in both areas
## Usage Example
```typescript
import { isGitRepo, getGitRepositoryDiffs } from '@automaker/git-utils';

async function getProjectChanges(projectPath: string) {
  const isRepo = await isGitRepo(projectPath);
  if (!isRepo) {
    console.log('Not a git repository, analyzing all files...');
  }

  const result = await getGitRepositoryDiffs(projectPath);
  if (!result.hasChanges) {
    console.log('No changes detected');
    return;
  }
  console.log(`Found ${result.files.length} changed files:\n`);

  // Group by status
  const byStatus = result.files.reduce(
    (acc, file) => {
      acc[file.statusText] = acc[file.statusText] || [];
      acc[file.statusText].push(file.path);
      return acc;
    },
    {} as Record<string, string[]>
  );

  Object.entries(byStatus).forEach(([status, paths]) => {
    console.log(`${status}:`);
    paths.forEach((path) => console.log(`  - ${path}`));
  });

  return result.diff;
}
```
## Features
### Binary File Detection
Automatically detects binary files by extension and generates appropriate diff markers.
**Supported binary extensions:**
- Images: `.png`, `.jpg`, `.jpeg`, `.gif`, `.svg`, etc.
- Documents: `.pdf`, `.doc`, `.docx`, etc.
- Archives: `.zip`, `.tar`, `.gz`, etc.
- Media: `.mp3`, `.mp4`, `.wav`, etc.
- Fonts: `.ttf`, `.otf`, `.woff`, etc.
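For a new binary file, the synthetic diff is a short marker instead of file content (sketch; the path is illustrative):

```typescript
import { generateSyntheticDiffForNewFile } from '@automaker/git-utils';

const diff = await generateSyntheticDiffForNewFile('/project/path', 'assets/logo.png');
// diff --git a/assets/logo.png b/assets/logo.png
// new file mode 100644
// index 0000000..0000000
// Binary file assets/logo.png added
```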
### Large File Handling
Files larger than 1MB show size information instead of full content.
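For such files the hunk body is a one-line size note (sketch; path and size are illustrative):

```typescript
import { generateSyntheticDiffForNewFile } from '@automaker/git-utils';

const diff = await generateSyntheticDiffForNewFile('/project/path', 'data/dump.sql');
// ...
// @@ -0,0 +1 @@
// +[File too large to display: 2048KB]
```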
### Synthetic Diff Format
Generates unified diff format for untracked files:
```diff
diff --git a/new-file.ts b/new-file.ts
new file mode 100644
index 0000000..0000000
--- /dev/null
+++ b/new-file.ts
@@ -0,0 +1,10 @@
+export function hello() {
+ console.log('Hello');
+}
```
### Directory Filtering
When scanning non-git directories, automatically excludes:
- `node_modules`, `.git`, `.automaker`
- Build outputs: `dist`, `build`, `out`, `tmp`, `.tmp`
- Framework caches: `.next`, `.nuxt`, `.cache`, `coverage`
- Language-specific: `__pycache__` (Python), `target` (Rust), `vendor` (Go/PHP), `.gradle` (Gradle), `.venv`/`venv` (Python)
## Error Handling
Git operations can fail for various reasons. This package provides graceful error handling patterns:
### Common Error Scenarios
**1. Repository Not Found**
```typescript
const isRepo = await isGitRepo('/path/does/not/exist');
// Returns: false (no exception thrown)
```
**2. Not a Git Repository**
```typescript
const result = await getGitRepositoryDiffs('/not/a/git/repo');
// Fallback behavior: treats all files as "new"
// Returns synthetic diffs for all files in directory
```
**3. Git Command Failures**
```typescript
// Permission errors, corrupted repos, or git not installed
try {
  const result = await getGitRepositoryDiffs('/project');
} catch (error) {
  // Handle errors from git commands
  // Errors are logged via @automaker/utils logger
  console.error('Git operation failed:', error);
}
```
**4. File Read Errors**
```typescript
// When generating synthetic diffs for inaccessible files
const diff = await generateSyntheticDiffForNewFile('/path', 'locked-file.txt');
// Returns placeholder: "[Unable to read file content]"
// Error is logged but doesn't throw
```
### Best Practices
1. **Check repository status first**:
```typescript
const isRepo = await isGitRepo(path);
if (!isRepo) {
// Handle non-git case appropriately
}
```
2. **Expect non-git directories**:
- `getGitRepositoryDiffs()` automatically handles both cases
- Always returns a valid result structure
3. **Monitor logs**:
- Errors are logged with the `[GitUtils]` prefix
- Check logs for permission issues or git configuration problems
4. **Handle edge cases**:
- Empty repositories (no commits yet)
- Detached HEAD states
- Corrupted git repositories
- Missing git binary
## Dependencies
- `@automaker/types` - FileStatus type definition
- `@automaker/utils` - Logger utilities
## Used By
- `@automaker/server` - Git routes, worktree operations, feature context


@@ -0,0 +1,30 @@
{
"name": "@automaker/git-utils",
"version": "1.0.0",
"type": "module",
"description": "Git operations utilities for AutoMaker",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"watch": "tsc --watch",
"test": "vitest run",
"test:watch": "vitest"
},
"keywords": [
"automaker",
"git",
"utils"
],
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
"dependencies": {
"@automaker/types": "^1.0.0",
"@automaker/utils": "^1.0.0"
},
"devDependencies": {
"@types/node": "^22.10.5",
"typescript": "^5.7.3",
"vitest": "^4.0.16"
}
}

libs/git-utils/src/diff.ts (new file, 256 lines added)

@@ -0,0 +1,256 @@
/**
* Git diff generation utilities
*/
import { createLogger } from '@automaker/utils';
import { secureFs } from '@automaker/platform';
import path from 'path';
import { exec } from 'child_process';
import { promisify } from 'util';
import { BINARY_EXTENSIONS, type FileStatus } from './types.js';
import { isGitRepo, parseGitStatus } from './status.js';
const execAsync = promisify(exec);
const logger = createLogger('GitUtils');
// Max file size for generating synthetic diffs (1MB)
const MAX_SYNTHETIC_DIFF_SIZE = 1024 * 1024;
/**
* Check if a file is likely binary based on extension
*/
function isBinaryFile(filePath: string): boolean {
const ext = path.extname(filePath).toLowerCase();
return BINARY_EXTENSIONS.has(ext);
}
/**
* Generate a synthetic unified diff for an untracked (new) file
* This is needed because `git diff HEAD` doesn't include untracked files
*/
export async function generateSyntheticDiffForNewFile(
basePath: string,
relativePath: string
): Promise<string> {
const fullPath = path.join(basePath, relativePath);
try {
// Check if it's a binary file
if (isBinaryFile(relativePath)) {
return `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
Binary file ${relativePath} added
`;
}
// Get file stats to check size
const stats = await secureFs.stat(fullPath);
if (stats.size > MAX_SYNTHETIC_DIFF_SIZE) {
const sizeKB = Math.round(stats.size / 1024);
return `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
--- /dev/null
+++ b/${relativePath}
@@ -0,0 +1 @@
+[File too large to display: ${sizeKB}KB]
`;
}
// Read file content
const content = (await secureFs.readFile(fullPath, 'utf-8')) as string;
const hasTrailingNewline = content.endsWith('\n');
const lines = content.split('\n');
// Remove trailing empty line if the file ends with newline
if (lines.length > 0 && lines.at(-1) === '') {
lines.pop();
}
// Generate diff format
const lineCount = lines.length;
const addedLines = lines.map((line) => `+${line}`).join('\n');
let diff = `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
--- /dev/null
+++ b/${relativePath}
@@ -0,0 +1,${lineCount} @@
${addedLines}`;
// Add "No newline at end of file" indicator if needed
if (!hasTrailingNewline && content.length > 0) {
diff += '\n\\ No newline at end of file';
}
return diff + '\n';
} catch (error) {
// Log the error for debugging
logger.error(`Failed to generate synthetic diff for ${fullPath}:`, error);
// Return a placeholder diff
return `diff --git a/${relativePath} b/${relativePath}
new file mode 100644
index 0000000..0000000
--- /dev/null
+++ b/${relativePath}
@@ -0,0 +1 @@
+[Unable to read file content]
`;
}
}
/**
* Generate synthetic diffs for all untracked files and combine with existing diff
*/
export async function appendUntrackedFileDiffs(
basePath: string,
existingDiff: string,
files: Array<{ status: string; path: string }>
): Promise<string> {
// Find untracked files (status "?")
const untrackedFiles = files.filter((f) => f.status === '?');
if (untrackedFiles.length === 0) {
return existingDiff;
}
// Generate synthetic diffs for each untracked file
const syntheticDiffs = await Promise.all(
untrackedFiles.map((f) => generateSyntheticDiffForNewFile(basePath, f.path))
);
// Combine existing diff with synthetic diffs
const combinedDiff = existingDiff + syntheticDiffs.join('');
return combinedDiff;
}
/**
* List all files in a directory recursively (for non-git repositories)
* Excludes hidden files/folders and common build artifacts
*/
export async function listAllFilesInDirectory(
basePath: string,
relativePath: string = ''
): Promise<string[]> {
const files: string[] = [];
const fullPath = path.join(basePath, relativePath);
// Directories to skip
const skipDirs = new Set([
'node_modules',
'.git',
'.automaker',
'dist',
'build',
'.next',
'.nuxt',
'__pycache__',
'.cache',
'coverage',
'.venv',
'venv',
'target',
'vendor',
'.gradle',
'out',
'tmp',
'.tmp',
]);
try {
const entries = await secureFs.readdir(fullPath, { withFileTypes: true });
for (const entry of entries) {
// Skip hidden files/folders (except .env, which we keep)
if (entry.name.startsWith('.') && entry.name !== '.env') {
continue;
}
const entryRelPath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
if (entry.isDirectory()) {
if (!skipDirs.has(entry.name)) {
const subFiles = await listAllFilesInDirectory(basePath, entryRelPath);
files.push(...subFiles);
}
} else if (entry.isFile()) {
files.push(entryRelPath);
}
}
} catch (error) {
// Log the error to help diagnose file system issues
logger.error(`Error reading directory ${fullPath}:`, error);
}
return files;
}
/**
* Generate diffs for all files in a non-git directory
* Treats all files as "new" files
*/
export async function generateDiffsForNonGitDirectory(
basePath: string
): Promise<{ diff: string; files: FileStatus[] }> {
const allFiles = await listAllFilesInDirectory(basePath);
const files: FileStatus[] = allFiles.map((filePath) => ({
status: '?',
path: filePath,
statusText: 'New',
}));
// Generate synthetic diffs for all files
const syntheticDiffs = await Promise.all(
files.map((f) => generateSyntheticDiffForNewFile(basePath, f.path))
);
return {
diff: syntheticDiffs.join(''),
files,
};
}
/**
* Get git repository diffs for a given path
* Handles both git repos and non-git directories
*/
export async function getGitRepositoryDiffs(
repoPath: string
): Promise<{ diff: string; files: FileStatus[]; hasChanges: boolean }> {
// Check if it's a git repository
const isRepo = await isGitRepo(repoPath);
if (!isRepo) {
// Not a git repo - list all files and treat them as new
const result = await generateDiffsForNonGitDirectory(repoPath);
return {
diff: result.diff,
files: result.files,
hasChanges: result.files.length > 0,
};
}
// Get git diff and status
const { stdout: diff } = await execAsync('git diff HEAD', {
cwd: repoPath,
maxBuffer: 10 * 1024 * 1024,
});
const { stdout: status } = await execAsync('git status --porcelain', {
cwd: repoPath,
});
const files = parseGitStatus(status);
// Generate synthetic diffs for untracked (new) files
const combinedDiff = await appendUntrackedFileDiffs(repoPath, diff, files);
return {
diff: combinedDiff,
files,
hasChanges: files.length > 0,
};
}


@@ -0,0 +1,19 @@
/**
* @automaker/git-utils
* Git operations utilities for AutoMaker
*/
// Export types and constants
export { BINARY_EXTENSIONS, GIT_STATUS_MAP, type FileStatus } from './types.js';
// Export status utilities
export { isGitRepo, parseGitStatus } from './status.js';
// Export diff utilities
export {
generateSyntheticDiffForNewFile,
appendUntrackedFileDiffs,
listAllFilesInDirectory,
generateDiffsForNonGitDirectory,
getGitRepositoryDiffs,
} from './diff.js';


@@ -0,0 +1,104 @@
/**
* Git status parsing utilities
*/
import { exec } from 'child_process';
import { promisify } from 'util';
import { GIT_STATUS_MAP, type FileStatus } from './types.js';
const execAsync = promisify(exec);
/**
* Get a readable status text from git status codes
* Handles both single character and XY format status codes
*/
function getStatusText(indexStatus: string, workTreeStatus: string): string {
// Untracked files
if (indexStatus === '?' && workTreeStatus === '?') {
return 'Untracked';
}
// Ignored files
if (indexStatus === '!' && workTreeStatus === '!') {
return 'Ignored';
}
// Prioritize staging area status, then working tree
const primaryStatus = indexStatus !== ' ' && indexStatus !== '?' ? indexStatus : workTreeStatus;
// Handle combined statuses
if (
indexStatus !== ' ' &&
indexStatus !== '?' &&
workTreeStatus !== ' ' &&
workTreeStatus !== '?'
) {
// Both staging and working tree have changes
const indexText = GIT_STATUS_MAP[indexStatus] || 'Changed';
const workText = GIT_STATUS_MAP[workTreeStatus] || 'Changed';
if (indexText === workText) {
return indexText;
}
return `${indexText} (staged), ${workText} (unstaged)`;
}
return GIT_STATUS_MAP[primaryStatus] || 'Changed';
}
/**
* Check if a path is a git repository
*/
export async function isGitRepo(repoPath: string): Promise<boolean> {
try {
await execAsync('git rev-parse --is-inside-work-tree', { cwd: repoPath });
return true;
} catch {
return false;
}
}
/**
* Parse the output of `git status --porcelain` into FileStatus array
* Git porcelain format: XY PATH where X=staging area status, Y=working tree status
* For renamed files: XY ORIG_PATH -> NEW_PATH
*/
export function parseGitStatus(statusOutput: string): FileStatus[] {
return statusOutput
.split('\n')
.filter(Boolean)
.map((line) => {
// Git porcelain format uses two status characters: XY
// X = status in staging area (index)
// Y = status in working tree
const indexStatus = line[0] || ' ';
const workTreeStatus = line[1] || ' ';
// File path starts at position 3 (after "XY ")
let filePath = line.slice(3);
// Handle renamed files (format: "R old_path -> new_path")
if (indexStatus === 'R' || workTreeStatus === 'R') {
const arrowIndex = filePath.indexOf(' -> ');
if (arrowIndex !== -1) {
filePath = filePath.slice(arrowIndex + 4); // Use new path
}
}
// Determine the primary status character for backwards compatibility
// Prioritize staging area status, then working tree
let primaryStatus: string;
if (indexStatus === '?' && workTreeStatus === '?') {
primaryStatus = '?'; // Untracked
} else if (indexStatus !== ' ' && indexStatus !== '?') {
primaryStatus = indexStatus; // Staged change
} else {
primaryStatus = workTreeStatus; // Working tree change
}
return {
status: primaryStatus,
path: filePath,
statusText: getStatusText(indexStatus, workTreeStatus),
};
});
}


@@ -0,0 +1,73 @@
/**
* Git utilities types and constants
*/
// Binary file extensions to skip
export const BINARY_EXTENSIONS = new Set([
'.png',
'.jpg',
'.jpeg',
'.gif',
'.bmp',
'.ico',
'.webp',
'.svg',
'.pdf',
'.doc',
'.docx',
'.xls',
'.xlsx',
'.ppt',
'.pptx',
'.zip',
'.tar',
'.gz',
'.rar',
'.7z',
'.exe',
'.dll',
'.so',
'.dylib',
'.mp3',
'.mp4',
'.wav',
'.avi',
'.mov',
'.mkv',
'.ttf',
'.otf',
'.woff',
'.woff2',
'.eot',
'.db',
'.sqlite',
'.sqlite3',
'.pyc',
'.pyo',
'.class',
'.o',
'.obj',
]);
// Status map for git status codes
// Git porcelain format uses XY where X=staging area, Y=working tree
export const GIT_STATUS_MAP: Record<string, string> = {
M: 'Modified',
A: 'Added',
D: 'Deleted',
R: 'Renamed',
C: 'Copied',
U: 'Updated',
'?': 'Untracked',
'!': 'Ignored',
' ': 'Unmodified',
};
/**
* File status interface for git status results
*/
export interface FileStatus {
status: string;
path: string;
statusText: string;
}


@@ -0,0 +1,306 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import {
generateSyntheticDiffForNewFile,
appendUntrackedFileDiffs,
listAllFilesInDirectory,
generateDiffsForNonGitDirectory,
getGitRepositoryDiffs,
} from '../src/diff';
import fs from 'fs/promises';
import path from 'path';
import os from 'os';
describe('diff.ts', () => {
let tempDir: string;
beforeEach(async () => {
// Create a temporary directory for each test
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'git-utils-test-'));
});
afterEach(async () => {
// Clean up temporary directory
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe('generateSyntheticDiffForNewFile', () => {
it('should generate diff for binary file', async () => {
const fileName = 'test.png';
const filePath = path.join(tempDir, fileName);
await fs.writeFile(filePath, Buffer.from([0x89, 0x50, 0x4e, 0x47]));
const diff = await generateSyntheticDiffForNewFile(tempDir, fileName);
expect(diff).toContain(`diff --git a/${fileName} b/${fileName}`);
expect(diff).toContain('new file mode 100644');
expect(diff).toContain(`Binary file ${fileName} added`);
});
it('should generate diff for large text file', async () => {
const fileName = 'large.txt';
const filePath = path.join(tempDir, fileName);
// Create a file > 1MB
const largeContent = 'x'.repeat(1024 * 1024 + 100);
await fs.writeFile(filePath, largeContent);
const diff = await generateSyntheticDiffForNewFile(tempDir, fileName);
expect(diff).toContain(`diff --git a/${fileName} b/${fileName}`);
expect(diff).toContain('[File too large to display:');
expect(diff).toMatch(/\d+KB\]/);
});
it('should generate diff for small text file with trailing newline', async () => {
const fileName = 'test.txt';
const filePath = path.join(tempDir, fileName);
const content = 'line 1\nline 2\nline 3\n';
await fs.writeFile(filePath, content);
const diff = await generateSyntheticDiffForNewFile(tempDir, fileName);
expect(diff).toContain(`diff --git a/${fileName} b/${fileName}`);
expect(diff).toContain('new file mode 100644');
expect(diff).toContain('--- /dev/null');
expect(diff).toContain(`+++ b/${fileName}`);
expect(diff).toContain('@@ -0,0 +1,3 @@');
expect(diff).toContain('+line 1');
expect(diff).toContain('+line 2');
expect(diff).toContain('+line 3');
expect(diff).not.toContain('\\ No newline at end of file');
});
it('should generate diff for text file without trailing newline', async () => {
const fileName = 'no-newline.txt';
const filePath = path.join(tempDir, fileName);
const content = 'line 1\nline 2';
await fs.writeFile(filePath, content);
const diff = await generateSyntheticDiffForNewFile(tempDir, fileName);
expect(diff).toContain(`diff --git a/${fileName} b/${fileName}`);
expect(diff).toContain('+line 1');
expect(diff).toContain('+line 2');
expect(diff).toContain('\\ No newline at end of file');
});
it('should generate diff for empty file', async () => {
const fileName = 'empty.txt';
const filePath = path.join(tempDir, fileName);
await fs.writeFile(filePath, '');
const diff = await generateSyntheticDiffForNewFile(tempDir, fileName);
expect(diff).toContain(`diff --git a/${fileName} b/${fileName}`);
expect(diff).toContain('@@ -0,0 +1,0 @@');
});
it('should generate diff for single line file', async () => {
const fileName = 'single.txt';
const filePath = path.join(tempDir, fileName);
await fs.writeFile(filePath, 'single line\n');
const diff = await generateSyntheticDiffForNewFile(tempDir, fileName);
expect(diff).toContain('@@ -0,0 +1,1 @@');
expect(diff).toContain('+single line');
});
it('should handle file not found error', async () => {
const fileName = 'nonexistent.txt';
const diff = await generateSyntheticDiffForNewFile(tempDir, fileName);
expect(diff).toContain(`diff --git a/${fileName} b/${fileName}`);
expect(diff).toContain('[Unable to read file content]');
});
});
describe('appendUntrackedFileDiffs', () => {
it('should return existing diff when no untracked files', async () => {
const existingDiff = 'diff --git a/test.txt b/test.txt\n';
const files = [
{ status: 'M', path: 'test.txt' },
{ status: 'A', path: 'new.txt' },
];
const result = await appendUntrackedFileDiffs(tempDir, existingDiff, files);
expect(result).toBe(existingDiff);
});
it('should append synthetic diffs for untracked files', async () => {
const existingDiff = 'existing diff\n';
const untrackedFile = 'untracked.txt';
const filePath = path.join(tempDir, untrackedFile);
await fs.writeFile(filePath, 'content\n');
const files = [
{ status: 'M', path: 'modified.txt' },
{ status: '?', path: untrackedFile },
];
const result = await appendUntrackedFileDiffs(tempDir, existingDiff, files);
expect(result).toContain('existing diff');
expect(result).toContain(`diff --git a/${untrackedFile} b/${untrackedFile}`);
expect(result).toContain('+content');
});
it('should handle multiple untracked files', async () => {
const file1 = 'file1.txt';
const file2 = 'file2.txt';
await fs.writeFile(path.join(tempDir, file1), 'file1\n');
await fs.writeFile(path.join(tempDir, file2), 'file2\n');
const files = [
{ status: '?', path: file1 },
{ status: '?', path: file2 },
];
const result = await appendUntrackedFileDiffs(tempDir, '', files);
expect(result).toContain(`diff --git a/${file1} b/${file1}`);
expect(result).toContain(`diff --git a/${file2} b/${file2}`);
expect(result).toContain('+file1');
expect(result).toContain('+file2');
});
});
describe('listAllFilesInDirectory', () => {
it('should list files in empty directory', async () => {
const files = await listAllFilesInDirectory(tempDir);
expect(files).toEqual([]);
});
it('should list files in flat directory', async () => {
await fs.writeFile(path.join(tempDir, 'file1.txt'), 'content');
await fs.writeFile(path.join(tempDir, 'file2.js'), 'code');
const files = await listAllFilesInDirectory(tempDir);
expect(files).toHaveLength(2);
expect(files).toContain('file1.txt');
expect(files).toContain('file2.js');
});
it('should list files in nested directories', async () => {
await fs.mkdir(path.join(tempDir, 'subdir'));
await fs.writeFile(path.join(tempDir, 'root.txt'), '');
await fs.writeFile(path.join(tempDir, 'subdir', 'nested.txt'), '');
const files = await listAllFilesInDirectory(tempDir);
expect(files).toHaveLength(2);
expect(files).toContain('root.txt');
expect(files).toContain('subdir/nested.txt');
});
it('should skip node_modules directory', async () => {
await fs.mkdir(path.join(tempDir, 'node_modules'));
await fs.writeFile(path.join(tempDir, 'app.js'), '');
await fs.writeFile(path.join(tempDir, 'node_modules', 'package.js'), '');
const files = await listAllFilesInDirectory(tempDir);
expect(files).toHaveLength(1);
expect(files).toContain('app.js');
expect(files).not.toContain('node_modules/package.js');
});
it('should skip common build directories', async () => {
await fs.mkdir(path.join(tempDir, 'dist'));
await fs.mkdir(path.join(tempDir, 'build'));
await fs.mkdir(path.join(tempDir, '.next'));
await fs.writeFile(path.join(tempDir, 'source.ts'), '');
await fs.writeFile(path.join(tempDir, 'dist', 'output.js'), '');
await fs.writeFile(path.join(tempDir, 'build', 'output.js'), '');
const files = await listAllFilesInDirectory(tempDir);
expect(files).toHaveLength(1);
expect(files).toContain('source.ts');
});
it('should skip hidden files except .env', async () => {
await fs.writeFile(path.join(tempDir, '.hidden'), '');
await fs.writeFile(path.join(tempDir, '.env'), '');
await fs.writeFile(path.join(tempDir, 'visible.txt'), '');
const files = await listAllFilesInDirectory(tempDir);
expect(files).toHaveLength(2);
expect(files).toContain('.env');
expect(files).toContain('visible.txt');
expect(files).not.toContain('.hidden');
});
it('should skip .git directory', async () => {
await fs.mkdir(path.join(tempDir, '.git'));
await fs.writeFile(path.join(tempDir, '.git', 'config'), '');
await fs.writeFile(path.join(tempDir, 'README.md'), '');
const files = await listAllFilesInDirectory(tempDir);
expect(files).toHaveLength(1);
expect(files).toContain('README.md');
});
});
describe('generateDiffsForNonGitDirectory', () => {
it('should generate diffs for all files in directory', async () => {
await fs.writeFile(path.join(tempDir, 'file1.txt'), 'content1\n');
await fs.writeFile(path.join(tempDir, 'file2.js'), "console.log('hi');\n");
const result = await generateDiffsForNonGitDirectory(tempDir);
expect(result.files).toHaveLength(2);
expect(result.files.every((f) => f.status === '?')).toBe(true);
expect(result.diff).toContain('diff --git a/file1.txt b/file1.txt');
expect(result.diff).toContain('diff --git a/file2.js b/file2.js');
expect(result.diff).toContain('+content1');
expect(result.diff).toContain("+console.log('hi');");
});
it('should return empty result for empty directory', async () => {
const result = await generateDiffsForNonGitDirectory(tempDir);
expect(result.files).toEqual([]);
expect(result.diff).toBe('');
});
it('should mark all files as untracked', async () => {
await fs.writeFile(path.join(tempDir, 'test.txt'), 'test');
const result = await generateDiffsForNonGitDirectory(tempDir);
expect(result.files).toHaveLength(1);
expect(result.files[0].status).toBe('?');
expect(result.files[0].statusText).toBe('New');
});
});
describe('getGitRepositoryDiffs', () => {
it('should treat non-git directory as all new files', async () => {
await fs.writeFile(path.join(tempDir, 'file.txt'), 'content\n');
const result = await getGitRepositoryDiffs(tempDir);
expect(result.hasChanges).toBe(true);
expect(result.files).toHaveLength(1);
expect(result.files[0].status).toBe('?');
expect(result.diff).toContain('diff --git a/file.txt b/file.txt');
});
it('should return no changes for empty non-git directory', async () => {
const result = await getGitRepositoryDiffs(tempDir);
expect(result.hasChanges).toBe(false);
expect(result.files).toEqual([]);
expect(result.diff).toBe('');
});
});
});


@@ -0,0 +1,9 @@
{
"extends": "../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}


@@ -0,0 +1,21 @@
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true,
environment: 'node',
include: ['tests/**/*.test.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'json', 'html'],
include: ['src/**/*.ts'],
exclude: ['src/**/*.d.ts', 'src/index.ts', 'src/types.ts'],
thresholds: {
lines: 65,
functions: 75,
branches: 35,
statements: 65,
},
},
},
});


@@ -0,0 +1,135 @@
# @automaker/model-resolver
Claude model resolution and mapping utilities.
## Overview
This package handles Claude model resolution, converting user-friendly aliases to actual Claude model identifiers and providing default model configurations.
## Installation
```bash
npm install @automaker/model-resolver
```
## Exports
### Model Resolution
Convert model aliases to full model identifiers.
```typescript
import { resolveModelString, DEFAULT_MODELS } from '@automaker/model-resolver';
import { CLAUDE_MODEL_MAP } from '@automaker/types';
// Resolve model string
const model = resolveModelString('sonnet');
// Returns: 'claude-sonnet-4-20250514'
const model2 = resolveModelString('haiku');
// Returns: 'claude-haiku-4-5'
const model3 = resolveModelString('opus');
// Returns: 'claude-opus-4-5-20251101'
// Use with custom default
const model4 = resolveModelString(undefined, 'claude-sonnet-4-20250514');
// Returns: 'claude-sonnet-4-20250514' (default)
// Direct model ID passthrough
const model5 = resolveModelString('claude-opus-4-5-20251101');
// Returns: 'claude-opus-4-5-20251101' (unchanged)
```
### Get Effective Model
Get the actual model that will be used.
```typescript
import { getEffectiveModel } from '@automaker/model-resolver';
// Get effective model with fallback chain
// Priority: explicit model > session model > default
const model = getEffectiveModel(
  'sonnet',                    // explicit model (highest priority)
  undefined,                   // session model
  'claude-sonnet-4-20250514'   // default model
);
// Returns: 'claude-sonnet-4-20250514'
```
### Model Constants
Access model mappings and defaults.
```typescript
import { DEFAULT_MODELS } from '@automaker/model-resolver';
import { CLAUDE_MODEL_MAP } from '@automaker/types';
// Default models for different contexts
console.log(DEFAULT_MODELS.claude); // 'claude-sonnet-4-20250514'
console.log(DEFAULT_MODELS.autoMode); // 'claude-sonnet-4-20250514'
console.log(DEFAULT_MODELS.chat); // 'claude-sonnet-4-20250514'
// Model alias mappings
console.log(CLAUDE_MODEL_MAP.haiku); // 'claude-haiku-4-5'
console.log(CLAUDE_MODEL_MAP.sonnet); // 'claude-sonnet-4-20250514'
console.log(CLAUDE_MODEL_MAP.opus); // 'claude-opus-4-5-20251101'
```
## Usage Example
```typescript
import { resolveModelString, DEFAULT_MODELS } from '@automaker/model-resolver';
import type { Feature } from '@automaker/types';
function prepareFeatureExecution(feature: Feature) {
  // Resolve model from feature or use default
  const model = resolveModelString(feature.model, DEFAULT_MODELS.autoMode);
  console.log(`Executing feature with model: ${model}`);
  return {
    featureId: feature.id,
    model,
    // ... other options
  };
}

// Example usage
const feature: Feature = {
  id: 'auth-feature',
  category: 'backend',
  description: 'Add authentication',
  model: 'opus', // User-friendly alias
};
prepareFeatureExecution(feature);
// Output: Executing feature with model: claude-opus-4-5-20251101
```
## Supported Models
### Current Model Aliases
- `haiku` → `claude-haiku-4-5`
- `sonnet` → `claude-sonnet-4-20250514`
- `opus` → `claude-opus-4-5-20251101`
### Model Selection Guide
- **Haiku**: Fast responses, simple tasks, lower cost
- **Sonnet**: Balanced performance, most tasks (recommended default)
- **Opus**: Maximum capability, complex reasoning, highest cost
## Dependencies
- `@automaker/types` - Model type definitions and constants
## Used By
- `@automaker/server` - Feature execution, agent chat, enhancement
## Notes
- Full Claude model IDs (strings containing `claude-`) are passed through unchanged, which allows direct use of specific versions like `claude-sonnet-4-20250514`
- Unknown aliases fall back to the default model, and a warning is logged
- Always falls back to a sensible default if no model is specified
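A short sketch of these rules using `resolveModelString` (the model IDs follow the alias table above; `unknown-model` is an illustrative key):

```typescript
import { resolveModelString, DEFAULT_MODELS } from '@automaker/model-resolver';

resolveModelString('opus');                               // 'claude-opus-4-5-20251101' (alias lookup)
resolveModelString('claude-haiku-4-5');                   // 'claude-haiku-4-5' (full ID passes through)
resolveModelString('unknown-model');                      // DEFAULT_MODELS.claude (warning logged)
resolveModelString('unknown-model', 'claude-haiku-4-5');  // 'claude-haiku-4-5' (custom default)
```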


@@ -0,0 +1,29 @@
{
"name": "@automaker/model-resolver",
"version": "1.0.0",
"type": "module",
"description": "Model resolution utilities for AutoMaker",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"watch": "tsc --watch",
"test": "vitest run",
"test:watch": "vitest"
},
"keywords": [
"automaker",
"model",
"resolver"
],
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
"dependencies": {
"@automaker/types": "^1.0.0"
},
"devDependencies": {
"@types/node": "^22.10.5",
"typescript": "^5.7.3",
"vitest": "^4.0.16"
}
}


@@ -0,0 +1,10 @@
/**
* @automaker/model-resolver
* Model resolution utilities for AutoMaker
*/
// Re-export constants from types
export { CLAUDE_MODEL_MAP, DEFAULT_MODELS, type ModelAlias } from '@automaker/types';
// Export resolver functions
export { resolveModelString, getEffectiveModel } from './resolver.js';


@@ -0,0 +1,61 @@
/**
* Model resolution utilities for handling model string mapping
*
* Provides centralized model resolution logic:
* - Maps Claude model aliases to full model strings
* - Provides default models per provider
* - Handles multiple model sources with priority
*/
import { CLAUDE_MODEL_MAP, DEFAULT_MODELS } from '@automaker/types';
/**
* Resolve a model key/alias to a full model string
*
* @param modelKey - Model key (e.g., "opus", "gpt-5.2", "claude-sonnet-4-20250514")
* @param defaultModel - Fallback model if modelKey is undefined
* @returns Full model string
*/
export function resolveModelString(
modelKey?: string,
defaultModel: string = DEFAULT_MODELS.claude
): string {
// No model specified - use default
if (!modelKey) {
return defaultModel;
}
// Full Claude model string - pass through unchanged
if (modelKey.includes('claude-')) {
console.log(`[ModelResolver] Using full Claude model string: ${modelKey}`);
return modelKey;
}
// Look up Claude model alias
const resolved = CLAUDE_MODEL_MAP[modelKey];
if (resolved) {
console.log(`[ModelResolver] Resolved model alias: "${modelKey}" -> "${resolved}"`);
return resolved;
}
// Unknown model key - use default
console.warn(`[ModelResolver] Unknown model key "${modelKey}", using default: "${defaultModel}"`);
return defaultModel;
}
/**
* Get the effective model from multiple sources
* Priority: explicit model > session model > default
*
* @param explicitModel - Explicitly provided model (highest priority)
* @param sessionModel - Model from session (medium priority)
* @param defaultModel - Fallback default model (lowest priority)
* @returns Resolved model string
*/
export function getEffectiveModel(
explicitModel?: string,
sessionModel?: string,
defaultModel?: string
): string {
return resolveModelString(explicitModel || sessionModel, defaultModel);
}


@@ -0,0 +1,300 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { resolveModelString, getEffectiveModel } from '../src/resolver';
import { CLAUDE_MODEL_MAP, DEFAULT_MODELS } from '@automaker/types';
describe('model-resolver', () => {
let consoleLogSpy: ReturnType<typeof vi.spyOn>;
let consoleWarnSpy: ReturnType<typeof vi.spyOn>;
beforeEach(() => {
consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
consoleWarnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
});
afterEach(() => {
consoleLogSpy.mockRestore();
consoleWarnSpy.mockRestore();
});
describe('resolveModelString', () => {
describe('with undefined/null input', () => {
it('should return default model when modelKey is undefined', () => {
const result = resolveModelString(undefined);
expect(result).toBe(DEFAULT_MODELS.claude);
});
it('should return custom default when modelKey is undefined', () => {
const customDefault = 'claude-opus-4-20241113';
const result = resolveModelString(undefined, customDefault);
expect(result).toBe(customDefault);
});
it('should return default when modelKey is empty string', () => {
const result = resolveModelString('');
expect(result).toBe(DEFAULT_MODELS.claude);
});
});
describe('with full Claude model strings', () => {
it('should pass through full Claude model string unchanged', () => {
const fullModel = 'claude-sonnet-4-20250514';
const result = resolveModelString(fullModel);
expect(result).toBe(fullModel);
expect(consoleLogSpy).toHaveBeenCalledWith(
expect.stringContaining('Using full Claude model string')
);
});
it('should handle claude-opus model strings', () => {
const fullModel = 'claude-opus-4-20241113';
const result = resolveModelString(fullModel);
expect(result).toBe(fullModel);
});
it('should handle claude-haiku model strings', () => {
const fullModel = 'claude-3-5-haiku-20241022';
const result = resolveModelString(fullModel);
expect(result).toBe(fullModel);
});
it("should handle any string containing 'claude-'", () => {
const customModel = 'claude-custom-experimental-v1';
const result = resolveModelString(customModel);
expect(result).toBe(customModel);
});
});
describe('with model aliases', () => {
it("should resolve 'sonnet' alias", () => {
const result = resolveModelString('sonnet');
expect(result).toBe(CLAUDE_MODEL_MAP.sonnet);
expect(consoleLogSpy).toHaveBeenCalledWith(
expect.stringContaining('Resolved model alias: "sonnet"')
);
});
it("should resolve 'opus' alias", () => {
const result = resolveModelString('opus');
expect(result).toBe(CLAUDE_MODEL_MAP.opus);
expect(consoleLogSpy).toHaveBeenCalledWith(
expect.stringContaining('Resolved model alias: "opus"')
);
});
it("should resolve 'haiku' alias", () => {
const result = resolveModelString('haiku');
expect(result).toBe(CLAUDE_MODEL_MAP.haiku);
});
it('should log the resolution for aliases', () => {
resolveModelString('sonnet');
expect(consoleLogSpy).toHaveBeenCalledWith(expect.stringContaining('Resolved model alias'));
expect(consoleLogSpy).toHaveBeenCalledWith(
expect.stringContaining(CLAUDE_MODEL_MAP.sonnet)
);
});
});
describe('with unknown model keys', () => {
it('should return default for unknown model key', () => {
const result = resolveModelString('unknown-model');
expect(result).toBe(DEFAULT_MODELS.claude);
});
it('should warn about unknown model key', () => {
resolveModelString('unknown-model');
expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('Unknown model key'));
expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining('unknown-model'));
});
it('should use custom default for unknown model key', () => {
const customDefault = 'claude-opus-4-20241113';
const result = resolveModelString('gpt-4', customDefault);
expect(result).toBe(customDefault);
});
it('should warn and show default being used', () => {
const customDefault = 'claude-custom-default';
resolveModelString('invalid-key', customDefault);
expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining(customDefault));
});
});
describe('case sensitivity', () => {
it('should be case-sensitive for aliases', () => {
const resultUpper = resolveModelString('SONNET');
const resultLower = resolveModelString('sonnet');
// Uppercase should not resolve (falls back to default)
expect(resultUpper).toBe(DEFAULT_MODELS.claude);
// Lowercase should resolve
expect(resultLower).toBe(CLAUDE_MODEL_MAP.sonnet);
});
it('should handle mixed case in claude- strings', () => {
const result = resolveModelString('Claude-Sonnet-4-20250514');
// Capital 'C' means it won't match 'claude-', falls back to default
expect(result).toBe(DEFAULT_MODELS.claude);
});
});
describe('edge cases', () => {
it('should handle model key with whitespace', () => {
const result = resolveModelString(' sonnet ');
// Will not match due to whitespace, falls back to default
expect(result).toBe(DEFAULT_MODELS.claude);
});
it('should handle special characters in model key', () => {
const result = resolveModelString('model@123');
expect(result).toBe(DEFAULT_MODELS.claude);
});
});
});
describe('getEffectiveModel', () => {
describe('priority handling', () => {
it('should prioritize explicit model over all others', () => {
const explicit = 'claude-opus-4-20241113';
const session = 'claude-sonnet-4-20250514';
const defaultModel = 'claude-3-5-haiku-20241022';
const result = getEffectiveModel(explicit, session, defaultModel);
expect(result).toBe(explicit);
});
it('should use session model when explicit is undefined', () => {
const session = 'claude-sonnet-4-20250514';
const defaultModel = 'claude-3-5-haiku-20241022';
const result = getEffectiveModel(undefined, session, defaultModel);
expect(result).toBe(session);
});
it('should use default model when both explicit and session are undefined', () => {
const defaultModel = 'claude-opus-4-20241113';
const result = getEffectiveModel(undefined, undefined, defaultModel);
expect(result).toBe(defaultModel);
});
it('should use system default when all are undefined', () => {
const result = getEffectiveModel(undefined, undefined, undefined);
expect(result).toBe(DEFAULT_MODELS.claude);
});
});
describe('with aliases', () => {
it('should resolve explicit model alias', () => {
const result = getEffectiveModel('opus', 'sonnet');
expect(result).toBe(CLAUDE_MODEL_MAP.opus);
});
it('should resolve session model alias when explicit is undefined', () => {
const result = getEffectiveModel(undefined, 'haiku');
expect(result).toBe(CLAUDE_MODEL_MAP.haiku);
});
it('should prioritize explicit alias over session full string', () => {
const result = getEffectiveModel('sonnet', 'claude-opus-4-20241113');
expect(result).toBe(CLAUDE_MODEL_MAP.sonnet);
});
});
describe('with empty strings', () => {
it('should treat empty explicit string as undefined', () => {
const session = 'claude-sonnet-4-20250514';
const result = getEffectiveModel('', session);
expect(result).toBe(session);
});
it('should treat empty session string as undefined', () => {
const defaultModel = 'claude-opus-4-20241113';
const result = getEffectiveModel(undefined, '', defaultModel);
expect(result).toBe(defaultModel);
});
it('should handle all empty strings', () => {
const result = getEffectiveModel('', '', '');
// Empty strings are falsy, so explicit || session becomes "" || "" = ""
// resolveModelString("", "") hits the !modelKey branch (empty string is falsy)
// and returns the provided default, which is ""
expect(result).toBe('');
});
});
describe('integration scenarios', () => {
it('should handle user overriding session model with alias', () => {
const sessionModel = 'claude-sonnet-4-20250514';
const userChoice = 'opus';
const result = getEffectiveModel(userChoice, sessionModel);
expect(result).toBe(CLAUDE_MODEL_MAP.opus);
});
it('should handle fallback chain: unknown -> session -> default', () => {
const result = getEffectiveModel('invalid', 'also-invalid', 'claude-opus-4-20241113');
// Both invalid models fall back to default
expect(result).toBe('claude-opus-4-20241113');
});
it('should handle session with alias, no explicit', () => {
const result = getEffectiveModel(undefined, 'haiku');
expect(result).toBe(CLAUDE_MODEL_MAP.haiku);
});
});
});
describe('CLAUDE_MODEL_MAP integration', () => {
it('should have valid mappings for all known aliases', () => {
const aliases = ['sonnet', 'opus', 'haiku'];
for (const alias of aliases) {
const resolved = resolveModelString(alias);
expect(resolved).toBeDefined();
expect(resolved).toContain('claude-');
expect(resolved).toBe(CLAUDE_MODEL_MAP[alias]);
}
});
});
describe('DEFAULT_MODELS integration', () => {
it('should use DEFAULT_MODELS.claude as fallback', () => {
const result = resolveModelString(undefined);
expect(result).toBe(DEFAULT_MODELS.claude);
expect(DEFAULT_MODELS.claude).toBeDefined();
expect(DEFAULT_MODELS.claude).toContain('claude-');
});
});
});


@@ -0,0 +1,9 @@
{
"extends": "../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}


@@ -0,0 +1,21 @@
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true,
environment: 'node',
include: ['tests/**/*.test.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'json', 'html'],
include: ['src/**/*.ts'],
exclude: ['src/**/*.d.ts', 'src/index.ts'],
thresholds: {
lines: 95,
functions: 95,
branches: 90,
statements: 95,
},
},
},
});

libs/platform/README.md

@@ -0,0 +1,217 @@
# @automaker/platform
Platform-specific utilities for AutoMaker.
## Overview
This package provides platform-specific utilities including path management, subprocess handling, and security validation. It handles AutoMaker's directory structure and system operations.
## Installation
```bash
npm install @automaker/platform
```
## Exports
### Path Management
AutoMaker directory structure utilities.
```typescript
import {
getAutomakerDir,
getFeaturesDir,
getFeatureDir,
getFeatureImagesDir,
getBoardDir,
getImagesDir,
getContextDir,
getWorktreesDir,
getAppSpecPath,
getBranchTrackingPath,
ensureAutomakerDir,
} from '@automaker/platform';
// Get AutoMaker directory: /project/.automaker
const automakerDir = getAutomakerDir('/project/path');
// Get features directory: /project/.automaker/features
const featuresDir = getFeaturesDir('/project/path');
// Get specific feature directory: /project/.automaker/features/feature-id
const featureDir = getFeatureDir('/project/path', 'feature-id');
// Get feature images: /project/.automaker/features/feature-id/images
const imagesDir = getFeatureImagesDir('/project/path', 'feature-id');
// Ensure .automaker directory exists
await ensureAutomakerDir('/project/path');
```
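The package also exports helpers for global (per-user) settings paths: `getGlobalSettingsPath`, `getCredentialsPath`, `getProjectSettingsPath`, and `ensureDataDir`. A short sketch (the `/app/data` directory is only an example value):
```typescript
import {
  getGlobalSettingsPath,
  getCredentialsPath,
  getProjectSettingsPath,
  ensureDataDir,
} from '@automaker/platform';

// Global settings: /app/data/settings.json
const settingsPath = getGlobalSettingsPath('/app/data');

// Credentials (API keys): /app/data/credentials.json
const credentialsPath = getCredentialsPath('/app/data');

// Project-level overrides: /project/path/.automaker/settings.json
const projectSettings = getProjectSettingsPath('/project/path');

// Ensure the data directory exists
await ensureDataDir('/app/data');
```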
### Subprocess Management
Spawn and manage subprocesses with JSON-lines output.
```typescript
import { spawnJSONLProcess, spawnProcess } from '@automaker/platform';
// Spawn a process and stream its JSONL output (async generator)
for await (const event of spawnJSONLProcess({
  command: 'claude-agent',
  args: ['--output', 'jsonl'],
  cwd: '/project/path',
})) {
  console.log('Received:', event);
}
// Spawn regular process
const output = await spawnProcess({
command: 'git',
args: ['status'],
cwd: '/project/path',
});
```
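`spawnJSONLProcess` also accepts an optional `abortController` and a `timeout` (milliseconds of silence before the process is killed with SIGTERM). A minimal sketch continuing the example above:
```typescript
import { spawnJSONLProcess } from '@automaker/platform';

const abortController = new AbortController();

for await (const event of spawnJSONLProcess({
  command: 'claude-agent',
  args: ['--output', 'jsonl'],
  cwd: '/project/path',
  timeout: 60_000, // kill the process after 60s with no output
  abortController, // call abortController.abort() to cancel early
})) {
  const data = event as { type?: string; error?: string };
  if (data.type === 'error') {
    console.error('Agent error:', data.error);
    break;
  }
}
```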
### Security Validation
Path validation and security checks.
```typescript
import {
initAllowedPaths,
isPathAllowed,
validatePath,
getAllowedPaths,
getAllowedRootDirectory,
getDataDirectory,
PathNotAllowedError,
} from '@automaker/platform';
// Initialize allowed paths from environment
// Reads ALLOWED_ROOT_DIRECTORY and DATA_DIR environment variables
initAllowedPaths();
// Check if path is allowed
if (isPathAllowed('/project/path')) {
console.log('Path is allowed');
}
// Validate and normalize path (throws PathNotAllowedError if not allowed)
try {
const safePath = validatePath('/requested/path');
} catch (error) {
if (error instanceof PathNotAllowedError) {
console.error('Access denied:', error.message);
}
}
// Get configured directories
const rootDir = getAllowedRootDirectory(); // or null if not configured
const dataDir = getDataDirectory(); // or null if not configured
const allowed = getAllowedPaths(); // array of all allowed paths
```
## Usage Example
```typescript
import {
getFeatureDir,
ensureAutomakerDir,
spawnJSONLProcess,
validatePath,
} from '@automaker/platform';
async function executeFeature(projectPath: string, featureId: string) {
// Validate project path
const safePath = validatePath(projectPath);
// Ensure AutoMaker directory exists
await ensureAutomakerDir(safePath);
// Get feature directory
const featureDir = getFeatureDir(safePath, featureId);
  // Execute agent in feature directory, streaming JSONL events
  const events: unknown[] = [];
  for await (const event of spawnJSONLProcess({
    command: 'claude-agent',
    args: ['execute'],
    cwd: featureDir,
  })) {
    const data = event as { type?: string; progress?: number };
    if (data.type === 'progress') {
      console.log('Progress:', data.progress);
    }
    events.push(event);
  }
  return events;
}
```
## Security Model
Path security is enforced through two environment variables:
### Environment Variables
- **ALLOWED_ROOT_DIRECTORY**: Primary security boundary. When set, all file operations must be within this directory.
- **DATA_DIR**: Application data directory (settings, credentials). Always allowed regardless of ALLOWED_ROOT_DIRECTORY.
### Behavior
1. **When ALLOWED_ROOT_DIRECTORY is set**: Only paths within this directory (or DATA_DIR) are allowed. Attempts to access other paths will throw `PathNotAllowedError`.
2. **When ALLOWED_ROOT_DIRECTORY is not set**: All paths are allowed (backward compatibility mode).
3. **DATA_DIR exception**: Paths within DATA_DIR are always allowed, even if outside ALLOWED_ROOT_DIRECTORY. This ensures settings and credentials are always accessible.
### Example Configuration
```bash
# Docker/containerized environment
ALLOWED_ROOT_DIRECTORY=/workspace
DATA_DIR=/app/data
# Development (no restrictions)
# Leave ALLOWED_ROOT_DIRECTORY unset for full access
```
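Under the containerized configuration above, path checks would behave roughly as follows (a minimal sketch; the file paths are illustrative):
```typescript
import { initAllowedPaths, isPathAllowed, validatePath } from '@automaker/platform';

// Assumes ALLOWED_ROOT_DIRECTORY=/workspace and DATA_DIR=/app/data (as above)
initAllowedPaths();

isPathAllowed('/workspace/project/src/index.ts'); // true  - inside ALLOWED_ROOT_DIRECTORY
isPathAllowed('/app/data/settings.json');         // true  - DATA_DIR exception
isPathAllowed('/etc/passwd');                     // false - outside both

validatePath('/etc/passwd'); // throws PathNotAllowedError
```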
### Secure File System
The `secureFs` module wraps Node.js `fs` operations with path validation:
```typescript
import { secureFs } from '@automaker/platform';
// All operations validate paths before execution
await secureFs.readFile('/workspace/project/file.txt');
await secureFs.writeFile('/workspace/project/output.txt', data);
await secureFs.mkdir('/workspace/project/new-dir', { recursive: true });
```
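Operations on disallowed paths reject with `PathNotAllowedError`, so callers can handle policy violations explicitly; a brief sketch:
```typescript
import { secureFs, PathNotAllowedError } from '@automaker/platform';

try {
  await secureFs.readFile('/etc/passwd', 'utf-8');
} catch (error) {
  if (error instanceof PathNotAllowedError) {
    console.error('Blocked by path policy:', error.message);
  }
}
```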
## Directory Structure
AutoMaker uses the following directory structure:
```
/project/
├── .automaker/
│ ├── features/ # Feature storage
│ │ └── {featureId}/
│ │ ├── feature.json
│ │ └── images/
│ ├── board/ # Board configuration
│ ├── context/ # Context files
│ ├── images/ # Global images
│ ├── worktrees/ # Git worktrees
│   ├── app_spec.txt         # App specification
│   └── active-branches.json # Branch/worktree tracking metadata
```
## Dependencies
- `@automaker/types` - Type definitions
## Used By
- `@automaker/server`


@@ -0,0 +1,28 @@
{
"name": "@automaker/platform",
"version": "1.0.0",
"type": "module",
"description": "Platform-specific utilities for AutoMaker",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"watch": "tsc --watch",
"test": "vitest run",
"test:watch": "vitest"
},
"keywords": [
"automaker",
"platform"
],
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
"dependencies": {
"@automaker/types": "^1.0.0"
},
"devDependencies": {
"@types/node": "^22.10.5",
"typescript": "^5.7.3",
"vitest": "^4.0.16"
}
}


@@ -0,0 +1,46 @@
/**
* @automaker/platform
* Platform-specific utilities for AutoMaker
*/
// Path utilities
export {
getAutomakerDir,
getFeaturesDir,
getFeatureDir,
getFeatureImagesDir,
getBoardDir,
getImagesDir,
getContextDir,
getWorktreesDir,
getAppSpecPath,
getBranchTrackingPath,
ensureAutomakerDir,
getGlobalSettingsPath,
getCredentialsPath,
getProjectSettingsPath,
ensureDataDir,
} from './paths.js';
// Subprocess management
export {
spawnJSONLProcess,
spawnProcess,
type SubprocessOptions,
type SubprocessResult,
} from './subprocess.js';
// Security
export {
PathNotAllowedError,
initAllowedPaths,
isPathAllowed,
validatePath,
isPathWithinDirectory,
getAllowedRootDirectory,
getDataDirectory,
getAllowedPaths,
} from './security.js';
// Secure file system (validates paths before I/O operations)
export * as secureFs from './secure-fs.js';

libs/platform/src/paths.ts

@@ -0,0 +1,213 @@
/**
* Automaker Paths - Utilities for managing automaker data storage
*
* Provides functions to construct paths for:
* - Project-level data stored in {projectPath}/.automaker/
* - Global user data stored in app userData directory
*
* All returned paths are absolute and ready to use with fs module.
* Directory creation is handled separately by ensure* functions.
*/
import * as secureFs from './secure-fs.js';
import path from 'path';
/**
* Get the automaker data directory root for a project
*
* All project-specific automaker data is stored under {projectPath}/.automaker/
* This directory is created when needed via ensureAutomakerDir().
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker
*/
export function getAutomakerDir(projectPath: string): string {
return path.join(projectPath, '.automaker');
}
/**
* Get the features directory for a project
*
* Contains subdirectories for each feature, keyed by featureId.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/features
*/
export function getFeaturesDir(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'features');
}
/**
* Get the directory for a specific feature
*
* Contains feature-specific data like generated code, tests, and logs.
*
* @param projectPath - Absolute path to project directory
* @param featureId - Feature identifier
* @returns Absolute path to {projectPath}/.automaker/features/{featureId}
*/
export function getFeatureDir(projectPath: string, featureId: string): string {
return path.join(getFeaturesDir(projectPath), featureId);
}
/**
* Get the images directory for a feature
*
* Stores screenshots, diagrams, or other images related to the feature.
*
* @param projectPath - Absolute path to project directory
* @param featureId - Feature identifier
* @returns Absolute path to {projectPath}/.automaker/features/{featureId}/images
*/
export function getFeatureImagesDir(projectPath: string, featureId: string): string {
return path.join(getFeatureDir(projectPath, featureId), 'images');
}
/**
* Get the board directory for a project
*
* Contains board-related data like background images and customization files.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/board
*/
export function getBoardDir(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'board');
}
/**
* Get the general images directory for a project
*
* Stores project-level images like background images or shared assets.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/images
*/
export function getImagesDir(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'images');
}
/**
* Get the context files directory for a project
*
* Stores user-uploaded context files for reference during generation.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/context
*/
export function getContextDir(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'context');
}
/**
* Get the worktrees metadata directory for a project
*
* Stores information about git worktrees associated with the project.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/worktrees
*/
export function getWorktreesDir(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'worktrees');
}
/**
* Get the app spec file path for a project
*
* Stores the application specification document used for generation.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/app_spec.txt
*/
export function getAppSpecPath(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'app_spec.txt');
}
/**
* Get the branch tracking file path for a project
*
* Stores JSON metadata about active git branches and worktrees.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/active-branches.json
*/
export function getBranchTrackingPath(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'active-branches.json');
}
/**
* Create the automaker directory structure for a project if it doesn't exist
*
* Creates {projectPath}/.automaker with all subdirectories recursively.
* Safe to call multiple times - uses recursive: true.
*
* @param projectPath - Absolute path to project directory
* @returns Promise resolving to the created automaker directory path
*/
export async function ensureAutomakerDir(projectPath: string): Promise<string> {
const automakerDir = getAutomakerDir(projectPath);
await secureFs.mkdir(automakerDir, { recursive: true });
return automakerDir;
}
// ============================================================================
// Global Settings Paths (stored in DATA_DIR from app.getPath('userData'))
// ============================================================================
/**
* Get the global settings file path
*
* Stores user preferences, keyboard shortcuts, AI profiles, and project history.
* Located in the platform-specific userData directory.
*
* Default locations:
* - macOS: ~/Library/Application Support/automaker
* - Windows: %APPDATA%\automaker
* - Linux: ~/.config/automaker
*
* @param dataDir - User data directory (from app.getPath('userData'))
* @returns Absolute path to {dataDir}/settings.json
*/
export function getGlobalSettingsPath(dataDir: string): string {
return path.join(dataDir, 'settings.json');
}
/**
* Get the credentials file path
*
* Stores sensitive API keys separately from other settings for security.
* Located in the platform-specific userData directory.
*
* @param dataDir - User data directory (from app.getPath('userData'))
* @returns Absolute path to {dataDir}/credentials.json
*/
export function getCredentialsPath(dataDir: string): string {
return path.join(dataDir, 'credentials.json');
}
/**
* Get the project settings file path
*
* Stores project-specific settings that override global settings.
* Located within the project's .automaker directory.
*
* @param projectPath - Absolute path to project directory
* @returns Absolute path to {projectPath}/.automaker/settings.json
*/
export function getProjectSettingsPath(projectPath: string): string {
return path.join(getAutomakerDir(projectPath), 'settings.json');
}
/**
* Create the global data directory if it doesn't exist
*
* Creates the userData directory for storing global settings and credentials.
* Safe to call multiple times - uses recursive: true.
*
* @param dataDir - User data directory path to create
* @returns Promise resolving to the created data directory path
*/
export async function ensureDataDir(dataDir: string): Promise<string> {
await secureFs.mkdir(dataDir, { recursive: true });
return dataDir;
}


@@ -0,0 +1,161 @@
/**
* Secure File System Adapter
*
* All file I/O operations must go through this adapter to enforce
* ALLOWED_ROOT_DIRECTORY restrictions at the actual access point,
* not just at the API layer. This provides defense-in-depth security.
*/
import fs from 'fs/promises';
import type { Dirent } from 'fs';
import path from 'path';
import { validatePath } from './security.js';
/**
* Wrapper around fs.access that validates path first
*/
export async function access(filePath: string, mode?: number): Promise<void> {
const validatedPath = validatePath(filePath);
return fs.access(validatedPath, mode);
}
/**
* Wrapper around fs.readFile that validates path first
*/
export async function readFile(
filePath: string,
encoding?: BufferEncoding
): Promise<string | Buffer> {
const validatedPath = validatePath(filePath);
if (encoding) {
return fs.readFile(validatedPath, encoding);
}
return fs.readFile(validatedPath);
}
/**
* Wrapper around fs.writeFile that validates path first
*/
export async function writeFile(
filePath: string,
data: string | Buffer,
encoding?: BufferEncoding
): Promise<void> {
const validatedPath = validatePath(filePath);
return fs.writeFile(validatedPath, data, encoding);
}
/**
* Wrapper around fs.mkdir that validates path first
*/
export async function mkdir(
dirPath: string,
options?: { recursive?: boolean; mode?: number }
): Promise<string | undefined> {
const validatedPath = validatePath(dirPath);
return fs.mkdir(validatedPath, options);
}
/**
* Wrapper around fs.readdir that validates path first
*/
export async function readdir(
dirPath: string,
options?: { withFileTypes?: false; encoding?: BufferEncoding }
): Promise<string[]>;
export async function readdir(
dirPath: string,
options: { withFileTypes: true; encoding?: BufferEncoding }
): Promise<Dirent[]>;
export async function readdir(
dirPath: string,
options?: { withFileTypes?: boolean; encoding?: BufferEncoding }
): Promise<string[] | Dirent[]> {
const validatedPath = validatePath(dirPath);
if (options?.withFileTypes === true) {
return fs.readdir(validatedPath, { withFileTypes: true });
}
return fs.readdir(validatedPath);
}
/**
* Wrapper around fs.stat that validates path first
*/
export async function stat(filePath: string): Promise<any> {
const validatedPath = validatePath(filePath);
return fs.stat(validatedPath);
}
/**
* Wrapper around fs.rm that validates path first
*/
export async function rm(
filePath: string,
options?: { recursive?: boolean; force?: boolean }
): Promise<void> {
const validatedPath = validatePath(filePath);
return fs.rm(validatedPath, options);
}
/**
* Wrapper around fs.unlink that validates path first
*/
export async function unlink(filePath: string): Promise<void> {
const validatedPath = validatePath(filePath);
return fs.unlink(validatedPath);
}
/**
* Wrapper around fs.copyFile that validates both paths first
*/
export async function copyFile(src: string, dest: string, mode?: number): Promise<void> {
const validatedSrc = validatePath(src);
const validatedDest = validatePath(dest);
return fs.copyFile(validatedSrc, validatedDest, mode);
}
/**
* Wrapper around fs.appendFile that validates path first
*/
export async function appendFile(
filePath: string,
data: string | Buffer,
encoding?: BufferEncoding
): Promise<void> {
const validatedPath = validatePath(filePath);
return fs.appendFile(validatedPath, data, encoding);
}
/**
* Wrapper around fs.rename that validates both paths first
*/
export async function rename(oldPath: string, newPath: string): Promise<void> {
const validatedOldPath = validatePath(oldPath);
const validatedNewPath = validatePath(newPath);
return fs.rename(validatedOldPath, validatedNewPath);
}
/**
* Wrapper around fs.lstat that validates path first
* Returns file stats without following symbolic links
*/
export async function lstat(filePath: string): Promise<any> {
const validatedPath = validatePath(filePath);
return fs.lstat(validatedPath);
}
/**
* Wrapper around path.join that returns resolved path
* Does NOT validate - use this for path construction, then pass to other operations
*/
export function joinPath(...pathSegments: string[]): string {
return path.join(...pathSegments);
}
/**
* Wrapper around path.resolve that returns resolved path
* Does NOT validate - use this for path construction, then pass to other operations
*/
export function resolvePath(...pathSegments: string[]): string {
return path.resolve(...pathSegments);
}


@@ -0,0 +1,131 @@
/**
* Security utilities for path validation
* Enforces ALLOWED_ROOT_DIRECTORY constraint with appData exception
*/
import path from 'path';
/**
* Error thrown when a path is not allowed by security policy
*/
export class PathNotAllowedError extends Error {
constructor(filePath: string) {
super(`Path not allowed: ${filePath}. Must be within ALLOWED_ROOT_DIRECTORY or DATA_DIR.`);
this.name = 'PathNotAllowedError';
}
}
// Allowed root directory - main security boundary
let allowedRootDirectory: string | null = null;
// Data directory - always allowed for settings/credentials
let dataDirectory: string | null = null;
/**
* Initialize security settings from environment variables
* - ALLOWED_ROOT_DIRECTORY: main security boundary
* - DATA_DIR: appData exception, always allowed
*/
export function initAllowedPaths(): void {
// Load ALLOWED_ROOT_DIRECTORY
const rootDir = process.env.ALLOWED_ROOT_DIRECTORY;
if (rootDir) {
allowedRootDirectory = path.resolve(rootDir);
console.log(`[Security] ✓ ALLOWED_ROOT_DIRECTORY configured: ${allowedRootDirectory}`);
} else {
console.log('[Security] ⚠️ ALLOWED_ROOT_DIRECTORY not set - allowing access to all paths');
}
// Load DATA_DIR (appData exception - always allowed)
const dataDir = process.env.DATA_DIR;
if (dataDir) {
dataDirectory = path.resolve(dataDir);
console.log(`[Security] ✓ DATA_DIR configured: ${dataDirectory}`);
}
}
/**
* Check if a path is allowed based on ALLOWED_ROOT_DIRECTORY
* Returns true if:
* - Path is within ALLOWED_ROOT_DIRECTORY, OR
* - Path is within DATA_DIR (appData exception), OR
* - No restrictions are configured (backward compatibility)
*/
export function isPathAllowed(filePath: string): boolean {
const resolvedPath = path.resolve(filePath);
// Always allow appData directory (settings, credentials)
if (dataDirectory && isPathWithinDirectory(resolvedPath, dataDirectory)) {
return true;
}
// If no ALLOWED_ROOT_DIRECTORY restriction is configured, allow all paths
// Note: DATA_DIR is checked above as an exception, but doesn't restrict other paths
if (!allowedRootDirectory) {
return true;
}
// Allow if within ALLOWED_ROOT_DIRECTORY
if (allowedRootDirectory && isPathWithinDirectory(resolvedPath, allowedRootDirectory)) {
return true;
}
// If restrictions are configured but path doesn't match, deny
return false;
}
/**
* Validate a path - resolves it and checks permissions
* Throws PathNotAllowedError if path is not allowed
*/
export function validatePath(filePath: string): string {
const resolvedPath = path.resolve(filePath);
if (!isPathAllowed(resolvedPath)) {
throw new PathNotAllowedError(filePath);
}
return resolvedPath;
}
/**
* Check if a path is within a directory, with protection against path traversal
* Returns true only if resolvedPath is within directoryPath
*/
export function isPathWithinDirectory(resolvedPath: string, directoryPath: string): boolean {
// Get the relative path from directory to the target
const relativePath = path.relative(directoryPath, resolvedPath);
// If relative path starts with "..", it's outside the directory
// If relative path is absolute, it's outside the directory
// If relative path is empty or ".", it's the directory itself
return !relativePath.startsWith('..') && !path.isAbsolute(relativePath);
}
/**
* Get the configured allowed root directory
*/
export function getAllowedRootDirectory(): string | null {
return allowedRootDirectory;
}
/**
* Get the configured data directory
*/
export function getDataDirectory(): string | null {
return dataDirectory;
}
/**
* Get list of allowed paths (for debugging)
*/
export function getAllowedPaths(): string[] {
const paths: string[] = [];
if (allowedRootDirectory) {
paths.push(allowedRootDirectory);
}
if (dataDirectory) {
paths.push(dataDirectory);
}
return paths;
}


@@ -0,0 +1,197 @@
/**
* Subprocess management utilities for CLI providers
*/
import { spawn, type ChildProcess } from 'child_process';
import readline from 'readline';
export interface SubprocessOptions {
command: string;
args: string[];
cwd: string;
env?: Record<string, string>;
abortController?: AbortController;
timeout?: number; // Milliseconds of no output before timeout
}
export interface SubprocessResult {
stdout: string;
stderr: string;
exitCode: number | null;
}
/**
* Spawns a subprocess and streams JSONL output line-by-line
*/
export async function* spawnJSONLProcess(options: SubprocessOptions): AsyncGenerator<unknown> {
const { command, args, cwd, env, abortController, timeout = 30000 } = options;
const processEnv = {
...process.env,
...env,
};
console.log(`[SubprocessManager] Spawning: ${command} ${args.slice(0, -1).join(' ')}`);
console.log(`[SubprocessManager] Working directory: ${cwd}`);
const childProcess: ChildProcess = spawn(command, args, {
cwd,
env: processEnv,
stdio: ['ignore', 'pipe', 'pipe'],
});
let stderrOutput = '';
let lastOutputTime = Date.now();
let timeoutHandle: NodeJS.Timeout | null = null;
// Collect stderr for error reporting
if (childProcess.stderr) {
childProcess.stderr.on('data', (data: Buffer) => {
const text = data.toString();
stderrOutput += text;
console.error(`[SubprocessManager] stderr: ${text}`);
});
}
// Setup timeout detection
const resetTimeout = () => {
lastOutputTime = Date.now();
if (timeoutHandle) {
clearTimeout(timeoutHandle);
}
timeoutHandle = setTimeout(() => {
const elapsed = Date.now() - lastOutputTime;
if (elapsed >= timeout) {
console.error(`[SubprocessManager] Process timeout: no output for ${timeout}ms`);
childProcess.kill('SIGTERM');
}
}, timeout);
};
resetTimeout();
// Setup abort handling
if (abortController) {
abortController.signal.addEventListener('abort', () => {
console.log('[SubprocessManager] Abort signal received, killing process');
if (timeoutHandle) {
clearTimeout(timeoutHandle);
}
childProcess.kill('SIGTERM');
});
}
// Parse stdout as JSONL (one JSON object per line)
if (childProcess.stdout) {
const rl = readline.createInterface({
input: childProcess.stdout,
crlfDelay: Infinity,
});
try {
for await (const line of rl) {
resetTimeout();
if (!line.trim()) continue;
try {
const parsed = JSON.parse(line);
yield parsed;
} catch (parseError) {
console.error(`[SubprocessManager] Failed to parse JSONL line: ${line}`, parseError);
// Yield error but continue processing
yield {
type: 'error',
error: `Failed to parse output: ${line}`,
};
}
}
} catch (error) {
console.error('[SubprocessManager] Error reading stdout:', error);
throw error;
} finally {
if (timeoutHandle) {
clearTimeout(timeoutHandle);
}
}
}
// Wait for process to exit
const exitCode = await new Promise<number | null>((resolve) => {
childProcess.on('exit', (code) => {
console.log(`[SubprocessManager] Process exited with code: ${code}`);
resolve(code);
});
childProcess.on('error', (error) => {
console.error('[SubprocessManager] Process error:', error);
resolve(null);
});
});
// Handle non-zero exit codes
if (exitCode !== 0 && exitCode !== null) {
const errorMessage = stderrOutput || `Process exited with code ${exitCode}`;
console.error(`[SubprocessManager] Process failed: ${errorMessage}`);
yield {
type: 'error',
error: errorMessage,
};
}
// Process completed successfully
if (exitCode === 0 && !stderrOutput) {
console.log('[SubprocessManager] Process completed successfully');
}
}
/**
* Spawns a subprocess and collects all output
*/
export async function spawnProcess(options: SubprocessOptions): Promise<SubprocessResult> {
const { command, args, cwd, env, abortController } = options;
const processEnv = {
...process.env,
...env,
};
return new Promise((resolve, reject) => {
const childProcess = spawn(command, args, {
cwd,
env: processEnv,
stdio: ['ignore', 'pipe', 'pipe'],
});
let stdout = '';
let stderr = '';
if (childProcess.stdout) {
childProcess.stdout.on('data', (data: Buffer) => {
stdout += data.toString();
});
}
if (childProcess.stderr) {
childProcess.stderr.on('data', (data: Buffer) => {
stderr += data.toString();
});
}
// Setup abort handling
if (abortController) {
abortController.signal.addEventListener('abort', () => {
childProcess.kill('SIGTERM');
reject(new Error('Process aborted'));
});
}
childProcess.on('exit', (code) => {
resolve({ stdout, stderr, exitCode: code });
});
childProcess.on('error', (error) => {
reject(error);
});
});
}


@@ -0,0 +1,212 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import fs from 'fs/promises';
import path from 'path';
import os from 'os';
import {
getAutomakerDir,
getFeaturesDir,
getFeatureDir,
getFeatureImagesDir,
getBoardDir,
getImagesDir,
getContextDir,
getWorktreesDir,
getAppSpecPath,
getBranchTrackingPath,
ensureAutomakerDir,
getGlobalSettingsPath,
getCredentialsPath,
getProjectSettingsPath,
ensureDataDir,
} from '../src/paths';
describe('paths.ts', () => {
let tempDir: string;
let projectPath: string;
let dataDir: string;
beforeEach(async () => {
// Create a temporary directory for testing
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'platform-paths-test-'));
projectPath = path.join(tempDir, 'test-project');
dataDir = path.join(tempDir, 'user-data');
await fs.mkdir(projectPath, { recursive: true });
});
afterEach(async () => {
// Clean up temporary directory
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe('Project-level path construction', () => {
it('should return automaker directory path', () => {
const result = getAutomakerDir(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker'));
});
it('should return features directory path', () => {
const result = getFeaturesDir(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'features'));
});
it('should return feature directory path', () => {
const featureId = 'auth-feature';
const result = getFeatureDir(projectPath, featureId);
expect(result).toBe(path.join(projectPath, '.automaker', 'features', featureId));
});
it('should return feature images directory path', () => {
const featureId = 'auth-feature';
const result = getFeatureImagesDir(projectPath, featureId);
expect(result).toBe(path.join(projectPath, '.automaker', 'features', featureId, 'images'));
});
it('should return board directory path', () => {
const result = getBoardDir(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'board'));
});
it('should return images directory path', () => {
const result = getImagesDir(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'images'));
});
it('should return context directory path', () => {
const result = getContextDir(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'context'));
});
it('should return worktrees directory path', () => {
const result = getWorktreesDir(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'worktrees'));
});
it('should return app spec file path', () => {
const result = getAppSpecPath(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'app_spec.txt'));
});
it('should return branch tracking file path', () => {
const result = getBranchTrackingPath(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'active-branches.json'));
});
it('should return project settings file path', () => {
const result = getProjectSettingsPath(projectPath);
expect(result).toBe(path.join(projectPath, '.automaker', 'settings.json'));
});
});
describe('Global settings path construction', () => {
it('should return global settings path', () => {
const result = getGlobalSettingsPath(dataDir);
expect(result).toBe(path.join(dataDir, 'settings.json'));
});
it('should return credentials path', () => {
const result = getCredentialsPath(dataDir);
expect(result).toBe(path.join(dataDir, 'credentials.json'));
});
});
describe('Directory creation', () => {
it('should create automaker directory', async () => {
const automakerDir = await ensureAutomakerDir(projectPath);
expect(automakerDir).toBe(path.join(projectPath, '.automaker'));
const stats = await fs.stat(automakerDir);
expect(stats.isDirectory()).toBe(true);
});
it('should be idempotent when creating automaker directory', async () => {
// Create directory first time
const firstResult = await ensureAutomakerDir(projectPath);
// Create directory second time
const secondResult = await ensureAutomakerDir(projectPath);
expect(firstResult).toBe(secondResult);
const stats = await fs.stat(firstResult);
expect(stats.isDirectory()).toBe(true);
});
it('should create data directory', async () => {
const result = await ensureDataDir(dataDir);
expect(result).toBe(dataDir);
const stats = await fs.stat(dataDir);
expect(stats.isDirectory()).toBe(true);
});
it('should be idempotent when creating data directory', async () => {
// Create directory first time
const firstResult = await ensureDataDir(dataDir);
// Create directory second time
const secondResult = await ensureDataDir(dataDir);
expect(firstResult).toBe(secondResult);
const stats = await fs.stat(firstResult);
expect(stats.isDirectory()).toBe(true);
});
it('should create nested directories recursively', async () => {
const deepProjectPath = path.join(tempDir, 'nested', 'deep', 'project');
await fs.mkdir(deepProjectPath, { recursive: true });
const automakerDir = await ensureAutomakerDir(deepProjectPath);
const stats = await fs.stat(automakerDir);
expect(stats.isDirectory()).toBe(true);
});
});
describe('Path handling with special characters', () => {
it('should handle feature IDs with special characters', () => {
const featureId = 'feature-with-dashes_and_underscores';
const result = getFeatureDir(projectPath, featureId);
expect(result).toContain(featureId);
});
it('should handle paths with spaces', () => {
const pathWithSpaces = path.join(tempDir, 'path with spaces');
const result = getAutomakerDir(pathWithSpaces);
expect(result).toBe(path.join(pathWithSpaces, '.automaker'));
});
});
describe('Path relationships', () => {
it('should have feature dir as child of features dir', () => {
const featuresDir = getFeaturesDir(projectPath);
const featureDir = getFeatureDir(projectPath, 'test-feature');
expect(featureDir.startsWith(featuresDir)).toBe(true);
});
it('should have all project paths under automaker dir', () => {
const automakerDir = getAutomakerDir(projectPath);
const paths = [
getFeaturesDir(projectPath),
getBoardDir(projectPath),
getImagesDir(projectPath),
getContextDir(projectPath),
getWorktreesDir(projectPath),
getAppSpecPath(projectPath),
getBranchTrackingPath(projectPath),
getProjectSettingsPath(projectPath),
];
paths.forEach((p) => {
expect(p.startsWith(automakerDir)).toBe(true);
});
});
});
});


@@ -0,0 +1,234 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import path from 'path';
describe('security.ts', () => {
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
// Save original environment
originalEnv = { ...process.env };
// Reset modules to get fresh state
vi.resetModules();
});
afterEach(() => {
// Restore original environment
process.env = originalEnv;
});
describe('initAllowedPaths', () => {
it('should load ALLOWED_ROOT_DIRECTORY if set', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/projects';
delete process.env.DATA_DIR;
const { initAllowedPaths, getAllowedPaths } = await import('../src/security');
initAllowedPaths();
const allowed = getAllowedPaths();
expect(allowed).toContain(path.resolve('/projects'));
});
it('should load DATA_DIR if set', async () => {
delete process.env.ALLOWED_ROOT_DIRECTORY;
process.env.DATA_DIR = '/data/directory';
const { initAllowedPaths, getAllowedPaths } = await import('../src/security');
initAllowedPaths();
const allowed = getAllowedPaths();
expect(allowed).toContain(path.resolve('/data/directory'));
});
it('should load both ALLOWED_ROOT_DIRECTORY and DATA_DIR if both set', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/projects';
process.env.DATA_DIR = '/app/data';
const { initAllowedPaths, getAllowedPaths } = await import('../src/security');
initAllowedPaths();
const allowed = getAllowedPaths();
expect(allowed).toContain(path.resolve('/projects'));
expect(allowed).toContain(path.resolve('/app/data'));
});
it('should handle missing environment variables gracefully', async () => {
delete process.env.ALLOWED_ROOT_DIRECTORY;
delete process.env.DATA_DIR;
const { initAllowedPaths } = await import('../src/security');
expect(() => initAllowedPaths()).not.toThrow();
});
});
describe('isPathAllowed', () => {
it('should allow paths within ALLOWED_ROOT_DIRECTORY', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/allowed';
delete process.env.DATA_DIR;
const { initAllowedPaths, isPathAllowed } = await import('../src/security');
initAllowedPaths();
expect(isPathAllowed('/allowed/file.txt')).toBe(true);
expect(isPathAllowed('/allowed/subdir/file.txt')).toBe(true);
});
it('should deny paths outside ALLOWED_ROOT_DIRECTORY', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/allowed';
delete process.env.DATA_DIR;
const { initAllowedPaths, isPathAllowed } = await import('../src/security');
initAllowedPaths();
expect(isPathAllowed('/not-allowed/file.txt')).toBe(false);
expect(isPathAllowed('/etc/passwd')).toBe(false);
});
it('should always allow DATA_DIR paths', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/projects';
process.env.DATA_DIR = '/app/data';
const { initAllowedPaths, isPathAllowed } = await import('../src/security');
initAllowedPaths();
// DATA_DIR paths are always allowed
expect(isPathAllowed('/app/data/settings.json')).toBe(true);
expect(isPathAllowed('/app/data/credentials.json')).toBe(true);
});
it('should allow all paths when no restrictions configured', async () => {
delete process.env.ALLOWED_ROOT_DIRECTORY;
delete process.env.DATA_DIR;
const { initAllowedPaths, isPathAllowed } = await import('../src/security');
initAllowedPaths();
expect(isPathAllowed('/any/path')).toBe(true);
expect(isPathAllowed('/etc/passwd')).toBe(true);
});
it('should allow all paths when only DATA_DIR is configured', async () => {
delete process.env.ALLOWED_ROOT_DIRECTORY;
process.env.DATA_DIR = '/data';
const { initAllowedPaths, isPathAllowed } = await import('../src/security');
initAllowedPaths();
// DATA_DIR should be allowed
expect(isPathAllowed('/data/file.txt')).toBe(true);
// And all other paths should be allowed since no ALLOWED_ROOT_DIRECTORY restriction
expect(isPathAllowed('/any/path')).toBe(true);
});
});
describe('validatePath', () => {
it('should return resolved path for allowed paths', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/allowed';
delete process.env.DATA_DIR;
const { initAllowedPaths, validatePath } = await import('../src/security');
initAllowedPaths();
const result = validatePath('/allowed/file.txt');
expect(result).toBe(path.resolve('/allowed/file.txt'));
});
it('should throw error for paths outside allowed directories', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/allowed';
delete process.env.DATA_DIR;
const { initAllowedPaths, validatePath, PathNotAllowedError } =
await import('../src/security');
initAllowedPaths();
expect(() => validatePath('/not-allowed/file.txt')).toThrow(PathNotAllowedError);
});
it('should resolve relative paths', async () => {
const cwd = process.cwd();
process.env.ALLOWED_ROOT_DIRECTORY = cwd;
delete process.env.DATA_DIR;
const { initAllowedPaths, validatePath } = await import('../src/security');
initAllowedPaths();
const result = validatePath('./file.txt');
expect(result).toBe(path.resolve(cwd, './file.txt'));
});
it('should not throw when no restrictions configured', async () => {
delete process.env.ALLOWED_ROOT_DIRECTORY;
delete process.env.DATA_DIR;
const { initAllowedPaths, validatePath } = await import('../src/security');
initAllowedPaths();
expect(() => validatePath('/any/path')).not.toThrow();
});
});
describe('getAllowedPaths', () => {
it('should return empty array when no paths configured', async () => {
delete process.env.ALLOWED_ROOT_DIRECTORY;
delete process.env.DATA_DIR;
const { initAllowedPaths, getAllowedPaths } = await import('../src/security');
initAllowedPaths();
const allowed = getAllowedPaths();
expect(Array.isArray(allowed)).toBe(true);
expect(allowed).toHaveLength(0);
});
it('should return configured paths', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/projects';
process.env.DATA_DIR = '/data';
const { initAllowedPaths, getAllowedPaths } = await import('../src/security');
initAllowedPaths();
const allowed = getAllowedPaths();
expect(allowed).toContain(path.resolve('/projects'));
expect(allowed).toContain(path.resolve('/data'));
});
});
describe('getAllowedRootDirectory', () => {
it('should return the configured root directory', async () => {
process.env.ALLOWED_ROOT_DIRECTORY = '/projects';
const { initAllowedPaths, getAllowedRootDirectory } = await import('../src/security');
initAllowedPaths();
expect(getAllowedRootDirectory()).toBe(path.resolve('/projects'));
});
it('should return null when not configured', async () => {
delete process.env.ALLOWED_ROOT_DIRECTORY;
const { initAllowedPaths, getAllowedRootDirectory } = await import('../src/security');
initAllowedPaths();
expect(getAllowedRootDirectory()).toBeNull();
});
});
describe('getDataDirectory', () => {
it('should return the configured data directory', async () => {
process.env.DATA_DIR = '/data';
const { initAllowedPaths, getDataDirectory } = await import('../src/security');
initAllowedPaths();
expect(getDataDirectory()).toBe(path.resolve('/data'));
});
it('should return null when not configured', async () => {
delete process.env.DATA_DIR;
const { initAllowedPaths, getDataDirectory } = await import('../src/security');
initAllowedPaths();
expect(getDataDirectory()).toBeNull();
});
});
});


@@ -0,0 +1,502 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { spawnJSONLProcess, spawnProcess, type SubprocessOptions } from '../src/subprocess';
import * as cp from 'child_process';
import { EventEmitter } from 'events';
import { Readable } from 'stream';
vi.mock('child_process');
/**
* Helper to collect all items from an async generator
*/
async function collectAsyncGenerator<T>(generator: AsyncGenerator<T>): Promise<T[]> {
const results: T[] = [];
for await (const item of generator) {
results.push(item);
}
return results;
}
describe('subprocess.ts', () => {
let consoleSpy: {
log: ReturnType<typeof vi.spyOn>;
error: ReturnType<typeof vi.spyOn>;
};
beforeEach(() => {
vi.clearAllMocks();
consoleSpy = {
log: vi.spyOn(console, 'log').mockImplementation(() => {}),
error: vi.spyOn(console, 'error').mockImplementation(() => {}),
};
});
afterEach(() => {
consoleSpy.log.mockRestore();
consoleSpy.error.mockRestore();
});
/**
* Helper to create a mock ChildProcess with stdout/stderr streams
*/
function createMockProcess(config: {
stdoutLines?: string[];
stderrLines?: string[];
exitCode?: number;
error?: Error;
delayMs?: number;
}) {
const mockProcess = new EventEmitter() as cp.ChildProcess & {
stdout: Readable;
stderr: Readable;
kill: ReturnType<typeof vi.fn>;
};
// Create readable streams for stdout and stderr
const stdout = new Readable({ read() {} });
const stderr = new Readable({ read() {} });
mockProcess.stdout = stdout;
mockProcess.stderr = stderr;
mockProcess.kill = vi.fn().mockReturnValue(true);
// Use process.nextTick to ensure readline interface is set up first
process.nextTick(() => {
// Emit stderr lines immediately
if (config.stderrLines) {
for (const line of config.stderrLines) {
stderr.emit('data', Buffer.from(line));
}
}
// Emit stdout lines with small delays to ensure readline processes them
const emitLines = async () => {
if (config.stdoutLines) {
for (const line of config.stdoutLines) {
stdout.push(line + '\n');
// Small delay to allow readline to process
await new Promise((resolve) => setImmediate(resolve));
}
}
// Small delay before ending stream
await new Promise((resolve) => setImmediate(resolve));
stdout.push(null); // End stdout
// Small delay before exit
await new Promise((resolve) => setTimeout(resolve, config.delayMs ?? 10));
// Emit exit or error
if (config.error) {
mockProcess.emit('error', config.error);
} else {
mockProcess.emit('exit', config.exitCode ?? 0);
}
};
emitLines();
});
return mockProcess;
}
describe('spawnJSONLProcess', () => {
const baseOptions: SubprocessOptions = {
command: 'test-command',
args: ['arg1', 'arg2'],
cwd: '/test/dir',
};
it('should yield parsed JSONL objects line by line', async () => {
const mockProcess = createMockProcess({
stdoutLines: [
'{"type":"start","id":1}',
'{"type":"progress","value":50}',
'{"type":"complete","result":"success"}',
],
exitCode: 0,
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
const results = await collectAsyncGenerator(generator);
expect(results).toHaveLength(3);
expect(results[0]).toEqual({ type: 'start', id: 1 });
expect(results[1]).toEqual({ type: 'progress', value: 50 });
expect(results[2]).toEqual({ type: 'complete', result: 'success' });
});
it('should skip empty lines', async () => {
const mockProcess = createMockProcess({
stdoutLines: ['{"type":"first"}', '', ' ', '{"type":"second"}'],
exitCode: 0,
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
const results = await collectAsyncGenerator(generator);
expect(results).toHaveLength(2);
expect(results[0]).toEqual({ type: 'first' });
expect(results[1]).toEqual({ type: 'second' });
});
it('should yield error for malformed JSON and continue processing', async () => {
const mockProcess = createMockProcess({
stdoutLines: ['{"type":"valid"}', '{invalid json}', '{"type":"also_valid"}'],
exitCode: 0,
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
const results = await collectAsyncGenerator(generator);
expect(results).toHaveLength(3);
expect(results[0]).toEqual({ type: 'valid' });
expect(results[1]).toMatchObject({
type: 'error',
error: expect.stringContaining('Failed to parse output'),
});
expect(results[2]).toEqual({ type: 'also_valid' });
});
it('should collect stderr output', async () => {
const mockProcess = createMockProcess({
stdoutLines: ['{"type":"test"}'],
stderrLines: ['Warning: something happened', 'Error: critical issue'],
exitCode: 0,
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
await collectAsyncGenerator(generator);
expect(consoleSpy.error).toHaveBeenCalledWith(
expect.stringContaining('Warning: something happened')
);
expect(consoleSpy.error).toHaveBeenCalledWith(
expect.stringContaining('Error: critical issue')
);
});
it('should yield error on non-zero exit code', async () => {
const mockProcess = createMockProcess({
stdoutLines: ['{"type":"started"}'],
stderrLines: ['Process failed with error'],
exitCode: 1,
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
const results = await collectAsyncGenerator(generator);
expect(results).toHaveLength(2);
expect(results[0]).toEqual({ type: 'started' });
expect(results[1]).toMatchObject({
type: 'error',
error: expect.stringContaining('Process failed with error'),
});
});
it('should yield error with exit code when stderr is empty', async () => {
const mockProcess = createMockProcess({
stdoutLines: ['{"type":"test"}'],
exitCode: 127,
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
const results = await collectAsyncGenerator(generator);
expect(results).toHaveLength(2);
expect(results[1]).toMatchObject({
type: 'error',
error: 'Process exited with code 127',
});
});
it('should handle process spawn errors', async () => {
const mockProcess = createMockProcess({
error: new Error('Command not found'),
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
const results = await collectAsyncGenerator(generator);
// When process.on('error') fires, exitCode is null
// The generator should handle this gracefully
expect(results).toEqual([]);
});
it('should kill process on AbortController signal', async () => {
const abortController = new AbortController();
const mockProcess = createMockProcess({
stdoutLines: ['{"type":"start"}'],
exitCode: 0,
delayMs: 100, // Delay to allow abort
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess({
...baseOptions,
abortController,
});
// Start consuming the generator
const promise = collectAsyncGenerator(generator);
// Abort after a short delay
setTimeout(() => abortController.abort(), 20);
await promise;
expect(mockProcess.kill).toHaveBeenCalledWith('SIGTERM');
expect(consoleSpy.log).toHaveBeenCalledWith(expect.stringContaining('Abort signal received'));
});
it('should spawn process with correct arguments', async () => {
const mockProcess = createMockProcess({ exitCode: 0 });
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const options: SubprocessOptions = {
command: 'my-command',
args: ['--flag', 'value'],
cwd: '/work/dir',
env: { CUSTOM_VAR: 'test' },
};
const generator = spawnJSONLProcess(options);
await collectAsyncGenerator(generator);
expect(cp.spawn).toHaveBeenCalledWith('my-command', ['--flag', 'value'], {
cwd: '/work/dir',
env: expect.objectContaining({ CUSTOM_VAR: 'test' }),
stdio: ['ignore', 'pipe', 'pipe'],
});
});
it('should merge env with process.env', async () => {
const mockProcess = createMockProcess({ exitCode: 0 });
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const options: SubprocessOptions = {
command: 'test',
args: [],
cwd: '/test',
env: { CUSTOM: 'value' },
};
const generator = spawnJSONLProcess(options);
await collectAsyncGenerator(generator);
expect(cp.spawn).toHaveBeenCalledWith(
'test',
[],
expect.objectContaining({
env: expect.objectContaining({
CUSTOM: 'value',
// Should also include existing process.env
NODE_ENV: process.env.NODE_ENV,
}),
})
);
});
it('should handle complex JSON objects', async () => {
const complexObject = {
type: 'complex',
nested: { deep: { value: [1, 2, 3] } },
array: [{ id: 1 }, { id: 2 }],
string: 'with "quotes" and \\backslashes',
};
const mockProcess = createMockProcess({
stdoutLines: [JSON.stringify(complexObject)],
exitCode: 0,
});
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
const generator = spawnJSONLProcess(baseOptions);
const results = await collectAsyncGenerator(generator);
expect(results).toHaveLength(1);
expect(results[0]).toEqual(complexObject);
});
});
describe('spawnProcess', () => {
const baseOptions: SubprocessOptions = {
command: 'test-command',
args: ['arg1'],
cwd: '/test',
};
it('should collect stdout and stderr', async () => {
const mockProcess = new EventEmitter() as cp.ChildProcess & {
stdout: Readable;
stderr: Readable;
kill: ReturnType<typeof vi.fn>;
};
const stdout = new Readable({ read() {} });
const stderr = new Readable({ read() {} });
mockProcess.stdout = stdout;
mockProcess.stderr = stderr;
mockProcess.kill = vi.fn().mockReturnValue(true);
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
setTimeout(() => {
stdout.push('line 1\n');
stdout.push('line 2\n');
stdout.push(null);
stderr.push('error 1\n');
stderr.push('error 2\n');
stderr.push(null);
mockProcess.emit('exit', 0);
}, 10);
const result = await spawnProcess(baseOptions);
expect(result.stdout).toBe('line 1\nline 2\n');
expect(result.stderr).toBe('error 1\nerror 2\n');
expect(result.exitCode).toBe(0);
});
it('should return correct exit code', async () => {
const mockProcess = new EventEmitter() as cp.ChildProcess & {
stdout: Readable;
stderr: Readable;
kill: ReturnType<typeof vi.fn>;
};
mockProcess.stdout = new Readable({ read() {} });
mockProcess.stderr = new Readable({ read() {} });
mockProcess.kill = vi.fn().mockReturnValue(true);
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
setTimeout(() => {
mockProcess.stdout.push(null);
mockProcess.stderr.push(null);
mockProcess.emit('exit', 42);
}, 10);
const result = await spawnProcess(baseOptions);
expect(result.exitCode).toBe(42);
});
it('should handle process errors', async () => {
const mockProcess = new EventEmitter() as cp.ChildProcess & {
stdout: Readable;
stderr: Readable;
kill: ReturnType<typeof vi.fn>;
};
mockProcess.stdout = new Readable({ read() {} });
mockProcess.stderr = new Readable({ read() {} });
mockProcess.kill = vi.fn().mockReturnValue(true);
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
setTimeout(() => {
mockProcess.emit('error', new Error('Spawn failed'));
}, 10);
await expect(spawnProcess(baseOptions)).rejects.toThrow('Spawn failed');
});
it('should handle AbortController signal', async () => {
const abortController = new AbortController();
const mockProcess = new EventEmitter() as cp.ChildProcess & {
stdout: Readable;
stderr: Readable;
kill: ReturnType<typeof vi.fn>;
};
mockProcess.stdout = new Readable({ read() {} });
mockProcess.stderr = new Readable({ read() {} });
mockProcess.kill = vi.fn().mockReturnValue(true);
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
setTimeout(() => abortController.abort(), 20);
await expect(spawnProcess({ ...baseOptions, abortController })).rejects.toThrow(
'Process aborted'
);
expect(mockProcess.kill).toHaveBeenCalledWith('SIGTERM');
});
it('should spawn with correct options', async () => {
const mockProcess = new EventEmitter() as cp.ChildProcess & {
stdout: Readable;
stderr: Readable;
kill: ReturnType<typeof vi.fn>;
};
mockProcess.stdout = new Readable({ read() {} });
mockProcess.stderr = new Readable({ read() {} });
mockProcess.kill = vi.fn().mockReturnValue(true);
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
setTimeout(() => {
mockProcess.stdout.push(null);
mockProcess.stderr.push(null);
mockProcess.emit('exit', 0);
}, 10);
const options: SubprocessOptions = {
command: 'my-cmd',
args: ['--verbose'],
cwd: '/my/dir',
env: { MY_VAR: 'value' },
};
await spawnProcess(options);
expect(cp.spawn).toHaveBeenCalledWith('my-cmd', ['--verbose'], {
cwd: '/my/dir',
env: expect.objectContaining({ MY_VAR: 'value' }),
stdio: ['ignore', 'pipe', 'pipe'],
});
});
it('should handle empty stdout and stderr', async () => {
const mockProcess = new EventEmitter() as cp.ChildProcess & {
stdout: Readable;
stderr: Readable;
kill: ReturnType<typeof vi.fn>;
};
mockProcess.stdout = new Readable({ read() {} });
mockProcess.stderr = new Readable({ read() {} });
mockProcess.kill = vi.fn().mockReturnValue(true);
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
setTimeout(() => {
mockProcess.stdout.push(null);
mockProcess.stderr.push(null);
mockProcess.emit('exit', 0);
}, 10);
const result = await spawnProcess(baseOptions);
expect(result.stdout).toBe('');
expect(result.stderr).toBe('');
expect(result.exitCode).toBe(0);
});
});
});

View File

@@ -0,0 +1,9 @@
{
"extends": "../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -0,0 +1,23 @@
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true,
environment: 'node',
include: ['tests/**/*.test.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'json', 'html'],
include: ['src/**/*.ts'],
exclude: ['src/**/*.d.ts', 'src/index.ts'],
thresholds: {
// Excellent coverage: 94.69% stmts, 80.48% branches, 97.14% funcs, 94.64% lines
// All files now have comprehensive tests
lines: 90,
functions: 95,
branches: 75,
statements: 90,
},
},
},
});

257
libs/prompts/README.md Normal file
View File

@@ -0,0 +1,257 @@
# @automaker/prompts
AI prompt templates for text enhancement and other AI-powered features in AutoMaker.
## Overview
This package provides professionally crafted prompt templates for enhancing user-written task descriptions using Claude. It includes system prompts, few-shot examples, and utility functions for four enhancement modes: improve, technical, simplify, and acceptance.
## Installation
```bash
npm install @automaker/prompts
```
## Exports
### Enhancement Modes
Four modes are available, each optimized for a specific enhancement task:
- **improve** - Transform vague requests into clear, actionable tasks
- **technical** - Add implementation details and technical specifications
- **simplify** - Make verbose descriptions concise and focused
- **acceptance** - Add testable acceptance criteria
### System Prompts
Direct access to system prompts for each mode:
```typescript
import {
IMPROVE_SYSTEM_PROMPT,
TECHNICAL_SYSTEM_PROMPT,
SIMPLIFY_SYSTEM_PROMPT,
ACCEPTANCE_SYSTEM_PROMPT,
} from '@automaker/prompts';
console.log(IMPROVE_SYSTEM_PROMPT); // Full system prompt for improve mode
```
### Helper Functions
#### `getEnhancementPrompt(mode)`
Get the prompt configuration (system prompt plus a short description of the mode). Invalid modes fall back to `improve`:
```typescript
import { getEnhancementPrompt } from '@automaker/prompts';
const result = getEnhancementPrompt('improve');
console.log(result.systemPrompt); // System instructions for improve mode
console.log(result.description); // What this mode does
```
#### `getSystemPrompt(mode)`
Get only the system prompt for a mode:
```typescript
import { getSystemPrompt } from '@automaker/prompts';
const systemPrompt = getSystemPrompt('technical');
```
#### `getExamples(mode)`
Get few-shot examples for a mode:
```typescript
import { getExamples } from '@automaker/prompts';
const examples = getExamples('simplify');
// Returns array of { input, output } pairs
```
#### `buildUserPrompt(mode, text, includeExamples?)`
Build the user prompt, with few-shot examples included by default:
```typescript
import { buildUserPrompt } from '@automaker/prompts';
const userPrompt = buildUserPrompt('improve', 'add login page');
// Includes examples + user's description
const withoutExamples = buildUserPrompt('improve', 'add login page', false);
// Just the enhancement instruction + description
```
#### `isValidEnhancementMode(mode)`
Check if a mode is valid:
```typescript
import { isValidEnhancementMode } from '@automaker/prompts';
if (isValidEnhancementMode('improve')) {
// Mode is valid
}
```
#### `getAvailableEnhancementModes()`
Get list of all available modes:
```typescript
import { getAvailableEnhancementModes } from '@automaker/prompts';
const modes = getAvailableEnhancementModes();
// Returns: ['improve', 'technical', 'simplify', 'acceptance']
```
## Usage Examples
### Basic Enhancement
```typescript
import { getEnhancementPrompt, buildUserPrompt, type EnhancementMode } from '@automaker/prompts';
async function enhanceDescription(description: string, mode: EnhancementMode) {
  const { systemPrompt } = getEnhancementPrompt(mode);
  const userPrompt = buildUserPrompt(mode, description);
  const response = await claude.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 1024,
    system: systemPrompt,
    messages: [{ role: 'user', content: userPrompt }],
  });
  return response.content[0].text;
}
// Example usage
const improved = await enhanceDescription('make app faster', 'improve');
// → "Optimize application performance by profiling bottlenecks..."
const technical = await enhanceDescription('add search', 'technical');
// → "Implement full-text search with the following components:..."
```
### Mode Validation
```typescript
import { isValidEnhancementMode, getAvailableEnhancementModes } from '@automaker/prompts';
function validateAndEnhance(mode: string, description: string) {
if (!isValidEnhancementMode(mode)) {
const available = getAvailableEnhancementModes().join(', ');
throw new Error(`Invalid mode "${mode}". Available: ${available}`);
}
return enhanceDescription(description, mode);
}
```
### Custom Prompt Building
```typescript
import { getSystemPrompt, buildUserPrompt, getExamples } from '@automaker/prompts';
// Get components separately for custom workflows
const systemPrompt = getSystemPrompt('simplify');
const examples = getExamples('simplify');
const userPrompt = buildUserPrompt('simplify', userInput);
// Use with custom processing
const response = await processWithClaude(systemPrompt, userPrompt);
```
### Server Route Example
```typescript
import { getEnhancementPrompt, buildUserPrompt, isValidEnhancementMode } from '@automaker/prompts';
import { createLogger } from '@automaker/utils';
const logger = createLogger('EnhancementRoute');
app.post('/api/enhance', async (req, res) => {
const { description, mode } = req.body;
if (!isValidEnhancementMode(mode)) {
return res.status(400).json({ error: 'Invalid enhancement mode' });
}
try {
    const { systemPrompt } = getEnhancementPrompt(mode);
    const userPrompt = buildUserPrompt(mode, description);
const result = await claude.messages.create({
model: 'claude-sonnet-4-20250514',
max_tokens: 1024,
system: systemPrompt,
messages: [{ role: 'user', content: userPrompt }],
});
logger.info(`Enhanced with mode: ${mode}`);
res.json({ enhanced: result.content[0].text });
} catch (error) {
logger.error('Enhancement failed:', error);
res.status(500).json({ error: 'Enhancement failed' });
}
});
```
## Enhancement Mode Details
### Improve Mode
Transforms vague or unclear requests into clear, actionable specifications.
**Before:** "make app faster"
**After:** "Optimize application performance by:
1. Profiling code to identify bottlenecks
2. Implementing caching for frequently accessed data
3. Optimizing database queries..."
### Technical Mode
Adds implementation details and technical specifications.
**Before:** "add search"
**After:** "Implement full-text search using:
- Backend: Elasticsearch or PostgreSQL full-text search
- Frontend: Debounced search input with loading states
- API: GET /api/search endpoint with pagination..."
### Simplify Mode
Makes verbose descriptions concise while preserving essential information.
**Before:** "We really need to make sure that the application has the capability to allow users to be able to search for various items..."
**After:** "Add search functionality for items with filters and results display."
### Acceptance Mode
Adds testable acceptance criteria to feature descriptions.
**Before:** "user login"
**After:** "User login feature
- User can enter email and password
- System validates credentials
- On success: redirect to dashboard
- On failure: show error message
- Remember me option persists login..."
## Dependencies
- `@automaker/types` - Type definitions for EnhancementMode and EnhancementExample
## Used By
- `@automaker/server` - Enhancement API routes
- Future packages requiring AI-powered text enhancement
## License
SEE LICENSE IN LICENSE

29
libs/prompts/package.json Normal file
View File

@@ -0,0 +1,29 @@
{
"name": "@automaker/prompts",
"version": "1.0.0",
"type": "module",
"description": "AI prompt templates for AutoMaker",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"watch": "tsc --watch",
"test": "vitest run",
"test:watch": "vitest"
},
"keywords": [
"automaker",
"prompts",
"ai"
],
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
"dependencies": {
"@automaker/types": "^1.0.0"
},
"devDependencies": {
"@types/node": "^22.10.5",
"typescript": "^5.7.3",
"vitest": "^4.0.16"
}
}

View File

@@ -0,0 +1,448 @@
/**
* Enhancement Prompts Library - AI-powered text enhancement for task descriptions
*
* Provides prompt templates and utilities for enhancing user-written task descriptions:
* - Improve: Transform vague requests into clear, actionable tasks
* - Technical: Add implementation details and technical specifications
* - Simplify: Make verbose descriptions concise and focused
* - Acceptance: Add testable acceptance criteria
*
* Uses chain-of-thought prompting with few-shot examples for consistent results.
*/
import type { EnhancementMode, EnhancementExample } from '@automaker/types';
// Re-export enhancement types from shared package
export type { EnhancementMode, EnhancementExample } from '@automaker/types';
/**
* System prompt for the "improve" enhancement mode.
* Transforms vague or unclear requests into clear, actionable task descriptions.
*/
export const IMPROVE_SYSTEM_PROMPT = `You are an expert at transforming vague, unclear, or incomplete task descriptions into clear, actionable specifications.
Your task is to take a user's rough description and improve it by:
1. ANALYZE the input:
- Identify the core intent behind the request
- Note any ambiguities or missing details
- Determine what success would look like
2. CLARIFY the scope:
- Define clear boundaries for the task
- Identify implicit requirements
- Add relevant context that may be assumed
3. STRUCTURE the output:
- Write a clear, actionable title
- Provide a concise description of what needs to be done
- Break down into specific sub-tasks if appropriate
4. ENHANCE with details:
- Add specific, measurable outcomes where possible
- Include edge cases to consider
- Note any dependencies or prerequisites
Output ONLY the improved task description. Do not include explanations, markdown formatting, or meta-commentary about your changes.`;
/**
* System prompt for the "technical" enhancement mode.
* Adds implementation details and technical specifications.
*/
export const TECHNICAL_SYSTEM_PROMPT = `You are a senior software engineer skilled at adding technical depth to feature descriptions.
Your task is to enhance a task description with technical implementation details:
1. ANALYZE the requirement:
- Understand the functional goal
- Identify the technical domain (frontend, backend, database, etc.)
- Consider the likely tech stack based on context
2. ADD technical specifications:
- Suggest specific technologies, libraries, or patterns
- Define API contracts or data structures if relevant
- Note performance considerations
- Identify security implications
3. OUTLINE implementation approach:
- Break down into technical sub-tasks
- Suggest file structure or component organization
- Note integration points with existing systems
4. CONSIDER edge cases:
- Error handling requirements
- Loading and empty states
- Boundary conditions
Output ONLY the enhanced technical description. Keep it concise but comprehensive. Do not include explanations about your reasoning.`;
/**
* System prompt for the "simplify" enhancement mode.
* Makes verbose descriptions concise and focused.
*/
export const SIMPLIFY_SYSTEM_PROMPT = `You are an expert editor who excels at making verbose text concise without losing meaning.
Your task is to simplify a task description while preserving essential information:
1. IDENTIFY the core message:
- Extract the primary goal or requirement
- Note truly essential details
- Separate nice-to-have from must-have information
2. ELIMINATE redundancy:
- Remove repeated information
- Cut unnecessary qualifiers and hedging language
- Remove filler words and phrases
3. CONSOLIDATE related points:
- Merge overlapping requirements
- Group related items together
- Use concise language
4. PRESERVE critical details:
- Keep specific technical requirements
- Retain important constraints
- Maintain actionable specifics
Output ONLY the simplified description. Aim for 30-50% reduction in length while keeping all essential information. Do not explain your changes.`;
/**
* System prompt for the "acceptance" enhancement mode.
* Adds testable acceptance criteria to task descriptions.
*/
export const ACCEPTANCE_SYSTEM_PROMPT = `You are a QA specialist skilled at defining testable acceptance criteria for software features.
Your task is to enhance a task description by adding clear acceptance criteria:
1. UNDERSTAND the feature:
- Identify all user-facing behaviors
- Note system state changes
- Consider different user roles or scenarios
2. DEFINE acceptance criteria using Given-When-Then format:
- Given: The initial context or preconditions
- When: The action or trigger
- Then: The expected outcome
3. COVER key scenarios:
- Happy path (successful completion)
- Edge cases (boundary conditions)
- Error scenarios (what should NOT happen)
- Performance requirements if relevant
4. MAKE criteria testable:
- Use specific, measurable outcomes
- Avoid vague terms like "quickly" or "easily"
- Include specific values where applicable
Output the original description followed by a clear "Acceptance Criteria:" section with numbered, testable criteria. Do not include explanations about your process.`;
/**
* Few-shot examples for the "improve" enhancement mode
*/
export const IMPROVE_EXAMPLES: EnhancementExample[] = [
{
input: 'add dark mode',
output: `Add Dark Mode Theme Support
Implement a dark mode option that allows users to switch between light and dark themes throughout the application.
Requirements:
- Add a theme toggle switch in the settings area
- Store user's theme preference persistently
- Apply dark theme colors to all UI components
- Respect system preference as default if user hasn't set a preference
- Ensure smooth transition when switching themes`,
},
{
input: 'fix the login bug',
output: `Fix Login Authentication Flow
Investigate and resolve issues preventing users from successfully logging in to the application.
Requirements:
- Identify the specific failure point in the login process
- Ensure proper credential validation
- Handle error states with clear user feedback
- Verify session token is correctly stored after successful login
- Test with various credential combinations (valid, invalid, edge cases)`,
},
{
input: 'make it faster',
output: `Improve Application Performance
Optimize application loading and response times to enhance user experience.
Requirements:
- Profile current performance to identify bottlenecks
- Implement code splitting and lazy loading for routes/components
- Optimize database queries and add appropriate indexes
- Add caching for frequently accessed data
- Set performance budget targets (e.g., LCP < 2.5s, FID < 100ms)
- Measure and document improvements`,
},
];
/**
* Few-shot examples for the "technical" enhancement mode
*/
export const TECHNICAL_EXAMPLES: EnhancementExample[] = [
{
input: 'Add user profile page',
output: `Add User Profile Page
Create a dedicated profile page for viewing and editing user information.
Technical Implementation:
- Frontend: React component at /profile route with form validation
- API Endpoint: GET/PUT /api/users/:id for fetching and updating profile
- Data Model: Extend User schema with profile fields (avatar, bio, preferences)
- File Upload: Integrate cloud storage for avatar images with client-side resizing
- State Management: Use optimistic updates for better UX
- Validation: Server-side validation matching client-side rules
Components:
- ProfileHeader (avatar, name, edit button)
- ProfileForm (editable fields with validation)
- AvatarUploader (drag-drop with preview)
Security: Ensure users can only edit their own profile (auth middleware)`,
},
{
input: 'Add search functionality',
output: `Add Search Functionality
Implement full-text search across application content.
Technical Implementation:
- Search Engine: Use Elasticsearch or PostgreSQL full-text search
- API: GET /api/search?q={query}&type={type}&page={page}
- Indexing: Create search index with relevant fields, update on content changes
- Frontend: Debounced search input (300ms) with typeahead suggestions
- Results: Paginated results with highlighted matching text
Architecture:
- SearchService class handling query building and execution
- SearchIndex worker for background indexing
- SearchResults component with filtering and sorting
Performance:
- Implement search result caching (Redis, 5-minute TTL)
- Limit results per page (20 items)
- Add query complexity limits to prevent abuse`,
},
];
/**
* Few-shot examples for the "simplify" enhancement mode
*/
export const SIMPLIFY_EXAMPLES: EnhancementExample[] = [
{
input: `We need to implement a feature that would allow our users to be able to export their data in various different formats. The formats we are thinking about supporting include CSV which is commonly used for spreadsheet applications, and also JSON which is good for developers, and possibly PDF for people who want a printable version. The user should be able to go somewhere in the UI and select which format they want and then download their data in that format. We should probably also show some kind of progress indicator while the export is happening since it might take a while for large datasets.`,
output: `Add Data Export Feature
Allow users to export their data in CSV, JSON, or PDF format.
Requirements:
- Add export button/menu in the UI
- Support three formats: CSV (spreadsheets), JSON (developers), PDF (printing)
- Show progress indicator during export
- Handle large datasets gracefully`,
},
{
input: `The current notification system is not working very well and users are complaining about it. Sometimes they don't receive notifications at all, and other times they receive too many notifications and feel overwhelmed. We also have had reports that the notifications are not appearing correctly on mobile devices. Additionally, some users have requested the ability to customize which types of notifications they receive. We should look into all of these issues and make the notification system work better overall.`,
output: `Fix and Improve Notification System
Address reliability issues and add user controls for notifications.
Issues to fix:
- Missing notifications (delivery reliability)
- Notification overload (implement batching/digest)
- Mobile display problems
Enhancements:
- Add notification preferences (per-type controls)
- Test across devices and platforms`,
},
];
/**
* Few-shot examples for the "acceptance" enhancement mode
*/
export const ACCEPTANCE_EXAMPLES: EnhancementExample[] = [
{
input: 'Add password reset functionality',
output: `Add Password Reset Functionality
Allow users to reset their password via email when they forget it.
Acceptance Criteria:
1. Given a user is on the login page, when they click "Forgot Password", then they should see a password reset form requesting their email.
2. Given a user enters a valid registered email, when they submit the form, then they should receive a password reset email within 2 minutes.
3. Given a user clicks the reset link in their email, when the link is less than 1 hour old, then they should see a form to enter a new password.
4. Given a user enters a new valid password (min 8 chars, 1 number, 1 special char), when they submit, then their password should be updated and they should be redirected to login.
5. Given a user enters an unregistered email, when they submit the reset form, then they should see the same success message (security: don't reveal which emails exist).
6. Given a reset link older than 1 hour, when a user clicks it, then they should see an expiration message with option to request a new link.`,
},
{
input: 'Shopping cart checkout',
output: `Shopping Cart Checkout
Implement the checkout flow for purchasing items in the shopping cart.
Acceptance Criteria:
1. Given a user has items in their cart, when they click "Checkout", then they should see an order summary with item details and total price.
2. Given a user is on the checkout page, when they enter valid shipping information, then the form should validate in real-time and show estimated delivery date.
3. Given valid shipping info is entered, when the user proceeds to payment, then they should see available payment methods (credit card, PayPal).
4. Given valid payment details are entered, when the user confirms the order, then the payment should be processed and order confirmation displayed within 5 seconds.
5. Given a successful order, when confirmation is shown, then the user should receive an email receipt and their cart should be emptied.
6. Given a payment failure, when the error occurs, then the user should see a clear error message and their cart should remain intact.
7. Given the user closes the browser during checkout, when they return, then their cart contents should still be available.`,
},
];
/**
* Map of enhancement modes to their system prompts
*/
const SYSTEM_PROMPTS: Record<EnhancementMode, string> = {
improve: IMPROVE_SYSTEM_PROMPT,
technical: TECHNICAL_SYSTEM_PROMPT,
simplify: SIMPLIFY_SYSTEM_PROMPT,
acceptance: ACCEPTANCE_SYSTEM_PROMPT,
};
/**
* Map of enhancement modes to their few-shot examples
*/
const EXAMPLES: Record<EnhancementMode, EnhancementExample[]> = {
improve: IMPROVE_EXAMPLES,
technical: TECHNICAL_EXAMPLES,
simplify: SIMPLIFY_EXAMPLES,
acceptance: ACCEPTANCE_EXAMPLES,
};
/**
* Enhancement prompt configuration returned by getEnhancementPrompt
*/
export interface EnhancementPromptConfig {
/** System prompt for the enhancement mode */
systemPrompt: string;
/** Description of what this mode does */
description: string;
}
/**
* Descriptions for each enhancement mode
*/
const MODE_DESCRIPTIONS: Record<EnhancementMode, string> = {
improve: 'Transform vague requests into clear, actionable task descriptions',
technical: 'Add implementation details and technical specifications',
simplify: 'Make verbose descriptions concise and focused',
acceptance: 'Add testable acceptance criteria to task descriptions',
};
/**
* Get the enhancement prompt configuration for a given mode
*
* @param mode - The enhancement mode (falls back to 'improve' if invalid)
* @returns The enhancement prompt configuration
*/
export function getEnhancementPrompt(mode: string): EnhancementPromptConfig {
const normalizedMode = mode.toLowerCase() as EnhancementMode;
const validMode = normalizedMode in SYSTEM_PROMPTS ? normalizedMode : 'improve';
return {
systemPrompt: SYSTEM_PROMPTS[validMode],
description: MODE_DESCRIPTIONS[validMode],
};
}
/**
* Get the system prompt for a specific enhancement mode
*
* @param mode - The enhancement mode to get the prompt for
* @returns The system prompt string
*/
export function getSystemPrompt(mode: EnhancementMode): string {
return SYSTEM_PROMPTS[mode];
}
/**
* Get the few-shot examples for a specific enhancement mode
*
* @param mode - The enhancement mode to get examples for
* @returns Array of input/output example pairs
*/
export function getExamples(mode: EnhancementMode): EnhancementExample[] {
return EXAMPLES[mode];
}
/**
* Build a user prompt for enhancement with optional few-shot examples
*
* @param mode - The enhancement mode
* @param text - The text to enhance
* @param includeExamples - Whether to include few-shot examples (default: true)
* @returns The formatted user prompt string
*/
export function buildUserPrompt(
mode: EnhancementMode,
text: string,
includeExamples: boolean = true
): string {
const examples = includeExamples ? getExamples(mode) : [];
if (examples.length === 0) {
return `Please enhance the following task description:\n\n${text}`;
}
// Build few-shot examples section
const examplesSection = examples
.map(
(example, index) =>
`Example ${index + 1}:\nInput: ${example.input}\nOutput: ${example.output}`
)
.join('\n\n---\n\n');
return `Here are some examples of how to enhance task descriptions:
${examplesSection}
---
Now, please enhance the following task description:
${text}`;
}
/**
* Check if a mode is a valid enhancement mode
*
* @param mode - The mode to check
* @returns True if the mode is valid
*/
export function isValidEnhancementMode(mode: string): mode is EnhancementMode {
return mode in SYSTEM_PROMPTS;
}
/**
* Get all available enhancement modes
*
* @returns Array of available enhancement mode names
*/
export function getAvailableEnhancementModes(): EnhancementMode[] {
return Object.keys(SYSTEM_PROMPTS) as EnhancementMode[];
}

25
libs/prompts/src/index.ts Normal file
View File

@@ -0,0 +1,25 @@
/**
* @automaker/prompts
* AI prompt templates for AutoMaker
*/
// Enhancement prompts
export {
IMPROVE_SYSTEM_PROMPT,
TECHNICAL_SYSTEM_PROMPT,
SIMPLIFY_SYSTEM_PROMPT,
ACCEPTANCE_SYSTEM_PROMPT,
IMPROVE_EXAMPLES,
TECHNICAL_EXAMPLES,
SIMPLIFY_EXAMPLES,
ACCEPTANCE_EXAMPLES,
getEnhancementPrompt,
getSystemPrompt,
getExamples,
buildUserPrompt,
isValidEnhancementMode,
getAvailableEnhancementModes,
} from './enhancement.js';
// Re-export types from @automaker/types
export type { EnhancementMode, EnhancementExample } from '@automaker/types';

View File

@@ -0,0 +1,525 @@
import { describe, it, expect } from 'vitest';
import {
getEnhancementPrompt,
getSystemPrompt,
getExamples,
buildUserPrompt,
isValidEnhancementMode,
getAvailableEnhancementModes,
IMPROVE_SYSTEM_PROMPT,
TECHNICAL_SYSTEM_PROMPT,
SIMPLIFY_SYSTEM_PROMPT,
ACCEPTANCE_SYSTEM_PROMPT,
IMPROVE_EXAMPLES,
TECHNICAL_EXAMPLES,
SIMPLIFY_EXAMPLES,
ACCEPTANCE_EXAMPLES,
} from '../src/enhancement.js';
describe('enhancement.ts', () => {
describe('System Prompt Constants', () => {
it('should export IMPROVE_SYSTEM_PROMPT', () => {
expect(IMPROVE_SYSTEM_PROMPT).toBeDefined();
expect(typeof IMPROVE_SYSTEM_PROMPT).toBe('string');
expect(IMPROVE_SYSTEM_PROMPT).toContain('vague, unclear');
expect(IMPROVE_SYSTEM_PROMPT).toContain('actionable');
});
it('should export TECHNICAL_SYSTEM_PROMPT', () => {
expect(TECHNICAL_SYSTEM_PROMPT).toBeDefined();
expect(typeof TECHNICAL_SYSTEM_PROMPT).toBe('string');
expect(TECHNICAL_SYSTEM_PROMPT).toContain('technical');
expect(TECHNICAL_SYSTEM_PROMPT).toContain('implementation');
});
it('should export SIMPLIFY_SYSTEM_PROMPT', () => {
expect(SIMPLIFY_SYSTEM_PROMPT).toBeDefined();
expect(typeof SIMPLIFY_SYSTEM_PROMPT).toBe('string');
expect(SIMPLIFY_SYSTEM_PROMPT).toContain('verbose');
expect(SIMPLIFY_SYSTEM_PROMPT).toContain('concise');
});
it('should export ACCEPTANCE_SYSTEM_PROMPT', () => {
expect(ACCEPTANCE_SYSTEM_PROMPT).toBeDefined();
expect(typeof ACCEPTANCE_SYSTEM_PROMPT).toBe('string');
expect(ACCEPTANCE_SYSTEM_PROMPT).toContain('acceptance criteria');
expect(ACCEPTANCE_SYSTEM_PROMPT).toContain('testable');
});
});
describe('Examples Constants', () => {
it('should export IMPROVE_EXAMPLES with valid structure', () => {
expect(IMPROVE_EXAMPLES).toBeDefined();
expect(Array.isArray(IMPROVE_EXAMPLES)).toBe(true);
expect(IMPROVE_EXAMPLES.length).toBeGreaterThan(0);
IMPROVE_EXAMPLES.forEach((example) => {
expect(example).toHaveProperty('input');
expect(example).toHaveProperty('output');
expect(typeof example.input).toBe('string');
expect(typeof example.output).toBe('string');
});
});
it('should export TECHNICAL_EXAMPLES with valid structure', () => {
expect(TECHNICAL_EXAMPLES).toBeDefined();
expect(Array.isArray(TECHNICAL_EXAMPLES)).toBe(true);
expect(TECHNICAL_EXAMPLES.length).toBeGreaterThan(0);
TECHNICAL_EXAMPLES.forEach((example) => {
expect(example).toHaveProperty('input');
expect(example).toHaveProperty('output');
expect(typeof example.input).toBe('string');
expect(typeof example.output).toBe('string');
});
});
it('should export SIMPLIFY_EXAMPLES with valid structure', () => {
expect(SIMPLIFY_EXAMPLES).toBeDefined();
expect(Array.isArray(SIMPLIFY_EXAMPLES)).toBe(true);
expect(SIMPLIFY_EXAMPLES.length).toBeGreaterThan(0);
SIMPLIFY_EXAMPLES.forEach((example) => {
expect(example).toHaveProperty('input');
expect(example).toHaveProperty('output');
expect(typeof example.input).toBe('string');
expect(typeof example.output).toBe('string');
});
});
it('should export ACCEPTANCE_EXAMPLES with valid structure', () => {
expect(ACCEPTANCE_EXAMPLES).toBeDefined();
expect(Array.isArray(ACCEPTANCE_EXAMPLES)).toBe(true);
expect(ACCEPTANCE_EXAMPLES.length).toBeGreaterThan(0);
ACCEPTANCE_EXAMPLES.forEach((example) => {
expect(example).toHaveProperty('input');
expect(example).toHaveProperty('output');
expect(typeof example.input).toBe('string');
expect(typeof example.output).toBe('string');
});
});
it('should have shorter outputs in SIMPLIFY_EXAMPLES', () => {
SIMPLIFY_EXAMPLES.forEach((example) => {
// Simplify examples should have shorter output than input
// (though not always strictly enforced, it's the general pattern)
expect(example.output).toBeDefined();
expect(example.output.length).toBeGreaterThan(0);
});
});
});
describe('getEnhancementPrompt', () => {
it("should return prompt config for 'improve' mode", () => {
const result = getEnhancementPrompt('improve');
expect(result).toHaveProperty('systemPrompt');
expect(result).toHaveProperty('description');
expect(result.systemPrompt).toBe(IMPROVE_SYSTEM_PROMPT);
expect(result.description).toContain('vague');
expect(result.description).toContain('actionable');
});
it("should return prompt config for 'technical' mode", () => {
const result = getEnhancementPrompt('technical');
expect(result).toHaveProperty('systemPrompt');
expect(result).toHaveProperty('description');
expect(result.systemPrompt).toBe(TECHNICAL_SYSTEM_PROMPT);
expect(result.description).toContain('implementation');
});
it("should return prompt config for 'simplify' mode", () => {
const result = getEnhancementPrompt('simplify');
expect(result).toHaveProperty('systemPrompt');
expect(result).toHaveProperty('description');
expect(result.systemPrompt).toBe(SIMPLIFY_SYSTEM_PROMPT);
expect(result.description).toContain('verbose');
});
it("should return prompt config for 'acceptance' mode", () => {
const result = getEnhancementPrompt('acceptance');
expect(result).toHaveProperty('systemPrompt');
expect(result).toHaveProperty('description');
expect(result.systemPrompt).toBe(ACCEPTANCE_SYSTEM_PROMPT);
expect(result.description).toContain('acceptance');
});
it('should handle uppercase mode', () => {
const result = getEnhancementPrompt('IMPROVE');
expect(result.systemPrompt).toBe(IMPROVE_SYSTEM_PROMPT);
});
it('should handle mixed case mode', () => {
const result = getEnhancementPrompt('TeChnIcaL');
expect(result.systemPrompt).toBe(TECHNICAL_SYSTEM_PROMPT);
});
it("should fall back to 'improve' for invalid mode", () => {
const result = getEnhancementPrompt('invalid-mode');
expect(result.systemPrompt).toBe(IMPROVE_SYSTEM_PROMPT);
expect(result.description).toContain('vague');
});
it("should fall back to 'improve' for empty string", () => {
const result = getEnhancementPrompt('');
expect(result.systemPrompt).toBe(IMPROVE_SYSTEM_PROMPT);
});
});
describe('getSystemPrompt', () => {
it("should return IMPROVE_SYSTEM_PROMPT for 'improve'", () => {
const result = getSystemPrompt('improve');
expect(result).toBe(IMPROVE_SYSTEM_PROMPT);
});
it("should return TECHNICAL_SYSTEM_PROMPT for 'technical'", () => {
const result = getSystemPrompt('technical');
expect(result).toBe(TECHNICAL_SYSTEM_PROMPT);
});
it("should return SIMPLIFY_SYSTEM_PROMPT for 'simplify'", () => {
const result = getSystemPrompt('simplify');
expect(result).toBe(SIMPLIFY_SYSTEM_PROMPT);
});
it("should return ACCEPTANCE_SYSTEM_PROMPT for 'acceptance'", () => {
const result = getSystemPrompt('acceptance');
expect(result).toBe(ACCEPTANCE_SYSTEM_PROMPT);
});
});
describe('getExamples', () => {
it("should return IMPROVE_EXAMPLES for 'improve'", () => {
const result = getExamples('improve');
expect(result).toBe(IMPROVE_EXAMPLES);
expect(result.length).toBeGreaterThan(0);
});
it("should return TECHNICAL_EXAMPLES for 'technical'", () => {
const result = getExamples('technical');
expect(result).toBe(TECHNICAL_EXAMPLES);
expect(result.length).toBeGreaterThan(0);
});
it("should return SIMPLIFY_EXAMPLES for 'simplify'", () => {
const result = getExamples('simplify');
expect(result).toBe(SIMPLIFY_EXAMPLES);
expect(result.length).toBeGreaterThan(0);
});
it("should return ACCEPTANCE_EXAMPLES for 'acceptance'", () => {
const result = getExamples('acceptance');
expect(result).toBe(ACCEPTANCE_EXAMPLES);
expect(result.length).toBeGreaterThan(0);
});
});
describe('buildUserPrompt', () => {
const testText = 'Add a login feature';
describe('with examples (default)', () => {
it("should include examples by default for 'improve' mode", () => {
const result = buildUserPrompt('improve', testText);
expect(result).toContain('Here are some examples');
expect(result).toContain('Example 1:');
expect(result).toContain(IMPROVE_EXAMPLES[0].input);
expect(result).toContain(IMPROVE_EXAMPLES[0].output);
expect(result).toContain(testText);
});
it("should include examples by default for 'technical' mode", () => {
const result = buildUserPrompt('technical', testText);
expect(result).toContain('Here are some examples');
expect(result).toContain('Example 1:');
expect(result).toContain(TECHNICAL_EXAMPLES[0].input);
expect(result).toContain(testText);
});
it('should include examples when explicitly set to true', () => {
const result = buildUserPrompt('improve', testText, true);
expect(result).toContain('Here are some examples');
expect(result).toContain(testText);
});
it('should format all examples with numbered labels', () => {
const result = buildUserPrompt('improve', testText);
IMPROVE_EXAMPLES.forEach((_, index) => {
expect(result).toContain(`Example ${index + 1}:`);
});
});
it('should separate examples with dividers', () => {
const result = buildUserPrompt('improve', testText);
      // Count dividers (---): (examples.length - 1) between examples plus one before the user text = examples.length
const dividerCount = (result.match(/---/g) || []).length;
expect(dividerCount).toBe(IMPROVE_EXAMPLES.length);
});
it("should include 'Now, please enhance' before user text", () => {
const result = buildUserPrompt('improve', testText);
expect(result).toContain('Now, please enhance the following');
expect(result).toContain(testText);
});
});
describe('without examples', () => {
it('should not include examples when includeExamples is false', () => {
const result = buildUserPrompt('improve', testText, false);
expect(result).not.toContain('Here are some examples');
expect(result).not.toContain('Example 1:');
expect(result).not.toContain(IMPROVE_EXAMPLES[0].input);
});
it('should have simple prompt without examples', () => {
const result = buildUserPrompt('improve', testText, false);
expect(result).toBe(`Please enhance the following task description:\n\n${testText}`);
});
it('should preserve user text without examples', () => {
const result = buildUserPrompt('technical', testText, false);
expect(result).toContain(testText);
expect(result).toContain('Please enhance');
});
});
describe('text formatting', () => {
it('should preserve multiline text', () => {
const multilineText = 'Line 1\nLine 2\nLine 3';
const result = buildUserPrompt('improve', multilineText);
expect(result).toContain(multilineText);
});
it('should handle empty text', () => {
const result = buildUserPrompt('improve', '');
// With examples by default, it should contain "Now, please enhance"
expect(result).toContain('Now, please enhance');
expect(result).toContain('Here are some examples');
});
it('should handle whitespace-only text', () => {
const result = buildUserPrompt('improve', ' ');
expect(result).toContain(' ');
});
it('should handle special characters in text', () => {
const specialText = 'Test <html> & "quotes" \'apostrophes\'';
const result = buildUserPrompt('improve', specialText);
expect(result).toContain(specialText);
});
});
describe('all modes', () => {
it('should work for all valid enhancement modes', () => {
const modes: Array<'improve' | 'technical' | 'simplify' | 'acceptance'> = [
'improve',
'technical',
'simplify',
'acceptance',
];
modes.forEach((mode) => {
const result = buildUserPrompt(mode, testText);
expect(result).toBeDefined();
expect(result).toContain(testText);
expect(result.length).toBeGreaterThan(testText.length);
});
});
});
});
describe('isValidEnhancementMode', () => {
it("should return true for 'improve'", () => {
expect(isValidEnhancementMode('improve')).toBe(true);
});
it("should return true for 'technical'", () => {
expect(isValidEnhancementMode('technical')).toBe(true);
});
it("should return true for 'simplify'", () => {
expect(isValidEnhancementMode('simplify')).toBe(true);
});
it("should return true for 'acceptance'", () => {
expect(isValidEnhancementMode('acceptance')).toBe(true);
});
it('should return false for invalid mode', () => {
expect(isValidEnhancementMode('invalid')).toBe(false);
});
it('should return false for empty string', () => {
expect(isValidEnhancementMode('')).toBe(false);
});
it('should return false for uppercase mode', () => {
// Should be case-sensitive since we check object keys directly
expect(isValidEnhancementMode('IMPROVE')).toBe(false);
});
it('should return false for mixed case mode', () => {
expect(isValidEnhancementMode('ImProve')).toBe(false);
});
it('should return false for partial mode names', () => {
expect(isValidEnhancementMode('impro')).toBe(false);
expect(isValidEnhancementMode('tech')).toBe(false);
});
it('should return false for mode with extra characters', () => {
expect(isValidEnhancementMode('improve ')).toBe(false);
expect(isValidEnhancementMode(' improve')).toBe(false);
});
});
describe('getAvailableEnhancementModes', () => {
it('should return array of all enhancement modes', () => {
const modes = getAvailableEnhancementModes();
expect(Array.isArray(modes)).toBe(true);
expect(modes.length).toBe(4);
});
it('should include all valid modes', () => {
const modes = getAvailableEnhancementModes();
expect(modes).toContain('improve');
expect(modes).toContain('technical');
expect(modes).toContain('simplify');
expect(modes).toContain('acceptance');
});
it('should return modes in consistent order', () => {
const modes1 = getAvailableEnhancementModes();
const modes2 = getAvailableEnhancementModes();
expect(modes1).toEqual(modes2);
});
it('should return all valid modes that pass isValidEnhancementMode', () => {
const modes = getAvailableEnhancementModes();
modes.forEach((mode) => {
expect(isValidEnhancementMode(mode)).toBe(true);
});
});
});
describe('Integration tests', () => {
it('should work together: getEnhancementPrompt + buildUserPrompt', () => {
const mode = 'improve';
const text = 'Add search feature';
const { systemPrompt, description } = getEnhancementPrompt(mode);
const userPrompt = buildUserPrompt(mode, text);
expect(systemPrompt).toBe(IMPROVE_SYSTEM_PROMPT);
expect(description).toBeDefined();
expect(userPrompt).toContain(text);
});
it('should handle complete enhancement workflow', () => {
const availableModes = getAvailableEnhancementModes();
expect(availableModes.length).toBeGreaterThan(0);
availableModes.forEach((mode) => {
const isValid = isValidEnhancementMode(mode);
expect(isValid).toBe(true);
const systemPrompt = getSystemPrompt(mode);
expect(systemPrompt).toBeDefined();
expect(systemPrompt.length).toBeGreaterThan(0);
const examples = getExamples(mode);
expect(Array.isArray(examples)).toBe(true);
expect(examples.length).toBeGreaterThan(0);
const userPrompt = buildUserPrompt(mode, 'test description');
expect(userPrompt).toContain('test description');
});
});
it('should provide consistent data across functions', () => {
const mode = 'technical';
const promptConfig = getEnhancementPrompt(mode);
const systemPrompt = getSystemPrompt(mode);
const examples = getExamples(mode);
expect(promptConfig.systemPrompt).toBe(systemPrompt);
expect(examples).toBe(TECHNICAL_EXAMPLES);
});
});
describe('Examples content validation', () => {
it('IMPROVE_EXAMPLES should demonstrate improvement', () => {
IMPROVE_EXAMPLES.forEach((example) => {
// Output should be longer and more detailed than input
expect(example.output.length).toBeGreaterThan(example.input.length);
// Input should be brief/vague
expect(example.input.length).toBeLessThan(100);
});
});
it('TECHNICAL_EXAMPLES should contain technical terms', () => {
const technicalTerms = [
'API',
'endpoint',
'component',
'database',
'frontend',
'backend',
'validation',
'schema',
'React',
'GET',
'PUT',
'POST',
];
TECHNICAL_EXAMPLES.forEach((example) => {
const hasAnyTechnicalTerm = technicalTerms.some((term) => example.output.includes(term));
expect(hasAnyTechnicalTerm).toBe(true);
});
});
it('ACCEPTANCE_EXAMPLES should contain acceptance criteria format', () => {
ACCEPTANCE_EXAMPLES.forEach((example) => {
// Should contain numbered criteria or Given-When-Then format
const hasAcceptanceCriteria =
example.output.includes('Acceptance Criteria') || example.output.match(/\d+\./g);
expect(hasAcceptanceCriteria).toBeTruthy();
// Should contain Given-When-Then format
const hasGWT =
example.output.includes('Given') &&
example.output.includes('when') &&
example.output.includes('then');
expect(hasGWT).toBe(true);
});
});
});
});

View File

@@ -0,0 +1,9 @@
{
"extends": "../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -0,0 +1,21 @@
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true,
environment: 'node',
include: ['tests/**/*.test.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'json', 'html'],
include: ['src/**/*.ts'],
exclude: ['src/**/*.d.ts', 'src/index.ts'],
thresholds: {
lines: 90,
functions: 95,
branches: 85,
statements: 90,
},
},
},
});

16
libs/tsconfig.base.json Normal file
View File

@@ -0,0 +1,16 @@
{
"compilerOptions": {
"target": "ES2020",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"lib": ["ES2020"],
"types": ["node"],
"declaration": true,
"declarationMap": true,
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true
}
}

142
libs/types/README.md Normal file
View File

@@ -0,0 +1,142 @@
# @automaker/types
Shared TypeScript type definitions for AutoMaker.
## Overview
This package contains all core type definitions used across AutoMaker's server and UI components. It has no dependencies and serves as the foundation for other packages.
## Installation
```bash
npm install @automaker/types
```
## Exports
### Provider Types
Types for AI provider integration and Claude SDK.
```typescript
import type {
ProviderConfig,
ConversationMessage,
ExecuteOptions,
ContentBlock,
ProviderMessage,
InstallationStatus,
ValidationResult,
ModelDefinition,
} from '@automaker/types';
```
### Feature Types
Feature management and workflow types.
```typescript
import type { Feature, FeatureStatus, PlanningMode } from '@automaker/types';
```
**Feature Interface:**
- `id` - Unique feature identifier
- `category` - Feature category/type
- `description` - Feature description
- `dependencies` - Array of feature IDs this depends on
- `status` - Current status (pending/running/completed/failed/verified)
- `planningMode` - Planning approach (skip/lite/spec/full)
- `planSpec` - Plan specification and approval status
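The nested `planSpec` field tracks plan generation and approval. A feature that has gone through spec planning might look like this (shape taken from `feature.ts` in this package; values are illustrative):
```typescript
import type { Feature } from '@automaker/types';

const planned: Feature = {
  id: 'auth-feature',
  category: 'backend',
  description: 'Implement user authentication',
  status: 'pending',
  planningMode: 'spec',
  requirePlanApproval: true,
  planSpec: {
    status: 'approved',
    content: '# Plan\n1. Add auth middleware...',
    version: 1,
    generatedAt: '2025-01-01T10:00:00Z',
    approvedAt: '2025-01-01T10:05:00Z',
    reviewedByUser: true,
    tasksCompleted: 0,
    tasksTotal: 4,
  },
};
```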
### Session Types
Agent session management.
```typescript
import type {
AgentSession,
SessionListItem,
CreateSessionParams,
UpdateSessionParams,
} from '@automaker/types';
```
### Error Types
Error classification and handling.
```typescript
import type { ErrorType, ErrorInfo } from '@automaker/types';
```
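A classified error built with the `ErrorInfo` shape defined in this package (values are illustrative):
```typescript
import type { ErrorInfo } from '@automaker/types';

const info: ErrorInfo = {
  type: 'authentication',
  message: 'Claude CLI is not authenticated',
  isAbort: false,
  isAuth: true,
  isCancellation: false,
  originalError: new Error('401 Unauthorized'),
};
```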
### Image Types
Image handling for prompts.
```typescript
import type { ImageData, ImageContentBlock } from '@automaker/types';
```
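`ImageData` carries a base64 payload plus metadata, while `ImageContentBlock` is the Claude SDK format. A minimal conversion sketch (the helper name is ours, not part of the package):
```typescript
import type { ImageData, ImageContentBlock } from '@automaker/types';

function toContentBlock(image: ImageData): ImageContentBlock {
  return {
    type: 'image',
    source: {
      type: 'base64',
      media_type: image.mimeType,
      data: image.base64,
    },
  };
}
```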
### Model Types
Claude model definitions and mappings.
```typescript
import { CLAUDE_MODEL_MAP, DEFAULT_MODELS, type ModelAlias } from '@automaker/types';
```
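The package also exports UI display metadata such as `CLAUDE_MODELS` (see the model display constants). A small sketch that derives picker options from it; the option shape here is purely illustrative:
```typescript
import { CLAUDE_MODELS } from '@automaker/types';

// Build simple dropdown options from the model display metadata
const modelOptions = CLAUDE_MODELS.map((model) => ({
  value: model.id,
  label: model.badge ? `${model.label} (${model.badge})` : model.label,
  title: model.description,
}));
```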
## Usage Example
```typescript
import type { Feature, ExecuteOptions } from '@automaker/types';
const feature: Feature = {
id: 'auth-feature',
category: 'backend',
description: 'Implement user authentication',
dependencies: ['database-setup'],
status: 'pending',
planningMode: 'spec',
};
const options: ExecuteOptions = {
model: 'claude-sonnet-4-20250514',
temperature: 0.7,
};
```
## Dependencies
None - this is a pure types package.
**IMPORTANT**: This package must NEVER depend on other `@automaker/*` packages to prevent circular dependencies. All other packages depend on this one, making it the foundation of the dependency tree.
## Used By
- `@automaker/utils`
- `@automaker/platform`
- `@automaker/model-resolver`
- `@automaker/dependency-resolver`
- `@automaker/git-utils`
- `@automaker/server`
- `@automaker/ui`
## Circular Dependency Prevention
To maintain the package dependency hierarchy and prevent circular dependencies:
1. **Never add dependencies** to other `@automaker/*` packages in `package.json`
2. **Keep result types here** - For example, `DependencyResolutionResult` should stay in `@automaker/dependency-resolver`, not be moved here
3. **Import only base types** - Other packages can import from here, but this package cannot import from them
4. **Document the rule** - When adding new functionality, ensure it follows this constraint
This constraint ensures a clean one-way dependency flow:
```
@automaker/types (foundation - no dependencies)
        ↑
@automaker/utils, @automaker/platform, etc.
        ↑
@automaker/server, @automaker/ui
```

22
libs/types/package.json Normal file
View File

@@ -0,0 +1,22 @@
{
"name": "@automaker/types",
"version": "1.0.0",
"type": "module",
"description": "Shared type definitions for AutoMaker",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"watch": "tsc --watch"
},
"keywords": [
"automaker",
"types"
],
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
"devDependencies": {
"@types/node": "^22.10.5",
"typescript": "^5.7.3"
}
}

View File

@@ -0,0 +1,16 @@
/**
* Enhancement types for AI-powered task description improvements
*/
/**
* Available enhancement modes for transforming task descriptions
*/
export type EnhancementMode = 'improve' | 'technical' | 'simplify' | 'acceptance';
/**
* Example input/output pair for few-shot learning
*/
export interface EnhancementExample {
input: string;
output: string;
}

16
libs/types/src/error.ts Normal file
View File

@@ -0,0 +1,16 @@
/**
* Error type classification
*/
export type ErrorType = 'authentication' | 'cancellation' | 'abort' | 'execution' | 'unknown';
/**
* Classified error information
*/
export interface ErrorInfo {
type: ErrorType;
message: string;
isAbort: boolean;
isAuth: boolean;
isCancellation: boolean;
originalError: unknown;
}

29
libs/types/src/event.ts Normal file
View File

@@ -0,0 +1,29 @@
/**
* Event types for AutoMaker event system
*/
export type EventType =
| 'agent:stream'
| 'auto-mode:event'
| 'auto-mode:started'
| 'auto-mode:stopped'
| 'auto-mode:idle'
| 'auto-mode:error'
| 'feature:started'
| 'feature:completed'
| 'feature:stopped'
| 'feature:error'
| 'feature:progress'
| 'feature:tool-use'
| 'feature:follow-up-started'
| 'feature:follow-up-completed'
| 'feature:verified'
| 'feature:committed'
| 'project:analysis-started'
| 'project:analysis-progress'
| 'project:analysis-completed'
| 'project:analysis-error'
| 'suggestions:event'
| 'spec-regeneration:event';
export type EventCallback = (type: EventType, payload: unknown) => void;

51
libs/types/src/feature.ts Normal file
View File

@@ -0,0 +1,51 @@
/**
* Feature types for AutoMaker feature management
*/
import type { PlanningMode } from './settings.js';
export interface FeatureImagePath {
id: string;
path: string;
filename: string;
mimeType: string;
[key: string]: unknown;
}
export interface Feature {
id: string;
title?: string;
titleGenerating?: boolean;
category: string;
description: string;
steps?: string[];
passes?: boolean;
priority?: number;
status?: string;
dependencies?: string[];
spec?: string;
model?: string;
imagePaths?: Array<string | FeatureImagePath | { path: string; [key: string]: unknown }>;
// Branch info - worktree path is derived at runtime from branchName
branchName?: string; // Name of the feature branch (undefined = use current worktree)
skipTests?: boolean;
thinkingLevel?: string;
planningMode?: PlanningMode;
requirePlanApproval?: boolean;
planSpec?: {
status: 'pending' | 'generating' | 'generated' | 'approved' | 'rejected';
content?: string;
version: number;
generatedAt?: string;
approvedAt?: string;
reviewedByUser: boolean;
tasksCompleted?: number;
tasksTotal?: number;
};
error?: string;
summary?: string;
startedAt?: string;
[key: string]: unknown; // Keep catch-all for extensibility
}
export type FeatureStatus = 'pending' | 'running' | 'completed' | 'failed' | 'verified';

21
libs/types/src/image.ts Normal file
View File

@@ -0,0 +1,21 @@
/**
* Image data with base64 encoding and metadata
*/
export interface ImageData {
base64: string;
mimeType: string;
filename: string;
originalPath: string;
}
/**
* Content block for image (Claude SDK format)
*/
export interface ImageContentBlock {
type: 'image';
source: {
type: 'base64';
media_type: string;
data: string;
};
}

libs/types/src/index.ts Normal file

@@ -0,0 +1,83 @@
/**
* @automaker/types
* Shared type definitions for AutoMaker
*/
// Provider types
export type {
ProviderConfig,
ConversationMessage,
ExecuteOptions,
ContentBlock,
ProviderMessage,
InstallationStatus,
ValidationResult,
ModelDefinition,
} from './provider.js';
// Feature types
export type { Feature, FeatureImagePath, FeatureStatus } from './feature.js';
// Session types
export type {
AgentSession,
SessionListItem,
CreateSessionParams,
UpdateSessionParams,
} from './session.js';
// Error types
export type { ErrorType, ErrorInfo } from './error.js';
// Image types
export type { ImageData, ImageContentBlock } from './image.js';
// Model types and constants
export { CLAUDE_MODEL_MAP, DEFAULT_MODELS, type ModelAlias, type AgentModel } from './model.js';
// Event types
export type { EventType, EventCallback } from './event.js';
// Spec types
export type { SpecOutput } from './spec.js';
export { specOutputSchema } from './spec.js';
// Enhancement types
export type { EnhancementMode, EnhancementExample } from './enhancement.js';
// Settings types and constants
export type {
ThemeMode,
KanbanCardDetailLevel,
PlanningMode,
ThinkingLevel,
ModelProvider,
KeyboardShortcuts,
AIProfile,
ProjectRef,
TrashedProjectRef,
ChatSessionRef,
GlobalSettings,
Credentials,
BoardBackgroundSettings,
WorktreeInfo,
ProjectSettings,
} from './settings.js';
export {
DEFAULT_KEYBOARD_SHORTCUTS,
DEFAULT_GLOBAL_SETTINGS,
DEFAULT_CREDENTIALS,
DEFAULT_PROJECT_SETTINGS,
SETTINGS_VERSION,
CREDENTIALS_VERSION,
PROJECT_SETTINGS_VERSION,
} from './settings.js';
// Model display constants
export type { ModelOption, ThinkingLevelOption } from './model-display.js';
export {
CLAUDE_MODELS,
THINKING_LEVELS,
THINKING_LEVEL_LABELS,
getModelDisplayName,
} from './model-display.js';


@@ -0,0 +1,111 @@
/**
* Model Display Constants - UI metadata for AI models
*
* Provides display labels, descriptions, and metadata for AI models
* and thinking levels used throughout the application UI.
*/
import type { AgentModel, ThinkingLevel } from './settings.js';
/**
* ModelOption - Display metadata for a model option in the UI
*/
export interface ModelOption {
/** Model identifier */
id: AgentModel;
/** Display name shown to user */
label: string;
/** Descriptive text explaining model capabilities */
description: string;
/** Optional badge text (e.g., "Speed", "Balanced", "Premium") */
badge?: string;
/** AI provider (currently only "claude") */
provider: 'claude';
}
/**
* ThinkingLevelOption - Display metadata for thinking level selection
*/
export interface ThinkingLevelOption {
/** Thinking level identifier */
id: ThinkingLevel;
/** Display label */
label: string;
}
/**
* Claude model options with full metadata for UI display
*
* Ordered from fastest/cheapest (Haiku) to most capable (Opus).
*/
export const CLAUDE_MODELS: ModelOption[] = [
{
id: 'haiku',
label: 'Claude Haiku',
description: 'Fast and efficient for simple tasks.',
badge: 'Speed',
provider: 'claude',
},
{
id: 'sonnet',
label: 'Claude Sonnet',
description: 'Balanced performance with strong reasoning.',
badge: 'Balanced',
provider: 'claude',
},
{
id: 'opus',
label: 'Claude Opus',
description: 'Most capable model for complex work.',
badge: 'Premium',
provider: 'claude',
},
];
/**
* Thinking level options with display labels
*
* Ordered from least to most intensive reasoning.
*/
export const THINKING_LEVELS: ThinkingLevelOption[] = [
{ id: 'none', label: 'None' },
{ id: 'low', label: 'Low' },
{ id: 'medium', label: 'Medium' },
{ id: 'high', label: 'High' },
{ id: 'ultrathink', label: 'Ultrathink' },
];
/**
* Map of thinking levels to short display labels
*
* Used for compact UI elements like badges or dropdowns.
*/
export const THINKING_LEVEL_LABELS: Record<ThinkingLevel, string> = {
none: 'None',
low: 'Low',
medium: 'Med',
high: 'High',
ultrathink: 'Ultra',
};
/**
* Get display name for a model
*
* @param model - Model identifier or full model string
* @returns Human-readable model name
*
* @example
* ```typescript
* getModelDisplayName("haiku"); // "Claude Haiku"
* getModelDisplayName("sonnet"); // "Claude Sonnet"
* getModelDisplayName("claude-opus-4-20250514"); // "claude-opus-4-20250514"
* ```
*/
export function getModelDisplayName(model: AgentModel | string): string {
const displayNames: Record<string, string> = {
haiku: 'Claude Haiku',
sonnet: 'Claude Sonnet',
opus: 'Claude Opus',
};
return displayNames[model] || model;
}

libs/types/src/model.ts Normal file

@@ -0,0 +1,23 @@
/**
* Model alias mapping for Claude models
*/
export const CLAUDE_MODEL_MAP: Record<string, string> = {
haiku: 'claude-haiku-4-5',
sonnet: 'claude-sonnet-4-20250514',
opus: 'claude-opus-4-5-20251101',
} as const;
/**
* Default models per provider
*/
export const DEFAULT_MODELS = {
claude: 'claude-opus-4-5-20251101',
} as const;
export type ModelAlias = keyof typeof CLAUDE_MODEL_MAP;
/**
* AgentModel - Alias for ModelAlias for backward compatibility
* Represents available Claude models: "opus" | "sonnet" | "haiku"
*/
export type AgentModel = ModelAlias;

libs/types/src/provider.ts Normal file

@@ -0,0 +1,104 @@
/**
* Shared types for AI model providers
*/
/**
* Configuration for a provider instance
*/
export interface ProviderConfig {
apiKey?: string;
cliPath?: string;
env?: Record<string, string>;
}
/**
* Message in conversation history
*/
export interface ConversationMessage {
role: 'user' | 'assistant';
content: string | Array<{ type: string; text?: string; source?: object }>;
}
/**
* Options for executing a query via a provider
*/
export interface ExecuteOptions {
prompt: string | Array<{ type: string; text?: string; source?: object }>;
model: string;
cwd: string;
systemPrompt?: string;
maxTurns?: number;
allowedTools?: string[];
mcpServers?: Record<string, unknown>;
abortController?: AbortController;
conversationHistory?: ConversationMessage[]; // Previous messages for context
sdkSessionId?: string; // Claude SDK session ID for resuming conversations
}
/**
* Content block in a provider message (matches Claude SDK format)
*/
export interface ContentBlock {
type: 'text' | 'tool_use' | 'thinking' | 'tool_result';
text?: string;
thinking?: string;
name?: string;
input?: unknown;
tool_use_id?: string;
content?: string;
}
/**
* Message returned by a provider (matches Claude SDK streaming format)
*/
export interface ProviderMessage {
type: 'assistant' | 'user' | 'error' | 'result';
subtype?: 'success' | 'error';
session_id?: string;
message?: {
role: 'user' | 'assistant';
content: ContentBlock[];
};
result?: string;
error?: string;
parent_tool_use_id?: string | null;
}
/**
* Installation status for a provider
*/
export interface InstallationStatus {
installed: boolean;
path?: string;
version?: string;
method?: 'cli' | 'npm' | 'brew' | 'sdk';
hasApiKey?: boolean;
authenticated?: boolean;
error?: string;
}
/**
* Validation result
*/
export interface ValidationResult {
valid: boolean;
errors: string[];
warnings?: string[];
}
/**
* Model definition
*/
export interface ModelDefinition {
id: string;
name: string;
modelString: string;
provider: string;
description: string;
contextWindow?: number;
maxOutputTokens?: number;
supportsVision?: boolean;
supportsTools?: boolean;
tier?: 'basic' | 'standard' | 'premium';
default?: boolean;
}

libs/types/src/session.ts Normal file

@@ -0,0 +1,31 @@
/**
* Session types for agent conversations
*/
export interface AgentSession {
id: string;
name: string;
projectPath: string;
createdAt: string;
updatedAt: string;
messageCount: number;
isArchived: boolean;
isDirty?: boolean; // Indicates session has completed work that needs review
tags?: string[];
}
export interface SessionListItem extends AgentSession {
preview?: string; // Last message preview
}
export interface CreateSessionParams {
name: string;
projectPath: string;
workingDirectory?: string;
}
export interface UpdateSessionParams {
id: string;
name?: string;
tags?: string[];
}

libs/types/src/settings.ts Normal file

@@ -0,0 +1,449 @@
/**
* Settings Types - Shared types for file-based settings storage
*
* Defines the structure for global settings, credentials, and per-project settings
* that are persisted to disk in JSON format. These types are used by both the server
* (for file I/O via SettingsService) and the UI (for state management and sync).
*/
import type { AgentModel } from './model.js';
// Re-export AgentModel for convenience
export type { AgentModel };
/**
* ThemeMode - Available color themes for the UI
*
* Includes system theme and multiple color schemes organized by dark/light:
* - System: Respects OS dark/light mode preference
* - Dark themes (16): dark, retro, dracula, nord, monokai, tokyonight, solarized,
* gruvbox, catppuccin, onedark, synthwave, red, sunset, gray, forest, ocean
* - Light themes (16): light, cream, solarizedlight, github, paper, rose, mint,
* lavender, sand, sky, peach, snow, sepia, gruvboxlight, nordlight, blossom
*/
export type ThemeMode =
| 'system'
// Dark themes (16)
| 'dark'
| 'retro'
| 'dracula'
| 'nord'
| 'monokai'
| 'tokyonight'
| 'solarized'
| 'gruvbox'
| 'catppuccin'
| 'onedark'
| 'synthwave'
| 'red'
| 'sunset'
| 'gray'
| 'forest'
| 'ocean'
// Light themes (16)
| 'light'
| 'cream'
| 'solarizedlight'
| 'github'
| 'paper'
| 'rose'
| 'mint'
| 'lavender'
| 'sand'
| 'sky'
| 'peach'
| 'snow'
| 'sepia'
| 'gruvboxlight'
| 'nordlight'
| 'blossom';
/** KanbanCardDetailLevel - Controls how much information is displayed on kanban cards */
export type KanbanCardDetailLevel = 'minimal' | 'standard' | 'detailed';
/** PlanningMode - Planning levels for feature generation workflows */
export type PlanningMode = 'skip' | 'lite' | 'spec' | 'full';
/** ThinkingLevel - Extended thinking levels for Claude models (reasoning intensity) */
export type ThinkingLevel = 'none' | 'low' | 'medium' | 'high' | 'ultrathink';
/** ModelProvider - AI model provider for credentials and API key management */
export type ModelProvider = 'claude';
/**
* KeyboardShortcuts - User-configurable keyboard bindings for common actions
*
* Each property maps an action to a keyboard shortcut string
* (e.g., "Ctrl+K", "Alt+N", "Shift+P")
*/
export interface KeyboardShortcuts {
/** Open board view */
board: string;
/** Open agent panel */
agent: string;
/** Open feature spec editor */
spec: string;
/** Open context files panel */
context: string;
/** Open settings */
settings: string;
/** Open AI profiles */
profiles: string;
/** Open terminal */
terminal: string;
/** Toggle sidebar visibility */
toggleSidebar: string;
/** Add new feature */
addFeature: string;
/** Add context file */
addContextFile: string;
/** Start next feature generation */
startNext: string;
/** Create new chat session */
newSession: string;
/** Open project picker */
openProject: string;
/** Open project picker (alternate) */
projectPicker: string;
/** Cycle to previous project */
cyclePrevProject: string;
/** Cycle to next project */
cycleNextProject: string;
/** Add new AI profile */
addProfile: string;
/** Split terminal right */
splitTerminalRight: string;
/** Split terminal down */
splitTerminalDown: string;
/** Close current terminal */
closeTerminal: string;
}
/**
* AIProfile - Configuration for an AI model with specific parameters
*
* Profiles can be built-in defaults or user-created. They define which model to use,
* thinking level, and other parameters for feature generation tasks.
*/
export interface AIProfile {
/** Unique identifier for the profile */
id: string;
/** Display name for the profile */
name: string;
/** User-friendly description */
description: string;
/** Which Claude model to use (opus, sonnet, haiku) */
model: AgentModel;
/** Extended thinking level for reasoning-based tasks */
thinkingLevel: ThinkingLevel;
/** Provider (currently only "claude") */
provider: ModelProvider;
/** Whether this is a built-in default profile */
isBuiltIn: boolean;
/** Optional icon identifier or emoji */
icon?: string;
}
/**
* ProjectRef - Minimal reference to a project stored in global settings
*
* Used for the projects list and project history. Full project data is loaded separately.
*/
export interface ProjectRef {
/** Unique identifier */
id: string;
/** Display name */
name: string;
/** Absolute filesystem path to project directory */
path: string;
/** ISO timestamp of last time project was opened */
lastOpened?: string;
/** Project-specific theme override (or undefined to use global) */
theme?: string;
}
/**
* TrashedProjectRef - Reference to a project in the trash/recycle bin
*
* Extends ProjectRef with deletion metadata. User can permanently delete or restore.
*/
export interface TrashedProjectRef extends ProjectRef {
/** ISO timestamp when project was moved to trash */
trashedAt: string;
/** Whether project folder was deleted from disk */
deletedFromDisk?: boolean;
}
/**
* ChatSessionRef - Minimal reference to a chat session
*
* Used for session lists and history. Full session content is stored separately.
*/
export interface ChatSessionRef {
/** Unique session identifier */
id: string;
/** User-given or AI-generated title */
title: string;
/** Project that session belongs to */
projectId: string;
/** ISO timestamp of creation */
createdAt: string;
/** ISO timestamp of last message */
updatedAt: string;
/** Whether session is archived */
archived: boolean;
}
/**
* GlobalSettings - User preferences and state stored globally in {DATA_DIR}/settings.json
*
* This is the main settings file that persists user preferences across sessions.
* Includes theme, UI state, feature defaults, keyboard shortcuts, AI profiles, and projects.
* Format: JSON with version field for migration support.
*/
export interface GlobalSettings {
/** Version number for schema migration */
version: number;
// Theme Configuration
/** Currently selected theme */
theme: ThemeMode;
// UI State Preferences
/** Whether sidebar is currently open */
sidebarOpen: boolean;
/** Whether chat history panel is open */
chatHistoryOpen: boolean;
/** How much detail to show on kanban cards */
kanbanCardDetailLevel: KanbanCardDetailLevel;
// Feature Generation Defaults
/** Max features to generate concurrently */
maxConcurrency: number;
/** Default: skip tests during feature generation */
defaultSkipTests: boolean;
/** Default: enable dependency blocking */
enableDependencyBlocking: boolean;
/** Default: use git worktrees for feature branches */
useWorktrees: boolean;
/** Default: only show AI profiles (hide other settings) */
showProfilesOnly: boolean;
/** Default: planning approach (skip/lite/spec/full) */
defaultPlanningMode: PlanningMode;
/** Default: require manual approval before generating */
defaultRequirePlanApproval: boolean;
/** ID of currently selected AI profile (null = use built-in) */
defaultAIProfileId: string | null;
// Audio Preferences
/** Mute completion notification sound */
muteDoneSound: boolean;
// AI Model Selection
/** Which model to use for feature name/description enhancement */
enhancementModel: AgentModel;
// Input Configuration
/** User's keyboard shortcut bindings */
keyboardShortcuts: KeyboardShortcuts;
// AI Profiles
/** User-created AI profiles */
aiProfiles: AIProfile[];
// Project Management
/** List of active projects */
projects: ProjectRef[];
/** Projects in trash/recycle bin */
trashedProjects: TrashedProjectRef[];
/** History of recently opened project IDs */
projectHistory: string[];
/** Current position in project history for navigation */
projectHistoryIndex: number;
// File Browser and UI Preferences
/** Last directory opened in file picker */
lastProjectDir?: string;
/** Recently accessed folders for quick access */
recentFolders: string[];
/** Whether worktree panel is collapsed in current view */
worktreePanelCollapsed: boolean;
// Session Tracking
/** Maps project path -> last selected session ID in that project */
lastSelectedSessionByProject: Record<string, string>;
}
/**
* Credentials - API keys stored in {DATA_DIR}/credentials.json
*
* Sensitive data stored separately from general settings.
* Keys should never be exposed in UI or logs.
*/
export interface Credentials {
/** Version number for schema migration */
version: number;
/** API keys for various providers */
apiKeys: {
/** Anthropic Claude API key */
anthropic: string;
/** Google API key (for embeddings or other services) */
google: string;
/** OpenAI API key (for compatibility or alternative providers) */
openai: string;
};
}
/**
* BoardBackgroundSettings - Kanban board appearance customization
*
* Controls background images, opacity, borders, and visual effects for the board.
*/
export interface BoardBackgroundSettings {
/** Path to background image file (null = no image) */
imagePath: string | null;
/** Version/timestamp of image for cache busting */
imageVersion?: number;
/** Opacity of cards (0-1) */
cardOpacity: number;
/** Opacity of columns (0-1) */
columnOpacity: number;
/** Show border around columns */
columnBorderEnabled: boolean;
/** Apply glassmorphism effect to cards */
cardGlassmorphism: boolean;
/** Show border around cards */
cardBorderEnabled: boolean;
/** Opacity of card borders (0-1) */
cardBorderOpacity: number;
/** Hide scrollbar in board view */
hideScrollbar: boolean;
}
/**
* WorktreeInfo - Information about a git worktree
*
* Tracks worktree location, branch, and dirty state for project management.
*/
export interface WorktreeInfo {
/** Absolute path to worktree directory */
path: string;
/** Branch checked out in this worktree */
branch: string;
/** Whether this is the main worktree */
isMain: boolean;
/** Whether worktree has uncommitted changes */
hasChanges?: boolean;
/** Number of files with changes */
changedFilesCount?: number;
}
/**
* ProjectSettings - Project-specific overrides stored in {projectPath}/.automaker/settings.json
*
* Allows per-project customization without affecting global settings.
* All fields are optional - missing values fall back to global settings.
*/
export interface ProjectSettings {
/** Version number for schema migration */
version: number;
// Theme Configuration (project-specific override)
/** Project theme (undefined = use global setting) */
theme?: ThemeMode;
// Worktree Management
/** Project-specific worktree preference override */
useWorktrees?: boolean;
/** Current worktree being used in this project */
currentWorktree?: { path: string | null; branch: string };
/** List of worktrees available in this project */
worktrees?: WorktreeInfo[];
// Board Customization
/** Project-specific board background settings */
boardBackground?: BoardBackgroundSettings;
// Session Tracking
/** Last chat session selected in this project */
lastSelectedSessionId?: string;
}
/**
* Default values and constants
*/
/** Default keyboard shortcut bindings */
export const DEFAULT_KEYBOARD_SHORTCUTS: KeyboardShortcuts = {
board: 'K',
agent: 'A',
spec: 'D',
context: 'C',
settings: 'S',
profiles: 'M',
terminal: 'T',
toggleSidebar: '`',
addFeature: 'N',
addContextFile: 'N',
startNext: 'G',
newSession: 'N',
openProject: 'O',
projectPicker: 'P',
cyclePrevProject: 'Q',
cycleNextProject: 'E',
addProfile: 'N',
splitTerminalRight: 'Alt+D',
splitTerminalDown: 'Alt+S',
closeTerminal: 'Alt+W',
};
/** Default global settings used when no settings file exists */
export const DEFAULT_GLOBAL_SETTINGS: GlobalSettings = {
version: 1,
theme: 'dark',
sidebarOpen: true,
chatHistoryOpen: false,
kanbanCardDetailLevel: 'standard',
maxConcurrency: 3,
defaultSkipTests: true,
enableDependencyBlocking: true,
useWorktrees: false,
showProfilesOnly: false,
defaultPlanningMode: 'skip',
defaultRequirePlanApproval: false,
defaultAIProfileId: null,
muteDoneSound: false,
enhancementModel: 'sonnet',
keyboardShortcuts: DEFAULT_KEYBOARD_SHORTCUTS,
aiProfiles: [],
projects: [],
trashedProjects: [],
projectHistory: [],
projectHistoryIndex: -1,
lastProjectDir: undefined,
recentFolders: [],
worktreePanelCollapsed: false,
lastSelectedSessionByProject: {},
};
/** Default credentials (empty strings - user must provide API keys) */
export const DEFAULT_CREDENTIALS: Credentials = {
version: 1,
apiKeys: {
anthropic: '',
google: '',
openai: '',
},
};
/** Default project settings (empty - all settings are optional and fall back to global) */
export const DEFAULT_PROJECT_SETTINGS: ProjectSettings = {
version: 1,
};
/** Current version of the global settings schema */
export const SETTINGS_VERSION = 1;
/** Current version of the credentials schema */
export const CREDENTIALS_VERSION = 1;
/** Current version of the project settings schema */
export const PROJECT_SETTINGS_VERSION = 1;

libs/types/src/spec.ts Normal file

@@ -0,0 +1,118 @@
/**
* App specification types
*/
/**
* TypeScript interface for structured spec output
*/
export interface SpecOutput {
project_name: string;
overview: string;
technology_stack: string[];
core_capabilities: string[];
implemented_features: Array<{
name: string;
description: string;
file_locations?: string[];
}>;
additional_requirements?: string[];
development_guidelines?: string[];
implementation_roadmap?: Array<{
phase: string;
status: 'completed' | 'in_progress' | 'pending';
description: string;
}>;
}
/**
* JSON Schema for structured spec output
* Used with Claude's structured output feature for reliable parsing
*/
export const specOutputSchema = {
type: 'object',
properties: {
project_name: {
type: 'string',
description: 'The name of the project',
},
overview: {
type: 'string',
description:
'A comprehensive description of what the project does, its purpose, and key goals',
},
technology_stack: {
type: 'array',
items: { type: 'string' },
description: 'List of all technologies, frameworks, libraries, and tools used',
},
core_capabilities: {
type: 'array',
items: { type: 'string' },
description: 'List of main features and capabilities the project provides',
},
implemented_features: {
type: 'array',
items: {
type: 'object',
properties: {
name: {
type: 'string',
description: 'Name of the implemented feature',
},
description: {
type: 'string',
description: 'Description of what the feature does',
},
file_locations: {
type: 'array',
items: { type: 'string' },
description: 'File paths where this feature is implemented',
},
},
required: ['name', 'description'],
},
description: 'Features that have been implemented based on code analysis',
},
additional_requirements: {
type: 'array',
items: { type: 'string' },
description: 'Any additional requirements or constraints',
},
development_guidelines: {
type: 'array',
items: { type: 'string' },
description: 'Development standards and practices',
},
implementation_roadmap: {
type: 'array',
items: {
type: 'object',
properties: {
phase: {
type: 'string',
description: 'Name of the implementation phase',
},
status: {
type: 'string',
enum: ['completed', 'in_progress', 'pending'],
description: 'Current status of this phase',
},
description: {
type: 'string',
description: 'Description of what this phase involves',
},
},
required: ['phase', 'status', 'description'],
},
description: 'Phases or roadmap items for implementation',
},
},
required: [
'project_name',
'overview',
'technology_stack',
'core_capabilities',
'implemented_features',
],
additionalProperties: false,
};

libs/types/tsconfig.json Normal file

@@ -0,0 +1,9 @@
{
"extends": "../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

libs/utils/README.md Normal file

@@ -0,0 +1,155 @@
# @automaker/utils
Shared utility functions for AutoMaker.
## Overview
This package provides common utility functions used across AutoMaker's server and UI. It includes error handling, logging, conversation utilities, image handling, prompt building, and symlink-safe file system and path helpers.
## Installation
```bash
npm install @automaker/utils
```
## Exports
### Logger
Structured logging with context.
```typescript
import { createLogger, LogLevel } from '@automaker/utils';
const logger = createLogger('MyComponent');
logger.info('Processing request');
logger.error('Failed to process:', error);
logger.debug('Debug information', { data });
```
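The log level defaults to `info`, is read from the `LOG_LEVEL` environment variable at startup, and can be changed at runtime. A small sketch using the exported helpers:
```typescript
import { setLogLevel, getLogLevel, LogLevel } from '@automaker/utils';
setLogLevel(LogLevel.DEBUG); // enable debug output (useful in tests)
console.log(getLogLevel()); // 3 (LogLevel.DEBUG)
```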
### Error Handler
Error classification and user-friendly messages.
```typescript
import {
isAbortError,
isCancellationError,
isAuthenticationError,
classifyError,
getUserFriendlyErrorMessage,
} from '@automaker/utils';
try {
await operation();
} catch (error) {
if (isAbortError(error)) {
console.log('Operation was aborted');
}
const errorInfo = classifyError(error);
const message = getUserFriendlyErrorMessage(error);
}
```
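When only the message string is needed, the lighter `getErrorMessage` helper can be used instead of full classification, for example:
```typescript
import { getErrorMessage } from '@automaker/utils';
try {
await operation();
} catch (error) {
console.error(getErrorMessage(error)); // message string, or 'Unknown error' for non-Error values
}
```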
### Conversation Utils
Message formatting and conversion.
```typescript
import {
extractTextFromContent,
normalizeContentBlocks,
formatHistoryAsText,
convertHistoryToMessages,
} from '@automaker/utils';
const text = extractTextFromContent(contentBlocks);
const normalized = normalizeContentBlocks(content);
const formatted = formatHistoryAsText(messages);
const converted = convertHistoryToMessages(history);
```
### Image Handler
Image processing for Claude prompts.
```typescript
import {
getMimeTypeForImage,
readImageAsBase64,
convertImagesToContentBlocks,
formatImagePathsForPrompt,
} from '@automaker/utils';
const mimeType = getMimeTypeForImage('screenshot.png');
const base64 = await readImageAsBase64('/path/to/image.jpg');
const blocks = await convertImagesToContentBlocks(imagePaths, basePath);
const formatted = formatImagePathsForPrompt(imagePaths);
```
### Prompt Builder
Build prompts with images for Claude.
```typescript
import { buildPromptWithImages } from '@automaker/utils';
const result = await buildPromptWithImages(
'Analyze this screenshot', // base prompt
['/path/to/screenshot.png'], // image paths
'/project/path' // working directory for resolving relative paths
);
console.log(result.content); // string, or content blocks when images are attached
console.log(result.hasImages); // true when image blocks were added
```
### File System Utils
Common file system operations.
```typescript
import { mkdirSafe, existsSafe } from '@automaker/utils';
await mkdirSafe('/path/to/dir');
const exists = await existsSafe('/path/to/file');
```
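### Path Utils
Cross-platform path normalization and comparison (mirroring the examples in `path-utils.ts`).
```typescript
import { normalizePath, pathsEqual } from '@automaker/utils';
normalizePath('C:\\Users\\foo\\bar'); // "C:/Users/foo/bar"
pathsEqual('C:\\foo\\bar', 'C:/foo/bar'); // true
```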
## Usage Example
```typescript
import { createLogger, classifyError, buildPromptWithImages } from '@automaker/utils';
const logger = createLogger('FeatureExecutor');
async function executeWithImages(prompt: string, images: string[]) {
try {
logger.info('Building prompt with images');
const result = await buildPromptWithImages(prompt, images, process.cwd());
logger.debug('Prompt built successfully', { hasImages: result.hasImages });
return result;
} catch (error) {
const errorInfo = classifyError(error);
logger.error('Failed to build prompt:', errorInfo.message);
throw error;
}
}
```
## Dependencies
- `@automaker/platform` - Secure file system access (`secureFs`)
- `@automaker/types` - Type definitions
## Used By
- `@automaker/server`
- `@automaker/ui`

libs/utils/package.json Normal file

@@ -0,0 +1,29 @@
{
"name": "@automaker/utils",
"version": "1.0.0",
"type": "module",
"description": "Shared utility functions for AutoMaker",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"watch": "tsc --watch",
"test": "vitest run",
"test:watch": "vitest"
},
"keywords": [
"automaker",
"utils"
],
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
"dependencies": {
"@automaker/platform": "^1.0.0",
"@automaker/types": "^1.0.0"
},
"devDependencies": {
"@types/node": "^22.10.5",
"typescript": "^5.7.3",
"vitest": "^4.0.16"
}
}


@@ -0,0 +1,95 @@
/**
* Conversation history utilities for processing message history
*
* Provides standardized conversation history handling:
* - Extract text from content (string or array format)
* - Normalize content blocks to array format
* - Format history as plain text for CLI-based providers
* - Convert history to Claude SDK message format
*/
import type { ConversationMessage } from '@automaker/types';
/**
* Extract plain text from message content (handles both string and array formats)
*
* @param content - Message content (string or array of content blocks)
* @returns Extracted text content
*/
export function extractTextFromContent(
content: string | Array<{ type: string; text?: string; source?: object }>
): string {
if (typeof content === 'string') {
return content;
}
// Extract text blocks only
return content
.filter((block) => block.type === 'text')
.map((block) => block.text || '')
.join('\n');
}
/**
* Normalize message content to array format
*
* @param content - Message content (string or array)
* @returns Content as array of blocks
*/
export function normalizeContentBlocks(
content: string | Array<{ type: string; text?: string; source?: object }>
): Array<{ type: string; text?: string; source?: object }> {
if (Array.isArray(content)) {
return content;
}
return [{ type: 'text', text: content }];
}
/**
* Format conversation history as plain text for CLI-based providers
*
* @param history - Array of conversation messages
* @returns Formatted text with role labels
*/
export function formatHistoryAsText(history: ConversationMessage[]): string {
if (history.length === 0) {
return '';
}
let historyText = 'Previous conversation:\n\n';
for (const msg of history) {
const contentText = extractTextFromContent(msg.content);
const role = msg.role === 'user' ? 'User' : 'Assistant';
historyText += `${role}: ${contentText}\n\n`;
}
historyText += '---\n\n';
return historyText;
}
/**
* Convert conversation history to Claude SDK message format
*
* @param history - Array of conversation messages
* @returns Array of Claude SDK formatted messages
*/
export function convertHistoryToMessages(history: ConversationMessage[]): Array<{
type: 'user' | 'assistant';
session_id: string;
message: {
role: 'user' | 'assistant';
content: Array<{ type: string; text?: string; source?: object }>;
};
parent_tool_use_id: null;
}> {
return history.map((historyMsg) => ({
type: historyMsg.role,
session_id: '',
message: {
role: historyMsg.role,
content: normalizeContentBlocks(historyMsg.content),
},
parent_tool_use_id: null,
}));
}


@@ -0,0 +1,131 @@
/**
* Error handling utilities for standardized error classification
*
* Provides utilities for:
* - Detecting abort/cancellation errors
* - Detecting authentication errors
* - Classifying errors by type
* - Generating user-friendly error messages
*/
import type { ErrorType, ErrorInfo } from '@automaker/types';
/**
* Check if an error is an abort/cancellation error
*
* @param error - The error to check
* @returns True if the error is an abort error
*/
export function isAbortError(error: unknown): boolean {
return error instanceof Error && (error.name === 'AbortError' || error.message.includes('abort'));
}
/**
* Check if an error is a user-initiated cancellation
*
* @param errorMessage - The error message to check
* @returns True if the error is a user-initiated cancellation
*/
export function isCancellationError(errorMessage: string): boolean {
const lowerMessage = errorMessage.toLowerCase();
return (
lowerMessage.includes('cancelled') ||
lowerMessage.includes('canceled') ||
lowerMessage.includes('stopped') ||
lowerMessage.includes('aborted')
);
}
/**
* Check if an error is an authentication/API key error
*
* @param errorMessage - The error message to check
* @returns True if the error is authentication-related
*/
export function isAuthenticationError(errorMessage: string): boolean {
return (
errorMessage.includes('Authentication failed') ||
errorMessage.includes('Invalid API key') ||
errorMessage.includes('authentication_failed') ||
errorMessage.includes('Fix external API key')
);
}
/**
* Classify an error into a specific type
*
* @param error - The error to classify
* @returns Classified error information
*/
export function classifyError(error: unknown): ErrorInfo {
const message = error instanceof Error ? error.message : String(error || 'Unknown error');
const isAbort = isAbortError(error);
const isAuth = isAuthenticationError(message);
const isCancellation = isCancellationError(message);
let type: ErrorType;
if (isAuth) {
type = 'authentication';
} else if (isAbort) {
type = 'abort';
} else if (isCancellation) {
type = 'cancellation';
} else if (error instanceof Error) {
type = 'execution';
} else {
type = 'unknown';
}
return {
type,
message,
isAbort,
isAuth,
isCancellation,
originalError: error,
};
}
/**
* Get a user-friendly error message
*
* @param error - The error to convert
* @returns User-friendly error message
*/
export function getUserFriendlyErrorMessage(error: unknown): string {
const info = classifyError(error);
if (info.isAbort) {
return 'Operation was cancelled';
}
if (info.isAuth) {
return 'Authentication failed. Please check your API key.';
}
return info.message;
}
/**
* Extract error message from an unknown error value
*
 * Simple utility for getting a string error message from any error type.
 * Returns the error's message property if it's an Error; otherwise it
 * falls back to 'Unknown error'. Used throughout the codebase for
 * consistent error message extraction.
*
* @param error - The error value (Error object, string, or unknown)
* @returns Error message string
*
* @example
* ```typescript
* try {
* throw new Error("Something went wrong");
* } catch (error) {
* const message = getErrorMessage(error); // "Something went wrong"
* }
* ```
*/
export function getErrorMessage(error: unknown): string {
return error instanceof Error ? error.message : 'Unknown error';
}


@@ -0,0 +1,67 @@
/**
* File system utilities that handle symlinks safely
*/
import { secureFs } from '@automaker/platform';
import path from 'path';
/**
* Create a directory, handling symlinks safely to avoid ELOOP errors.
* If the path already exists as a directory or symlink, returns success.
*/
export async function mkdirSafe(dirPath: string): Promise<void> {
const resolvedPath = path.resolve(dirPath);
// Check if path already exists using lstat (doesn't follow symlinks)
try {
const stats = await secureFs.lstat(resolvedPath);
// Path exists - if it's a directory or symlink, consider it success
if (stats.isDirectory() || stats.isSymbolicLink()) {
return;
}
// It's a file - can't create directory
throw new Error(`Path exists and is not a directory: ${resolvedPath}`);
} catch (error: any) {
// ENOENT means path doesn't exist - we should create it
if (error.code !== 'ENOENT') {
// Some other error (could be ELOOP in parent path)
// If it's ELOOP, the path involves symlinks - don't try to create
if (error.code === 'ELOOP') {
console.warn(`[fs-utils] Symlink loop detected at ${resolvedPath}, skipping mkdir`);
return;
}
throw error;
}
}
// Path doesn't exist, create it
try {
await secureFs.mkdir(resolvedPath, { recursive: true });
} catch (error: any) {
// Handle race conditions and symlink issues
if (error.code === 'EEXIST' || error.code === 'ELOOP') {
return;
}
throw error;
}
}
/**
* Check if a path exists, handling symlinks safely.
* Returns true if the path exists as a file, directory, or symlink.
*/
export async function existsSafe(filePath: string): Promise<boolean> {
try {
await secureFs.lstat(filePath);
return true;
} catch (error: any) {
if (error.code === 'ENOENT') {
return false;
}
// ELOOP or other errors - path exists but is problematic
if (error.code === 'ELOOP') {
return true; // Symlink exists, even if looping
}
throw error;
}
}


@@ -0,0 +1,113 @@
/**
* Image handling utilities for processing image files
*
* Provides utilities for:
* - MIME type detection based on file extensions
* - Base64 encoding of image files
* - Content block generation for Claude SDK format
* - Path resolution (relative/absolute)
*/
import { secureFs } from '@automaker/platform';
import path from 'path';
import type { ImageData, ImageContentBlock } from '@automaker/types';
/**
* MIME type mapping for image file extensions
*/
const IMAGE_MIME_TYPES: Record<string, string> = {
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.png': 'image/png',
'.gif': 'image/gif',
'.webp': 'image/webp',
} as const;
/**
* Get MIME type for an image file based on extension
*
* @param imagePath - Path to the image file
* @returns MIME type string (defaults to "image/png" for unknown extensions)
*/
export function getMimeTypeForImage(imagePath: string): string {
const ext = path.extname(imagePath).toLowerCase();
return IMAGE_MIME_TYPES[ext] || 'image/png';
}
/**
* Read an image file and convert to base64 with metadata
*
* @param imagePath - Path to the image file
* @returns Promise resolving to image data with base64 encoding
* @throws Error if file cannot be read
*/
export async function readImageAsBase64(imagePath: string): Promise<ImageData> {
const imageBuffer = (await secureFs.readFile(imagePath)) as Buffer;
const base64Data = imageBuffer.toString('base64');
const mimeType = getMimeTypeForImage(imagePath);
return {
base64: base64Data,
mimeType,
filename: path.basename(imagePath),
originalPath: imagePath,
};
}
/**
* Convert image paths to content blocks (Claude SDK format)
* Handles both relative and absolute paths
*
* @param imagePaths - Array of image file paths
* @param workDir - Optional working directory for resolving relative paths
* @returns Promise resolving to array of image content blocks
*/
export async function convertImagesToContentBlocks(
imagePaths: string[],
workDir?: string
): Promise<ImageContentBlock[]> {
const blocks: ImageContentBlock[] = [];
for (const imagePath of imagePaths) {
try {
// Resolve to absolute path if needed
const absolutePath =
workDir && !path.isAbsolute(imagePath) ? path.join(workDir, imagePath) : imagePath;
const imageData = await readImageAsBase64(absolutePath);
blocks.push({
type: 'image',
source: {
type: 'base64',
media_type: imageData.mimeType,
data: imageData.base64,
},
});
} catch (error) {
console.error(`[ImageHandler] Failed to load image ${imagePath}:`, error);
// Continue processing other images
}
}
return blocks;
}
/**
* Build a list of image paths for text prompts
* Formats image paths as a bulleted list for inclusion in text prompts
*
* @param imagePaths - Array of image file paths
* @returns Formatted string with image paths, or empty string if no images
*/
export function formatImagePathsForPrompt(imagePaths: string[]): string {
if (imagePaths.length === 0) {
return '';
}
let text = '\n\nAttached images:\n';
for (const imagePath of imagePaths) {
text += `- ${imagePath}\n`;
}
return text;
}

libs/utils/src/index.ts Normal file

@@ -0,0 +1,46 @@
/**
* @automaker/utils
* Shared utility functions for AutoMaker
*/
// Error handling
export {
isAbortError,
isCancellationError,
isAuthenticationError,
classifyError,
getUserFriendlyErrorMessage,
getErrorMessage,
} from './error-handler.js';
// Conversation utilities
export {
extractTextFromContent,
normalizeContentBlocks,
formatHistoryAsText,
convertHistoryToMessages,
} from './conversation-utils.js';
// Image handling
export {
getMimeTypeForImage,
readImageAsBase64,
convertImagesToContentBlocks,
formatImagePathsForPrompt,
} from './image-handler.js';
// Prompt building
export {
buildPromptWithImages,
type PromptContent,
type PromptWithImages,
} from './prompt-builder.js';
// Logger
export { createLogger, getLogLevel, setLogLevel, LogLevel } from './logger.js';
// File system utilities
export { mkdirSafe, existsSafe } from './fs-utils.js';
// Path utilities
export { normalizePath, pathsEqual } from './path-utils.js';

libs/utils/src/logger.ts Normal file

@@ -0,0 +1,74 @@
/**
* Simple logger utility with log levels
* Configure via LOG_LEVEL environment variable: error, warn, info, debug
* Defaults to 'info' if not set
*/
export enum LogLevel {
ERROR = 0,
WARN = 1,
INFO = 2,
DEBUG = 3,
}
const LOG_LEVEL_NAMES: Record<string, LogLevel> = {
error: LogLevel.ERROR,
warn: LogLevel.WARN,
info: LogLevel.INFO,
debug: LogLevel.DEBUG,
};
let currentLogLevel: LogLevel = LogLevel.INFO;
// Initialize log level from environment variable
const envLogLevel = process.env.LOG_LEVEL?.toLowerCase();
if (envLogLevel && LOG_LEVEL_NAMES[envLogLevel] !== undefined) {
currentLogLevel = LOG_LEVEL_NAMES[envLogLevel];
}
/**
* Create a logger instance with a context prefix
*/
export function createLogger(context: string) {
const prefix = `[${context}]`;
return {
error: (...args: unknown[]): void => {
if (currentLogLevel >= LogLevel.ERROR) {
console.error(prefix, ...args);
}
},
warn: (...args: unknown[]): void => {
if (currentLogLevel >= LogLevel.WARN) {
console.warn(prefix, ...args);
}
},
info: (...args: unknown[]): void => {
if (currentLogLevel >= LogLevel.INFO) {
console.log(prefix, ...args);
}
},
debug: (...args: unknown[]): void => {
if (currentLogLevel >= LogLevel.DEBUG) {
console.log(prefix, '[DEBUG]', ...args);
}
},
};
}
/**
* Get the current log level
*/
export function getLogLevel(): LogLevel {
return currentLogLevel;
}
/**
* Set the log level programmatically (useful for testing)
*/
export function setLogLevel(level: LogLevel): void {
currentLogLevel = level;
}


@@ -0,0 +1,51 @@
/**
* Path Utilities - Cross-platform path manipulation helpers
*
* Provides functions for normalizing and comparing file system paths
* across different operating systems (Windows, macOS, Linux).
*/
/**
* Normalize a path by converting backslashes to forward slashes
*
* This ensures consistent path representation across platforms:
* - Windows: C:\Users\foo\bar -> C:/Users/foo/bar
* - Unix: /home/foo/bar -> /home/foo/bar (unchanged)
*
* @param p - Path string to normalize
* @returns Normalized path with forward slashes
*
* @example
* ```typescript
* normalizePath("C:\\Users\\foo\\bar"); // "C:/Users/foo/bar"
* normalizePath("/home/foo/bar"); // "/home/foo/bar"
* ```
*/
export function normalizePath(p: string): string {
return p.replace(/\\/g, '/');
}
/**
* Compare two paths for equality after normalization
*
* Handles null/undefined values and normalizes paths before comparison.
* Useful for checking if two paths refer to the same location regardless
* of platform-specific path separators.
*
* @param p1 - First path to compare (or null/undefined)
* @param p2 - Second path to compare (or null/undefined)
* @returns true if paths are equal (or both null/undefined), false otherwise
*
* @example
* ```typescript
* pathsEqual("C:\\foo\\bar", "C:/foo/bar"); // true
* pathsEqual("/home/user", "/home/user"); // true
* pathsEqual("/home/user", "/home/other"); // false
* pathsEqual(null, undefined); // false
* pathsEqual(null, null); // true
* ```
*/
export function pathsEqual(p1: string | undefined | null, p2: string | undefined | null): boolean {
if (!p1 || !p2) return p1 === p2;
return normalizePath(p1) === normalizePath(p2);
}


@@ -0,0 +1,79 @@
/**
* Prompt building utilities for constructing prompts with images
*
* Provides standardized prompt building that:
* - Combines text prompts with image attachments
* - Handles content block array generation
* - Optionally includes image paths in text
* - Supports both vision and non-vision models
*/
import { convertImagesToContentBlocks, formatImagePathsForPrompt } from './image-handler.js';
/**
* Content that can be either simple text or structured blocks
*/
export type PromptContent =
| string
| Array<{
type: string;
text?: string;
source?: object;
}>;
/**
* Result of building a prompt with optional images
*/
export interface PromptWithImages {
content: PromptContent;
hasImages: boolean;
}
/**
* Build a prompt with optional image attachments
*
* @param basePrompt - The text prompt
* @param imagePaths - Optional array of image file paths
* @param workDir - Optional working directory for resolving relative paths
* @param includeImagePaths - Whether to append image paths to the text (default: false)
* @returns Promise resolving to prompt content and metadata
*/
export async function buildPromptWithImages(
basePrompt: string,
imagePaths?: string[],
workDir?: string,
includeImagePaths: boolean = false
): Promise<PromptWithImages> {
// No images - return plain text
if (!imagePaths || imagePaths.length === 0) {
return { content: basePrompt, hasImages: false };
}
// Build text content with optional image path listing
let textContent = basePrompt;
if (includeImagePaths) {
textContent += formatImagePathsForPrompt(imagePaths);
}
// Build content blocks array
const contentBlocks: Array<{
type: string;
text?: string;
source?: object;
}> = [];
// Add text block if we have text
if (textContent.trim()) {
contentBlocks.push({ type: 'text', text: textContent });
}
// Add image blocks
const imageBlocks = await convertImagesToContentBlocks(imagePaths, workDir);
contentBlocks.push(...imageBlocks);
// Return appropriate format
const content: PromptContent =
contentBlocks.length > 1 || contentBlocks[0]?.type === 'image' ? contentBlocks : textContent;
return { content, hasImages: true };
}


@@ -0,0 +1,241 @@
import { describe, it, expect } from 'vitest';
import type { ConversationMessage } from '@automaker/types';
import {
extractTextFromContent,
normalizeContentBlocks,
formatHistoryAsText,
convertHistoryToMessages,
} from '../src/conversation-utils';
describe('conversation-utils.ts', () => {
describe('extractTextFromContent', () => {
it('should extract text from string content', () => {
const content = 'Hello, world!';
const result = extractTextFromContent(content);
expect(result).toBe('Hello, world!');
});
it('should extract text from array content with text blocks', () => {
const content = [
{ type: 'text', text: 'First block' },
{ type: 'text', text: 'Second block' },
];
const result = extractTextFromContent(content);
expect(result).toBe('First block\nSecond block');
});
it('should filter out non-text blocks', () => {
const content = [
{ type: 'text', text: 'Text block' },
{ type: 'image', source: { data: '...' } },
{ type: 'text', text: 'Another text' },
];
const result = extractTextFromContent(content);
expect(result).toBe('Text block\nAnother text');
});
it('should handle empty text blocks', () => {
const content = [
{ type: 'text', text: 'First' },
{ type: 'text' },
{ type: 'text', text: 'Third' },
];
const result = extractTextFromContent(content);
expect(result).toBe('First\n\nThird');
});
it('should return empty string for array with only non-text blocks', () => {
const content = [
{ type: 'image', source: {} },
{ type: 'tool_use', source: {} },
];
const result = extractTextFromContent(content);
expect(result).toBe('');
});
it('should return empty string for empty array', () => {
const content: Array<{ type: string; text?: string }> = [];
const result = extractTextFromContent(content);
expect(result).toBe('');
});
});
describe('normalizeContentBlocks', () => {
it('should convert string to array of text blocks', () => {
const content = 'Simple text';
const result = normalizeContentBlocks(content);
expect(result).toEqual([{ type: 'text', text: 'Simple text' }]);
});
it('should return array as-is', () => {
const content = [
{ type: 'text', text: 'First' },
{ type: 'image', source: {} },
];
const result = normalizeContentBlocks(content);
expect(result).toBe(content);
expect(result).toEqual(content);
});
it('should handle empty string', () => {
const content = '';
const result = normalizeContentBlocks(content);
expect(result).toEqual([{ type: 'text', text: '' }]);
});
it('should handle multiline string', () => {
const content = 'Line 1\nLine 2\nLine 3';
const result = normalizeContentBlocks(content);
expect(result).toEqual([{ type: 'text', text: 'Line 1\nLine 2\nLine 3' }]);
});
});
describe('formatHistoryAsText', () => {
it('should format empty history as empty string', () => {
const history: ConversationMessage[] = [];
const result = formatHistoryAsText(history);
expect(result).toBe('');
});
it('should format single user message', () => {
const history: ConversationMessage[] = [{ role: 'user', content: 'Hello!' }];
const result = formatHistoryAsText(history);
expect(result).toBe('Previous conversation:\n\nUser: Hello!\n\n---\n\n');
});
it('should format single assistant message', () => {
const history: ConversationMessage[] = [{ role: 'assistant', content: 'Hi there!' }];
const result = formatHistoryAsText(history);
expect(result).toBe('Previous conversation:\n\nAssistant: Hi there!\n\n---\n\n');
});
it('should format conversation with multiple messages', () => {
const history: ConversationMessage[] = [
{ role: 'user', content: "What's 2+2?" },
{ role: 'assistant', content: 'The answer is 4.' },
{ role: 'user', content: 'Thanks!' },
];
const result = formatHistoryAsText(history);
expect(result).toBe(
'Previous conversation:\n\n' +
"User: What's 2+2?\n\n" +
'Assistant: The answer is 4.\n\n' +
'User: Thanks!\n\n' +
'---\n\n'
);
});
it('should handle array content by extracting text', () => {
const history: ConversationMessage[] = [
{
role: 'user',
content: [
{ type: 'text', text: 'First part' },
{ type: 'text', text: 'Second part' },
],
},
];
const result = formatHistoryAsText(history);
expect(result).toBe('Previous conversation:\n\nUser: First part\nSecond part\n\n---\n\n');
});
it('should handle mixed string and array content', () => {
const history: ConversationMessage[] = [
{ role: 'user', content: 'String message' },
{
role: 'assistant',
content: [{ type: 'text', text: 'Array message' }],
},
];
const result = formatHistoryAsText(history);
expect(result).toContain('User: String message');
expect(result).toContain('Assistant: Array message');
});
});
describe('convertHistoryToMessages', () => {
it('should convert empty history', () => {
const history: ConversationMessage[] = [];
const result = convertHistoryToMessages(history);
expect(result).toEqual([]);
});
it('should convert single user message', () => {
const history: ConversationMessage[] = [{ role: 'user', content: 'Hello!' }];
const result = convertHistoryToMessages(history);
expect(result).toHaveLength(1);
expect(result[0]).toMatchObject({
type: 'user',
session_id: '',
message: {
role: 'user',
content: [{ type: 'text', text: 'Hello!' }],
},
parent_tool_use_id: null,
});
});
it('should convert single assistant message', () => {
const history: ConversationMessage[] = [{ role: 'assistant', content: 'Hi there!' }];
const result = convertHistoryToMessages(history);
expect(result).toHaveLength(1);
expect(result[0]).toMatchObject({
type: 'assistant',
session_id: '',
message: {
role: 'assistant',
content: [{ type: 'text', text: 'Hi there!' }],
},
parent_tool_use_id: null,
});
});
it('should preserve array content as-is', () => {
const content = [
{ type: 'text', text: 'Text' },
{ type: 'image', source: { data: '...' } },
];
const history: ConversationMessage[] = [{ role: 'user', content }];
const result = convertHistoryToMessages(history);
expect(result[0].message.content).toEqual(content);
});
it('should convert multiple messages', () => {
const history: ConversationMessage[] = [
{ role: 'user', content: 'First' },
{ role: 'assistant', content: 'Second' },
{ role: 'user', content: 'Third' },
];
const result = convertHistoryToMessages(history);
expect(result).toHaveLength(3);
expect(result[0].type).toBe('user');
expect(result[1].type).toBe('assistant');
expect(result[2].type).toBe('user');
});
it('should set session_id to empty string', () => {
const history: ConversationMessage[] = [{ role: 'user', content: 'Test' }];
const result = convertHistoryToMessages(history);
expect(result[0].session_id).toBe('');
});
it('should set parent_tool_use_id to null', () => {
const history: ConversationMessage[] = [{ role: 'user', content: 'Test' }];
const result = convertHistoryToMessages(history);
expect(result[0].parent_tool_use_id).toBeNull();
});
it('should normalize string content to blocks', () => {
const history: ConversationMessage[] = [{ role: 'user', content: 'String content' }];
const result = convertHistoryToMessages(history);
expect(result[0].message.content).toEqual([{ type: 'text', text: 'String content' }]);
});
});
});


@@ -0,0 +1,261 @@
import { describe, it, expect } from 'vitest';
import {
isAbortError,
isCancellationError,
isAuthenticationError,
classifyError,
getUserFriendlyErrorMessage,
} from '../src/error-handler';
describe('error-handler.ts', () => {
describe('isAbortError', () => {
it("should return true for Error with name 'AbortError'", () => {
const error = new Error('Operation aborted');
error.name = 'AbortError';
expect(isAbortError(error)).toBe(true);
});
it("should return true for Error with message containing 'abort'", () => {
const error = new Error('Request was aborted');
expect(isAbortError(error)).toBe(true);
});
it('should return false for regular Error', () => {
const error = new Error('Something went wrong');
expect(isAbortError(error)).toBe(false);
});
it('should return false for non-Error values', () => {
expect(isAbortError('abort')).toBe(false);
expect(isAbortError(null)).toBe(false);
expect(isAbortError(undefined)).toBe(false);
expect(isAbortError({})).toBe(false);
});
it('should handle Error with both AbortError name and abort message', () => {
const error = new Error('abort');
error.name = 'AbortError';
expect(isAbortError(error)).toBe(true);
});
});
describe('isCancellationError', () => {
it("should return true for 'cancelled' message", () => {
expect(isCancellationError('Operation cancelled')).toBe(true);
expect(isCancellationError('CANCELLED')).toBe(true);
});
it("should return true for 'canceled' message (US spelling)", () => {
expect(isCancellationError('Operation canceled')).toBe(true);
expect(isCancellationError('CANCELED')).toBe(true);
});
it("should return true for 'stopped' message", () => {
expect(isCancellationError('Process stopped')).toBe(true);
expect(isCancellationError('STOPPED')).toBe(true);
});
it("should return true for 'aborted' message", () => {
expect(isCancellationError('Request aborted')).toBe(true);
expect(isCancellationError('ABORTED')).toBe(true);
});
it('should return false for non-cancellation messages', () => {
expect(isCancellationError('Something went wrong')).toBe(false);
expect(isCancellationError('Error occurred')).toBe(false);
expect(isCancellationError('')).toBe(false);
});
it('should be case-insensitive', () => {
expect(isCancellationError('CaNcElLeD')).toBe(true);
expect(isCancellationError('StOpPeD')).toBe(true);
});
});
describe('isAuthenticationError', () => {
it("should return true for 'Authentication failed' message", () => {
expect(isAuthenticationError('Authentication failed')).toBe(true);
});
it("should return true for 'Invalid API key' message", () => {
expect(isAuthenticationError('Invalid API key provided')).toBe(true);
});
it("should return true for 'authentication_failed' message", () => {
expect(isAuthenticationError('Error: authentication_failed')).toBe(true);
});
it("should return true for 'Fix external API key' message", () => {
expect(isAuthenticationError('Fix external API key configuration')).toBe(true);
});
it('should return false for non-authentication errors', () => {
expect(isAuthenticationError('Something went wrong')).toBe(false);
expect(isAuthenticationError('Network error')).toBe(false);
expect(isAuthenticationError('')).toBe(false);
});
it('should be case-sensitive', () => {
expect(isAuthenticationError('authentication failed')).toBe(false);
expect(isAuthenticationError('AUTHENTICATION FAILED')).toBe(false);
});
});
describe('classifyError', () => {
it('should classify authentication errors', () => {
const error = new Error('Authentication failed');
const result = classifyError(error);
expect(result.type).toBe('authentication');
expect(result.isAuth).toBe(true);
expect(result.isAbort).toBe(false);
expect(result.isCancellation).toBe(false);
expect(result.message).toBe('Authentication failed');
expect(result.originalError).toBe(error);
});
it('should classify abort errors', () => {
const error = new Error('aborted');
const result = classifyError(error);
expect(result.type).toBe('abort');
expect(result.isAbort).toBe(true);
expect(result.isAuth).toBe(false);
expect(result.message).toBe('aborted');
});
it('should classify AbortError by name', () => {
const error = new Error('Request cancelled');
error.name = 'AbortError';
const result = classifyError(error);
expect(result.type).toBe('abort');
expect(result.isAbort).toBe(true);
});
it('should classify cancellation errors', () => {
const error = new Error('Operation cancelled');
const result = classifyError(error);
expect(result.type).toBe('cancellation');
expect(result.isCancellation).toBe(true);
expect(result.isAbort).toBe(false);
});
it('should classify execution errors (regular Error)', () => {
const error = new Error('Something went wrong');
const result = classifyError(error);
expect(result.type).toBe('execution');
expect(result.isAuth).toBe(false);
expect(result.isAbort).toBe(false);
expect(result.isCancellation).toBe(false);
});
it('should classify unknown errors (non-Error)', () => {
const result = classifyError('string error');
expect(result.type).toBe('unknown');
expect(result.message).toBe('string error');
});
it('should handle null/undefined errors', () => {
const result1 = classifyError(null);
expect(result1.type).toBe('unknown');
expect(result1.message).toBe('Unknown error');
const result2 = classifyError(undefined);
expect(result2.type).toBe('unknown');
expect(result2.message).toBe('Unknown error');
});
it('should prioritize authentication over abort', () => {
const error = new Error('Authentication failed - aborted');
const result = classifyError(error);
expect(result.type).toBe('authentication');
expect(result.isAuth).toBe(true);
expect(result.isAbort).toBe(true); // Both flags can be true
});
it('should prioritize abort over cancellation', () => {
const error = new Error('Request cancelled');
error.name = 'AbortError';
const result = classifyError(error);
expect(result.type).toBe('abort');
expect(result.isAbort).toBe(true);
expect(result.isCancellation).toBe(true); // Both flags can be true
});
it('should convert object errors to string', () => {
const result = classifyError({ code: 500, message: 'Server error' });
expect(result.message).toContain('Object');
});
it('should convert number errors to string', () => {
const result = classifyError(404);
expect(result.message).toBe('404');
expect(result.type).toBe('unknown');
});
});
describe('getUserFriendlyErrorMessage', () => {
it('should return friendly message for abort errors', () => {
const error = new Error('abort');
const message = getUserFriendlyErrorMessage(error);
expect(message).toBe('Operation was cancelled');
});
it('should return friendly message for AbortError by name', () => {
const error = new Error('Something');
error.name = 'AbortError';
const message = getUserFriendlyErrorMessage(error);
expect(message).toBe('Operation was cancelled');
});
it('should return friendly message for authentication errors', () => {
const error = new Error('Authentication failed');
const message = getUserFriendlyErrorMessage(error);
expect(message).toBe('Authentication failed. Please check your API key.');
});
it('should prioritize abort message over auth', () => {
const error = new Error('Authentication failed - abort');
const message = getUserFriendlyErrorMessage(error);
// Auth is checked first in classifyError, but abort check happens before auth in getUserFriendlyErrorMessage
expect(message).toBe('Operation was cancelled');
});
it('should return original message for other errors', () => {
const error = new Error('Network timeout');
const message = getUserFriendlyErrorMessage(error);
expect(message).toBe('Network timeout');
});
it('should handle non-Error values', () => {
expect(getUserFriendlyErrorMessage('string error')).toBe('string error');
expect(getUserFriendlyErrorMessage(null)).toBe('Unknown error');
expect(getUserFriendlyErrorMessage(undefined)).toBe('Unknown error');
});
it('should return original message for cancellation errors', () => {
const error = new Error('Operation cancelled by user');
const message = getUserFriendlyErrorMessage(error);
expect(message).toBe('Operation cancelled by user');
});
it('should handle Error without message', () => {
const error = new Error();
const message = getUserFriendlyErrorMessage(error);
expect(message).toBe('');
});
});
});
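
The assertions above fully pin down the precedence the classifier applies. As a reading aid, here is a minimal sketch of that logic, reconstructed only from the expectations in these tests: the `sketch*` names, the regexes, and the exact control flow are assumptions, not the actual code in `src/`.

```typescript
// Hedged sketch of the error-classification helpers, inferred from the tests:
// the cancellation check is case-insensitive, the authentication check is
// case-sensitive, and classifyError reports one type by priority
// (authentication > abort > cancellation > execution) while the boolean flags
// stay independent of each other.
export function sketchIsCancellationError(message: string): boolean {
  return /cancel|stopped|aborted/i.test(message);
}

export function sketchIsAuthenticationError(message: string): boolean {
  return /Authentication failed|Invalid API key|authentication_failed|Fix external API key/.test(
    message
  );
}

export interface SketchClassifiedError {
  type: 'authentication' | 'abort' | 'cancellation' | 'execution' | 'unknown';
  isAuth: boolean;
  isAbort: boolean;
  isCancellation: boolean;
  message: string;
  originalError: unknown;
}

export function sketchClassifyError(error: unknown): SketchClassifiedError {
  const isErr = error instanceof Error;
  // Non-Error values become 'unknown'; null/undefined fall back to a fixed message.
  const message = isErr ? error.message : error == null ? 'Unknown error' : String(error);

  const isAuth = sketchIsAuthenticationError(message);
  const isAbort = (isErr && error.name === 'AbortError') || /abort/i.test(message);
  const isCancellation = isAbort || sketchIsCancellationError(message);

  const type = !isErr
    ? 'unknown'
    : isAuth
      ? 'authentication'
      : isAbort
        ? 'abort'
        : isCancellation
          ? 'cancellation'
          : 'execution';

  return { type, isAuth, isAbort, isCancellation, message, originalError: error };
}

export function sketchGetUserFriendlyErrorMessage(error: unknown): string {
  const classified = sketchClassifyError(error);
  // Here abort wins even when the auth flag is also set (see the tests above).
  if (classified.isAbort) return 'Operation was cancelled';
  if (classified.isAuth) return 'Authentication failed. Please check your API key.';
  return classified.message;
}
```

The design point the tests encode is that the boolean flags are computed independently while `type` is exclusive, so callers can branch on either view of the same error.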

View File

@@ -0,0 +1,246 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import fs from 'fs/promises';
import path from 'path';
import os from 'os';
import { mkdirSafe, existsSafe } from '../src/fs-utils';
describe('fs-utils.ts', () => {
let tempDir: string;
beforeEach(async () => {
// Create a temporary directory for testing
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fs-utils-test-'));
});
afterEach(async () => {
// Clean up temporary directory
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe('mkdirSafe', () => {
it('should create a new directory', async () => {
const newDir = path.join(tempDir, 'new-directory');
await mkdirSafe(newDir);
const stats = await fs.stat(newDir);
expect(stats.isDirectory()).toBe(true);
});
it('should create nested directories recursively', async () => {
const nestedDir = path.join(tempDir, 'level1', 'level2', 'level3');
await mkdirSafe(nestedDir);
const stats = await fs.stat(nestedDir);
expect(stats.isDirectory()).toBe(true);
});
it('should succeed when directory already exists', async () => {
const existingDir = path.join(tempDir, 'existing');
await fs.mkdir(existingDir);
await expect(mkdirSafe(existingDir)).resolves.not.toThrow();
});
it('should succeed when path is a symlink to a directory', async () => {
const targetDir = path.join(tempDir, 'target');
const symlinkPath = path.join(tempDir, 'symlink');
await fs.mkdir(targetDir);
await fs.symlink(targetDir, symlinkPath, 'dir');
await expect(mkdirSafe(symlinkPath)).resolves.not.toThrow();
});
it('should throw when path exists as a file', async () => {
const filePath = path.join(tempDir, 'existing-file.txt');
await fs.writeFile(filePath, 'content');
await expect(mkdirSafe(filePath)).rejects.toThrow('Path exists and is not a directory');
});
it('should resolve relative paths', async () => {
const originalCwd = process.cwd();
try {
process.chdir(tempDir);
await mkdirSafe('relative-dir');
const stats = await fs.stat(path.join(tempDir, 'relative-dir'));
expect(stats.isDirectory()).toBe(true);
} finally {
process.chdir(originalCwd);
}
});
it('should handle concurrent creation gracefully', async () => {
const newDir = path.join(tempDir, 'concurrent');
const promises = [mkdirSafe(newDir), mkdirSafe(newDir), mkdirSafe(newDir)];
await expect(Promise.all(promises)).resolves.not.toThrow();
const stats = await fs.stat(newDir);
expect(stats.isDirectory()).toBe(true);
});
it('should handle paths with special characters', async () => {
const specialDir = path.join(tempDir, 'dir with spaces & special-chars');
await mkdirSafe(specialDir);
const stats = await fs.stat(specialDir);
expect(stats.isDirectory()).toBe(true);
});
});
describe('existsSafe', () => {
it('should return true for existing directory', async () => {
const existingDir = path.join(tempDir, 'exists');
await fs.mkdir(existingDir);
const result = await existsSafe(existingDir);
expect(result).toBe(true);
});
it('should return true for existing file', async () => {
const filePath = path.join(tempDir, 'file.txt');
await fs.writeFile(filePath, 'content');
const result = await existsSafe(filePath);
expect(result).toBe(true);
});
it('should return false for non-existent path', async () => {
const nonExistent = path.join(tempDir, 'does-not-exist');
const result = await existsSafe(nonExistent);
expect(result).toBe(false);
});
it('should return true for symlink', async () => {
const target = path.join(tempDir, 'target.txt');
const symlink = path.join(tempDir, 'link.txt');
await fs.writeFile(target, 'content');
await fs.symlink(target, symlink);
const result = await existsSafe(symlink);
expect(result).toBe(true);
});
it('should return true for broken symlink', async () => {
const symlink = path.join(tempDir, 'broken-link');
// Create symlink to non-existent target
await fs.symlink('/non/existent/path', symlink);
const result = await existsSafe(symlink);
// lstat succeeds on broken symlinks
expect(result).toBe(true);
});
it('should handle relative paths', async () => {
const originalCwd = process.cwd();
try {
process.chdir(tempDir);
await fs.writeFile('test.txt', 'content');
const result = await existsSafe('test.txt');
expect(result).toBe(true);
} finally {
process.chdir(originalCwd);
}
});
it('should handle paths with special characters', async () => {
const specialFile = path.join(tempDir, 'file with spaces & chars.txt');
await fs.writeFile(specialFile, 'content');
const result = await existsSafe(specialFile);
expect(result).toBe(true);
});
it('should return false for parent of non-existent nested path', async () => {
const nonExistent = path.join(tempDir, 'does', 'not', 'exist');
const result = await existsSafe(nonExistent);
expect(result).toBe(false);
});
});
describe('Error handling', () => {
it('should handle permission errors in mkdirSafe', async () => {
// Skip on Windows where permissions work differently
if (process.platform === 'win32') {
return;
}
const restrictedDir = path.join(tempDir, 'restricted');
await fs.mkdir(restrictedDir);
// Make directory read-only
await fs.chmod(restrictedDir, 0o444);
const newDir = path.join(restrictedDir, 'new');
try {
await expect(mkdirSafe(newDir)).rejects.toThrow();
} finally {
// Restore permissions for cleanup
await fs.chmod(restrictedDir, 0o755);
}
});
it('should propagate unexpected errors in existsSafe', async () => {
const mockError = new Error('Unexpected error');
(mockError as any).code = 'EACCES';
const spy = vi.spyOn(fs, 'lstat').mockRejectedValueOnce(mockError);
await expect(existsSafe('/some/path')).rejects.toThrow('Unexpected error');
spy.mockRestore();
});
});
describe('Integration scenarios', () => {
it('should work together: check existence then create if missing', async () => {
const dirPath = path.join(tempDir, 'check-then-create');
const existsBefore = await existsSafe(dirPath);
expect(existsBefore).toBe(false);
await mkdirSafe(dirPath);
const existsAfter = await existsSafe(dirPath);
expect(existsAfter).toBe(true);
});
it('should handle nested directory creation with existence checks', async () => {
const level1 = path.join(tempDir, 'level1');
const level2 = path.join(level1, 'level2');
const level3 = path.join(level2, 'level3');
await mkdirSafe(level3);
expect(await existsSafe(level1)).toBe(true);
expect(await existsSafe(level2)).toBe(true);
expect(await existsSafe(level3)).toBe(true);
});
});
});
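
For orientation, a hedged reconstruction of the two helpers these tests exercise follows: `mkdirSafe` as an idempotent recursive `mkdir` that only fails when the path already exists as something other than a directory, and `existsSafe` as an `lstat` probe that treats only `ENOENT` as "missing". The `sketch*` names and error-handling details are inferred from the assertions, not copied from `src/fs-utils`.

```typescript
// Hypothetical sketch, assuming Node's fs/promises API; behaviour is derived
// from the tests above rather than from the real implementation.
import fs from 'fs/promises';
import path from 'path';

export async function sketchMkdirSafe(dirPath: string): Promise<void> {
  const resolved = path.resolve(dirPath);
  try {
    // Recursive mkdir is already idempotent for plain directories.
    await fs.mkdir(resolved, { recursive: true });
  } catch (error) {
    const stats = await fs.stat(resolved).catch(() => null);
    if (stats?.isDirectory()) return; // e.g. a symlink pointing at a directory
    if (stats) throw new Error(`Path exists and is not a directory: ${resolved}`);
    throw error; // permission errors and the like propagate unchanged
  }
}

export async function sketchExistsSafe(targetPath: string): Promise<boolean> {
  try {
    await fs.lstat(path.resolve(targetPath)); // lstat: broken symlinks still "exist"
    return true;
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code === 'ENOENT') return false;
    throw error; // unexpected errors (EACCES, ...) are rethrown to the caller
  }
}
```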

View File

@@ -0,0 +1,244 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import fs from 'fs/promises';
import path from 'path';
import os from 'os';
import {
getMimeTypeForImage,
readImageAsBase64,
convertImagesToContentBlocks,
formatImagePathsForPrompt,
} from '../src/image-handler';
describe('image-handler.ts', () => {
let tempDir: string;
beforeEach(async () => {
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'image-handler-test-'));
});
afterEach(async () => {
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe('getMimeTypeForImage', () => {
it('should return correct MIME type for .jpg', () => {
expect(getMimeTypeForImage('image.jpg')).toBe('image/jpeg');
expect(getMimeTypeForImage('/path/to/image.jpg')).toBe('image/jpeg');
});
it('should return correct MIME type for .jpeg', () => {
expect(getMimeTypeForImage('image.jpeg')).toBe('image/jpeg');
});
it('should return correct MIME type for .png', () => {
expect(getMimeTypeForImage('image.png')).toBe('image/png');
});
it('should return correct MIME type for .gif', () => {
expect(getMimeTypeForImage('image.gif')).toBe('image/gif');
});
it('should return correct MIME type for .webp', () => {
expect(getMimeTypeForImage('image.webp')).toBe('image/webp');
});
it('should be case-insensitive', () => {
expect(getMimeTypeForImage('image.JPG')).toBe('image/jpeg');
expect(getMimeTypeForImage('image.PNG')).toBe('image/png');
expect(getMimeTypeForImage('image.GIF')).toBe('image/gif');
});
it('should default to image/png for unknown extensions', () => {
expect(getMimeTypeForImage('file.xyz')).toBe('image/png');
expect(getMimeTypeForImage('file.txt')).toBe('image/png');
expect(getMimeTypeForImage('file')).toBe('image/png');
});
it('should handle filenames with multiple dots', () => {
expect(getMimeTypeForImage('my.file.name.jpg')).toBe('image/jpeg');
});
});
describe('readImageAsBase64', () => {
it('should read image and return base64 data', async () => {
const imagePath = path.join(tempDir, 'test.png');
const imageContent = Buffer.from('fake png data');
await fs.writeFile(imagePath, imageContent);
const result = await readImageAsBase64(imagePath);
expect(result.base64).toBe(imageContent.toString('base64'));
expect(result.mimeType).toBe('image/png');
expect(result.filename).toBe('test.png');
expect(result.originalPath).toBe(imagePath);
});
it('should handle different image formats', async () => {
const formats = [
{ ext: 'jpg', mime: 'image/jpeg' },
{ ext: 'png', mime: 'image/png' },
{ ext: 'gif', mime: 'image/gif' },
{ ext: 'webp', mime: 'image/webp' },
];
for (const format of formats) {
const imagePath = path.join(tempDir, `image.${format.ext}`);
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await readImageAsBase64(imagePath);
expect(result.mimeType).toBe(format.mime);
expect(result.filename).toBe(`image.${format.ext}`);
}
});
it("should throw error if file doesn't exist", async () => {
const imagePath = path.join(tempDir, 'nonexistent.png');
await expect(readImageAsBase64(imagePath)).rejects.toThrow();
});
it('should handle binary image data correctly', async () => {
const imagePath = path.join(tempDir, 'binary.png');
const binaryData = Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a]);
await fs.writeFile(imagePath, binaryData);
const result = await readImageAsBase64(imagePath);
expect(result.base64).toBe(binaryData.toString('base64'));
});
});
describe('convertImagesToContentBlocks', () => {
it('should convert single image to content block', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('image data'));
const result = await convertImagesToContentBlocks([imagePath]);
expect(result).toHaveLength(1);
expect(result[0]).toMatchObject({
type: 'image',
source: {
type: 'base64',
media_type: 'image/png',
},
});
expect(result[0].source.data).toBeTruthy();
});
it('should convert multiple images', async () => {
const image1 = path.join(tempDir, 'image1.jpg');
const image2 = path.join(tempDir, 'image2.png');
await fs.writeFile(image1, Buffer.from('jpg data'));
await fs.writeFile(image2, Buffer.from('png data'));
const result = await convertImagesToContentBlocks([image1, image2]);
expect(result).toHaveLength(2);
expect(result[0].source.media_type).toBe('image/jpeg');
expect(result[1].source.media_type).toBe('image/png');
});
it('should resolve relative paths with workDir', async () => {
const image = 'test.png';
const imagePath = path.join(tempDir, image);
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await convertImagesToContentBlocks([image], tempDir);
expect(result).toHaveLength(1);
expect(result[0].type).toBe('image');
});
it('should handle absolute paths without workDir', async () => {
const imagePath = path.join(tempDir, 'absolute.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await convertImagesToContentBlocks([imagePath]);
expect(result).toHaveLength(1);
});
it('should skip images that fail to load', async () => {
const validImage = path.join(tempDir, 'valid.png');
const invalidImage = path.join(tempDir, 'nonexistent.png');
await fs.writeFile(validImage, Buffer.from('data'));
const result = await convertImagesToContentBlocks([validImage, invalidImage]);
expect(result).toHaveLength(1);
expect(result[0].source.media_type).toBe('image/png');
});
it('should return empty array for empty input', async () => {
const result = await convertImagesToContentBlocks([]);
expect(result).toEqual([]);
});
it('should preserve order of images', async () => {
const images = ['img1.jpg', 'img2.png', 'img3.gif'];
for (const img of images) {
await fs.writeFile(path.join(tempDir, img), Buffer.from('data'));
}
const result = await convertImagesToContentBlocks(images, tempDir);
expect(result).toHaveLength(3);
expect(result[0].source.media_type).toBe('image/jpeg');
expect(result[1].source.media_type).toBe('image/png');
expect(result[2].source.media_type).toBe('image/gif');
});
});
describe('formatImagePathsForPrompt', () => {
it('should return empty string for empty array', () => {
const result = formatImagePathsForPrompt([]);
expect(result).toBe('');
});
it('should format single image path', () => {
const result = formatImagePathsForPrompt(['/path/to/image.png']);
expect(result).toBe('\n\nAttached images:\n- /path/to/image.png\n');
});
it('should format multiple image paths', () => {
const result = formatImagePathsForPrompt([
'/path/image1.png',
'/path/image2.jpg',
'/path/image3.gif',
]);
expect(result).toBe(
'\n\nAttached images:\n' +
'- /path/image1.png\n' +
'- /path/image2.jpg\n' +
'- /path/image3.gif\n'
);
});
it('should handle relative paths', () => {
const result = formatImagePathsForPrompt(['relative/path/image.png', 'another/image.jpg']);
expect(result).toContain('- relative/path/image.png');
expect(result).toContain('- another/image.jpg');
});
it('should start with newlines', () => {
const result = formatImagePathsForPrompt(['/image.png']);
expect(result.startsWith('\n\n')).toBe(true);
});
it('should include header text', () => {
const result = formatImagePathsForPrompt(['/image.png']);
expect(result).toContain('Attached images:');
});
});
});
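
The MIME lookup and the prompt-path formatter are small enough that these tests determine them completely; a sketch follows for reference. The extension table, the PNG fallback, and the exact string layout come from the expectations above, while the `sketch*` names are placeholders rather than the real exports of `src/image-handler`.

```typescript
// Hedged sketch of the two pure helpers exercised above.
import path from 'path';

const SKETCH_MIME_BY_EXT: Record<string, string> = {
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.png': 'image/png',
  '.gif': 'image/gif',
  '.webp': 'image/webp',
};

export function sketchGetMimeTypeForImage(filePath: string): string {
  const ext = path.extname(filePath).toLowerCase();
  return SKETCH_MIME_BY_EXT[ext] ?? 'image/png'; // unknown extensions default to PNG
}

export function sketchFormatImagePathsForPrompt(imagePaths: string[]): string {
  if (imagePaths.length === 0) return '';
  const list = imagePaths.map((p) => `- ${p}`).join('\n');
  return `\n\nAttached images:\n${list}\n`;
}
```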

View File

@@ -0,0 +1,292 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { createLogger, LogLevel, getLogLevel, setLogLevel } from '../src/logger';
describe('logger.ts', () => {
let originalConsoleError: typeof console.error;
let originalConsoleWarn: typeof console.warn;
let originalConsoleLog: typeof console.log;
let originalLogLevel: LogLevel;
beforeEach(() => {
// Save original console methods and log level
originalConsoleError = console.error;
originalConsoleWarn = console.warn;
originalConsoleLog = console.log;
originalLogLevel = getLogLevel();
// Mock console methods
console.error = vi.fn();
console.warn = vi.fn();
console.log = vi.fn();
});
afterEach(() => {
// Restore original console methods and log level
console.error = originalConsoleError;
console.warn = originalConsoleWarn;
console.log = originalConsoleLog;
setLogLevel(originalLogLevel);
});
describe('createLogger', () => {
it('should create logger with context prefix', () => {
const logger = createLogger('TestContext');
setLogLevel(LogLevel.INFO);
logger.info('test message');
expect(console.log).toHaveBeenCalledWith('[TestContext]', 'test message');
});
it('should handle multiple arguments', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.INFO);
logger.info('message', { data: 123 }, [1, 2, 3]);
expect(console.log).toHaveBeenCalledWith('[Test]', 'message', { data: 123 }, [1, 2, 3]);
});
});
describe('Log levels', () => {
it('should log error at ERROR level', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.ERROR);
logger.error('error message');
logger.warn('warn message');
logger.info('info message');
logger.debug('debug message');
expect(console.error).toHaveBeenCalledTimes(1);
expect(console.warn).not.toHaveBeenCalled();
expect(console.log).not.toHaveBeenCalled();
});
it('should log error and warn at WARN level', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.WARN);
logger.error('error message');
logger.warn('warn message');
logger.info('info message');
logger.debug('debug message');
expect(console.error).toHaveBeenCalledTimes(1);
expect(console.warn).toHaveBeenCalledTimes(1);
expect(console.log).not.toHaveBeenCalled();
});
it('should log error, warn, and info at INFO level', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.INFO);
logger.error('error message');
logger.warn('warn message');
logger.info('info message');
logger.debug('debug message');
expect(console.error).toHaveBeenCalledTimes(1);
expect(console.warn).toHaveBeenCalledTimes(1);
expect(console.log).toHaveBeenCalledTimes(1); // Only info, not debug
});
it('should log all messages at DEBUG level', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.DEBUG);
logger.error('error message');
logger.warn('warn message');
logger.info('info message');
logger.debug('debug message');
expect(console.error).toHaveBeenCalledTimes(1);
expect(console.warn).toHaveBeenCalledTimes(1);
expect(console.log).toHaveBeenCalledTimes(2); // info + debug
});
});
describe('error method', () => {
it('should use console.error', () => {
const logger = createLogger('ErrorTest');
setLogLevel(LogLevel.ERROR);
logger.error('error occurred', { code: 500 });
expect(console.error).toHaveBeenCalledWith('[ErrorTest]', 'error occurred', { code: 500 });
});
it('should not log when level is below ERROR', () => {
const logger = createLogger('Test');
setLogLevel((LogLevel.ERROR - 1) as LogLevel);
logger.error('should not appear');
expect(console.error).not.toHaveBeenCalled();
});
});
describe('warn method', () => {
it('should use console.warn', () => {
const logger = createLogger('WarnTest');
setLogLevel(LogLevel.WARN);
logger.warn('warning message');
expect(console.warn).toHaveBeenCalledWith('[WarnTest]', 'warning message');
});
it('should not log when level is below WARN', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.ERROR);
logger.warn('should not appear');
expect(console.warn).not.toHaveBeenCalled();
});
});
describe('info method', () => {
it('should use console.log', () => {
const logger = createLogger('InfoTest');
setLogLevel(LogLevel.INFO);
logger.info('info message');
expect(console.log).toHaveBeenCalledWith('[InfoTest]', 'info message');
});
it('should not log when level is below INFO', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.WARN);
logger.info('should not appear');
expect(console.log).not.toHaveBeenCalled();
});
});
describe('debug method', () => {
it('should use console.log with DEBUG prefix', () => {
const logger = createLogger('DebugTest');
setLogLevel(LogLevel.DEBUG);
logger.debug('debug details', { trace: '...' });
expect(console.log).toHaveBeenCalledWith('[DebugTest]', '[DEBUG]', 'debug details', {
trace: '...',
});
});
it('should not log when level is below DEBUG', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.INFO);
logger.debug('should not appear');
expect(console.log).not.toHaveBeenCalled();
});
});
describe('getLogLevel', () => {
it('should return current log level', () => {
setLogLevel(LogLevel.DEBUG);
expect(getLogLevel()).toBe(LogLevel.DEBUG);
setLogLevel(LogLevel.ERROR);
expect(getLogLevel()).toBe(LogLevel.ERROR);
});
});
describe('setLogLevel', () => {
it('should change log level', () => {
setLogLevel(LogLevel.WARN);
expect(getLogLevel()).toBe(LogLevel.WARN);
setLogLevel(LogLevel.DEBUG);
expect(getLogLevel()).toBe(LogLevel.DEBUG);
});
it('should affect subsequent logging', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.ERROR);
logger.info('should not log');
expect(console.log).not.toHaveBeenCalled();
setLogLevel(LogLevel.INFO);
logger.info('should log');
expect(console.log).toHaveBeenCalledWith('[Test]', 'should log');
});
});
describe('Multiple logger instances', () => {
it('should maintain separate contexts', () => {
const logger1 = createLogger('Service1');
const logger2 = createLogger('Service2');
setLogLevel(LogLevel.INFO);
logger1.info('from service 1');
logger2.info('from service 2');
expect(console.log).toHaveBeenNthCalledWith(1, '[Service1]', 'from service 1');
expect(console.log).toHaveBeenNthCalledWith(2, '[Service2]', 'from service 2');
});
it('should share log level setting', () => {
const logger1 = createLogger('Service1');
const logger2 = createLogger('Service2');
setLogLevel(LogLevel.ERROR);
logger1.info('should not log');
logger2.info('should not log');
expect(console.log).not.toHaveBeenCalled();
});
});
describe('Edge cases', () => {
it('should handle empty context string', () => {
const logger = createLogger('');
setLogLevel(LogLevel.INFO);
logger.info('message');
expect(console.log).toHaveBeenCalledWith('[]', 'message');
});
it('should handle context with special characters', () => {
const logger = createLogger('Test-Service_v2.0');
setLogLevel(LogLevel.INFO);
logger.info('message');
expect(console.log).toHaveBeenCalledWith('[Test-Service_v2.0]', 'message');
});
it('should handle no arguments to log methods', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.INFO);
logger.info();
expect(console.log).toHaveBeenCalledWith('[Test]');
});
it('should handle complex object arguments', () => {
const logger = createLogger('Test');
setLogLevel(LogLevel.INFO);
const complexObj = {
nested: { deep: { value: 123 } },
array: [1, 2, 3],
fn: () => {},
};
logger.info('complex', complexObj);
expect(console.log).toHaveBeenCalledWith('[Test]', 'complex', complexObj);
});
});
});
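
These tests imply a logger built around one shared, mutable module-level log level plus per-instance context prefixes. A minimal sketch consistent with that behaviour is below; the numeric enum values and the default level are assumptions, since the tests only observe the relative ordering ERROR < WARN < INFO < DEBUG.

```typescript
// Hedged sketch of src/logger's observable behaviour; details are assumptions.
export enum LogLevel {
  ERROR = 0,
  WARN = 1,
  INFO = 2,
  DEBUG = 3,
}

let currentLevel: LogLevel = LogLevel.INFO; // default level is a guess

export const getLogLevel = (): LogLevel => currentLevel;
export const setLogLevel = (level: LogLevel): void => {
  currentLevel = level;
};

export function createLogger(context: string) {
  const prefix = `[${context}]`;
  return {
    error: (...args: unknown[]) => {
      if (currentLevel >= LogLevel.ERROR) console.error(prefix, ...args);
    },
    warn: (...args: unknown[]) => {
      if (currentLevel >= LogLevel.WARN) console.warn(prefix, ...args);
    },
    info: (...args: unknown[]) => {
      if (currentLevel >= LogLevel.INFO) console.log(prefix, ...args);
    },
    debug: (...args: unknown[]) => {
      if (currentLevel >= LogLevel.DEBUG) console.log(prefix, '[DEBUG]', ...args);
    },
  };
}
```

Because the level is module state, every logger instance shares it, which is exactly what the "Multiple logger instances" tests assert.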

View File

@@ -0,0 +1,283 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import fs from 'fs/promises';
import path from 'path';
import os from 'os';
import { buildPromptWithImages } from '../src/prompt-builder';
describe('prompt-builder.ts', () => {
let tempDir: string;
beforeEach(async () => {
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'prompt-builder-test-'));
});
afterEach(async () => {
try {
await fs.rm(tempDir, { recursive: true, force: true });
} catch (error) {
// Ignore cleanup errors
}
});
describe('buildPromptWithImages - no images', () => {
it('should return plain text when no images provided', async () => {
const basePrompt = 'Hello, world!';
const result = await buildPromptWithImages(basePrompt);
expect(result.content).toBe('Hello, world!');
expect(result.hasImages).toBe(false);
});
it('should return plain text when empty image array provided', async () => {
const basePrompt = 'Test prompt';
const result = await buildPromptWithImages(basePrompt, []);
expect(result.content).toBe('Test prompt');
expect(result.hasImages).toBe(false);
});
it('should handle multiline prompts', async () => {
const basePrompt = 'Line 1\nLine 2\nLine 3';
const result = await buildPromptWithImages(basePrompt);
expect(result.content).toBe('Line 1\nLine 2\nLine 3');
});
});
describe('buildPromptWithImages - with images', () => {
it('should build content blocks with single image', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('image data'));
const result = await buildPromptWithImages('Check this image', [imagePath]);
expect(result.hasImages).toBe(true);
expect(Array.isArray(result.content)).toBe(true);
const blocks = result.content as Array<{
type: string;
text?: string;
source?: object;
}>;
expect(blocks).toHaveLength(2);
expect(blocks[0]).toMatchObject({
type: 'text',
text: 'Check this image',
});
expect(blocks[1]).toMatchObject({
type: 'image',
});
});
it('should build content blocks with multiple images', async () => {
const image1 = path.join(tempDir, 'img1.jpg');
const image2 = path.join(tempDir, 'img2.png');
await fs.writeFile(image1, Buffer.from('jpg data'));
await fs.writeFile(image2, Buffer.from('png data'));
const result = await buildPromptWithImages('Two images', [image1, image2]);
expect(result.hasImages).toBe(true);
const blocks = result.content as Array<{
type: string;
text?: string;
source?: object;
}>;
expect(blocks).toHaveLength(3); // 1 text + 2 images
expect(blocks[0].type).toBe('text');
expect(blocks[1].type).toBe('image');
expect(blocks[2].type).toBe('image');
});
it('should resolve relative paths with workDir', async () => {
const imagePath = 'test.png';
const fullPath = path.join(tempDir, imagePath);
await fs.writeFile(fullPath, Buffer.from('data'));
const result = await buildPromptWithImages('Test', [imagePath], tempDir);
expect(result.hasImages).toBe(true);
expect(Array.isArray(result.content)).toBe(true);
});
it('should handle absolute paths without workDir', async () => {
const imagePath = path.join(tempDir, 'absolute.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await buildPromptWithImages('Test', [imagePath]);
expect(result.hasImages).toBe(true);
});
});
describe('buildPromptWithImages - includeImagePaths option', () => {
it('should not include image paths by default', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await buildPromptWithImages('Prompt', [imagePath]);
const blocks = result.content as Array<{
type: string;
text?: string;
}>;
const textBlock = blocks.find((b) => b.type === 'text');
expect(textBlock?.text).not.toContain('Attached images:');
expect(textBlock?.text).toBe('Prompt');
});
it('should include image paths when requested', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await buildPromptWithImages('Prompt', [imagePath], undefined, true);
const blocks = result.content as Array<{
type: string;
text?: string;
}>;
const textBlock = blocks.find((b) => b.type === 'text');
expect(textBlock?.text).toContain('Prompt');
expect(textBlock?.text).toContain('Attached images:');
expect(textBlock?.text).toContain(imagePath);
});
it('should format multiple image paths when included', async () => {
const img1 = path.join(tempDir, 'img1.png');
const img2 = path.join(tempDir, 'img2.jpg');
await fs.writeFile(img1, Buffer.from('data1'));
await fs.writeFile(img2, Buffer.from('data2'));
const result = await buildPromptWithImages('Test', [img1, img2], undefined, true);
const blocks = result.content as Array<{
type: string;
text?: string;
}>;
const textBlock = blocks.find((b) => b.type === 'text');
expect(textBlock?.text).toContain('Attached images:');
expect(textBlock?.text).toContain(img1);
expect(textBlock?.text).toContain(img2);
});
});
describe('buildPromptWithImages - edge cases', () => {
it('should handle empty prompt with images', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await buildPromptWithImages('', [imagePath]);
expect(result.hasImages).toBe(true);
const blocks = result.content as Array<{
type: string;
text?: string;
source?: object;
}>;
// Should only have image block, no text block for empty string
expect(blocks.length).toBeGreaterThan(0);
expect(blocks.some((b) => b.type === 'image')).toBe(true);
});
it('should handle whitespace-only prompt with images', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await buildPromptWithImages(' ', [imagePath]);
expect(result.hasImages).toBe(true);
const blocks = result.content as Array<{
type: string;
text?: string;
source?: object;
}>;
// Whitespace-only is trimmed, so no text block should be added
expect(blocks.every((b) => b.type !== 'text')).toBe(true);
});
it('should skip failed image loads', async () => {
const validImage = path.join(tempDir, 'valid.png');
const invalidImage = path.join(tempDir, 'nonexistent.png');
await fs.writeFile(validImage, Buffer.from('data'));
const result = await buildPromptWithImages('Test', [validImage, invalidImage]);
expect(result.hasImages).toBe(true);
const blocks = result.content as Array<{
type: string;
text?: string;
source?: object;
}>;
const imageBlocks = blocks.filter((b) => b.type === 'image');
// Only valid image should be included
expect(imageBlocks).toHaveLength(1);
});
it('should respect explicit true/false values for includeImagePaths', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const resultFalse = await buildPromptWithImages('Test', [imagePath], undefined, false);
const resultTrue = await buildPromptWithImages('Test', [imagePath], undefined, true);
const blocksFalse = resultFalse.content as Array<{
type: string;
text?: string;
}>;
const blocksTrue = resultTrue.content as Array<{
type: string;
text?: string;
}>;
expect(blocksFalse[0].text).not.toContain('Attached images:');
expect(blocksTrue[0].text).toContain('Attached images:');
});
});
describe('buildPromptWithImages - content format', () => {
it('should return string when only text and includeImagePaths false', async () => {
const result = await buildPromptWithImages('Just text', undefined);
expect(typeof result.content).toBe('string');
});
it('should return array when has images', async () => {
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await buildPromptWithImages('Text', [imagePath]);
expect(Array.isArray(result.content)).toBe(true);
});
it('should preserve prompt formatting', async () => {
const basePrompt = 'Line 1\n\nLine 2\n Indented line';
const imagePath = path.join(tempDir, 'test.png');
await fs.writeFile(imagePath, Buffer.from('data'));
const result = await buildPromptWithImages(basePrompt, [imagePath]);
const blocks = result.content as Array<{
type: string;
text?: string;
}>;
const textBlock = blocks.find((b) => b.type === 'text');
expect(textBlock?.text).toBe(basePrompt);
});
});
});
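
Putting the pieces together, `buildPromptWithImages` appears to branch on whether any images were supplied: a plain string passes through untouched, otherwise the result is an ordered array of content blocks with an optional text block first. The outline below is a sketch inferred from these tests and leans on the (assumed) image-handler helpers; the import path and parameter defaults are guesses, not the real module layout.

```typescript
// Hypothetical outline of buildPromptWithImages, matching the branches the
// tests above cover; './image-handler' is an assumed sibling-module path.
import { convertImagesToContentBlocks, formatImagePathsForPrompt } from './image-handler';

type SketchContentBlock =
  | { type: 'text'; text: string }
  | { type: 'image'; source: { type: 'base64'; media_type: string; data: string } };

export async function sketchBuildPromptWithImages(
  basePrompt: string,
  imagePaths?: string[],
  workDir?: string,
  includeImagePaths = false
): Promise<{ content: string | SketchContentBlock[]; hasImages: boolean }> {
  // No images: the prompt passes through unchanged as a plain string.
  if (!imagePaths || imagePaths.length === 0) {
    return { content: basePrompt, hasImages: false };
  }

  const imageBlocks = (await convertImagesToContentBlocks(
    imagePaths,
    workDir
  )) as SketchContentBlock[];

  // Optionally append the "Attached images:" listing to the text portion.
  const text = includeImagePaths
    ? basePrompt + formatImagePathsForPrompt(imagePaths)
    : basePrompt;

  const blocks: SketchContentBlock[] = [];
  if (text.trim().length > 0) {
    blocks.push({ type: 'text', text }); // whitespace-only prompts add no text block
  }
  blocks.push(...imageBlocks);

  return { content: blocks, hasImages: true };
}
```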

9 libs/utils/tsconfig.json Normal file
View File

@@ -0,0 +1,9 @@
{
"extends": "../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -0,0 +1,23 @@
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true,
environment: 'node',
include: ['tests/**/*.test.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'json', 'html'],
include: ['src/**/*.ts'],
exclude: ['src/**/*.d.ts', 'src/index.ts'],
thresholds: {
// Excellent coverage: 94.3% stmts, 89.77% branches, 100% funcs, 94.21% lines
// All files now have comprehensive tests
lines: 90,
functions: 95,
branches: 85,
statements: 90,
},
},
},
});