Compare commits

..

3 Commits

Author SHA1 Message Date
Stefan de Vogelaere
b88c940a36 feat: unify Claude API key and profile system with flexible key sourcing
- Add ApiKeySource type ('inline' | 'env' | 'credentials') to ClaudeApiProfile
- Allow profiles to source API keys from credentials.json or environment variables
- Add provider templates: OpenRouter, MiniMax, MiniMax (China)
- Auto-migrate existing users with Anthropic key to "Direct Anthropic" profile
- Update all API call sites to pass credentials for key resolution
- Add API key source selector to profile creation UI
- Increment settings version to 5 for migration support

This allows users to:
- Share a single API key across multiple profile configurations
- Use environment variables for CI/CD deployments
- Easily switch between providers without re-entering keys
2026-01-19 17:28:28 +01:00
Stefan de Vogelaere
10b49bd3b4 Merge remote-tracking branch 'origin/v0.13.0rc' into feature/claude-code-max-glm-api-keys 2026-01-19 14:42:15 +01:00
Stefan de Vogelaere
53298106e9 feat: add Claude API provider profiles for alternative endpoints
Add support for managing multiple Claude-compatible API endpoints
(z.AI GLM, AWS Bedrock, etc.) through provider profiles in settings.

Features:
- New ClaudeApiProfile type with base URL, API key, model mappings
- Pre-configured z.AI GLM template with correct model names
- Profile selector in Settings > Claude > API Profiles
- Clean switching between profiles and direct Anthropic API
- Immediate persistence to prevent data loss on restart

Profile support added to all execution paths:
- Agent service (chat)
- Ideation service
- Auto-mode service (feature agents, enhancements)
- Simple query service (title generation, descriptions, etc.)
- Backlog planning, commit messages, spec generation
- GitHub issue validation, suggestions

Environment variables set when profile is active:
- ANTHROPIC_BASE_URL, ANTHROPIC_AUTH_TOKEN/API_KEY
- ANTHROPIC_DEFAULT_HAIKU/SONNET/OPUS_MODEL
- API_TIMEOUT_MS, CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC
2026-01-18 13:50:41 +01:00
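Taken together, the two feature commits above describe profiles that can source an API key from an inline value, an environment variable, or credentials.json. Below is a minimal TypeScript sketch of that idea; the `ApiKeySource` and `ClaudeApiProfile` names come from the commits, while the individual fields and the resolver are illustrative assumptions.

```typescript
// Hypothetical sketch only: ApiKeySource and ClaudeApiProfile are named in the
// commits above, but these fields and this resolver are illustrative assumptions.
type ApiKeySource = 'inline' | 'env' | 'credentials';

interface ClaudeApiProfile {
  id: string;
  name: string;                // e.g. "Direct Anthropic", "OpenRouter"
  baseUrl: string;             // exported as ANTHROPIC_BASE_URL when the profile is active
  apiKeySource: ApiKeySource;
  apiKey?: string;             // used when apiKeySource === 'inline'
  envVar?: string;             // used when apiKeySource === 'env'
  modelMappings?: Partial<Record<'haiku' | 'sonnet' | 'opus', string>>;
}

interface Credentials {
  apiKeys: Record<string, string | undefined>;
}

// Resolve the effective key for a profile from one of the three sources.
function resolveApiKey(profile: ClaudeApiProfile, credentials: Credentials): string | undefined {
  switch (profile.apiKeySource) {
    case 'inline':
      return profile.apiKey;
    case 'env':
      return process.env[profile.envVar ?? 'ANTHROPIC_API_KEY'];
    case 'credentials':
      return credentials.apiKeys['anthropic'];
  }
}
```

Under this reading, sharing a single key across profiles amounts to pointing several profiles at the same credentials entry.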
504 changed files with 17959 additions and 60157 deletions


@@ -4,9 +4,6 @@ on:
   release:
     types: [published]
-permissions:
-  contents: write
 jobs:
   build:
     strategy:
@@ -65,10 +62,7 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: macos-builds
-          path: |
-            apps/ui/release/*.dmg
-            apps/ui/release/*.zip
-          if-no-files-found: error
+          path: apps/ui/release/*.{dmg,zip}
           retention-days: 30
       - name: Upload Windows artifacts
@@ -77,7 +71,6 @@ jobs:
         with:
           name: windows-builds
           path: apps/ui/release/*.exe
-          if-no-files-found: error
           retention-days: 30
       - name: Upload Linux artifacts
@@ -85,11 +78,7 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: linux-builds
-          path: |
-            apps/ui/release/*.AppImage
-            apps/ui/release/*.deb
-            apps/ui/release/*.rpm
-          if-no-files-found: error
+          path: apps/ui/release/*.{AppImage,deb,rpm}
           retention-days: 30
   upload:
@@ -119,13 +108,9 @@ jobs:
       - name: Upload to GitHub Release
         uses: softprops/action-gh-release@v2
         with:
-          fail_on_unmatched_files: true
           files: |
-            artifacts/macos-builds/*.dmg
-            artifacts/macos-builds/*.zip
-            artifacts/windows-builds/*.exe
-            artifacts/linux-builds/*.AppImage
-            artifacts/linux-builds/*.deb
-            artifacts/linux-builds/*.rpm
+            artifacts/macos-builds/*.{dmg,zip,blockmap}
+            artifacts/windows-builds/*.{exe,blockmap}
+            artifacts/linux-builds/*.{AppImage,deb,rpm,blockmap}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore

@@ -95,6 +95,3 @@ data/.api-key
 data/credentials.json
 data/
 .codex/
-# GSD planning docs (local-only)
-.planning/


@@ -1,81 +0,0 @@
# AutoModeService Refactoring
## What This Is
A comprehensive refactoring of the `auto-mode-service.ts` file (5k+ lines) into smaller, focused services with clear boundaries. This is an architectural cleanup of accumulated technical debt from rapid development, breaking the "god object" anti-pattern into maintainable, debuggable modules.
## Core Value
All existing auto-mode functionality continues working — features execute, pipelines flow, merges complete — while the codebase becomes maintainable.
## Requirements
### Validated
<!-- Existing functionality that must be preserved -->
- ✓ Single feature execution with AI agent — existing
- ✓ Concurrent execution with configurable limits — existing
- ✓ Pipeline orchestration (backlog → in-progress → approval → verified) — existing
- ✓ Git worktree isolation per feature — existing
- ✓ Automatic merging of completed work — existing
- ✓ Custom pipeline support — existing
- ✓ Test runner integration — existing
- ✓ Event streaming to frontend — existing
### Active
<!-- Refactoring goals -->
- [ ] No service file exceeds ~500 lines
- [ ] Each service has single, clear responsibility
- [ ] Service boundaries make debugging obvious
- [ ] Changes to one service don't risk breaking unrelated features
- [ ] Test coverage for critical paths
### Out of Scope
- New auto-mode features — this is cleanup, not enhancement
- UI changes — backend refactor only
- Performance optimization — maintain current performance, don't optimize
- Other service refactoring — focus on auto-mode-service.ts only
## Context
**Current state:** `apps/server/src/services/auto-mode-service.ts` is ~5700 lines handling:
- Worktree management (create, cleanup, track)
- Agent/task execution coordination
- Concurrency control and queue management
- Pipeline state machine (column transitions)
- Merge handling and conflict resolution
- Event emission for real-time updates
**Technical environment:**
- Express 5 backend, TypeScript
- Event-driven architecture via EventEmitter
- WebSocket streaming to React frontend
- Git worktrees via @automaker/git-utils
- Minimal existing test coverage
**Codebase analysis:** See `.planning/codebase/` for full architecture, conventions, and existing patterns.
## Constraints
- **Breaking changes**: Acceptable — other parts of the app can be updated to match new service interfaces
- **Test coverage**: Currently minimal — must add tests during refactoring to catch regressions
- **Incremental approach**: Required — everything is critical, so a big-bang rewrite is not an option
- **Existing patterns**: Follow conventions in `.planning/codebase/CONVENTIONS.md`
## Key Decisions
| Decision | Rationale | Outcome |
| ------------------------- | --------------------------------------------------- | --------- |
| Accept breaking changes | Allows cleaner interfaces, worth the migration cost | — Pending |
| Add tests during refactor | No existing safety net, need to build one | — Pending |
| Incremental extraction | Everything is critical, can't break it all at once | — Pending |
---
_Last updated: 2026-01-27 after initialization_


@@ -1,234 +0,0 @@
# Architecture
**Analysis Date:** 2026-01-27
## Pattern Overview
**Overall:** Monorepo with layered client-server architecture (Electron-first) and pluggable provider abstraction for AI models.
**Key Characteristics:**
- Event-driven communication via WebSocket between frontend and backend
- Multi-provider AI model abstraction layer (Claude, Cursor, Codex, Gemini, OpenCode, Copilot)
- Feature-centric workflow stored in `.automaker/` directories
- Isolated git worktree execution for each feature
- State management through Zustand stores with API persistence
## Layers
**Presentation Layer (UI):**
- Purpose: React 19 Electron/web frontend with TanStack Router file-based routing
- Location: `apps/ui/src/`
- Contains: Route components, view pages, custom React hooks, Zustand stores, API client
- Depends on: @automaker/types, @automaker/utils, HTTP API backend
- Used by: Electron main process (desktop), web browser (web mode)
**API Layer (Server):**
- Purpose: Express 5 backend exposing RESTful and WebSocket endpoints
- Location: `apps/server/src/`
- Contains: Route handlers, business logic services, middleware, provider adapters
- Depends on: @automaker/types, @automaker/utils, @automaker/platform, Claude Agent SDK
- Used by: UI frontend via HTTP/WebSocket
**Service Layer (Server):**
- Purpose: Business logic and domain operations
- Location: `apps/server/src/services/`
- Contains: AgentService, FeatureLoader, AutoModeService, SettingsService, DevServerService, etc.
- Depends on: Providers, secure filesystem, feature storage
- Used by: Route handlers
**Provider Abstraction (Server):**
- Purpose: Unified interface for different AI model providers
- Location: `apps/server/src/providers/`
- Contains: ProviderFactory, specific provider implementations (ClaudeProvider, CursorProvider, CodexProvider, GeminiProvider, OpencodeProvider, CopilotProvider)
- Depends on: @automaker/types, provider SDKs
- Used by: AgentService
**Shared Library Layer:**
- Purpose: Type definitions and utilities shared across apps
- Location: `libs/`
- Contains: @automaker/types, @automaker/utils, @automaker/platform, @automaker/prompts, @automaker/model-resolver, @automaker/dependency-resolver, @automaker/git-utils, @automaker/spec-parser
- Depends on: None (types has no external deps)
- Used by: All apps and services
## Data Flow
**Feature Execution Flow:**
1. User creates/updates feature via UI (`apps/ui/src/`)
2. UI sends HTTP request to backend (`POST /api/features`)
3. Server route handler invokes FeatureLoader to persist to `.automaker/features/{featureId}/`
4. When executing, AgentService loads feature, creates isolated git worktree via @automaker/git-utils
5. AgentService invokes ProviderFactory to get appropriate AI provider (Claude, Cursor, etc.)
6. Provider executes with context from CLAUDE.md files via @automaker/utils loadContextFiles()
7. Server emits events via EventEmitter throughout execution
8. Events stream to frontend via WebSocket
9. UI updates stores and renders real-time progress
10. Feature results persist back to `.automaker/features/` with generated agent-output.md
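A minimal sketch of steps 2 and 3 of this flow, assuming a hypothetical `saveFeature()` method on the loader; the real route handlers and FeatureLoader API may differ.

```typescript
import { Router, type Request, type Response } from 'express';

// Hypothetical loader surface for illustration; the real FeatureLoader API may differ.
interface FeatureLoaderLike {
  saveFeature(projectPath: string, feature: { id: string; title: string }): Promise<void>;
}

export function createFeaturesRouter(featureLoader: FeatureLoaderLike): Router {
  const router = Router();

  // Step 2: the UI POSTs a new feature.
  // Step 3: the handler persists it under .automaker/features/{featureId}/ via the loader.
  router.post('/api/features', async (req: Request, res: Response) => {
    const { projectPath, id, title } = req.body as { projectPath: string; id: string; title: string };
    await featureLoader.saveFeature(projectPath, { id, title });
    res.json({ success: true, featureId: id });
  });

  return router;
}
```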
**State Management:**
**Frontend State (Zustand):**
- `app-store.ts`: Global app state (projects, features, settings, boards, themes)
- `setup-store.ts`: First-time setup wizard flow
- `ideation-store.ts`: Ideation feature state
- `test-runners-store.ts`: Test runner configurations
- Settings now persist via API (`/api/settings`) rather than localStorage (see use-settings-sync.ts)
**Backend State (Services):**
- SettingsService: Global and project-specific settings (in-memory with file persistence)
- AgentService: Active agent sessions and conversation history
- FeatureLoader: Feature data model operations
- DevServerService: Development server logs
- EventHistoryService: Persists event logs for replay
**Real-Time Updates (WebSocket):**
- Server EventEmitter emits TypedEvent (type + payload)
- WebSocket handler subscribes to events and broadcasts to all clients
- Frontend listens on multiple WebSocket subscriptions and updates stores
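A minimal sketch of that broadcast path, assuming a simple `TypedEvent` shape; the real wiring lives in the server's event and WebSocket setup.

```typescript
import { EventEmitter } from 'node:events';
import { WebSocketServer, WebSocket } from 'ws';

// Assumed event shape: a type tag plus an arbitrary payload.
interface TypedEvent {
  type: string;     // e.g. 'agent-started', 'feature-updated'
  payload: unknown;
}

// Fan every emitted TypedEvent out to all connected WebSocket clients.
export function bridgeEventsToWebSocket(events: EventEmitter, wss: WebSocketServer): void {
  events.on('event', (event: TypedEvent) => {
    const message = JSON.stringify(event);
    for (const client of wss.clients) {
      if (client.readyState === WebSocket.OPEN) {
        client.send(message);
      }
    }
  });
}

// Usage: events.emit('event', { type: 'agent-started', payload: { featureId: 'f1' } });
```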
## Key Abstractions
**Feature:**
- Purpose: Represents a development task/story with rich metadata
- Location: `@automaker/types` (`libs/types/src/feature.ts`)
- Fields: id, title, description, status, images, tasks, priority, etc.
- Stored: `.automaker/features/{featureId}/feature.json`
**Provider:**
- Purpose: Abstracts different AI model implementations
- Location: `apps/server/src/providers/{provider}-provider.ts`
- Interface: Common execute() method with consistent message format
- Implementations: Claude, Cursor, Codex, Gemini, OpenCode, Copilot
- Factory: ProviderFactory picks correct provider based on model ID
**Event:**
- Purpose: Real-time updates streamed to frontend
- Location: `@automaker/types` (`libs/types/src/event.ts`)
- Format: { type: EventType, payload: unknown }
- Examples: agent-started, agent-step, agent-complete, feature-updated, etc.
**AgentSession:**
- Purpose: Represents a conversation between user and AI agent
- Location: `@automaker/types` (`libs/types/src/session.ts`)
- Contains: Messages (user + assistant), metadata, creation timestamp
- Stored: `{DATA_DIR}/agent-sessions/{sessionId}.json`
**Settings:**
- Purpose: Configuration for global and per-project behavior
- Location: `@automaker/types` (`libs/types/src/settings.ts`)
- Stored: Global in `{DATA_DIR}/settings.json`, per-project in `.automaker/settings.json`
- Service: SettingsService in `apps/server/src/services/settings-service.ts`
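A hedged, abbreviated sketch of two of these shapes; the field lists are assumptions and the authoritative definitions in `libs/types/src/` contain more fields.

```typescript
// Abbreviated sketch only; see @automaker/types for the real definitions.
type FeatureStatus = 'backlog' | 'in-progress' | 'approval' | 'verified'; // assumed values

interface Feature {
  id: string;
  title: string;
  description: string;
  status: FeatureStatus;   // pipeline column
  images?: string[];
  tasks?: string[];
  priority?: number;
}

interface AgentSession {
  id: string;
  createdAt: string;       // ISO timestamp
  messages: Array<{ role: 'user' | 'assistant'; content: string }>;
}
```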
## Entry Points
**Server:**
- Location: `apps/server/src/index.ts`
- Triggers: `npm run dev:server` or Docker startup
- Responsibilities:
- Initialize Express app with middleware
- Create shared EventEmitter for WebSocket streaming
- Bootstrap services (SettingsService, AgentService, FeatureLoader, etc.)
- Mount API routes at `/api/*`
- Create WebSocket servers for agent streaming and terminal sessions
- Load and apply user settings (log level, request logging, etc.)
**UI (Web):**
- Location: `apps/ui/src/main.ts` (Vite entry), `apps/ui/src/app.tsx` (React component)
- Triggers: `npm run dev:web` or `npm run build`
- Responsibilities:
- Initialize Zustand stores from API settings
- Setup React Router with TanStack Router
- Render root layout with sidebar and main content area
- Handle authentication via verifySession()
**UI (Electron):**
- Location: `apps/ui/src/main.ts` (Vite entry), `apps/ui/electron/main-process.ts` (Electron main process)
- Triggers: `npm run dev:electron`
- Responsibilities:
- Launch local server via node-pty
- Create native Electron window
- Bridge IPC between renderer and main process
- Provide file system access via preload.ts APIs
## Error Handling
**Strategy:** Layered error classification and user-friendly messaging
**Patterns:**
**Backend Error Handling:**
- Errors classified via `classifyError()` from @automaker/utils
- Classification: ParseError, NetworkError, AuthenticationError, RateLimitError, etc.
- Response format: `{ success: false, error: { type, message, code }, details? }`
- Example: `apps/server/src/lib/error-handler.ts`
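A hedged sketch of that response shape as Express error middleware; the `classify()` helper below is a stand-in for `classifyError()` from `@automaker/utils`, whose real signature may differ.

```typescript
import type { NextFunction, Request, Response } from 'express';

// Assumed classification result; the real classifyError() may return a richer object.
interface ClassifiedError {
  type: 'ParseError' | 'NetworkError' | 'AuthenticationError' | 'RateLimitError' | 'Unknown';
  message: string;
  code?: string;
}

// Stand-in for classifyError() from @automaker/utils.
function classify(err: unknown): ClassifiedError {
  const message = err instanceof Error ? err.message : String(err);
  if (/rate limit/i.test(message)) return { type: 'RateLimitError', message, code: '429' };
  if (/unauthorized|api key/i.test(message)) return { type: 'AuthenticationError', message };
  return { type: 'Unknown', message };
}

// Express error middleware returning the documented { success: false, error } shape.
export function errorHandler(err: unknown, _req: Request, res: Response, _next: NextFunction): void {
  const { type, message, code } = classify(err);
  res.status(type === 'AuthenticationError' ? 401 : 500).json({
    success: false,
    error: { type, message, code },
  });
}
```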
**Frontend Error Handling:**
- HTTP errors caught by api-fetch.ts with retry logic
- WebSocket disconnects trigger reconnection with exponential backoff
- Errors shown in toast notifications via `sonner` library
- Validation errors caught and displayed inline in forms
**Agent Execution Errors:**
- AgentService wraps provider calls in try-catch
- Aborts handled specially via `isAbortError()` check
- Rate limit errors trigger cooldown before retry
- Model-specific errors mapped to user guidance
## Cross-Cutting Concerns
**Logging:**
- Framework: @automaker/utils createLogger()
- Pattern: `const logger = createLogger('ModuleName')`
- Levels: ERROR, WARN, INFO, DEBUG (configurable via settings)
- Output: stdout (dev), files (production)
**Validation:**
- File path validation: @automaker/platform initAllowedPaths() enforces restrictions
- Model ID validation: @automaker/model-resolver resolveModelString()
- JSON schema validation: Manual checks in route handlers (no JSON schema lib)
- Authentication: Session token validation via validateWsConnectionToken()
**Authentication:**
- Frontend: Session token stored in httpOnly cookie
- Backend: authMiddleware checks token on protected routes
- WebSocket: validateWsConnectionToken() for upgrade requests
- Providers: API keys stored encrypted in `{DATA_DIR}/credentials.json`
**Internationalization:**
- Not detected - strings are English-only
**Performance:**
- Code splitting: File-based routing via TanStack Router
- Lazy loading: React.lazy() in route components
- Caching: React Query for HTTP requests (query-keys.ts defines cache strategy)
- Image optimization: Automatic base64 encoding for agent context
- State hydration: Settings loaded once at startup, synced via API
---
_Architecture analysis: 2026-01-27_


@@ -1,245 +0,0 @@
# Codebase Concerns
**Analysis Date:** 2026-01-27
## Tech Debt
**Loose Type Safety in Error Handling:**
- Issue: Multiple uses of `as any` type assertions bypass TypeScript safety, particularly in error context handling and provider responses
- Files: `apps/server/src/providers/claude-provider.ts` (lines 318-322), `apps/server/src/lib/error-handler.ts`, `apps/server/src/routes/settings/routes/update-global.ts`
- Impact: Errors could have unchecked properties; refactoring becomes risky without compiler assistance
- Fix approach: Replace `as any` with proper type guards and discriminated unions; create helper functions for safe property access
**Missing Test Coverage for Critical Services:**
- Issue: Several core services explicitly excluded from test coverage thresholds due to integration complexity
- Files: `apps/server/vitest.config.ts` (line 22), explicitly excluded: `claude-usage-service.ts`, `mcp-test-service.ts`, `cli-provider.ts`, `cursor-provider.ts`
- Impact: Usage tracking, MCP integration, and CLI detection could break undetected; regression detection is limited
- Fix approach: Create integration test fixtures for CLI providers; mock MCP SDK for mcp-test-service tests; add usage tracking unit tests with mocked API calls
**Unused/Stub TODO Item Processing:**
- Issue: TodoWrite tool implementation exists but is partially integrated; tool name constants scattered across codex provider
- Files: `apps/server/src/providers/codex-tool-mapping.ts`, `apps/server/src/providers/codex-provider.ts`
- Impact: Todo list updates may not synchronize properly with all providers; unclear which providers support TodoWrite
- Fix approach: Consolidate tool name constants; add provider capability flags for todo support
**`electron.ts` Size and Complexity:**
- Issue: Single 3741-line file handles all Electron IPC, native bindings, and communication
- Files: `apps/ui/src/lib/electron.ts`
- Impact: Difficult to test; hard to isolate bugs; changes require full testing of all features; potential memory overhead from monolithic file
- Fix approach: Split by responsibility (IPC, window management, file operations, debug tools); create separate bridge layers
## Known Bugs
**API Key Management Incomplete for Gemini:**
- Symptoms: Gemini API key verification endpoint not implemented despite other providers having verification
- Files: `apps/ui/src/components/views/settings-view/api-keys/hooks/use-api-key-management.ts` (line 122)
- Trigger: User tries to verify Gemini API key in settings
- Workaround: Key verification skipped for Gemini; settings page still accepts and stores key
**Orphaned Features Detection Vulnerable to False Negatives:**
- Symptoms: Features marked as orphaned when branch matching logic doesn't account for all scenarios
- Files: `apps/server/src/services/auto-mode-service.ts` (lines 5714-5773)
- Trigger: Features whose branches were manually switched or rebased
- Workaround: Manual cleanup via feature deletion; branch comparison is basic name matching only
**Terminal Themes Incomplete:**
- Symptoms: Light themes (solarizedlight, github) map to the same generic lightTheme; there are no dedicated implementations
- Files: `apps/ui/src/config/terminal-themes.ts` (lines 593-594)
- Trigger: User selects solarizedlight or github terminal theme
- Workaround: Uses generic light theme instead of specific scheme; visual appearance doesn't match expectation
## Security Considerations
**Process Environment Variable Exposure:**
- Risk: Child processes inherit all parent `process.env` including sensitive credentials (API keys, tokens)
- Files: `apps/server/src/providers/cursor-provider.ts` (line 993), `apps/server/src/providers/codex-provider.ts` (line 1099)
- Current mitigation: Dotenv provides isolation at app startup; selective env passing to some providers
- Recommendations: Use explicit allowlists for env vars passed to child processes (only pass REQUIRED_KEYS); audit all spawn calls for env handling; document which providers need which credentials
**Unvalidated Provider Tool Input:**
- Risk: Tool input from CLI providers (Cursor, Copilot, Codex) is partially validated through Record<string, unknown> patterns; execution context could be escaped
- Files: `apps/server/src/providers/codex-provider.ts` (lines 506-543), `apps/server/src/providers/tool-normalization.ts`
- Current mitigation: Status enums validated; tool names checked against allow-lists in some providers
- Recommendations: Implement comprehensive schema validation for all tool inputs before execution; use zod or similar for runtime validation; add security tests for injection patterns
**API Key Storage in Settings Files:**
- Risk: API keys stored in plaintext in `~/.automaker/settings.json` and `data/settings.json`; file permissions may not be restricted
- Files: `apps/server/src/services/settings-service.ts`, uses `atomicWriteJson` without file permission enforcement
- Current mitigation: Limited by file system permissions; Electron mode has single-user access
- Recommendations: Encrypt sensitive settings fields (apiKeys, tokens); use OS credential stores (Keychain/Credential Manager) for production; add file permission checks on startup
## Performance Bottlenecks
**Synchronous Feature Loading at Startup:**
- Problem: All features loaded synchronously at project load; blocks UI with 1000+ features
- Files: `apps/server/src/services/feature-loader.ts` (line 230 uses Promise.all, but directory enumeration is synchronous)
- Cause: Feature directory walk and JSON parsing is not paginated or lazy-loaded
- Improvement path: Implement lazy loading with pagination (load first 50, fetch more on scroll); add caching layer with TTL; move to background indexing; add feature count limits with warnings
**Auto-Mode Concurrency at Max Can Exceed Rate Limits:**
- Problem: maxConcurrency = 10 can quickly exhaust Claude API rate limits if all features execute simultaneously
- Files: `apps/server/src/services/auto-mode-service.ts` (line 2931 Promise.all for concurrent agents)
- Cause: No adaptive backoff; no API usage tracking before queuing; hint mentions reducing concurrency but doesn't enforce it
- Improvement path: Integrate with claude-usage-service to check remaining quota before starting features; implement exponential backoff on 429 errors; add per-model rate limit tracking
**Terminal Session Memory Leak Risk:**
- Problem: Terminal sessions accumulate in memory; expired sessions not cleaned up reliably
- Files: `apps/server/src/routes/terminal/common.ts` (line 66: cleanup runs every 5 minutes, but only for tokens)
- Cause: Cleanup interval is arbitrary; session map not bounded; no session lifespan limit
- Improvement path: Implement LRU eviction with max session count; reduce cleanup interval to 1 minute; add memory usage monitoring; auto-close idle sessions after 30 minutes
**Large File Content Loading Without Limits:**
- Problem: File content loaded entirely into memory; `describe-file.ts` truncates at 50KB but loads all content first
- Files: `apps/server/src/routes/context/routes/describe-file.ts` (line 128)
- Cause: Synchronous file read; no streaming; no check before reading large files
- Improvement path: Check file size before reading; stream large files; add file size warnings; implement chunked processing for analysis
## Fragile Areas
**Provider Factory Model Resolution:**
- Files: `apps/server/src/providers/provider-factory.ts`, `apps/server/src/providers/simple-query-service.ts`
- Why fragile: Each provider interprets model strings differently; no central registry; model aliases resolved at multiple layers (model-resolver, provider-specific maps, CLI validation)
- Safe modification: Add integration tests for each model alias per provider; create model capability matrix; centralize model validation before dispatch
- Test coverage: No dedicated tests; relies on E2E; no isolated unit tests for model resolution
**WebSocket Session Authentication:**
- Files: `apps/server/src/lib/auth.ts` (line 40 setInterval), `apps/server/src/index.ts` (token validation per message)
- Why fragile: Session tokens generated and validated at multiple points; no single source of truth; expiration is not atomic
- Safe modification: Add tests for token expiration edge cases; ensure cleanup removes all references; log all auth failures
- Test coverage: Auth middleware tested, but not session lifecycle
**Auto-Mode Feature State Machine:**
- Files: `apps/server/src/services/auto-mode-service.ts` (lines 465-600)
- Why fragile: Multiple states (running, queued, completed, error) managed across different methods; no explicit state transition validation; error recovery is defensive (catches all, logs, continues)
- Safe modification: Create explicit state enum with valid transitions; add invariant checks; unit test state transitions with all error cases
- Test coverage: Gaps in error recovery paths; no tests for concurrent state changes
## Scaling Limits
**Feature Count Scalability:**
- Current capacity: ~1000 features tested; beyond that, UI performance degrades and pagination becomes necessary
- Limit: 10K+ features cause >5s load times; memory usage ~100MB for metadata alone
- Scaling path: Implement feature database instead of file-per-feature; add ElasticSearch indexing for search; paginate API responses (50 per page); add feature archiving
**Concurrent Auto-Mode Executions:**
- Current capacity: maxConcurrency = 10 features; limited by Claude API rate limits
- Limit: Rate limit hits at ~4-5 simultaneous features with extended context (100K+ tokens)
- Scaling path: Implement token usage budgeting before feature start; queue features with estimated token cost; add provider-specific rate limit handling
**Terminal Session Count:**
- Current capacity: ~100 active terminal sessions per server
- Limit: Memory grows unbounded; no session count limit enforced
- Scaling path: Add max session count with least-recently-used eviction; implement session federation for distributed setup
**Worktree Disk Usage:**
- Current capacity: 10K worktrees (~20GB with typical repos)
- Limit: `.worktrees` directory grows without cleanup; old worktrees accumulate
- Scaling path: Add worktree TTL (delete if not used for 30 days); implement cleanup job; add quota warnings at 50/80% disk
## Dependencies at Risk
**node-pty Beta Version:**
- Risk: `node-pty@1.1.0-beta41` used for terminal emulation; beta status indicates possible instability
- Impact: Terminal features could break on minor platform changes; no guarantees on bug fixes
- Migration plan: Monitor releases for stable version; pin to specific commit if needed; test extensively on target platforms (macOS, Linux, Windows)
**@anthropic-ai/claude-agent-sdk 0.1.x:**
- Risk: Pre-1.0 version; SDK API may change in future releases; limited version stability guarantees
- Impact: Breaking changes could require significant refactoring; feature additions in SDK may not align with Automaker roadmap
- Migration plan: Pin to specific 0.1.x version; review SDK changelogs before upgrades; maintain SDK compatibility tests; consider fallback implementation for critical paths
**@openai/codex-sdk 0.77.x:**
- Risk: Codex model deprecated by OpenAI; SDK may be archived or unsupported
- Impact: Codex provider could become non-functional; error messages may not be actionable
- Migration plan: Monitor OpenAI roadmap for migration path; implement fallback to Claude for Codex requests; add deprecation warning in UI
**Express 5.2.x RC Stage:**
- Risk: Express 5 was still in its release-candidate phase at the time of this analysis; full stability not guaranteed
- Impact: Minor version updates could include breaking changes; middleware compatibility issues possible
- Migration plan: Maintain compatibility layer for Express 5 API; test with latest major before release; document any version-specific workarounds
## Missing Critical Features
**Persistent Session Storage:**
- Problem: Agent conversation sessions stored only in-memory; restart loses all chat history
- Blocks: Long-running analysis across server restarts; session recovery not possible
- Impact: Users must re-run entire analysis if server restarts; lost productivity
**Rate Limit Awareness:**
- Problem: No tracking of API usage relative to rate limits before executing features
- Blocks: Predictable concurrent feature execution; users frequently hit rate limits unexpectedly
- Impact: Feature execution fails with cryptic rate limit errors; poor user experience
**Feature Dependency Visualization:**
- Problem: Dependency-resolver package exists but no UI to visualize or manage dependencies
- Blocks: Users cannot plan feature order; complex dependencies not visible
- Impact: Features implemented in wrong order; blocking dependencies missed
## Test Coverage Gaps
**CLI Provider Integration:**
- What's not tested: Actual CLI execution paths; environment setup; error recovery from CLI crashes
- Files: `apps/server/src/providers/cli-provider.ts`, `apps/server/src/lib/cli-detection.ts`
- Risk: Changes to CLI handling could break silently; detection logic not validated on target platforms
- Priority: High - affects all CLI-based providers (Cursor, Copilot, Codex)
**Cursor Provider Platform-Specific Paths:**
- What's not tested: Windows/Linux Cursor installation detection; version directory parsing; APPDATA environment variable handling
- Files: `apps/server/src/providers/cursor-provider.ts` (lines 267-498)
- Risk: Platform-specific bugs not caught; Cursor detection fails on non-standard installations
- Priority: High - Cursor is primary provider; platform differences critical
**Event Hook System State Changes:**
- What's not tested: Concurrent hook execution; cleanup on server shutdown; webhook delivery retries
- Files: `apps/server/src/services/event-hook-service.ts` (line 248 Promise.allSettled)
- Risk: Hooks may not execute in expected order; memory not cleaned up; webhooks lost on failure
- Priority: Medium - affects automation workflows
**Error Classification for New Providers:**
- What's not tested: Each provider's unique error patterns mapped to ErrorType enum; new provider errors not classified
- Files: `apps/server/src/lib/error-handler.ts` (lines 58-80), each provider error mapping
- Risk: User sees generic "unknown error" instead of actionable message; categorization regresses with new providers
- Priority: Medium - impacts user experience
**Feature State Corruption Scenarios:**
- What's not tested: Concurrent feature updates; partial writes with power loss; JSON parsing recovery
- Files: `apps/server/src/services/feature-loader.ts`, `@automaker/utils` (atomicWriteJson)
- Risk: Feature data corrupted on concurrent access; recovery incomplete; no validation before use
- Priority: High - data loss risk
---
_Concerns audit: 2026-01-27_


@@ -1,255 +0,0 @@
# Coding Conventions
**Analysis Date:** 2026-01-27
## Naming Patterns
**Files:**
- kebab-case for class/service files: `auto-mode-service.ts`, `feature-loader.ts`, `claude-provider.ts`
- kebab-case for route/handler directories: `auto-mode/`, `features/`, `event-history/`
- kebab-case for utility files: `secure-fs.ts`, `sdk-options.ts`, `settings-helpers.ts`
- kebab-case for React components: `card.tsx`, `ansi-output.tsx`, `count-up-timer.tsx`
- kebab-case for hooks: `use-board-background-settings.ts`, `use-responsive-kanban.ts`, `use-test-logs.ts`
- kebab-case for store files: `app-store.ts`, `auth-store.ts`, `setup-store.ts`
- Organized by functionality: `routes/features/routes/list.ts`, `routes/features/routes/get.ts`
**Functions:**
- camelCase for all function names: `createEventEmitter()`, `getAutomakerDir()`, `executeQuery()`
- Verb-first for action functions: `buildPrompt()`, `classifyError()`, `loadContextFiles()`, `atomicWriteJson()`
- Prefix with `use` for React hooks: `useBoardBackgroundSettings()`, `useAppStore()`, `useUpdateProjectSettings()`
- Private methods prefixed with underscore: `_deleteOrphanedImages()`, `_migrateImages()`
**Variables:**
- camelCase for constants and variables: `featureId`, `projectPath`, `modelId`, `tempDir`
- UPPER_SNAKE_CASE for global constants/enums: `DEFAULT_MAX_CONCURRENCY`, `DEFAULT_PHASE_MODELS`
- Meaningful naming over abbreviations: `featureDirectory` not `fd`, `featureImages` not `img`
- Prefixes for computed values: `is*` for booleans: `isClaudeModel`, `isContainerized`, `isAutoLoginEnabled`
**Types:**
- PascalCase for interfaces and types: `Feature`, `ExecuteOptions`, `EventEmitter`, `ProviderConfig`
- Type files suffixed with `.d.ts`: `paths.d.ts`, `types.d.ts`
- Organized by domain: `src/store/types/`, `src/lib/`
- Re-export pattern from main package indexes: `export type { Feature };`
## Code Style
**Formatting:**
- Tool: Prettier 3.7.4
- Print width: 100 characters
- Tab width: 2 spaces
- Single quotes for strings
- Semicolons required
- Trailing commas: es5 (trailing in arrays/objects, not in params)
- Arrow functions always include parentheses: `(x) => x * 2`
- Line endings: LF (Unix)
- Bracket spacing: `{ key: value }`
**Linting:**
- Tool: ESLint (flat config in `apps/ui/eslint.config.mjs`)
- TypeScript ESLint plugin for `.ts`/`.tsx` files
- Recommended configs: `@eslint/js`, `@typescript-eslint/recommended`
- Unused variables warning with exception for parameters starting with `_`
- `@ts-ignore` comments are allowed only when accompanied by a description
- `@typescript-eslint/no-explicit-any` is warn-level (allow with caution)
## Import Organization
**Order:**
1. Node.js standard library: `import fs from 'fs/promises'`, `import path from 'path'`
2. Third-party packages: `import { describe, it } from 'vitest'`, `import { Router } from 'express'`
3. Shared packages (monorepo): `import type { Feature } from '@automaker/types'`, `import { createLogger } from '@automaker/utils'`
4. Local relative imports: `import { FeatureLoader } from './feature-loader.js'`, `import * as secureFs from '../lib/secure-fs.js'`
5. Type imports: separated with `import type { ... } from`
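An illustrative file header following this order; the specific modules are placeholders and the imports are shown purely to demonstrate grouping.

```typescript
// 1. Node.js standard library
import fs from 'fs/promises';
import path from 'path';

// 2. Third-party packages
import { Router } from 'express';

// 3. Shared monorepo packages
import { createLogger } from '@automaker/utils';

// 4. Local relative imports (note the .js extension for ESM)
import { FeatureLoader } from './feature-loader.js';

// 5. Type-only imports, separated with `import type`
import type { Feature } from '@automaker/types';
```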
**Path Aliases:**
- `@/` - resolves to `./src` in both UI (`apps/ui/`) and server (`apps/server/`)
- Shared packages prefixed with `@automaker/`:
- `@automaker/types` - core TypeScript definitions
- `@automaker/utils` - logging, errors, utilities
- `@automaker/prompts` - AI prompt templates
- `@automaker/platform` - path management, security, processes
- `@automaker/model-resolver` - model alias resolution
- `@automaker/dependency-resolver` - feature dependency ordering
- `@automaker/git-utils` - git operations
- Extensions: `.js` extension used in relative imports for ESM compatibility
**Import Rules:**
- Always import from shared packages, never from old paths
- No circular dependencies between layers
- Services import from providers and utilities
- Routes import from services
- Shared packages have strict dependency hierarchy (types → utils → platform → git-utils → server/ui)
## Error Handling
**Patterns:**
- Use `try-catch` blocks for async operations: wraps feature execution, file operations, git commands
- Throw `new Error(message)` with descriptive messages: `throw new Error('already running')`, `throw new Error('Feature ${featureId} not found')`
- Classify errors with `classifyError()` from `@automaker/utils` for categorization
- Log errors with context using `createLogger()`: includes error classification
- Return error info objects: `{ valid: false, errors: [...], warnings: [...] }`
- Validation returns structured result: `{ valid, errors, warnings }` from provider `validateConfig()`
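A minimal sketch of the structured validation result pattern; concrete provider `validateConfig()` implementations check more fields.

```typescript
// Structured validation result pattern: { valid, errors, warnings }.
interface ValidationResult {
  valid: boolean;
  errors: string[];
  warnings: string[];
}

// Illustrative config shape; real provider configs carry more options.
interface ProviderConfig {
  apiKey?: string;
  baseUrl?: string;
}

function validateConfig(config: ProviderConfig): ValidationResult {
  const errors: string[] = [];
  const warnings: string[] = [];

  if (!config.apiKey) {
    errors.push('apiKey is required');
  }
  if (config.baseUrl && !config.baseUrl.startsWith('https://')) {
    warnings.push('baseUrl is not HTTPS');
  }

  return { valid: errors.length === 0, errors, warnings };
}
```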
**Error Types:**
- Authentication errors: distinguish from validation/runtime errors
- Path validation errors: caught by middleware in Express routes
- File system errors: logged and recovery attempted with backups
- SDK/API errors: classified and wrapped with context
- Abort/cancellation errors: handled without stack traces (graceful shutdown)
**Error Messages:**
- Descriptive and actionable: not vague error codes
- Include context when helpful: file paths, feature IDs, model names
- User-friendly messages via `getUserFriendlyErrorMessage()` for client display
## Logging
**Framework:**
- Built-in `createLogger()` from `@automaker/utils`
- Each module creates logger: `const logger = createLogger('ModuleName')`
- Logger functions: `info()`, `warn()`, `error()`, `debug()`
**Patterns:**
- Log operation start and completion for significant operations
- Log warnings for non-critical issues: file deletion failures, missing optional configs
- Log errors with full error object: `logger.error('operation failed', error)`
- Use module name as logger context: `createLogger('AutoMode')`, `createLogger('HttpClient')`
- Avoid logging sensitive data (API keys, passwords)
- No console.log in production code - use logger
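A short usage sketch of this logging pattern; the module name and messages are illustrative.

```typescript
import { createLogger } from '@automaker/utils';

const logger = createLogger('FeatureLoader');

export async function migrateImages(featureId: string): Promise<void> {
  logger.info(`migrating images for feature ${featureId}`);
  try {
    // ...move image files here...
    logger.debug('image migration complete');
  } catch (error) {
    // Log the full error object with context; never log API keys or other secrets.
    logger.error('image migration failed', error);
    throw error;
  }
}
```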
**What to Log:**
- Feature execution start/completion
- Error classification and recovery attempts
- File operations (create, delete, migrate)
- API calls and responses (in debug mode)
- Async operation start/end
- Warnings for deprecated patterns
## Comments
**When to Comment:**
- Complex algorithms or business logic: explain the "why" not the "what"
- Integration points: explain how modules communicate
- Workarounds: explain the constraint that made the workaround necessary
- Non-obvious performance implications
- Edge cases and their handling
**JSDoc/TSDoc:**
- Used for public functions and classes
- Document parameters with `@param`
- Document return types with `@returns`
- Document exceptions with `@throws`
- Used for service classes: `/**\n * Module description\n * Manages: ...\n */`
- Not required for simple getters/setters
**Example JSDoc Pattern:**
```typescript
/**
* Delete images that were removed from a feature
*/
private async deleteOrphanedImages(
projectPath: string,
oldPaths: Array<string>,
newPaths: Array<string>
): Promise<void> {
// Implementation
}
```
## Function Design
**Size:**
- Keep functions under 100 lines when possible
- Large services split into multiple related methods
- Private helper methods extracted for complex logic
**Parameters:**
- Use destructuring for object parameters with multiple properties
- Document parameter types with TypeScript types
- Optional parameters marked with `?`
- Use `Record<string, unknown>` for flexible object parameters
**Return Values:**
- Explicit return types required for all public functions
- Return structured objects for multiple values
- Use `Promise<T>` for async functions
- Async generators use `AsyncGenerator<T>` for streaming responses
- Never implicitly return `undefined` (explicit return or throw)
## Module Design
**Exports:**
- Default export for class instantiation: `export default class FeatureLoader {}`
- Named exports for functions: `export function createEventEmitter() {}`
- Type exports separated: `export type { Feature };`
- Barrel files (index.ts) re-export from module
**Barrel Files:**
- Used in routes: `routes/features/index.ts` creates router and exports
- Used in stores: `store/index.ts` exports all store hooks
- Pattern: group related exports for easier importing
**Service Classes:**
- Instantiated once and dependency injected
- Public methods for API surface
- Private methods prefixed with `_`
- No static methods - prefer instances or functions
- Constructor takes dependencies: `constructor(config?: ProviderConfig)`
**Provider Pattern:**
- Abstract base class: `BaseProvider` with abstract methods
- Concrete implementations: `ClaudeProvider`, `CodexProvider`, `CursorProvider`
- Common interface: `executeQuery()`, `detectInstallation()`, `validateConfig()`
- Factory for instantiation: `ProviderFactory.create()`
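A condensed sketch of the provider pattern with simplified signatures; the real `BaseProvider` interface and factory take more options and consult the model resolver.

```typescript
// Simplified sketch; real signatures and message shapes differ.
interface ProviderMessage {
  role: 'assistant' | 'tool';
  content: string;
}

abstract class BaseProvider {
  constructor(protected config?: { apiKey?: string }) {}

  // Streaming execution via an async generator.
  abstract executeQuery(prompt: string): AsyncGenerator<ProviderMessage>;
  abstract detectInstallation(): Promise<boolean>;
}

class ClaudeProvider extends BaseProvider {
  async *executeQuery(prompt: string): AsyncGenerator<ProviderMessage> {
    // Placeholder: a real implementation streams messages from the Claude Agent SDK.
    yield { role: 'assistant', content: `echo: ${prompt}` };
  }

  async detectInstallation(): Promise<boolean> {
    return Boolean(this.config?.apiKey);
  }
}

class ProviderFactory {
  // Dispatch on the model id; the real factory consults @automaker/model-resolver.
  static create(modelId: string): BaseProvider {
    if (modelId.startsWith('claude')) return new ClaudeProvider();
    throw new Error(`No provider registered for model: ${modelId}`);
  }
}

export { BaseProvider, ClaudeProvider, ProviderFactory };
```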
## TypeScript Specific
**Strict Mode:** Always enabled globally
- `strict: true` in all tsconfigs
- No implicit `any` - declare types explicitly
- No optional chaining on base types without narrowing
**Type Definitions:**
- Interface for shapes: `interface Feature { ... }`
- Type for unions/aliases: `type ModelAlias = 'haiku' | 'sonnet' | 'opus'`
- Type guards for narrowing: `if (typeof x === 'string') { ... }`
- Generic types for reusable patterns: `EventCallback<T>`
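Small, self-contained examples of the type-guard and generic patterns listed above.

```typescript
type ModelAlias = 'haiku' | 'sonnet' | 'opus';

// Type guard narrowing an unknown value to the ModelAlias union.
function isModelAlias(value: unknown): value is ModelAlias {
  return value === 'haiku' || value === 'sonnet' || value === 'opus';
}

// Generic callback type for reusable, typed event subscriptions.
type EventCallback<T> = (payload: T) => void;

export const seen: string[] = [];
const onModelSelected: EventCallback<ModelAlias> = (alias) => {
  seen.push(alias);
};

const input: unknown = 'sonnet';
if (isModelAlias(input)) {
  // input is narrowed from unknown to ModelAlias inside this block.
  onModelSelected(input);
}
```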
**React Specific (UI):**
- Functional components only
- React 19 with hooks
- Type props interface: `interface CardProps extends React.ComponentProps<'div'> { ... }`
- Zustand stores for state management
- Custom hooks for shared logic
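A minimal typed functional component following these conventions; the component and its props are illustrative.

```tsx
import * as React from 'react';

interface CardProps extends React.ComponentProps<'div'> {
  title: string;
}

// Functional component with a typed props interface; remaining div props are forwarded.
export function Card({ title, children, ...rest }: CardProps) {
  return (
    <div {...rest}>
      <h3>{title}</h3>
      {children}
    </div>
  );
}
```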
---
_Convention analysis: 2026-01-27_


@@ -1,232 +0,0 @@
# External Integrations
**Analysis Date:** 2026-01-27
## APIs & External Services
**AI/LLM Providers:**
- Claude (Anthropic)
- SDK: `@anthropic-ai/claude-agent-sdk` (0.1.76)
- Auth: `ANTHROPIC_API_KEY` environment variable or stored credentials
- Features: Extended thinking, vision/images, tools, streaming
- Implementation: `apps/server/src/providers/claude-provider.ts`
- Models: Opus 4.5, Sonnet 4, Haiku 4.5, and legacy models
- Custom endpoints: `ANTHROPIC_BASE_URL` (optional)
- GitHub Copilot
- SDK: `@github/copilot-sdk` (0.1.16)
- Auth: GitHub OAuth (via `gh` CLI) or `GITHUB_TOKEN` environment variable
- Features: Tools, streaming, runtime model discovery
- Implementation: `apps/server/src/providers/copilot-provider.ts`
- CLI detection: Searches for Copilot CLI binary
- Models: Dynamic discovery via `copilot models list`
- OpenAI Codex/GPT-4
- SDK: `@openai/codex-sdk` (0.77.0)
- Auth: `OPENAI_API_KEY` environment variable or stored credentials
- Features: Extended thinking, tools, sandbox execution
- Implementation: `apps/server/src/providers/codex-provider.ts`
- Execution modes: CLI (with sandbox) or SDK (direct API)
- Models: Dynamic discovery via Codex CLI or SDK
- Google Gemini
- Implementation: `apps/server/src/providers/gemini-provider.ts`
- Features: Vision support, tools, streaming
- OpenCode (AWS/Azure/other)
- Implementation: `apps/server/src/providers/opencode-provider.ts`
- Supports: Amazon Bedrock, Azure models, local models
- Features: Flexible provider architecture
- Cursor Editor
- Implementation: `apps/server/src/providers/cursor-provider.ts`
- Features: Integration with Cursor IDE
**Model Context Protocol (MCP):**
- SDK: `@modelcontextprotocol/sdk` (1.25.2)
- Purpose: Connect AI agents to external tools and data sources
- Implementation: `apps/server/src/services/mcp-test-service.ts`, `apps/server/src/routes/mcp/`
- Configuration: Per-project in `.automaker/` directory
## Data Storage
**Databases:**
- None - This codebase does NOT use traditional databases (SQL/NoSQL)
- All data stored as files in local filesystem
**File Storage:**
- Local filesystem only
- Locations:
- `.automaker/` - Project-specific data (features, context, settings)
- `./data/` or `DATA_DIR` env var - Global data (settings, credentials, sessions)
- Secure file operations: `@automaker/platform` exports `secureFs` for restricted file access
**Caching:**
- In-memory caches for:
- Model lists (Copilot, Codex runtime discovery)
- Feature metadata
- Project specifications
- No distributed/persistent caching system
## Authentication & Identity
**Auth Provider:**
- Custom implementation (no third-party provider)
- Authentication methods:
1. Claude Max Plan (OAuth via Anthropic CLI)
2. API Key mode (ANTHROPIC_API_KEY)
3. Custom provider profiles with API keys
4. Token-based session authentication for WebSocket
**Implementation:**
- `apps/server/src/lib/auth.ts` - Auth middleware
- `apps/server/src/routes/auth/` - Auth routes
- Session tokens for WebSocket connections
- Credential storage in `./data/credentials.json` (encrypted/protected)
## Monitoring & Observability
**Error Tracking:**
- None - No automatic error reporting service integrated
- Custom error classification: `@automaker/utils` exports `classifyError()`
- User-friendly error messages: `getUserFriendlyErrorMessage()`
**Logs:**
- Console logging with configurable levels
- Logger: `@automaker/utils` exports `createLogger()`
- Log levels: ERROR, WARN, INFO, DEBUG
- Environment: `LOG_LEVEL` env var (optional)
- Storage: Logs output to console/stdout (no persistent logging to files)
**Usage Tracking:**
- Claude API usage: `apps/server/src/services/claude-usage-service.ts`
- Codex API usage: `apps/server/src/services/codex-usage-service.ts`
- Tracks: Tokens, costs, rates
## CI/CD & Deployment
**Hosting:**
- Local development: Node.js server + Vite dev server
- Desktop: Electron application (macOS, Windows, Linux)
- Web: Express server deployed to any Node.js host
**CI Pipeline:**
- GitHub Actions likely (`.github/workflows/` present in repo)
- Testing: Playwright E2E, Vitest unit tests
- Linting: ESLint
- Formatting: Prettier
**Build Process:**
- `npm run build:packages` - Build shared packages
- `npm run build` - Build web UI
- `npm run build:electron` - Build Electron apps (platform-specific)
- Electron Builder handles code signing and distribution
## Environment Configuration
**Required env vars:**
- `ANTHROPIC_API_KEY` - For Claude provider (or provide in settings)
- `OPENAI_API_KEY` - For Codex provider (optional)
- `GITHUB_TOKEN` - For GitHub operations (optional)
**Optional env vars:**
- `PORT` - Server port (default 3008)
- `HOST` - Server bind address (default 0.0.0.0)
- `HOSTNAME` - Public hostname (default localhost)
- `DATA_DIR` - Data storage directory (default ./data)
- `ANTHROPIC_BASE_URL` - Custom Claude endpoint
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to directory
- `AUTOMAKER_MOCK_AGENT` - Enable mock agent for testing
- `AUTOMAKER_AUTO_LOGIN` - Skip login prompt in dev
**Secrets location:**
- Runtime: Environment variables (`process.env`)
- Stored: `./data/credentials.json` (file-based)
- Retrieval: `apps/server/src/services/settings-service.ts`
## Webhooks & Callbacks
**Incoming:**
- WebSocket connections for real-time agent event streaming
- GitHub webhook routes (optional): `apps/server/src/routes/github/`
- Terminal WebSocket connections: `apps/server/src/routes/terminal/`
**Outgoing:**
- GitHub PRs: `apps/server/src/routes/worktree/routes/create-pr.ts`
- Git operations: `@automaker/git-utils` handles commits, pushes
- Terminal output streaming via WebSocket to clients
- Event hooks: `apps/server/src/services/event-hook-service.ts`
## Credential Management
**API Keys Storage:**
- File: `./data/credentials.json`
- Format: JSON with nested structure for different providers
```json
{
  "apiKeys": {
    "anthropic": "sk-...",
    "openai": "sk-...",
    "github": "ghp_..."
  }
}
```
- Access: `SettingsService.getCredentials()` from `apps/server/src/services/settings-service.ts`
- Security: File permissions should restrict to current user only
**Profile/Provider Configuration:**
- File: `./data/settings.json` (global) or `.automaker/settings.json` (per-project)
- Stores: Alternative provider profiles, model mappings, sandbox settings
- Types: `ClaudeApiProfile`, `ClaudeCompatibleProvider` from `@automaker/types`
## Third-Party Service Integration Points
**Git/GitHub:**
- `@automaker/git-utils` - Git operations (worktrees, commits, diffs)
- Codex/Cursor providers can create GitHub PRs
- GitHub CLI (`gh`) detection for Copilot authentication
**Terminal Access:**
- `node-pty` (1.1.0-beta41) - Pseudo-terminal interface
- `TerminalService` manages terminal sessions
- WebSocket streaming to frontend
**AI Models - Multi-Provider Abstraction:**
- `BaseProvider` interface: `apps/server/src/providers/base-provider.ts`
- Factory pattern: `apps/server/src/providers/provider-factory.ts`
- Allows swapping providers without changing agent logic
- All providers implement: `executeQuery()`, `detectInstallation()`, `getAvailableModels()`
**Process Spawning:**
- `@automaker/platform` exports `spawnProcess()`, `spawnJSONLProcess()`
- Codex CLI execution: JSONL output parsing
- Copilot CLI execution: Subprocess management
- Cursor IDE interaction: Process spawning for tool execution
---
_Integration audit: 2026-01-27_


@@ -1,230 +0,0 @@
# Technology Stack
**Analysis Date:** 2026-01-27
## Languages
**Primary:**
- TypeScript 5.9.3 - Used across all packages, apps, and configuration
- JavaScript (Node.js) - Runtime execution for scripts and tooling
**Secondary:**
- YAML - Configuration files (parsed via the `yaml` 2.7.0 package)
- CSS/Tailwind CSS 4.1.18 - Frontend styling
## Runtime
**Environment:**
- Node.js 22.x (>=22.0.0 <23.0.0) - Required version, specified in `.nvmrc`
**Package Manager:**
- npm - Monorepo workspace management via npm workspaces
- Lockfile: `package-lock.json` (present)
## Frameworks
**Core - Frontend:**
- React 19.2.3 - UI framework with hooks and concurrent features
- Vite 7.3.0 - Build tool and dev server (`apps/ui/vite.config.ts`)
- Electron 39.2.7 - Desktop application runtime (`apps/ui/package.json`)
- TanStack Router 1.141.6 - File-based routing (React)
- Zustand 5.0.9 - State management (lightweight alternative to Redux)
- TanStack Query (React Query) 5.90.17 - Server state management
**Core - Backend:**
- Express 5.2.1 - HTTP server framework (`apps/server/package.json`)
- WebSocket (ws) 8.18.3 - Real-time bidirectional communication
- Claude Agent SDK (@anthropic-ai/claude-agent-sdk) 0.1.76 - AI provider integration
**Testing:**
- Playwright 1.57.0 - End-to-end testing (`apps/ui` E2E tests)
- Vitest 4.0.16 - Unit testing framework (runs on all packages and server)
- @vitest/ui 4.0.16 - Visual test runner UI
- @vitest/coverage-v8 4.0.16 - Code coverage reporting
**Build/Dev:**
- electron-builder 26.0.12 - Electron app packaging and distribution
- @vitejs/plugin-react 5.1.2 - Vite React support
- vite-plugin-electron 0.29.0 - Vite plugin for Electron main process
- vite-plugin-electron-renderer 0.14.6 - Vite plugin for Electron renderer
- ESLint 9.39.2 - Code linting (`apps/ui`)
- @typescript-eslint/eslint-plugin 8.50.0 - TypeScript ESLint rules
- Prettier 3.7.4 - Code formatting (root-level config)
- Tailwind CSS 4.1.18 - Utility-first CSS framework
- @tailwindcss/vite 4.1.18 - Tailwind Vite integration
**UI Components & Libraries:**
- Radix UI - Unstyled accessible component library (@radix-ui packages)
- react-dropdown-menu 2.1.16
- react-dialog 1.1.15
- react-select 2.2.6
- react-tooltip 1.2.8
- react-tabs 1.1.13
- react-collapsible 1.1.12
- react-checkbox 1.3.3
- react-radio-group 1.3.8
- react-popover 1.1.15
- react-slider 1.3.6
- react-switch 1.2.6
- react-scroll-area 1.2.10
- react-label 2.1.8
- Lucide React 0.562.0 - Icon library
- Geist 1.5.1 - Design system UI library
- Sonner 2.0.7 - Toast notifications
**Code Editor & Terminal:**
- @uiw/react-codemirror 4.25.4 - Code editor React component
- CodeMirror (@codemirror packages) 6.x - Editor toolkit
- xterm.js (@xterm/xterm) 5.5.0 - Terminal emulator
- @xterm/addon-fit 0.10.0 - Fit addon for terminal
- @xterm/addon-search 0.15.0 - Search addon for terminal
- @xterm/addon-web-links 0.11.0 - Web links addon
- @xterm/addon-webgl 0.18.0 - WebGL renderer for terminal
**Diagram/Graph Visualization:**
- @xyflow/react 12.10.0 - React flow diagram library
- dagre 0.8.5 - Graph layout algorithms
**Markdown/Content Rendering:**
- react-markdown 10.1.0 - Markdown parser and renderer
- remark-gfm 4.0.1 - GitHub Flavored Markdown support
- rehype-raw 7.0.0 - Raw HTML support in markdown
- rehype-sanitize 6.0.0 - HTML sanitization
**Data Validation & Parsing:**
- zod 3.24.1 or 4.0.0 - Schema validation and TypeScript type inference
**Utilities:**
- class-variance-authority 0.7.1 - CSS variant utilities
- clsx 2.1.1 - Conditional className utility
- cmdk 1.1.1 - Command menu/palette
- tailwind-merge 3.4.0 - Tailwind CSS conflict resolution
- usehooks-ts 3.1.1 - TypeScript React hooks
- @dnd-kit (drag-and-drop) 6.3.1 - Drag and drop library
**Font Libraries:**
- @fontsource - Web font packages (Cascadia Code, Fira Code, IBM Plex, Inconsolata, Inter, etc.)
**Development Utilities:**
- cross-spawn 7.0.6 - Cross-platform process spawning
- dotenv 17.2.3 - Environment variable loading
- tsx 4.21.0 - TypeScript execution for Node.js
- tree-kill 1.2.2 - Process tree killer utility
- node-pty 1.1.0-beta41 - PTY/terminal interface for Node.js
## Key Dependencies
**Critical - AI/Agent Integration:**
- @anthropic-ai/claude-agent-sdk 0.1.76 - Core Claude AI provider
- @github/copilot-sdk 0.1.16 - GitHub Copilot integration
- @openai/codex-sdk 0.77.0 - OpenAI Codex/GPT-4 integration
- @modelcontextprotocol/sdk 1.25.2 - Model Context Protocol servers
**Infrastructure - Internal Packages:**
- @automaker/types 1.0.0 - Shared TypeScript type definitions
- @automaker/utils 1.0.0 - Logging, error handling, utilities
- @automaker/platform 1.0.0 - Path management, security, process spawning
- @automaker/prompts 1.0.0 - AI prompt templates
- @automaker/model-resolver 1.0.0 - Claude model alias resolution
- @automaker/dependency-resolver 1.0.0 - Feature dependency ordering
- @automaker/git-utils 1.0.0 - Git operations & worktree management
- @automaker/spec-parser 1.0.0 - Project specification parsing
**Server Utilities:**
- express 5.2.1 - Web framework
- cors 2.8.5 - CORS middleware
- morgan 1.10.1 - HTTP request logger
- cookie-parser 1.4.7 - Cookie parsing middleware
- yaml 2.7.0 - YAML parsing and generation
**Type Definitions:**
- @types/express 5.0.6
- @types/node 22.19.3
- @types/react 19.2.7
- @types/react-dom 19.2.3
- @types/dagre 0.7.53
- @types/ws 8.18.1
- @types/cookie 0.6.0
- @types/cookie-parser 1.4.10
- @types/cors 2.8.19
- @types/morgan 1.9.10
**Optional Dependencies (Platform-specific):**
- lightningcss (various platforms) 1.29.2 - CSS parser (alternative to PostCSS)
- dmg-license 1.0.11 - DMG license dialog for macOS
## Configuration
**Environment:**
- `.env` and `.env.example` files in `apps/server/` and `apps/ui/`
- `dotenv` library loads variables from `.env` files
- Key env vars:
- `ANTHROPIC_API_KEY` - Claude API authentication
- `OPENAI_API_KEY` - OpenAI/Codex authentication
- `GITHUB_TOKEN` - GitHub API access
- `ANTHROPIC_BASE_URL` - Custom Claude endpoint (optional)
- `HOST` - Server bind address (default: 0.0.0.0)
- `HOSTNAME` - Hostname for URLs (default: localhost)
- `PORT` - Server port (default: 3008)
- `DATA_DIR` - Data storage directory (default: ./data)
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations
- `AUTOMAKER_MOCK_AGENT` - Enable mock agent for testing
- `AUTOMAKER_AUTO_LOGIN` - Skip login in dev (disabled in production)
- `VITE_HOSTNAME` - Frontend API hostname
**Build:**
- `apps/ui/electron-builder.config.json` or `apps/ui/package.json` build config
- Electron builder targets:
- macOS: DMG and ZIP
- Windows: NSIS installer
- Linux: AppImage, DEB, RPM
- Vite config: `apps/ui/vite.config.ts`, `apps/server/tsconfig.json`
- TypeScript config: `tsconfig.json` files in each package
## Platform Requirements
**Development:**
- Node.js 22.x
- npm (included with Node.js)
- Git (for worktree operations)
- Python (optional, for some dev scripts)
**Production:**
- Electron desktop app: Windows, macOS, Linux
- Web browser: Modern Chromium-based browsers
- Server: Any platform supporting Node.js 22.x
**Deployment Target:**
- Local desktop (Electron)
- Local web server (Express + Vite)
- Remote server deployment (Docker, systemd, or other orchestration)
---
_Stack analysis: 2026-01-27_


@@ -1,340 +0,0 @@
# Codebase Structure
**Analysis Date:** 2026-01-27
## Directory Layout
```
automaker/
├── apps/ # Application packages
│ ├── ui/ # React + Electron frontend (port 3007)
│ │ ├── src/
│ │ │ ├── main.ts # Electron/Vite entry point
│ │ │ ├── app.tsx # Root React component (splash, router)
│ │ │ ├── renderer.tsx # Electron renderer entry
│ │ │ ├── routes/ # TanStack Router file-based routes
│ │ │ ├── components/ # React components (views, dialogs, UI, layout)
│ │ │ ├── store/ # Zustand state management
│ │ │ ├── hooks/ # Custom React hooks
│ │ │ ├── lib/ # Utilities (API client, electron, queries, etc.)
│ │ │ ├── electron/ # Electron main & preload process files
│ │ │ ├── config/ # UI configuration (fonts, themes, routes)
│ │ │ └── styles/ # CSS and theme files
│ │ ├── public/ # Static assets
│ │ └── tests/ # E2E Playwright tests
│ │
│ └── server/ # Express backend (port 3008)
│ ├── src/
│ │ ├── index.ts # Express app initialization, route mounting
│ │ ├── routes/ # REST API endpoints (30+ route folders)
│ │ ├── services/ # Business logic services
│ │ ├── providers/ # AI model provider implementations
│ │ ├── lib/ # Utilities (events, auth, helpers, etc.)
│ │ ├── middleware/ # Express middleware
│ │ └── types/ # Server-specific type definitions
│ └── tests/ # Unit tests (Vitest)
├── libs/ # Shared npm packages (@automaker/*)
│ ├── types/ # @automaker/types (no dependencies)
│ │ └── src/
│ │ ├── index.ts # Main export with all type definitions
│ │ ├── feature.ts # Feature, FeatureStatus, etc.
│ │ ├── provider.ts # Provider interfaces, model definitions
│ │ ├── settings.ts # Global and project settings types
│ │ ├── event.ts # Event types for real-time updates
│ │ ├── session.ts # AgentSession, conversation types
│ │ ├── model*.ts # Model-specific types (cursor, codex, gemini, etc.)
│ │ └── ... 20+ more type files
│ │
│ ├── utils/ # @automaker/utils (logging, errors, images, context)
│ │ └── src/
│ │ ├── logger.ts # createLogger() with LogLevel enum
│ │ ├── errors.ts # classifyError(), error types
│ │ ├── image-utils.ts # Image processing, base64 encoding
│ │ ├── context-loader.ts # loadContextFiles() for AI prompts
│ │ └── ... more utilities
│ │
│ ├── platform/ # @automaker/platform (paths, security, OS)
│ │ └── src/
│ │ ├── index.ts # Path getters (getFeatureDir, getFeaturesDir, etc.)
│ │ ├── secure-fs.ts # Secure filesystem operations
│ │ └── config/ # Claude auth detection, allowed paths
│ │
│ ├── prompts/ # @automaker/prompts (AI prompt templates)
│ │ └── src/
│ │ ├── index.ts # Main prompts export
│ │ └── *-prompt.ts # Prompt templates for different features
│ │
│ ├── model-resolver/ # @automaker/model-resolver
│ │ └── src/
│ │ └── index.ts # resolveModelString() for model aliases
│ │
│ ├── dependency-resolver/ # @automaker/dependency-resolver
│ │ └── src/
│ │ └── index.ts # Resolve feature dependencies
│ │
│ ├── git-utils/ # @automaker/git-utils (git operations)
│ │ └── src/
│ │ ├── index.ts # getGitRepositoryDiffs(), worktree management
│ │ └── ... git helpers
│ │
│ ├── spec-parser/ # @automaker/spec-parser
│ │ └── src/
│ │ └── ... spec parsing utilities
│ │
│ └── tsconfig.base.json # Base TypeScript config for all packages
├── .automaker/ # Project data directory (created by app)
│ ├── features/ # Feature storage
│ │ └── {featureId}/
│ │ ├── feature.json # Feature metadata and content
│ │ ├── agent-output.md # Agent execution results
│ │ └── images/ # Feature images
│ ├── context/ # Context files (CLAUDE.md, etc.)
│ ├── settings.json # Per-project settings
│ ├── spec.md # Project specification
│ └── analysis.json # Project structure analysis
├── data/ # Global data directory (default, configurable)
│ ├── settings.json # Global settings, profiles
│ ├── credentials.json # Encrypted API keys
│ ├── sessions-metadata.json # Chat session metadata
│ └── agent-sessions/ # Conversation histories
├── .planning/ # Generated documentation by GSD orchestrator
│ └── codebase/ # Codebase analysis documents
│ ├── ARCHITECTURE.md # Architecture patterns and layers
│ ├── STRUCTURE.md # This file
│ ├── STACK.md # Technology stack
│ ├── INTEGRATIONS.md # External API integrations
│ ├── CONVENTIONS.md # Code style and naming
│ ├── TESTING.md # Testing patterns
│ └── CONCERNS.md # Technical debt and issues
├── .github/ # GitHub Actions workflows
├── scripts/ # Build and utility scripts
├── tests/ # Test data and utilities
├── docs/ # Documentation
├── package.json # Root workspace config
├── package-lock.json # Lock file
├── CLAUDE.md # Project instructions for Claude Code
├── DEVELOPMENT_WORKFLOW.md # Development guidelines
└── README.md # Project overview
```
## Directory Purposes
**apps/ui/:**
- Purpose: React frontend for desktop (Electron) and web modes
- Build system: Vite 7 with TypeScript
- Styling: Tailwind CSS 4
- State: Zustand 5 with API persistence
- Routing: TanStack Router with file-based structure
- Desktop: Electron 39 with preload IPC bridge
**apps/server/:**
- Purpose: Express backend API and service layer
- Build system: TypeScript → JavaScript
- Runtime: Node.js 18+
- WebSocket: ws library for real-time streaming
- Process management: node-pty for terminal isolation
**libs/types/:**
- Purpose: Central type definitions (no dependencies, fast import)
- Used by: All other packages and apps
- Pattern: Single namespace export from index.ts
- Build: Compiled to ESM only
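The single-namespace export amounts to a barrel file; a simplified sketch of `libs/types/src/index.ts` (file names taken from the layout above, order illustrative):

```typescript
// libs/types/src/index.ts - consumers import everything from '@automaker/types'
export * from './feature.js';
export * from './provider.js';
export * from './settings.js';
export * from './event.js';
export * from './session.js';
// ...remaining type modules are re-exported the same way
```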
**libs/utils/:**
- Purpose: Shared utilities for logging, errors, file operations, image processing
- Used by: Server, UI, other libraries
- Notable: `createLogger()`, `classifyError()`, `loadContextFiles()`, `readImageAsBase64()`
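A hedged usage sketch of these utilities; signatures are assumed from how they are imported and called elsewhere in these docs (`createLogger('Server')`, `classifyError(error).category`, `setLogLevel(LogLevel...)`):

```typescript
import { createLogger, classifyError, setLogLevel, LogLevel } from '@automaker/utils';

const logger = createLogger('FeatureLoader');
setLogLevel(LogLevel.INFO);

export function reportFailure(error: Error): void {
  // classifyError() buckets errors (e.g., 'FileSystem' for ENOENT) per the test examples below
  const info = classifyError(error);
  logger.warn(`Operation failed (category: ${info.category})`, error);
}
```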
**libs/platform/:**
- Purpose: OS-agnostic path management and security enforcement
- Used by: Server services for file operations
- Notable: Path normalization, allowed directory enforcement, Claude auth detection
**libs/prompts/:**
- Purpose: AI prompt templates injected into agent context
- Used by: AgentService when executing features
- Pattern: Function exports that return prompt strings
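A hypothetical example of the pattern (the function name and prompt wording are illustrative, not actual templates from `libs/prompts`):

```typescript
// libs/prompts/src/example-prompt.ts (illustrative only)
export function buildExamplePrompt(featureTitle: string, contextFiles: string): string {
  return [
    `You are implementing the feature: ${featureTitle}.`,
    'Relevant project context:',
    contextFiles,
  ].join('\n');
}
```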
## Key File Locations
**Entry Points:**
**Server:**
- `apps/server/src/index.ts`: Express server initialization, route mounting, WebSocket setup
**UI (Web):**
- `apps/ui/src/main.ts`: Vite entry point
- `apps/ui/src/app.tsx`: Root React component
**UI (Electron):**
- `apps/ui/src/main.ts`: Vite entry point
- `apps/ui/src/electron/main-process.ts`: Electron main process
- `apps/ui/src/preload.ts`: Electron preload script for IPC bridge
**Configuration:**
- `apps/server/src/index.ts`: PORT, HOST, HOSTNAME, DATA_DIR env vars
- `apps/ui/src/config/`: Theme options, fonts, model aliases
- `libs/types/src/settings.ts`: Settings schema
- `.env.local`: Local development overrides (git-ignored)
**Core Logic:**
**Server:**
- `apps/server/src/services/agent-service.ts`: AI agent execution engine (31KB)
- `apps/server/src/services/auto-mode-service.ts`: Feature batching and automation (216KB - largest)
- `apps/server/src/services/feature-loader.ts`: Feature persistence and loading
- `apps/server/src/services/settings-service.ts`: Settings management
- `apps/server/src/providers/provider-factory.ts`: AI provider selection
**UI:**
- `apps/ui/src/store/app-store.ts`: Global state (84KB - largest frontend file)
- `apps/ui/src/lib/http-api-client.ts`: API client with auth (92KB)
- `apps/ui/src/components/views/board-view.tsx`: Kanban board (70KB)
- `apps/ui/src/routes/__root.tsx`: Root layout with session init (32KB)
**Testing:**
**E2E Tests:**
- `apps/ui/tests/`: Playwright tests organized by feature area
- `settings/`, `features/`, `projects/`, `agent/`, `utils/`, `context/`
**Unit Tests:**
- `libs/*/tests/`: Package-specific Vitest tests
- `apps/server/src/tests/`: Server integration tests
**Test Config:**
- `vitest.config.ts`: Root Vitest configuration
- `apps/ui/playwright.config.ts`: Playwright configuration
## Naming Conventions
**Files:**
- **Components:** kebab-case.tsx (e.g., `board-view.tsx`, `session-manager.tsx`)
- **Services:** kebab-case-service.ts (e.g., `agent-service.ts`, `settings-service.ts`)
- **Hooks:** use-kebab-case.ts (e.g., `use-auto-mode.ts`, `use-settings-sync.ts`)
- **Utilities:** kebab-case.ts (e.g., `api-fetch.ts`, `log-parser.ts`)
- **Routes:** kebab-case with index.ts pattern (e.g., `routes/agent/index.ts`)
- **Tests:** `*.test.ts` or `*.spec.ts` (in each package's `tests/` directory)
**Directories:**
- **Feature domains:** kebab-case (e.g., `auto-mode/`, `event-history/`, `project-settings-view/`)
- **Type categories:** kebab-case plural (e.g., `types/`, `services/`, `providers/`, `routes/`)
- **Shared utilities:** kebab-case (e.g., `lib/`, `utils/`, `hooks/`)
**TypeScript:**
- **Types:** PascalCase (e.g., `Feature`, `AgentSession`, `ProviderMessage`)
- **Interfaces:** PascalCase (e.g., `EventEmitter`, `ProviderFactory`)
- **Enums:** PascalCase (e.g., `LogLevel`, `FeatureStatus`)
- **Functions:** camelCase (e.g., `createLogger()`, `classifyError()`)
- **Constants:** UPPER_SNAKE_CASE (e.g., `DEFAULT_TIMEOUT_MS`, `MAX_RETRIES`)
- **Variables:** camelCase (e.g., `featureId`, `settingsService`)
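A short illustrative snippet combining these conventions, using identifiers already named in this document:

```typescript
// PascalCase for types, camelCase for functions/variables, UPPER_SNAKE_CASE for constants
import type { Feature } from '@automaker/types';

const MAX_RETRIES = 3;

function describeFeature(feature: Feature): string {
  return `Feature ${feature.id} (retries allowed: ${MAX_RETRIES})`;
}
```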
## Where to Add New Code
**New Feature (end-to-end):**
- API Route: `apps/server/src/routes/{feature-name}/index.ts`
- Service Logic: `apps/server/src/services/{feature-name}-service.ts`
- UI Route: `apps/ui/src/routes/{feature-name}.tsx` (simple) or `{feature-name}/` (complex with subdir)
- Store: `apps/ui/src/store/{feature-name}-store.ts` (if complex state)
- Tests: `apps/ui/tests/{feature-name}/` or `apps/server/src/tests/`
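A hedged sketch of the route-factory shape used here (factories returning an Express `Router`, mounted under `/api/...` in `apps/server/src/index.ts`); the route name and handler body are hypothetical:

```typescript
// apps/server/src/routes/example/index.ts (illustrative)
import { Router } from 'express';
import type { EventEmitter } from '../../lib/events.js';

export function createExampleRoutes(_events: EventEmitter): Router {
  const router = Router();

  router.get('/', (_req, res) => {
    // Real routes delegate to a service in apps/server/src/services/
    res.json({ ok: true });
  });

  return router;
}

// Mounted in apps/server/src/index.ts:
//   app.use('/api/example', createExampleRoutes(events));
```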
**New Component/Module:**
- View Components: `apps/ui/src/components/views/{component-name}/`
- Dialog Components: `apps/ui/src/components/dialogs/{dialog-name}.tsx`
- Shared Components: `apps/ui/src/components/shared/` or `components/ui/` (shadcn)
- Layout Components: `apps/ui/src/components/layout/`
**Utilities:**
- New Library: Create in `libs/{package-name}/` with package.json and tsconfig.json
- Server Utilities: `apps/server/src/lib/{utility-name}.ts`
- Shared Utilities: Extend `libs/utils/src/` or create new lib if self-contained
- UI Utilities: `apps/ui/src/lib/{utility-name}.ts`
**New Provider (AI Model):**
- Implementation: `apps/server/src/providers/{provider-name}-provider.ts`
- Types: Add to `libs/types/src/{provider-name}-models.ts`
- Model Resolver: Update `libs/model-resolver/src/index.ts` with model alias mapping
- Settings: Update `libs/types/src/settings.ts` for provider-specific config
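A heavily hedged skeleton of a new provider, inferred only from the test examples later in this document (`executeQuery()` returning an async generator and a `getName()` method); the real base-provider interface is not reproduced here, so treat the types as assumptions:

```typescript
// apps/server/src/providers/example-provider.ts (illustrative skeleton)
interface ExecuteQueryOptions {
  prompt: string;
  model: string;
  cwd: string;
}

interface ProviderTextMessage {
  type: 'text';
  text: string;
}

export class ExampleProvider {
  getName(): string {
    return 'example';
  }

  // Streaming output surfaces as an async generator, matching the provider tests below.
  async *executeQuery(options: ExecuteQueryOptions): AsyncGenerator<ProviderTextMessage> {
    yield { type: 'text', text: `Echo for ${options.model}: ${options.prompt}` };
  }
}
```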
## Special Directories
**apps/ui/electron/:**
- Purpose: Electron-specific code (main process, IPC handlers, native APIs)
- Generated: Yes (preload.ts)
- Committed: Yes
**apps/ui/public/:**
- Purpose: Static assets (sounds, images, icons)
- Generated: No
- Committed: Yes
**apps/ui/dist/:**
- Purpose: Built web application
- Generated: Yes
- Committed: No (.gitignore)
**apps/ui/dist-electron/:**
- Purpose: Built Electron app bundle
- Generated: Yes
- Committed: No (.gitignore)
**.automaker/features/{featureId}/:**
- Purpose: Per-feature persistent storage
- Structure: feature.json, agent-output.md, images/
- Generated: Yes (at runtime)
- Committed: Yes (tracked in project git)
**data/:**
- Purpose: Global data directory (global settings, credentials, sessions)
- Generated: Yes (created at first run)
- Committed: No (.gitignore)
- Configurable: Via DATA_DIR env var
**node_modules/:**
- Purpose: Installed dependencies
- Generated: Yes
- Committed: No (.gitignore)
**dist/**, **build/:**
- Purpose: Build artifacts
- Generated: Yes
- Committed: No (.gitignore)
---
_Structure analysis: 2026-01-27_

View File

@@ -1,389 +0,0 @@
# Testing Patterns
**Analysis Date:** 2026-01-27
## Test Framework
**Runner:**
- Vitest 4.0.16 (for unit and integration tests)
- Playwright (for E2E tests)
- Config: `apps/server/vitest.config.ts`, `libs/*/vitest.config.ts`, `apps/ui/playwright.config.ts`
**Assertion Library:**
- Vitest built-in expect assertions
- API: `expect().toBe()`, `expect().toEqual()`, `expect().toHaveLength()`, `expect().toHaveProperty()`
**Run Commands:**
```bash
npm run test # E2E tests (Playwright, headless)
npm run test:headed # E2E tests with browser visible
npm run test:packages # All shared package unit tests (vitest)
npm run test:server # Server unit tests (vitest run)
npm run test:server:coverage # Server tests with coverage report
npm run test:all # All tests (packages + server)
npm run test:unit # Vitest run (all projects)
npm run test:unit:watch # Vitest watch mode
```
## Test File Organization
**Location:**
- Mirrored from source: `src/module.ts` is covered by `tests/unit/module.test.ts`
- Server tests: `apps/server/tests/` (separate directory)
- Library tests: `libs/*/tests/` (each package)
- E2E tests: `apps/ui/tests/` (Playwright)
**Naming:**
- Pattern: `{moduleName}.test.ts` for unit tests
- Pattern: `{moduleName}.spec.ts` for specification tests
- Glob pattern: `tests/**/*.test.ts`, `tests/**/*.spec.ts`
**Structure:**
```
apps/server/
├── tests/
│ ├── setup.ts # Global test setup
│ ├── unit/
│ │ ├── providers/ # Provider tests
│ │ │ ├── claude-provider.test.ts
│ │ │ ├── codex-provider.test.ts
│ │ │ └── base-provider.test.ts
│ │ └── services/
│ └── utils/
│ └── helpers.ts # Test utilities
└── src/
libs/platform/
├── tests/
│ ├── paths.test.ts
│ ├── security.test.ts
│ ├── subprocess.test.ts
│ └── node-finder.test.ts
└── src/
```
## Test Structure
**Suite Organization:**
```typescript
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { FeatureLoader } from '@/services/feature-loader.js';
describe('feature-loader.ts', () => {
let featureLoader: FeatureLoader;
beforeEach(() => {
vi.clearAllMocks();
featureLoader = new FeatureLoader();
});
afterEach(async () => {
// Cleanup resources
});
describe('methodName', () => {
it('should do specific thing', () => {
expect(result).toBe(expected);
});
});
});
```
**Patterns:**
- Setup pattern: `beforeEach()` initializes test instance, clears mocks
- Teardown pattern: `afterEach()` cleans up temp directories, removes created files
- Assertion pattern: one logical assertion per test (or multiple closely related)
- Test isolation: each test runs with fresh setup
## Mocking
**Framework:**
- Vitest `vi` module: `vi.mock()`, `vi.mocked()`, `vi.clearAllMocks()`
- Mock patterns: module mocking, function spying, return value mocking
**Patterns:**
Module mocking:
```typescript
import { vi } from 'vitest';
import * as sdk from '@anthropic-ai/claude-agent-sdk';

vi.mock('@anthropic-ai/claude-agent-sdk');

// In test:
vi.mocked(sdk.query).mockReturnValue(
  (async function* () {
    yield { type: 'text', text: 'Response 1' };
  })()
);
```
Async generator mocking (for streaming APIs):
```typescript
const generator = provider.executeQuery({
prompt: 'Hello',
model: 'claude-opus-4-5-20251101',
cwd: '/test',
});
const results = await collectAsyncGenerator(generator);
```
Partial mocking with spies:
```typescript
const provider = new TestProvider();
const spy = vi.spyOn(provider, 'getName');
spy.mockReturnValue('mocked-name');
```
**What to Mock:**
- External APIs (Claude SDK, GitHub SDK, cloud services)
- File system operations (use temp directories instead when possible)
- Network calls
- Process execution
- Time-dependent operations
**What NOT to Mock:**
- Core business logic (test the actual implementation)
- Type definitions
- Internal module dependencies (test integration with real services)
- Standard library functions (fs, path, etc. - use fixtures instead)
## Fixtures and Factories
**Test Data:**
```typescript
import { promises as fs } from 'node:fs';
import os from 'node:os';
import path from 'node:path';
import { beforeEach, afterEach } from 'vitest';

// Test helper for collecting async generator results
async function collectAsyncGenerator<T>(generator: AsyncGenerator<T>): Promise<T[]> {
  const results: T[] = [];
  for await (const item of generator) {
    results.push(item);
  }
  return results;
}

// Temporary directory fixture
let tempDir: string;
let projectPath: string;

beforeEach(async () => {
  tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'test-'));
  projectPath = path.join(tempDir, 'test-project');
  await fs.mkdir(projectPath, { recursive: true });
});

afterEach(async () => {
  try {
    await fs.rm(tempDir, { recursive: true, force: true });
  } catch (error) {
    // Ignore cleanup errors
  }
});
```
**Location:**
- Inline in test files for simple fixtures
- `tests/utils/helpers.ts` for shared test utilities
- Factory functions for complex test objects: `createTestProvider()`, `createMockFeature()`
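A hedged sketch of such a factory (any `Feature` fields beyond `id` and `imagePaths` are assumptions, hence the cast):

```typescript
// tests/utils/helpers.ts (illustrative factory)
import type { Feature } from '@automaker/types';

export function createMockFeature(overrides: Partial<Feature> = {}): Feature {
  const base: Partial<Feature> = {
    id: 'feature-123',
    imagePaths: [],
  };
  // Cast because the full Feature shape is not reproduced in this sketch.
  return { ...base, ...overrides } as Feature;
}
```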
## Coverage
**Requirements (Server):**
- Lines: 60%
- Functions: 75%
- Branches: 55%
- Statements: 60%
- Config: `apps/server/vitest.config.ts` with thresholds
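Expressed in Vitest config form, the thresholds above look roughly like this (abridged; the real config also excludes specific service files):

```typescript
// apps/server/vitest.config.ts (abridged sketch)
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    environment: 'node',
    coverage: {
      provider: 'v8',
      reporter: ['text', 'json', 'html', 'lcov'],
      include: ['src/**/*.ts'],
      exclude: ['src/**/*.d.ts'],
      thresholds: {
        lines: 60,
        functions: 75,
        branches: 55,
        statements: 60,
      },
    },
  },
});
```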
**Excluded from Coverage:**
- Route handlers: tested via integration/E2E tests
- Type re-exports
- Middleware: tested via integration tests
- Prompt templates
- MCP integration: awaits MCP SDK integration tests
- Provider CLI integrations: awaits integration tests
**View Coverage:**
```bash
npm run test:server:coverage # Generate coverage report
# Opens HTML report in: apps/server/coverage/index.html
```
**Coverage Tools:**
- Provider: v8
- Reporters: text, json, html, lcov
- File inclusion: `src/**/*.ts`
- File exclusion: `src/**/*.d.ts`, specific service files in thresholds
## Test Types
**Unit Tests:**
- Scope: Individual functions and methods
- Approach: Test inputs → outputs with mocked dependencies
- Location: `apps/server/tests/unit/`
- Examples:
- Provider executeQuery() with mocked SDK
- Path construction functions with assertions
- Error classification with different error types
- Config validation with various inputs
**Integration Tests:**
- Scope: Multiple modules working together
- Approach: Test actual service calls with real file system or temp directories
- Pattern: Setup data → call method → verify results
- Example: Feature loader reading/writing feature.json files
- Example: Auto-mode service coordinating with multiple services
**E2E Tests:**
- Framework: Playwright
- Scope: Full user workflows from UI
- Location: `apps/ui/tests/`
- Config: `apps/ui/playwright.config.ts`
- Setup:
- Backend server with mock agent enabled
- Frontend Vite dev server
- Sequential execution (workers: 1) to avoid auth conflicts
- Screenshots/traces on failure
- Auth: Global setup authentication in `tests/global-setup.ts`
- Fixtures: `tests/e2e-fixtures/` for test project data
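A hedged sketch of the Playwright configuration implied by the setup notes above (option names are standard Playwright; the base URL assumes the UI dev server on port 3007 mentioned earlier):

```typescript
// apps/ui/playwright.config.ts (illustrative sketch)
import { defineConfig } from '@playwright/test';

export default defineConfig({
  testDir: './tests',
  workers: 1, // sequential execution to avoid auth conflicts
  globalSetup: './tests/global-setup.ts',
  use: {
    baseURL: 'http://localhost:3007',
    screenshot: 'only-on-failure',
    trace: 'retain-on-failure',
  },
});
```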
## Common Patterns
**Async Testing:**
```typescript
it('should execute async operation', async () => {
const result = await featureLoader.loadFeature(projectPath, featureId);
expect(result).toBeDefined();
expect(result.id).toBe(featureId);
});
// For streams/generators:
const generator = provider.executeQuery({ prompt, model, cwd });
const results = await collectAsyncGenerator(generator);
expect(results).toHaveLength(2);
```
**Error Testing:**
```typescript
it('should throw error when feature not found', async () => {
await expect(featureLoader.getFeature(projectPath, 'nonexistent')).rejects.toThrow('not found');
});
// Testing error classification:
const errorInfo = classifyError(new Error('ENOENT'));
expect(errorInfo.category).toBe('FileSystem');
```
**Fixture Setup:**
```typescript
import { existsSync } from 'node:fs';

it('should create feature with images', async () => {
  // Setup: create temp feature directory
  const featureDir = path.join(projectPath, '.automaker', 'features', featureId);
  await fs.mkdir(featureDir, { recursive: true });
  // Act: perform operation
  const result = await featureLoader.updateFeature(projectPath, {
    id: featureId,
    imagePaths: ['/temp/image.png'],
  });
  // Assert: verify the image was migrated into the feature directory
  // (existsSync comes from node:fs; the promise-based fs API has no existsSync)
  const migratedPath = path.join(featureDir, 'images', 'image.png');
  expect(existsSync(migratedPath)).toBe(true);
});
```
**Mock Reset Pattern:**
```typescript
// In vitest.config.ts:
mockReset: true, // Reset all mocks before each test
restoreMocks: true, // Restore original implementations
clearMocks: true, // Clear mock call history
// In test:
beforeEach(() => {
vi.clearAllMocks();
delete process.env.ANTHROPIC_API_KEY;
});
```
## Test Configuration
**Vitest Config Patterns:**
Server config (`apps/server/vitest.config.ts`):
- Environment: node
- Globals: true (describe/it without imports)
- Setup files: `./tests/setup.ts`
- Alias resolution: resolves `@automaker/*` to source files for mocking
Library config:
- Simpler setup: just environment and globals
- Coverage with high thresholds (90%+ lines)
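The alias resolution noted under the server config might look roughly like this (the relative paths are assumptions based on the repo layout shown earlier):

```typescript
// Excerpt from apps/server/vitest.config.ts (illustrative)
import { fileURLToPath } from 'node:url';
import { defineConfig } from 'vitest/config';

export default defineConfig({
  resolve: {
    alias: {
      // Point workspace packages at their TypeScript sources so vi.mock() can intercept them
      '@automaker/types': fileURLToPath(new URL('../../libs/types/src/index.ts', import.meta.url)),
      '@automaker/utils': fileURLToPath(new URL('../../libs/utils/src/index.ts', import.meta.url)),
    },
  },
  test: {
    globals: true,
    environment: 'node',
    setupFiles: ['./tests/setup.ts'],
  },
});
```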
**Global Setup:**
```typescript
// tests/setup.ts
import { vi, beforeEach } from 'vitest';
process.env.NODE_ENV = 'test';
process.env.DATA_DIR = '/tmp/test-data';
beforeEach(() => {
vi.clearAllMocks();
});
```
## Testing Best Practices
**Isolation:**
- Each test is independent (no state sharing)
- Cleanup temp files in afterEach
- Reset mocks and environment variables in beforeEach
**Clarity:**
- Descriptive test names: "should do X when Y condition"
- One logical assertion per test
- Clear arrange-act-assert structure
**Speed:**
- Mock external services
- Use in-memory temp directories
- Avoid real network calls
- Sequential E2E tests to prevent conflicts
**Maintainability:**
- Use beforeEach/afterEach for common setup
- Extract test helpers to `tests/utils/`
- Keep test data simple and local
- Mock consistently across tests
---
_Testing analysis: 2026-01-27_

View File

@@ -25,11 +25,9 @@ COPY libs/types/package*.json ./libs/types/
COPY libs/utils/package*.json ./libs/utils/ COPY libs/utils/package*.json ./libs/utils/
COPY libs/prompts/package*.json ./libs/prompts/ COPY libs/prompts/package*.json ./libs/prompts/
COPY libs/platform/package*.json ./libs/platform/ COPY libs/platform/package*.json ./libs/platform/
COPY libs/spec-parser/package*.json ./libs/spec-parser/
COPY libs/model-resolver/package*.json ./libs/model-resolver/ COPY libs/model-resolver/package*.json ./libs/model-resolver/
COPY libs/dependency-resolver/package*.json ./libs/dependency-resolver/ COPY libs/dependency-resolver/package*.json ./libs/dependency-resolver/
COPY libs/git-utils/package*.json ./libs/git-utils/ COPY libs/git-utils/package*.json ./libs/git-utils/
COPY libs/spec-parser/package*.json ./libs/spec-parser/
# Copy scripts (needed by npm workspace) # Copy scripts (needed by npm workspace)
COPY scripts ./scripts COPY scripts ./scripts

17
TODO.md Normal file
View File

@@ -0,0 +1,17 @@
# Bugs
- Setting the default model does not seem to work.
# UX
- Consolidate all model configuration into a single place in the settings instead of having AI profiles and all this other stuff.
- Simplify the create feature modal. It should just be one page; I don't need all these tabs and nested buttons. It's too complex.
- Add the to-do list checkboxes directly onto the card so that, as a feature is being worked on, we can see any to-do items update live.
- When a feature is done, I want to see a summary from the LLM. That should be the first thing I see when I double-click the card.
- I want a way to mass-edit all my features. For example, when I created a new project, it added auto testing on every single feature card, and now I have to go through them one by one to change that. Provide a way to mass-edit the configuration of all of them.
- Double-check and debug possible memory leaks. Automaker's memory seems to grow by about 3 GB; it's at 5 GB right now while three different Cursor CLI features are implementing at the same time.
- Typing in the plan mode text area was super laggy.
- When I have a bunch of features running at the same time, I can't seem to edit the features in the backlog; their file changes don't persist. I think this is because the secure FS layer has an internal queue to avoid hitting the open-file/write limit. We may have to consider refactoring away from the file system to Postgres or SQLite or something.
- Modals are not scrollable if the screen height is small enough.
- In the Agent Runner, add an archive button for new sessions.
- Investigate a potential issue with feature cards not refreshing. I see a lock icon on a feature card, but it doesn't go away until I open the card, edit it, and turn testing mode off. It looks like a refresh/sync issue.

View File

@@ -1,6 +1,6 @@
{ {
"name": "@automaker/server", "name": "@automaker/server",
"version": "0.13.0", "version": "0.12.0",
"description": "Backend server for Automaker - provides API for both web and Electron modes", "description": "Backend server for Automaker - provides API for both web and Electron modes",
"author": "AutoMaker Team", "author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE", "license": "SEE LICENSE IN LICENSE",
@@ -32,7 +32,6 @@
"@automaker/prompts": "1.0.0", "@automaker/prompts": "1.0.0",
"@automaker/types": "1.0.0", "@automaker/types": "1.0.0",
"@automaker/utils": "1.0.0", "@automaker/utils": "1.0.0",
"@github/copilot-sdk": "^0.1.16",
"@modelcontextprotocol/sdk": "1.25.2", "@modelcontextprotocol/sdk": "1.25.2",
"@openai/codex-sdk": "^0.77.0", "@openai/codex-sdk": "^0.77.0",
"cookie-parser": "1.4.7", "cookie-parser": "1.4.7",
@@ -41,8 +40,7 @@
"express": "5.2.1", "express": "5.2.1",
"morgan": "1.10.1", "morgan": "1.10.1",
"node-pty": "1.1.0-beta41", "node-pty": "1.1.0-beta41",
"ws": "8.18.3", "ws": "8.18.3"
"yaml": "2.7.0"
}, },
"devDependencies": { "devDependencies": {
"@types/cookie": "0.6.0", "@types/cookie": "0.6.0",

View File

@@ -16,7 +16,7 @@ import { createServer } from 'http';
import dotenv from 'dotenv'; import dotenv from 'dotenv';
import { createEventEmitter, type EventEmitter } from './lib/events.js'; import { createEventEmitter, type EventEmitter } from './lib/events.js';
import { initAllowedPaths, getClaudeAuthIndicators } from '@automaker/platform'; import { initAllowedPaths } from '@automaker/platform';
import { createLogger, setLogLevel, LogLevel } from '@automaker/utils'; import { createLogger, setLogLevel, LogLevel } from '@automaker/utils';
const logger = createLogger('Server'); const logger = createLogger('Server');
@@ -43,6 +43,7 @@ import { createEnhancePromptRoutes } from './routes/enhance-prompt/index.js';
import { createWorktreeRoutes } from './routes/worktree/index.js'; import { createWorktreeRoutes } from './routes/worktree/index.js';
import { createGitRoutes } from './routes/git/index.js'; import { createGitRoutes } from './routes/git/index.js';
import { createSetupRoutes } from './routes/setup/index.js'; import { createSetupRoutes } from './routes/setup/index.js';
import { createSuggestionsRoutes } from './routes/suggestions/index.js';
import { createModelsRoutes } from './routes/models/index.js'; import { createModelsRoutes } from './routes/models/index.js';
import { createRunningAgentsRoutes } from './routes/running-agents/index.js'; import { createRunningAgentsRoutes } from './routes/running-agents/index.js';
import { createWorkspaceRoutes } from './routes/workspace/index.js'; import { createWorkspaceRoutes } from './routes/workspace/index.js';
@@ -56,7 +57,7 @@ import {
import { createSettingsRoutes } from './routes/settings/index.js'; import { createSettingsRoutes } from './routes/settings/index.js';
import { AgentService } from './services/agent-service.js'; import { AgentService } from './services/agent-service.js';
import { FeatureLoader } from './services/feature-loader.js'; import { FeatureLoader } from './services/feature-loader.js';
import { AutoModeServiceCompat } from './services/auto-mode/index.js'; import { AutoModeService } from './services/auto-mode-service.js';
import { getTerminalService } from './services/terminal-service.js'; import { getTerminalService } from './services/terminal-service.js';
import { SettingsService } from './services/settings-service.js'; import { SettingsService } from './services/settings-service.js';
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js'; import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
@@ -82,8 +83,6 @@ import { createNotificationsRoutes } from './routes/notifications/index.js';
import { getNotificationService } from './services/notification-service.js'; import { getNotificationService } from './services/notification-service.js';
import { createEventHistoryRoutes } from './routes/event-history/index.js'; import { createEventHistoryRoutes } from './routes/event-history/index.js';
import { getEventHistoryService } from './services/event-history-service.js'; import { getEventHistoryService } from './services/event-history-service.js';
import { getTestRunnerService } from './services/test-runner-service.js';
import { createProjectsRoutes } from './routes/projects/index.js';
// Load environment variables // Load environment variables
dotenv.config(); dotenv.config();
@@ -117,44 +116,15 @@ export function isRequestLoggingEnabled(): boolean {
// Width for log box content (excluding borders) // Width for log box content (excluding borders)
const BOX_CONTENT_WIDTH = 67; const BOX_CONTENT_WIDTH = 67;
// Check for Claude authentication (async - runs in background) // Check for required environment variables
// The Claude Agent SDK can use either ANTHROPIC_API_KEY or Claude Code CLI authentication const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
(async () => {
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
if (hasAnthropicKey) { if (!hasAnthropicKey) {
logger.info('✓ ANTHROPIC_API_KEY detected');
return;
}
// Check for Claude Code CLI authentication
try {
const indicators = await getClaudeAuthIndicators();
const hasCliAuth =
indicators.hasStatsCacheWithActivity ||
(indicators.hasSettingsFile && indicators.hasProjectsSessions) ||
(indicators.hasCredentialsFile &&
(indicators.credentials?.hasOAuthToken || indicators.credentials?.hasApiKey));
if (hasCliAuth) {
logger.info('✓ Claude Code CLI authentication detected');
return;
}
} catch (error) {
// Ignore errors checking CLI auth - will fall through to warning
logger.warn('Error checking for Claude Code CLI authentication:', error);
}
// No authentication found - show warning
const wHeader = '⚠️ WARNING: No Claude authentication configured'.padEnd(BOX_CONTENT_WIDTH); const wHeader = '⚠️ WARNING: No Claude authentication configured'.padEnd(BOX_CONTENT_WIDTH);
const w1 = 'The Claude Agent SDK requires authentication to function.'.padEnd(BOX_CONTENT_WIDTH); const w1 = 'The Claude Agent SDK requires authentication to function.'.padEnd(BOX_CONTENT_WIDTH);
const w2 = 'Options:'.padEnd(BOX_CONTENT_WIDTH); const w2 = 'Set your Anthropic API key:'.padEnd(BOX_CONTENT_WIDTH);
const w3 = '1. Install Claude Code CLI and authenticate with subscription'.padEnd( const w3 = ' export ANTHROPIC_API_KEY="sk-ant-..."'.padEnd(BOX_CONTENT_WIDTH);
BOX_CONTENT_WIDTH const w4 = 'Or use the setup wizard in Settings to configure authentication.'.padEnd(
);
const w4 = '2. Set your Anthropic API key:'.padEnd(BOX_CONTENT_WIDTH);
const w5 = ' export ANTHROPIC_API_KEY="sk-ant-..."'.padEnd(BOX_CONTENT_WIDTH);
const w6 = '3. Use the setup wizard in Settings to configure authentication.'.padEnd(
BOX_CONTENT_WIDTH BOX_CONTENT_WIDTH
); );
@@ -167,13 +137,14 @@ const BOX_CONTENT_WIDTH = 67;
║ ║ ║ ║
${w2} ${w2}
${w3} ${w3}
║ ║
${w4} ${w4}
${w5}
${w6}
║ ║ ║ ║
╚═════════════════════════════════════════════════════════════════════╝ ╚═════════════════════════════════════════════════════════════════════╝
`); `);
})(); } else {
logger.info('✓ ANTHROPIC_API_KEY detected');
}
// Initialize security // Initialize security
initAllowedPaths(); initAllowedPaths();
@@ -258,9 +229,7 @@ const events: EventEmitter = createEventEmitter();
const settingsService = new SettingsService(DATA_DIR); const settingsService = new SettingsService(DATA_DIR);
const agentService = new AgentService(DATA_DIR, events, settingsService); const agentService = new AgentService(DATA_DIR, events, settingsService);
const featureLoader = new FeatureLoader(); const featureLoader = new FeatureLoader();
const autoModeService = new AutoModeService(events, settingsService);
// Auto-mode services: compatibility layer provides old interface while using new architecture
const autoModeService = new AutoModeServiceCompat(events, settingsService, featureLoader);
const claudeUsageService = new ClaudeUsageService(); const claudeUsageService = new ClaudeUsageService();
const codexAppServerService = new CodexAppServerService(); const codexAppServerService = new CodexAppServerService();
const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService); const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
@@ -279,12 +248,8 @@ notificationService.setEventEmitter(events);
// Initialize Event History Service // Initialize Event History Service
const eventHistoryService = getEventHistoryService(); const eventHistoryService = getEventHistoryService();
// Initialize Test Runner Service with event emitter for real-time test output streaming
const testRunnerService = getTestRunnerService();
testRunnerService.setEventEmitter(events);
// Initialize Event Hook Service for custom event triggers (with history storage) // Initialize Event Hook Service for custom event triggers (with history storage)
eventHookService.initialize(events, settingsService, eventHistoryService, featureLoader); eventHookService.initialize(events, settingsService, eventHistoryService);
// Initialize services // Initialize services
(async () => { (async () => {
@@ -356,14 +321,12 @@ app.get('/api/health/detailed', createDetailedHandler());
app.use('/api/fs', createFsRoutes(events)); app.use('/api/fs', createFsRoutes(events));
app.use('/api/agent', createAgentRoutes(agentService, events)); app.use('/api/agent', createAgentRoutes(agentService, events));
app.use('/api/sessions', createSessionsRoutes(agentService)); app.use('/api/sessions', createSessionsRoutes(agentService));
app.use( app.use('/api/features', createFeaturesRoutes(featureLoader, settingsService, events));
'/api/features',
createFeaturesRoutes(featureLoader, settingsService, events, autoModeService)
);
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService)); app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService)); app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
app.use('/api/worktree', createWorktreeRoutes(events, settingsService)); app.use('/api/worktree', createWorktreeRoutes(events, settingsService));
app.use('/api/git', createGitRoutes()); app.use('/api/git', createGitRoutes());
app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
app.use('/api/models', createModelsRoutes()); app.use('/api/models', createModelsRoutes());
app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events, settingsService)); app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events, settingsService));
app.use('/api/running-agents', createRunningAgentsRoutes(autoModeService)); app.use('/api/running-agents', createRunningAgentsRoutes(autoModeService));
@@ -381,10 +344,6 @@ app.use('/api/pipeline', createPipelineRoutes(pipelineService));
app.use('/api/ideation', createIdeationRoutes(events, ideationService, featureLoader)); app.use('/api/ideation', createIdeationRoutes(events, ideationService, featureLoader));
app.use('/api/notifications', createNotificationsRoutes(notificationService)); app.use('/api/notifications', createNotificationsRoutes(notificationService));
app.use('/api/event-history', createEventHistoryRoutes(eventHistoryService, settingsService)); app.use('/api/event-history', createEventHistoryRoutes(eventHistoryService, settingsService));
app.use(
'/api/projects',
createProjectsRoutes(featureLoader, autoModeService, settingsService, notificationService)
);
// Create HTTP server // Create HTTP server
const server = createServer(app); const server = createServer(app);
@@ -802,36 +761,21 @@ process.on('uncaughtException', (error: Error) => {
process.exit(1); process.exit(1);
}); });
// Graceful shutdown timeout (30 seconds) // Graceful shutdown
const SHUTDOWN_TIMEOUT_MS = 30000; process.on('SIGTERM', () => {
logger.info('SIGTERM received, shutting down...');
// Graceful shutdown helper
const gracefulShutdown = async (signal: string) => {
logger.info(`${signal} received, shutting down...`);
// Set up a force-exit timeout to prevent hanging
const forceExitTimeout = setTimeout(() => {
logger.error(`Shutdown timed out after ${SHUTDOWN_TIMEOUT_MS}ms, forcing exit`);
process.exit(1);
}, SHUTDOWN_TIMEOUT_MS);
// Mark all running features as interrupted before shutdown
// This ensures they can be resumed when the server restarts
// Note: markAllRunningFeaturesInterrupted handles errors internally and never rejects
await autoModeService.markAllRunningFeaturesInterrupted(`${signal} signal received`);
terminalService.cleanup(); terminalService.cleanup();
server.close(() => { server.close(() => {
clearTimeout(forceExitTimeout);
logger.info('Server closed'); logger.info('Server closed');
process.exit(0); process.exit(0);
}); });
};
process.on('SIGTERM', () => {
gracefulShutdown('SIGTERM');
}); });
process.on('SIGINT', () => { process.on('SIGINT', () => {
gracefulShutdown('SIGINT'); logger.info('SIGINT received, shutting down...');
terminalService.cleanup();
server.close(() => {
logger.info('Server closed');
process.exit(0);
});
}); });

View File

@@ -23,13 +23,6 @@ const SESSION_COOKIE_NAME = 'automaker_session';
const SESSION_MAX_AGE_MS = 30 * 24 * 60 * 60 * 1000; // 30 days const SESSION_MAX_AGE_MS = 30 * 24 * 60 * 60 * 1000; // 30 days
const WS_TOKEN_MAX_AGE_MS = 5 * 60 * 1000; // 5 minutes for WebSocket connection tokens const WS_TOKEN_MAX_AGE_MS = 5 * 60 * 1000; // 5 minutes for WebSocket connection tokens
/**
* Check if an environment variable is set to 'true'
*/
function isEnvTrue(envVar: string | undefined): boolean {
return envVar === 'true';
}
// Session store - persisted to file for survival across server restarts // Session store - persisted to file for survival across server restarts
const validSessions = new Map<string, { createdAt: number; expiresAt: number }>(); const validSessions = new Map<string, { createdAt: number; expiresAt: number }>();
@@ -141,8 +134,8 @@ const API_KEY = ensureApiKey();
const BOX_CONTENT_WIDTH = 67; const BOX_CONTENT_WIDTH = 67;
// Print API key to console for web mode users (unless suppressed for production logging) // Print API key to console for web mode users (unless suppressed for production logging)
if (!isEnvTrue(process.env.AUTOMAKER_HIDE_API_KEY)) { if (process.env.AUTOMAKER_HIDE_API_KEY !== 'true') {
const autoLoginEnabled = isEnvTrue(process.env.AUTOMAKER_AUTO_LOGIN); const autoLoginEnabled = process.env.AUTOMAKER_AUTO_LOGIN === 'true';
const autoLoginStatus = autoLoginEnabled ? 'enabled (auto-login active)' : 'disabled'; const autoLoginStatus = autoLoginEnabled ? 'enabled (auto-login active)' : 'disabled';
// Build box lines with exact padding // Build box lines with exact padding
@@ -382,12 +375,6 @@ function checkAuthentication(
* 5. Session cookie (for web mode) * 5. Session cookie (for web mode)
*/ */
export function authMiddleware(req: Request, res: Response, next: NextFunction): void { export function authMiddleware(req: Request, res: Response, next: NextFunction): void {
// Allow disabling auth for local/trusted networks
if (isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH)) {
next();
return;
}
const result = checkAuthentication( const result = checkAuthentication(
req.headers as Record<string, string | string[] | undefined>, req.headers as Record<string, string | string[] | undefined>,
req.query as Record<string, string | undefined>, req.query as Record<string, string | undefined>,
@@ -433,10 +420,9 @@ export function isAuthEnabled(): boolean {
* Get authentication status for health endpoint * Get authentication status for health endpoint
*/ */
export function getAuthStatus(): { enabled: boolean; method: string } { export function getAuthStatus(): { enabled: boolean; method: string } {
const disabled = isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH);
return { return {
enabled: !disabled, enabled: true,
method: disabled ? 'disabled' : 'api_key_or_session', method: 'api_key_or_session',
}; };
} }
@@ -444,7 +430,6 @@ export function getAuthStatus(): { enabled: boolean; method: string } {
* Check if a request is authenticated (for status endpoint) * Check if a request is authenticated (for status endpoint)
*/ */
export function isRequestAuthenticated(req: Request): boolean { export function isRequestAuthenticated(req: Request): boolean {
if (isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH)) return true;
const result = checkAuthentication( const result = checkAuthentication(
req.headers as Record<string, string | string[] | undefined>, req.headers as Record<string, string | string[] | undefined>,
req.query as Record<string, string | undefined>, req.query as Record<string, string | undefined>,
@@ -462,6 +447,5 @@ export function checkRawAuthentication(
query: Record<string, string | undefined>, query: Record<string, string | undefined>,
cookies: Record<string, string | undefined> cookies: Record<string, string | undefined>
): boolean { ): boolean {
if (isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH)) return true;
return checkAuthentication(headers, query, cookies).authenticated; return checkAuthentication(headers, query, cookies).authenticated;
} }

View File

@@ -10,12 +10,7 @@ import type {
McpServerConfig, McpServerConfig,
PromptCustomization, PromptCustomization,
ClaudeApiProfile, ClaudeApiProfile,
ClaudeCompatibleProvider,
PhaseModelKey,
PhaseModelEntry,
Credentials,
} from '@automaker/types'; } from '@automaker/types';
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
import { import {
mergeAutoModePrompts, mergeAutoModePrompts,
mergeAgentPrompts, mergeAgentPrompts,
@@ -365,22 +360,16 @@ export interface ActiveClaudeApiProfileResult {
} }
/** /**
* Get the active Claude API profile and credentials from settings. * Get the active Claude API profile and credentials from global settings.
* Checks project settings first for per-project overrides, then falls back to global settings.
* Returns both the profile and credentials for resolving 'credentials' apiKeySource. * Returns both the profile and credentials for resolving 'credentials' apiKeySource.
* *
* @deprecated Use getProviderById and getPhaseModelWithOverrides instead for the new provider system.
* This function is kept for backward compatibility during migration.
*
* @param settingsService - Optional settings service instance * @param settingsService - Optional settings service instance
* @param logPrefix - Prefix for log messages (e.g., '[AgentService]') * @param logPrefix - Prefix for log messages (e.g., '[AgentService]')
* @param projectPath - Optional project path for per-project override
* @returns Promise resolving to object with profile and credentials * @returns Promise resolving to object with profile and credentials
*/ */
export async function getActiveClaudeApiProfile( export async function getActiveClaudeApiProfile(
settingsService?: SettingsService | null, settingsService?: SettingsService | null,
logPrefix = '[SettingsHelper]', logPrefix = '[SettingsHelper]'
projectPath?: string
): Promise<ActiveClaudeApiProfileResult> { ): Promise<ActiveClaudeApiProfileResult> {
if (!settingsService) { if (!settingsService) {
return { profile: undefined, credentials: undefined }; return { profile: undefined, credentials: undefined };
@@ -390,30 +379,10 @@ export async function getActiveClaudeApiProfile(
const globalSettings = await settingsService.getGlobalSettings(); const globalSettings = await settingsService.getGlobalSettings();
const credentials = await settingsService.getCredentials(); const credentials = await settingsService.getCredentials();
const profiles = globalSettings.claudeApiProfiles || []; const profiles = globalSettings.claudeApiProfiles || [];
const activeProfileId = globalSettings.activeClaudeApiProfileId;
// Check for project-level override first
let activeProfileId: string | null | undefined;
let isProjectOverride = false;
if (projectPath) {
const projectSettings = await settingsService.getProjectSettings(projectPath);
// undefined = use global, null = explicit no profile, string = specific profile
if (projectSettings.activeClaudeApiProfileId !== undefined) {
activeProfileId = projectSettings.activeClaudeApiProfileId;
isProjectOverride = true;
}
}
// Fall back to global if project doesn't specify
if (activeProfileId === undefined && !isProjectOverride) {
activeProfileId = globalSettings.activeClaudeApiProfileId;
}
// No active profile selected - use direct Anthropic API // No active profile selected - use direct Anthropic API
if (!activeProfileId) { if (!activeProfileId) {
if (isProjectOverride && activeProfileId === null) {
logger.info(`${logPrefix} Project explicitly using Direct Anthropic API`);
}
return { profile: undefined, credentials }; return { profile: undefined, credentials };
} }
@@ -421,8 +390,7 @@ export async function getActiveClaudeApiProfile(
const activeProfile = profiles.find((p) => p.id === activeProfileId); const activeProfile = profiles.find((p) => p.id === activeProfileId);
if (activeProfile) { if (activeProfile) {
const overrideSuffix = isProjectOverride ? ' (project override)' : ''; logger.info(`${logPrefix} Using Claude API profile: ${activeProfile.name}`);
logger.info(`${logPrefix} Using Claude API profile: ${activeProfile.name}${overrideSuffix}`);
return { profile: activeProfile, credentials }; return { profile: activeProfile, credentials };
} else { } else {
logger.warn( logger.warn(
@@ -435,296 +403,3 @@ export async function getActiveClaudeApiProfile(
return { profile: undefined, credentials: undefined }; return { profile: undefined, credentials: undefined };
} }
} }
// ============================================================================
// New Provider System Helpers
// ============================================================================
/** Result from getProviderById */
export interface ProviderByIdResult {
/** The provider, or undefined if not found */
provider: ClaudeCompatibleProvider | undefined;
/** Credentials for resolving 'credentials' apiKeySource */
credentials: Credentials | undefined;
}
/**
* Get a ClaudeCompatibleProvider by its ID.
* Returns the provider configuration and credentials for API key resolution.
*
* @param providerId - The provider ID to look up
* @param settingsService - Settings service instance
* @param logPrefix - Prefix for log messages
* @returns Promise resolving to object with provider and credentials
*/
export async function getProviderById(
providerId: string,
settingsService: SettingsService,
logPrefix = '[SettingsHelper]'
): Promise<ProviderByIdResult> {
try {
const globalSettings = await settingsService.getGlobalSettings();
const credentials = await settingsService.getCredentials();
const providers = globalSettings.claudeCompatibleProviders || [];
const provider = providers.find((p) => p.id === providerId);
if (provider) {
if (provider.enabled === false) {
logger.warn(`${logPrefix} Provider "${provider.name}" (${providerId}) is disabled`);
} else {
logger.debug(`${logPrefix} Found provider: ${provider.name}`);
}
return { provider, credentials };
} else {
logger.warn(`${logPrefix} Provider not found: ${providerId}`);
return { provider: undefined, credentials };
}
} catch (error) {
logger.error(`${logPrefix} Failed to load provider by ID:`, error);
return { provider: undefined, credentials: undefined };
}
}
/** Result from getPhaseModelWithOverrides */
export interface PhaseModelWithOverridesResult {
/** The resolved phase model entry */
phaseModel: PhaseModelEntry;
/** Whether a project override was applied */
isProjectOverride: boolean;
/** The provider if providerId is set and found */
provider: ClaudeCompatibleProvider | undefined;
/** Credentials for API key resolution */
credentials: Credentials | undefined;
}
/**
* Get the phase model configuration for a specific phase, applying project overrides if available.
* Also resolves the provider if the phase model has a providerId.
*
* @param phase - The phase key (e.g., 'enhancementModel', 'specGenerationModel')
* @param settingsService - Optional settings service instance (returns defaults if undefined)
* @param projectPath - Optional project path for checking overrides
* @param logPrefix - Prefix for log messages
* @returns Promise resolving to phase model with provider info
*/
export async function getPhaseModelWithOverrides(
phase: PhaseModelKey,
settingsService?: SettingsService | null,
projectPath?: string,
logPrefix = '[SettingsHelper]'
): Promise<PhaseModelWithOverridesResult> {
// Handle undefined settingsService gracefully
if (!settingsService) {
logger.info(`${logPrefix} SettingsService not available, using default for ${phase}`);
return {
phaseModel: DEFAULT_PHASE_MODELS[phase] || { model: 'sonnet' },
isProjectOverride: false,
provider: undefined,
credentials: undefined,
};
}
try {
const globalSettings = await settingsService.getGlobalSettings();
const credentials = await settingsService.getCredentials();
const globalPhaseModels = globalSettings.phaseModels || {};
// Start with global phase model
let phaseModel = globalPhaseModels[phase];
let isProjectOverride = false;
// Check for project override
if (projectPath) {
const projectSettings = await settingsService.getProjectSettings(projectPath);
const projectOverrides = projectSettings.phaseModelOverrides || {};
if (projectOverrides[phase]) {
phaseModel = projectOverrides[phase];
isProjectOverride = true;
logger.debug(`${logPrefix} Using project override for ${phase}`);
}
}
// If no phase model found, use per-phase default
if (!phaseModel) {
phaseModel = DEFAULT_PHASE_MODELS[phase] || { model: 'sonnet' };
logger.debug(`${logPrefix} No ${phase} configured, using default: ${phaseModel.model}`);
}
// Resolve provider if providerId is set
let provider: ClaudeCompatibleProvider | undefined;
if (phaseModel.providerId) {
const providers = globalSettings.claudeCompatibleProviders || [];
provider = providers.find((p) => p.id === phaseModel.providerId);
if (provider) {
if (provider.enabled === false) {
logger.warn(
`${logPrefix} Provider "${provider.name}" for ${phase} is disabled, falling back to direct API`
);
provider = undefined;
} else {
logger.debug(`${logPrefix} Using provider "${provider.name}" for ${phase}`);
}
} else {
logger.warn(
`${logPrefix} Provider ${phaseModel.providerId} not found for ${phase}, falling back to direct API`
);
}
}
return {
phaseModel,
isProjectOverride,
provider,
credentials,
};
} catch (error) {
logger.error(`${logPrefix} Failed to get phase model with overrides:`, error);
// Return a safe default
return {
phaseModel: { model: 'sonnet' },
isProjectOverride: false,
provider: undefined,
credentials: undefined,
};
}
}
/** Result from getProviderByModelId */
export interface ProviderByModelIdResult {
/** The provider that contains this model, or undefined if not found */
provider: ClaudeCompatibleProvider | undefined;
/** The model configuration if found */
modelConfig: import('@automaker/types').ProviderModel | undefined;
/** Credentials for API key resolution */
credentials: Credentials | undefined;
/** The resolved Claude model ID to use for API calls (from mapsToClaudeModel) */
resolvedModel: string | undefined;
}
/**
* Find a ClaudeCompatibleProvider by one of its model IDs.
* Searches through all enabled providers to find one that contains the specified model.
* This is useful when you have a model string from the UI but need the provider config.
*
* Also resolves the `mapsToClaudeModel` field to get the actual Claude model ID to use
* when calling the API (e.g., "GLM-4.5-Air" -> "claude-haiku-4-5").
*
* @param modelId - The model ID to search for (e.g., "GLM-4.7", "MiniMax-M2.1")
* @param settingsService - Settings service instance
* @param logPrefix - Prefix for log messages
* @returns Promise resolving to object with provider, model config, credentials, and resolved model
*/
export async function getProviderByModelId(
modelId: string,
settingsService: SettingsService,
logPrefix = '[SettingsHelper]'
): Promise<ProviderByModelIdResult> {
try {
const globalSettings = await settingsService.getGlobalSettings();
const credentials = await settingsService.getCredentials();
const providers = globalSettings.claudeCompatibleProviders || [];
// Search through all enabled providers for this model
for (const provider of providers) {
// Skip disabled providers
if (provider.enabled === false) {
continue;
}
// Check if this provider has the model
const modelConfig = provider.models?.find(
(m) => m.id === modelId || m.id.toLowerCase() === modelId.toLowerCase()
);
if (modelConfig) {
logger.info(`${logPrefix} Found model "${modelId}" in provider "${provider.name}"`);
// Resolve the mapped Claude model if specified
let resolvedModel: string | undefined;
if (modelConfig.mapsToClaudeModel) {
// Import resolveModelString to convert alias to full model ID
const { resolveModelString } = await import('@automaker/model-resolver');
resolvedModel = resolveModelString(modelConfig.mapsToClaudeModel);
logger.info(
`${logPrefix} Model "${modelId}" maps to Claude model "${modelConfig.mapsToClaudeModel}" -> "${resolvedModel}"`
);
}
return { provider, modelConfig, credentials, resolvedModel };
}
}
// Model not found in any provider
logger.debug(`${logPrefix} Model "${modelId}" not found in any provider`);
return {
provider: undefined,
modelConfig: undefined,
credentials: undefined,
resolvedModel: undefined,
};
} catch (error) {
logger.error(`${logPrefix} Failed to find provider by model ID:`, error);
return {
provider: undefined,
modelConfig: undefined,
credentials: undefined,
resolvedModel: undefined,
};
}
}
/**
* Get all enabled provider models for use in model dropdowns.
* Returns models from all enabled ClaudeCompatibleProviders.
*
* @param settingsService - Settings service instance
* @param logPrefix - Prefix for log messages
* @returns Promise resolving to array of provider models with their provider info
*/
export async function getAllProviderModels(
settingsService: SettingsService,
logPrefix = '[SettingsHelper]'
): Promise<
Array<{
providerId: string;
providerName: string;
model: import('@automaker/types').ProviderModel;
}>
> {
try {
const globalSettings = await settingsService.getGlobalSettings();
const providers = globalSettings.claudeCompatibleProviders || [];
const allModels: Array<{
providerId: string;
providerName: string;
model: import('@automaker/types').ProviderModel;
}> = [];
for (const provider of providers) {
// Skip disabled providers
if (provider.enabled === false) {
continue;
}
for (const model of provider.models || []) {
allModels.push({
providerId: provider.id,
providerName: provider.name,
model,
});
}
}
logger.debug(
`${logPrefix} Found ${allModels.length} models from ${providers.length} providers`
);
return allModels;
} catch (error) {
logger.error(`${logPrefix} Failed to get all provider models:`, error);
return [];
}
}

View File

@@ -14,17 +14,8 @@ import {
getThinkingTokenBudget, getThinkingTokenBudget,
validateBareModelId, validateBareModelId,
type ClaudeApiProfile, type ClaudeApiProfile,
type ClaudeCompatibleProvider,
type Credentials, type Credentials,
} from '@automaker/types'; } from '@automaker/types';
/**
* ProviderConfig - Union type for provider configuration
*
* Accepts either the legacy ClaudeApiProfile or new ClaudeCompatibleProvider.
* Both share the same connection settings structure.
*/
type ProviderConfig = ClaudeApiProfile | ClaudeCompatibleProvider;
import type { import type {
   ExecuteOptions,
   ProviderMessage,
@@ -60,48 +51,34 @@ const ALLOWED_ENV_VARS = [
 // System vars are always passed from process.env regardless of profile
 const SYSTEM_ENV_VARS = ['PATH', 'HOME', 'SHELL', 'TERM', 'USER', 'LANG', 'LC_ALL'];
-/**
- * Check if the config is a ClaudeCompatibleProvider (new system)
- * by checking for the 'models' array property
- */
-function isClaudeCompatibleProvider(config: ProviderConfig): config is ClaudeCompatibleProvider {
-  return 'models' in config && Array.isArray(config.models);
-}
 /**
  * Build environment for the SDK with only explicitly allowed variables.
- * When a provider/profile is provided, uses its configuration (clean switch - don't inherit from process.env).
- * When no provider is provided, uses direct Anthropic API settings from process.env.
+ * When a profile is provided, uses profile configuration (clean switch - don't inherit from process.env).
+ * When no profile is provided, uses direct Anthropic API settings from process.env.
  *
- * Supports both:
- * - ClaudeCompatibleProvider (new system with models[] array)
- * - ClaudeApiProfile (legacy system with modelMappings)
- *
- * @param providerConfig - Optional provider configuration for alternative endpoint
+ * @param profile - Optional Claude API profile for alternative endpoint configuration
  * @param credentials - Optional credentials object for resolving 'credentials' apiKeySource
  */
 function buildEnv(
-  providerConfig?: ProviderConfig,
+  profile?: ClaudeApiProfile,
   credentials?: Credentials
 ): Record<string, string | undefined> {
   const env: Record<string, string | undefined> = {};
-  if (providerConfig) {
-    // Use provider configuration (clean switch - don't inherit non-system vars from process.env)
-    logger.debug('[buildEnv] Using provider configuration:', {
-      name: providerConfig.name,
-      baseUrl: providerConfig.baseUrl,
-      apiKeySource: providerConfig.apiKeySource ?? 'inline',
-      isNewProvider: isClaudeCompatibleProvider(providerConfig),
+  if (profile) {
+    // Use profile configuration (clean switch - don't inherit non-system vars from process.env)
+    logger.debug('Building environment from Claude API profile:', {
+      name: profile.name,
+      apiKeySource: profile.apiKeySource ?? 'inline',
     });
     // Resolve API key based on source strategy
     let apiKey: string | undefined;
-    const source = providerConfig.apiKeySource ?? 'inline'; // Default to inline for backwards compat
+    const source = profile.apiKeySource ?? 'inline'; // Default to inline for backwards compat
     switch (source) {
       case 'inline':
-        apiKey = providerConfig.apiKey;
+        apiKey = profile.apiKey;
         break;
       case 'env':
        apiKey = process.env.ANTHROPIC_API_KEY;
@@ -113,55 +90,46 @@ function buildEnv(
     // Warn if no API key found
     if (!apiKey) {
-      logger.warn(`No API key found for provider "${providerConfig.name}" with source "${source}"`);
+      logger.warn(`No API key found for profile "${profile.name}" with source "${source}"`);
     }
     // Authentication
-    if (providerConfig.useAuthToken) {
+    if (profile.useAuthToken) {
      env['ANTHROPIC_AUTH_TOKEN'] = apiKey;
     } else {
      env['ANTHROPIC_API_KEY'] = apiKey;
     }
     // Endpoint configuration
-    env['ANTHROPIC_BASE_URL'] = providerConfig.baseUrl;
-    logger.debug(`[buildEnv] Set ANTHROPIC_BASE_URL to: ${providerConfig.baseUrl}`);
-    if (providerConfig.timeoutMs) {
-      env['API_TIMEOUT_MS'] = String(providerConfig.timeoutMs);
+    env['ANTHROPIC_BASE_URL'] = profile.baseUrl;
+    if (profile.timeoutMs) {
+      env['API_TIMEOUT_MS'] = String(profile.timeoutMs);
     }
-    // Model mappings - only for legacy ClaudeApiProfile
-    // For ClaudeCompatibleProvider, the model is passed directly (no mapping needed)
-    if (!isClaudeCompatibleProvider(providerConfig) && providerConfig.modelMappings) {
-      if (providerConfig.modelMappings.haiku) {
-        env['ANTHROPIC_DEFAULT_HAIKU_MODEL'] = providerConfig.modelMappings.haiku;
-      }
-      if (providerConfig.modelMappings.sonnet) {
-        env['ANTHROPIC_DEFAULT_SONNET_MODEL'] = providerConfig.modelMappings.sonnet;
-      }
-      if (providerConfig.modelMappings.opus) {
-        env['ANTHROPIC_DEFAULT_OPUS_MODEL'] = providerConfig.modelMappings.opus;
-      }
+    // Model mappings
+    if (profile.modelMappings?.haiku) {
+      env['ANTHROPIC_DEFAULT_HAIKU_MODEL'] = profile.modelMappings.haiku;
+    }
+    if (profile.modelMappings?.sonnet) {
+      env['ANTHROPIC_DEFAULT_SONNET_MODEL'] = profile.modelMappings.sonnet;
+    }
+    if (profile.modelMappings?.opus) {
+      env['ANTHROPIC_DEFAULT_OPUS_MODEL'] = profile.modelMappings.opus;
     }
     // Traffic control
-    if (providerConfig.disableNonessentialTraffic) {
+    if (profile.disableNonessentialTraffic) {
       env['CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC'] = '1';
     }
   } else {
-    // Use direct Anthropic API - pass through credentials or environment variables
-    // This supports:
-    // 1. API Key mode: ANTHROPIC_API_KEY from credentials (UI settings) or env
+    // Use direct Anthropic API - two modes:
+    // 1. API Key mode: ANTHROPIC_API_KEY from credentials/env
     // 2. Claude Max plan: Uses CLI OAuth auth (SDK handles this automatically)
-    // 3. Custom endpoints via ANTHROPIC_BASE_URL env var (backward compatibility)
     //
-    // Priority: credentials file (UI settings) -> environment variable
-    // Note: Only auth and endpoint vars are passed. Model mappings and traffic
-    // control are NOT passed (those require a profile for explicit configuration).
-    if (credentials?.apiKeys?.anthropic) {
-      env['ANTHROPIC_API_KEY'] = credentials.apiKeys.anthropic;
-    } else if (process.env.ANTHROPIC_API_KEY) {
+    // IMPORTANT: Do NOT set any profile vars (base URL, model mappings, etc.)
+    // This ensures clean switching - only pass through what's in process.env
+    if (process.env.ANTHROPIC_API_KEY) {
       env['ANTHROPIC_API_KEY'] = process.env.ANTHROPIC_API_KEY;
     }
     // If using Claude Max plan via CLI auth, the SDK handles auth automatically
@@ -170,10 +138,9 @@ function buildEnv(
     if (process.env.ANTHROPIC_AUTH_TOKEN) {
       env['ANTHROPIC_AUTH_TOKEN'] = process.env.ANTHROPIC_AUTH_TOKEN;
     }
-    // Pass through ANTHROPIC_BASE_URL if set in environment (backward compatibility)
-    if (process.env.ANTHROPIC_BASE_URL) {
-      env['ANTHROPIC_BASE_URL'] = process.env.ANTHROPIC_BASE_URL;
-    }
+    // Do NOT set ANTHROPIC_BASE_URL - let SDK use default Anthropic endpoint
+    // Do NOT set model mappings - use standard Claude model names
+    // Do NOT set CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC
   }
   // Always add system vars from process.env
@@ -211,14 +178,9 @@ export class ClaudeProvider extends BaseProvider {
       sdkSessionId,
       thinkingLevel,
       claudeApiProfile,
-      claudeCompatibleProvider,
       credentials,
     } = options;
-    // Determine which provider config to use
-    // claudeCompatibleProvider takes precedence over claudeApiProfile
-    const providerConfig = claudeCompatibleProvider || claudeApiProfile;
     // Convert thinking level to token budget
     const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
@@ -229,9 +191,9 @@ export class ClaudeProvider extends BaseProvider {
       maxTurns,
       cwd,
       // Pass only explicitly allowed environment variables to SDK
-      // When a provider is active, uses provider settings (clean switch)
-      // When no provider, uses direct Anthropic API (from process.env or CLI OAuth)
-      env: buildEnv(providerConfig, credentials),
+      // When a profile is active, uses profile settings (clean switch)
+      // When no profile, uses direct Anthropic API (from process.env or CLI OAuth)
+      env: buildEnv(claudeApiProfile, credentials),
       // Pass through allowedTools if provided by caller (decided by sdk-options.ts)
       ...(allowedTools && { allowedTools }),
       // AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
@@ -276,18 +238,6 @@ export class ClaudeProvider extends BaseProvider {
       promptPayload = prompt;
     }
-    // Log the environment being passed to the SDK for debugging
-    const envForSdk = sdkOptions.env as Record<string, string | undefined>;
-    logger.debug('[ClaudeProvider] SDK Configuration:', {
-      model: sdkOptions.model,
-      baseUrl: envForSdk?.['ANTHROPIC_BASE_URL'] || '(default Anthropic API)',
-      hasApiKey: !!envForSdk?.['ANTHROPIC_API_KEY'],
-      hasAuthToken: !!envForSdk?.['ANTHROPIC_AUTH_TOKEN'],
-      providerName: providerConfig?.name || '(direct Anthropic)',
-      maxTurns: sdkOptions.maxTurns,
-      maxThinkingTokens: sdkOptions.maxThinkingTokens,
-    });
     // Execute via Claude Agent SDK
     try {
       const stream = query({ prompt: promptPayload, options: sdkOptions });
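Aside: the apiKeySource handling above reduces to a small helper. The sketch below is illustrative only; ProfileLike and CredentialsLike are trimmed stand-ins for the real ClaudeApiProfile and Credentials types, and the 'credentials' branch assumes the same credentials.apiKeys.anthropic shape that appears elsewhere in this diff.

// Illustrative sketch of the key-resolution strategy (not the shipped code).
type ApiKeySource = 'inline' | 'env' | 'credentials';

interface ProfileLike {
  name: string;
  apiKey?: string;
  apiKeySource?: ApiKeySource;
}

interface CredentialsLike {
  apiKeys?: { anthropic?: string };
}

function resolveApiKey(profile: ProfileLike, credentials?: CredentialsLike): string | undefined {
  // Default to 'inline' for backwards compatibility, matching buildEnv()
  switch (profile.apiKeySource ?? 'inline') {
    case 'inline':
      return profile.apiKey;
    case 'env':
      return process.env.ANTHROPIC_API_KEY;
    case 'credentials':
      // Assumption: mirrors the credentials?.apiKeys?.anthropic lookup used for the direct API path
      return credentials?.apiKeys?.anthropic;
    default:
      return undefined;
  }
}

// A profile that shares the key stored in the credentials file:
const key = resolveApiKey({ name: 'OpenRouter', apiKeySource: 'credentials' }, { apiKeys: { anthropic: 'sk-ant-example' } });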
View File
@@ -98,14 +98,9 @@ const TEXT_ENCODING = 'utf-8';
  * This is the "no output" timeout - if the CLI doesn't produce any JSONL output
  * for this duration, the process is killed. For reasoning models with high
  * reasoning effort, this timeout is dynamically extended via calculateReasoningTimeout().
- *
- * For feature generation (which can generate 50+ features), we use a much longer
- * base timeout (5 minutes) since Codex models are slower at generating large JSON responses.
- *
  * @see calculateReasoningTimeout from @automaker/types
  */
 const CODEX_CLI_TIMEOUT_MS = DEFAULT_TIMEOUT_MS;
-const CODEX_FEATURE_GENERATION_BASE_TIMEOUT_MS = 300000; // 5 minutes for feature generation
 const CONTEXT_WINDOW_256K = 256000;
 const MAX_OUTPUT_32K = 32000;
 const MAX_OUTPUT_16K = 16000;
@@ -832,14 +827,7 @@ export class CodexProvider extends BaseProvider {
     // Higher reasoning effort (e.g., 'xhigh' for "xtra thinking" mode) requires more time
     // for the model to generate reasoning tokens before producing output.
     // This fixes GitHub issue #530 where features would get stuck with reasoning models.
-    //
-    // For feature generation with 'xhigh', use the extended 5-minute base timeout
-    // since generating 50+ features takes significantly longer than normal operations.
-    const baseTimeout =
-      options.reasoningEffort === 'xhigh'
-        ? CODEX_FEATURE_GENERATION_BASE_TIMEOUT_MS
-        : CODEX_CLI_TIMEOUT_MS;
-    const timeout = calculateReasoningTimeout(options.reasoningEffort, baseTimeout);
+    const timeout = calculateReasoningTimeout(options.reasoningEffort, CODEX_CLI_TIMEOUT_MS);
     const stream = spawnJSONLProcess({
       command: commandPath,
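For context, the single call above is now the only place the Codex timeout is derived. calculateReasoningTimeout comes from @automaker/types and its internals are not part of this diff, so the stand-in below only illustrates the intended shape: a base "no output" timeout that grows with reasoning effort. The effort levels other than 'xhigh' and the multipliers are assumptions.

// Hypothetical stand-in for calculateReasoningTimeout; the real scaling lives in @automaker/types.
type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'; // 'xhigh' is the documented "xtra thinking" level; others assumed

function reasoningTimeoutSketch(effort: ReasoningEffort | undefined, baseMs: number): number {
  const multiplier = effort === 'xhigh' ? 4 : effort === 'high' ? 2 : 1; // placeholder factors
  return baseMs * multiplier;
}

reasoningTimeoutSketch('xhigh', 60_000); // 240000 - more headroom before the no-output watchdog kills the CLI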
View File
@@ -1,942 +0,0 @@
/**
* Copilot Provider - Executes queries using the GitHub Copilot SDK
*
* Uses the official @github/copilot-sdk for:
* - Session management and streaming responses
* - GitHub OAuth authentication (via gh CLI)
* - Tool call handling and permission management
* - Runtime model discovery
*
* Based on https://github.com/github/copilot-sdk
*/
import { execSync } from 'child_process';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
import { CliProvider, type CliSpawnConfig, type CliErrorInfo } from './cli-provider.js';
import type {
ProviderConfig,
ExecuteOptions,
ProviderMessage,
InstallationStatus,
ModelDefinition,
} from './types.js';
// Note: validateBareModelId is not used because Copilot's bare model IDs
// legitimately contain prefixes like claude-, gemini-, gpt-
import {
COPILOT_MODEL_MAP,
type CopilotAuthStatus,
type CopilotRuntimeModel,
} from '@automaker/types';
import { createLogger, isAbortError } from '@automaker/utils';
import { CopilotClient, type PermissionRequest } from '@github/copilot-sdk';
import {
normalizeTodos,
normalizeFilePathInput,
normalizeCommandInput,
normalizePatternInput,
} from './tool-normalization.js';
// Create logger for this module
const logger = createLogger('CopilotProvider');
// Default bare model (without copilot- prefix) for SDK calls
const DEFAULT_BARE_MODEL = 'claude-sonnet-4.5';
// =============================================================================
// SDK Event Types (from @github/copilot-sdk)
// =============================================================================
/**
* SDK session event data types
*/
interface SdkEvent {
type: string;
data?: unknown;
}
interface SdkMessageEvent extends SdkEvent {
type: 'assistant.message';
data: {
content: string;
};
}
// Note: SdkMessageDeltaEvent is not used - we skip delta events to reduce noise
// The final assistant.message event contains the complete content
interface SdkToolExecutionStartEvent extends SdkEvent {
type: 'tool.execution_start';
data: {
toolName: string;
toolCallId: string;
input?: Record<string, unknown>;
};
}
interface SdkToolExecutionEndEvent extends SdkEvent {
type: 'tool.execution_end';
data: {
toolName: string;
toolCallId: string;
result?: string;
error?: string;
};
}
interface SdkSessionIdleEvent extends SdkEvent {
type: 'session.idle';
}
interface SdkSessionErrorEvent extends SdkEvent {
type: 'session.error';
data: {
message: string;
code?: string;
};
}
// =============================================================================
// Error Codes
// =============================================================================
export enum CopilotErrorCode {
NOT_INSTALLED = 'COPILOT_NOT_INSTALLED',
NOT_AUTHENTICATED = 'COPILOT_NOT_AUTHENTICATED',
RATE_LIMITED = 'COPILOT_RATE_LIMITED',
MODEL_UNAVAILABLE = 'COPILOT_MODEL_UNAVAILABLE',
NETWORK_ERROR = 'COPILOT_NETWORK_ERROR',
PROCESS_CRASHED = 'COPILOT_PROCESS_CRASHED',
TIMEOUT = 'COPILOT_TIMEOUT',
CLI_ERROR = 'COPILOT_CLI_ERROR',
SDK_ERROR = 'COPILOT_SDK_ERROR',
UNKNOWN = 'COPILOT_UNKNOWN_ERROR',
}
export interface CopilotError extends Error {
code: CopilotErrorCode;
recoverable: boolean;
suggestion?: string;
}
// =============================================================================
// Tool Name Normalization
// =============================================================================
/**
* Copilot SDK tool name to standard tool name mapping
*
* Maps Copilot CLI tool names to our standard tool names for consistent UI display.
* Tool names are case-insensitive (normalized to lowercase before lookup).
*/
const COPILOT_TOOL_NAME_MAP: Record<string, string> = {
// File operations
read_file: 'Read',
read: 'Read',
view: 'Read', // Copilot uses 'view' for reading files
read_many_files: 'Read',
write_file: 'Write',
write: 'Write',
create_file: 'Write',
edit_file: 'Edit',
edit: 'Edit',
replace: 'Edit',
patch: 'Edit',
// Shell operations
run_shell: 'Bash',
run_shell_command: 'Bash',
shell: 'Bash',
bash: 'Bash',
execute: 'Bash',
terminal: 'Bash',
// Search operations
search: 'Grep',
grep: 'Grep',
search_file_content: 'Grep',
find_files: 'Glob',
glob: 'Glob',
list_dir: 'Ls',
list_directory: 'Ls',
ls: 'Ls',
// Web operations
web_fetch: 'WebFetch',
fetch: 'WebFetch',
web_search: 'WebSearch',
search_web: 'WebSearch',
google_web_search: 'WebSearch',
// Todo operations
todo_write: 'TodoWrite',
write_todos: 'TodoWrite',
update_todos: 'TodoWrite',
// Planning/intent operations (Copilot-specific)
report_intent: 'ReportIntent', // Keep as-is, it's a planning tool
think: 'Think',
plan: 'Plan',
};
/**
* Normalize Copilot tool names to standard tool names
*/
function normalizeCopilotToolName(copilotToolName: string): string {
const lowerName = copilotToolName.toLowerCase();
return COPILOT_TOOL_NAME_MAP[lowerName] || copilotToolName;
}
/**
* Normalize Copilot tool input parameters to standard format
*
* Maps Copilot's parameter names to our standard parameter names.
* Uses shared utilities from tool-normalization.ts for common normalizations.
*/
function normalizeCopilotToolInput(
toolName: string,
input: Record<string, unknown>
): Record<string, unknown> {
const normalizedName = normalizeCopilotToolName(toolName);
// Normalize todo_write / write_todos: ensure proper format
if (normalizedName === 'TodoWrite' && Array.isArray(input.todos)) {
return { todos: normalizeTodos(input.todos) };
}
// Normalize file path parameters for Read/Write/Edit tools
if (normalizedName === 'Read' || normalizedName === 'Write' || normalizedName === 'Edit') {
return normalizeFilePathInput(input);
}
// Normalize shell command parameters for Bash tool
if (normalizedName === 'Bash') {
return normalizeCommandInput(input);
}
// Normalize search parameters for Grep tool
if (normalizedName === 'Grep') {
return normalizePatternInput(input);
}
return input;
}
/**
* CopilotProvider - Integrates GitHub Copilot SDK as an AI provider
*
* Features:
* - GitHub OAuth authentication
* - SDK-based session management
* - Runtime model discovery
* - Tool call normalization
* - Per-execution working directory support
*/
export class CopilotProvider extends CliProvider {
private runtimeModels: CopilotRuntimeModel[] | null = null;
constructor(config: ProviderConfig = {}) {
super(config);
// Trigger CLI detection on construction
this.ensureCliDetected();
}
// ==========================================================================
// CliProvider Abstract Method Implementations
// ==========================================================================
getName(): string {
return 'copilot';
}
getCliName(): string {
return 'copilot';
}
getSpawnConfig(): CliSpawnConfig {
return {
windowsStrategy: 'npx', // Copilot CLI can be run via npx
npxPackage: '@github/copilot', // Official GitHub Copilot CLI package
commonPaths: {
linux: [
path.join(os.homedir(), '.local/bin/copilot'),
'/usr/local/bin/copilot',
path.join(os.homedir(), '.npm-global/bin/copilot'),
],
darwin: [
path.join(os.homedir(), '.local/bin/copilot'),
'/usr/local/bin/copilot',
'/opt/homebrew/bin/copilot',
path.join(os.homedir(), '.npm-global/bin/copilot'),
],
win32: [
path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'copilot.cmd'),
path.join(os.homedir(), '.npm-global', 'copilot.cmd'),
],
},
};
}
/**
* Extract prompt text from ExecuteOptions
*
* Note: CopilotProvider does not yet support vision/image inputs.
* If non-text content is provided, an error is thrown.
*/
private extractPromptText(options: ExecuteOptions): string {
if (typeof options.prompt === 'string') {
return options.prompt;
} else if (Array.isArray(options.prompt)) {
// Check for non-text content (images, etc.) which we don't support yet
const hasNonText = options.prompt.some((p) => p.type !== 'text');
if (hasNonText) {
throw new Error(
'CopilotProvider does not yet support non-text prompt parts (e.g., images). ' +
'Please use text-only prompts or switch to a provider that supports vision.'
);
}
return options.prompt
.filter((p) => p.type === 'text' && p.text)
.map((p) => p.text)
.join('\n');
} else {
throw new Error('Invalid prompt format');
}
}
/**
* Not used with SDK approach - kept for interface compatibility
*/
buildCliArgs(_options: ExecuteOptions): string[] {
return [];
}
/**
* Convert SDK event to AutoMaker ProviderMessage format
*/
normalizeEvent(event: unknown): ProviderMessage | null {
const sdkEvent = event as SdkEvent;
switch (sdkEvent.type) {
case 'assistant.message': {
const messageEvent = sdkEvent as SdkMessageEvent;
return {
type: 'assistant',
message: {
role: 'assistant',
content: [{ type: 'text', text: messageEvent.data.content }],
},
};
}
case 'assistant.message_delta': {
// Skip delta events - they create too much noise
// The final assistant.message event has the complete content
return null;
}
case 'tool.execution_start': {
const toolEvent = sdkEvent as SdkToolExecutionStartEvent;
const normalizedName = normalizeCopilotToolName(toolEvent.data.toolName);
const normalizedInput = toolEvent.data.input
? normalizeCopilotToolInput(toolEvent.data.toolName, toolEvent.data.input)
: {};
return {
type: 'assistant',
message: {
role: 'assistant',
content: [
{
type: 'tool_use',
name: normalizedName,
tool_use_id: toolEvent.data.toolCallId,
input: normalizedInput,
},
],
},
};
}
case 'tool.execution_end': {
const toolResultEvent = sdkEvent as SdkToolExecutionEndEvent;
const isError = !!toolResultEvent.data.error;
const content = isError
? `[ERROR] ${toolResultEvent.data.error}`
: toolResultEvent.data.result || '';
return {
type: 'assistant',
message: {
role: 'assistant',
content: [
{
type: 'tool_result',
tool_use_id: toolResultEvent.data.toolCallId,
content,
},
],
},
};
}
case 'session.idle': {
logger.debug('Copilot session idle');
return {
type: 'result',
subtype: 'success',
};
}
case 'session.error': {
const errorEvent = sdkEvent as SdkSessionErrorEvent;
return {
type: 'error',
error: errorEvent.data.message || 'Unknown error',
};
}
default:
logger.debug(`Unknown Copilot SDK event type: ${sdkEvent.type}`);
return null;
}
}
// ==========================================================================
// CliProvider Overrides
// ==========================================================================
/**
* Override error mapping for Copilot-specific error codes
*/
protected mapError(stderr: string, exitCode: number | null): CliErrorInfo {
const lower = stderr.toLowerCase();
if (
lower.includes('not authenticated') ||
lower.includes('please log in') ||
lower.includes('unauthorized') ||
lower.includes('login required') ||
lower.includes('authentication required') ||
lower.includes('github login')
) {
return {
code: CopilotErrorCode.NOT_AUTHENTICATED,
message: 'GitHub Copilot is not authenticated',
recoverable: true,
suggestion: 'Run "gh auth login" or "copilot auth login" to authenticate with GitHub',
};
}
if (
lower.includes('rate limit') ||
lower.includes('too many requests') ||
lower.includes('429') ||
lower.includes('quota exceeded')
) {
return {
code: CopilotErrorCode.RATE_LIMITED,
message: 'Copilot API rate limit exceeded',
recoverable: true,
suggestion: 'Wait a few minutes and try again',
};
}
if (
lower.includes('model not available') ||
lower.includes('invalid model') ||
lower.includes('unknown model') ||
lower.includes('model not found') ||
(lower.includes('not found') && lower.includes('404'))
) {
return {
code: CopilotErrorCode.MODEL_UNAVAILABLE,
message: 'Requested model is not available',
recoverable: true,
suggestion: `Try using "${DEFAULT_BARE_MODEL}" or select a different model`,
};
}
if (
lower.includes('network') ||
lower.includes('connection') ||
lower.includes('econnrefused') ||
lower.includes('timeout')
) {
return {
code: CopilotErrorCode.NETWORK_ERROR,
message: 'Network connection error',
recoverable: true,
suggestion: 'Check your internet connection and try again',
};
}
if (exitCode === 137 || lower.includes('killed') || lower.includes('sigterm')) {
return {
code: CopilotErrorCode.PROCESS_CRASHED,
message: 'Copilot CLI process was terminated',
recoverable: true,
suggestion: 'The process may have run out of memory. Try a simpler task.',
};
}
return {
code: CopilotErrorCode.UNKNOWN,
message: stderr || `Copilot CLI exited with code ${exitCode}`,
recoverable: false,
};
}
/**
* Override install instructions for Copilot-specific guidance
*/
protected getInstallInstructions(): string {
return 'Install with: npm install -g @github/copilot (or visit https://github.com/github/copilot)';
}
/**
* Execute a prompt using Copilot SDK with real-time streaming
*
* Creates a new CopilotClient for each execution with the correct working directory.
* Streams tool execution events in real-time for UI display.
*/
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
this.ensureCliDetected();
// Note: We don't use validateBareModelId here because Copilot's model IDs
// legitimately contain prefixes like claude-, gemini-, gpt- which are the
// actual model names from the Copilot CLI. We only need to ensure the
// copilot- prefix has been stripped by the ProviderFactory.
if (options.model?.startsWith('copilot-')) {
throw new Error(
`[CopilotProvider] Model ID should not have 'copilot-' prefix. Got: '${options.model}'. ` +
`The ProviderFactory should strip this prefix before passing to the provider.`
);
}
if (!this.cliPath) {
throw this.createError(
CopilotErrorCode.NOT_INSTALLED,
'Copilot CLI is not installed',
true,
this.getInstallInstructions()
);
}
const promptText = this.extractPromptText(options);
const bareModel = options.model || DEFAULT_BARE_MODEL;
const workingDirectory = options.cwd || process.cwd();
logger.debug(
`CopilotProvider.executeQuery called with model: "${bareModel}", cwd: "${workingDirectory}"`
);
logger.debug(`Prompt length: ${promptText.length} characters`);
// Create a client for this execution with the correct working directory
const client = new CopilotClient({
logLevel: 'warning',
autoRestart: false,
cwd: workingDirectory,
});
// Use an async queue to bridge callback-based SDK events to async generator
const eventQueue: SdkEvent[] = [];
let resolveWaiting: (() => void) | null = null;
let sessionComplete = false;
let sessionError: Error | null = null;
const pushEvent = (event: SdkEvent) => {
eventQueue.push(event);
if (resolveWaiting) {
resolveWaiting();
resolveWaiting = null;
}
};
const waitForEvent = (): Promise<void> => {
if (eventQueue.length > 0 || sessionComplete) {
return Promise.resolve();
}
return new Promise((resolve) => {
resolveWaiting = resolve;
});
};
try {
await client.start();
logger.debug(`CopilotClient started with cwd: ${workingDirectory}`);
// Create session with streaming enabled for real-time events
const session = await client.createSession({
model: bareModel,
streaming: true,
// AUTONOMOUS MODE: Auto-approve all permission requests.
// AutoMaker is designed for fully autonomous AI agent operation.
// Security boundary is provided by Docker containerization (see CLAUDE.md).
// User is warned about this at app startup.
onPermissionRequest: async (
request: PermissionRequest
): Promise<{ kind: 'approved' } | { kind: 'denied-interactively-by-user' }> => {
logger.debug(`Permission request: ${request.kind}`);
return { kind: 'approved' };
},
});
const sessionId = session.sessionId;
logger.debug(`Session created: ${sessionId}`);
// Set up event handler to push events to queue
session.on((event: SdkEvent) => {
logger.debug(`SDK event: ${event.type}`);
if (event.type === 'session.idle') {
sessionComplete = true;
pushEvent(event);
} else if (event.type === 'session.error') {
const errorEvent = event as SdkSessionErrorEvent;
sessionError = new Error(errorEvent.data.message);
sessionComplete = true;
pushEvent(event);
} else {
// Push all other events (tool.execution_start, tool.execution_end, assistant.message, etc.)
pushEvent(event);
}
});
// Send the prompt (non-blocking)
await session.send({ prompt: promptText });
// Process events as they arrive
while (!sessionComplete || eventQueue.length > 0) {
await waitForEvent();
// Check for errors first (before processing events to avoid race condition)
if (sessionError) {
await session.destroy();
await client.stop();
throw sessionError;
}
// Process all queued events
while (eventQueue.length > 0) {
const event = eventQueue.shift()!;
const normalized = this.normalizeEvent(event);
if (normalized) {
// Add session_id if not present
if (!normalized.session_id) {
normalized.session_id = sessionId;
}
yield normalized;
}
}
}
// Cleanup
await session.destroy();
await client.stop();
logger.debug('CopilotClient stopped successfully');
} catch (error) {
// Ensure client is stopped on error
try {
await client.stop();
} catch (cleanupError) {
// Log but don't throw cleanup errors - the original error is more important
logger.debug(`Failed to stop client during cleanup: ${cleanupError}`);
}
if (isAbortError(error)) {
logger.debug('Query aborted');
return;
}
// Map errors to CopilotError
if (error instanceof Error) {
logger.error(`Copilot SDK error: ${error.message}`);
const errorInfo = this.mapError(error.message, null);
throw this.createError(
errorInfo.code as CopilotErrorCode,
errorInfo.message,
errorInfo.recoverable,
errorInfo.suggestion
);
}
throw error;
}
}
// ==========================================================================
// Copilot-Specific Methods
// ==========================================================================
/**
* Create a CopilotError with details
*/
private createError(
code: CopilotErrorCode,
message: string,
recoverable: boolean = false,
suggestion?: string
): CopilotError {
const error = new Error(message) as CopilotError;
error.code = code;
error.recoverable = recoverable;
error.suggestion = suggestion;
error.name = 'CopilotError';
return error;
}
/**
* Get Copilot CLI version
*/
async getVersion(): Promise<string | null> {
this.ensureCliDetected();
if (!this.cliPath) return null;
try {
const result = execSync(`"${this.cliPath}" --version`, {
encoding: 'utf8',
timeout: 5000,
stdio: 'pipe',
}).trim();
return result;
} catch {
return null;
}
}
/**
* Check authentication status
*
* Uses GitHub CLI (gh) to check Copilot authentication status.
* The Copilot CLI relies on gh auth for authentication.
*/
async checkAuth(): Promise<CopilotAuthStatus> {
this.ensureCliDetected();
if (!this.cliPath) {
logger.debug('checkAuth: CLI not found');
return { authenticated: false, method: 'none' };
}
logger.debug('checkAuth: Starting credential check');
// Try to check GitHub CLI authentication status first
// The Copilot CLI uses gh auth for authentication
try {
const ghStatus = execSync('gh auth status --hostname github.com', {
encoding: 'utf8',
timeout: 10000,
stdio: 'pipe',
});
logger.debug(`checkAuth: gh auth status output: ${ghStatus.substring(0, 200)}`);
// Parse gh auth status output
const loggedInMatch = ghStatus.match(/Logged in to github\.com account (\S+)/);
if (loggedInMatch) {
return {
authenticated: true,
method: 'oauth',
login: loggedInMatch[1],
host: 'github.com',
};
}
// Check for token auth
if (ghStatus.includes('Logged in') || ghStatus.includes('Token:')) {
return {
authenticated: true,
method: 'oauth',
host: 'github.com',
};
}
} catch (ghError) {
logger.debug(`checkAuth: gh auth status failed: ${ghError}`);
}
// Try Copilot-specific auth check if gh is not available
try {
const result = execSync(`"${this.cliPath}" auth status`, {
encoding: 'utf8',
timeout: 10000,
stdio: 'pipe',
});
logger.debug(`checkAuth: copilot auth status output: ${result.substring(0, 200)}`);
if (result.includes('authenticated') || result.includes('logged in')) {
return {
authenticated: true,
method: 'cli',
};
}
} catch (copilotError) {
logger.debug(`checkAuth: copilot auth status failed: ${copilotError}`);
}
// Check for GITHUB_TOKEN environment variable
if (process.env.GITHUB_TOKEN) {
logger.debug('checkAuth: Found GITHUB_TOKEN environment variable');
return {
authenticated: true,
method: 'oauth',
statusMessage: 'Using GITHUB_TOKEN environment variable',
};
}
// Check for gh config file
const ghConfigPath = path.join(os.homedir(), '.config', 'gh', 'hosts.yml');
try {
await fs.access(ghConfigPath);
const content = await fs.readFile(ghConfigPath, 'utf8');
if (content.includes('github.com') && content.includes('oauth_token')) {
logger.debug('checkAuth: Found gh config with oauth_token');
return {
authenticated: true,
method: 'oauth',
host: 'github.com',
};
}
} catch {
logger.debug('checkAuth: No gh config found');
}
// No credentials found
logger.debug('checkAuth: No valid credentials found');
return {
authenticated: false,
method: 'none',
error:
'No authentication configured. Run "gh auth login" or install GitHub Copilot extension.',
};
}
/**
* Fetch available models from the CLI at runtime
*/
async fetchRuntimeModels(): Promise<CopilotRuntimeModel[]> {
this.ensureCliDetected();
if (!this.cliPath) {
return [];
}
try {
// Try to list models using the CLI
const result = execSync(`"${this.cliPath}" models list --format json`, {
encoding: 'utf8',
timeout: 15000,
stdio: 'pipe',
});
const models = JSON.parse(result) as CopilotRuntimeModel[];
this.runtimeModels = models;
logger.debug(`Fetched ${models.length} runtime models from Copilot CLI`);
return models;
} catch (error) {
// Clear cache on failure to avoid returning stale data
this.runtimeModels = null;
logger.debug(`Failed to fetch runtime models: ${error}`);
return [];
}
}
/**
* Detect installation status (required by BaseProvider)
*/
async detectInstallation(): Promise<InstallationStatus> {
const installed = await this.isInstalled();
const version = installed ? await this.getVersion() : undefined;
const auth = await this.checkAuth();
return {
installed,
version: version || undefined,
path: this.cliPath || undefined,
method: 'cli',
authenticated: auth.authenticated,
};
}
/**
* Get the detected CLI path (public accessor for status endpoints)
*/
getCliPath(): string | null {
this.ensureCliDetected();
return this.cliPath;
}
/**
* Get available Copilot models
*
* Returns both static model definitions and runtime-discovered models
*/
getAvailableModels(): ModelDefinition[] {
// Start with static model definitions - explicitly typed to allow runtime models
const staticModels: ModelDefinition[] = Object.entries(COPILOT_MODEL_MAP).map(
([id, config]) => ({
id, // Full model ID with copilot- prefix
name: config.label,
modelString: id.replace('copilot-', ''), // Bare model for CLI
provider: 'copilot',
description: config.description,
supportsTools: config.supportsTools,
supportsVision: config.supportsVision,
contextWindow: config.contextWindow,
})
);
// Add runtime models if available (discovered via CLI)
if (this.runtimeModels) {
for (const runtimeModel of this.runtimeModels) {
// Skip if already in static list
const staticId = `copilot-${runtimeModel.id}`;
if (staticModels.some((m) => m.id === staticId)) {
continue;
}
staticModels.push({
id: staticId,
name: runtimeModel.name || runtimeModel.id,
modelString: runtimeModel.id,
provider: 'copilot',
description: `Dynamic model: ${runtimeModel.name || runtimeModel.id}`,
supportsTools: true,
supportsVision: runtimeModel.capabilities?.supportsVision ?? false,
contextWindow: runtimeModel.capabilities?.maxInputTokens,
});
}
}
return staticModels;
}
/**
* Check if a feature is supported
*
* Note: Vision is NOT currently supported - the SDK doesn't handle image inputs yet.
* This may change in future versions of the Copilot SDK.
*/
supportsFeature(feature: string): boolean {
const supported = ['tools', 'text', 'streaming'];
return supported.includes(feature);
}
/**
* Check if runtime models have been cached
*/
hasCachedModels(): boolean {
return this.runtimeModels !== null && this.runtimeModels.length > 0;
}
/**
* Clear the runtime model cache
*/
clearModelCache(): void {
this.runtimeModels = null;
logger.debug('Cleared Copilot model cache');
}
/**
* Refresh models from CLI and return all available models
*/
async refreshModels(): Promise<ModelDefinition[]> {
logger.debug('Refreshing Copilot models from CLI');
await this.fetchRuntimeModels();
return this.getAvailableModels();
}
}
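Even though this provider is removed in this diff, its executeQuery loop demonstrates a reusable trick: bridging an on(event) callback API into an async generator with a queue plus a wake-up promise. A stripped-down, SDK-agnostic sketch follows; the Emitter interface here is hypothetical and not part of @github/copilot-sdk.

// Generic callback-to-async-generator bridge (illustrative; not tied to any real SDK).
interface Emitter<E> {
  on(handler: (event: E) => void): void;
}

async function* toAsyncIterable<E>(source: Emitter<E>, isLast: (e: E) => boolean): AsyncGenerator<E> {
  const queue: E[] = [];
  let done = false;
  let wake: (() => void) | null = null;

  source.on((event) => {
    queue.push(event);
    if (isLast(event)) done = true;
    wake?.(); // wake the consumer if it is parked below
    wake = null;
  });

  while (!done || queue.length > 0) {
    if (queue.length === 0) {
      await new Promise<void>((resolve) => (wake = resolve)); // park until the next event arrives
    }
    while (queue.length > 0) {
      yield queue.shift()!;
    }
  }
}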
View File
@@ -337,11 +337,10 @@ export class CursorProvider extends CliProvider {
       '--stream-partial-output' // Real-time streaming
     );
-    // In read-only mode, use --mode ask for Q&A style (no tools)
-    // Otherwise, add --force to allow file edits
-    if (options.readOnly) {
-      cliArgs.push('--mode', 'ask');
-    } else {
+    // Only add --force if NOT in read-only mode
+    // Without --force, Cursor CLI suggests changes but doesn't apply them
+    // With --force, Cursor CLI can actually edit files
+    if (!options.readOnly) {
       cliArgs.push('--force');
     }
@@ -673,13 +672,10 @@ export class CursorProvider extends CliProvider {
       );
     }
-    // Embed system prompt into user prompt (Cursor CLI doesn't support separate system messages)
-    const effectiveOptions = this.embedSystemPromptIntoPrompt(options);
     // Extract prompt text to pass via stdin (avoids shell escaping issues)
-    const promptText = this.extractPromptText(effectiveOptions);
-    const cliArgs = this.buildCliArgs(effectiveOptions);
+    const promptText = this.extractPromptText(options);
+    const cliArgs = this.buildCliArgs(options);
     const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
     // Pass prompt via stdin to avoid shell interpretation of special characters
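Net effect of the two Cursor hunks above: the prompt and args are built from options directly, and the only mode-dependent flag left is --force. A rough sketch of the resulting flag set, mirroring the readOnly branch in the first hunk (all other arguments built elsewhere in buildCliArgs are omitted):

// Illustrative only.
function cursorModeFlagsSketch(readOnly: boolean): string[] {
  const args = ['--stream-partial-output']; // real-time streaming, always on
  if (!readOnly) {
    args.push('--force'); // without it, Cursor CLI suggests edits but does not apply them
  }
  return args;
}

cursorModeFlagsSketch(true);  // ['--stream-partial-output']
cursorModeFlagsSketch(false); // ['--stream-partial-output', '--force']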
View File
@@ -1,810 +0,0 @@
/**
* Gemini Provider - Executes queries using the Gemini CLI
*
* Extends CliProvider with Gemini-specific:
* - Event normalization for Gemini's JSONL streaming format
* - Google account and API key authentication support
* - Thinking level configuration
*
* Based on https://github.com/google-gemini/gemini-cli
*/
import { execSync } from 'child_process';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
import { CliProvider, type CliSpawnConfig, type CliErrorInfo } from './cli-provider.js';
import type {
ProviderConfig,
ExecuteOptions,
ProviderMessage,
InstallationStatus,
ModelDefinition,
ContentBlock,
} from './types.js';
import { validateBareModelId } from '@automaker/types';
import { GEMINI_MODEL_MAP, type GeminiAuthStatus } from '@automaker/types';
import { createLogger, isAbortError } from '@automaker/utils';
import { spawnJSONLProcess } from '@automaker/platform';
import { normalizeTodos } from './tool-normalization.js';
// Create logger for this module
const logger = createLogger('GeminiProvider');
// =============================================================================
// Gemini Stream Event Types
// =============================================================================
/**
* Base event structure from Gemini CLI --output-format stream-json
*
* Actual CLI output format:
* {"type":"init","timestamp":"...","session_id":"...","model":"..."}
* {"type":"message","timestamp":"...","role":"user","content":"..."}
* {"type":"message","timestamp":"...","role":"assistant","content":"...","delta":true}
* {"type":"tool_use","timestamp":"...","tool_name":"...","tool_id":"...","parameters":{...}}
* {"type":"tool_result","timestamp":"...","tool_id":"...","status":"success","output":"..."}
* {"type":"result","timestamp":"...","status":"success","stats":{...}}
*/
interface GeminiStreamEvent {
type: 'init' | 'message' | 'tool_use' | 'tool_result' | 'result' | 'error';
timestamp?: string;
session_id?: string;
}
interface GeminiInitEvent extends GeminiStreamEvent {
type: 'init';
session_id: string;
model: string;
}
interface GeminiMessageEvent extends GeminiStreamEvent {
type: 'message';
role: 'user' | 'assistant';
content: string;
delta?: boolean;
session_id?: string;
}
interface GeminiToolUseEvent extends GeminiStreamEvent {
type: 'tool_use';
tool_id: string;
tool_name: string;
parameters: Record<string, unknown>;
session_id?: string;
}
interface GeminiToolResultEvent extends GeminiStreamEvent {
type: 'tool_result';
tool_id: string;
status: 'success' | 'error';
output: string;
session_id?: string;
}
interface GeminiResultEvent extends GeminiStreamEvent {
type: 'result';
status: 'success' | 'error';
stats?: {
total_tokens?: number;
input_tokens?: number;
output_tokens?: number;
cached?: number;
input?: number;
duration_ms?: number;
tool_calls?: number;
};
error?: string;
session_id?: string;
}
// =============================================================================
// Error Codes
// =============================================================================
export enum GeminiErrorCode {
NOT_INSTALLED = 'GEMINI_NOT_INSTALLED',
NOT_AUTHENTICATED = 'GEMINI_NOT_AUTHENTICATED',
RATE_LIMITED = 'GEMINI_RATE_LIMITED',
MODEL_UNAVAILABLE = 'GEMINI_MODEL_UNAVAILABLE',
NETWORK_ERROR = 'GEMINI_NETWORK_ERROR',
PROCESS_CRASHED = 'GEMINI_PROCESS_CRASHED',
TIMEOUT = 'GEMINI_TIMEOUT',
UNKNOWN = 'GEMINI_UNKNOWN_ERROR',
}
export interface GeminiError extends Error {
code: GeminiErrorCode;
recoverable: boolean;
suggestion?: string;
}
// =============================================================================
// Tool Name Normalization
// =============================================================================
/**
* Gemini CLI tool name to standard tool name mapping
* This allows the UI to properly categorize and display Gemini tool calls
*/
const GEMINI_TOOL_NAME_MAP: Record<string, string> = {
write_todos: 'TodoWrite',
read_file: 'Read',
read_many_files: 'Read',
replace: 'Edit',
write_file: 'Write',
run_shell_command: 'Bash',
search_file_content: 'Grep',
glob: 'Glob',
list_directory: 'Ls',
web_fetch: 'WebFetch',
google_web_search: 'WebSearch',
};
/**
* Normalize Gemini tool names to standard tool names
*/
function normalizeGeminiToolName(geminiToolName: string): string {
return GEMINI_TOOL_NAME_MAP[geminiToolName] || geminiToolName;
}
/**
* Normalize Gemini tool input parameters to standard format
*
* Uses shared normalizeTodos utility for consistent todo normalization.
*
* Gemini `write_todos` format:
* {"todos": [{"description": "Task text", "status": "pending|in_progress|completed|cancelled"}]}
*
* Claude `TodoWrite` format:
* {"todos": [{"content": "Task text", "status": "pending|in_progress|completed", "activeForm": "..."}]}
*/
function normalizeGeminiToolInput(
toolName: string,
input: Record<string, unknown>
): Record<string, unknown> {
// Normalize write_todos using shared utility
if (toolName === 'write_todos' && Array.isArray(input.todos)) {
return { todos: normalizeTodos(input.todos) };
}
return input;
}
/**
* GeminiProvider - Integrates Gemini CLI as an AI provider
*
* Features:
* - Google account OAuth login support
* - API key authentication (GEMINI_API_KEY)
* - Vertex AI support
* - Thinking level configuration
* - Streaming JSON output
*/
export class GeminiProvider extends CliProvider {
constructor(config: ProviderConfig = {}) {
super(config);
// Trigger CLI detection on construction
this.ensureCliDetected();
}
// ==========================================================================
// CliProvider Abstract Method Implementations
// ==========================================================================
getName(): string {
return 'gemini';
}
getCliName(): string {
return 'gemini';
}
getSpawnConfig(): CliSpawnConfig {
return {
windowsStrategy: 'npx', // Gemini CLI can be run via npx
npxPackage: '@google/gemini-cli', // Official Google Gemini CLI package
commonPaths: {
linux: [
path.join(os.homedir(), '.local/bin/gemini'),
'/usr/local/bin/gemini',
path.join(os.homedir(), '.npm-global/bin/gemini'),
],
darwin: [
path.join(os.homedir(), '.local/bin/gemini'),
'/usr/local/bin/gemini',
'/opt/homebrew/bin/gemini',
path.join(os.homedir(), '.npm-global/bin/gemini'),
],
win32: [
path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'gemini.cmd'),
path.join(os.homedir(), '.npm-global', 'gemini.cmd'),
],
},
};
}
/**
* Extract prompt text from ExecuteOptions
*/
private extractPromptText(options: ExecuteOptions): string {
if (typeof options.prompt === 'string') {
return options.prompt;
} else if (Array.isArray(options.prompt)) {
return options.prompt
.filter((p) => p.type === 'text' && p.text)
.map((p) => p.text)
.join('\n');
} else {
throw new Error('Invalid prompt format');
}
}
buildCliArgs(options: ExecuteOptions): string[] {
// Model comes in stripped of provider prefix (e.g., '2.5-flash' from 'gemini-2.5-flash')
// We need to add 'gemini-' back since it's part of the actual CLI model name
const bareModel = options.model || '2.5-flash';
const cliArgs: string[] = [];
// Streaming JSON output format for real-time updates
cliArgs.push('--output-format', 'stream-json');
// Model selection - Gemini CLI expects full model names like "gemini-2.5-flash"
// Unlike Cursor CLI where 'cursor-' is just a routing prefix, for Gemini CLI
// the 'gemini-' is part of the actual model name Google expects
if (bareModel && bareModel !== 'auto') {
// Add gemini- prefix if not already present (handles edge cases)
const cliModel = bareModel.startsWith('gemini-') ? bareModel : `gemini-${bareModel}`;
cliArgs.push('--model', cliModel);
}
// Disable sandbox mode for faster execution (sandbox adds overhead)
cliArgs.push('--sandbox', 'false');
// YOLO mode for automatic approval (required for non-interactive use)
// Use explicit approval-mode for clearer semantics
cliArgs.push('--approval-mode', 'yolo');
// Explicitly include the working directory in allowed workspace directories
// This ensures Gemini CLI allows file operations in the project directory,
// even if it has a different workspace cached from a previous session
if (options.cwd) {
cliArgs.push('--include-directories', options.cwd);
}
// Note: Gemini CLI doesn't have a --thinking-level flag.
// Thinking capabilities are determined by the model selection (e.g., gemini-2.5-pro).
// The model handles thinking internally based on the task complexity.
// The prompt will be passed as the last positional argument
// We'll append it in executeQuery after extracting the text
return cliArgs;
}
/**
* Convert Gemini event to AutoMaker ProviderMessage format
*/
normalizeEvent(event: unknown): ProviderMessage | null {
const geminiEvent = event as GeminiStreamEvent;
switch (geminiEvent.type) {
case 'init': {
// Init event - capture session but don't yield a message
const initEvent = geminiEvent as GeminiInitEvent;
logger.debug(
`Gemini init event: session=${initEvent.session_id}, model=${initEvent.model}`
);
return null;
}
case 'message': {
const messageEvent = geminiEvent as GeminiMessageEvent;
// Skip user messages - already handled by caller
if (messageEvent.role === 'user') {
return null;
}
// Handle assistant messages
if (messageEvent.role === 'assistant') {
return {
type: 'assistant',
session_id: messageEvent.session_id,
message: {
role: 'assistant',
content: [{ type: 'text', text: messageEvent.content }],
},
};
}
return null;
}
case 'tool_use': {
const toolEvent = geminiEvent as GeminiToolUseEvent;
const normalizedName = normalizeGeminiToolName(toolEvent.tool_name);
const normalizedInput = normalizeGeminiToolInput(
toolEvent.tool_name,
toolEvent.parameters as Record<string, unknown>
);
return {
type: 'assistant',
session_id: toolEvent.session_id,
message: {
role: 'assistant',
content: [
{
type: 'tool_use',
name: normalizedName,
tool_use_id: toolEvent.tool_id,
input: normalizedInput,
},
],
},
};
}
case 'tool_result': {
const toolResultEvent = geminiEvent as GeminiToolResultEvent;
// If tool result is an error, prefix with error indicator
const content =
toolResultEvent.status === 'error'
? `[ERROR] ${toolResultEvent.output}`
: toolResultEvent.output;
return {
type: 'assistant',
session_id: toolResultEvent.session_id,
message: {
role: 'assistant',
content: [
{
type: 'tool_result',
tool_use_id: toolResultEvent.tool_id,
content,
},
],
},
};
}
case 'result': {
const resultEvent = geminiEvent as GeminiResultEvent;
if (resultEvent.status === 'error') {
return {
type: 'error',
session_id: resultEvent.session_id,
error: resultEvent.error || 'Unknown error',
};
}
// Success result - include stats for logging
logger.debug(
`Gemini result: status=${resultEvent.status}, tokens=${resultEvent.stats?.total_tokens}`
);
return {
type: 'result',
subtype: 'success',
session_id: resultEvent.session_id,
};
}
case 'error': {
const errorEvent = geminiEvent as GeminiResultEvent;
return {
type: 'error',
session_id: errorEvent.session_id,
error: errorEvent.error || 'Unknown error',
};
}
default:
logger.debug(`Unknown Gemini event type: ${geminiEvent.type}`);
return null;
}
}
// ==========================================================================
// CliProvider Overrides
// ==========================================================================
/**
* Override error mapping for Gemini-specific error codes
*/
protected mapError(stderr: string, exitCode: number | null): CliErrorInfo {
const lower = stderr.toLowerCase();
if (
lower.includes('not authenticated') ||
lower.includes('please log in') ||
lower.includes('unauthorized') ||
lower.includes('login required') ||
lower.includes('error authenticating') ||
lower.includes('loadcodeassist') ||
(lower.includes('econnrefused') && lower.includes('8888'))
) {
return {
code: GeminiErrorCode.NOT_AUTHENTICATED,
message: 'Gemini CLI is not authenticated',
recoverable: true,
suggestion:
'Run "gemini" interactively to log in, or set GEMINI_API_KEY environment variable',
};
}
if (
lower.includes('rate limit') ||
lower.includes('too many requests') ||
lower.includes('429') ||
lower.includes('quota exceeded')
) {
return {
code: GeminiErrorCode.RATE_LIMITED,
message: 'Gemini API rate limit exceeded',
recoverable: true,
suggestion: 'Wait a few minutes and try again. Free tier: 60 req/min, 1000 req/day',
};
}
if (
lower.includes('model not available') ||
lower.includes('invalid model') ||
lower.includes('unknown model') ||
lower.includes('modelnotfounderror') ||
lower.includes('model not found') ||
(lower.includes('not found') && lower.includes('404'))
) {
return {
code: GeminiErrorCode.MODEL_UNAVAILABLE,
message: 'Requested model is not available',
recoverable: true,
suggestion: 'Try using "gemini-2.5-flash" or select a different model',
};
}
if (
lower.includes('network') ||
lower.includes('connection') ||
lower.includes('econnrefused') ||
lower.includes('timeout')
) {
return {
code: GeminiErrorCode.NETWORK_ERROR,
message: 'Network connection error',
recoverable: true,
suggestion: 'Check your internet connection and try again',
};
}
if (exitCode === 137 || lower.includes('killed') || lower.includes('sigterm')) {
return {
code: GeminiErrorCode.PROCESS_CRASHED,
message: 'Gemini CLI process was terminated',
recoverable: true,
suggestion: 'The process may have run out of memory. Try a simpler task.',
};
}
return {
code: GeminiErrorCode.UNKNOWN,
message: stderr || `Gemini CLI exited with code ${exitCode}`,
recoverable: false,
};
}
/**
* Override install instructions for Gemini-specific guidance
*/
protected getInstallInstructions(): string {
return 'Install with: npm install -g @google/gemini-cli (or visit https://github.com/google-gemini/gemini-cli)';
}
/**
* Execute a prompt using Gemini CLI with streaming
*/
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
this.ensureCliDetected();
// Validate that model doesn't have a provider prefix
validateBareModelId(options.model, 'GeminiProvider');
if (!this.cliPath) {
throw this.createError(
GeminiErrorCode.NOT_INSTALLED,
'Gemini CLI is not installed',
true,
this.getInstallInstructions()
);
}
// Extract prompt text to pass as positional argument
const promptText = this.extractPromptText(options);
// Build CLI args and append the prompt as the last positional argument
const cliArgs = this.buildCliArgs(options);
cliArgs.push(promptText); // Gemini CLI uses positional args for the prompt
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
let sessionId: string | undefined;
logger.debug(`GeminiProvider.executeQuery called with model: "${options.model}"`);
try {
for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
const event = rawEvent as GeminiStreamEvent;
// Capture session ID from init event
if (event.type === 'init') {
const initEvent = event as GeminiInitEvent;
sessionId = initEvent.session_id;
logger.debug(`Session started: ${sessionId}, model: ${initEvent.model}`);
}
// Normalize and yield the event
const normalized = this.normalizeEvent(event);
if (normalized) {
if (!normalized.session_id && sessionId) {
normalized.session_id = sessionId;
}
yield normalized;
}
}
} catch (error) {
if (isAbortError(error)) {
logger.debug('Query aborted');
return;
}
// Map CLI errors to GeminiError
if (error instanceof Error && 'stderr' in error) {
const errorInfo = this.mapError(
(error as { stderr?: string }).stderr || error.message,
(error as { exitCode?: number | null }).exitCode ?? null
);
throw this.createError(
errorInfo.code as GeminiErrorCode,
errorInfo.message,
errorInfo.recoverable,
errorInfo.suggestion
);
}
throw error;
}
}
// ==========================================================================
// Gemini-Specific Methods
// ==========================================================================
/**
* Create a GeminiError with details
*/
private createError(
code: GeminiErrorCode,
message: string,
recoverable: boolean = false,
suggestion?: string
): GeminiError {
const error = new Error(message) as GeminiError;
error.code = code;
error.recoverable = recoverable;
error.suggestion = suggestion;
error.name = 'GeminiError';
return error;
}
/**
* Get Gemini CLI version
*/
async getVersion(): Promise<string | null> {
this.ensureCliDetected();
if (!this.cliPath) return null;
try {
const result = execSync(`"${this.cliPath}" --version`, {
encoding: 'utf8',
timeout: 5000,
stdio: 'pipe',
}).trim();
return result;
} catch {
return null;
}
}
/**
* Check authentication status
*
* Uses a fast credential check approach:
* 1. Check for GEMINI_API_KEY environment variable
* 2. Check for Google Cloud credentials
* 3. Check for Gemini settings file with stored credentials
* 4. Quick CLI auth test with --help (fast, doesn't make API calls)
*/
async checkAuth(): Promise<GeminiAuthStatus> {
this.ensureCliDetected();
if (!this.cliPath) {
logger.debug('checkAuth: CLI not found');
return { authenticated: false, method: 'none' };
}
logger.debug('checkAuth: Starting credential check');
// Determine the likely auth method based on environment
const hasApiKey = !!process.env.GEMINI_API_KEY;
const hasEnvApiKey = hasApiKey;
const hasVertexAi = !!(
process.env.GOOGLE_APPLICATION_CREDENTIALS || process.env.GOOGLE_CLOUD_PROJECT
);
logger.debug(`checkAuth: hasApiKey=${hasApiKey}, hasVertexAi=${hasVertexAi}`);
// Check for Gemini credentials file (~/.gemini/settings.json)
const geminiConfigDir = path.join(os.homedir(), '.gemini');
const settingsPath = path.join(geminiConfigDir, 'settings.json');
let hasCredentialsFile = false;
let authType: string | null = null;
try {
await fs.access(settingsPath);
logger.debug(`checkAuth: Found settings file at ${settingsPath}`);
try {
const content = await fs.readFile(settingsPath, 'utf8');
const settings = JSON.parse(content);
// Auth config is at security.auth.selectedType (e.g., "oauth-personal", "oauth-adc", "api-key")
const selectedType = settings?.security?.auth?.selectedType;
if (selectedType) {
hasCredentialsFile = true;
authType = selectedType;
logger.debug(`checkAuth: Settings file has auth config, selectedType=${selectedType}`);
} else {
logger.debug(`checkAuth: Settings file found but no auth type configured`);
}
} catch (e) {
logger.debug(`checkAuth: Failed to parse settings file: ${e}`);
}
} catch {
logger.debug('checkAuth: No settings file found');
}
// If we have an API key, we're authenticated
if (hasApiKey) {
logger.debug('checkAuth: Using API key authentication');
return {
authenticated: true,
method: 'api_key',
hasApiKey,
hasEnvApiKey,
hasCredentialsFile,
};
}
// If we have Vertex AI credentials, we're authenticated
if (hasVertexAi) {
logger.debug('checkAuth: Using Vertex AI authentication');
return {
authenticated: true,
method: 'vertex_ai',
hasApiKey,
hasEnvApiKey,
hasCredentialsFile,
};
}
// Check if settings file indicates configured authentication
if (hasCredentialsFile && authType) {
// OAuth types: "oauth-personal", "oauth-adc"
// API key type: "api-key"
// Code assist: "code-assist" (requires IDE integration)
if (authType.startsWith('oauth')) {
logger.debug(`checkAuth: OAuth authentication configured (${authType})`);
return {
authenticated: true,
method: 'google_login',
hasApiKey,
hasEnvApiKey,
hasCredentialsFile,
};
}
if (authType === 'api-key') {
logger.debug('checkAuth: API key authentication configured in settings');
return {
authenticated: true,
method: 'api_key',
hasApiKey,
hasEnvApiKey,
hasCredentialsFile,
};
}
if (authType === 'code-assist' || authType === 'codeassist') {
logger.debug('checkAuth: Code Assist auth configured but requires local server');
return {
authenticated: false,
method: 'google_login',
hasApiKey,
hasEnvApiKey,
hasCredentialsFile,
error:
'Code Assist authentication requires IDE integration. Please use "gemini" CLI to log in with a different method, or set GEMINI_API_KEY.',
};
}
// Unknown auth type but something is configured
logger.debug(`checkAuth: Unknown auth type configured: ${authType}`);
return {
authenticated: true,
method: 'google_login',
hasApiKey,
hasEnvApiKey,
hasCredentialsFile,
};
}
// No credentials found
logger.debug('checkAuth: No valid credentials found');
return {
authenticated: false,
method: 'none',
hasApiKey,
hasEnvApiKey,
hasCredentialsFile,
error:
'No authentication configured. Run "gemini" interactively to log in, or set GEMINI_API_KEY.',
};
}
/**
* Detect installation status (required by BaseProvider)
*/
async detectInstallation(): Promise<InstallationStatus> {
const installed = await this.isInstalled();
const version = installed ? await this.getVersion() : undefined;
const auth = await this.checkAuth();
return {
installed,
version: version || undefined,
path: this.cliPath || undefined,
method: 'cli',
hasApiKey: !!process.env.GEMINI_API_KEY,
authenticated: auth.authenticated,
};
}
/**
* Get the detected CLI path (public accessor for status endpoints)
*/
getCliPath(): string | null {
this.ensureCliDetected();
return this.cliPath;
}
/**
* Get available Gemini models
*/
getAvailableModels(): ModelDefinition[] {
return Object.entries(GEMINI_MODEL_MAP).map(([id, config]) => ({
id, // Full model ID with gemini- prefix (e.g., 'gemini-2.5-flash')
name: config.label,
modelString: id, // Same as id - CLI uses the full model name
provider: 'gemini',
description: config.description,
supportsTools: true,
supportsVision: config.supportsVision,
contextWindow: config.contextWindow,
}));
}
/**
* Check if a feature is supported
*/
supportsFeature(feature: string): boolean {
const supported = ['tools', 'text', 'streaming', 'vision', 'thinking'];
return supported.includes(feature);
}
}
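One detail of the removed file worth keeping in mind: the write_todos normalization described in its comments is essentially a field rename. normalizeTodos in tool-normalization.ts is the real implementation; the sketch below is an approximation based only on the two formats documented above, and how 'cancelled' items and activeForm are actually handled is an assumption.

// Approximate Gemini write_todos -> Claude TodoWrite mapping (illustrative).
interface GeminiTodo {
  description: string;
  status: 'pending' | 'in_progress' | 'completed' | 'cancelled';
}

interface ClaudeTodo {
  content: string;
  status: 'pending' | 'in_progress' | 'completed';
  activeForm: string;
}

function normalizeTodosSketch(todos: GeminiTodo[]): ClaudeTodo[] {
  return todos
    .filter((t): t is GeminiTodo & { status: ClaudeTodo['status'] } => t.status !== 'cancelled') // assumption: drop states Claude's format lacks
    .map((t) => ({
      content: t.description, // description -> content
      status: t.status,
      activeForm: t.description, // assumption: the real helper may derive a present-tense form
    }));
}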
View File
@@ -16,16 +16,6 @@ export type {
   ProviderMessage,
   InstallationStatus,
   ModelDefinition,
-  AgentDefinition,
-  ReasoningEffort,
-  SystemPromptPreset,
-  ConversationMessage,
-  ContentBlock,
-  ValidationResult,
-  McpServerConfig,
-  McpStdioServerConfig,
-  McpSSEServerConfig,
-  McpHttpServerConfig,
 } from './types.js';
 // Claude provider
@@ -38,12 +28,6 @@ export { CursorConfigManager } from './cursor-config-manager.js';
 // OpenCode provider
 export { OpencodeProvider } from './opencode-provider.js';
-// Gemini provider
-export { GeminiProvider, GeminiErrorCode } from './gemini-provider.js';
-// Copilot provider (GitHub Copilot SDK)
-export { CopilotProvider, CopilotErrorCode } from './copilot-provider.js';
 // Provider factory
 export { ProviderFactory } from './provider-factory.js';
View File
@@ -25,6 +25,7 @@ import type {
   InstallationStatus,
   ContentBlock,
 } from '@automaker/types';
+import { stripProviderPrefix } from '@automaker/types';
 import { type SubprocessOptions, getOpenCodeAuthIndicators } from '@automaker/platform';
 import { createLogger } from '@automaker/utils';
@@ -327,18 +328,10 @@ export class OpencodeProvider extends CliProvider {
     args.push('--format', 'json');

     // Handle model selection
-    // Convert canonical prefix format (opencode-xxx) to CLI slash format (opencode/xxx)
-    // OpenCode CLI expects provider/model format (e.g., 'opencode/big-model')
+    // Strip 'opencode-' prefix if present, OpenCode uses format like 'anthropic/claude-sonnet-4-5'
     if (options.model) {
-      // Strip opencode- prefix if present, then ensure slash format
-      const model = options.model.startsWith('opencode-')
-        ? options.model.slice('opencode-'.length)
-        : options.model;
-      // If model has slash, it's already provider/model format; otherwise prepend opencode/
-      const cliModel = model.includes('/') ? model : `opencode/${model}`;
-      args.push('--model', cliModel);
+      const model = stripProviderPrefix(options.model);
+      args.push('--model', model);
     }

     // Note: OpenCode reads from stdin automatically when input is piped
@@ -1042,7 +1035,7 @@ export class OpencodeProvider extends CliProvider {
       'lm studio': 'lmstudio',
       lmstudio: 'lmstudio',
       opencode: 'opencode',
-      'z.ai coding plan': 'zai-coding-plan',
+      'z.ai coding plan': 'z-ai',
       'z.ai': 'z-ai',
     };
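The model-selection hunk above swaps the inline prefix/slash conversion for stripProviderPrefix. A self-contained sketch of the two behaviours, written as local functions so the difference is visible; legacyToCliModel mirrors the removed code, while the exact semantics of stripProviderPrefix are an assumption (a plain prefix strip) based on the new comment.

function legacyToCliModel(model: string): string {
  // Removed behaviour: strip 'opencode-' and force a provider/model slash format.
  const stripped = model.startsWith('opencode-') ? model.slice('opencode-'.length) : model;
  return stripped.includes('/') ? stripped : `opencode/${stripped}`;
}

function assumedStripProviderPrefix(model: string): string {
  // New behaviour (assumed): only the provider prefix is removed; slash formats pass through.
  return model.startsWith('opencode-') ? model.slice('opencode-'.length) : model;
}

console.log(legacyToCliModel('opencode-big-model')); // "opencode/big-model"
console.log(assumedStripProviderPrefix('opencode-anthropic/claude-sonnet-4-5')); // "anthropic/claude-sonnet-4-5"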

View File

@@ -7,14 +7,7 @@
 import { BaseProvider } from './base-provider.js';
 import type { InstallationStatus, ModelDefinition } from './types.js';
-import {
-  isCursorModel,
-  isCodexModel,
-  isOpencodeModel,
-  isGeminiModel,
-  isCopilotModel,
-  type ModelProvider,
-} from '@automaker/types';
+import { isCursorModel, isCodexModel, isOpencodeModel, type ModelProvider } from '@automaker/types';
 import * as fs from 'fs';
 import * as path from 'path';
@@ -23,8 +16,6 @@ const DISCONNECTED_MARKERS: Record<string, string> = {
   codex: '.codex-disconnected',
   cursor: '.cursor-disconnected',
   opencode: '.opencode-disconnected',
-  gemini: '.gemini-disconnected',
-  copilot: '.copilot-disconnected',
 };

 /**
@@ -248,8 +239,8 @@ export class ProviderFactory {
         model.modelString === modelId ||
         model.id.endsWith(`-${modelId}`) ||
         model.modelString.endsWith(`-${modelId}`) ||
-        model.modelString === modelId.replace(/^(claude|cursor|codex|gemini)-/, '') ||
-        model.modelString === modelId.replace(/-(claude|cursor|codex|gemini)$/, '')
+        model.modelString === modelId.replace(/^(claude|cursor|codex)-/, '') ||
+        model.modelString === modelId.replace(/-(claude|cursor|codex)$/, '')
       ) {
         return model.supportsVision ?? true;
       }
@@ -276,8 +267,6 @@ import { ClaudeProvider } from './claude-provider.js';
 import { CursorProvider } from './cursor-provider.js';
 import { CodexProvider } from './codex-provider.js';
 import { OpencodeProvider } from './opencode-provider.js';
-import { GeminiProvider } from './gemini-provider.js';
-import { CopilotProvider } from './copilot-provider.js';

 // Register Claude provider
 registerProvider('claude', {
@@ -312,19 +301,3 @@ registerProvider('opencode', {
   canHandleModel: (model: string) => isOpencodeModel(model),
   priority: 3, // Between codex (5) and claude (0)
 });
-
-// Register Gemini provider
-registerProvider('gemini', {
-  factory: () => new GeminiProvider(),
-  aliases: ['google'],
-  canHandleModel: (model: string) => isGeminiModel(model),
-  priority: 4, // Between opencode (3) and codex (5)
-});
-
-// Register Copilot provider (GitHub Copilot SDK)
-registerProvider('copilot', {
-  factory: () => new CopilotProvider(),
-  aliases: ['github-copilot', 'github'],
-  canHandleModel: (model: string) => isCopilotModel(model),
-  priority: 6, // High priority - check before Codex since both can handle GPT models
-});
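Each registration above pairs a canHandleModel predicate with a numeric priority. As a rough illustration of what those priorities imply, here is a sketch of priority-ordered resolution; this is an assumption about how ProviderFactory could consume the registry, not code from either branch.

interface RegistryEntry {
  name: string;
  canHandleModel: (model: string) => boolean;
  priority: number;
}

function pickProvider(entries: RegistryEntry[], model: string): string | undefined {
  // Higher-priority entries are consulted first, so a priority-6 provider that claims
  // a GPT model would win over a priority-5 one that also claims it.
  return [...entries]
    .sort((a, b) => b.priority - a.priority)
    .find((entry) => entry.canHandleModel(model))?.name;
}

console.log(
  pickProvider(
    [
      { name: 'claude', canHandleModel: (m) => m.startsWith('claude-'), priority: 0 },
      { name: 'codex', canHandleModel: (m) => m.startsWith('gpt-'), priority: 5 },
    ],
    'gpt-5' // hypothetical model string
  )
);
// -> "codex"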

View File

@@ -21,7 +21,6 @@ import type {
   ThinkingLevel,
   ReasoningEffort,
   ClaudeApiProfile,
-  ClaudeCompatibleProvider,
   Credentials,
 } from '@automaker/types';
 import { stripProviderPrefix } from '@automaker/types';
@@ -57,17 +56,9 @@ export interface SimpleQueryOptions {
   readOnly?: boolean;
   /** Setting sources for CLAUDE.md loading */
   settingSources?: Array<'user' | 'project' | 'local'>;
-  /**
-   * Active Claude API profile for alternative endpoint configuration
-   * @deprecated Use claudeCompatibleProvider instead
-   */
+  /** Active Claude API profile for alternative endpoint configuration */
   claudeApiProfile?: ClaudeApiProfile;
-  /**
-   * Claude-compatible provider for alternative endpoint configuration.
-   * Takes precedence over claudeApiProfile if both are set.
-   */
-  claudeCompatibleProvider?: ClaudeCompatibleProvider;
-  /** Credentials for resolving 'credentials' apiKeySource in Claude API profiles/providers */
+  /** Credentials for resolving 'credentials' apiKeySource in Claude API profiles */
   credentials?: Credentials;
 }
@@ -140,8 +131,7 @@ export async function simpleQuery(options: SimpleQueryOptions): Promise<SimpleQu
     reasoningEffort: options.reasoningEffort,
     readOnly: options.readOnly,
     settingSources: options.settingSources,
-    claudeApiProfile: options.claudeApiProfile, // Legacy: Pass active Claude API profile for alternative endpoint configuration
-    claudeCompatibleProvider: options.claudeCompatibleProvider, // New: Pass Claude-compatible provider (takes precedence)
+    claudeApiProfile: options.claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
     credentials: options.credentials, // Pass credentials for resolving 'credentials' apiKeySource
   };
@@ -225,8 +215,7 @@ export async function streamingQuery(options: StreamingQueryOptions): Promise<Si
     reasoningEffort: options.reasoningEffort,
     readOnly: options.readOnly,
     settingSources: options.settingSources,
-    claudeApiProfile: options.claudeApiProfile, // Legacy: Pass active Claude API profile for alternative endpoint configuration
-    claudeCompatibleProvider: options.claudeCompatibleProvider, // New: Pass Claude-compatible provider (takes precedence)
+    claudeApiProfile: options.claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
     credentials: options.credentials, // Pass credentials for resolving 'credentials' apiKeySource
   };
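On the side of this diff that still has both options, the doc comment states that claudeCompatibleProvider wins when both are set. A sketch of a call site under that assumption: the claudeApiProfile, claudeCompatibleProvider, and credentials fields come from the interface above, while the remaining fields, import paths, and result.text usage are assumptions mirroring other call sites in this compare.

import { simpleQuery, type SimpleQueryOptions } from './simple-query-service.js'; // path assumed
import type { ClaudeApiProfile, ClaudeCompatibleProvider, Credentials } from '@automaker/types';

// Placeholders -- real values would come from the settings service.
declare const legacyProfile: ClaudeApiProfile;
declare const zaiProvider: ClaudeCompatibleProvider;
declare const credentials: Credentials;

async function summarize(): Promise<void> {
  const options: SimpleQueryOptions = {
    prompt: 'Summarize the repository layout',
    model: 'claude-sonnet-4-5',
    cwd: '/path/to/project',
    readOnly: true,
    claudeApiProfile: legacyProfile,       // legacy field, still accepted
    claudeCompatibleProvider: zaiProvider, // documented to take precedence when both are set
    credentials,                           // resolves the 'credentials' apiKeySource
  };
  const result = await simpleQuery(options);
  console.log(result.text); // result.text assumed, mirroring streamingQuery usage elsewhere
}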

View File

@@ -1,112 +0,0 @@
/**
* Shared tool normalization utilities for AI providers
*
* These utilities help normalize tool inputs from various AI providers
* to the standard format expected by the application.
*/
/**
* Valid todo status values in the standard format
*/
type TodoStatus = 'pending' | 'in_progress' | 'completed';
/**
* Set of valid status values for validation
*/
const VALID_STATUSES = new Set<TodoStatus>(['pending', 'in_progress', 'completed']);
/**
* Todo item from various AI providers (Gemini, Copilot, etc.)
*/
interface ProviderTodo {
description?: string;
content?: string;
status?: string;
}
/**
* Standard todo format used by the application
*/
interface NormalizedTodo {
content: string;
status: TodoStatus;
activeForm: string;
}
/**
* Normalize a provider status value to a valid TodoStatus
*/
function normalizeStatus(status: string | undefined): TodoStatus {
if (!status) return 'pending';
if (status === 'cancelled' || status === 'canceled') return 'completed';
if (VALID_STATUSES.has(status as TodoStatus)) return status as TodoStatus;
return 'pending';
}
/**
* Normalize todos array from provider format to standard format
*
* Handles different formats from providers:
* - Gemini: { description, status } with 'cancelled' as possible status
* - Copilot: { content/description, status } with 'cancelled' as possible status
*
* Output format (Claude/Standard):
* - { content, status, activeForm } where status is 'pending'|'in_progress'|'completed'
*/
export function normalizeTodos(todos: ProviderTodo[] | null | undefined): NormalizedTodo[] {
if (!todos) return [];
return todos.map((todo) => ({
content: todo.content || todo.description || '',
status: normalizeStatus(todo.status),
// Use content/description as activeForm since providers may not have it
activeForm: todo.content || todo.description || '',
}));
}
/**
* Normalize file path parameters from various provider formats
*
* Different providers use different parameter names for file paths:
* - path, file, filename, filePath -> file_path
*/
export function normalizeFilePathInput(input: Record<string, unknown>): Record<string, unknown> {
const normalized = { ...input };
if (!normalized.file_path) {
if (input.path) normalized.file_path = input.path;
else if (input.file) normalized.file_path = input.file;
else if (input.filename) normalized.file_path = input.filename;
else if (input.filePath) normalized.file_path = input.filePath;
}
return normalized;
}
/**
* Normalize shell command parameters from various provider formats
*
* Different providers use different parameter names for commands:
* - cmd, script -> command
*/
export function normalizeCommandInput(input: Record<string, unknown>): Record<string, unknown> {
const normalized = { ...input };
if (!normalized.command) {
if (input.cmd) normalized.command = input.cmd;
else if (input.script) normalized.command = input.script;
}
return normalized;
}
/**
* Normalize search pattern parameters from various provider formats
*
* Different providers use different parameter names for search patterns:
* - query, search, regex -> pattern
*/
export function normalizePatternInput(input: Record<string, unknown>): Record<string, unknown> {
const normalized = { ...input };
if (!normalized.pattern) {
if (input.query) normalized.pattern = input.query;
else if (input.search) normalized.pattern = input.search;
else if (input.regex) normalized.pattern = input.regex;
}
return normalized;
}
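Since this compare deletes the whole normalization module, a short usage sketch of what it did, grounded in the function bodies shown above; the import path and the call sites are illustrative only.

import {
  normalizeTodos,
  normalizeFilePathInput,
  normalizeCommandInput,
  normalizePatternInput,
} from './tool-normalization.js'; // assumed path for the module removed above

const geminiTodos = [
  { description: 'Scan repository', status: 'in_progress' },
  { description: 'Write summary', status: 'cancelled' }, // 'cancelled' maps to 'completed'
];
console.log(normalizeTodos(geminiTodos));
// -> [{ content: 'Scan repository', status: 'in_progress', activeForm: 'Scan repository' }, ...]

console.log(normalizeFilePathInput({ path: 'src/index.ts' }));
// -> { path: 'src/index.ts', file_path: 'src/index.ts' }

console.log(normalizeCommandInput({ cmd: 'npm test' }));
// -> { cmd: 'npm test', command: 'npm test' }

console.log(normalizePatternInput({ query: 'TODO' }));
// -> { query: 'TODO', pattern: 'TODO' }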

View File

@@ -19,7 +19,4 @@ export type {
   InstallationStatus,
   ValidationResult,
   ModelDefinition,
-  AgentDefinition,
-  ReasoningEffort,
-  SystemPromptPreset,
 } from '@automaker/types';

View File

@@ -8,17 +8,16 @@
import * as secureFs from '../../lib/secure-fs.js'; import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js'; import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, supportsStructuredOutput, isCodexModel } from '@automaker/types'; import { DEFAULT_PHASE_MODELS } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver'; import { resolvePhaseModel } from '@automaker/model-resolver';
import { streamingQuery } from '../../providers/simple-query-service.js'; import { streamingQuery } from '../../providers/simple-query-service.js';
import { parseAndCreateFeatures } from './parse-and-create-features.js'; import { parseAndCreateFeatures } from './parse-and-create-features.js';
import { extractJsonWithArray } from '../../lib/json-extractor.js';
import { getAppSpecPath } from '@automaker/platform'; import { getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js'; import type { SettingsService } from '../../services/settings-service.js';
import { import {
getAutoLoadClaudeMdSetting, getAutoLoadClaudeMdSetting,
getPromptCustomization, getPromptCustomization,
getPhaseModelWithOverrides, getActiveClaudeApiProfile,
} from '../../lib/settings-helpers.js'; } from '../../lib/settings-helpers.js';
import { FeatureLoader } from '../../services/feature-loader.js'; import { FeatureLoader } from '../../services/feature-loader.js';
@@ -26,64 +25,6 @@ const logger = createLogger('SpecRegeneration');
const DEFAULT_MAX_FEATURES = 50; const DEFAULT_MAX_FEATURES = 50;
/**
* Timeout for Codex models when generating features (5 minutes).
* Codex models are slower and need more time to generate 50+ features.
*/
const CODEX_FEATURE_GENERATION_TIMEOUT_MS = 300000; // 5 minutes
/**
* Type for extracted features JSON response
*/
interface FeaturesExtractionResult {
features: Array<{
id: string;
category?: string;
title: string;
description: string;
priority?: number;
complexity?: 'simple' | 'moderate' | 'complex';
dependencies?: string[];
}>;
}
/**
* JSON schema for features output format (Claude/Codex structured output)
*/
const featuresOutputSchema = {
type: 'object',
properties: {
features: {
type: 'array',
items: {
type: 'object',
properties: {
id: { type: 'string', description: 'Unique feature identifier (kebab-case)' },
category: { type: 'string', description: 'Feature category' },
title: { type: 'string', description: 'Short, descriptive title' },
description: { type: 'string', description: 'Detailed feature description' },
priority: {
type: 'number',
description: 'Priority level: 1 (highest) to 5 (lowest)',
},
complexity: {
type: 'string',
enum: ['simple', 'moderate', 'complex'],
description: 'Implementation complexity',
},
dependencies: {
type: 'array',
items: { type: 'string' },
description: 'IDs of features this depends on',
},
},
required: ['id', 'title', 'description'],
},
},
},
required: ['features'],
} as const;
export async function generateFeaturesFromSpec( export async function generateFeaturesFromSpec(
projectPath: string, projectPath: string,
events: EventEmitter, events: EventEmitter,
@@ -178,97 +119,33 @@ Generate ${featureCount} NEW features that build on each other logically. Rememb
'[FeatureGeneration]' '[FeatureGeneration]'
); );
// Get model from phase settings with provider info // Get model from phase settings
const { const settings = await settingsService?.getGlobalSettings();
phaseModel: phaseModelEntry, const phaseModelEntry =
provider, settings?.phaseModels?.featureGenerationModel || DEFAULT_PHASE_MODELS.featureGenerationModel;
credentials, const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
} = settingsService
? await getPhaseModelWithOverrides(
'featureGenerationModel',
settingsService,
projectPath,
'[FeatureGeneration]'
)
: {
phaseModel: DEFAULT_PHASE_MODELS.featureGenerationModel,
provider: undefined,
credentials: undefined,
};
const { model, thinkingLevel, reasoningEffort } = resolvePhaseModel(phaseModelEntry);
logger.info('Using model:', model, provider ? `via provider: ${provider.name}` : 'direct API'); logger.info('Using model:', model);
// Codex models need extended timeout for generating many features. // Get active Claude API profile for alternative endpoint configuration
// Use 'xhigh' reasoning effort to get 5-minute timeout (300s base * 1.0x = 300s). const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
// The Codex provider has a special 5-minute base timeout for feature generation. settingsService,
const isCodex = isCodexModel(model); '[FeatureGeneration]'
const effectiveReasoningEffort = isCodex ? 'xhigh' : reasoningEffort;
if (isCodex) {
logger.info('Codex model detected - using extended timeout (5 minutes for feature generation)');
}
if (effectiveReasoningEffort) {
logger.info('Reasoning effort:', effectiveReasoningEffort);
}
// Determine if we should use structured output based on model type
const useStructuredOutput = supportsStructuredOutput(model);
logger.info(
`Structured output mode: ${useStructuredOutput ? 'enabled (Claude/Codex)' : 'disabled (using JSON instructions)'}`
); );
// Build the final prompt - for non-Claude/Codex models, include explicit JSON instructions
let finalPrompt = prompt;
if (!useStructuredOutput) {
finalPrompt = `${prompt}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
2. After analyzing the spec, respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
3. The JSON must have this exact structure:
{
"features": [
{
"id": "unique-feature-id",
"category": "Category Name",
"title": "Short Feature Title",
"description": "Detailed description of the feature",
"priority": 1,
"complexity": "simple|moderate|complex",
"dependencies": ["other-feature-id"]
}
]
}
4. Feature IDs must be unique, lowercase, kebab-case (e.g., "user-authentication", "data-export")
5. Priority ranges from 1 (highest) to 5 (lowest)
6. Complexity must be one of: "simple", "moderate", "complex"
7. Dependencies is an array of feature IDs that must be completed first (can be empty)
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
}
// Use streamingQuery with event callbacks // Use streamingQuery with event callbacks
const result = await streamingQuery({ const result = await streamingQuery({
prompt: finalPrompt, prompt,
model, model,
cwd: projectPath, cwd: projectPath,
maxTurns: 250, maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'], allowedTools: ['Read', 'Glob', 'Grep'],
abortController, abortController,
thinkingLevel, thinkingLevel,
reasoningEffort: effectiveReasoningEffort, // Extended timeout for Codex models
readOnly: true, // Feature generation only reads code, doesn't write readOnly: true, // Feature generation only reads code, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined, settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
outputFormat: useStructuredOutput
? {
type: 'json_schema',
schema: featuresOutputSchema,
}
: undefined,
onText: (text) => { onText: (text) => {
logger.debug(`Feature text block received (${text.length} chars)`); logger.debug(`Feature text block received (${text.length} chars)`);
events.emit('spec-regeneration:event', { events.emit('spec-regeneration:event', {
@@ -279,51 +156,15 @@ Your entire response should be valid JSON starting with { and ending with }. No
}, },
}); });
// Get response content - prefer structured output if available const responseText = result.text;
let contentForParsing: string;
if (result.structured_output) { logger.info(`Feature stream complete.`);
// Use structured output from Claude/Codex models logger.info(`Feature response length: ${responseText.length} chars`);
logger.info('✅ Received structured output from model'); logger.info('========== FULL RESPONSE TEXT ==========');
contentForParsing = JSON.stringify(result.structured_output); logger.info(responseText);
logger.debug('Structured output:', contentForParsing); logger.info('========== END RESPONSE TEXT ==========');
} else {
// Use text response (for non-Claude/Codex models or fallback)
// Pre-extract JSON to handle conversational text that may surround the JSON response
// This follows the same pattern used in generate-spec.ts and validate-issue.ts
const rawText = result.text;
logger.info(`Feature stream complete.`);
logger.info(`Feature response length: ${rawText.length} chars`);
logger.info('========== FULL RESPONSE TEXT ==========');
logger.info(rawText);
logger.info('========== END RESPONSE TEXT ==========');
// Pre-extract JSON from response - handles conversational text around the JSON await parseAndCreateFeatures(projectPath, responseText, events);
const extracted = extractJsonWithArray<FeaturesExtractionResult>(rawText, 'features', {
logger,
});
if (extracted) {
contentForParsing = JSON.stringify(extracted);
logger.info('✅ Pre-extracted JSON from text response');
} else {
// If pre-extraction fails, we know the next step will also fail.
// Throw an error here to avoid redundant parsing and make the failure point clearer.
logger.error(
'❌ Could not extract features JSON from model response. Full response text was:\n' +
rawText
);
const errorMessage =
'Failed to parse features from model response: No valid JSON with a "features" array found.';
events.emit('spec-regeneration:event', {
type: 'spec_regeneration_error',
error: errorMessage,
projectPath: projectPath,
});
throw new Error(errorMessage);
}
}
await parseAndCreateFeatures(projectPath, contentForParsing, events);
logger.debug('========== generateFeaturesFromSpec() completed =========='); logger.debug('========== generateFeaturesFromSpec() completed ==========');
} }
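The non-structured fallback above pre-extracts JSON from conversational model output before handing it to parseAndCreateFeatures. A compact sketch of that path, reusing the extractJsonWithArray signature and import path visible in this hunk; the sample response text is invented, and the "parsed object or falsy" return behaviour is inferred from the surrounding check.

import { extractJsonWithArray } from '../../lib/json-extractor.js';
import { createLogger } from '@automaker/utils';

const logger = createLogger('SpecRegeneration');

interface FeaturesExtractionResult {
  features: Array<{ id: string; title: string; description: string }>;
}

const rawText = `Sure! Here are the features you asked for:
{"features":[{"id":"user-auth","title":"User authentication","description":"Login and sessions"}]}
Let me know if you want more.`;

const extracted = extractJsonWithArray<FeaturesExtractionResult>(rawText, 'features', { logger });
if (!extracted) {
  // Mirrors the error path above: fail loudly rather than passing junk downstream.
  throw new Error('Failed to parse features from model response');
}
const contentForParsing = JSON.stringify(extracted);
console.log(contentForParsing);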

View File

@@ -9,7 +9,7 @@ import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js'; import type { EventEmitter } from '../../lib/events.js';
import { specOutputSchema, specToXml, type SpecOutput } from '../../lib/app-spec-format.js'; import { specOutputSchema, specToXml, type SpecOutput } from '../../lib/app-spec-format.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, supportsStructuredOutput } from '@automaker/types'; import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver'; import { resolvePhaseModel } from '@automaker/model-resolver';
import { extractJson } from '../../lib/json-extractor.js'; import { extractJson } from '../../lib/json-extractor.js';
import { streamingQuery } from '../../providers/simple-query-service.js'; import { streamingQuery } from '../../providers/simple-query-service.js';
@@ -19,7 +19,7 @@ import type { SettingsService } from '../../services/settings-service.js';
import { import {
getAutoLoadClaudeMdSetting, getAutoLoadClaudeMdSetting,
getPromptCustomization, getPromptCustomization,
getPhaseModelWithOverrides, getActiveClaudeApiProfile,
} from '../../lib/settings-helpers.js'; } from '../../lib/settings-helpers.js';
const logger = createLogger('SpecRegeneration'); const logger = createLogger('SpecRegeneration');
@@ -96,37 +96,27 @@ ${prompts.appSpec.structuredSpecInstructions}`;
'[SpecRegeneration]' '[SpecRegeneration]'
); );
// Get model from phase settings with provider info // Get model from phase settings
const { const settings = await settingsService?.getGlobalSettings();
phaseModel: phaseModelEntry, const phaseModelEntry =
provider, settings?.phaseModels?.specGenerationModel || DEFAULT_PHASE_MODELS.specGenerationModel;
credentials,
} = settingsService
? await getPhaseModelWithOverrides(
'specGenerationModel',
settingsService,
projectPath,
'[SpecRegeneration]'
)
: {
phaseModel: DEFAULT_PHASE_MODELS.specGenerationModel,
provider: undefined,
credentials: undefined,
};
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry); const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
logger.info('Using model:', model, provider ? `via provider: ${provider.name}` : 'direct API'); logger.info('Using model:', model);
// Get active Claude API profile for alternative endpoint configuration
const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
settingsService,
'[SpecRegeneration]'
);
let responseText = ''; let responseText = '';
let structuredOutput: SpecOutput | null = null; let structuredOutput: SpecOutput | null = null;
// Determine if we should use structured output based on model type // Determine if we should use structured output (Claude supports it, Cursor doesn't)
const useStructuredOutput = supportsStructuredOutput(model); const useStructuredOutput = !isCursorModel(model);
logger.info(
`Structured output mode: ${useStructuredOutput ? 'enabled (Claude/Codex)' : 'disabled (using JSON instructions)'}`
);
// Build the final prompt - for non-Claude/Codex models, include JSON schema instructions // Build the final prompt - for Cursor, include JSON schema instructions
let finalPrompt = prompt; let finalPrompt = prompt;
if (!useStructuredOutput) { if (!useStructuredOutput) {
finalPrompt = `${prompt} finalPrompt = `${prompt}
@@ -152,7 +142,7 @@ Your entire response should be valid JSON starting with { and ending with }. No
thinkingLevel, thinkingLevel,
readOnly: true, // Spec generation only reads code, we write the spec ourselves readOnly: true, // Spec generation only reads code, we write the spec ourselves
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined, settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
outputFormat: useStructuredOutput outputFormat: useStructuredOutput
? { ? {

View File

@@ -10,15 +10,14 @@
import * as secureFs from '../../lib/secure-fs.js'; import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js'; import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, supportsStructuredOutput } from '@automaker/types'; import { DEFAULT_PHASE_MODELS } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver'; import { resolvePhaseModel } from '@automaker/model-resolver';
import { streamingQuery } from '../../providers/simple-query-service.js'; import { streamingQuery } from '../../providers/simple-query-service.js';
import { extractJson } from '../../lib/json-extractor.js';
import { getAppSpecPath } from '@automaker/platform'; import { getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js'; import type { SettingsService } from '../../services/settings-service.js';
import { import {
getAutoLoadClaudeMdSetting, getAutoLoadClaudeMdSetting,
getPhaseModelWithOverrides, getActiveClaudeApiProfile,
} from '../../lib/settings-helpers.js'; } from '../../lib/settings-helpers.js';
import { FeatureLoader } from '../../services/feature-loader.js'; import { FeatureLoader } from '../../services/feature-loader.js';
import { import {
@@ -35,28 +34,6 @@ import { getNotificationService } from '../../services/notification-service.js';
const logger = createLogger('SpecSync'); const logger = createLogger('SpecSync');
/**
* Type for extracted tech stack JSON response
*/
interface TechStackExtractionResult {
technologies: string[];
}
/**
* JSON schema for tech stack analysis output (Claude/Codex structured output)
*/
const techStackOutputSchema = {
type: 'object',
properties: {
technologies: {
type: 'array',
items: { type: 'string' },
description: 'List of technologies detected in the project',
},
},
required: ['technologies'],
} as const;
/** /**
* Result of a sync operation * Result of a sync operation
*/ */
@@ -178,35 +155,19 @@ export async function syncSpec(
'[SpecSync]' '[SpecSync]'
); );
// Get model from phase settings with provider info const settings = await settingsService?.getGlobalSettings();
const { const phaseModelEntry =
phaseModel: phaseModelEntry, settings?.phaseModels?.specGenerationModel || DEFAULT_PHASE_MODELS.specGenerationModel;
provider,
credentials,
} = settingsService
? await getPhaseModelWithOverrides(
'specGenerationModel',
settingsService,
projectPath,
'[SpecSync]'
)
: {
phaseModel: DEFAULT_PHASE_MODELS.specGenerationModel,
provider: undefined,
credentials: undefined,
};
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry); const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
logger.info('Using model:', model, provider ? `via provider: ${provider.name}` : 'direct API'); // Get active Claude API profile for alternative endpoint configuration
const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
// Determine if we should use structured output based on model type settingsService,
const useStructuredOutput = supportsStructuredOutput(model); '[SpecSync]'
logger.info(
`Structured output mode: ${useStructuredOutput ? 'enabled (Claude/Codex)' : 'disabled (using JSON instructions)'}`
); );
// Use AI to analyze tech stack // Use AI to analyze tech stack
let techAnalysisPrompt = `Analyze this project and return ONLY a JSON object with the current technology stack. const techAnalysisPrompt = `Analyze this project and return ONLY a JSON object with the current technology stack.
Current known technologies: ${currentTechStack.join(', ')} Current known technologies: ${currentTechStack.join(', ')}
@@ -222,16 +183,6 @@ Return ONLY this JSON format, no other text:
"technologies": ["Technology 1", "Technology 2", ...] "technologies": ["Technology 1", "Technology 2", ...]
}`; }`;
// Add explicit JSON instructions for non-Claude/Codex models
if (!useStructuredOutput) {
techAnalysisPrompt = `${techAnalysisPrompt}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
2. Your entire response should be valid JSON starting with { and ending with }.
3. No explanations, no markdown, no text before or after the JSON.`;
}
try { try {
const techResult = await streamingQuery({ const techResult = await streamingQuery({
prompt: techAnalysisPrompt, prompt: techAnalysisPrompt,
@@ -243,69 +194,46 @@ CRITICAL INSTRUCTIONS:
thinkingLevel, thinkingLevel,
readOnly: true, readOnly: true,
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined, settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
outputFormat: useStructuredOutput
? {
type: 'json_schema',
schema: techStackOutputSchema,
}
: undefined,
onText: (text) => { onText: (text) => {
logger.debug(`Tech analysis text: ${text.substring(0, 100)}`); logger.debug(`Tech analysis text: ${text.substring(0, 100)}`);
}, },
}); });
// Parse tech stack from response - prefer structured output if available // Parse tech stack from response
let parsedTechnologies: string[] | null = null; const jsonMatch = techResult.text.match(/\{[\s\S]*"technologies"[\s\S]*\}/);
if (jsonMatch) {
const parsed = JSON.parse(jsonMatch[0]);
if (Array.isArray(parsed.technologies)) {
const newTechStack = parsed.technologies as string[];
if (techResult.structured_output) { // Calculate differences
// Use structured output from Claude/Codex models const currentSet = new Set(currentTechStack.map((t) => t.toLowerCase()));
const structured = techResult.structured_output as unknown as TechStackExtractionResult; const newSet = new Set(newTechStack.map((t) => t.toLowerCase()));
if (Array.isArray(structured.technologies)) {
parsedTechnologies = structured.technologies;
logger.info('✅ Received structured output for tech analysis');
}
} else {
// Fall back to text parsing for non-Claude/Codex models
const extracted = extractJson<TechStackExtractionResult>(techResult.text, {
logger,
requiredKey: 'technologies',
requireArray: true,
});
if (extracted && Array.isArray(extracted.technologies)) {
parsedTechnologies = extracted.technologies;
logger.info('✅ Extracted tech stack from text response');
} else {
logger.warn('⚠️ Failed to extract tech stack JSON from response');
}
}
if (parsedTechnologies) { for (const tech of newTechStack) {
const newTechStack = parsedTechnologies; if (!currentSet.has(tech.toLowerCase())) {
result.techStackUpdates.added.push(tech);
// Calculate differences }
const currentSet = new Set(currentTechStack.map((t) => t.toLowerCase()));
const newSet = new Set(newTechStack.map((t) => t.toLowerCase()));
for (const tech of newTechStack) {
if (!currentSet.has(tech.toLowerCase())) {
result.techStackUpdates.added.push(tech);
} }
}
for (const tech of currentTechStack) { for (const tech of currentTechStack) {
if (!newSet.has(tech.toLowerCase())) { if (!newSet.has(tech.toLowerCase())) {
result.techStackUpdates.removed.push(tech); result.techStackUpdates.removed.push(tech);
}
} }
}
// Update spec with new tech stack if there are changes // Update spec with new tech stack if there are changes
if (result.techStackUpdates.added.length > 0 || result.techStackUpdates.removed.length > 0) { if (
specContent = updateTechnologyStack(specContent, newTechStack); result.techStackUpdates.added.length > 0 ||
logger.info( result.techStackUpdates.removed.length > 0
`Updated tech stack: +${result.techStackUpdates.added.length}, -${result.techStackUpdates.removed.length}` ) {
); specContent = updateTechnologyStack(specContent, newTechStack);
logger.info(
`Updated tech stack: +${result.techStackUpdates.added.length}, -${result.techStackUpdates.removed.length}`
);
}
} }
} }
} catch (error) { } catch (error) {
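The tech-stack comparison above reduces to a case-insensitive set difference between the current and newly detected stacks. A standalone sketch of that comparison; the function name and return shape are mine, the logic mirrors the loops shown above.

function diffTechStack(
  current: string[],
  detected: string[]
): { added: string[]; removed: string[] } {
  const currentSet = new Set(current.map((t) => t.toLowerCase()));
  const detectedSet = new Set(detected.map((t) => t.toLowerCase()));
  return {
    added: detected.filter((t) => !currentSet.has(t.toLowerCase())),
    removed: current.filter((t) => !detectedSet.has(t.toLowerCase())),
  };
}

console.log(diffTechStack(['React', 'Express'], ['react', 'Vite']));
// -> { added: ['Vite'], removed: ['Express'] }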

View File

@@ -1,12 +1,11 @@
/** /**
* Auto Mode routes - HTTP API for autonomous feature implementation * Auto Mode routes - HTTP API for autonomous feature implementation
* *
* Uses AutoModeServiceCompat which provides the old interface while * Uses the AutoModeService for real feature execution with Claude Agent SDK
* delegating to GlobalAutoModeService and per-project facades.
*/ */
import { Router } from 'express'; import { Router } from 'express';
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js'; import type { AutoModeService } from '../../services/auto-mode-service.js';
import { validatePathParams } from '../../middleware/validate-paths.js'; import { validatePathParams } from '../../middleware/validate-paths.js';
import { createStopFeatureHandler } from './routes/stop-feature.js'; import { createStopFeatureHandler } from './routes/stop-feature.js';
import { createStatusHandler } from './routes/status.js'; import { createStatusHandler } from './routes/status.js';
@@ -22,12 +21,7 @@ import { createCommitFeatureHandler } from './routes/commit-feature.js';
import { createApprovePlanHandler } from './routes/approve-plan.js'; import { createApprovePlanHandler } from './routes/approve-plan.js';
import { createResumeInterruptedHandler } from './routes/resume-interrupted.js'; import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
/** export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
* Create auto-mode routes.
*
* @param autoModeService - AutoModeServiceCompat instance
*/
export function createAutoModeRoutes(autoModeService: AutoModeServiceCompat): Router {
const router = Router(); const router = Router();
// Auto loop control routes // Auto loop control routes

View File

@@ -3,13 +3,13 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('AutoMode'); const logger = createLogger('AutoMode');
export function createAnalyzeProjectHandler(autoModeService: AutoModeServiceCompat) { export function createAnalyzeProjectHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath } = req.body as { projectPath: string }; const { projectPath } = req.body as { projectPath: string };

View File

@@ -3,13 +3,13 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('AutoMode'); const logger = createLogger('AutoMode');
export function createApprovePlanHandler(autoModeService: AutoModeServiceCompat) { export function createApprovePlanHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { featureId, approved, editedPlan, feedback, projectPath } = req.body as { const { featureId, approved, editedPlan, feedback, projectPath } = req.body as {
@@ -48,11 +48,11 @@ export function createApprovePlanHandler(autoModeService: AutoModeServiceCompat)
// Resolve the pending approval (with recovery support) // Resolve the pending approval (with recovery support)
const result = await autoModeService.resolvePlanApproval( const result = await autoModeService.resolvePlanApproval(
projectPath || '',
featureId, featureId,
approved, approved,
editedPlan, editedPlan,
feedback feedback,
projectPath
); );
if (!result.success) { if (!result.success) {

View File

@@ -3,10 +3,10 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
export function createCommitFeatureHandler(autoModeService: AutoModeServiceCompat) { export function createCommitFeatureHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, featureId, worktreePath } = req.body as { const { projectPath, featureId, worktreePath } = req.body as {

View File

@@ -3,10 +3,10 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
export function createContextExistsHandler(autoModeService: AutoModeServiceCompat) { export function createContextExistsHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, featureId } = req.body as { const { projectPath, featureId } = req.body as {

View File

@@ -3,13 +3,13 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('AutoMode'); const logger = createLogger('AutoMode');
export function createFollowUpFeatureHandler(autoModeService: AutoModeServiceCompat) { export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, featureId, prompt, imagePaths, useWorktrees } = req.body as { const { projectPath, featureId, prompt, imagePaths, useWorktrees } = req.body as {
@@ -30,12 +30,16 @@ export function createFollowUpFeatureHandler(autoModeService: AutoModeServiceCom
// Start follow-up in background // Start follow-up in background
// followUpFeature derives workDir from feature.branchName // followUpFeature derives workDir from feature.branchName
// Default to false to match run-feature/resume-feature behavior.
// Worktrees should only be used when explicitly enabled by the user.
autoModeService autoModeService
// Default to false to match run-feature/resume-feature behavior.
// Worktrees should only be used when explicitly enabled by the user.
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false) .followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
.catch((error) => { .catch((error) => {
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error); logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
})
.finally(() => {
// Release the starting slot when follow-up completes (success or error)
// Note: The feature should be in runningFeatures by this point
}); });
res.json({ success: true }); res.json({ success: true });

View File

@@ -3,13 +3,13 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('AutoMode'); const logger = createLogger('AutoMode');
export function createResumeFeatureHandler(autoModeService: AutoModeServiceCompat) { export function createResumeFeatureHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, featureId, useWorktrees } = req.body as { const { projectPath, featureId, useWorktrees } = req.body as {

View File

@@ -7,7 +7,7 @@
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
const logger = createLogger('ResumeInterrupted'); const logger = createLogger('ResumeInterrupted');
@@ -15,7 +15,7 @@ interface ResumeInterruptedRequest {
projectPath: string; projectPath: string;
} }
export function createResumeInterruptedHandler(autoModeService: AutoModeServiceCompat) { export function createResumeInterruptedHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
const { projectPath } = req.body as ResumeInterruptedRequest; const { projectPath } = req.body as ResumeInterruptedRequest;
@@ -28,7 +28,6 @@ export function createResumeInterruptedHandler(autoModeService: AutoModeServiceC
try { try {
await autoModeService.resumeInterruptedFeatures(projectPath); await autoModeService.resumeInterruptedFeatures(projectPath);
res.json({ res.json({
success: true, success: true,
message: 'Resume check completed', message: 'Resume check completed',

View File

@@ -3,13 +3,13 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('AutoMode'); const logger = createLogger('AutoMode');
export function createRunFeatureHandler(autoModeService: AutoModeServiceCompat) { export function createRunFeatureHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, featureId, useWorktrees } = req.body as { const { projectPath, featureId, useWorktrees } = req.body as {
@@ -26,30 +26,16 @@ export function createRunFeatureHandler(autoModeService: AutoModeServiceCompat)
return; return;
} }
// Check per-worktree capacity before starting
const capacity = await autoModeService.checkWorktreeCapacity(projectPath, featureId);
if (!capacity.hasCapacity) {
const worktreeDesc = capacity.branchName
? `worktree "${capacity.branchName}"`
: 'main worktree';
res.status(429).json({
success: false,
error: `Agent limit reached for ${worktreeDesc} (${capacity.currentAgents}/${capacity.maxAgents}). Wait for running tasks to complete or increase the limit.`,
details: {
currentAgents: capacity.currentAgents,
maxAgents: capacity.maxAgents,
branchName: capacity.branchName,
},
});
return;
}
// Start execution in background // Start execution in background
// executeFeature derives workDir from feature.branchName // executeFeature derives workDir from feature.branchName
autoModeService autoModeService
.executeFeature(projectPath, featureId, useWorktrees ?? false, false) .executeFeature(projectPath, featureId, useWorktrees ?? false, false)
.catch((error) => { .catch((error) => {
logger.error(`Feature ${featureId} error:`, error); logger.error(`Feature ${featureId} error:`, error);
})
.finally(() => {
// Release the starting slot when execution completes (success or error)
// Note: The feature should be in runningFeatures by this point
}); });
res.json({ success: true }); res.json({ success: true });

View File

@@ -3,18 +3,17 @@
  */

 import type { Request, Response } from 'express';
-import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
+import type { AutoModeService } from '../../../services/auto-mode-service.js';
 import { createLogger } from '@automaker/utils';
 import { getErrorMessage, logError } from '../common.js';

 const logger = createLogger('AutoMode');

-export function createStartHandler(autoModeService: AutoModeServiceCompat) {
+export function createStartHandler(autoModeService: AutoModeService) {
   return async (req: Request, res: Response): Promise<void> => {
     try {
-      const { projectPath, branchName, maxConcurrency } = req.body as {
+      const { projectPath, maxConcurrency } = req.body as {
         projectPath: string;
-        branchName?: string | null;
         maxConcurrency?: number;
       };
@@ -26,38 +25,26 @@ export function createStartHandler(autoModeService: AutoModeServiceCompat) {
         return;
       }

-      // Normalize branchName: undefined becomes null
-      const normalizedBranchName = branchName ?? null;
-      const worktreeDesc = normalizedBranchName
-        ? `worktree ${normalizedBranchName}`
-        : 'main worktree';
-
       // Check if already running
-      if (autoModeService.isAutoLoopRunningForProject(projectPath, normalizedBranchName)) {
+      if (autoModeService.isAutoLoopRunningForProject(projectPath)) {
         res.json({
           success: true,
-          message: `Auto mode is already running for ${worktreeDesc}`,
+          message: 'Auto mode is already running for this project',
           alreadyRunning: true,
-          branchName: normalizedBranchName,
         });
         return;
       }

-      // Start the auto loop for this project/worktree
-      const resolvedMaxConcurrency = await autoModeService.startAutoLoopForProject(
-        projectPath,
-        normalizedBranchName,
-        maxConcurrency
-      );
+      // Start the auto loop for this project
+      await autoModeService.startAutoLoopForProject(projectPath, maxConcurrency ?? 3);

       logger.info(
-        `Started auto loop for ${worktreeDesc} in project: ${projectPath} with maxConcurrency: ${resolvedMaxConcurrency}`
+        `Started auto loop for project: ${projectPath} with maxConcurrency: ${maxConcurrency ?? 3}`
       );

       res.json({
         success: true,
-        message: `Auto mode started with max ${resolvedMaxConcurrency} concurrent features`,
-        branchName: normalizedBranchName,
+        message: `Auto mode started with max ${maxConcurrency ?? 3} concurrent features`,
       });
     } catch (error) {
       logError(error, 'Start auto mode failed');

View File

@@ -6,29 +6,17 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
/** export function createStatusHandler(autoModeService: AutoModeService) {
* Create status handler.
*/
export function createStatusHandler(autoModeService: AutoModeServiceCompat) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, branchName } = req.body as { const { projectPath } = req.body as { projectPath?: string };
projectPath?: string;
branchName?: string | null;
};
// If projectPath is provided, return per-project/worktree status // If projectPath is provided, return per-project status
if (projectPath) { if (projectPath) {
// Normalize branchName: undefined becomes null const projectStatus = autoModeService.getStatusForProject(projectPath);
const normalizedBranchName = branchName ?? null;
const projectStatus = autoModeService.getStatusForProject(
projectPath,
normalizedBranchName
);
res.json({ res.json({
success: true, success: true,
isRunning: projectStatus.runningCount > 0, isRunning: projectStatus.runningCount > 0,
@@ -37,20 +25,17 @@ export function createStatusHandler(autoModeService: AutoModeServiceCompat) {
runningCount: projectStatus.runningCount, runningCount: projectStatus.runningCount,
maxConcurrency: projectStatus.maxConcurrency, maxConcurrency: projectStatus.maxConcurrency,
projectPath, projectPath,
branchName: normalizedBranchName,
}); });
return; return;
} }
// Global status for backward compatibility // Fall back to global status for backward compatibility
const status = autoModeService.getStatus(); const status = autoModeService.getStatus();
const activeProjects = autoModeService.getActiveAutoLoopProjects(); const activeProjects = autoModeService.getActiveAutoLoopProjects();
const activeWorktrees = autoModeService.getActiveAutoLoopWorktrees();
res.json({ res.json({
success: true, success: true,
...status, ...status,
activeAutoLoopProjects: activeProjects, activeAutoLoopProjects: activeProjects,
activeAutoLoopWorktrees: activeWorktrees,
}); });
} catch (error) { } catch (error) {
logError(error, 'Get status failed'); logError(error, 'Get status failed');
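For clarity, the per-project status responses on the two sides of this diff differ mainly in the branchName echo. A sketch of the implied response shapes; the interface names are mine and only the fields visible in the res.json calls above are included (other fields may exist outside this hunk).

// Shapes implied by the res.json(...) calls above; names are illustrative.
interface ProjectStatusResponse {
  success: boolean;
  isRunning: boolean;
  runningCount: number;
  maxConcurrency: number;
  projectPath: string;
}

// Base side (per-worktree status) additionally echoes the normalized branch name.
interface ProjectWorktreeStatusResponse extends ProjectStatusResponse {
  branchName: string | null;
}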

View File

@@ -3,10 +3,10 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
export function createStopFeatureHandler(autoModeService: AutoModeServiceCompat) { export function createStopFeatureHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { featureId } = req.body as { featureId: string }; const { featureId } = req.body as { featureId: string };

View File

@@ -3,18 +3,17 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('AutoMode'); const logger = createLogger('AutoMode');
export function createStopHandler(autoModeService: AutoModeServiceCompat) { export function createStopHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, branchName } = req.body as { const { projectPath } = req.body as {
projectPath: string; projectPath: string;
branchName?: string | null;
}; };
if (!projectPath) { if (!projectPath) {
@@ -25,38 +24,27 @@ export function createStopHandler(autoModeService: AutoModeServiceCompat) {
return; return;
} }
// Normalize branchName: undefined becomes null
const normalizedBranchName = branchName ?? null;
const worktreeDesc = normalizedBranchName
? `worktree ${normalizedBranchName}`
: 'main worktree';
// Check if running // Check if running
if (!autoModeService.isAutoLoopRunningForProject(projectPath, normalizedBranchName)) { if (!autoModeService.isAutoLoopRunningForProject(projectPath)) {
res.json({ res.json({
success: true, success: true,
message: `Auto mode is not running for ${worktreeDesc}`, message: 'Auto mode is not running for this project',
wasRunning: false, wasRunning: false,
branchName: normalizedBranchName,
}); });
return; return;
} }
// Stop the auto loop for this project/worktree // Stop the auto loop for this project
const runningCount = await autoModeService.stopAutoLoopForProject( const runningCount = await autoModeService.stopAutoLoopForProject(projectPath);
projectPath,
normalizedBranchName
);
logger.info( logger.info(
`Stopped auto loop for ${worktreeDesc} in project: ${projectPath}, ${runningCount} features still running` `Stopped auto loop for project: ${projectPath}, ${runningCount} features still running`
); );
res.json({ res.json({
success: true, success: true,
message: 'Auto mode stopped', message: 'Auto mode stopped',
runningFeaturesCount: runningCount, runningFeaturesCount: runningCount,
branchName: normalizedBranchName,
}); });
} catch (error) { } catch (error) {
logError(error, 'Stop auto mode failed'); logError(error, 'Stop auto mode failed');

View File

@@ -3,10 +3,10 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
export function createVerifyFeatureHandler(autoModeService: AutoModeServiceCompat) { export function createVerifyFeatureHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, featureId } = req.body as { const { projectPath, featureId } = req.body as {


@@ -28,7 +28,7 @@ import type { SettingsService } from '../../services/settings-service.js';
import { import {
getAutoLoadClaudeMdSetting, getAutoLoadClaudeMdSetting,
getPromptCustomization, getPromptCustomization,
getPhaseModelWithOverrides, getActiveClaudeApiProfile,
} from '../../lib/settings-helpers.js'; } from '../../lib/settings-helpers.js';
const featureLoader = new FeatureLoader(); const featureLoader = new FeatureLoader();
@@ -121,42 +121,18 @@ export async function generateBacklogPlan(
content: 'Generating plan with AI...', content: 'Generating plan with AI...',
}); });
// Get the model to use from settings or provided override with provider info // Get the model to use from settings or provided override
let effectiveModel = model; let effectiveModel = model;
let thinkingLevel: ThinkingLevel | undefined; let thinkingLevel: ThinkingLevel | undefined;
let claudeCompatibleProvider: import('@automaker/types').ClaudeCompatibleProvider | undefined; if (!effectiveModel) {
let credentials: import('@automaker/types').Credentials | undefined; const settings = await settingsService?.getGlobalSettings();
const phaseModelEntry =
if (effectiveModel) { settings?.phaseModels?.backlogPlanningModel || DEFAULT_PHASE_MODELS.backlogPlanningModel;
// Use explicit override - resolve model alias and get credentials const resolved = resolvePhaseModel(phaseModelEntry);
const resolved = resolvePhaseModel({ model: effectiveModel });
effectiveModel = resolved.model;
thinkingLevel = resolved.thinkingLevel;
credentials = await settingsService?.getCredentials();
} else if (settingsService) {
// Use settings-based model with provider info
const phaseResult = await getPhaseModelWithOverrides(
'backlogPlanningModel',
settingsService,
projectPath,
'[BacklogPlan]'
);
const resolved = resolvePhaseModel(phaseResult.phaseModel);
effectiveModel = resolved.model;
thinkingLevel = resolved.thinkingLevel;
claudeCompatibleProvider = phaseResult.provider;
credentials = phaseResult.credentials;
} else {
// Fallback to defaults
const resolved = resolvePhaseModel(DEFAULT_PHASE_MODELS.backlogPlanningModel);
effectiveModel = resolved.model; effectiveModel = resolved.model;
thinkingLevel = resolved.thinkingLevel; thinkingLevel = resolved.thinkingLevel;
} }
logger.info( logger.info('[BacklogPlan] Using model:', effectiveModel);
'[BacklogPlan] Using model:',
effectiveModel,
claudeCompatibleProvider ? `via provider: ${claudeCompatibleProvider.name}` : 'direct API'
);
const provider = ProviderFactory.getProviderForModel(effectiveModel); const provider = ProviderFactory.getProviderForModel(effectiveModel);
// Strip provider prefix - providers expect bare model IDs // Strip provider prefix - providers expect bare model IDs
@@ -189,6 +165,12 @@ ${userPrompt}`;
finalSystemPrompt = undefined; // System prompt is now embedded in the user prompt finalSystemPrompt = undefined; // System prompt is now embedded in the user prompt
} }
// Get active Claude API profile for alternative endpoint configuration
const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
settingsService,
'[BacklogPlan]'
);
// Execute the query // Execute the query
const stream = provider.executeQuery({ const stream = provider.executeQuery({
prompt: finalPrompt, prompt: finalPrompt,
@@ -201,7 +183,7 @@ ${userPrompt}`;
settingSources: autoLoadClaudeMd ? ['user', 'project'] : undefined, settingSources: autoLoadClaudeMd ? ['user', 'project'] : undefined,
readOnly: true, // Plan generation only generates text, doesn't write files readOnly: true, // Plan generation only generates text, doesn't write files
thinkingLevel, // Pass thinking level for extended thinking thinkingLevel, // Pass thinking level for extended thinking
claudeCompatibleProvider, // Pass provider for alternative endpoint configuration claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
}); });
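The `getActiveClaudeApiProfile` helper recurs at nearly every call site in this comparison, so a sketch of its apparent contract helps when reading the remaining hunks; everything below is inferred from the destructuring in this diff, not from the real lib/settings-helpers.ts.

```typescript
// Inferred contract only — treat every name and field below as an assumption.
interface ClaudeApiProfileLike {
  name: string;      // stand-in shape; the real profile type lives in @automaker/types
  baseUrl?: string;
}
interface CredentialsLike {
  [key: string]: unknown;
}

interface ActiveClaudeApiProfileResult {
  profile?: ClaudeApiProfileLike; // active alternative-endpoint profile, if one is selected
  credentials?: CredentialsLike;  // lets providers resolve a 'credentials'-sourced API key
}

// Every call site in this diff follows the same pattern:
//   const { profile: claudeApiProfile, credentials } =
//     await getActiveClaudeApiProfile(settingsService, '[CallerTag]');
// and forwards both values into executeQuery / simpleQuery / streamingQuery.
declare function getActiveClaudeApiProfile(
  settingsService: unknown,
  logTag: string
): Promise<ActiveClaudeApiProfileResult>;
```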


@@ -85,9 +85,8 @@ export function createApplyHandler() {
if (!change.feature) continue; if (!change.feature) continue;
try { try {
// Create the new feature - use the AI-generated ID if provided // Create the new feature
const newFeature = await featureLoader.create(projectPath, { const newFeature = await featureLoader.create(projectPath, {
id: change.feature.id, // Use descriptive ID from AI if provided
title: change.feature.title, title: change.feature.title,
description: change.feature.description || '', description: change.feature.description || '',
category: change.feature.category || 'Uncategorized', category: change.feature.category || 'Uncategorized',


@@ -12,6 +12,7 @@
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
import { PathNotAllowedError } from '@automaker/platform'; import { PathNotAllowedError } from '@automaker/platform';
import { resolvePhaseModel } from '@automaker/model-resolver'; import { resolvePhaseModel } from '@automaker/model-resolver';
import { simpleQuery } from '../../../providers/simple-query-service.js'; import { simpleQuery } from '../../../providers/simple-query-service.js';
@@ -21,7 +22,7 @@ import type { SettingsService } from '../../../services/settings-service.js';
import { import {
getAutoLoadClaudeMdSetting, getAutoLoadClaudeMdSetting,
getPromptCustomization, getPromptCustomization,
getPhaseModelWithOverrides, getActiveClaudeApiProfile,
} from '../../../lib/settings-helpers.js'; } from '../../../lib/settings-helpers.js';
const logger = createLogger('DescribeFile'); const logger = createLogger('DescribeFile');
@@ -155,22 +156,20 @@ ${contentToAnalyze}`;
'[DescribeFile]' '[DescribeFile]'
); );
// Get model from phase settings with provider info // Get model from phase settings
const { const settings = await settingsService?.getGlobalSettings();
phaseModel: phaseModelEntry, logger.info(`Raw phaseModels from settings:`, JSON.stringify(settings?.phaseModels, null, 2));
provider, const phaseModelEntry =
credentials, settings?.phaseModels?.fileDescriptionModel || DEFAULT_PHASE_MODELS.fileDescriptionModel;
} = await getPhaseModelWithOverrides( logger.info(`fileDescriptionModel entry:`, JSON.stringify(phaseModelEntry));
'fileDescriptionModel',
settingsService,
cwd,
'[DescribeFile]'
);
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry); const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
logger.info( logger.info(`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`);
`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`,
provider ? `via provider: ${provider.name}` : 'direct API' // Get active Claude API profile for alternative endpoint configuration
const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
settingsService,
'[DescribeFile]'
); );
// Use simpleQuery - provider abstraction handles routing to correct provider // Use simpleQuery - provider abstraction handles routing to correct provider
@@ -183,7 +182,7 @@ ${contentToAnalyze}`;
thinkingLevel, thinkingLevel,
readOnly: true, // File description only reads, doesn't write readOnly: true, // File description only reads, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined, settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
}); });


@@ -13,7 +13,7 @@
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import { createLogger, readImageAsBase64 } from '@automaker/utils'; import { createLogger, readImageAsBase64 } from '@automaker/utils';
import { isCursorModel } from '@automaker/types'; import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver'; import { resolvePhaseModel } from '@automaker/model-resolver';
import { simpleQuery } from '../../../providers/simple-query-service.js'; import { simpleQuery } from '../../../providers/simple-query-service.js';
import * as secureFs from '../../../lib/secure-fs.js'; import * as secureFs from '../../../lib/secure-fs.js';
@@ -22,7 +22,7 @@ import type { SettingsService } from '../../../services/settings-service.js';
import { import {
getAutoLoadClaudeMdSetting, getAutoLoadClaudeMdSetting,
getPromptCustomization, getPromptCustomization,
getPhaseModelWithOverrides, getActiveClaudeApiProfile,
} from '../../../lib/settings-helpers.js'; } from '../../../lib/settings-helpers.js';
const logger = createLogger('DescribeImage'); const logger = createLogger('DescribeImage');
@@ -274,27 +274,23 @@ export function createDescribeImageHandler(
'[DescribeImage]' '[DescribeImage]'
); );
// Get model from phase settings with provider info // Get model from phase settings
const { const settings = await settingsService?.getGlobalSettings();
phaseModel: phaseModelEntry, const phaseModelEntry =
provider, settings?.phaseModels?.imageDescriptionModel || DEFAULT_PHASE_MODELS.imageDescriptionModel;
credentials,
} = await getPhaseModelWithOverrides(
'imageDescriptionModel',
settingsService,
cwd,
'[DescribeImage]'
);
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry); const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
logger.info( logger.info(`[${requestId}] Using model: ${model}`);
`[${requestId}] Using model: ${model}`,
provider ? `via provider: ${provider.name}` : 'direct API'
);
// Get customized prompts from settings // Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[DescribeImage]'); const prompts = await getPromptCustomization(settingsService, '[DescribeImage]');
// Get active Claude API profile for alternative endpoint configuration
const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
settingsService,
'[DescribeImage]'
);
// Build the instruction text from centralized prompts // Build the instruction text from centralized prompts
const instructionText = prompts.contextDescription.describeImagePrompt; const instructionText = prompts.contextDescription.describeImagePrompt;
@@ -336,7 +332,7 @@ export function createDescribeImageHandler(
thinkingLevel, thinkingLevel,
readOnly: true, // Image description only reads, doesn't write readOnly: true, // Image description only reads, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined, settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
}); });


@@ -12,7 +12,10 @@ import { resolveModelString } from '@automaker/model-resolver';
import { CLAUDE_MODEL_MAP, type ThinkingLevel } from '@automaker/types'; import { CLAUDE_MODEL_MAP, type ThinkingLevel } from '@automaker/types';
import { simpleQuery } from '../../../providers/simple-query-service.js'; import { simpleQuery } from '../../../providers/simple-query-service.js';
import type { SettingsService } from '../../../services/settings-service.js'; import type { SettingsService } from '../../../services/settings-service.js';
import { getPromptCustomization, getProviderByModelId } from '../../../lib/settings-helpers.js'; import {
getPromptCustomization,
getActiveClaudeApiProfile,
} from '../../../lib/settings-helpers.js';
import { import {
buildUserPrompt, buildUserPrompt,
isValidEnhancementMode, isValidEnhancementMode,
@@ -33,8 +36,6 @@ interface EnhanceRequestBody {
model?: string; model?: string;
/** Optional thinking level for Claude models */ /** Optional thinking level for Claude models */
thinkingLevel?: ThinkingLevel; thinkingLevel?: ThinkingLevel;
/** Optional project path for per-project Claude API profile */
projectPath?: string;
} }
/** /**
@@ -64,7 +65,7 @@ export function createEnhanceHandler(
): (req: Request, res: Response) => Promise<void> { ): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { originalText, enhancementMode, model, thinkingLevel, projectPath } = const { originalText, enhancementMode, model, thinkingLevel } =
req.body as EnhanceRequestBody; req.body as EnhanceRequestBody;
// Validate required fields // Validate required fields
@@ -123,35 +124,17 @@ export function createEnhanceHandler(
// Build the user prompt with few-shot examples // Build the user prompt with few-shot examples
const userPrompt = buildUserPrompt(validMode, trimmedText, true); const userPrompt = buildUserPrompt(validMode, trimmedText, true);
// Check if the model is a provider model (like "GLM-4.5-Air") // Resolve the model - use the passed model, default to sonnet for quality
// If so, get the provider config and resolved Claude model const resolvedModel = resolveModelString(model, CLAUDE_MODEL_MAP.sonnet);
let claudeCompatibleProvider: import('@automaker/types').ClaudeCompatibleProvider | undefined;
let providerResolvedModel: string | undefined;
let credentials = await settingsService?.getCredentials();
if (model && settingsService) {
const providerResult = await getProviderByModelId(
model,
settingsService,
'[EnhancePrompt]'
);
if (providerResult.provider) {
claudeCompatibleProvider = providerResult.provider;
providerResolvedModel = providerResult.resolvedModel;
credentials = providerResult.credentials;
logger.info(
`Using provider "${providerResult.provider.name}" for model "${model}"` +
(providerResolvedModel ? ` -> resolved to "${providerResolvedModel}"` : '')
);
}
}
// Resolve the model - use provider resolved model, passed model, or default to sonnet
const resolvedModel =
providerResolvedModel || resolveModelString(model, CLAUDE_MODEL_MAP.sonnet);
logger.debug(`Using model: ${resolvedModel}`); logger.debug(`Using model: ${resolvedModel}`);
// Get active Claude API profile for alternative endpoint configuration
const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
settingsService,
'[EnhancePrompt]'
);
// Use simpleQuery - provider abstraction handles routing to correct provider // Use simpleQuery - provider abstraction handles routing to correct provider
// The system prompt is combined with user prompt since some providers // The system prompt is combined with user prompt since some providers
// don't have a separate system prompt concept // don't have a separate system prompt concept
@@ -163,8 +146,8 @@ export function createEnhanceHandler(
allowedTools: [], allowedTools: [],
thinkingLevel, thinkingLevel,
readOnly: true, // Prompt enhancement only generates text, doesn't write files readOnly: true, // Prompt enhancement only generates text, doesn't write files
claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
claudeCompatibleProvider, // Pass provider for alternative endpoint configuration
}); });
const enhancedText = result.text; const enhancedText = result.text;
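A hypothetical request against this handler, mirroring `EnhanceRequestBody` from the hunk above; the URL and the mode value are assumptions.

```typescript
// Body fields mirror EnhanceRequestBody above; the endpoint path and the
// enhancementMode value are guesses — check isValidEnhancementMode() for real modes.
const enhanceBody = {
  originalText: 'make the login form nicer',
  enhancementMode: 'improve',        // assumed mode name
  model: 'sonnet',                   // optional; falls back to CLAUDE_MODEL_MAP.sonnet
  thinkingLevel: 'medium',           // optional, Claude models only
  // projectPath: '/path/to/project', // accepted only by the per-project-profile variant
};

const enhanceRes = await fetch('http://localhost:3000/api/enhance-prompt', { // assumed path
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(enhanceBody),
});
console.log(await enhanceRes.json()); // response shape is not part of this excerpt
```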


@@ -5,7 +5,6 @@
import { Router } from 'express'; import { Router } from 'express';
import { FeatureLoader } from '../../services/feature-loader.js'; import { FeatureLoader } from '../../services/feature-loader.js';
import type { SettingsService } from '../../services/settings-service.js'; import type { SettingsService } from '../../services/settings-service.js';
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js';
import type { EventEmitter } from '../../lib/events.js'; import type { EventEmitter } from '../../lib/events.js';
import { validatePathParams } from '../../middleware/validate-paths.js'; import { validatePathParams } from '../../middleware/validate-paths.js';
import { createListHandler } from './routes/list.js'; import { createListHandler } from './routes/list.js';
@@ -17,22 +16,15 @@ import { createBulkDeleteHandler } from './routes/bulk-delete.js';
import { createDeleteHandler } from './routes/delete.js'; import { createDeleteHandler } from './routes/delete.js';
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js'; import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
import { createGenerateTitleHandler } from './routes/generate-title.js'; import { createGenerateTitleHandler } from './routes/generate-title.js';
import { createExportHandler } from './routes/export.js';
import { createImportHandler, createConflictCheckHandler } from './routes/import.js';
export function createFeaturesRoutes( export function createFeaturesRoutes(
featureLoader: FeatureLoader, featureLoader: FeatureLoader,
settingsService?: SettingsService, settingsService?: SettingsService,
events?: EventEmitter, events?: EventEmitter
autoModeService?: AutoModeServiceCompat
): Router { ): Router {
const router = Router(); const router = Router();
router.post( router.post('/list', validatePathParams('projectPath'), createListHandler(featureLoader));
'/list',
validatePathParams('projectPath'),
createListHandler(featureLoader, autoModeService)
);
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader)); router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
router.post( router.post(
'/create', '/create',
@@ -54,13 +46,6 @@ export function createFeaturesRoutes(
router.post('/agent-output', createAgentOutputHandler(featureLoader)); router.post('/agent-output', createAgentOutputHandler(featureLoader));
router.post('/raw-output', createRawOutputHandler(featureLoader)); router.post('/raw-output', createRawOutputHandler(featureLoader));
router.post('/generate-title', createGenerateTitleHandler(settingsService)); router.post('/generate-title', createGenerateTitleHandler(settingsService));
router.post('/export', validatePathParams('projectPath'), createExportHandler(featureLoader));
router.post('/import', validatePathParams('projectPath'), createImportHandler(featureLoader));
router.post(
'/check-conflicts',
validatePathParams('projectPath'),
createConflictCheckHandler(featureLoader)
);
return router; return router;
} }


@@ -43,7 +43,7 @@ export function createCreateHandler(featureLoader: FeatureLoader, events?: Event
if (events) { if (events) {
events.emit('feature:created', { events.emit('feature:created', {
featureId: created.id, featureId: created.id,
featureName: created.title || 'Untitled Feature', featureName: created.name,
projectPath, projectPath,
}); });
} }


@@ -1,96 +0,0 @@
/**
* POST /export endpoint - Export features to JSON or YAML format
*/
import type { Request, Response } from 'express';
import type { FeatureLoader } from '../../../services/feature-loader.js';
import {
getFeatureExportService,
type ExportFormat,
type BulkExportOptions,
} from '../../../services/feature-export-service.js';
import { getErrorMessage, logError } from '../common.js';
interface ExportRequest {
projectPath: string;
/** Feature IDs to export. If empty/undefined, exports all features */
featureIds?: string[];
/** Export format: 'json' or 'yaml' */
format?: ExportFormat;
/** Whether to include description history */
includeHistory?: boolean;
/** Whether to include plan spec */
includePlanSpec?: boolean;
/** Filter by category */
category?: string;
/** Filter by status */
status?: string;
/** Pretty print output */
prettyPrint?: boolean;
/** Optional metadata to include */
metadata?: {
projectName?: string;
projectPath?: string;
branch?: string;
[key: string]: unknown;
};
}
export function createExportHandler(featureLoader: FeatureLoader) {
const exportService = getFeatureExportService();
return async (req: Request, res: Response): Promise<void> => {
try {
const {
projectPath,
featureIds,
format = 'json',
includeHistory = true,
includePlanSpec = true,
category,
status,
prettyPrint = true,
metadata,
} = req.body as ExportRequest;
if (!projectPath) {
res.status(400).json({ success: false, error: 'projectPath is required' });
return;
}
// Validate format
if (format !== 'json' && format !== 'yaml') {
res.status(400).json({
success: false,
error: 'format must be "json" or "yaml"',
});
return;
}
const options: BulkExportOptions = {
format,
includeHistory,
includePlanSpec,
category,
status,
featureIds,
prettyPrint,
metadata,
};
const exportData = await exportService.exportFeatures(projectPath, options);
// Return the export data as a string in the response
res.json({
success: true,
data: exportData,
format,
contentType: format === 'json' ? 'application/json' : 'application/x-yaml',
filename: `features-export.${format === 'json' ? 'json' : 'yaml'}`,
});
} catch (error) {
logError(error, 'Export features failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
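A hypothetical call against the export route being dropped here; the `/api/features` prefix is an assumption, while the body and response fields come from `ExportRequest` and the `res.json()` payload above.

```typescript
// Prefix assumed; field names taken from the removed handler above.
const exportRes = await fetch('http://localhost:3000/api/features/export', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    projectPath: '/path/to/project',
    format: 'yaml',        // 'json' | 'yaml'
    includeHistory: true,
    includePlanSpec: true,
    category: 'Backend',   // optional filter
    prettyPrint: true,
  }),
});
const { success, data, filename, contentType } = await exportRes.json();
// `data` holds the serialized features; save it as `filename` or feed it back into /import.
```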


@@ -10,13 +10,15 @@ import { createLogger } from '@automaker/utils';
import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver'; import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver';
import { simpleQuery } from '../../../providers/simple-query-service.js'; import { simpleQuery } from '../../../providers/simple-query-service.js';
import type { SettingsService } from '../../../services/settings-service.js'; import type { SettingsService } from '../../../services/settings-service.js';
import { getPromptCustomization } from '../../../lib/settings-helpers.js'; import {
getPromptCustomization,
getActiveClaudeApiProfile,
} from '../../../lib/settings-helpers.js';
const logger = createLogger('GenerateTitle'); const logger = createLogger('GenerateTitle');
interface GenerateTitleRequestBody { interface GenerateTitleRequestBody {
description: string; description: string;
projectPath?: string;
} }
interface GenerateTitleSuccessResponse { interface GenerateTitleSuccessResponse {
@@ -34,7 +36,7 @@ export function createGenerateTitleHandler(
): (req: Request, res: Response) => Promise<void> { ): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { description, projectPath } = req.body as GenerateTitleRequestBody; const { description } = req.body as GenerateTitleRequestBody;
if (!description || typeof description !== 'string') { if (!description || typeof description !== 'string') {
const response: GenerateTitleErrorResponse = { const response: GenerateTitleErrorResponse = {
@@ -61,8 +63,11 @@ export function createGenerateTitleHandler(
const prompts = await getPromptCustomization(settingsService, '[GenerateTitle]'); const prompts = await getPromptCustomization(settingsService, '[GenerateTitle]');
const systemPrompt = prompts.titleGeneration.systemPrompt; const systemPrompt = prompts.titleGeneration.systemPrompt;
// Get credentials for API calls (uses hardcoded haiku model, no phase setting) // Get active Claude API profile for alternative endpoint configuration
const credentials = await settingsService?.getCredentials(); const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
settingsService,
'[GenerateTitle]'
);
const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`; const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`;
@@ -73,6 +78,7 @@ export function createGenerateTitleHandler(
cwd: process.cwd(), cwd: process.cwd(),
maxTurns: 1, maxTurns: 1,
allowedTools: [], allowedTools: [],
claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
}); });


@@ -1,210 +0,0 @@
/**
* POST /import endpoint - Import features from JSON or YAML format
*/
import type { Request, Response } from 'express';
import type { FeatureLoader } from '../../../services/feature-loader.js';
import type { FeatureImportResult, Feature, FeatureExport } from '@automaker/types';
import { getFeatureExportService } from '../../../services/feature-export-service.js';
import { getErrorMessage, logError } from '../common.js';
interface ImportRequest {
projectPath: string;
/** Raw JSON or YAML string containing feature data */
data: string;
/** Whether to overwrite existing features with same ID */
overwrite?: boolean;
/** Whether to preserve branch info from imported features */
preserveBranchInfo?: boolean;
/** Optional category to assign to all imported features */
targetCategory?: string;
}
interface ConflictCheckRequest {
projectPath: string;
/** Raw JSON or YAML string containing feature data */
data: string;
}
interface ConflictInfo {
featureId: string;
title?: string;
existingTitle?: string;
hasConflict: boolean;
}
export function createImportHandler(featureLoader: FeatureLoader) {
const exportService = getFeatureExportService();
return async (req: Request, res: Response): Promise<void> => {
try {
const {
projectPath,
data,
overwrite = false,
preserveBranchInfo = false,
targetCategory,
} = req.body as ImportRequest;
if (!projectPath) {
res.status(400).json({ success: false, error: 'projectPath is required' });
return;
}
if (!data) {
res.status(400).json({ success: false, error: 'data is required' });
return;
}
// Detect format and parse the data
const format = exportService.detectFormat(data);
if (!format) {
res.status(400).json({
success: false,
error: 'Invalid data format. Expected valid JSON or YAML.',
});
return;
}
const parsed = exportService.parseImportData(data);
if (!parsed) {
res.status(400).json({
success: false,
error: 'Failed to parse import data. Ensure it is valid JSON or YAML.',
});
return;
}
// Determine if this is a single feature or bulk import
const isBulkImport =
'features' in parsed && Array.isArray((parsed as { features: unknown }).features);
let results: FeatureImportResult[];
if (isBulkImport) {
// Bulk import
results = await exportService.importFeatures(projectPath, data, {
overwrite,
preserveBranchInfo,
targetCategory,
});
} else {
// Single feature import - we know it's not a bulk export at this point
// It must be either a Feature or FeatureExport
const singleData = parsed as Feature | FeatureExport;
const result = await exportService.importFeature(projectPath, {
data: singleData,
overwrite,
preserveBranchInfo,
targetCategory,
});
results = [result];
}
const successCount = results.filter((r) => r.success).length;
const failureCount = results.filter((r) => !r.success).length;
const allSuccessful = failureCount === 0;
res.json({
success: allSuccessful,
importedCount: successCount,
failedCount: failureCount,
results,
});
} catch (error) {
logError(error, 'Import features failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
/**
* Create handler for checking conflicts before import
*/
export function createConflictCheckHandler(featureLoader: FeatureLoader) {
const exportService = getFeatureExportService();
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, data } = req.body as ConflictCheckRequest;
if (!projectPath) {
res.status(400).json({ success: false, error: 'projectPath is required' });
return;
}
if (!data) {
res.status(400).json({ success: false, error: 'data is required' });
return;
}
// Parse the import data
const format = exportService.detectFormat(data);
if (!format) {
res.status(400).json({
success: false,
error: 'Invalid data format. Expected valid JSON or YAML.',
});
return;
}
const parsed = exportService.parseImportData(data);
if (!parsed) {
res.status(400).json({
success: false,
error: 'Failed to parse import data.',
});
return;
}
// Extract features from the data using type guards
let featuresToCheck: Array<{ id: string; title?: string }> = [];
if (exportService.isBulkExport(parsed)) {
// Bulk export format
featuresToCheck = parsed.features.map((f) => ({
id: f.feature.id,
title: f.feature.title,
}));
} else if (exportService.isFeatureExport(parsed)) {
// Single FeatureExport format
featuresToCheck = [
{
id: parsed.feature.id,
title: parsed.feature.title,
},
];
} else if (exportService.isRawFeature(parsed)) {
// Raw Feature format
featuresToCheck = [{ id: parsed.id, title: parsed.title }];
}
// Check each feature for conflicts in parallel
const conflicts: ConflictInfo[] = await Promise.all(
featuresToCheck.map(async (feature) => {
const existing = await featureLoader.get(projectPath, feature.id);
return {
featureId: feature.id,
title: feature.title,
existingTitle: existing?.title,
hasConflict: !!existing,
};
})
);
const hasConflicts = conflicts.some((c) => c.hasConflict);
res.json({
success: true,
hasConflicts,
conflicts,
totalFeatures: featuresToCheck.length,
conflictCount: conflicts.filter((c) => c.hasConflict).length,
});
} catch (error) {
logError(error, 'Conflict check failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
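The two removed import routes were designed to be used together; a hypothetical flow (mount prefix assumed, browser `confirm()` standing in for real UI):

```typescript
// Mount prefix assumed; request/response fields come from the removed handlers above.
async function importWithConflictCheck(projectPath: string, data: string) {
  const base = 'http://localhost:3000/api/features';
  const post = (route: string, body: unknown) =>
    fetch(`${base}${route}`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body),
    }).then((r) => r.json());

  // 1. Ask the server which feature IDs already exist.
  const check = await post('/check-conflicts', { projectPath, data });

  // 2. Only overwrite if the user explicitly agrees (confirm() is a stand-in for real UI).
  const overwrite = check.hasConflicts
    ? confirm(`${check.conflictCount} feature(s) already exist. Overwrite?`)
    : false;

  // 3. Perform the import (single feature or bulk — the handler detects the format).
  return post('/import', { projectPath, data, overwrite, preserveBranchInfo: false });
}
```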


@@ -1,22 +1,12 @@
/** /**
* POST /list endpoint - List all features for a project * POST /list endpoint - List all features for a project
*
* Also performs orphan detection when a project is loaded to identify
* features whose branches no longer exist. This runs on every project load/switch.
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import { FeatureLoader } from '../../../services/feature-loader.js'; import { FeatureLoader } from '../../../services/feature-loader.js';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
import { createLogger } from '@automaker/utils';
const logger = createLogger('FeaturesListRoute'); export function createListHandler(featureLoader: FeatureLoader) {
export function createListHandler(
featureLoader: FeatureLoader,
autoModeService?: AutoModeServiceCompat
) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath } = req.body as { projectPath: string }; const { projectPath } = req.body as { projectPath: string };
@@ -27,26 +17,6 @@ export function createListHandler(
} }
const features = await featureLoader.getAll(projectPath); const features = await featureLoader.getAll(projectPath);
// Run orphan detection in background when project is loaded
// This detects features whose branches no longer exist (e.g., after merge/delete)
// We don't await this to keep the list response fast
// Note: detectOrphanedFeatures handles errors internally and always resolves
if (autoModeService) {
autoModeService.detectOrphanedFeatures(projectPath).then((orphanedFeatures) => {
if (orphanedFeatures.length > 0) {
logger.info(
`[ProjectLoad] Detected ${orphanedFeatures.length} orphaned feature(s) in ${projectPath}`
);
for (const { feature, missingBranch } of orphanedFeatures) {
logger.info(
`[ProjectLoad] Orphaned: ${feature.title || feature.id} - branch "${missingBranch}" no longer exists`
);
}
}
});
}
res.json({ success: true, features }); res.json({ success: true, features });
} catch (error) { } catch (error) {
logError(error, 'List features failed'); logError(error, 'List features failed');
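The orphan check destructures `{ feature, missingBranch }` from each array element, which suggests a return type roughly like the sketch below; this is an inference, not the actual service definition.

```typescript
// Inferred from the destructuring above — not the real AutoModeServiceCompat interface.
import type { Feature } from '@automaker/types';

interface OrphanedFeature {
  feature: Feature;      // feature whose work branch no longer exists (e.g. merged and deleted)
  missingBranch: string; // name of the missing branch
}

interface OrphanDetection {
  detectOrphanedFeatures(projectPath: string): Promise<OrphanedFeature[]>;
}
```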


@@ -31,9 +31,7 @@ export function createSaveBoardBackgroundHandler() {
await secureFs.mkdir(boardDir, { recursive: true }); await secureFs.mkdir(boardDir, { recursive: true });
// Decode base64 data (remove data URL prefix if present) // Decode base64 data (remove data URL prefix if present)
// Use a regex that handles all data URL formats including those with extra params const base64Data = data.replace(/^data:image\/\w+;base64,/, '');
// e.g., data:image/gif;charset=utf-8;base64,R0lGOD...
const base64Data = data.replace(/^data:[^,]+,/, '');
const buffer = Buffer.from(base64Data, 'base64'); const buffer = Buffer.from(base64Data, 'base64');
// Use a fixed filename for the board background (overwrite previous) // Use a fixed filename for the board background (overwrite previous)
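The two patterns in this hunk strip different amounts of the data-URL prefix; a quick illustration (the GIF payload is an arbitrary example string):

```typescript
// The narrow pattern only matches "data:image/<type>;base64," exactly, so a data URL
// with extra parameters keeps its prefix and Buffer.from() would decode garbage.
const narrow = /^data:image\/\w+;base64,/;
const broad = /^data:[^,]+,/; // everything up to the first comma, whatever the parameters

const url = 'data:image/gif;charset=utf-8;base64,R0lGODlhAQABAAAAACw=';
console.log(url.replace(narrow, '')); // unchanged — still starts with "data:image/gif;charset=..."
console.log(url.replace(broad, ''));  // "R0lGODlhAQABAAAAACw=" — clean base64 payload
```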


@@ -31,9 +31,7 @@ export function createSaveImageHandler() {
await secureFs.mkdir(imagesDir, { recursive: true }); await secureFs.mkdir(imagesDir, { recursive: true });
// Decode base64 data (remove data URL prefix if present) // Decode base64 data (remove data URL prefix if present)
// Use a regex that handles all data URL formats including those with extra params const base64Data = data.replace(/^data:image\/\w+;base64,/, '');
// e.g., data:image/gif;charset=utf-8;base64,R0lGOD...
const base64Data = data.replace(/^data:[^,]+,/, '');
const buffer = Buffer.from(base64Data, 'base64'); const buffer = Buffer.from(base64Data, 'base64');
// Generate unique filename with timestamp // Generate unique filename with timestamp


@@ -23,7 +23,6 @@ import {
isCodexModel, isCodexModel,
isCursorModel, isCursorModel,
isOpencodeModel, isOpencodeModel,
supportsStructuredOutput,
} from '@automaker/types'; } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver'; import { resolvePhaseModel } from '@automaker/model-resolver';
import { extractJson } from '../../../lib/json-extractor.js'; import { extractJson } from '../../../lib/json-extractor.js';
@@ -38,7 +37,7 @@ import {
import { import {
getPromptCustomization, getPromptCustomization,
getAutoLoadClaudeMdSetting, getAutoLoadClaudeMdSetting,
getProviderByModelId, getActiveClaudeApiProfile,
} from '../../../lib/settings-helpers.js'; } from '../../../lib/settings-helpers.js';
import { import {
trySetValidationRunning, trySetValidationRunning,
@@ -125,9 +124,8 @@ async function runValidation(
const prompts = await getPromptCustomization(settingsService, '[ValidateIssue]'); const prompts = await getPromptCustomization(settingsService, '[ValidateIssue]');
const issueValidationSystemPrompt = prompts.issueValidation.systemPrompt; const issueValidationSystemPrompt = prompts.issueValidation.systemPrompt;
// Determine if we should use structured output based on model type // Determine if we should use structured output (Claude/Codex support it, Cursor/OpenCode don't)
// Claude and Codex support it; Cursor, Gemini, OpenCode, Copilot don't const useStructuredOutput = isClaudeModel(model) || isCodexModel(model);
const useStructuredOutput = supportsStructuredOutput(model);
// Build the final prompt - for Cursor, include system prompt and JSON schema instructions // Build the final prompt - for Cursor, include system prompt and JSON schema instructions
let finalPrompt = basePrompt; let finalPrompt = basePrompt;
@@ -169,33 +167,18 @@ ${basePrompt}`;
} }
} }
// Check if the model is a provider model (like "GLM-4.5-Air") logger.info(`Using model: ${model}`);
// If so, get the provider config and resolved Claude model
let claudeCompatibleProvider: import('@automaker/types').ClaudeCompatibleProvider | undefined;
let providerResolvedModel: string | undefined;
let credentials = await settingsService?.getCredentials();
if (settingsService) { // Get active Claude API profile for alternative endpoint configuration
const providerResult = await getProviderByModelId(model, settingsService, '[ValidateIssue]'); const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
if (providerResult.provider) { settingsService,
claudeCompatibleProvider = providerResult.provider; '[IssueValidation]'
providerResolvedModel = providerResult.resolvedModel; );
credentials = providerResult.credentials;
logger.info(
`Using provider "${providerResult.provider.name}" for model "${model}"` +
(providerResolvedModel ? ` -> resolved to "${providerResolvedModel}"` : '')
);
}
}
// Use provider resolved model if available, otherwise use original model
const effectiveModel = providerResolvedModel || (model as string);
logger.info(`Using model: ${effectiveModel}`);
// Use streamingQuery with event callbacks // Use streamingQuery with event callbacks
const result = await streamingQuery({ const result = await streamingQuery({
prompt: finalPrompt, prompt: finalPrompt,
model: effectiveModel, model: model as string,
cwd: projectPath, cwd: projectPath,
systemPrompt: useStructuredOutput ? issueValidationSystemPrompt : undefined, systemPrompt: useStructuredOutput ? issueValidationSystemPrompt : undefined,
abortController, abortController,
@@ -203,7 +186,7 @@ ${basePrompt}`;
reasoningEffort: effectiveReasoningEffort, reasoningEffort: effectiveReasoningEffort,
readOnly: true, // Issue validation only reads code, doesn't write readOnly: true, // Issue validation only reads code, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined, settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
claudeCompatibleProvider, // Pass provider for alternative endpoint configuration claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource credentials, // Pass credentials for resolving 'credentials' apiKeySource
outputFormat: useStructuredOutput outputFormat: useStructuredOutput
? { ? {


@@ -4,21 +4,15 @@
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { IdeationService } from '../../../services/ideation-service.js'; import type { IdeationService } from '../../../services/ideation-service.js';
import type { IdeationContextSources } from '@automaker/types';
import { createLogger } from '@automaker/utils'; import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('ideation:suggestions-generate'); const logger = createLogger('ideation:suggestions-generate');
/**
* Creates an Express route handler for generating AI-powered ideation suggestions.
* Accepts a prompt, category, and optional context sources configuration,
* then returns structured suggestions that can be added to the board.
*/
export function createSuggestionsGenerateHandler(ideationService: IdeationService) { export function createSuggestionsGenerateHandler(ideationService: IdeationService) {
return async (req: Request, res: Response): Promise<void> => { return async (req: Request, res: Response): Promise<void> => {
try { try {
const { projectPath, promptId, category, count, contextSources } = req.body; const { projectPath, promptId, category, count } = req.body;
if (!projectPath) { if (!projectPath) {
res.status(400).json({ success: false, error: 'projectPath is required' }); res.status(400).json({ success: false, error: 'projectPath is required' });
@@ -44,8 +38,7 @@ export function createSuggestionsGenerateHandler(ideationService: IdeationServic
projectPath, projectPath,
promptId, promptId,
category, category,
suggestionCount, suggestionCount
contextSources as IdeationContextSources | undefined
); );
res.json({ res.json({


@@ -1,12 +0,0 @@
/**
* Common utilities for projects routes
*/
import { createLogger } from '@automaker/utils';
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
const logger = createLogger('Projects');
// Re-export shared utilities
export { getErrorMessageShared as getErrorMessage };
export const logError = createLogError(logger);


@@ -1,27 +0,0 @@
/**
* Projects routes - HTTP API for multi-project overview and management
*/
import { Router } from 'express';
import type { FeatureLoader } from '../../services/feature-loader.js';
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js';
import type { SettingsService } from '../../services/settings-service.js';
import type { NotificationService } from '../../services/notification-service.js';
import { createOverviewHandler } from './routes/overview.js';
export function createProjectsRoutes(
featureLoader: FeatureLoader,
autoModeService: AutoModeServiceCompat,
settingsService: SettingsService,
notificationService: NotificationService
): Router {
const router = Router();
// GET /overview - Get aggregate status for all projects
router.get(
'/overview',
createOverviewHandler(featureLoader, autoModeService, settingsService, notificationService)
);
return router;
}


@@ -1,324 +0,0 @@
/**
* GET /overview endpoint - Get aggregate status for all projects
*
* Returns a complete overview of all projects including:
* - Individual project status (features, auto-mode state)
* - Aggregate metrics across all projects
* - Recent activity feed (placeholder for future implementation)
*/
import type { Request, Response } from 'express';
import type { FeatureLoader } from '../../../services/feature-loader.js';
import type {
AutoModeServiceCompat,
RunningAgentInfo,
ProjectAutoModeStatus,
} from '../../../services/auto-mode/index.js';
import type { SettingsService } from '../../../services/settings-service.js';
import type { NotificationService } from '../../../services/notification-service.js';
import type {
ProjectStatus,
AggregateStatus,
MultiProjectOverview,
FeatureStatusCounts,
AggregateFeatureCounts,
AggregateProjectCounts,
ProjectHealthStatus,
Feature,
ProjectRef,
} from '@automaker/types';
import { getErrorMessage, logError } from '../common.js';
/**
* Compute feature status counts from a list of features
*/
function computeFeatureCounts(features: Feature[]): FeatureStatusCounts {
const counts: FeatureStatusCounts = {
pending: 0,
running: 0,
completed: 0,
failed: 0,
verified: 0,
};
for (const feature of features) {
switch (feature.status) {
case 'pending':
case 'ready':
counts.pending++;
break;
case 'running':
case 'generating_spec':
case 'in_progress':
counts.running++;
break;
case 'waiting_approval':
// waiting_approval means agent finished, needs human review - count as pending
counts.pending++;
break;
case 'completed':
counts.completed++;
break;
case 'failed':
counts.failed++;
break;
case 'verified':
counts.verified++;
break;
default:
// Unknown status, treat as pending
counts.pending++;
}
}
return counts;
}
/**
* Determine the overall health status of a project based on its feature statuses
*/
function computeHealthStatus(
featureCounts: FeatureStatusCounts,
isAutoModeRunning: boolean
): ProjectHealthStatus {
const totalFeatures =
featureCounts.pending +
featureCounts.running +
featureCounts.completed +
featureCounts.failed +
featureCounts.verified;
// If there are failed features, the project has errors
if (featureCounts.failed > 0) {
return 'error';
}
// If there are running features or auto mode is running with pending work
if (featureCounts.running > 0 || (isAutoModeRunning && featureCounts.pending > 0)) {
return 'active';
}
// Pending work but no active execution
if (featureCounts.pending > 0) {
return 'waiting';
}
// If all features are completed or verified
if (totalFeatures > 0 && featureCounts.pending === 0 && featureCounts.running === 0) {
return 'completed';
}
// Default to idle
return 'idle';
}
/**
* Get the most recent activity timestamp from features
*/
function getLastActivityAt(features: Feature[]): string | undefined {
if (features.length === 0) {
return undefined;
}
let latestTimestamp: number = 0;
for (const feature of features) {
// Check startedAt timestamp (the main timestamp available on Feature)
if (feature.startedAt) {
const timestamp = new Date(feature.startedAt).getTime();
if (!isNaN(timestamp) && timestamp > latestTimestamp) {
latestTimestamp = timestamp;
}
}
// Also check planSpec timestamps if available
if (feature.planSpec?.generatedAt) {
const timestamp = new Date(feature.planSpec.generatedAt).getTime();
if (!isNaN(timestamp) && timestamp > latestTimestamp) {
latestTimestamp = timestamp;
}
}
if (feature.planSpec?.approvedAt) {
const timestamp = new Date(feature.planSpec.approvedAt).getTime();
if (!isNaN(timestamp) && timestamp > latestTimestamp) {
latestTimestamp = timestamp;
}
}
}
return latestTimestamp > 0 ? new Date(latestTimestamp).toISOString() : undefined;
}
export function createOverviewHandler(
featureLoader: FeatureLoader,
autoModeService: AutoModeServiceCompat,
settingsService: SettingsService,
notificationService: NotificationService
) {
return async (_req: Request, res: Response): Promise<void> => {
try {
// Get all projects from settings
const settings = await settingsService.getGlobalSettings();
const projectRefs: ProjectRef[] = settings.projects || [];
// Get all running agents once to count live running features per project
const allRunningAgents: RunningAgentInfo[] = await autoModeService.getRunningAgents();
// Collect project statuses in parallel
const projectStatusPromises = projectRefs.map(async (projectRef): Promise<ProjectStatus> => {
try {
// Load features for this project
const features = await featureLoader.getAll(projectRef.path);
const featureCounts = computeFeatureCounts(features);
const totalFeatures = features.length;
// Get auto-mode status for this project (main worktree, branchName = null)
const autoModeStatus: ProjectAutoModeStatus = autoModeService.getStatusForProject(
projectRef.path,
null
);
const isAutoModeRunning = autoModeStatus.isAutoLoopRunning;
// Count live running features for this project (across all branches)
// This ensures we only count features that are actually running in memory
const liveRunningCount = allRunningAgents.filter(
(agent) => agent.projectPath === projectRef.path
).length;
featureCounts.running = liveRunningCount;
// Get notification count for this project
let unreadNotificationCount = 0;
try {
const notifications = await notificationService.getNotifications(projectRef.path);
unreadNotificationCount = notifications.filter((n) => !n.read).length;
} catch {
// Ignore notification errors - project may not have any notifications yet
}
// Compute health status
const healthStatus = computeHealthStatus(featureCounts, isAutoModeRunning);
// Get last activity timestamp
const lastActivityAt = getLastActivityAt(features);
return {
projectId: projectRef.id,
projectName: projectRef.name,
projectPath: projectRef.path,
healthStatus,
featureCounts,
totalFeatures,
lastActivityAt,
isAutoModeRunning,
activeBranch: autoModeStatus.branchName ?? undefined,
unreadNotificationCount,
};
} catch (error) {
logError(error, `Failed to load project status: ${projectRef.name}`);
// Return a minimal status for projects that fail to load
return {
projectId: projectRef.id,
projectName: projectRef.name,
projectPath: projectRef.path,
healthStatus: 'error' as ProjectHealthStatus,
featureCounts: {
pending: 0,
running: 0,
completed: 0,
failed: 0,
verified: 0,
},
totalFeatures: 0,
isAutoModeRunning: false,
unreadNotificationCount: 0,
};
}
});
const projectStatuses = await Promise.all(projectStatusPromises);
// Compute aggregate metrics
const aggregateFeatureCounts: AggregateFeatureCounts = {
total: 0,
pending: 0,
running: 0,
completed: 0,
failed: 0,
verified: 0,
};
const aggregateProjectCounts: AggregateProjectCounts = {
total: projectStatuses.length,
active: 0,
idle: 0,
waiting: 0,
withErrors: 0,
allCompleted: 0,
};
let totalUnreadNotifications = 0;
let projectsWithAutoModeRunning = 0;
for (const status of projectStatuses) {
// Aggregate feature counts
aggregateFeatureCounts.total += status.totalFeatures;
aggregateFeatureCounts.pending += status.featureCounts.pending;
aggregateFeatureCounts.running += status.featureCounts.running;
aggregateFeatureCounts.completed += status.featureCounts.completed;
aggregateFeatureCounts.failed += status.featureCounts.failed;
aggregateFeatureCounts.verified += status.featureCounts.verified;
// Aggregate project counts by health status
switch (status.healthStatus) {
case 'active':
aggregateProjectCounts.active++;
break;
case 'idle':
aggregateProjectCounts.idle++;
break;
case 'waiting':
aggregateProjectCounts.waiting++;
break;
case 'error':
aggregateProjectCounts.withErrors++;
break;
case 'completed':
aggregateProjectCounts.allCompleted++;
break;
}
// Aggregate notifications
totalUnreadNotifications += status.unreadNotificationCount;
// Count projects with auto-mode running
if (status.isAutoModeRunning) {
projectsWithAutoModeRunning++;
}
}
const aggregateStatus: AggregateStatus = {
projectCounts: aggregateProjectCounts,
featureCounts: aggregateFeatureCounts,
totalUnreadNotifications,
projectsWithAutoModeRunning,
computedAt: new Date().toISOString(),
};
// Build the response (recentActivity is empty for now - can be populated later)
const overview: MultiProjectOverview = {
projects: projectStatuses,
aggregate: aggregateStatus,
recentActivity: [], // Placeholder for future activity feed implementation
generatedAt: new Date().toISOString(),
};
res.json({
success: true,
...overview,
});
} catch (error) {
logError(error, 'Get project overview failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
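To make the precedence in the removed `computeHealthStatus` helper concrete, a few illustrative inputs (the counts are made up; only the ordering comes from the function body above):

```typescript
// Uses computeHealthStatus from the removed file above; counts are invented.
const base = { pending: 0, running: 0, completed: 0, failed: 0, verified: 0 };

computeHealthStatus({ ...base, failed: 1, running: 3 }, true);      // 'error'     — failures win
computeHealthStatus({ ...base, running: 2 }, false);                // 'active'    — work in flight
computeHealthStatus({ ...base, pending: 4 }, true);                 // 'active'    — auto mode + backlog
computeHealthStatus({ ...base, pending: 4 }, false);                // 'waiting'   — backlog, nothing running
computeHealthStatus({ ...base, completed: 2, verified: 1 }, false); // 'completed' — everything done
computeHealthStatus(base, false);                                   // 'idle'      — empty project
```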


@@ -3,10 +3,10 @@
*/ */
import { Router } from 'express'; import { Router } from 'express';
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js'; import type { AutoModeService } from '../../services/auto-mode-service.js';
import { createIndexHandler } from './routes/index.js'; import { createIndexHandler } from './routes/index.js';
export function createRunningAgentsRoutes(autoModeService: AutoModeServiceCompat): Router { export function createRunningAgentsRoutes(autoModeService: AutoModeService): Router {
const router = Router(); const router = Router();
router.get('/', createIndexHandler(autoModeService)); router.get('/', createIndexHandler(autoModeService));


@@ -3,17 +3,16 @@
*/ */
import type { Request, Response } from 'express'; import type { Request, Response } from 'express';
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js'; import type { AutoModeService } from '../../../services/auto-mode-service.js';
import { getBacklogPlanStatus, getRunningDetails } from '../../backlog-plan/common.js'; import { getBacklogPlanStatus, getRunningDetails } from '../../backlog-plan/common.js';
import { getAllRunningGenerations } from '../../app-spec/common.js'; import { getAllRunningGenerations } from '../../app-spec/common.js';
import path from 'path'; import path from 'path';
import { getErrorMessage, logError } from '../common.js'; import { getErrorMessage, logError } from '../common.js';
export function createIndexHandler(autoModeService: AutoModeServiceCompat) { export function createIndexHandler(autoModeService: AutoModeService) {
return async (_req: Request, res: Response): Promise<void> => { return async (_req: Request, res: Response): Promise<void> => {
try { try {
const runningAgents = [...(await autoModeService.getRunningAgents())]; const runningAgents = [...(await autoModeService.getRunningAgents())];
const backlogPlanStatus = getBacklogPlanStatus(); const backlogPlanStatus = getBacklogPlanStatus();
const backlogPlanDetails = getRunningDetails(); const backlogPlanDetails = getRunningDetails();


@@ -52,8 +52,3 @@ export async function persistApiKeyToEnv(key: string, value: string): Promise<vo
// Re-export shared utilities // Re-export shared utilities
export { getErrorMessageShared as getErrorMessage }; export { getErrorMessageShared as getErrorMessage };
export const logError = createLogError(logger); export const logError = createLogError(logger);
/**
* Marker file used to indicate a provider has been explicitly disconnected by user
*/
export const COPILOT_DISCONNECTED_MARKER_FILE = '.copilot-disconnected';


@@ -24,17 +24,6 @@ import { createDeauthCursorHandler } from './routes/deauth-cursor.js';
import { createAuthOpencodeHandler } from './routes/auth-opencode.js'; import { createAuthOpencodeHandler } from './routes/auth-opencode.js';
import { createDeauthOpencodeHandler } from './routes/deauth-opencode.js'; import { createDeauthOpencodeHandler } from './routes/deauth-opencode.js';
import { createOpencodeStatusHandler } from './routes/opencode-status.js'; import { createOpencodeStatusHandler } from './routes/opencode-status.js';
import { createGeminiStatusHandler } from './routes/gemini-status.js';
import { createAuthGeminiHandler } from './routes/auth-gemini.js';
import { createDeauthGeminiHandler } from './routes/deauth-gemini.js';
import { createCopilotStatusHandler } from './routes/copilot-status.js';
import { createAuthCopilotHandler } from './routes/auth-copilot.js';
import { createDeauthCopilotHandler } from './routes/deauth-copilot.js';
import {
createGetCopilotModelsHandler,
createRefreshCopilotModelsHandler,
createClearCopilotCacheHandler,
} from './routes/copilot-models.js';
import { import {
createGetOpencodeModelsHandler, createGetOpencodeModelsHandler,
createRefreshOpencodeModelsHandler, createRefreshOpencodeModelsHandler,
@@ -83,21 +72,6 @@ export function createSetupRoutes(): Router {
router.post('/auth-opencode', createAuthOpencodeHandler()); router.post('/auth-opencode', createAuthOpencodeHandler());
router.post('/deauth-opencode', createDeauthOpencodeHandler()); router.post('/deauth-opencode', createDeauthOpencodeHandler());
// Gemini CLI routes
router.get('/gemini-status', createGeminiStatusHandler());
router.post('/auth-gemini', createAuthGeminiHandler());
router.post('/deauth-gemini', createDeauthGeminiHandler());
// Copilot CLI routes
router.get('/copilot-status', createCopilotStatusHandler());
router.post('/auth-copilot', createAuthCopilotHandler());
router.post('/deauth-copilot', createDeauthCopilotHandler());
// Copilot Dynamic Model Discovery routes
router.get('/copilot/models', createGetCopilotModelsHandler());
router.post('/copilot/models/refresh', createRefreshCopilotModelsHandler());
router.post('/copilot/cache/clear', createClearCopilotCacheHandler());
// OpenCode Dynamic Model Discovery routes // OpenCode Dynamic Model Discovery routes
router.get('/opencode/models', createGetOpencodeModelsHandler()); router.get('/opencode/models', createGetOpencodeModelsHandler());
router.post('/opencode/models/refresh', createRefreshOpencodeModelsHandler()); router.post('/opencode/models/refresh', createRefreshOpencodeModelsHandler());


@@ -1,30 +0,0 @@
/**
* POST /auth-copilot endpoint - Connect Copilot CLI to the app
*/
import type { Request, Response } from 'express';
import { getErrorMessage, logError } from '../common.js';
import { connectCopilot } from '../../../services/copilot-connection-service.js';
/**
* Creates handler for POST /api/setup/auth-copilot
* Removes the disconnection marker to allow Copilot CLI to be used
*/
export function createAuthCopilotHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
await connectCopilot();
res.json({
success: true,
message: 'Copilot CLI connected to app',
});
} catch (error) {
logError(error, 'Auth Copilot failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}


@@ -1,42 +0,0 @@
/**
* POST /auth-gemini endpoint - Connect Gemini CLI to the app
*/
import type { Request, Response } from 'express';
import { getErrorMessage, logError } from '../common.js';
import * as fs from 'fs/promises';
import * as path from 'path';
const DISCONNECTED_MARKER_FILE = '.gemini-disconnected';
/**
* Creates handler for POST /api/setup/auth-gemini
* Removes the disconnection marker to allow Gemini CLI to be used
*/
export function createAuthGeminiHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
const projectRoot = process.cwd();
const automakerDir = path.join(projectRoot, '.automaker');
const markerPath = path.join(automakerDir, DISCONNECTED_MARKER_FILE);
// Remove the disconnection marker if it exists
try {
await fs.unlink(markerPath);
} catch {
// File doesn't exist, nothing to remove
}
res.json({
success: true,
message: 'Gemini CLI connected to app',
});
} catch (error) {
logError(error, 'Auth Gemini failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}

View File

@@ -1,139 +0,0 @@
/**
* Copilot Dynamic Models API Routes
*
* Provides endpoints for:
* - GET /api/setup/copilot/models - Get available models (cached or refreshed)
* - POST /api/setup/copilot/models/refresh - Force refresh models from CLI
*/
import type { Request, Response } from 'express';
import { CopilotProvider } from '../../../providers/copilot-provider.js';
import { getErrorMessage, logError } from '../common.js';
import type { ModelDefinition } from '@automaker/types';
import { createLogger } from '@automaker/utils';
const logger = createLogger('CopilotModelsRoute');
// Singleton provider instance for caching
let providerInstance: CopilotProvider | null = null;
function getProvider(): CopilotProvider {
if (!providerInstance) {
providerInstance = new CopilotProvider();
}
return providerInstance;
}
/**
* Response type for models endpoint
*/
interface ModelsResponse {
success: boolean;
models?: ModelDefinition[];
count?: number;
cached?: boolean;
error?: string;
}
/**
* Creates handler for GET /api/setup/copilot/models
*
* Returns currently available models (from cache if available).
* Query params:
* - refresh=true: Force refresh from CLI before returning
*
* Note: If cache is empty, this will trigger a refresh to get dynamic models.
*/
export function createGetCopilotModelsHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const provider = getProvider();
const forceRefresh = req.query.refresh === 'true';
let models: ModelDefinition[];
let cached = true;
if (forceRefresh) {
models = await provider.refreshModels();
cached = false;
} else {
// Check if we have cached models
if (!provider.hasCachedModels()) {
models = await provider.refreshModels();
cached = false;
} else {
models = provider.getAvailableModels();
}
}
const response: ModelsResponse = {
success: true,
models,
count: models.length,
cached,
};
res.json(response);
} catch (error) {
logError(error, 'Get Copilot models failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
} as ModelsResponse);
}
};
}
/**
* Creates handler for POST /api/setup/copilot/models/refresh
*
* Forces a refresh of models from the Copilot CLI.
*/
export function createRefreshCopilotModelsHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
const provider = getProvider();
const models = await provider.refreshModels();
const response: ModelsResponse = {
success: true,
models,
count: models.length,
cached: false,
};
res.json(response);
} catch (error) {
logError(error, 'Refresh Copilot models failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
} as ModelsResponse);
}
};
}
/**
* Creates handler for POST /api/setup/copilot/cache/clear
*
* Clears the model cache, forcing a fresh fetch on next access.
*/
export function createClearCopilotCacheHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
const provider = getProvider();
provider.clearModelCache();
res.json({
success: true,
message: 'Copilot model cache cleared',
});
} catch (error) {
logError(error, 'Clear Copilot cache failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}
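The models endpoint accepts an optional `refresh=true` query parameter to bypass the cache. A hedged client sketch, again assuming the `/api/setup` mount point:

```ts
// Fetch Copilot models, optionally forcing a refresh from the CLI.
// ModelsResponse mirrors the shape returned by the handlers above;
// the element type is left as unknown here instead of ModelDefinition.
interface ModelsResponse {
  success: boolean;
  models?: unknown[];
  count?: number;
  cached?: boolean;
  error?: string;
}

async function loadCopilotModels(baseUrl: string, refresh = false): Promise<ModelsResponse> {
  const url = `${baseUrl}/api/setup/copilot/models${refresh ? '?refresh=true' : ''}`;
  const res = await fetch(url);
  return (await res.json()) as ModelsResponse;
}
```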

View File

@@ -1,78 +0,0 @@
/**
* GET /copilot-status endpoint - Get Copilot CLI installation and auth status
*/
import type { Request, Response } from 'express';
import { CopilotProvider } from '../../../providers/copilot-provider.js';
import { getErrorMessage, logError } from '../common.js';
import * as fs from 'fs/promises';
import * as path from 'path';
const DISCONNECTED_MARKER_FILE = '.copilot-disconnected';
async function isCopilotDisconnectedFromApp(): Promise<boolean> {
try {
const projectRoot = process.cwd();
const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
await fs.access(markerPath);
return true;
} catch {
return false;
}
}
/**
* Creates handler for GET /api/setup/copilot-status
* Returns Copilot CLI installation and authentication status
*/
export function createCopilotStatusHandler() {
const installCommand = 'npm install -g @github/copilot';
const loginCommand = 'gh auth login';
return async (_req: Request, res: Response): Promise<void> => {
try {
// Check if user has manually disconnected from the app
if (await isCopilotDisconnectedFromApp()) {
res.json({
success: true,
installed: true,
version: null,
path: null,
auth: {
authenticated: false,
method: 'none',
},
installCommand,
loginCommand,
});
return;
}
const provider = new CopilotProvider();
const status = await provider.detectInstallation();
const auth = await provider.checkAuth();
res.json({
success: true,
installed: status.installed,
version: status.version || null,
path: status.path || null,
auth: {
authenticated: auth.authenticated,
method: auth.method,
login: auth.login,
host: auth.host,
error: auth.error,
},
installCommand,
loginCommand,
});
} catch (error) {
logError(error, 'Get Copilot status failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}
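The status route always answers 200 with a `success` flag, so a client distinguishes "disconnected from the app" from "not installed" by inspecting the payload. A sketch of that check, with field names taken from the handler above:

```ts
// Interpret the copilot-status payload returned by the handler above.
interface CopilotStatus {
  success: boolean;
  installed: boolean;
  version: string | null;
  path: string | null;
  auth: { authenticated: boolean; method: string; login?: string; host?: string; error?: string };
  installCommand: string;
  loginCommand: string;
}

function describeCopilotStatus(status: CopilotStatus): string {
  if (!status.installed) return `Not installed - run: ${status.installCommand}`;
  if (!status.auth.authenticated) return `Installed but not authenticated - run: ${status.loginCommand}`;
  return `Ready (${status.auth.method}${status.auth.login ? `, ${status.auth.login}` : ''})`;
}
```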

View File

@@ -1,30 +0,0 @@
/**
* POST /deauth-copilot endpoint - Disconnect Copilot CLI from the app
*/
import type { Request, Response } from 'express';
import { getErrorMessage, logError } from '../common.js';
import { disconnectCopilot } from '../../../services/copilot-connection-service.js';
/**
* Creates handler for POST /api/setup/deauth-copilot
* Creates a marker file to disconnect Copilot CLI from the app
*/
export function createDeauthCopilotHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
await disconnectCopilot();
res.json({
success: true,
message: 'Copilot CLI disconnected from app',
});
} catch (error) {
logError(error, 'Deauth Copilot failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}

View File

@@ -1,42 +0,0 @@
/**
* POST /deauth-gemini endpoint - Disconnect Gemini CLI from the app
*/
import type { Request, Response } from 'express';
import { getErrorMessage, logError } from '../common.js';
import * as fs from 'fs/promises';
import * as path from 'path';
const DISCONNECTED_MARKER_FILE = '.gemini-disconnected';
/**
* Creates handler for POST /api/setup/deauth-gemini
* Creates a marker file to disconnect Gemini CLI from the app
*/
export function createDeauthGeminiHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
const projectRoot = process.cwd();
const automakerDir = path.join(projectRoot, '.automaker');
// Ensure .automaker directory exists
await fs.mkdir(automakerDir, { recursive: true });
const markerPath = path.join(automakerDir, DISCONNECTED_MARKER_FILE);
// Create the disconnection marker
await fs.writeFile(markerPath, 'Gemini CLI disconnected from app');
res.json({
success: true,
message: 'Gemini CLI disconnected from app',
});
} catch (error) {
logError(error, 'Deauth Gemini failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}
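The Gemini auth/deauth handlers inline the `.gemini-disconnected` marker logic that the Copilot handlers delegate to `copilot-connection-service`. If that duplication were ever consolidated, a Gemini equivalent could look like the sketch below; the module and function names are hypothetical and not part of this changeset:

```ts
// Hypothetical gemini-connection-service mirroring the marker-file pattern above.
import * as fs from 'fs/promises';
import * as path from 'path';

const DISCONNECTED_MARKER_FILE = '.gemini-disconnected';

function markerPath(): string {
  return path.join(process.cwd(), '.automaker', DISCONNECTED_MARKER_FILE);
}

export async function connectGemini(): Promise<void> {
  // Removing the marker re-enables the Gemini CLI; ignore "file not found".
  await fs.unlink(markerPath()).catch(() => {});
}

export async function disconnectGemini(): Promise<void> {
  // Creating the marker disconnects the Gemini CLI from the app.
  await fs.mkdir(path.dirname(markerPath()), { recursive: true });
  await fs.writeFile(markerPath(), 'Gemini CLI disconnected from app');
}
```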

View File

@@ -1,79 +0,0 @@
/**
* GET /gemini-status endpoint - Get Gemini CLI installation and auth status
*/
import type { Request, Response } from 'express';
import { GeminiProvider } from '../../../providers/gemini-provider.js';
import { getErrorMessage, logError } from '../common.js';
import * as fs from 'fs/promises';
import * as path from 'path';
const DISCONNECTED_MARKER_FILE = '.gemini-disconnected';
async function isGeminiDisconnectedFromApp(): Promise<boolean> {
try {
const projectRoot = process.cwd();
const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
await fs.access(markerPath);
return true;
} catch {
return false;
}
}
/**
* Creates handler for GET /api/setup/gemini-status
* Returns Gemini CLI installation and authentication status
*/
export function createGeminiStatusHandler() {
const installCommand = 'npm install -g @google/gemini-cli';
const loginCommand = 'gemini';
return async (_req: Request, res: Response): Promise<void> => {
try {
// Check if user has manually disconnected from the app
if (await isGeminiDisconnectedFromApp()) {
res.json({
success: true,
installed: true,
version: null,
path: null,
auth: {
authenticated: false,
method: 'none',
hasApiKey: false,
},
installCommand,
loginCommand,
});
return;
}
const provider = new GeminiProvider();
const status = await provider.detectInstallation();
const auth = await provider.checkAuth();
res.json({
success: true,
installed: status.installed,
version: status.version || null,
path: status.path || null,
auth: {
authenticated: auth.authenticated,
method: auth.method,
hasApiKey: auth.hasApiKey || false,
hasEnvApiKey: auth.hasEnvApiKey || false,
error: auth.error,
},
installCommand,
loginCommand,
});
} catch (error) {
logError(error, 'Get Gemini status failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}
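For completeness, the Gemini status payload differs from the Copilot one by exposing API-key information. Its shape, as implied by the handler above (a sketch, not an exported type):

```ts
// Shape of the gemini-status response implied by the handler above.
interface GeminiStatus {
  success: boolean;
  installed: boolean;
  version: string | null;
  path: string | null;
  auth: {
    authenticated: boolean;
    method: string;
    hasApiKey: boolean;
    hasEnvApiKey?: boolean; // omitted in the "disconnected from app" branch
    error?: string;
  };
  installCommand: string;
  loginCommand: string;
}
```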

View File

@@ -0,0 +1,34 @@
/**
* Common utilities and state for suggestions routes
*/
import { createLogger } from '@automaker/utils';
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
const logger = createLogger('Suggestions');
// Shared state for tracking generation status - private
let isRunning = false;
let currentAbortController: AbortController | null = null;
/**
* Get the current running state
*/
export function getSuggestionsStatus(): {
isRunning: boolean;
currentAbortController: AbortController | null;
} {
return { isRunning, currentAbortController };
}
/**
* Set the running state and abort controller
*/
export function setRunningState(running: boolean, controller: AbortController | null = null): void {
isRunning = running;
currentAbortController = controller;
}
// Re-export shared utilities
export { getErrorMessageShared as getErrorMessage };
export const logError = createLogError(logger);
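Because the running state lives in module scope, only one suggestions run can be active per server process. Callers pair the two helpers roughly as follows, a condensed sketch of what the generate/stop routes below do (import path illustrative):

```ts
// Typical lifecycle around a long-running generation task (condensed from the routes below).
import { getSuggestionsStatus, setRunningState } from './common.js';

async function runOnce(task: (signal: AbortSignal) => Promise<void>): Promise<boolean> {
  if (getSuggestionsStatus().isRunning) return false; // a run is already in flight
  const controller = new AbortController();
  setRunningState(true, controller);
  try {
    await task(controller.signal);
  } finally {
    setRunningState(false, null);
  }
  return true;
}
```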

View File

@@ -0,0 +1,308 @@
/**
* Business logic for generating suggestions
*
* Model is configurable via phaseModels.suggestionsModel in settings
* (AI Suggestions in the UI). Supports both Claude and Cursor models.
*/
import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel, type ThinkingLevel } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { extractJsonWithArray } from '../../lib/json-extractor.js';
import { streamingQuery } from '../../providers/simple-query-service.js';
import { FeatureLoader } from '../../services/feature-loader.js';
import { getAppSpecPath } from '@automaker/platform';
import * as secureFs from '../../lib/secure-fs.js';
import type { SettingsService } from '../../services/settings-service.js';
import {
getAutoLoadClaudeMdSetting,
getPromptCustomization,
getActiveClaudeApiProfile,
} from '../../lib/settings-helpers.js';
const logger = createLogger('Suggestions');
/**
* Extract implemented features from app_spec.txt XML content
*
* Note: This uses regex-based parsing which is sufficient for our controlled
* XML structure. If more complex XML parsing is needed in the future, consider
* using a library like 'fast-xml-parser' or 'xml2js'.
*/
function extractImplementedFeatures(specContent: string): string[] {
const features: string[] = [];
// Match <implemented_features>...</implemented_features> section
const implementedMatch = specContent.match(
/<implemented_features>([\s\S]*?)<\/implemented_features>/
);
if (implementedMatch) {
const implementedSection = implementedMatch[1];
// Extract feature names from <name>...</name> tags using matchAll
const nameRegex = /<name>(.*?)<\/name>/g;
const matches = implementedSection.matchAll(nameRegex);
for (const match of matches) {
features.push(match[1].trim());
}
}
return features;
}
/**
* Load existing context (app spec and backlog features) to avoid duplicates
*/
async function loadExistingContext(projectPath: string): Promise<string> {
let context = '';
// 1. Read app_spec.txt for implemented features
try {
const appSpecPath = getAppSpecPath(projectPath);
const specContent = (await secureFs.readFile(appSpecPath, 'utf-8')) as string;
if (specContent && specContent.trim().length > 0) {
const implementedFeatures = extractImplementedFeatures(specContent);
if (implementedFeatures.length > 0) {
context += '\n\n=== ALREADY IMPLEMENTED FEATURES ===\n';
context += 'These features are already implemented in the codebase:\n';
context += implementedFeatures.map((feature) => `- ${feature}`).join('\n') + '\n';
}
}
} catch (error) {
// app_spec.txt doesn't exist or can't be read - that's okay
logger.debug('No app_spec.txt found or error reading it:', error);
}
// 2. Load existing features from backlog
try {
const featureLoader = new FeatureLoader();
const features = await featureLoader.getAll(projectPath);
if (features.length > 0) {
context += '\n\n=== EXISTING FEATURES IN BACKLOG ===\n';
context += 'These features are already planned or in progress:\n';
context +=
features
.map((feature) => {
const status = feature.status || 'pending';
const title = feature.title || feature.description?.substring(0, 50) || 'Untitled';
return `- ${title} (${status})`;
})
.join('\n') + '\n';
}
} catch (error) {
// Features directory doesn't exist or can't be read - that's okay
logger.debug('No features found or error loading them:', error);
}
return context;
}
/**
* JSON Schema for suggestions output
*/
const suggestionsSchema = {
type: 'object',
properties: {
suggestions: {
type: 'array',
items: {
type: 'object',
properties: {
id: { type: 'string' },
category: { type: 'string' },
description: { type: 'string' },
priority: {
type: 'number',
minimum: 1,
maximum: 3,
},
reasoning: { type: 'string' },
},
required: ['category', 'description', 'priority', 'reasoning'],
},
},
},
required: ['suggestions'],
additionalProperties: false,
};
export async function generateSuggestions(
projectPath: string,
suggestionType: string,
events: EventEmitter,
abortController: AbortController,
settingsService?: SettingsService,
modelOverride?: string,
thinkingLevelOverride?: ThinkingLevel
): Promise<void> {
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[Suggestions]');
// Map suggestion types to their prompts
const typePrompts: Record<string, string> = {
features: prompts.suggestions.featuresPrompt,
refactoring: prompts.suggestions.refactoringPrompt,
security: prompts.suggestions.securityPrompt,
performance: prompts.suggestions.performancePrompt,
};
// Load existing context to avoid duplicates
const existingContext = await loadExistingContext(projectPath);
const prompt = `${typePrompts[suggestionType] || typePrompts.features}
${existingContext}
${existingContext ? '\nIMPORTANT: Do NOT suggest features that are already implemented or already in the backlog above. Focus on NEW ideas that complement what already exists.\n' : ''}
${prompts.suggestions.baseTemplate}`;
// Don't send initial message - let the agent output speak for itself
// The first agent message will be captured as an info entry
// Load autoLoadClaudeMd setting
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
projectPath,
settingsService,
'[Suggestions]'
);
// Get model from phase settings (AI Suggestions = suggestionsModel)
// Use override if provided, otherwise fall back to settings
const settings = await settingsService?.getGlobalSettings();
let model: string;
let thinkingLevel: ThinkingLevel | undefined;
if (modelOverride) {
// Use explicit override - resolve the model string
const resolved = resolvePhaseModel({
model: modelOverride,
thinkingLevel: thinkingLevelOverride,
});
model = resolved.model;
thinkingLevel = resolved.thinkingLevel;
} else {
// Use settings-based model
const phaseModelEntry =
settings?.phaseModels?.suggestionsModel || DEFAULT_PHASE_MODELS.suggestionsModel;
const resolved = resolvePhaseModel(phaseModelEntry);
model = resolved.model;
thinkingLevel = resolved.thinkingLevel;
}
logger.info('[Suggestions] Using model:', model);
// Get active Claude API profile for alternative endpoint configuration
const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
settingsService,
'[Suggestions]'
);
let responseText = '';
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
const useStructuredOutput = !isCursorModel(model);
// Build the final prompt - for Cursor, include JSON schema instructions
let finalPrompt = prompt;
if (!useStructuredOutput) {
finalPrompt = `${prompt}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
2. After analyzing the project, respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
3. The JSON must match this exact schema:
${JSON.stringify(suggestionsSchema, null, 2)}
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
}
// Use streamingQuery with event callbacks
const result = await streamingQuery({
prompt: finalPrompt,
model,
cwd: projectPath,
maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
thinkingLevel,
readOnly: true, // Suggestions only reads code, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource
outputFormat: useStructuredOutput
? {
type: 'json_schema',
schema: suggestionsSchema,
}
: undefined,
onText: (text) => {
responseText += text;
events.emit('suggestions:event', {
type: 'suggestions_progress',
content: text,
});
},
onToolUse: (tool, input) => {
events.emit('suggestions:event', {
type: 'suggestions_tool',
tool,
input,
});
},
});
// Use structured output if available, otherwise fall back to parsing text
try {
let structuredOutput: { suggestions: Array<Record<string, unknown>> } | null = null;
if (result.structured_output) {
structuredOutput = result.structured_output as {
suggestions: Array<Record<string, unknown>>;
};
logger.debug('Received structured output:', structuredOutput);
} else if (responseText) {
// Fallback: try to parse from text using shared extraction utility
logger.warn('No structured output received, attempting to parse from text');
structuredOutput = extractJsonWithArray<{ suggestions: Array<Record<string, unknown>> }>(
responseText,
'suggestions',
{ logger }
);
}
if (structuredOutput && structuredOutput.suggestions) {
// Use structured output directly
events.emit('suggestions:event', {
type: 'suggestions_complete',
suggestions: structuredOutput.suggestions.map((s: Record<string, unknown>, i: number) => ({
...s,
id: s.id || `suggestion-${Date.now()}-${i}`,
})),
});
} else {
throw new Error('No valid JSON found in response');
}
} catch (error) {
// Log the parsing error for debugging
logger.error('Failed to parse suggestions JSON from AI response:', error);
// Return generic suggestions if parsing fails
events.emit('suggestions:event', {
type: 'suggestions_complete',
suggestions: [
{
id: `suggestion-${Date.now()}-0`,
category: 'Analysis',
description: 'Review the AI analysis output for insights',
priority: 1,
reasoning: 'The AI provided analysis but suggestions need manual review',
},
],
});
}
}
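To make the output contract concrete, a structured-output payload that satisfies `suggestionsSchema` would look roughly like this; the values are illustrative only:

```ts
// Illustrative payload matching suggestionsSchema; ids are optional and are
// backfilled as `suggestion-<timestamp>-<index>` when missing (see above).
const exampleStructuredOutput = {
  suggestions: [
    {
      category: 'Performance',
      description: 'Cache the feature list instead of re-reading it on every request',
      priority: 2,
      reasoning: 'FeatureLoader.getAll() hits the filesystem on every poll',
    },
  ],
};
```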

View File

@@ -0,0 +1,28 @@
/**
* Suggestions routes - HTTP API for AI-powered feature suggestions
*/
import { Router } from 'express';
import type { EventEmitter } from '../../lib/events.js';
import { validatePathParams } from '../../middleware/validate-paths.js';
import { createGenerateHandler } from './routes/generate.js';
import { createStopHandler } from './routes/stop.js';
import { createStatusHandler } from './routes/status.js';
import type { SettingsService } from '../../services/settings-service.js';
export function createSuggestionsRoutes(
events: EventEmitter,
settingsService?: SettingsService
): Router {
const router = Router();
router.post(
'/generate',
validatePathParams('projectPath'),
createGenerateHandler(events, settingsService)
);
router.post('/stop', createStopHandler());
router.get('/status', createStatusHandler());
return router;
}
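A client kicks off a run with a single POST. A sketch assuming this router is mounted at `/api/suggestions` (the mount point is not shown in this diff):

```ts
// Start a suggestions run; progress is then streamed via 'suggestions:event' events.
async function startSuggestions(baseUrl: string, projectPath: string): Promise<boolean> {
  const res = await fetch(`${baseUrl}/api/suggestions/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ projectPath, suggestionType: 'features' }),
  });
  const body = (await res.json()) as { success: boolean; error?: string };
  return body.success;
}
```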

View File

@@ -0,0 +1,75 @@
/**
* POST /generate endpoint - Generate suggestions
*/
import type { Request, Response } from 'express';
import type { EventEmitter } from '../../../lib/events.js';
import { createLogger } from '@automaker/utils';
import type { ThinkingLevel } from '@automaker/types';
import { getSuggestionsStatus, setRunningState, getErrorMessage, logError } from '../common.js';
import { generateSuggestions } from '../generate-suggestions.js';
import type { SettingsService } from '../../../services/settings-service.js';
const logger = createLogger('Suggestions');
export function createGenerateHandler(events: EventEmitter, settingsService?: SettingsService) {
return async (req: Request, res: Response): Promise<void> => {
try {
const {
projectPath,
suggestionType = 'features',
model,
thinkingLevel,
} = req.body as {
projectPath: string;
suggestionType?: string;
model?: string;
thinkingLevel?: ThinkingLevel;
};
if (!projectPath) {
res.status(400).json({ success: false, error: 'projectPath required' });
return;
}
const { isRunning } = getSuggestionsStatus();
if (isRunning) {
res.json({
success: false,
error: 'Suggestions generation is already running',
});
return;
}
setRunningState(true);
const abortController = new AbortController();
setRunningState(true, abortController);
// Start generation in background
generateSuggestions(
projectPath,
suggestionType,
events,
abortController,
settingsService,
model,
thinkingLevel
)
.catch((error) => {
logError(error, 'Generate suggestions failed (background)');
events.emit('suggestions:event', {
type: 'suggestions_error',
error: getErrorMessage(error),
});
})
.finally(() => {
setRunningState(false, null);
});
res.json({ success: true });
} catch (error) {
logError(error, 'Generate suggestions failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}

View File

@@ -0,0 +1,18 @@
/**
* GET /status endpoint - Get status
*/
import type { Request, Response } from 'express';
import { getSuggestionsStatus, getErrorMessage, logError } from '../common.js';
export function createStatusHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
const { isRunning } = getSuggestionsStatus();
res.json({ success: true, isRunning });
} catch (error) {
logError(error, 'Get status failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}

View File

@@ -0,0 +1,22 @@
/**
* POST /stop endpoint - Stop suggestions generation
*/
import type { Request, Response } from 'express';
import { getSuggestionsStatus, setRunningState, getErrorMessage, logError } from '../common.js';
export function createStopHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
const { currentAbortController } = getSuggestionsStatus();
if (currentAbortController) {
currentAbortController.abort();
}
setRunningState(false, null);
res.json({ success: true });
} catch (error) {
logError(error, 'Stop suggestions failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
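Status and stop combine naturally into a small polling helper on the client; the `/api/suggestions` mount point is again an assumption:

```ts
// Poll until the current run finishes, or abort it after a timeout (mount point assumed).
async function waitOrStop(baseUrl: string, timeoutMs: number): Promise<void> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const res = await fetch(`${baseUrl}/api/suggestions/status`);
    const { isRunning } = (await res.json()) as { success: boolean; isRunning: boolean };
    if (!isRunning) return;
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }
  await fetch(`${baseUrl}/api/suggestions/stop`, { method: 'POST' });
}
```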

View File

@@ -42,18 +42,12 @@ import { createStartDevHandler } from './routes/start-dev.js';
import { createStopDevHandler } from './routes/stop-dev.js';
import { createListDevServersHandler } from './routes/list-dev-servers.js';
import { createGetDevServerLogsHandler } from './routes/dev-server-logs.js';
import { createStartTestsHandler } from './routes/start-tests.js';
import { createStopTestsHandler } from './routes/stop-tests.js';
import { createGetTestLogsHandler } from './routes/test-logs.js';
import {
createGetInitScriptHandler,
createPutInitScriptHandler,
createDeleteInitScriptHandler,
createRunInitScriptHandler,
} from './routes/init-script.js';
import { createDiscardChangesHandler } from './routes/discard-changes.js';
import { createListRemotesHandler } from './routes/list-remotes.js';
import { createAddRemoteHandler } from './routes/add-remote.js';
import type { SettingsService } from '../../services/settings-service.js';
export function createWorktreeRoutes(
@@ -134,7 +128,7 @@ export function createWorktreeRoutes(
router.post(
'/start-dev',
validatePathParams('projectPath', 'worktreePath'),
- createStartDevHandler(settingsService)
+ createStartDevHandler()
);
router.post('/stop-dev', createStopDevHandler());
router.post('/list-dev-servers', createListDevServersHandler());
@@ -144,15 +138,6 @@ export function createWorktreeRoutes(
createGetDevServerLogsHandler()
);
// Test runner routes
router.post(
'/start-tests',
validatePathParams('worktreePath', 'projectPath?'),
createStartTestsHandler(settingsService)
);
router.post('/stop-tests', createStopTestsHandler());
router.get('/test-logs', validatePathParams('worktreePath?'), createGetTestLogsHandler());
// Init script routes
router.get('/init-script', createGetInitScriptHandler());
router.put('/init-script', validatePathParams('projectPath'), createPutInitScriptHandler());
@@ -163,29 +148,5 @@ export function createWorktreeRoutes(
createRunInitScriptHandler(events)
);
// Discard changes route
router.post(
'/discard-changes',
validatePathParams('worktreePath'),
requireGitRepoOnly,
createDiscardChangesHandler()
);
// List remotes route
router.post(
'/list-remotes',
validatePathParams('worktreePath'),
requireValidWorktree,
createListRemotesHandler()
);
// Add remote route
router.post(
'/add-remote',
validatePathParams('worktreePath'),
requireGitRepoOnly,
createAddRemoteHandler()
);
return router;
}

View File

@@ -1,166 +0,0 @@
/**
* POST /add-remote endpoint - Add a new remote to a git repository
*
* Note: Git repository validation (isGitRepo, hasCommits) is handled by
* the requireValidWorktree middleware in index.ts
*/
import type { Request, Response } from 'express';
import { execFile } from 'child_process';
import { promisify } from 'util';
import { getErrorMessage, logWorktreeError } from '../common.js';
const execFileAsync = promisify(execFile);
/** Maximum allowed length for remote names */
const MAX_REMOTE_NAME_LENGTH = 250;
/** Maximum allowed length for remote URLs */
const MAX_REMOTE_URL_LENGTH = 2048;
/** Timeout for git fetch operations (30 seconds) */
const FETCH_TIMEOUT_MS = 30000;
/**
* Validate remote name - must be alphanumeric with dashes/underscores
* Git remote names have similar restrictions to branch names
*/
function isValidRemoteName(name: string): boolean {
// Remote names should be alphanumeric, may contain dashes, underscores, periods
// Cannot start with a dash or period, cannot be empty
if (!name || name.length === 0 || name.length > MAX_REMOTE_NAME_LENGTH) {
return false;
}
return /^[a-zA-Z0-9][a-zA-Z0-9._-]*$/.test(name);
}
/**
* Validate remote URL - basic validation for git remote URLs
* Supports HTTPS, SSH, and git:// protocols
*/
function isValidRemoteUrl(url: string): boolean {
if (!url || url.length === 0 || url.length > MAX_REMOTE_URL_LENGTH) {
return false;
}
// Support common git URL formats:
// - https://github.com/user/repo.git
// - git@github.com:user/repo.git
// - git://github.com/user/repo.git
// - ssh://git@github.com/user/repo.git
const httpsPattern = /^https?:\/\/.+/;
const sshPattern = /^[a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+:.+/;
const gitProtocolPattern = /^git:\/\/.+/;
const sshProtocolPattern = /^ssh:\/\/.+/;
return (
httpsPattern.test(url) ||
sshPattern.test(url) ||
gitProtocolPattern.test(url) ||
sshProtocolPattern.test(url)
);
}
export function createAddRemoteHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { worktreePath, remoteName, remoteUrl } = req.body as {
worktreePath: string;
remoteName: string;
remoteUrl: string;
};
// Validate required fields
const requiredFields = { worktreePath, remoteName, remoteUrl };
for (const [key, value] of Object.entries(requiredFields)) {
if (!value) {
res.status(400).json({ success: false, error: `${key} required` });
return;
}
}
// Validate remote name
if (!isValidRemoteName(remoteName)) {
res.status(400).json({
success: false,
error:
'Invalid remote name. Must start with alphanumeric character and contain only letters, numbers, dashes, underscores, or periods.',
});
return;
}
// Validate remote URL
if (!isValidRemoteUrl(remoteUrl)) {
res.status(400).json({
success: false,
error: 'Invalid remote URL. Must be a valid git URL (HTTPS, SSH, or git:// protocol).',
});
return;
}
// Check if remote already exists
try {
const { stdout: existingRemotes } = await execFileAsync('git', ['remote'], {
cwd: worktreePath,
});
const remoteNames = existingRemotes
.trim()
.split('\n')
.filter((r) => r.trim());
if (remoteNames.includes(remoteName)) {
res.status(400).json({
success: false,
error: `Remote '${remoteName}' already exists`,
code: 'REMOTE_EXISTS',
});
return;
}
} catch (error) {
// If git remote fails, continue with adding the remote. Log for debugging.
logWorktreeError(
error,
'Checking for existing remotes failed, proceeding to add.',
worktreePath
);
}
// Add the remote using execFile with array arguments to prevent command injection
await execFileAsync('git', ['remote', 'add', remoteName, remoteUrl], {
cwd: worktreePath,
});
// Optionally fetch from the new remote to get its branches
let fetchSucceeded = false;
try {
await execFileAsync('git', ['fetch', remoteName, '--quiet'], {
cwd: worktreePath,
timeout: FETCH_TIMEOUT_MS,
});
fetchSucceeded = true;
} catch (fetchError) {
// Fetch failed (maybe offline or invalid URL), but remote was added successfully
logWorktreeError(
fetchError,
`Fetch from new remote '${remoteName}' failed (remote added successfully)`,
worktreePath
);
fetchSucceeded = false;
}
res.json({
success: true,
result: {
remoteName,
remoteUrl,
fetched: fetchSucceeded,
message: fetchSucceeded
? `Successfully added remote '${remoteName}' and fetched its branches`
: `Successfully added remote '${remoteName}' (fetch failed - you may need to fetch manually)`,
},
});
} catch (error) {
const worktreePath = req.body?.worktreePath;
logWorktreeError(error, 'Add remote failed', worktreePath);
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
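The two validators accept the usual git URL and name forms; a few concrete cases that follow from the patterns above:

```ts
// Expected results of the validators above for common inputs.
isValidRemoteName('upstream');        // true
isValidRemoteName('my-fork_2');       // true
isValidRemoteName('-bad');            // false (must start with an alphanumeric character)
isValidRemoteUrl('https://github.com/user/repo.git');    // true
isValidRemoteUrl('git@github.com:user/repo.git');        // true
isValidRemoteUrl('ssh://git@github.com/user/repo.git');  // true
isValidRemoteUrl('ftp://example.com/repo.git');          // false (unsupported protocol)
```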

View File

@@ -39,10 +39,7 @@ export function createDiffsHandler() {
}
// Git worktrees are stored in project directory
- // Sanitize featureId the same way it's sanitized when creating worktrees
- // (see create.ts: branchName.replace(/[^a-zA-Z0-9_-]/g, '-'))
- const sanitizedFeatureId = featureId.replace(/[^a-zA-Z0-9_-]/g, '-');
- const worktreePath = path.join(projectPath, '.worktrees', sanitizedFeatureId);
+ const worktreePath = path.join(projectPath, '.worktrees', featureId);
try {
// Check if worktree exists

View File

@@ -1,112 +0,0 @@
/**
* POST /discard-changes endpoint - Discard all uncommitted changes in a worktree
*
* This performs a destructive operation that:
* 1. Resets staged changes (git reset HEAD)
* 2. Discards modified tracked files (git checkout .)
* 3. Removes untracked files and directories (git clean -fd)
*
* Note: Git repository validation (isGitRepo) is handled by
* the requireGitRepoOnly middleware in index.ts
*/
import type { Request, Response } from 'express';
import { exec } from 'child_process';
import { promisify } from 'util';
import { getErrorMessage, logError } from '../common.js';
const execAsync = promisify(exec);
export function createDiscardChangesHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { worktreePath } = req.body as {
worktreePath: string;
};
if (!worktreePath) {
res.status(400).json({
success: false,
error: 'worktreePath required',
});
return;
}
// Check for uncommitted changes first
const { stdout: status } = await execAsync('git status --porcelain', {
cwd: worktreePath,
});
if (!status.trim()) {
res.json({
success: true,
result: {
discarded: false,
message: 'No changes to discard',
},
});
return;
}
// Count the files that will be affected
const lines = status.trim().split('\n').filter(Boolean);
const fileCount = lines.length;
// Get branch name before discarding
const { stdout: branchOutput } = await execAsync('git rev-parse --abbrev-ref HEAD', {
cwd: worktreePath,
});
const branchName = branchOutput.trim();
// Discard all changes:
// 1. Reset any staged changes
await execAsync('git reset HEAD', { cwd: worktreePath }).catch(() => {
// Ignore errors - might fail if there's nothing staged
});
// 2. Discard changes in tracked files
await execAsync('git checkout .', { cwd: worktreePath }).catch(() => {
// Ignore errors - might fail if there are no tracked changes
});
// 3. Remove untracked files and directories
await execAsync('git clean -fd', { cwd: worktreePath }).catch(() => {
// Ignore errors - might fail if there are no untracked files
});
// Verify all changes were discarded
const { stdout: finalStatus } = await execAsync('git status --porcelain', {
cwd: worktreePath,
});
if (finalStatus.trim()) {
// Some changes couldn't be discarded (possibly ignored files or permission issues)
const remainingCount = finalStatus.trim().split('\n').filter(Boolean).length;
res.json({
success: true,
result: {
discarded: true,
filesDiscarded: fileCount - remainingCount,
filesRemaining: remainingCount,
branch: branchName,
message: `Discarded ${fileCount - remainingCount} files, ${remainingCount} files could not be removed`,
},
});
} else {
res.json({
success: true,
result: {
discarded: true,
filesDiscarded: fileCount,
filesRemaining: 0,
branch: branchName,
message: `Discarded ${fileCount} ${fileCount === 1 ? 'file' : 'files'}`,
},
});
}
} catch (error) {
logError(error, 'Discard changes failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
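The handler runs three fixed git commands in order, passing the worktree only as `cwd`. The same sequence expressed with `execFile` and argument arrays, as several sibling routes already do, would look like this sketch:

```ts
// The same discard sequence using execFile with argument arrays
// (a sketch; the handler above uses exec with fixed command strings).
import { execFile } from 'child_process';
import { promisify } from 'util';

const execFileAsync = promisify(execFile);

async function discardAll(worktreePath: string): Promise<void> {
  // 1. Unstage everything, 2. restore tracked files, 3. drop untracked files/dirs.
  await execFileAsync('git', ['reset', 'HEAD'], { cwd: worktreePath }).catch(() => {});
  await execFileAsync('git', ['checkout', '.'], { cwd: worktreePath }).catch(() => {});
  await execFileAsync('git', ['clean', '-fd'], { cwd: worktreePath }).catch(() => {});
}
```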

View File

@@ -37,10 +37,7 @@ export function createFileDiffHandler() {
}
// Git worktrees are stored in project directory
- // Sanitize featureId the same way it's sanitized when creating worktrees
- // (see create.ts: branchName.replace(/[^a-zA-Z0-9_-]/g, '-'))
- const sanitizedFeatureId = featureId.replace(/[^a-zA-Z0-9_-]/g, '-');
- const worktreePath = path.join(projectPath, '.worktrees', sanitizedFeatureId);
+ const worktreePath = path.join(projectPath, '.worktrees', featureId);
try {
await secureFs.access(worktreePath);

View File

@@ -11,13 +11,13 @@ import { promisify } from 'util';
import { existsSync } from 'fs';
import { join } from 'path';
import { createLogger } from '@automaker/utils';
- import { isCursorModel, stripProviderPrefix } from '@automaker/types';
+ import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { mergeCommitMessagePrompts } from '@automaker/prompts';
import { ProviderFactory } from '../../../providers/provider-factory.js';
import type { SettingsService } from '../../../services/settings-service.js';
import { getErrorMessage, logError } from '../common.js';
- import { getPhaseModelWithOverrides } from '../../../lib/settings-helpers.js';
+ import { getActiveClaudeApiProfile } from '../../../lib/settings-helpers.js';
const logger = createLogger('GenerateCommitMessage');
const execAsync = promisify(exec);
@@ -157,29 +157,25 @@ export function createGenerateCommitMessageHandler(
const userPrompt = `Generate a commit message for these changes:\n\n\`\`\`diff\n${truncatedDiff}\n\`\`\``;
- // Get model from phase settings with provider info
- const {
- phaseModel: phaseModelEntry,
- provider: claudeCompatibleProvider,
- credentials,
- } = await getPhaseModelWithOverrides(
- 'commitMessageModel',
- settingsService,
- worktreePath,
- '[GenerateCommitMessage]'
- );
- const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
- logger.info(
- `Using model for commit message: ${model}`,
- claudeCompatibleProvider ? `via provider: ${claudeCompatibleProvider.name}` : 'direct API'
- );
+ // Get model from phase settings
+ const settings = await settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.commitMessageModel || DEFAULT_PHASE_MODELS.commitMessageModel;
+ const { model } = resolvePhaseModel(phaseModelEntry);
+ logger.info(`Using model for commit message: ${model}`);
// Get the effective system prompt (custom or default)
const systemPrompt = await getSystemPrompt(settingsService);
+ // Get active Claude API profile for alternative endpoint configuration
+ const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
+ settingsService,
+ '[GenerateCommitMessage]'
+ );
// Get provider for the model type
- const aiProvider = ProviderFactory.getProviderForModel(model);
+ const provider = ProviderFactory.getProviderForModel(model);
const bareModel = stripProviderPrefix(model);
// For Cursor models, combine prompts since Cursor doesn't support systemPrompt separation
@@ -188,10 +184,10 @@ export function createGenerateCommitMessageHandler(
: userPrompt;
const effectiveSystemPrompt = isCursorModel(model) ? undefined : systemPrompt;
- logger.info(`Using ${aiProvider.getName()} provider for model: ${model}`);
+ logger.info(`Using ${provider.getName()} provider for model: ${model}`);
let responseText = '';
- const stream = aiProvider.executeQuery({
+ const stream = provider.executeQuery({
prompt: effectivePrompt,
model: bareModel,
cwd: worktreePath,
@@ -199,8 +195,7 @@ export function createGenerateCommitMessageHandler(
maxTurns: 1,
allowedTools: [],
readOnly: true,
- thinkingLevel, // Pass thinking level for extended thinking support
- claudeCompatibleProvider, // Pass provider for alternative endpoint configuration
+ claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource
});

View File

@@ -28,10 +28,7 @@ export function createInfoHandler() {
}
// Check if worktree exists (git worktrees are stored in project directory)
- // Sanitize featureId the same way it's sanitized when creating worktrees
- // (see create.ts: branchName.replace(/[^a-zA-Z0-9_-]/g, '-'))
- const sanitizedFeatureId = featureId.replace(/[^a-zA-Z0-9_-]/g, '-');
- const worktreePath = path.join(projectPath, '.worktrees', sanitizedFeatureId);
+ const worktreePath = path.join(projectPath, '.worktrees', featureId);
try {
await secureFs.access(worktreePath);
const { stdout } = await execAsync('git rev-parse --abbrev-ref HEAD', {

View File

@@ -110,22 +110,9 @@ export function createListBranchesHandler() {
}
}
- // Check if any remotes are configured for this repository
- let hasAnyRemotes = false;
- try {
- const { stdout: remotesOutput } = await execAsync('git remote', {
- cwd: worktreePath,
- });
- hasAnyRemotes = remotesOutput.trim().length > 0;
- } catch {
- // If git remote fails, assume no remotes
- hasAnyRemotes = false;
- }
- // Get ahead/behind count for current branch and check if remote branch exists
+ // Get ahead/behind count for current branch
let aheadCount = 0;
let behindCount = 0;
- let hasRemoteBranch = false;
try {
// First check if there's a remote tracking branch
const { stdout: upstreamOutput } = await execAsync(
@@ -134,7 +121,6 @@ export function createListBranchesHandler() {
);
if (upstreamOutput.trim()) {
- hasRemoteBranch = true;
const { stdout: aheadBehindOutput } = await execAsync(
`git rev-list --left-right --count ${currentBranch}@{upstream}...HEAD`,
{ cwd: worktreePath }
@@ -144,18 +130,7 @@ export function createListBranchesHandler() {
behindCount = behind || 0;
}
} catch {
- // No upstream branch set - check if the branch exists on any remote
- try {
- // Check if there's a matching branch on origin (most common remote)
- const { stdout: remoteBranchOutput } = await execAsync(
- `git ls-remote --heads origin ${currentBranch}`,
- { cwd: worktreePath, timeout: 5000 }
- );
- hasRemoteBranch = remoteBranchOutput.trim().length > 0;
- } catch {
- // No remote branch found or origin doesn't exist
- hasRemoteBranch = false;
- }
+ // No upstream branch set, that's okay
}
res.json({ res.json({
@@ -165,8 +140,6 @@ export function createListBranchesHandler() {
branches,
aheadCount,
behindCount,
- hasRemoteBranch,
- hasAnyRemotes,
},
});
} catch (error) {
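For context on the ahead/behind computation kept in both versions: `git rev-list --left-right --count <upstream>...HEAD` prints two tab-separated numbers, commits only on the upstream side (behind) and commits only on HEAD (ahead). A parsing sketch matching how the handler assigns them:

```ts
// Parse "X<TAB>Y" from `git rev-list --left-right --count <upstream>...HEAD`
// into behind/ahead counts.
function parseAheadBehind(output: string): { behind: number; ahead: number } {
  const [behind, ahead] = output.trim().split('\t').map(Number);
  return { behind: behind || 0, ahead: ahead || 0 };
}
```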

View File

@@ -1,127 +0,0 @@
/**
* POST /list-remotes endpoint - List all remotes and their branches
*
* Note: Git repository validation (isGitRepo, hasCommits) is handled by
* the requireValidWorktree middleware in index.ts
*/
import type { Request, Response } from 'express';
import { exec } from 'child_process';
import { promisify } from 'util';
import { getErrorMessage, logWorktreeError } from '../common.js';
const execAsync = promisify(exec);
interface RemoteBranch {
name: string;
fullRef: string;
}
interface RemoteInfo {
name: string;
url: string;
branches: RemoteBranch[];
}
export function createListRemotesHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { worktreePath } = req.body as {
worktreePath: string;
};
if (!worktreePath) {
res.status(400).json({
success: false,
error: 'worktreePath required',
});
return;
}
// Get list of remotes
const { stdout: remotesOutput } = await execAsync('git remote -v', {
cwd: worktreePath,
});
// Parse remotes (each remote appears twice - once for fetch, once for push)
const remotesSet = new Map<string, string>();
remotesOutput
.trim()
.split('\n')
.filter((line) => line.trim())
.forEach((line) => {
const match = line.match(/^(\S+)\s+(\S+)\s+\(fetch\)$/);
if (match) {
remotesSet.set(match[1], match[2]);
}
});
// Fetch latest from all remotes (silently, don't fail if offline)
try {
await execAsync('git fetch --all --quiet', {
cwd: worktreePath,
timeout: 15000, // 15 second timeout
});
} catch {
// Ignore fetch errors - we'll use cached remote refs
}
// Get all remote branches
const { stdout: remoteBranchesOutput } = await execAsync(
'git branch -r --format="%(refname:short)"',
{ cwd: worktreePath }
);
// Group branches by remote
const remotesBranches = new Map<string, RemoteBranch[]>();
remotesSet.forEach((_, remoteName) => {
remotesBranches.set(remoteName, []);
});
remoteBranchesOutput
.trim()
.split('\n')
.filter((line) => line.trim())
.forEach((line) => {
const cleanLine = line.trim().replace(/^['"]|['"]$/g, '');
// Skip HEAD pointers like "origin/HEAD"
if (cleanLine.includes('/HEAD')) return;
// Parse remote name from branch ref (e.g., "origin/main" -> "origin")
const slashIndex = cleanLine.indexOf('/');
if (slashIndex === -1) return;
const remoteName = cleanLine.substring(0, slashIndex);
const branchName = cleanLine.substring(slashIndex + 1);
if (remotesBranches.has(remoteName)) {
remotesBranches.get(remoteName)!.push({
name: branchName,
fullRef: cleanLine,
});
}
});
// Build final result
const remotes: RemoteInfo[] = [];
remotesSet.forEach((url, name) => {
remotes.push({
name,
url,
branches: remotesBranches.get(name) || [],
});
});
res.json({
success: true,
result: {
remotes,
},
});
} catch (error) {
const worktreePath = req.body?.worktreePath;
logWorktreeError(error, 'List remotes failed', worktreePath);
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
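The `git remote -v` parsing above keys off the `(fetch)` suffix, so each remote yields exactly one entry. An illustrative input and the result the regex pass would produce:

```ts
// Example of what the parsing above extracts from `git remote -v` output.
const sample = [
  'origin\thttps://github.com/user/repo.git (fetch)',
  'origin\thttps://github.com/user/repo.git (push)',
  'upstream\tgit@github.com:other/repo.git (fetch)',
  'upstream\tgit@github.com:other/repo.git (push)',
].join('\n');
// After the regex pass, remotesSet would hold:
//   origin   -> https://github.com/user/repo.git
//   upstream -> git@github.com:other/repo.git
```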

View File

@@ -39,15 +39,8 @@ interface GitHubRemoteCacheEntry {
checkedAt: number;
}
- interface GitHubPRCacheEntry {
- prs: Map<string, WorktreePRInfo>;
- fetchedAt: number;
- }
const githubRemoteCache = new Map<string, GitHubRemoteCacheEntry>();
- const githubPRCache = new Map<string, GitHubPRCacheEntry>();
const GITHUB_REMOTE_CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes
- const GITHUB_PR_CACHE_TTL_MS = 2 * 60 * 1000; // 2 minutes - avoid hitting GitHub on every poll
interface WorktreeInfo { interface WorktreeInfo {
path: string; path: string;
@@ -187,21 +180,9 @@ async function getGitHubRemoteStatus(projectPath: string): Promise<GitHubRemoteS
* This also allows detecting PRs that were created outside the app. * This also allows detecting PRs that were created outside the app.
* *
* Uses cached GitHub remote status to avoid repeated warnings when the * Uses cached GitHub remote status to avoid repeated warnings when the
* project doesn't have a GitHub remote configured. Results are cached * project doesn't have a GitHub remote configured.
* briefly to avoid hammering GitHub on frequent worktree polls.
*/ */
async function fetchGitHubPRs( async function fetchGitHubPRs(projectPath: string): Promise<Map<string, WorktreePRInfo>> {
projectPath: string,
forceRefresh = false
): Promise<Map<string, WorktreePRInfo>> {
const now = Date.now();
const cached = githubPRCache.get(projectPath);
// Return cached result if valid and not forcing refresh
if (!forceRefresh && cached && now - cached.fetchedAt < GITHUB_PR_CACHE_TTL_MS) {
return cached.prs;
}
const prMap = new Map<string, WorktreePRInfo>(); const prMap = new Map<string, WorktreePRInfo>();
try { try {
@@ -244,22 +225,8 @@ async function fetchGitHubPRs(
createdAt: pr.createdAt, createdAt: pr.createdAt,
}); });
} }
// Only update cache on successful fetch
githubPRCache.set(projectPath, {
prs: prMap,
fetchedAt: Date.now(),
});
} catch (error) { } catch (error) {
// On fetch failure, return stale cached data if available to avoid // Silently fail - PR detection is optional
// repeated API calls during GitHub API flakiness or temporary outages
if (cached) {
logger.warn(`Failed to fetch GitHub PRs, returning stale cache: ${getErrorMessage(error)}`);
// Extend cache TTL to avoid repeated retries during outages
githubPRCache.set(projectPath, { prs: cached.prs, fetchedAt: Date.now() });
return cached.prs;
}
// No cache available, log warning and return empty map
logger.warn(`Failed to fetch GitHub PRs: ${getErrorMessage(error)}`); logger.warn(`Failed to fetch GitHub PRs: ${getErrorMessage(error)}`);
} }
@@ -397,7 +364,7 @@ export function createListHandler() {
// Only fetch GitHub PRs if includeDetails is requested (performance optimization). // Only fetch GitHub PRs if includeDetails is requested (performance optimization).
// Uses --state all to detect merged/closed PRs, limited to 1000 recent PRs. // Uses --state all to detect merged/closed PRs, limited to 1000 recent PRs.
const githubPRs = includeDetails const githubPRs = includeDetails
? await fetchGitHubPRs(projectPath, forceRefreshGitHub) ? await fetchGitHubPRs(projectPath)
: new Map<string, WorktreePRInfo>(); : new Map<string, WorktreePRInfo>();
for (const worktree of worktrees) { for (const worktree of worktrees) {

View File

@@ -1,7 +1,5 @@
/**
- * POST /merge endpoint - Merge feature (merge worktree branch into a target branch)
- *
- * Allows merging a worktree branch into any target branch (defaults to 'main').
+ * POST /merge endpoint - Merge feature (merge worktree branch into main)
*
* Note: Git repository validation (isGitRepo, hasCommits) is handled by
* the requireValidProject middleware in index.ts
@@ -10,21 +8,18 @@
import type { Request, Response } from 'express';
import { exec } from 'child_process';
import { promisify } from 'util';
- import { getErrorMessage, logError, isValidBranchName, execGitCommand } from '../common.js';
- import { createLogger } from '@automaker/utils';
+ import { getErrorMessage, logError } from '../common.js';
const execAsync = promisify(exec);
- const logger = createLogger('Worktree');
export function createMergeHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
- const { projectPath, branchName, worktreePath, targetBranch, options } = req.body as {
+ const { projectPath, branchName, worktreePath, options } = req.body as {
projectPath: string;
branchName: string;
worktreePath: string;
- targetBranch?: string; // Branch to merge into (defaults to 'main')
- options?: { squash?: boolean; message?: string; deleteWorktreeAndBranch?: boolean };
+ options?: { squash?: boolean; message?: string };
};
if (!projectPath || !branchName || !worktreePath) {
@@ -35,10 +30,7 @@ export function createMergeHandler() {
return;
}
- // Determine the target branch (default to 'main')
- const mergeTo = targetBranch || 'main';
- // Validate source branch exists
+ // Validate branch exists
try {
await execAsync(`git rev-parse --verify ${branchName}`, { cwd: projectPath });
} catch {
@@ -49,44 +41,12 @@ export function createMergeHandler() {
return;
}
- // Validate target branch exists
- try {
- await execAsync(`git rev-parse --verify ${mergeTo}`, { cwd: projectPath });
- } catch {
- res.status(400).json({
- success: false,
- error: `Target branch "${mergeTo}" does not exist`,
- });
- return;
- }
- // Merge the feature branch into the target branch
+ // Merge the feature branch
const mergeCmd = options?.squash
? `git merge --squash ${branchName}`
- : `git merge ${branchName} -m "${options?.message || `Merge ${branchName} into ${mergeTo}`}"`;
+ : `git merge ${branchName} -m "${options?.message || `Merge ${branchName}`}"`;
- try {
- await execAsync(mergeCmd, { cwd: projectPath });
- } catch (mergeError: unknown) {
- // Check if this is a merge conflict
- const err = mergeError as { stdout?: string; stderr?: string; message?: string };
- const output = `${err.stdout || ''} ${err.stderr || ''} ${err.message || ''}`;
- const hasConflicts =
- output.includes('CONFLICT') || output.includes('Automatic merge failed');
- if (hasConflicts) {
- // Return conflict-specific error message that frontend can detect
- res.status(409).json({
- success: false,
- error: `Merge CONFLICT: Automatic merge of "${branchName}" into "${mergeTo}" failed. Please resolve conflicts manually.`,
- hasConflicts: true,
- });
- return;
- }
- // Re-throw non-conflict errors to be handled by outer catch
- throw mergeError;
- }
+ await execAsync(mergeCmd, { cwd: projectPath });
// If squash merge, need to commit
if (options?.squash) {
@@ -95,46 +55,17 @@ export function createMergeHandler() {
});
}
- // Optionally delete the worktree and branch after merging
- let worktreeDeleted = false;
- let branchDeleted = false;
- if (options?.deleteWorktreeAndBranch) {
- // Remove the worktree
- try {
- await execGitCommand(['worktree', 'remove', worktreePath, '--force'], projectPath);
- worktreeDeleted = true;
- } catch {
- // Try with prune if remove fails
- try {
- await execGitCommand(['worktree', 'prune'], projectPath);
- worktreeDeleted = true;
- } catch {
- logger.warn(`Failed to remove worktree: ${worktreePath}`);
- }
- }
- // Delete the branch (but not main/master)
- if (branchName !== 'main' && branchName !== 'master') {
- if (!isValidBranchName(branchName)) {
- logger.warn(`Invalid branch name detected, skipping deletion: ${branchName}`);
- } else {
- try {
- await execGitCommand(['branch', '-D', branchName], projectPath);
- branchDeleted = true;
- } catch {
- logger.warn(`Failed to delete branch: ${branchName}`);
- }
- }
- }
- }
- res.json({
- success: true,
- mergedBranch: branchName,
- targetBranch: mergeTo,
- deleted: options?.deleteWorktreeAndBranch ? { worktreeDeleted, branchDeleted } : undefined,
- });
+ // Clean up worktree and branch
+ try {
+ await execAsync(`git worktree remove "${worktreePath}" --force`, {
+ cwd: projectPath,
+ });
+ await execAsync(`git branch -D ${branchName}`, { cwd: projectPath });
+ } catch {
+ // Cleanup errors are non-fatal
+ }
+ res.json({ success: true, mergedBranch: branchName });
} catch (error) {
logError(error, 'Merge worktree failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });

View File

@@ -15,10 +15,9 @@ const execAsync = promisify(exec);
export function createPushHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
- const { worktreePath, force, remote } = req.body as {
+ const { worktreePath, force } = req.body as {
worktreePath: string;
force?: boolean;
- remote?: string;
};
if (!worktreePath) {
@@ -35,18 +34,15 @@ export function createPushHandler() {
});
const branchName = branchOutput.trim();
- // Use specified remote or default to 'origin'
- const targetRemote = remote || 'origin';
// Push the branch
const forceFlag = force ? '--force' : '';
try {
- await execAsync(`git push -u ${targetRemote} ${branchName} ${forceFlag}`, {
+ await execAsync(`git push -u origin ${branchName} ${forceFlag}`, {
cwd: worktreePath,
});
} catch {
// Try setting upstream
- await execAsync(`git push --set-upstream ${targetRemote} ${branchName} ${forceFlag}`, {
+ await execAsync(`git push --set-upstream origin ${branchName} ${forceFlag}`, {
cwd: worktreePath,
});
}
@@ -56,7 +52,7 @@ export function createPushHandler() {
result: {
branch: branchName,
pushed: true,
- message: `Successfully pushed ${branchName} to ${targetRemote}`,
+ message: `Successfully pushed ${branchName} to origin`,
},
});
} catch (error) {

View File

@@ -1,22 +1,16 @@
/**
* POST /start-dev endpoint - Start a dev server for a worktree
*
- * Spins up a development server in the worktree directory on a unique port,
- * allowing preview of the worktree's changes without affecting the main dev server.
- *
- * If a custom devCommand is configured in project settings, it will be used.
- * Otherwise, auto-detection based on package manager (npm/yarn/pnpm/bun run dev) is used.
+ * Spins up a development server (npm run dev) in the worktree directory
+ * on a unique port, allowing preview of the worktree's changes without
+ * affecting the main dev server.
*/
import type { Request, Response } from 'express';
- import type { SettingsService } from '../../../services/settings-service.js';
import { getDevServerService } from '../../../services/dev-server-service.js';
import { getErrorMessage, logError } from '../common.js';
- import { createLogger } from '@automaker/utils';
- const logger = createLogger('start-dev');
- export function createStartDevHandler(settingsService?: SettingsService) {
+ export function createStartDevHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, worktreePath } = req.body as {
@@ -40,25 +34,8 @@ export function createStartDevHandler(settingsService?: SettingsService) {
         return;
       }

-      // Get custom dev command from project settings (if configured)
-      let customCommand: string | undefined;
-      if (settingsService) {
-        const projectSettings = await settingsService.getProjectSettings(projectPath);
-        const devCommand = projectSettings?.devCommand?.trim();
-        if (devCommand) {
-          customCommand = devCommand;
-          logger.debug(`Using custom dev command from project settings: ${customCommand}`);
-        } else {
-          logger.debug('No custom dev command configured, using auto-detection');
-        }
-      }
-
       const devServerService = getDevServerService();
-      const result = await devServerService.startDevServer(
-        projectPath,
-        worktreePath,
-        customCommand
-      );
+      const result = await devServerService.startDevServer(projectPath, worktreePath);

       if (result.success && result.result) {
         res.json({
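
A minimal sketch of the dev-command resolution dropped above, assuming the same SettingsService import path the old handler used; returning undefined stands in for the auto-detection fallback the removed code logged:

import type { SettingsService } from '../../../services/settings-service.js';

// Returns the configured devCommand, or undefined to let the dev-server service
// auto-detect the package manager (npm/yarn/pnpm/bun run dev), as the old handler did.
async function resolveDevCommand(
  settingsService: SettingsService | undefined,
  projectPath: string
): Promise<string | undefined> {
  if (!settingsService) return undefined;
  const projectSettings = await settingsService.getProjectSettings(projectPath);
  return projectSettings?.devCommand?.trim() || undefined;
}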

View File

@@ -1,92 +0,0 @@
/**
* POST /start-tests endpoint - Start tests for a worktree
*
* Runs the test command configured in project settings.
* If no testCommand is configured, returns an error.
*/
import type { Request, Response } from 'express';
import type { SettingsService } from '../../../services/settings-service.js';
import { getTestRunnerService } from '../../../services/test-runner-service.js';
import { getErrorMessage, logError } from '../common.js';
export function createStartTestsHandler(settingsService?: SettingsService) {
return async (req: Request, res: Response): Promise<void> => {
try {
const body = req.body;
// Validate request body
if (!body || typeof body !== 'object') {
res.status(400).json({
success: false,
error: 'Request body must be an object',
});
return;
}
const worktreePath = typeof body.worktreePath === 'string' ? body.worktreePath : undefined;
const projectPath = typeof body.projectPath === 'string' ? body.projectPath : undefined;
const testFile = typeof body.testFile === 'string' ? body.testFile : undefined;
if (!worktreePath) {
res.status(400).json({
success: false,
error: 'worktreePath is required and must be a string',
});
return;
}
// Get project settings to find the test command
// Use projectPath if provided, otherwise use worktreePath
const settingsPath = projectPath || worktreePath;
if (!settingsService) {
res.status(500).json({
success: false,
error: 'Settings service not available',
});
return;
}
const projectSettings = await settingsService.getProjectSettings(settingsPath);
const testCommand = projectSettings?.testCommand;
if (!testCommand) {
res.status(400).json({
success: false,
error:
'No test command configured. Please configure a test command in Project Settings > Testing Configuration.',
});
return;
}
const testRunnerService = getTestRunnerService();
const result = await testRunnerService.startTests(worktreePath, {
command: testCommand,
testFile,
});
if (result.success && result.result) {
res.json({
success: true,
result: {
sessionId: result.result.sessionId,
worktreePath: result.result.worktreePath,
command: result.result.command,
status: result.result.status,
testFile: result.result.testFile,
message: result.result.message,
},
});
} else {
res.status(400).json({
success: false,
error: result.error || 'Failed to start tests',
});
}
} catch (error) {
logError(error, 'Start tests failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
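
A hypothetical client call for the removed endpoint; the /start-tests path and body fields come from the handler above, while the host, port, and /api/worktrees prefix are assumptions:

async function startTests(worktreePath: string, testFile?: string) {
  const res = await fetch('http://localhost:3000/api/worktrees/start-tests', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ worktreePath, testFile }),
  });
  const body = (await res.json()) as {
    success: boolean;
    error?: string;
    result?: { sessionId: string; command: string; status: string };
  };
  if (!body.success) throw new Error(body.error); // e.g. no testCommand configured in project settings
  return body.result;
}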

View File

@@ -28,10 +28,7 @@ export function createStatusHandler() {
       }

       // Git worktrees are stored in project directory
-      // Sanitize featureId the same way it's sanitized when creating worktrees
-      // (see create.ts: branchName.replace(/[^a-zA-Z0-9_-]/g, '-'))
-      const sanitizedFeatureId = featureId.replace(/[^a-zA-Z0-9_-]/g, '-');
-      const worktreePath = path.join(projectPath, '.worktrees', sanitizedFeatureId);
+      const worktreePath = path.join(projectPath, '.worktrees', featureId);

       try {
         await secureFs.access(worktreePath);
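
A small, self-contained illustration of why the removed sanitization mattered; the featureId value is an assumption chosen to contain a character the regex rewrites:

const featureId = 'feature/add-login'; // hypothetical id containing a '/'
const sanitized = featureId.replace(/[^a-zA-Z0-9_-]/g, '-'); // 'feature-add-login'
// Old lookup: path.join(projectPath, '.worktrees', sanitized)  -> matches the directory created by create.ts
// New lookup: path.join(projectPath, '.worktrees', featureId)  -> only matches when featureId needs no sanitizing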

View File

@@ -1,58 +0,0 @@
/**
* POST /stop-tests endpoint - Stop a running test session
*
* Stops the test runner process for a specific session,
* cancelling any ongoing tests and freeing up resources.
*/
import type { Request, Response } from 'express';
import { getTestRunnerService } from '../../../services/test-runner-service.js';
import { getErrorMessage, logError } from '../common.js';
export function createStopTestsHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const body = req.body;
// Validate request body
if (!body || typeof body !== 'object') {
res.status(400).json({
success: false,
error: 'Request body must be an object',
});
return;
}
const sessionId = typeof body.sessionId === 'string' ? body.sessionId : undefined;
if (!sessionId) {
res.status(400).json({
success: false,
error: 'sessionId is required and must be a string',
});
return;
}
const testRunnerService = getTestRunnerService();
const result = await testRunnerService.stopTests(sessionId);
if (result.success && result.result) {
res.json({
success: true,
result: {
sessionId: result.result.sessionId,
message: result.result.message,
},
});
} else {
res.status(400).json({
success: false,
error: result.error || 'Failed to stop tests',
});
}
} catch (error) {
logError(error, 'Stop tests failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}

View File

@@ -1,160 +0,0 @@
/**
* GET /test-logs endpoint - Get buffered logs for a test runner session
*
* Returns the scrollback buffer containing historical log output for a test run.
* Used by clients to populate the log panel on initial connection
* before subscribing to real-time updates via WebSocket.
*
* Query parameters:
* - worktreePath: Path to the worktree (optional if sessionId provided)
* - sessionId: Specific test session ID (optional, uses active session if not provided)
*/
import type { Request, Response } from 'express';
import { getTestRunnerService } from '../../../services/test-runner-service.js';
import { getErrorMessage, logError } from '../common.js';
interface SessionInfo {
sessionId: string;
worktreePath?: string;
command?: string;
testFile?: string;
exitCode?: number | null;
}
interface OutputResult {
sessionId: string;
status: string;
output: string;
startedAt: string;
finishedAt?: string | null;
}
function buildLogsResponse(session: SessionInfo, output: OutputResult) {
return {
success: true,
result: {
sessionId: session.sessionId,
worktreePath: session.worktreePath,
command: session.command,
status: output.status,
testFile: session.testFile,
logs: output.output,
startedAt: output.startedAt,
finishedAt: output.finishedAt,
exitCode: session.exitCode ?? null,
},
};
}
export function createGetTestLogsHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { worktreePath, sessionId } = req.query as {
worktreePath?: string;
sessionId?: string;
};
const testRunnerService = getTestRunnerService();
// If sessionId is provided, get logs for that specific session
if (sessionId) {
const result = testRunnerService.getSessionOutput(sessionId);
if (result.success && result.result) {
const session = testRunnerService.getSession(sessionId);
res.json(
buildLogsResponse(
{
sessionId: result.result.sessionId,
worktreePath: session?.worktreePath,
command: session?.command,
testFile: session?.testFile,
exitCode: session?.exitCode,
},
result.result
)
);
} else {
res.status(404).json({
success: false,
error: result.error || 'Failed to get test logs',
});
}
return;
}
// If worktreePath is provided, get logs for the active session
if (worktreePath) {
const activeSession = testRunnerService.getActiveSession(worktreePath);
if (activeSession) {
const result = testRunnerService.getSessionOutput(activeSession.id);
if (result.success && result.result) {
res.json(
buildLogsResponse(
{
sessionId: activeSession.id,
worktreePath: activeSession.worktreePath,
command: activeSession.command,
testFile: activeSession.testFile,
exitCode: activeSession.exitCode,
},
result.result
)
);
} else {
res.status(404).json({
success: false,
error: result.error || 'Failed to get test logs',
});
}
} else {
// No active session - check for most recent session for this worktree
const sessions = testRunnerService.listSessions(worktreePath);
if (sessions.result.sessions.length > 0) {
// Get the most recent session (list is not sorted, so find it)
const mostRecent = sessions.result.sessions.reduce((latest, current) => {
const latestTime = new Date(latest.startedAt).getTime();
const currentTime = new Date(current.startedAt).getTime();
return currentTime > latestTime ? current : latest;
});
const result = testRunnerService.getSessionOutput(mostRecent.sessionId);
if (result.success && result.result) {
res.json(
buildLogsResponse(
{
sessionId: mostRecent.sessionId,
worktreePath: mostRecent.worktreePath,
command: mostRecent.command,
testFile: mostRecent.testFile,
exitCode: mostRecent.exitCode,
},
result.result
)
);
return;
}
}
res.status(404).json({
success: false,
error: 'No test sessions found for this worktree',
});
}
return;
}
// Neither sessionId nor worktreePath provided
res.status(400).json({
success: false,
error: 'Either worktreePath or sessionId query parameter is required',
});
} catch (error) {
logError(error, 'Get test logs failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
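
A sketch of how a client could query the removed endpoint; the query parameter names and their precedence (sessionId over worktreePath) come from the doc comment and handler above, while the URL prefix is an assumption:

async function getTestLogs(opts: { worktreePath?: string; sessionId?: string }) {
  const params = new URLSearchParams();
  if (opts.sessionId) params.set('sessionId', opts.sessionId);
  else if (opts.worktreePath) params.set('worktreePath', opts.worktreePath);
  const res = await fetch(`http://localhost:3000/api/worktrees/test-logs?${params.toString()}`);
  return (await res.json()) as {
    success: boolean;
    error?: string;
    result?: { sessionId: string; logs: string; status: string; exitCode: number | null };
  };
}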

View File

@@ -1,83 +0,0 @@
/**
* AgentExecutor Types - Type definitions for agent execution
*/
import type {
PlanningMode,
ThinkingLevel,
ParsedTask,
ClaudeCompatibleProvider,
Credentials,
} from '@automaker/types';
import type { BaseProvider } from '../providers/base-provider.js';
export interface AgentExecutionOptions {
workDir: string;
featureId: string;
prompt: string;
projectPath: string;
abortController: AbortController;
imagePaths?: string[];
model?: string;
planningMode?: PlanningMode;
requirePlanApproval?: boolean;
previousContent?: string;
systemPrompt?: string;
autoLoadClaudeMd?: boolean;
thinkingLevel?: ThinkingLevel;
branchName?: string | null;
credentials?: Credentials;
claudeCompatibleProvider?: ClaudeCompatibleProvider;
mcpServers?: Record<string, unknown>;
sdkOptions?: {
maxTurns?: number;
allowedTools?: string[];
systemPrompt?: string | { type: 'preset'; preset: 'claude_code'; append?: string };
settingSources?: Array<'user' | 'project' | 'local'>;
};
provider: BaseProvider;
effectiveBareModel: string;
specAlreadyDetected?: boolean;
existingApprovedPlanContent?: string;
persistedTasks?: ParsedTask[];
}
export interface AgentExecutionResult {
responseText: string;
specDetected: boolean;
tasksCompleted: number;
aborted: boolean;
}
export type WaitForApprovalFn = (
featureId: string,
projectPath: string
) => Promise<{ approved: boolean; feedback?: string; editedPlan?: string }>;
export type SaveFeatureSummaryFn = (
projectPath: string,
featureId: string,
summary: string
) => Promise<void>;
export type UpdateFeatureSummaryFn = (
projectPath: string,
featureId: string,
summary: string
) => Promise<void>;
export type BuildTaskPromptFn = (
task: ParsedTask,
allTasks: ParsedTask[],
taskIndex: number,
planContent: string,
taskPromptTemplate: string,
userFeedback?: string
) => string;
export interface AgentExecutorCallbacks {
waitForApproval: WaitForApprovalFn;
saveFeatureSummary: SaveFeatureSummaryFn;
updateFeatureSummary: UpdateFeatureSummaryFn;
buildTaskPrompt: BuildTaskPromptFn;
}

View File

@@ -1,686 +0,0 @@
/**
* AgentExecutor - Core agent execution engine with streaming support
*/
import path from 'path';
import type { ExecuteOptions, ParsedTask } from '@automaker/types';
import { buildPromptWithImages, createLogger } from '@automaker/utils';
import { getFeatureDir } from '@automaker/platform';
import * as secureFs from '../lib/secure-fs.js';
import { TypedEventBus } from './typed-event-bus.js';
import { FeatureStateManager } from './feature-state-manager.js';
import { PlanApprovalService } from './plan-approval-service.js';
import type { SettingsService } from './settings-service.js';
import {
parseTasksFromSpec,
detectTaskStartMarker,
detectTaskCompleteMarker,
detectPhaseCompleteMarker,
detectSpecFallback,
extractSummary,
} from './spec-parser.js';
import { getPromptCustomization } from '../lib/settings-helpers.js';
import type {
AgentExecutionOptions,
AgentExecutionResult,
AgentExecutorCallbacks,
} from './agent-executor-types.js';
// Re-export types for backward compatibility
export type {
AgentExecutionOptions,
AgentExecutionResult,
WaitForApprovalFn,
SaveFeatureSummaryFn,
UpdateFeatureSummaryFn,
BuildTaskPromptFn,
} from './agent-executor-types.js';
const logger = createLogger('AgentExecutor');
export class AgentExecutor {
private static readonly WRITE_DEBOUNCE_MS = 500;
private static readonly STREAM_HEARTBEAT_MS = 15_000;
constructor(
private eventBus: TypedEventBus,
private featureStateManager: FeatureStateManager,
private planApprovalService: PlanApprovalService,
private settingsService: SettingsService | null = null
) {}
async execute(
options: AgentExecutionOptions,
callbacks: AgentExecutorCallbacks
): Promise<AgentExecutionResult> {
const {
workDir,
featureId,
projectPath,
abortController,
branchName = null,
provider,
effectiveBareModel,
previousContent,
planningMode = 'skip',
requirePlanApproval = false,
specAlreadyDetected = false,
existingApprovedPlanContent,
persistedTasks,
credentials,
claudeCompatibleProvider,
mcpServers,
sdkOptions,
} = options;
const { content: promptContent } = await buildPromptWithImages(
options.prompt,
options.imagePaths,
workDir,
false
);
const executeOptions: ExecuteOptions = {
prompt: promptContent,
model: effectiveBareModel,
maxTurns: sdkOptions?.maxTurns,
cwd: workDir,
allowedTools: sdkOptions?.allowedTools as string[] | undefined,
abortController,
systemPrompt: sdkOptions?.systemPrompt,
settingSources: sdkOptions?.settingSources,
mcpServers:
mcpServers && Object.keys(mcpServers).length > 0
? (mcpServers as Record<string, { command: string }>)
: undefined,
thinkingLevel: options.thinkingLevel,
credentials,
claudeCompatibleProvider,
};
const featureDirForOutput = getFeatureDir(projectPath, featureId);
const outputPath = path.join(featureDirForOutput, 'agent-output.md');
const rawOutputPath = path.join(featureDirForOutput, 'raw-output.jsonl');
const enableRawOutput =
process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === 'true' ||
process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === '1';
let responseText = previousContent
? `${previousContent}\n\n---\n\n## Follow-up Session\n\n`
: '';
let specDetected = specAlreadyDetected,
tasksCompleted = 0,
aborted = false;
let writeTimeout: ReturnType<typeof setTimeout> | null = null,
rawOutputLines: string[] = [],
rawWriteTimeout: ReturnType<typeof setTimeout> | null = null;
const writeToFile = async (): Promise<void> => {
try {
await secureFs.mkdir(path.dirname(outputPath), { recursive: true });
await secureFs.writeFile(outputPath, responseText);
} catch (error) {
logger.error(`Failed to write agent output for ${featureId}:`, error);
}
};
const scheduleWrite = (): void => {
if (writeTimeout) clearTimeout(writeTimeout);
writeTimeout = setTimeout(() => writeToFile(), AgentExecutor.WRITE_DEBOUNCE_MS);
};
const appendRawEvent = (event: unknown): void => {
if (!enableRawOutput) return;
try {
rawOutputLines.push(
JSON.stringify({ timestamp: new Date().toISOString(), event }, null, 4)
);
if (rawWriteTimeout) clearTimeout(rawWriteTimeout);
rawWriteTimeout = setTimeout(async () => {
try {
await secureFs.mkdir(path.dirname(rawOutputPath), { recursive: true });
await secureFs.appendFile(rawOutputPath, rawOutputLines.join('\n') + '\n');
rawOutputLines = [];
} catch {
/* ignore */
}
}, AgentExecutor.WRITE_DEBOUNCE_MS);
} catch {
/* ignore */
}
};
const streamStartTime = Date.now();
let receivedAnyStreamMessage = false;
const streamHeartbeat = setInterval(() => {
if (!receivedAnyStreamMessage)
logger.info(
`Waiting for first model response for feature ${featureId} (${Math.round((Date.now() - streamStartTime) / 1000)}s elapsed)...`
);
}, AgentExecutor.STREAM_HEARTBEAT_MS);
const planningModeRequiresApproval =
planningMode === 'spec' ||
planningMode === 'full' ||
(planningMode === 'lite' && requirePlanApproval);
const requiresApproval = planningModeRequiresApproval && requirePlanApproval;
if (existingApprovedPlanContent && persistedTasks && persistedTasks.length > 0) {
const result = await this.executeTasksLoop(
options,
persistedTasks,
existingApprovedPlanContent,
responseText,
scheduleWrite,
callbacks
);
clearInterval(streamHeartbeat);
if (writeTimeout) clearTimeout(writeTimeout);
if (rawWriteTimeout) clearTimeout(rawWriteTimeout);
await writeToFile();
return {
responseText: result.responseText,
specDetected: true,
tasksCompleted: result.tasksCompleted,
aborted: result.aborted,
};
}
logger.info(`Starting stream for feature ${featureId}...`);
const stream = provider.executeQuery(executeOptions);
try {
streamLoop: for await (const msg of stream) {
receivedAnyStreamMessage = true;
appendRawEvent(msg);
if (abortController.signal.aborted) {
aborted = true;
throw new Error('Feature execution aborted');
}
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text') {
const newText = block.text || '';
if (!newText) continue;
if (responseText.length > 0 && newText.length > 0) {
const endsWithSentence = /[.!?:]\s*$/.test(responseText),
endsWithNewline = /\n\s*$/.test(responseText);
if (
!endsWithNewline &&
(endsWithSentence || /^[\n#\-*>]/.test(newText)) &&
!/[a-zA-Z0-9]/.test(responseText.slice(-1))
)
responseText += '\n\n';
}
responseText += newText;
if (
block.text &&
(block.text.includes('Invalid API key') ||
block.text.includes('authentication_failed') ||
block.text.includes('Fix external API key'))
)
throw new Error(
"Authentication failed: Invalid or expired API key. Please check your ANTHROPIC_API_KEY, or run 'claude login' to re-authenticate."
);
scheduleWrite();
const hasExplicitMarker = responseText.includes('[SPEC_GENERATED]'),
hasFallbackSpec = !hasExplicitMarker && detectSpecFallback(responseText);
if (
planningModeRequiresApproval &&
!specDetected &&
(hasExplicitMarker || hasFallbackSpec)
) {
specDetected = true;
const planContent = hasExplicitMarker
? responseText.substring(0, responseText.indexOf('[SPEC_GENERATED]')).trim()
: responseText.trim();
if (!hasExplicitMarker)
logger.info(`Using fallback spec detection for feature ${featureId}`);
const result = await this.handleSpecGenerated(
options,
planContent,
responseText,
requiresApproval,
scheduleWrite,
callbacks
);
responseText = result.responseText;
tasksCompleted = result.tasksCompleted;
break streamLoop;
}
if (!specDetected)
this.eventBus.emitAutoModeEvent('auto_mode_progress', {
featureId,
branchName,
content: block.text,
});
} else if (block.type === 'tool_use') {
this.eventBus.emitAutoModeEvent('auto_mode_tool', {
featureId,
branchName,
tool: block.name,
input: block.input,
});
if (responseText.length > 0 && !responseText.endsWith('\n')) responseText += '\n';
responseText += `\n Tool: ${block.name}\n`;
if (block.input) responseText += `Input: ${JSON.stringify(block.input, null, 2)}\n`;
scheduleWrite();
}
}
} else if (msg.type === 'error') {
throw new Error(msg.error || 'Unknown error');
} else if (msg.type === 'result' && msg.subtype === 'success') scheduleWrite();
}
await writeToFile();
if (enableRawOutput && rawOutputLines.length > 0) {
try {
await secureFs.mkdir(path.dirname(rawOutputPath), { recursive: true });
await secureFs.appendFile(rawOutputPath, rawOutputLines.join('\n') + '\n');
} catch {
/* ignore */
}
}
} finally {
clearInterval(streamHeartbeat);
if (writeTimeout) clearTimeout(writeTimeout);
if (rawWriteTimeout) clearTimeout(rawWriteTimeout);
}
return { responseText, specDetected, tasksCompleted, aborted };
}
private async executeTasksLoop(
options: AgentExecutionOptions,
tasks: ParsedTask[],
planContent: string,
initialResponseText: string,
scheduleWrite: () => void,
callbacks: AgentExecutorCallbacks,
userFeedback?: string
): Promise<{ responseText: string; tasksCompleted: number; aborted: boolean }> {
const {
featureId,
projectPath,
abortController,
branchName = null,
provider,
sdkOptions,
} = options;
logger.info(`Starting task execution for feature ${featureId} with ${tasks.length} tasks`);
const taskPrompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
let responseText = initialResponseText,
tasksCompleted = 0;
for (let taskIndex = 0; taskIndex < tasks.length; taskIndex++) {
const task = tasks[taskIndex];
if (task.status === 'completed') {
tasksCompleted++;
continue;
}
if (abortController.signal.aborted) return { responseText, tasksCompleted, aborted: true };
await this.featureStateManager.updateTaskStatus(
projectPath,
featureId,
task.id,
'in_progress'
);
this.eventBus.emitAutoModeEvent('auto_mode_task_started', {
featureId,
projectPath,
branchName,
taskId: task.id,
taskDescription: task.description,
taskIndex,
tasksTotal: tasks.length,
});
await this.featureStateManager.updateFeaturePlanSpec(projectPath, featureId, {
currentTaskId: task.id,
});
const taskPrompt = callbacks.buildTaskPrompt(
task,
tasks,
taskIndex,
planContent,
taskPrompts.taskExecution.taskPromptTemplate,
userFeedback
);
const taskStream = provider.executeQuery(
this.buildExecOpts(options, taskPrompt, Math.min(sdkOptions?.maxTurns || 100, 50))
);
let taskOutput = '',
taskStartDetected = false,
taskCompleteDetected = false;
for await (const msg of taskStream) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const b of msg.message.content) {
if (b.type === 'text') {
const text = b.text || '';
taskOutput += text;
responseText += text;
this.eventBus.emitAutoModeEvent('auto_mode_progress', {
featureId,
branchName,
content: text,
});
scheduleWrite();
if (!taskStartDetected) {
const sid = detectTaskStartMarker(taskOutput);
if (sid) {
taskStartDetected = true;
await this.featureStateManager.updateTaskStatus(
projectPath,
featureId,
sid,
'in_progress'
);
}
}
if (!taskCompleteDetected) {
const cid = detectTaskCompleteMarker(taskOutput);
if (cid) {
taskCompleteDetected = true;
await this.featureStateManager.updateTaskStatus(
projectPath,
featureId,
cid,
'completed'
);
}
}
const pn = detectPhaseCompleteMarker(text);
if (pn !== null)
this.eventBus.emitAutoModeEvent('auto_mode_phase_complete', {
featureId,
projectPath,
branchName,
phaseNumber: pn,
});
} else if (b.type === 'tool_use')
this.eventBus.emitAutoModeEvent('auto_mode_tool', {
featureId,
branchName,
tool: b.name,
input: b.input,
});
}
} else if (msg.type === 'error')
throw new Error(msg.error || `Error during task ${task.id}`);
else if (msg.type === 'result' && msg.subtype === 'success') {
taskOutput += msg.result || '';
responseText += msg.result || '';
}
}
if (!taskCompleteDetected)
await this.featureStateManager.updateTaskStatus(
projectPath,
featureId,
task.id,
'completed'
);
tasksCompleted = taskIndex + 1;
this.eventBus.emitAutoModeEvent('auto_mode_task_complete', {
featureId,
projectPath,
branchName,
taskId: task.id,
tasksCompleted,
tasksTotal: tasks.length,
});
await this.featureStateManager.updateFeaturePlanSpec(projectPath, featureId, {
tasksCompleted,
});
if (task.phase) {
const next = tasks[taskIndex + 1];
if (!next || next.phase !== task.phase) {
const m = task.phase.match(/Phase\s*(\d+)/i);
if (m)
this.eventBus.emitAutoModeEvent('auto_mode_phase_complete', {
featureId,
projectPath,
branchName,
phaseNumber: parseInt(m[1], 10),
});
}
}
}
const summary = extractSummary(responseText);
if (summary) await callbacks.saveFeatureSummary(projectPath, featureId, summary);
return { responseText, tasksCompleted, aborted: false };
}
private async handleSpecGenerated(
options: AgentExecutionOptions,
planContent: string,
initialResponseText: string,
requiresApproval: boolean,
scheduleWrite: () => void,
callbacks: AgentExecutorCallbacks
): Promise<{ responseText: string; tasksCompleted: number }> {
const {
workDir,
featureId,
projectPath,
abortController,
branchName = null,
planningMode = 'skip',
provider,
effectiveBareModel,
credentials,
claudeCompatibleProvider,
mcpServers,
sdkOptions,
} = options;
let responseText = initialResponseText,
parsedTasks = parseTasksFromSpec(planContent);
logger.info(`Parsed ${parsedTasks.length} tasks from spec for feature ${featureId}`);
await this.featureStateManager.updateFeaturePlanSpec(projectPath, featureId, {
status: 'generated',
content: planContent,
version: 1,
generatedAt: new Date().toISOString(),
reviewedByUser: false,
tasks: parsedTasks,
tasksTotal: parsedTasks.length,
tasksCompleted: 0,
});
const planSummary = extractSummary(planContent);
if (planSummary) await callbacks.updateFeatureSummary(projectPath, featureId, planSummary);
let approvedPlanContent = planContent,
userFeedback: string | undefined,
currentPlanContent = planContent,
planVersion = 1;
if (requiresApproval) {
let planApproved = false;
while (!planApproved) {
logger.info(
`Spec v${planVersion} generated for feature ${featureId}, waiting for approval`
);
this.eventBus.emitAutoModeEvent('plan_approval_required', {
featureId,
projectPath,
branchName,
planContent: currentPlanContent,
planningMode,
planVersion,
});
const approvalResult = await callbacks.waitForApproval(featureId, projectPath);
if (approvalResult.approved) {
planApproved = true;
userFeedback = approvalResult.feedback;
approvedPlanContent = approvalResult.editedPlan || currentPlanContent;
if (approvalResult.editedPlan)
await this.featureStateManager.updateFeaturePlanSpec(projectPath, featureId, {
content: approvalResult.editedPlan,
});
this.eventBus.emitAutoModeEvent('plan_approved', {
featureId,
projectPath,
branchName,
hasEdits: !!approvalResult.editedPlan,
planVersion,
});
} else {
const hasFeedback = approvalResult.feedback?.trim().length,
hasEdits = approvalResult.editedPlan?.trim().length;
if (!hasFeedback && !hasEdits) throw new Error('Plan cancelled by user');
planVersion++;
this.eventBus.emitAutoModeEvent('plan_revision_requested', {
featureId,
projectPath,
branchName,
feedback: approvalResult.feedback,
hasEdits: !!hasEdits,
planVersion,
});
const revPrompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
const taskEx =
planningMode === 'full'
? '```tasks\n## Phase 1: Foundation\n- [ ] T001: [Description] | File: [path/to/file]\n```'
: '```tasks\n- [ ] T001: [Description] | File: [path/to/file]\n```';
let revPrompt = revPrompts.taskExecution.planRevisionTemplate
.replace(/\{\{planVersion\}\}/g, String(planVersion - 1))
.replace(
/\{\{previousPlan\}\}/g,
hasEdits ? approvalResult.editedPlan || currentPlanContent : currentPlanContent
)
.replace(
/\{\{userFeedback\}\}/g,
approvalResult.feedback || 'Please revise the plan based on the edits above.'
)
.replace(/\{\{planningMode\}\}/g, planningMode)
.replace(/\{\{taskFormatExample\}\}/g, taskEx);
await this.featureStateManager.updateFeaturePlanSpec(projectPath, featureId, {
status: 'generating',
version: planVersion,
});
let revText = '';
for await (const msg of provider.executeQuery(
this.buildExecOpts(options, revPrompt, sdkOptions?.maxTurns || 100)
)) {
if (msg.type === 'assistant' && msg.message?.content)
for (const b of msg.message.content)
if (b.type === 'text') {
revText += b.text || '';
this.eventBus.emitAutoModeEvent('auto_mode_progress', {
featureId,
content: b.text,
});
}
if (msg.type === 'error') throw new Error(msg.error || 'Error during plan revision');
if (msg.type === 'result' && msg.subtype === 'success') revText += msg.result || '';
}
const mi = revText.indexOf('[SPEC_GENERATED]');
currentPlanContent = mi > 0 ? revText.substring(0, mi).trim() : revText.trim();
const revisedTasks = parseTasksFromSpec(currentPlanContent);
if (revisedTasks.length === 0 && (planningMode === 'spec' || planningMode === 'full'))
this.eventBus.emitAutoModeEvent('plan_revision_warning', {
featureId,
projectPath,
branchName,
planningMode,
warning: 'Revised plan missing tasks block',
});
await this.featureStateManager.updateFeaturePlanSpec(projectPath, featureId, {
status: 'generated',
content: currentPlanContent,
version: planVersion,
tasks: revisedTasks,
tasksTotal: revisedTasks.length,
tasksCompleted: 0,
});
parsedTasks = revisedTasks;
responseText += revText;
}
}
} else {
this.eventBus.emitAutoModeEvent('plan_auto_approved', {
featureId,
projectPath,
branchName,
planContent,
planningMode,
});
}
await this.featureStateManager.updateFeaturePlanSpec(projectPath, featureId, {
status: 'approved',
approvedAt: new Date().toISOString(),
reviewedByUser: requiresApproval,
});
let tasksCompleted = 0;
if (parsedTasks.length > 0) {
const r = await this.executeTasksLoop(
options,
parsedTasks,
approvedPlanContent,
responseText,
scheduleWrite,
callbacks,
userFeedback
);
responseText = r.responseText;
tasksCompleted = r.tasksCompleted;
} else {
const r = await this.executeSingleAgentContinuation(
options,
approvedPlanContent,
userFeedback,
responseText
);
responseText = r.responseText;
}
const summary = extractSummary(responseText);
if (summary) await callbacks.saveFeatureSummary(projectPath, featureId, summary);
return { responseText, tasksCompleted };
}
private buildExecOpts(o: AgentExecutionOptions, prompt: string, maxTurns?: number) {
return {
prompt,
model: o.effectiveBareModel,
maxTurns,
cwd: o.workDir,
allowedTools: o.sdkOptions?.allowedTools as string[] | undefined,
abortController: o.abortController,
mcpServers:
o.mcpServers && Object.keys(o.mcpServers).length > 0
? (o.mcpServers as Record<string, { command: string }>)
: undefined,
credentials: o.credentials,
claudeCompatibleProvider: o.claudeCompatibleProvider,
};
}
private async executeSingleAgentContinuation(
options: AgentExecutionOptions,
planContent: string,
userFeedback: string | undefined,
initialResponseText: string
): Promise<{ responseText: string }> {
const { featureId, branchName = null, provider } = options;
logger.info(`No parsed tasks, using single-agent execution for feature ${featureId}`);
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
const contPrompt = prompts.taskExecution.continuationAfterApprovalTemplate
.replace(/\{\{userFeedback\}\}/g, userFeedback || '')
.replace(/\{\{approvedPlan\}\}/g, planContent);
let responseText = initialResponseText;
for await (const msg of provider.executeQuery(
this.buildExecOpts(options, contPrompt, options.sdkOptions?.maxTurns)
)) {
if (msg.type === 'assistant' && msg.message?.content)
for (const b of msg.message.content) {
if (b.type === 'text') {
responseText += b.text || '';
this.eventBus.emitAutoModeEvent('auto_mode_progress', {
featureId,
branchName,
content: b.text,
});
} else if (b.type === 'tool_use')
this.eventBus.emitAutoModeEvent('auto_mode_tool', {
featureId,
branchName,
tool: b.name,
input: b.input,
});
}
else if (msg.type === 'error')
throw new Error(msg.error || 'Unknown error during implementation');
else if (msg.type === 'result' && msg.subtype === 'success') responseText += msg.result || '';
}
return { responseText };
}
}
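
The executor above streams model output into memory and flushes it to agent-output.md with a 500 ms debounce, followed by a final write once the stream ends; an isolated sketch of that pattern (the write callback is a stand-in for the secureFs.writeFile call):

function createDebouncedWriter(write: (text: string) => Promise<void>, debounceMs = 500) {
  let buffer = '';
  let timer: ReturnType<typeof setTimeout> | null = null;
  return {
    append(chunk: string): void {
      buffer += chunk;
      if (timer) clearTimeout(timer);
      timer = setTimeout(() => void write(buffer), debounceMs); // debounced flush, like scheduleWrite()
    },
    async flush(): Promise<void> {
      if (timer) clearTimeout(timer);
      await write(buffer); // final write, mirroring writeToFile() after the stream ends
    },
  };
}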

View File

@@ -29,7 +29,7 @@ import {
   getSkillsConfiguration,
   getSubagentsConfiguration,
   getCustomSubagents,
-  getProviderByModelId,
+  getActiveClaudeApiProfile,
 } from '../lib/settings-helpers.js';

 interface Message {
@@ -275,29 +275,11 @@ export class AgentService {
         ? await getCustomSubagents(this.settingsService, effectiveWorkDir)
         : undefined;

-      // Get credentials for API calls
-      const credentials = await this.settingsService?.getCredentials();
-
-      // Try to find a provider for the model (if it's a provider model like "GLM-4.7")
-      // This allows users to select provider models in the Agent Runner UI
-      let claudeCompatibleProvider: import('@automaker/types').ClaudeCompatibleProvider | undefined;
-      let providerResolvedModel: string | undefined;
-      const requestedModel = model || session.model;
-      if (requestedModel && this.settingsService) {
-        const providerResult = await getProviderByModelId(
-          requestedModel,
-          this.settingsService,
-          '[AgentService]'
-        );
-        if (providerResult.provider) {
-          claudeCompatibleProvider = providerResult.provider;
-          providerResolvedModel = providerResult.resolvedModel;
-          this.logger.info(
-            `[AgentService] Using provider "${providerResult.provider.name}" for model "${requestedModel}"` +
-              (providerResolvedModel ? ` -> resolved to "${providerResolvedModel}"` : '')
-          );
-        }
-      }
+      // Get active Claude API profile for alternative endpoint configuration
+      const { profile: claudeApiProfile, credentials } = await getActiveClaudeApiProfile(
+        this.settingsService,
+        '[AgentService]'
+      );

       // Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
       // Use the user's message as task context for smart memory selection
@@ -324,16 +306,10 @@ export class AgentService {
       // Use thinking level and reasoning effort from request, or fall back to session's stored values
       const effectiveThinkingLevel = thinkingLevel ?? session.thinkingLevel;
       const effectiveReasoningEffort = reasoningEffort ?? session.reasoningEffort;

-      // When using a provider model, use the resolved Claude model (from mapsToClaudeModel)
-      // e.g., "GLM-4.5-Air" -> "claude-haiku-4-5"
-      const modelForSdk = providerResolvedModel || model;
-      const sessionModelForSdk = providerResolvedModel ? undefined : session.model;
-
       const sdkOptions = createChatOptions({
         cwd: effectiveWorkDir,
-        model: modelForSdk,
-        sessionModel: sessionModelForSdk,
+        model: model,
+        sessionModel: session.model,
         systemPrompt: combinedSystemPrompt,
         abortController: session.abortController!,
         autoLoadClaudeMd,
@@ -409,8 +385,8 @@ export class AgentService {
         agents: customSubagents, // Pass custom subagents for task delegation
         thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
         reasoningEffort: effectiveReasoningEffort, // Pass reasoning effort for Codex models
+        claudeApiProfile, // Pass active Claude API profile for alternative endpoint configuration
         credentials, // Pass credentials for resolving 'credentials' apiKeySource
-        claudeCompatibleProvider, // Pass provider for alternative endpoint configuration (GLM, MiniMax, etc.)
       };

       // Build prompt content with images
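
A toy re-implementation (not the real helper) of the model fallback removed above, mirroring the modelForSdk / sessionModelForSdk lines: when a provider mapping resolves the requested id (e.g. "GLM-4.5-Air" to "claude-haiku-4-5"), the resolved Claude model is used and the session model is dropped; otherwise both pass through unchanged.

// providerResolvedModel is the output of the removed getProviderByModelId lookup.
function pickSdkModels(
  model: string | undefined,
  sessionModel: string | undefined,
  providerResolvedModel: string | undefined
): { model?: string; sessionModel?: string } {
  return {
    model: providerResolvedModel || model,
    sessionModel: providerResolvedModel ? undefined : sessionModel,
  };
}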

View File

@@ -1,366 +0,0 @@
/**
* AutoLoopCoordinator - Manages the auto-mode loop lifecycle and failure tracking
*/
import type { Feature } from '@automaker/types';
import { createLogger, classifyError } from '@automaker/utils';
import type { TypedEventBus } from './typed-event-bus.js';
import type { ConcurrencyManager } from './concurrency-manager.js';
import type { SettingsService } from './settings-service.js';
import { DEFAULT_MAX_CONCURRENCY } from '@automaker/types';
const logger = createLogger('AutoLoopCoordinator');
const CONSECUTIVE_FAILURE_THRESHOLD = 3;
const FAILURE_WINDOW_MS = 60000;
export interface AutoModeConfig {
maxConcurrency: number;
useWorktrees: boolean;
projectPath: string;
branchName: string | null;
}
export interface ProjectAutoLoopState {
abortController: AbortController;
config: AutoModeConfig;
isRunning: boolean;
consecutiveFailures: { timestamp: number; error: string }[];
pausedDueToFailures: boolean;
hasEmittedIdleEvent: boolean;
branchName: string | null;
}
export function getWorktreeAutoLoopKey(projectPath: string, branchName: string | null): string {
return `${projectPath}::${(branchName === 'main' ? null : branchName) ?? '__main__'}`;
}
export type ExecuteFeatureFn = (
projectPath: string,
featureId: string,
useWorktrees: boolean,
isAutoMode: boolean
) => Promise<void>;
export type LoadPendingFeaturesFn = (
projectPath: string,
branchName: string | null
) => Promise<Feature[]>;
export type SaveExecutionStateFn = (
projectPath: string,
branchName: string | null,
maxConcurrency: number
) => Promise<void>;
export type ClearExecutionStateFn = (
projectPath: string,
branchName: string | null
) => Promise<void>;
export type ResetStuckFeaturesFn = (projectPath: string) => Promise<void>;
export type IsFeatureFinishedFn = (feature: Feature) => boolean;
export class AutoLoopCoordinator {
private autoLoopsByProject = new Map<string, ProjectAutoLoopState>();
constructor(
private eventBus: TypedEventBus,
private concurrencyManager: ConcurrencyManager,
private settingsService: SettingsService | null,
private executeFeatureFn: ExecuteFeatureFn,
private loadPendingFeaturesFn: LoadPendingFeaturesFn,
private saveExecutionStateFn: SaveExecutionStateFn,
private clearExecutionStateFn: ClearExecutionStateFn,
private resetStuckFeaturesFn: ResetStuckFeaturesFn,
private isFeatureFinishedFn: IsFeatureFinishedFn,
private isFeatureRunningFn: (featureId: string) => boolean
) {}
/**
* Start the auto mode loop for a specific project/worktree (supports multiple concurrent projects and worktrees)
* @param projectPath - The project to start auto mode for
* @param branchName - The branch name for worktree scoping, null for main worktree
* @param maxConcurrency - Maximum concurrent features (default: DEFAULT_MAX_CONCURRENCY)
*/
async startAutoLoopForProject(
projectPath: string,
branchName: string | null = null,
maxConcurrency?: number
): Promise<number> {
const resolvedMaxConcurrency = await this.resolveMaxConcurrency(
projectPath,
branchName,
maxConcurrency
);
// Use worktree-scoped key
const worktreeKey = getWorktreeAutoLoopKey(projectPath, branchName);
// Check if this project/worktree already has an active autoloop
const existingState = this.autoLoopsByProject.get(worktreeKey);
if (existingState?.isRunning) {
const worktreeDesc = branchName ? `worktree ${branchName}` : 'main worktree';
throw new Error(
`Auto mode is already running for ${worktreeDesc} in project: ${projectPath}`
);
}
// Create new project/worktree autoloop state
const abortController = new AbortController();
const config: AutoModeConfig = {
maxConcurrency: resolvedMaxConcurrency,
useWorktrees: true,
projectPath,
branchName,
};
const projectState: ProjectAutoLoopState = {
abortController,
config,
isRunning: true,
consecutiveFailures: [],
pausedDueToFailures: false,
hasEmittedIdleEvent: false,
branchName,
};
this.autoLoopsByProject.set(worktreeKey, projectState);
try {
await this.resetStuckFeaturesFn(projectPath);
} catch {
/* ignore */
}
this.eventBus.emitAutoModeEvent('auto_mode_started', {
message: `Auto mode started with max ${resolvedMaxConcurrency} concurrent features`,
projectPath,
branchName,
maxConcurrency: resolvedMaxConcurrency,
});
await this.saveExecutionStateFn(projectPath, branchName, resolvedMaxConcurrency);
this.runAutoLoopForProject(worktreeKey).catch((error) => {
const errorInfo = classifyError(error);
this.eventBus.emitAutoModeEvent('auto_mode_error', {
error: errorInfo.message,
errorType: errorInfo.type,
projectPath,
branchName,
});
});
return resolvedMaxConcurrency;
}
private async runAutoLoopForProject(worktreeKey: string): Promise<void> {
const projectState = this.autoLoopsByProject.get(worktreeKey);
if (!projectState) return;
const { projectPath, branchName } = projectState.config;
let iterationCount = 0;
while (projectState.isRunning && !projectState.abortController.signal.aborted) {
iterationCount++;
try {
const runningCount = await this.getRunningCountForWorktree(projectPath, branchName);
if (runningCount >= projectState.config.maxConcurrency) {
await this.sleep(5000, projectState.abortController.signal);
continue;
}
const pendingFeatures = await this.loadPendingFeaturesFn(projectPath, branchName);
if (pendingFeatures.length === 0) {
if (runningCount === 0 && !projectState.hasEmittedIdleEvent) {
this.eventBus.emitAutoModeEvent('auto_mode_idle', {
message: 'No pending features - auto mode idle',
projectPath,
branchName,
});
projectState.hasEmittedIdleEvent = true;
}
await this.sleep(10000, projectState.abortController.signal);
continue;
}
const nextFeature = pendingFeatures.find(
(f) => !this.isFeatureRunningFn(f.id) && !this.isFeatureFinishedFn(f)
);
if (nextFeature) {
projectState.hasEmittedIdleEvent = false;
this.executeFeatureFn(
projectPath,
nextFeature.id,
projectState.config.useWorktrees,
true
).catch(() => {});
}
await this.sleep(2000, projectState.abortController.signal);
} catch {
if (projectState.abortController.signal.aborted) break;
await this.sleep(5000, projectState.abortController.signal);
}
}
projectState.isRunning = false;
}
async stopAutoLoopForProject(
projectPath: string,
branchName: string | null = null
): Promise<number> {
const worktreeKey = getWorktreeAutoLoopKey(projectPath, branchName);
const projectState = this.autoLoopsByProject.get(worktreeKey);
if (!projectState) return 0;
const wasRunning = projectState.isRunning;
projectState.isRunning = false;
projectState.abortController.abort();
await this.clearExecutionStateFn(projectPath, branchName);
if (wasRunning)
this.eventBus.emitAutoModeEvent('auto_mode_stopped', {
message: 'Auto mode stopped',
projectPath,
branchName,
});
this.autoLoopsByProject.delete(worktreeKey);
return await this.getRunningCountForWorktree(projectPath, branchName);
}
isAutoLoopRunningForProject(projectPath: string, branchName: string | null = null): boolean {
const worktreeKey = getWorktreeAutoLoopKey(projectPath, branchName);
const projectState = this.autoLoopsByProject.get(worktreeKey);
return projectState?.isRunning ?? false;
}
/**
* Get auto loop config for a specific project/worktree
* @param projectPath - The project path
* @param branchName - The branch name, or null for main worktree
*/
getAutoLoopConfigForProject(
projectPath: string,
branchName: string | null = null
): AutoModeConfig | null {
const worktreeKey = getWorktreeAutoLoopKey(projectPath, branchName);
const projectState = this.autoLoopsByProject.get(worktreeKey);
return projectState?.config ?? null;
}
/**
* Get all active auto loop worktrees with their project paths and branch names
*/
getActiveWorktrees(): Array<{ projectPath: string; branchName: string | null }> {
const activeWorktrees: Array<{ projectPath: string; branchName: string | null }> = [];
for (const [, state] of this.autoLoopsByProject) {
if (state.isRunning) {
activeWorktrees.push({
projectPath: state.config.projectPath,
branchName: state.branchName,
});
}
}
return activeWorktrees;
}
getActiveProjects(): string[] {
const activeProjects = new Set<string>();
for (const [, state] of this.autoLoopsByProject) {
if (state.isRunning) activeProjects.add(state.config.projectPath);
}
return Array.from(activeProjects);
}
async getRunningCountForWorktree(
projectPath: string,
branchName: string | null
): Promise<number> {
return this.concurrencyManager.getRunningCountForWorktree(projectPath, branchName);
}
trackFailureAndCheckPauseForProject(
projectPath: string,
errorInfo: { type: string; message: string }
): boolean {
const projectState = this.autoLoopsByProject.get(getWorktreeAutoLoopKey(projectPath, null));
if (!projectState) return false;
const now = Date.now();
projectState.consecutiveFailures.push({ timestamp: now, error: errorInfo.message });
projectState.consecutiveFailures = projectState.consecutiveFailures.filter(
(f) => now - f.timestamp < FAILURE_WINDOW_MS
);
return (
projectState.consecutiveFailures.length >= CONSECUTIVE_FAILURE_THRESHOLD ||
errorInfo.type === 'quota_exhausted' ||
errorInfo.type === 'rate_limit'
);
}
signalShouldPauseForProject(
projectPath: string,
errorInfo: { type: string; message: string }
): void {
const projectState = this.autoLoopsByProject.get(getWorktreeAutoLoopKey(projectPath, null));
if (!projectState || projectState.pausedDueToFailures) return;
projectState.pausedDueToFailures = true;
const failureCount = projectState.consecutiveFailures.length;
this.eventBus.emitAutoModeEvent('auto_mode_paused_failures', {
message:
failureCount >= CONSECUTIVE_FAILURE_THRESHOLD
? `Auto Mode paused: ${failureCount} consecutive failures detected.`
: 'Auto Mode paused: Usage limit or API error detected.',
errorType: errorInfo.type,
originalError: errorInfo.message,
failureCount,
projectPath,
});
this.stopAutoLoopForProject(projectPath);
}
resetFailureTrackingForProject(projectPath: string): void {
const projectState = this.autoLoopsByProject.get(getWorktreeAutoLoopKey(projectPath, null));
if (projectState) {
projectState.consecutiveFailures = [];
projectState.pausedDueToFailures = false;
}
}
recordSuccessForProject(projectPath: string): void {
const projectState = this.autoLoopsByProject.get(getWorktreeAutoLoopKey(projectPath, null));
if (projectState) projectState.consecutiveFailures = [];
}
async resolveMaxConcurrency(
projectPath: string,
branchName: string | null,
provided?: number
): Promise<number> {
if (typeof provided === 'number' && Number.isFinite(provided)) return provided;
if (!this.settingsService) return DEFAULT_MAX_CONCURRENCY;
try {
const settings = await this.settingsService.getGlobalSettings();
const globalMax =
typeof settings.maxConcurrency === 'number'
? settings.maxConcurrency
: DEFAULT_MAX_CONCURRENCY;
const projectId = settings.projects?.find((p) => p.path === projectPath)?.id;
const autoModeByWorktree = settings.autoModeByWorktree;
if (projectId && autoModeByWorktree && typeof autoModeByWorktree === 'object') {
const normalizedBranch =
branchName === null || branchName === 'main' ? '__main__' : branchName;
const worktreeId = `${projectId}::${normalizedBranch}`;
if (
worktreeId in autoModeByWorktree &&
typeof autoModeByWorktree[worktreeId]?.maxConcurrency === 'number'
) {
return autoModeByWorktree[worktreeId].maxConcurrency;
}
}
return globalMax;
} catch {
return DEFAULT_MAX_CONCURRENCY;
}
}
private sleep(ms: number, signal?: AbortSignal): Promise<void> {
return new Promise((resolve, reject) => {
if (signal?.aborted) {
reject(new Error('Aborted'));
return;
}
const timeout = setTimeout(resolve, ms);
signal?.addEventListener('abort', () => {
clearTimeout(timeout);
reject(new Error('Aborted'));
});
});
}
}
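
A stand-alone illustration of the pause decision implemented by trackFailureAndCheckPauseForProject above, using the same thresholds (three failures inside a 60-second window, or an immediate pause on quota/rate-limit errors):

const CONSECUTIVE_FAILURE_THRESHOLD = 3;
const FAILURE_WINDOW_MS = 60_000;

function shouldPauseAutoMode(
  failureTimestamps: number[],
  errorType: string,
  now: number = Date.now()
): boolean {
  const recentFailures = failureTimestamps.filter((t) => now - t < FAILURE_WINDOW_MS);
  return (
    recentFailures.length >= CONSECUTIVE_FAILURE_THRESHOLD ||
    errorType === 'quota_exhausted' ||
    errorType === 'rate_limit'
  );
}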

Some files were not shown because too many files have changed in this diff.