mirror of
https://github.com/AutoMaker-Org/automaker.git
synced 2026-03-17 22:13:08 +00:00
Compare commits
162 Commits
v0.14.0
...
feature/pu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa345a50ac | ||
|
|
0e020f7e4a | ||
|
|
0a5540c9a2 | ||
|
|
7df2182818 | ||
|
|
ee52333636 | ||
|
|
47bd7a76cf | ||
|
|
ae10dea2bf | ||
|
|
be4153c374 | ||
|
|
a144a63c51 | ||
|
|
205f662022 | ||
|
|
53d07fefb8 | ||
|
|
2d907938cc | ||
|
|
15ca1eb6d3 | ||
|
|
4ee160fae4 | ||
|
|
4ba0026aa1 | ||
|
|
983eb21faa | ||
|
|
df9a6314da | ||
|
|
6903d3c508 | ||
|
|
5c441f2313 | ||
|
|
00f9891237 | ||
|
|
d30296d559 | ||
|
|
e6e04d57bc | ||
|
|
829c16181b | ||
|
|
13261b7e8c | ||
|
|
854ba6ec74 | ||
|
|
bddf1a4bf8 | ||
|
|
887e2ea76b | ||
|
|
dd4c738e91 | ||
|
|
43c19c70ca | ||
|
|
627580a8f0 | ||
|
|
a2d5c1d546 | ||
|
|
6b9946df95 | ||
|
|
cb99c4b4e8 | ||
|
|
9af63bc1ef | ||
|
|
17a99a0e20 | ||
|
|
f4e87d4c25 | ||
|
|
c7f515adde | ||
|
|
1df778a9db | ||
|
|
cb44f8a717 | ||
|
|
7fcf3c1e1f | ||
|
|
de021f96bf | ||
|
|
8bb10632b1 | ||
|
|
06ef4f883f | ||
|
|
7e84591ef1 | ||
|
|
efcdd849b9 | ||
|
|
dee770c2ab | ||
|
|
f7b3f75163 | ||
|
|
b5ad77b0f9 | ||
|
|
98b925b821 | ||
|
|
a09a2c76ae | ||
|
|
b9653d6338 | ||
|
|
44ef2084cf | ||
|
|
57446b4fba | ||
|
|
fa799d3cb5 | ||
|
|
78ec389477 | ||
|
|
f06088a062 | ||
|
|
8af1b8bd08 | ||
|
|
d5340fd1a4 | ||
|
|
aa940d44ff | ||
|
|
381698b048 | ||
|
|
30fce3f746 | ||
|
|
4a8c6b0eba | ||
|
|
416ef3a394 | ||
|
|
2805c0ea53 | ||
|
|
727a7a5b9d | ||
|
|
46dd219d15 | ||
|
|
67dd628115 | ||
|
|
ab5d6a0e54 | ||
|
|
0b03e70f1d | ||
|
|
434792a2ef | ||
|
|
462dbf1522 | ||
|
|
eed5e20438 | ||
|
|
bea26a6b61 | ||
|
|
e9802ac00c | ||
|
|
41014f6ab6 | ||
|
|
ac2e8cfa88 | ||
|
|
7d5bc722fa | ||
|
|
7765a12868 | ||
|
|
dfe6920df9 | ||
|
|
525b2f82b6 | ||
|
|
f459b73cb5 | ||
|
|
a935229031 | ||
|
|
a3a5c9e2cb | ||
|
|
1662c6bf0b | ||
|
|
a08ba1b517 | ||
|
|
8226699734 | ||
|
|
d4439fafa0 | ||
|
|
6f1325f3ee | ||
|
|
d4f68b659b | ||
|
|
ad6ce738b4 | ||
|
|
67ebf8c14b | ||
|
|
8ed13564f6 | ||
|
|
09507bff67 | ||
|
|
c70344156d | ||
|
|
8542a32f4f | ||
|
|
0745832d1e | ||
|
|
0f0f5159d2 | ||
|
|
bcc854234c | ||
|
|
5ffbfb3217 | ||
|
|
7c89923a6e | ||
|
|
63b1a353d9 | ||
|
|
49bdaaae71 | ||
|
|
28224e1051 | ||
|
|
df10bcd6df | ||
|
|
0ed4494992 | ||
|
|
43309e383f | ||
|
|
efd4284c10 | ||
|
|
473f935c90 | ||
|
|
7fd3d61a59 | ||
|
|
7bc1f68699 | ||
|
|
ade22ef258 | ||
|
|
31f8afc115 | ||
|
|
071af1b5c3 | ||
|
|
1b32a6bc3a | ||
|
|
a0484624b7 | ||
|
|
0383f85507 | ||
|
|
1a7dd5d1eb | ||
|
|
afa60399dc | ||
|
|
1b39e25497 | ||
|
|
828d0a0148 | ||
|
|
18624d12ce | ||
|
|
71a0309a0b | ||
|
|
e0f785aa99 | ||
|
|
2aa156ecbf | ||
|
|
94a8e09516 | ||
|
|
78072550c7 | ||
|
|
0cd149f2e3 | ||
|
|
2e577bb230 | ||
|
|
4f00b41cb0 | ||
|
|
ba45587a0a | ||
|
|
4912d37990 | ||
|
|
b24839bc49 | ||
|
|
e3a1c8c312 | ||
|
|
8f245e7757 | ||
|
|
cbb45b6612 | ||
|
|
25fa6fd616 | ||
|
|
ec5179eee9 | ||
|
|
2fac438cde | ||
|
|
5dca97dab4 | ||
|
|
58facb114c | ||
|
|
8387b7669d | ||
|
|
18fd1c6caa | ||
|
|
6029e95403 | ||
|
|
1eb28206c5 | ||
|
|
bc9dae0322 | ||
|
|
3bcdc883e6 | ||
|
|
c92c8e96b7 | ||
|
|
b73ef9f801 | ||
|
|
70fc03431c | ||
|
|
a0ea65d483 | ||
|
|
ef544e70c9 | ||
|
|
152cf00735 | ||
|
|
094f0809d7 | ||
|
|
220c8e4ddf | ||
|
|
f97453484f | ||
|
|
835ffe3185 | ||
|
|
3b361cb0b9 | ||
|
|
0aef72540e | ||
|
|
aad3ff2cdf | ||
|
|
3ccea7a67b | ||
|
|
b37a287c9c | ||
|
|
45f6f17eb0 |
18
.github/actions/setup-project/action.yml
vendored
18
.github/actions/setup-project/action.yml
vendored
@@ -25,17 +25,24 @@ runs:
|
|||||||
cache: 'npm'
|
cache: 'npm'
|
||||||
cache-dependency-path: package-lock.json
|
cache-dependency-path: package-lock.json
|
||||||
|
|
||||||
- name: Check for SSH URLs in lockfile
|
|
||||||
if: inputs.check-lockfile == 'true'
|
|
||||||
shell: bash
|
|
||||||
run: npm run lint:lockfile
|
|
||||||
|
|
||||||
- name: Configure Git for HTTPS
|
- name: Configure Git for HTTPS
|
||||||
shell: bash
|
shell: bash
|
||||||
# Convert SSH URLs to HTTPS for git dependencies (e.g., @electron/node-gyp)
|
# Convert SSH URLs to HTTPS for git dependencies (e.g., @electron/node-gyp)
|
||||||
# This is needed because SSH authentication isn't available in CI
|
# This is needed because SSH authentication isn't available in CI
|
||||||
run: git config --global url."https://github.com/".insteadOf "git@github.com:"
|
run: git config --global url."https://github.com/".insteadOf "git@github.com:"
|
||||||
|
|
||||||
|
- name: Auto-fix SSH URLs in lockfile
|
||||||
|
if: inputs.check-lockfile == 'true'
|
||||||
|
shell: bash
|
||||||
|
# Auto-fix any git+ssh:// URLs in package-lock.json before linting
|
||||||
|
# This handles cases where npm reintroduces SSH URLs for git dependencies
|
||||||
|
run: node scripts/fix-lockfile-urls.mjs
|
||||||
|
|
||||||
|
- name: Check for SSH URLs in lockfile
|
||||||
|
if: inputs.check-lockfile == 'true'
|
||||||
|
shell: bash
|
||||||
|
run: npm run lint:lockfile
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
shell: bash
|
shell: bash
|
||||||
# Use npm install instead of npm ci to correctly resolve platform-specific
|
# Use npm install instead of npm ci to correctly resolve platform-specific
|
||||||
@@ -45,6 +52,7 @@ runs:
|
|||||||
run: npm install --ignore-scripts --force
|
run: npm install --ignore-scripts --force
|
||||||
|
|
||||||
- name: Install Linux native bindings
|
- name: Install Linux native bindings
|
||||||
|
if: runner.os == 'Linux'
|
||||||
shell: bash
|
shell: bash
|
||||||
# Workaround for npm optional dependencies bug (npm/cli#4828)
|
# Workaround for npm optional dependencies bug (npm/cli#4828)
|
||||||
# Explicitly install Linux bindings needed for build tools
|
# Explicitly install Linux bindings needed for build tools
|
||||||
|
|||||||
1
.github/workflows/e2e-tests.yml
vendored
1
.github/workflows/e2e-tests.yml
vendored
@@ -133,6 +133,7 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
CI: true
|
CI: true
|
||||||
VITE_SERVER_URL: http://localhost:3008
|
VITE_SERVER_URL: http://localhost:3008
|
||||||
|
SERVER_URL: http://localhost:3008
|
||||||
VITE_SKIP_SETUP: 'true'
|
VITE_SKIP_SETUP: 'true'
|
||||||
# Keep UI-side login/defaults consistent
|
# Keep UI-side login/defaults consistent
|
||||||
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
|
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
|
||||||
|
|||||||
4
.github/workflows/release.yml
vendored
4
.github/workflows/release.yml
vendored
@@ -95,9 +95,11 @@ jobs:
|
|||||||
upload:
|
upload:
|
||||||
needs: build
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: github.event.release.draft == false
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Download macOS artifacts
|
- name: Download macOS artifacts
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
|
|||||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -90,8 +90,15 @@ pnpm-lock.yaml
|
|||||||
yarn.lock
|
yarn.lock
|
||||||
|
|
||||||
# Fork-specific workflow files (should never be committed)
|
# Fork-specific workflow files (should never be committed)
|
||||||
|
DEVELOPMENT_WORKFLOW.md
|
||||||
|
check-sync.sh
|
||||||
# API key files
|
# API key files
|
||||||
data/.api-key
|
data/.api-key
|
||||||
data/credentials.json
|
data/credentials.json
|
||||||
data/
|
data/
|
||||||
.codex/
|
.codex/
|
||||||
|
|
||||||
|
# GSD planning docs (local-only)
|
||||||
|
.planning/
|
||||||
|
.mcp.json
|
||||||
|
.planning
|
||||||
|
|||||||
@@ -38,6 +38,18 @@ else
|
|||||||
export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
|
export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Auto-fix git+ssh:// URLs in package-lock.json if it's being committed
|
||||||
|
# This prevents CI failures from SSH URLs that npm introduces for git dependencies
|
||||||
|
if git diff --cached --name-only | grep -q "^package-lock.json$"; then
|
||||||
|
if command -v node >/dev/null 2>&1; then
|
||||||
|
if grep -q "git+ssh://" package-lock.json 2>/dev/null; then
|
||||||
|
echo "Fixing git+ssh:// URLs in package-lock.json..."
|
||||||
|
node scripts/fix-lockfile-urls.mjs
|
||||||
|
git add package-lock.json
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
# Run lint-staged - works with or without nvm
|
# Run lint-staged - works with or without nvm
|
||||||
# Prefer npx, fallback to npm exec, both work with system-installed Node.js
|
# Prefer npx, fallback to npm exec, both work with system-installed Node.js
|
||||||
if command -v npx >/dev/null 2>&1; then
|
if command -v npx >/dev/null 2>&1; then
|
||||||
|
|||||||
81
.planning/PROJECT.md
Normal file
81
.planning/PROJECT.md
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
# AutoModeService Refactoring
|
||||||
|
|
||||||
|
## What This Is
|
||||||
|
|
||||||
|
A comprehensive refactoring of the `auto-mode-service.ts` file (5k+ lines) into smaller, focused services with clear boundaries. This is an architectural cleanup of accumulated technical debt from rapid development, breaking the "god object" anti-pattern into maintainable, debuggable modules.
|
||||||
|
|
||||||
|
## Core Value
|
||||||
|
|
||||||
|
All existing auto-mode functionality continues working — features execute, pipelines flow, merges complete — while the codebase becomes maintainable.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
### Validated
|
||||||
|
|
||||||
|
<!-- Existing functionality that must be preserved -->
|
||||||
|
|
||||||
|
- ✓ Single feature execution with AI agent — existing
|
||||||
|
- ✓ Concurrent execution with configurable limits — existing
|
||||||
|
- ✓ Pipeline orchestration (backlog → in-progress → approval → verified) — existing
|
||||||
|
- ✓ Git worktree isolation per feature — existing
|
||||||
|
- ✓ Automatic merging of completed work — existing
|
||||||
|
- ✓ Custom pipeline support — existing
|
||||||
|
- ✓ Test runner integration — existing
|
||||||
|
- ✓ Event streaming to frontend — existing
|
||||||
|
|
||||||
|
### Active
|
||||||
|
|
||||||
|
<!-- Refactoring goals -->
|
||||||
|
|
||||||
|
- [ ] No service file exceeds ~500 lines
|
||||||
|
- [ ] Each service has single, clear responsibility
|
||||||
|
- [ ] Service boundaries make debugging obvious
|
||||||
|
- [ ] Changes to one service don't risk breaking unrelated features
|
||||||
|
- [ ] Test coverage for critical paths
|
||||||
|
|
||||||
|
### Out of Scope
|
||||||
|
|
||||||
|
- New auto-mode features — this is cleanup, not enhancement
|
||||||
|
- UI changes — backend refactor only
|
||||||
|
- Performance optimization — maintain current performance, don't optimize
|
||||||
|
- Other service refactoring — focus on auto-mode-service.ts only
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
**Current state:** `apps/server/src/services/auto-mode-service.ts` is ~5700 lines handling:
|
||||||
|
|
||||||
|
- Worktree management (create, cleanup, track)
|
||||||
|
- Agent/task execution coordination
|
||||||
|
- Concurrency control and queue management
|
||||||
|
- Pipeline state machine (column transitions)
|
||||||
|
- Merge handling and conflict resolution
|
||||||
|
- Event emission for real-time updates
|
||||||
|
|
||||||
|
**Technical environment:**
|
||||||
|
|
||||||
|
- Express 5 backend, TypeScript
|
||||||
|
- Event-driven architecture via EventEmitter
|
||||||
|
- WebSocket streaming to React frontend
|
||||||
|
- Git worktrees via @automaker/git-utils
|
||||||
|
- Minimal existing test coverage
|
||||||
|
|
||||||
|
**Codebase analysis:** See `.planning/codebase/` for full architecture, conventions, and existing patterns.
|
||||||
|
|
||||||
|
## Constraints
|
||||||
|
|
||||||
|
- **Breaking changes**: Acceptable — other parts of the app can be updated to match new service interfaces
|
||||||
|
- **Test coverage**: Currently minimal — must add tests during refactoring to catch regressions
|
||||||
|
- **Incremental approach**: Required — can't do big-bang rewrite with everything critical
|
||||||
|
- **Existing patterns**: Follow conventions in `.planning/codebase/CONVENTIONS.md`
|
||||||
|
|
||||||
|
## Key Decisions
|
||||||
|
|
||||||
|
| Decision | Rationale | Outcome |
|
||||||
|
| ------------------------- | --------------------------------------------------- | --------- |
|
||||||
|
| Accept breaking changes | Allows cleaner interfaces, worth the migration cost | — Pending |
|
||||||
|
| Add tests during refactor | No existing safety net, need to build one | — Pending |
|
||||||
|
| Incremental extraction | Everything is critical, can't break it all at once | — Pending |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Last updated: 2026-01-27 after initialization_
|
||||||
234
.planning/codebase/ARCHITECTURE.md
Normal file
234
.planning/codebase/ARCHITECTURE.md
Normal file
@@ -0,0 +1,234 @@
|
|||||||
|
# Architecture
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Pattern Overview
|
||||||
|
|
||||||
|
**Overall:** Monorepo with layered client-server architecture (Electron-first) and pluggable provider abstraction for AI models.
|
||||||
|
|
||||||
|
**Key Characteristics:**
|
||||||
|
|
||||||
|
- Event-driven communication via WebSocket between frontend and backend
|
||||||
|
- Multi-provider AI model abstraction layer (Claude, Cursor, Codex, Gemini, OpenCode, Copilot)
|
||||||
|
- Feature-centric workflow stored in `.automaker/` directories
|
||||||
|
- Isolated git worktree execution for each feature
|
||||||
|
- State management through Zustand stores with API persistence
|
||||||
|
|
||||||
|
## Layers
|
||||||
|
|
||||||
|
**Presentation Layer (UI):**
|
||||||
|
|
||||||
|
- Purpose: React 19 Electron/web frontend with TanStack Router file-based routing
|
||||||
|
- Location: `apps/ui/src/`
|
||||||
|
- Contains: Route components, view pages, custom React hooks, Zustand stores, API client
|
||||||
|
- Depends on: @automaker/types, @automaker/utils, HTTP API backend
|
||||||
|
- Used by: Electron main process (desktop), web browser (web mode)
|
||||||
|
|
||||||
|
**API Layer (Server):**
|
||||||
|
|
||||||
|
- Purpose: Express 5 backend exposing RESTful and WebSocket endpoints
|
||||||
|
- Location: `apps/server/src/`
|
||||||
|
- Contains: Route handlers, business logic services, middleware, provider adapters
|
||||||
|
- Depends on: @automaker/types, @automaker/utils, @automaker/platform, Claude Agent SDK
|
||||||
|
- Used by: UI frontend via HTTP/WebSocket
|
||||||
|
|
||||||
|
**Service Layer (Server):**
|
||||||
|
|
||||||
|
- Purpose: Business logic and domain operations
|
||||||
|
- Location: `apps/server/src/services/`
|
||||||
|
- Contains: AgentService, FeatureLoader, AutoModeService, SettingsService, DevServerService, etc.
|
||||||
|
- Depends on: Providers, secure filesystem, feature storage
|
||||||
|
- Used by: Route handlers
|
||||||
|
|
||||||
|
**Provider Abstraction (Server):**
|
||||||
|
|
||||||
|
- Purpose: Unified interface for different AI model providers
|
||||||
|
- Location: `apps/server/src/providers/`
|
||||||
|
- Contains: ProviderFactory, specific provider implementations (ClaudeProvider, CursorProvider, CodexProvider, GeminiProvider, OpencodeProvider, CopilotProvider)
|
||||||
|
- Depends on: @automaker/types, provider SDKs
|
||||||
|
- Used by: AgentService
|
||||||
|
|
||||||
|
**Shared Library Layer:**
|
||||||
|
|
||||||
|
- Purpose: Type definitions and utilities shared across apps
|
||||||
|
- Location: `libs/`
|
||||||
|
- Contains: @automaker/types, @automaker/utils, @automaker/platform, @automaker/prompts, @automaker/model-resolver, @automaker/dependency-resolver, @automaker/git-utils, @automaker/spec-parser
|
||||||
|
- Depends on: None (types has no external deps)
|
||||||
|
- Used by: All apps and services
|
||||||
|
|
||||||
|
## Data Flow
|
||||||
|
|
||||||
|
**Feature Execution Flow:**
|
||||||
|
|
||||||
|
1. User creates/updates feature via UI (`apps/ui/src/`)
|
||||||
|
2. UI sends HTTP request to backend (`POST /api/features`)
|
||||||
|
3. Server route handler invokes FeatureLoader to persist to `.automaker/features/{featureId}/`
|
||||||
|
4. When executing, AgentService loads feature, creates isolated git worktree via @automaker/git-utils
|
||||||
|
5. AgentService invokes ProviderFactory to get appropriate AI provider (Claude, Cursor, etc.)
|
||||||
|
6. Provider executes with context from CLAUDE.md files via @automaker/utils loadContextFiles()
|
||||||
|
7. Server emits events via EventEmitter throughout execution
|
||||||
|
8. Events stream to frontend via WebSocket
|
||||||
|
9. UI updates stores and renders real-time progress
|
||||||
|
10. Feature results persist back to `.automaker/features/` with generated agent-output.md
|
||||||
|
|
||||||
|
**State Management:**
|
||||||
|
|
||||||
|
**Frontend State (Zustand):**
|
||||||
|
|
||||||
|
- `app-store.ts`: Global app state (projects, features, settings, boards, themes)
|
||||||
|
- `setup-store.ts`: First-time setup wizard flow
|
||||||
|
- `ideation-store.ts`: Ideation feature state
|
||||||
|
- `test-runners-store.ts`: Test runner configurations
|
||||||
|
- Settings now persist via API (`/api/settings`) rather than localStorage (see use-settings-sync.ts)
|
||||||
|
|
||||||
|
**Backend State (Services):**
|
||||||
|
|
||||||
|
- SettingsService: Global and project-specific settings (in-memory with file persistence)
|
||||||
|
- AgentService: Active agent sessions and conversation history
|
||||||
|
- FeatureLoader: Feature data model operations
|
||||||
|
- DevServerService: Development server logs
|
||||||
|
- EventHistoryService: Persists event logs for replay
|
||||||
|
|
||||||
|
**Real-Time Updates (WebSocket):**
|
||||||
|
|
||||||
|
- Server EventEmitter emits TypedEvent (type + payload)
|
||||||
|
- WebSocket handler subscribes to events and broadcasts to all clients
|
||||||
|
- Frontend listens on multiple WebSocket subscriptions and updates stores
|
||||||
|
|
||||||
|
## Key Abstractions
|
||||||
|
|
||||||
|
**Feature:**
|
||||||
|
|
||||||
|
- Purpose: Represents a development task/story with rich metadata
|
||||||
|
- Location: @automaker/types → `libs/types/src/feature.ts`
|
||||||
|
- Fields: id, title, description, status, images, tasks, priority, etc.
|
||||||
|
- Stored: `.automaker/features/{featureId}/feature.json`
|
||||||
|
|
||||||
|
**Provider:**
|
||||||
|
|
||||||
|
- Purpose: Abstracts different AI model implementations
|
||||||
|
- Location: `apps/server/src/providers/{provider}-provider.ts`
|
||||||
|
- Interface: Common execute() method with consistent message format
|
||||||
|
- Implementations: Claude, Cursor, Codex, Gemini, OpenCode, Copilot
|
||||||
|
- Factory: ProviderFactory picks correct provider based on model ID
|
||||||
|
|
||||||
|
**Event:**
|
||||||
|
|
||||||
|
- Purpose: Real-time updates streamed to frontend
|
||||||
|
- Location: @automaker/types → `libs/types/src/event.ts`
|
||||||
|
- Format: { type: EventType, payload: unknown }
|
||||||
|
- Examples: agent-started, agent-step, agent-complete, feature-updated, etc.
|
||||||
|
|
||||||
|
**AgentSession:**
|
||||||
|
|
||||||
|
- Purpose: Represents a conversation between user and AI agent
|
||||||
|
- Location: @automaker/types → `libs/types/src/session.ts`
|
||||||
|
- Contains: Messages (user + assistant), metadata, creation timestamp
|
||||||
|
- Stored: `{DATA_DIR}/agent-sessions/{sessionId}.json`
|
||||||
|
|
||||||
|
**Settings:**
|
||||||
|
|
||||||
|
- Purpose: Configuration for global and per-project behavior
|
||||||
|
- Location: @automaker/types → `libs/types/src/settings.ts`
|
||||||
|
- Stored: Global in `{DATA_DIR}/settings.json`, per-project in `.automaker/settings.json`
|
||||||
|
- Service: SettingsService in `apps/server/src/services/settings-service.ts`
|
||||||
|
|
||||||
|
## Entry Points
|
||||||
|
|
||||||
|
**Server:**
|
||||||
|
|
||||||
|
- Location: `apps/server/src/index.ts`
|
||||||
|
- Triggers: `npm run dev:server` or Docker startup
|
||||||
|
- Responsibilities:
|
||||||
|
- Initialize Express app with middleware
|
||||||
|
- Create shared EventEmitter for WebSocket streaming
|
||||||
|
- Bootstrap services (SettingsService, AgentService, FeatureLoader, etc.)
|
||||||
|
- Mount API routes at `/api/*`
|
||||||
|
- Create WebSocket servers for agent streaming and terminal sessions
|
||||||
|
- Load and apply user settings (log level, request logging, etc.)
|
||||||
|
|
||||||
|
**UI (Web):**
|
||||||
|
|
||||||
|
- Location: `apps/ui/src/main.ts` (Vite entry), `apps/ui/src/app.tsx` (React component)
|
||||||
|
- Triggers: `npm run dev:web` or `npm run build`
|
||||||
|
- Responsibilities:
|
||||||
|
- Initialize Zustand stores from API settings
|
||||||
|
- Setup React Router with TanStack Router
|
||||||
|
- Render root layout with sidebar and main content area
|
||||||
|
- Handle authentication via verifySession()
|
||||||
|
|
||||||
|
**UI (Electron):**
|
||||||
|
|
||||||
|
- Location: `apps/ui/src/main.ts` (Vite entry), `apps/ui/electron/main-process.ts` (Electron main process)
|
||||||
|
- Triggers: `npm run dev:electron`
|
||||||
|
- Responsibilities:
|
||||||
|
- Launch local server via node-pty
|
||||||
|
- Create native Electron window
|
||||||
|
- Bridge IPC between renderer and main process
|
||||||
|
- Provide file system access via preload.ts APIs
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
**Strategy:** Layered error classification and user-friendly messaging
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
**Backend Error Handling:**
|
||||||
|
|
||||||
|
- Errors classified via `classifyError()` from @automaker/utils
|
||||||
|
- Classification: ParseError, NetworkError, AuthenticationError, RateLimitError, etc.
|
||||||
|
- Response format: `{ success: false, error: { type, message, code }, details? }`
|
||||||
|
- Example: `apps/server/src/lib/error-handler.ts`
|
||||||
|
|
||||||
|
**Frontend Error Handling:**
|
||||||
|
|
||||||
|
- HTTP errors caught by api-fetch.ts with retry logic
|
||||||
|
- WebSocket disconnects trigger reconnection with exponential backoff
|
||||||
|
- Errors shown in toast notifications via `sonner` library
|
||||||
|
- Validation errors caught and displayed inline in forms
|
||||||
|
|
||||||
|
**Agent Execution Errors:**
|
||||||
|
|
||||||
|
- AgentService wraps provider calls in try-catch
|
||||||
|
- Aborts handled specially via `isAbortError()` check
|
||||||
|
- Rate limit errors trigger cooldown before retry
|
||||||
|
- Model-specific errors mapped to user guidance
|
||||||
|
|
||||||
|
## Cross-Cutting Concerns
|
||||||
|
|
||||||
|
**Logging:**
|
||||||
|
|
||||||
|
- Framework: @automaker/utils createLogger()
|
||||||
|
- Pattern: `const logger = createLogger('ModuleName')`
|
||||||
|
- Levels: ERROR, WARN, INFO, DEBUG (configurable via settings)
|
||||||
|
- Output: stdout (dev), files (production)
|
||||||
|
|
||||||
|
**Validation:**
|
||||||
|
|
||||||
|
- File path validation: @automaker/platform initAllowedPaths() enforces restrictions
|
||||||
|
- Model ID validation: @automaker/model-resolver resolveModelString()
|
||||||
|
- JSON schema validation: Manual checks in route handlers (no JSON schema lib)
|
||||||
|
- Authentication: Session token validation via validateWsConnectionToken()
|
||||||
|
|
||||||
|
**Authentication:**
|
||||||
|
|
||||||
|
- Frontend: Session token stored in httpOnly cookie
|
||||||
|
- Backend: authMiddleware checks token on protected routes
|
||||||
|
- WebSocket: validateWsConnectionToken() for upgrade requests
|
||||||
|
- Providers: API keys stored encrypted in `{DATA_DIR}/credentials.json`
|
||||||
|
|
||||||
|
**Internationalization:**
|
||||||
|
|
||||||
|
- Not detected - strings are English-only
|
||||||
|
|
||||||
|
**Performance:**
|
||||||
|
|
||||||
|
- Code splitting: File-based routing via TanStack Router
|
||||||
|
- Lazy loading: React.lazy() in route components
|
||||||
|
- Caching: React Query for HTTP requests (query-keys.ts defines cache strategy)
|
||||||
|
- Image optimization: Automatic base64 encoding for agent context
|
||||||
|
- State hydration: Settings loaded once at startup, synced via API
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Architecture analysis: 2026-01-27_
|
||||||
245
.planning/codebase/CONCERNS.md
Normal file
245
.planning/codebase/CONCERNS.md
Normal file
@@ -0,0 +1,245 @@
|
|||||||
|
# Codebase Concerns
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Tech Debt
|
||||||
|
|
||||||
|
**Loose Type Safety in Error Handling:**
|
||||||
|
|
||||||
|
- Issue: Multiple uses of `as any` type assertions bypass TypeScript safety, particularly in error context handling and provider responses
|
||||||
|
- Files: `apps/server/src/providers/claude-provider.ts` (lines 318-322), `apps/server/src/lib/error-handler.ts`, `apps/server/src/routes/settings/routes/update-global.ts`
|
||||||
|
- Impact: Errors could have unchecked properties; refactoring becomes risky without compiler assistance
|
||||||
|
- Fix approach: Replace `as any` with proper type guards and discriminated unions; create helper functions for safe property access
|
||||||
|
|
||||||
|
**Missing Test Coverage for Critical Services:**
|
||||||
|
|
||||||
|
- Issue: Several core services explicitly excluded from test coverage thresholds due to integration complexity
|
||||||
|
- Files: `apps/server/vitest.config.ts` (line 22), explicitly excluded: `claude-usage-service.ts`, `mcp-test-service.ts`, `cli-provider.ts`, `cursor-provider.ts`
|
||||||
|
- Impact: Usage tracking, MCP integration, and CLI detection could break undetected; regression detection is limited
|
||||||
|
- Fix approach: Create integration test fixtures for CLI providers; mock MCP SDK for mcp-test-service tests; add usage tracking unit tests with mocked API calls
|
||||||
|
|
||||||
|
**Unused/Stub TODO Item Processing:**
|
||||||
|
|
||||||
|
- Issue: TodoWrite tool implementation exists but is partially integrated; tool name constants scattered across codex provider
|
||||||
|
- Files: `apps/server/src/providers/codex-tool-mapping.ts`, `apps/server/src/providers/codex-provider.ts`
|
||||||
|
- Impact: Todo list updates may not synchronize properly with all providers; unclear which providers support TodoWrite
|
||||||
|
- Fix approach: Consolidate tool name constants; add provider capability flags for todo support
|
||||||
|
|
||||||
|
**Electron Bridge (`electron.ts`) Size and Complexity:**
|
||||||
|
|
||||||
|
- Issue: Single 3741-line file handles all Electron IPC, native bindings, and communication
|
||||||
|
- Files: `apps/ui/src/lib/electron.ts`
|
||||||
|
- Impact: Difficult to test; hard to isolate bugs; changes require full testing of all features; potential memory overhead from monolithic file
|
||||||
|
- Fix approach: Split by responsibility (IPC, window management, file operations, debug tools); create separate bridge layers
|
||||||
|
|
||||||
|
## Known Bugs
|
||||||
|
|
||||||
|
**API Key Management Incomplete for Gemini:**
|
||||||
|
|
||||||
|
- Symptoms: Gemini API key verification endpoint not implemented despite other providers having verification
|
||||||
|
- Files: `apps/ui/src/components/views/settings-view/api-keys/hooks/use-api-key-management.ts` (line 122)
|
||||||
|
- Trigger: User tries to verify Gemini API key in settings
|
||||||
|
- Workaround: Key verification skipped for Gemini; settings page still accepts and stores key
|
||||||
|
|
||||||
|
**Orphaned Features Detection Vulnerable to False Negatives:**
|
||||||
|
|
||||||
|
- Symptoms: Features marked as orphaned when branch matching logic doesn't account for all scenarios
|
||||||
|
- Files: `apps/server/src/services/auto-mode-service.ts` (lines 5714-5773)
|
||||||
|
- Trigger: Features that were manually switched branches or rebased
|
||||||
|
- Workaround: Manual cleanup via feature deletion; branch comparison is basic name matching only
|
||||||
|
|
||||||
|
**Terminal Themes Incomplete:**
|
||||||
|
|
||||||
|
- Symptoms: Light themes (solarizedlight, github) map to the same generic lightTheme; no dedicated implementations
|
||||||
|
- Files: `apps/ui/src/config/terminal-themes.ts` (lines 593-594)
|
||||||
|
- Trigger: User selects solarizedlight or github terminal theme
|
||||||
|
- Workaround: Uses generic light theme instead of specific scheme; visual appearance doesn't match expectation
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
**Process Environment Variable Exposure:**
|
||||||
|
|
||||||
|
- Risk: Child processes inherit all parent `process.env` including sensitive credentials (API keys, tokens)
|
||||||
|
- Files: `apps/server/src/providers/cursor-provider.ts` (line 993), `apps/server/src/providers/codex-provider.ts` (line 1099)
|
||||||
|
- Current mitigation: Dotenv provides isolation at app startup; selective env passing to some providers
|
||||||
|
- Recommendations: Use explicit allowlists for env vars passed to child processes (only pass REQUIRED_KEYS); audit all spawn calls for env handling; document which providers need which credentials
|
||||||
|
|
||||||
|
**Unvalidated Provider Tool Input:**
|
||||||
|
|
||||||
|
- Risk: Tool input from CLI providers (Cursor, Copilot, Codex) is partially validated through Record<string, unknown> patterns; execution context could be escaped
|
||||||
|
- Files: `apps/server/src/providers/codex-provider.ts` (lines 506-543), `apps/server/src/providers/tool-normalization.ts`
|
||||||
|
- Current mitigation: Status enums validated; tool names checked against allow-lists in some providers
|
||||||
|
- Recommendations: Implement comprehensive schema validation for all tool inputs before execution; use zod or similar for runtime validation; add security tests for injection patterns
|
||||||
|
|
||||||
|
**API Key Storage in Settings Files:**
|
||||||
|
|
||||||
|
- Risk: API keys stored in plaintext in `~/.automaker/settings.json` and `data/settings.json`; file permissions may not be restricted
|
||||||
|
- Files: `apps/server/src/services/settings-service.ts`, uses `atomicWriteJson` without file permission enforcement
|
||||||
|
- Current mitigation: Limited by file system permissions; Electron mode has single-user access
|
||||||
|
- Recommendations: Encrypt sensitive settings fields (apiKeys, tokens); use OS credential stores (Keychain/Credential Manager) for production; add file permission checks on startup
|
||||||
|
|
||||||
|
## Performance Bottlenecks
|
||||||
|
|
||||||
|
**Synchronous Feature Loading at Startup:**
|
||||||
|
|
||||||
|
- Problem: All features loaded synchronously at project load; blocks UI with 1000+ features
|
||||||
|
- Files: `apps/server/src/services/feature-loader.ts` (line 230 Promise.all, but synchronous enumeration)
|
||||||
|
- Cause: Feature directory walk and JSON parsing are not paginated or lazy-loaded
|
||||||
|
- Improvement path: Implement lazy loading with pagination (load first 50, fetch more on scroll); add caching layer with TTL; move to background indexing; add feature count limits with warnings
|
||||||
|
|
||||||
|
**Auto-Mode Concurrency at Max Can Exceed Rate Limits:**
|
||||||
|
|
||||||
|
- Problem: maxConcurrency = 10 can quickly exhaust Claude API rate limits if all features execute simultaneously
|
||||||
|
- Files: `apps/server/src/services/auto-mode-service.ts` (line 2931 Promise.all for concurrent agents)
|
||||||
|
- Cause: No adaptive backoff; no API usage tracking before queuing; hint mentions reducing concurrency but doesn't enforce it
|
||||||
|
- Improvement path: Integrate with claude-usage-service to check remaining quota before starting features; implement exponential backoff on 429 errors; add per-model rate limit tracking
|
||||||
|
|
||||||
|
**Terminal Session Memory Leak Risk:**
|
||||||
|
|
||||||
|
- Problem: Terminal sessions accumulate in memory; expired sessions not cleaned up reliably
|
||||||
|
- Files: `apps/server/src/routes/terminal/common.ts` (line 66 cleanup runs every 5 minutes, but only for tokens)
|
||||||
|
- Cause: Cleanup interval is arbitrary; session map not bounded; no session lifespan limit
|
||||||
|
- Improvement path: Implement LRU eviction with max session count; reduce cleanup interval to 1 minute; add memory usage monitoring; auto-close idle sessions after 30 minutes
|
||||||
|
|
||||||
|
**Large File Content Loading Without Limits:**
|
||||||
|
|
||||||
|
- Problem: File content loaded entirely into memory; `describe-file.ts` truncates at 50KB but loads all content first
|
||||||
|
- Files: `apps/server/src/routes/context/routes/describe-file.ts` (line 128)
|
||||||
|
- Cause: Synchronous file read; no streaming; no check before reading large files
|
||||||
|
- Improvement path: Check file size before reading; stream large files; add file size warnings; implement chunked processing for analysis
|
||||||
|
|
||||||
|
## Fragile Areas
|
||||||
|
|
||||||
|
**Provider Factory Model Resolution:**
|
||||||
|
|
||||||
|
- Files: `apps/server/src/providers/provider-factory.ts`, `apps/server/src/providers/simple-query-service.ts`
|
||||||
|
- Why fragile: Each provider interprets model strings differently; no central registry; model aliases resolved at multiple layers (model-resolver, provider-specific maps, CLI validation)
|
||||||
|
- Safe modification: Add integration tests for each model alias per provider; create model capability matrix; centralize model validation before dispatch
|
||||||
|
- Test coverage: No dedicated tests; relies on E2E; no isolated unit tests for model resolution
|
||||||
|
|
||||||
|
**WebSocket Session Authentication:**
|
||||||
|
|
||||||
|
- Files: `apps/server/src/lib/auth.ts` (line 40 setInterval), `apps/server/src/index.ts` (token validation per message)
|
||||||
|
- Why fragile: Session tokens generated and validated at multiple points; no single source of truth; expiration is not atomic
|
||||||
|
- Safe modification: Add tests for token expiration edge cases; ensure cleanup removes all references; log all auth failures
|
||||||
|
- Test coverage: Auth middleware tested, but not session lifecycle
|
||||||
|
|
||||||
|
**Auto-Mode Feature State Machine:**
|
||||||
|
|
||||||
|
- Files: `apps/server/src/services/auto-mode-service.ts` (lines 465-600)
|
||||||
|
- Why fragile: Multiple states (running, queued, completed, error) managed across different methods; no explicit state transition validation; error recovery is defensive (catches all, logs, continues)
|
||||||
|
- Safe modification: Create explicit state enum with valid transitions; add invariant checks; unit test state transitions with all error cases
|
||||||
|
- Test coverage: Gaps in error recovery paths; no tests for concurrent state changes
|
||||||
|
|
||||||
|
## Scaling Limits
|
||||||
|
|
||||||
|
**Feature Count Scalability:**
|
||||||
|
|
||||||
|
- Current capacity: ~1000 features tested; UI performance degrades beyond this point, making pagination necessary
|
||||||
|
- Limit: 10K+ features cause >5s load times; memory usage ~100MB for metadata alone
|
||||||
|
- Scaling path: Implement feature database instead of file-per-feature; add ElasticSearch indexing for search; paginate API responses (50 per page); add feature archiving
|
||||||
|
|
||||||
|
**Concurrent Auto-Mode Executions:**
|
||||||
|
|
||||||
|
- Current capacity: maxConcurrency = 10 features; limited by Claude API rate limits
|
||||||
|
- Limit: Rate limit hits at ~4-5 simultaneous features with extended context (100K+ tokens)
|
||||||
|
- Scaling path: Implement token usage budgeting before feature start; queue features with estimated token cost; add provider-specific rate limit handling
|
||||||
|
|
||||||
|
**Terminal Session Count:**
|
||||||
|
|
||||||
|
- Current capacity: ~100 active terminal sessions per server
|
||||||
|
- Limit: Memory grows unbounded; no session count limit enforced
|
||||||
|
- Scaling path: Add max session count with least-recently-used eviction; implement session federation for distributed setup
|
||||||
|
|
||||||
|
**Worktree Disk Usage:**
|
||||||
|
|
||||||
|
- Current capacity: 10K worktrees (~20GB with typical repos)
|
||||||
|
- Limit: `.worktrees` directory grows without cleanup; old worktrees accumulate
|
||||||
|
- Scaling path: Add worktree TTL (delete if not used for 30 days); implement cleanup job; add quota warnings at 50/80% disk
|
||||||
|
|
||||||
|
## Dependencies at Risk
|
||||||
|
|
||||||
|
**node-pty Beta Version:**
|
||||||
|
|
||||||
|
- Risk: `node-pty@1.1.0-beta41` used for terminal emulation; beta status indicates possible instability
|
||||||
|
- Impact: Terminal features could break on minor platform changes; no guarantees on bug fixes
|
||||||
|
- Migration plan: Monitor releases for stable version; pin to specific commit if needed; test extensively on target platforms (macOS, Linux, Windows)
|
||||||
|
|
||||||
|
**@anthropic-ai/claude-agent-sdk 0.1.x:**
|
||||||
|
|
||||||
|
- Risk: Pre-1.0 version; SDK API may change in future releases; limited version stability guarantees
|
||||||
|
- Impact: Breaking changes could require significant refactoring; feature additions in SDK may not align with Automaker roadmap
|
||||||
|
- Migration plan: Pin to specific 0.1.x version; review SDK changelogs before upgrades; maintain SDK compatibility tests; consider fallback implementation for critical paths
|
||||||
|
|
||||||
|
**@openai/codex-sdk 0.77.x:**
|
||||||
|
|
||||||
|
- Risk: Codex model deprecated by OpenAI; SDK may be archived or unsupported
|
||||||
|
- Impact: Codex provider could become non-functional; error messages may not be actionable
|
||||||
|
- Migration plan: Monitor OpenAI roadmap for migration path; implement fallback to Claude for Codex requests; add deprecation warning in UI
|
||||||
|
|
||||||
|
**Express 5.2.x RC Stage:**
|
||||||
|
|
||||||
|
- Risk: Express 5 is still in release candidate phase (as of Node 22); full stability not guaranteed
|
||||||
|
- Impact: Minor version updates could include breaking changes; middleware compatibility issues possible
|
||||||
|
- Migration plan: Maintain compatibility layer for Express 5 API; test with latest major before release; document any version-specific workarounds
|
||||||
|
|
||||||
|
## Missing Critical Features
|
||||||
|
|
||||||
|
**Persistent Session Storage:**
|
||||||
|
|
||||||
|
- Problem: Agent conversation sessions stored only in-memory; restart loses all chat history
|
||||||
|
- Blocks: Long-running analysis across server restarts; session recovery not possible
|
||||||
|
- Impact: Users must re-run entire analysis if server restarts; lost productivity
|
||||||
|
|
||||||
|
**Rate Limit Awareness:**
|
||||||
|
|
||||||
|
- Problem: No tracking of API usage relative to rate limits before executing features
|
||||||
|
- Blocks: Predictable concurrent feature execution; users frequently hit rate limits unexpectedly
|
||||||
|
- Impact: Feature execution fails with cryptic rate limit errors; poor user experience
|
||||||
|
|
||||||
|
**Feature Dependency Visualization:**
|
||||||
|
|
||||||
|
- Problem: Dependency-resolver package exists but no UI to visualize or manage dependencies
|
||||||
|
- Blocks: Users cannot plan feature order; complex dependencies not visible
|
||||||
|
- Impact: Features implemented in wrong order; blocking dependencies missed
|
||||||
|
|
||||||
|
## Test Coverage Gaps
|
||||||
|
|
||||||
|
**CLI Provider Integration:**
|
||||||
|
|
||||||
|
- What's not tested: Actual CLI execution paths; environment setup; error recovery from CLI crashes
|
||||||
|
- Files: `apps/server/src/providers/cli-provider.ts`, `apps/server/src/lib/cli-detection.ts`
|
||||||
|
- Risk: Changes to CLI handling could break silently; detection logic not validated on target platforms
|
||||||
|
- Priority: High - affects all CLI-based providers (Cursor, Copilot, Codex)
|
||||||
|
|
||||||
|
**Cursor Provider Platform-Specific Paths:**
|
||||||
|
|
||||||
|
- What's not tested: Windows/Linux Cursor installation detection; version directory parsing; APPDATA environment variable handling
|
||||||
|
- Files: `apps/server/src/providers/cursor-provider.ts` (lines 267-498)
|
||||||
|
- Risk: Platform-specific bugs not caught; Cursor detection fails on non-standard installations
|
||||||
|
- Priority: High - Cursor is primary provider; platform differences critical
|
||||||
|
|
||||||
|
**Event Hook System State Changes:**
|
||||||
|
|
||||||
|
- What's not tested: Concurrent hook execution; cleanup on server shutdown; webhook delivery retries
|
||||||
|
- Files: `apps/server/src/services/event-hook-service.ts` (line 248 Promise.allSettled)
|
||||||
|
- Risk: Hooks may not execute in expected order; memory not cleaned up; webhooks lost on failure
|
||||||
|
- Priority: Medium - affects automation workflows
|
||||||
|
|
||||||
|
**Error Classification for New Providers:**
|
||||||
|
|
||||||
|
- What's not tested: Each provider's unique error patterns mapped to ErrorType enum; new provider errors not classified
|
||||||
|
- Files: `apps/server/src/lib/error-handler.ts` (lines 58-80), each provider error mapping
|
||||||
|
- Risk: User sees generic "unknown error" instead of actionable message; categorization regresses with new providers
|
||||||
|
- Priority: Medium - impacts user experience
|
||||||
|
|
||||||
|
**Feature State Corruption Scenarios:**
|
||||||
|
|
||||||
|
- What's not tested: Concurrent feature updates; partial writes with power loss; JSON parsing recovery
|
||||||
|
- Files: `apps/server/src/services/feature-loader.ts`, `@automaker/utils` (atomicWriteJson)
|
||||||
|
- Risk: Feature data corrupted on concurrent access; recovery incomplete; no validation before use
|
||||||
|
- Priority: High - data loss risk
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Concerns audit: 2026-01-27_
|
||||||
255
.planning/codebase/CONVENTIONS.md
Normal file
255
.planning/codebase/CONVENTIONS.md
Normal file
@@ -0,0 +1,255 @@
|
|||||||
|
# Coding Conventions
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Naming Patterns
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
|
||||||
|
- kebab-case for class/service files (the classes inside use PascalCase): `auto-mode-service.ts`, `feature-loader.ts`, `claude-provider.ts`
|
||||||
|
- kebab-case for route/handler directories: `auto-mode/`, `features/`, `event-history/`
|
||||||
|
- kebab-case for utility files: `secure-fs.ts`, `sdk-options.ts`, `settings-helpers.ts`
|
||||||
|
- kebab-case for React components: `card.tsx`, `ansi-output.tsx`, `count-up-timer.tsx`
|
||||||
|
- kebab-case for hooks: `use-board-background-settings.ts`, `use-responsive-kanban.ts`, `use-test-logs.ts`
|
||||||
|
- kebab-case for store files: `app-store.ts`, `auth-store.ts`, `setup-store.ts`
|
||||||
|
- Organized by functionality: `routes/features/routes/list.ts`, `routes/features/routes/get.ts`
|
||||||
|
|
||||||
|
**Functions:**
|
||||||
|
|
||||||
|
- camelCase for all function names: `createEventEmitter()`, `getAutomakerDir()`, `executeQuery()`
|
||||||
|
- Verb-first for action functions: `buildPrompt()`, `classifyError()`, `loadContextFiles()`, `atomicWriteJson()`
|
||||||
|
- Prefix with `use` for React hooks: `useBoardBackgroundSettings()`, `useAppStore()`, `useUpdateProjectSettings()`
|
||||||
|
- Private methods prefixed with underscore: `_deleteOrphanedImages()`, `_migrateImages()`
|
||||||
|
|
||||||
|
**Variables:**
|
||||||
|
|
||||||
|
- camelCase for constants and variables: `featureId`, `projectPath`, `modelId`, `tempDir`
|
||||||
|
- UPPER_SNAKE_CASE for global constants/enums: `DEFAULT_MAX_CONCURRENCY`, `DEFAULT_PHASE_MODELS`
|
||||||
|
- Meaningful naming over abbreviations: `featureDirectory` not `fd`, `featureImages` not `img`
|
||||||
|
- Prefixes for computed values: `is*` for booleans: `isClaudeModel`, `isContainerized`, `isAutoLoginEnabled`
|
||||||
|
|
||||||
|
**Types:**
|
||||||
|
|
||||||
|
- PascalCase for interfaces and types: `Feature`, `ExecuteOptions`, `EventEmitter`, `ProviderConfig`
|
||||||
|
- Type files suffixed with `.d.ts`: `paths.d.ts`, `types.d.ts`
|
||||||
|
- Organized by domain: `src/store/types/`, `src/lib/`
|
||||||
|
- Re-export pattern from main package indexes: `export type { Feature };`
|
||||||
|
|
||||||
|
## Code Style
|
||||||
|
|
||||||
|
**Formatting:**
|
||||||
|
|
||||||
|
- Tool: Prettier 3.7.4
|
||||||
|
- Print width: 100 characters
|
||||||
|
- Tab width: 2 spaces
|
||||||
|
- Single quotes for strings
|
||||||
|
- Semicolons required
|
||||||
|
- Trailing commas: es5 (trailing in arrays/objects, not in params)
|
||||||
|
- Arrow functions always include parentheses: `(x) => x * 2`
|
||||||
|
- Line endings: LF (Unix)
|
||||||
|
- Bracket spacing: `{ key: value }`
|
||||||
|
|
||||||
|
**Linting:**
|
||||||
|
|
||||||
|
- Tool: ESLint (flat config in `apps/ui/eslint.config.mjs`)
|
||||||
|
- TypeScript ESLint plugin for `.ts`/`.tsx` files
|
||||||
|
- Recommended configs: `@eslint/js`, `@typescript-eslint/recommended`
|
||||||
|
- Unused variables warning with exception for parameters starting with `_`
|
||||||
|
- `@ts-ignore` suppressions are allowed only when accompanied by a description
|
||||||
|
- `@typescript-eslint/no-explicit-any` is warn-level (allow with caution)
|
||||||
|
|
||||||
|
## Import Organization
|
||||||
|
|
||||||
|
**Order:**
|
||||||
|
|
||||||
|
1. Node.js standard library: `import fs from 'fs/promises'`, `import path from 'path'`
|
||||||
|
2. Third-party packages: `import { describe, it } from 'vitest'`, `import { Router } from 'express'`
|
||||||
|
3. Shared packages (monorepo): `import type { Feature } from '@automaker/types'`, `import { createLogger } from '@automaker/utils'`
|
||||||
|
4. Local relative imports: `import { FeatureLoader } from './feature-loader.js'`, `import * as secureFs from '../lib/secure-fs.js'`
|
||||||
|
5. Type imports: separated with `import type { ... } from`
|
||||||
|
|
||||||
|
**Path Aliases:**
|
||||||
|
|
||||||
|
- `@/` - resolves to `./src` in both UI (`apps/ui/`) and server (`apps/server/`)
|
||||||
|
- Shared packages prefixed with `@automaker/`:
|
||||||
|
- `@automaker/types` - core TypeScript definitions
|
||||||
|
- `@automaker/utils` - logging, errors, utilities
|
||||||
|
- `@automaker/prompts` - AI prompt templates
|
||||||
|
- `@automaker/platform` - path management, security, processes
|
||||||
|
- `@automaker/model-resolver` - model alias resolution
|
||||||
|
- `@automaker/dependency-resolver` - feature dependency ordering
|
||||||
|
- `@automaker/git-utils` - git operations
|
||||||
|
- Extensions: `.js` extension used in relative imports for ESM compatibility
|
||||||
|
|
||||||
|
**Import Rules:**
|
||||||
|
|
||||||
|
- Always import from shared packages, never from old paths
|
||||||
|
- No circular dependencies between layers
|
||||||
|
- Services import from providers and utilities
|
||||||
|
- Routes import from services
|
||||||
|
- Shared packages have strict dependency hierarchy (types → utils → platform → git-utils → server/ui)
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
- Use `try-catch` blocks for async operations: wraps feature execution, file operations, git commands
|
||||||
|
- Throw `new Error(message)` with descriptive messages: `throw new Error('already running')`, ``throw new Error(`Feature ${featureId} not found`)`` (template literal required for interpolation)
|
||||||
|
- Classify errors with `classifyError()` from `@automaker/utils` for categorization
|
||||||
|
- Log errors with context using `createLogger()`: includes error classification
|
||||||
|
- Return error info objects: `{ valid: false, errors: [...], warnings: [...] }`
|
||||||
|
- Validation returns structured result: `{ valid, errors, warnings }` from provider `validateConfig()`
|
||||||
|
|
||||||
|
**Error Types:**
|
||||||
|
|
||||||
|
- Authentication errors: distinguish from validation/runtime errors
|
||||||
|
- Path validation errors: caught by middleware in Express routes
|
||||||
|
- File system errors: logged and recovery attempted with backups
|
||||||
|
- SDK/API errors: classified and wrapped with context
|
||||||
|
- Abort/cancellation errors: handled without stack traces (graceful shutdown)
|
||||||
|
|
||||||
|
**Error Messages:**
|
||||||
|
|
||||||
|
- Descriptive and actionable: not vague error codes
|
||||||
|
- Include context when helpful: file paths, feature IDs, model names
|
||||||
|
- User-friendly messages via `getUserFriendlyErrorMessage()` for client display
|
||||||
|
|
||||||
|
## Logging
|
||||||
|
|
||||||
|
**Framework:**
|
||||||
|
|
||||||
|
- Built-in `createLogger()` from `@automaker/utils`
|
||||||
|
- Each module creates logger: `const logger = createLogger('ModuleName')`
|
||||||
|
- Logger functions: `info()`, `warn()`, `error()`, `debug()`
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
- Log operation start and completion for significant operations
|
||||||
|
- Log warnings for non-critical issues: file deletion failures, missing optional configs
|
||||||
|
- Log errors with full error object: `logger.error('operation failed', error)`
|
||||||
|
- Use module name as logger context: `createLogger('AutoMode')`, `createLogger('HttpClient')`
|
||||||
|
- Avoid logging sensitive data (API keys, passwords)
|
||||||
|
- No console.log in production code - use logger
|
||||||
|
|
||||||
|
**What to Log:**
|
||||||
|
|
||||||
|
- Feature execution start/completion
|
||||||
|
- Error classification and recovery attempts
|
||||||
|
- File operations (create, delete, migrate)
|
||||||
|
- API calls and responses (in debug mode)
|
||||||
|
- Async operation start/end
|
||||||
|
- Warnings for deprecated patterns
|
||||||
|
|
||||||
|
## Comments
|
||||||
|
|
||||||
|
**When to Comment:**
|
||||||
|
|
||||||
|
- Complex algorithms or business logic: explain the "why" not the "what"
|
||||||
|
- Integration points: explain how modules communicate
|
||||||
|
- Workarounds: explain the constraint that made the workaround necessary
|
||||||
|
- Non-obvious performance implications
|
||||||
|
- Edge cases and their handling
|
||||||
|
|
||||||
|
**JSDoc/TSDoc:**
|
||||||
|
|
||||||
|
- Used for public functions and classes
|
||||||
|
- Document parameters with `@param`
|
||||||
|
- Document return types with `@returns`
|
||||||
|
- Document exceptions with `@throws`
|
||||||
|
- Used for service classes: `/**\n * Module description\n * Manages: ...\n */`
|
||||||
|
- Not required for simple getters/setters
|
||||||
|
|
||||||
|
**Example JSDoc Pattern:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
/**
|
||||||
|
* Delete images that were removed from a feature
|
||||||
|
*/
|
||||||
|
private async _deleteOrphanedImages(
|
||||||
|
projectPath: string,
|
||||||
|
oldPaths: Array<string>,
|
||||||
|
newPaths: Array<string>
|
||||||
|
): Promise<void> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Function Design
|
||||||
|
|
||||||
|
**Size:**
|
||||||
|
|
||||||
|
- Keep functions under 100 lines when possible
|
||||||
|
- Large services split into multiple related methods
|
||||||
|
- Private helper methods extracted for complex logic
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
|
||||||
|
- Use destructuring for object parameters with multiple properties
|
||||||
|
- Document parameter types with TypeScript types
|
||||||
|
- Optional parameters marked with `?`
|
||||||
|
- Use `Record<string, unknown>` for flexible object parameters
|
||||||
|
|
||||||
|
**Return Values:**
|
||||||
|
|
||||||
|
- Explicit return types required for all public functions
|
||||||
|
- Return structured objects for multiple values
|
||||||
|
- Use `Promise<T>` for async functions
|
||||||
|
- Async generators use `AsyncGenerator<T>` for streaming responses
|
||||||
|
- Never implicitly return `undefined` (explicit return or throw)
|
||||||
|
|
||||||
|
## Module Design
|
||||||
|
|
||||||
|
**Exports:**
|
||||||
|
|
||||||
|
- Default export for class instantiation: `export default class FeatureLoader {}`
|
||||||
|
- Named exports for functions: `export function createEventEmitter() {}`
|
||||||
|
- Type exports separated: `export type { Feature };`
|
||||||
|
- Barrel files (index.ts) re-export from module
|
||||||
|
|
||||||
|
**Barrel Files:**
|
||||||
|
|
||||||
|
- Used in routes: `routes/features/index.ts` creates router and exports
|
||||||
|
- Used in stores: `store/index.ts` exports all store hooks
|
||||||
|
- Pattern: group related exports for easier importing
|
||||||
|
|
||||||
|
**Service Classes:**
|
||||||
|
|
||||||
|
- Instantiated once and dependency injected
|
||||||
|
- Public methods for API surface
|
||||||
|
- Private methods prefixed with `_`
|
||||||
|
- No static methods - prefer instances or functions
|
||||||
|
- Constructor takes dependencies: `constructor(config?: ProviderConfig)`
|
||||||
|
|
||||||
|
**Provider Pattern:**
|
||||||
|
|
||||||
|
- Abstract base class: `BaseProvider` with abstract methods
|
||||||
|
- Concrete implementations: `ClaudeProvider`, `CodexProvider`, `CursorProvider`
|
||||||
|
- Common interface: `executeQuery()`, `detectInstallation()`, `validateConfig()`
|
||||||
|
- Factory for instantiation: `ProviderFactory.create()`
|
||||||
|
|
||||||
|
## TypeScript Specific
|
||||||
|
|
||||||
|
**Strict Mode:** Always enabled globally
|
||||||
|
|
||||||
|
- `strict: true` in all tsconfigs
|
||||||
|
- No implicit `any` - declare types explicitly
|
||||||
|
- No optional chaining on base types without narrowing
|
||||||
|
|
||||||
|
**Type Definitions:**
|
||||||
|
|
||||||
|
- Interface for shapes: `interface Feature { ... }`
|
||||||
|
- Type for unions/aliases: `type ModelAlias = 'haiku' | 'sonnet' | 'opus'`
|
||||||
|
- Type guards for narrowing: `if (typeof x === 'string') { ... }`
|
||||||
|
- Generic types for reusable patterns: `EventCallback<T>`
|
||||||
|
|
||||||
|
**React Specific (UI):**
|
||||||
|
|
||||||
|
- Functional components only
|
||||||
|
- React 19 with hooks
|
||||||
|
- Type props interface: `interface CardProps extends React.ComponentProps<'div'> { ... }`
|
||||||
|
- Zustand stores for state management
|
||||||
|
- Custom hooks for shared logic
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Convention analysis: 2026-01-27_
|
||||||
232
.planning/codebase/INTEGRATIONS.md
Normal file
232
.planning/codebase/INTEGRATIONS.md
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
# External Integrations
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## APIs & External Services
|
||||||
|
|
||||||
|
**AI/LLM Providers:**
|
||||||
|
|
||||||
|
- Claude (Anthropic)
|
||||||
|
- SDK: `@anthropic-ai/claude-agent-sdk` (0.1.76)
|
||||||
|
- Auth: `ANTHROPIC_API_KEY` environment variable or stored credentials
|
||||||
|
- Features: Extended thinking, vision/images, tools, streaming
|
||||||
|
- Implementation: `apps/server/src/providers/claude-provider.ts`
|
||||||
|
- Models: Opus 4.5, Sonnet 4, Haiku 4.5, and legacy models
|
||||||
|
- Custom endpoints: `ANTHROPIC_BASE_URL` (optional)
|
||||||
|
|
||||||
|
- GitHub Copilot
|
||||||
|
- SDK: `@github/copilot-sdk` (0.1.16)
|
||||||
|
- Auth: GitHub OAuth (via `gh` CLI) or `GITHUB_TOKEN` environment variable
|
||||||
|
- Features: Tools, streaming, runtime model discovery
|
||||||
|
- Implementation: `apps/server/src/providers/copilot-provider.ts`
|
||||||
|
- CLI detection: Searches for Copilot CLI binary
|
||||||
|
- Models: Dynamic discovery via `copilot models list`
|
||||||
|
|
||||||
|
- OpenAI Codex/GPT-4
|
||||||
|
- SDK: `@openai/codex-sdk` (0.77.0)
|
||||||
|
- Auth: `OPENAI_API_KEY` environment variable or stored credentials
|
||||||
|
- Features: Extended thinking, tools, sandbox execution
|
||||||
|
- Implementation: `apps/server/src/providers/codex-provider.ts`
|
||||||
|
- Execution modes: CLI (with sandbox) or SDK (direct API)
|
||||||
|
- Models: Dynamic discovery via Codex CLI or SDK
|
||||||
|
|
||||||
|
- Google Gemini
|
||||||
|
- Implementation: `apps/server/src/providers/gemini-provider.ts`
|
||||||
|
- Features: Vision support, tools, streaming
|
||||||
|
|
||||||
|
- OpenCode (AWS/Azure/other)
|
||||||
|
- Implementation: `apps/server/src/providers/opencode-provider.ts`
|
||||||
|
- Supports: Amazon Bedrock, Azure models, local models
|
||||||
|
- Features: Flexible provider architecture
|
||||||
|
|
||||||
|
- Cursor Editor
|
||||||
|
- Implementation: `apps/server/src/providers/cursor-provider.ts`
|
||||||
|
- Features: Integration with Cursor IDE
|
||||||
|
|
||||||
|
**Model Context Protocol (MCP):**
|
||||||
|
|
||||||
|
- SDK: `@modelcontextprotocol/sdk` (1.25.2)
|
||||||
|
- Purpose: Connect AI agents to external tools and data sources
|
||||||
|
- Implementation: `apps/server/src/services/mcp-test-service.ts`, `apps/server/src/routes/mcp/`
|
||||||
|
- Configuration: Per-project in `.automaker/` directory
|
||||||
|
|
||||||
|
## Data Storage
|
||||||
|
|
||||||
|
**Databases:**
|
||||||
|
|
||||||
|
- None - This codebase does NOT use traditional databases (SQL/NoSQL)
|
||||||
|
- All data stored as files in local filesystem
|
||||||
|
|
||||||
|
**File Storage:**
|
||||||
|
|
||||||
|
- Local filesystem only
|
||||||
|
- Locations:
|
||||||
|
- `.automaker/` - Project-specific data (features, context, settings)
|
||||||
|
- `./data/` or `DATA_DIR` env var - Global data (settings, credentials, sessions)
|
||||||
|
- Secure file operations: `@automaker/platform` exports `secureFs` for restricted file access
|
||||||
|
|
||||||
|
**Caching:**
|
||||||
|
|
||||||
|
- In-memory caches for:
|
||||||
|
- Model lists (Copilot, Codex runtime discovery)
|
||||||
|
- Feature metadata
|
||||||
|
- Project specifications
|
||||||
|
- No distributed/persistent caching system
|
||||||
|
|
||||||
|
## Authentication & Identity
|
||||||
|
|
||||||
|
**Auth Provider:**
|
||||||
|
|
||||||
|
- Custom implementation (no third-party provider)
|
||||||
|
- Authentication methods:
|
||||||
|
1. Claude Max Plan (OAuth via Anthropic CLI)
|
||||||
|
2. API Key mode (ANTHROPIC_API_KEY)
|
||||||
|
3. Custom provider profiles with API keys
|
||||||
|
4. Token-based session authentication for WebSocket
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
|
||||||
|
- `apps/server/src/lib/auth.ts` - Auth middleware
|
||||||
|
- `apps/server/src/routes/auth/` - Auth routes
|
||||||
|
- Session tokens for WebSocket connections
|
||||||
|
- Credential storage in `./data/credentials.json` (encrypted/protected)
|
||||||
|
|
||||||
|
## Monitoring & Observability
|
||||||
|
|
||||||
|
**Error Tracking:**
|
||||||
|
|
||||||
|
- None - No automatic error reporting service integrated
|
||||||
|
- Custom error classification: `@automaker/utils` exports `classifyError()`
|
||||||
|
- User-friendly error messages: `getUserFriendlyErrorMessage()`
|
||||||
|
|
||||||
|
**Logs:**
|
||||||
|
|
||||||
|
- Console logging with configurable levels
|
||||||
|
- Logger: `@automaker/utils` exports `createLogger()`
|
||||||
|
- Log levels: ERROR, WARN, INFO, DEBUG
|
||||||
|
- Environment: `LOG_LEVEL` env var (optional)
|
||||||
|
- Storage: Logs output to console/stdout (no persistent logging to files)
|
||||||
|
|
||||||
|
**Usage Tracking:**
|
||||||
|
|
||||||
|
- Claude API usage: `apps/server/src/services/claude-usage-service.ts`
|
||||||
|
- Codex API usage: `apps/server/src/services/codex-usage-service.ts`
|
||||||
|
- Tracks: Tokens, costs, rates
|
||||||
|
|
||||||
|
## CI/CD & Deployment
|
||||||
|
|
||||||
|
**Hosting:**
|
||||||
|
|
||||||
|
- Local development: Node.js server + Vite dev server
|
||||||
|
- Desktop: Electron application (macOS, Windows, Linux)
|
||||||
|
- Web: Express server deployed to any Node.js host
|
||||||
|
|
||||||
|
**CI Pipeline:**
|
||||||
|
|
||||||
|
- GitHub Actions likely (`.github/workflows/` present in repo)
|
||||||
|
- Testing: Playwright E2E, Vitest unit tests
|
||||||
|
- Linting: ESLint
|
||||||
|
- Formatting: Prettier
|
||||||
|
|
||||||
|
**Build Process:**
|
||||||
|
|
||||||
|
- `npm run build:packages` - Build shared packages
|
||||||
|
- `npm run build` - Build web UI
|
||||||
|
- `npm run build:electron` - Build Electron apps (platform-specific)
|
||||||
|
- Electron Builder handles code signing and distribution
|
||||||
|
|
||||||
|
## Environment Configuration
|
||||||
|
|
||||||
|
**Required env vars:**
|
||||||
|
|
||||||
|
- `ANTHROPIC_API_KEY` - For Claude provider (or provide in settings)
|
||||||
|
- `OPENAI_API_KEY` - For Codex provider (optional)
|
||||||
|
- `GITHUB_TOKEN` - For GitHub operations (optional)
|
||||||
|
|
||||||
|
**Optional env vars:**
|
||||||
|
|
||||||
|
- `PORT` - Server port (default 3008)
|
||||||
|
- `HOST` - Server bind address (default 0.0.0.0)
|
||||||
|
- `HOSTNAME` - Public hostname (default localhost)
|
||||||
|
- `DATA_DIR` - Data storage directory (default ./data)
|
||||||
|
- `ANTHROPIC_BASE_URL` - Custom Claude endpoint
|
||||||
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to directory
|
||||||
|
- `AUTOMAKER_MOCK_AGENT` - Enable mock agent for testing
|
||||||
|
- `AUTOMAKER_AUTO_LOGIN` - Skip login prompt in dev
|
||||||
|
|
||||||
|
**Secrets location:**
|
||||||
|
|
||||||
|
- Runtime: Environment variables (`process.env`)
|
||||||
|
- Stored: `./data/credentials.json` (file-based)
|
||||||
|
- Retrieval: `apps/server/src/services/settings-service.ts`
|
||||||
|
|
||||||
|
## Webhooks & Callbacks
|
||||||
|
|
||||||
|
**Incoming:**
|
||||||
|
|
||||||
|
- WebSocket connections for real-time agent event streaming
|
||||||
|
- GitHub webhook routes (optional): `apps/server/src/routes/github/`
|
||||||
|
- Terminal WebSocket connections: `apps/server/src/routes/terminal/`
|
||||||
|
|
||||||
|
**Outgoing:**
|
||||||
|
|
||||||
|
- GitHub PRs: `apps/server/src/routes/worktree/routes/create-pr.ts`
|
||||||
|
- Git operations: `@automaker/git-utils` handles commits, pushes
|
||||||
|
- Terminal output streaming via WebSocket to clients
|
||||||
|
- Event hooks: `apps/server/src/services/event-hook-service.ts`
|
||||||
|
|
||||||
|
## Credential Management
|
||||||
|
|
||||||
|
**API Keys Storage:**
|
||||||
|
|
||||||
|
- File: `./data/credentials.json`
|
||||||
|
- Format: JSON with nested structure for different providers
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"apiKeys": {
|
||||||
|
"anthropic": "sk-...",
|
||||||
|
"openai": "sk-...",
|
||||||
|
"github": "ghp_..."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
- Access: `SettingsService.getCredentials()` from `apps/server/src/services/settings-service.ts`
|
||||||
|
- Security: File permissions should restrict to current user only
|
||||||
|
|
||||||
|
**Profile/Provider Configuration:**
|
||||||
|
|
||||||
|
- File: `./data/settings.json` (global) or `.automaker/settings.json` (per-project)
|
||||||
|
- Stores: Alternative provider profiles, model mappings, sandbox settings
|
||||||
|
- Types: `ClaudeApiProfile`, `ClaudeCompatibleProvider` from `@automaker/types`
|
||||||
|
|
||||||
|
## Third-Party Service Integration Points
|
||||||
|
|
||||||
|
**Git/GitHub:**
|
||||||
|
|
||||||
|
- `@automaker/git-utils` - Git operations (worktrees, commits, diffs)
|
||||||
|
- Codex/Cursor providers can create GitHub PRs
|
||||||
|
- GitHub CLI (`gh`) detection for Copilot authentication
|
||||||
|
|
||||||
|
**Terminal Access:**
|
||||||
|
|
||||||
|
- `node-pty` (1.1.0-beta41) - Pseudo-terminal interface
|
||||||
|
- `TerminalService` manages terminal sessions
|
||||||
|
- WebSocket streaming to frontend
|
||||||
|
|
||||||
|
**AI Models - Multi-Provider Abstraction:**
|
||||||
|
|
||||||
|
- `BaseProvider` interface: `apps/server/src/providers/base-provider.ts`
|
||||||
|
- Factory pattern: `apps/server/src/providers/provider-factory.ts`
|
||||||
|
- Allows swapping providers without changing agent logic
|
||||||
|
- All providers implement: `executeQuery()`, `detectInstallation()`, `getAvailableModels()`
|
||||||
|
|
||||||
|
**Process Spawning:**
|
||||||
|
|
||||||
|
- `@automaker/platform` exports `spawnProcess()`, `spawnJSONLProcess()`
|
||||||
|
- Codex CLI execution: JSONL output parsing
|
||||||
|
- Copilot CLI execution: Subprocess management
|
||||||
|
- Cursor IDE interaction: Process spawning for tool execution
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Integration audit: 2026-01-27_
|
||||||
230
.planning/codebase/STACK.md
Normal file
230
.planning/codebase/STACK.md
Normal file
@@ -0,0 +1,230 @@
|
|||||||
|
# Technology Stack
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Languages
|
||||||
|
|
||||||
|
**Primary:**
|
||||||
|
|
||||||
|
- TypeScript 5.9.3 - Used across all packages, apps, and configuration
|
||||||
|
- JavaScript (Node.js) - Runtime execution for scripts and tooling
|
||||||
|
|
||||||
|
**Secondary:**
|
||||||
|
|
||||||
|
- YAML 2.7.0 - Configuration files
|
||||||
|
- CSS/Tailwind CSS 4.1.18 - Frontend styling
|
||||||
|
|
||||||
|
## Runtime
|
||||||
|
|
||||||
|
**Environment:**
|
||||||
|
|
||||||
|
- Node.js 22.x (>=22.0.0 <23.0.0) - Required version, specified in `.nvmrc`
|
||||||
|
|
||||||
|
**Package Manager:**
|
||||||
|
|
||||||
|
- npm - Monorepo workspace management via npm workspaces
|
||||||
|
- Lockfile: `package-lock.json` (present)
|
||||||
|
|
||||||
|
## Frameworks
|
||||||
|
|
||||||
|
**Core - Frontend:**
|
||||||
|
|
||||||
|
- React 19.2.3 - UI framework with hooks and concurrent features
|
||||||
|
- Vite 7.3.0 - Build tool and dev server (`apps/ui/vite.config.ts`)
|
||||||
|
- Electron 39.2.7 - Desktop application runtime (`apps/ui/package.json`)
|
||||||
|
- TanStack Router 1.141.6 - File-based routing (React)
|
||||||
|
- Zustand 5.0.9 - State management (lightweight alternative to Redux)
|
||||||
|
- TanStack Query (React Query) 5.90.17 - Server state management
|
||||||
|
|
||||||
|
**Core - Backend:**
|
||||||
|
|
||||||
|
- Express 5.2.1 - HTTP server framework (`apps/server/package.json`)
|
||||||
|
- WebSocket (ws) 8.18.3 - Real-time bidirectional communication
|
||||||
|
- Claude Agent SDK (@anthropic-ai/claude-agent-sdk) 0.1.76 - AI provider integration
|
||||||
|
|
||||||
|
**Testing:**
|
||||||
|
|
||||||
|
- Playwright 1.57.0 - End-to-end testing (`apps/ui` E2E tests)
|
||||||
|
- Vitest 4.0.16 - Unit testing framework (runs on all packages and server)
|
||||||
|
- @vitest/ui 4.0.16 - Visual test runner UI
|
||||||
|
- @vitest/coverage-v8 4.0.16 - Code coverage reporting
|
||||||
|
|
||||||
|
**Build/Dev:**
|
||||||
|
|
||||||
|
- electron-builder 26.0.12 - Electron app packaging and distribution
|
||||||
|
- @vitejs/plugin-react 5.1.2 - Vite React support
|
||||||
|
- vite-plugin-electron 0.29.0 - Vite plugin for Electron main process
|
||||||
|
- vite-plugin-electron-renderer 0.14.6 - Vite plugin for Electron renderer
|
||||||
|
- ESLint 9.39.2 - Code linting (`apps/ui`)
|
||||||
|
- @typescript-eslint/eslint-plugin 8.50.0 - TypeScript ESLint rules
|
||||||
|
- Prettier 3.7.4 - Code formatting (root-level config)
|
||||||
|
- Tailwind CSS 4.1.18 - Utility-first CSS framework
|
||||||
|
- @tailwindcss/vite 4.1.18 - Tailwind Vite integration
|
||||||
|
|
||||||
|
**UI Components & Libraries:**
|
||||||
|
|
||||||
|
- Radix UI - Unstyled accessible component library (@radix-ui packages)
|
||||||
|
- react-dropdown-menu 2.1.16
|
||||||
|
- react-dialog 1.1.15
|
||||||
|
- react-select 2.2.6
|
||||||
|
- react-tooltip 1.2.8
|
||||||
|
- react-tabs 1.1.13
|
||||||
|
- react-collapsible 1.1.12
|
||||||
|
- react-checkbox 1.3.3
|
||||||
|
- react-radio-group 1.3.8
|
||||||
|
- react-popover 1.1.15
|
||||||
|
- react-slider 1.3.6
|
||||||
|
- react-switch 1.2.6
|
||||||
|
- react-scroll-area 1.2.10
|
||||||
|
- react-label 2.1.8
|
||||||
|
- Lucide React 0.562.0 - Icon library
|
||||||
|
- Geist 1.5.1 - Design system UI library
|
||||||
|
- Sonner 2.0.7 - Toast notifications
|
||||||
|
|
||||||
|
**Code Editor & Terminal:**
|
||||||
|
|
||||||
|
- @uiw/react-codemirror 4.25.4 - Code editor React component
|
||||||
|
- CodeMirror (@codemirror packages) 6.x - Editor toolkit
|
||||||
|
- xterm.js (@xterm/xterm) 5.5.0 - Terminal emulator
|
||||||
|
- @xterm/addon-fit 0.10.0 - Fit addon for terminal
|
||||||
|
- @xterm/addon-search 0.15.0 - Search addon for terminal
|
||||||
|
- @xterm/addon-web-links 0.11.0 - Web links addon
|
||||||
|
- @xterm/addon-webgl 0.18.0 - WebGL renderer for terminal
|
||||||
|
|
||||||
|
**Diagram/Graph Visualization:**
|
||||||
|
|
||||||
|
- @xyflow/react 12.10.0 - React flow diagram library
|
||||||
|
- dagre 0.8.5 - Graph layout algorithms
|
||||||
|
|
||||||
|
**Markdown/Content Rendering:**
|
||||||
|
|
||||||
|
- react-markdown 10.1.0 - Markdown parser and renderer
|
||||||
|
- remark-gfm 4.0.1 - GitHub Flavored Markdown support
|
||||||
|
- rehype-raw 7.0.0 - Raw HTML support in markdown
|
||||||
|
- rehype-sanitize 6.0.0 - HTML sanitization
|
||||||
|
|
||||||
|
**Data Validation & Parsing:**
|
||||||
|
|
||||||
|
- zod 3.24.1 or 4.0.0 - Schema validation and TypeScript type inference
|
||||||
|
|
||||||
|
**Utilities:**
|
||||||
|
|
||||||
|
- class-variance-authority 0.7.1 - CSS variant utilities
|
||||||
|
- clsx 2.1.1 - Conditional className utility
|
||||||
|
- cmdk 1.1.1 - Command menu/palette
|
||||||
|
- tailwind-merge 3.4.0 - Tailwind CSS conflict resolution
|
||||||
|
- usehooks-ts 3.1.1 - TypeScript React hooks
|
||||||
|
- @dnd-kit (drag-and-drop) 6.3.1 - Drag and drop library
|
||||||
|
|
||||||
|
**Font Libraries:**
|
||||||
|
|
||||||
|
- @fontsource - Web font packages (Cascadia Code, Fira Code, IBM Plex, Inconsolata, Inter, etc.)
|
||||||
|
|
||||||
|
**Development Utilities:**
|
||||||
|
|
||||||
|
- cross-spawn 7.0.6 - Cross-platform process spawning
|
||||||
|
- dotenv 17.2.3 - Environment variable loading
|
||||||
|
- tsx 4.21.0 - TypeScript execution for Node.js
|
||||||
|
- tree-kill 1.2.2 - Process tree killer utility
|
||||||
|
- node-pty 1.1.0-beta41 - PTY/terminal interface for Node.js
|
||||||
|
|
||||||
|
## Key Dependencies
|
||||||
|
|
||||||
|
**Critical - AI/Agent Integration:**
|
||||||
|
|
||||||
|
- @anthropic-ai/claude-agent-sdk 0.1.76 - Core Claude AI provider
|
||||||
|
- @github/copilot-sdk 0.1.16 - GitHub Copilot integration
|
||||||
|
- @openai/codex-sdk 0.77.0 - OpenAI Codex/GPT-4 integration
|
||||||
|
- @modelcontextprotocol/sdk 1.25.2 - Model Context Protocol servers
|
||||||
|
|
||||||
|
**Infrastructure - Internal Packages:**
|
||||||
|
|
||||||
|
- @automaker/types 1.0.0 - Shared TypeScript type definitions
|
||||||
|
- @automaker/utils 1.0.0 - Logging, error handling, utilities
|
||||||
|
- @automaker/platform 1.0.0 - Path management, security, process spawning
|
||||||
|
- @automaker/prompts 1.0.0 - AI prompt templates
|
||||||
|
- @automaker/model-resolver 1.0.0 - Claude model alias resolution
|
||||||
|
- @automaker/dependency-resolver 1.0.0 - Feature dependency ordering
|
||||||
|
- @automaker/git-utils 1.0.0 - Git operations & worktree management
|
||||||
|
- @automaker/spec-parser 1.0.0 - Project specification parsing
|
||||||
|
|
||||||
|
**Server Utilities:**
|
||||||
|
|
||||||
|
- express 5.2.1 - Web framework
|
||||||
|
- cors 2.8.5 - CORS middleware
|
||||||
|
- morgan 1.10.1 - HTTP request logger
|
||||||
|
- cookie-parser 1.4.7 - Cookie parsing middleware
|
||||||
|
- yaml 2.7.0 - YAML parsing and generation
|
||||||
|
|
||||||
|
**Type Definitions:**
|
||||||
|
|
||||||
|
- @types/express 5.0.6
|
||||||
|
- @types/node 22.19.3
|
||||||
|
- @types/react 19.2.7
|
||||||
|
- @types/react-dom 19.2.3
|
||||||
|
- @types/dagre 0.7.53
|
||||||
|
- @types/ws 8.18.1
|
||||||
|
- @types/cookie 0.6.0
|
||||||
|
- @types/cookie-parser 1.4.10
|
||||||
|
- @types/cors 2.8.19
|
||||||
|
- @types/morgan 1.9.10
|
||||||
|
|
||||||
|
**Optional Dependencies (Platform-specific):**
|
||||||
|
|
||||||
|
- lightningcss (various platforms) 1.29.2 - CSS parser (alternate to PostCSS)
|
||||||
|
- dmg-license 1.0.11 - DMG license dialog for macOS
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
**Environment:**
|
||||||
|
|
||||||
|
- `.env` and `.env.example` files in `apps/server/` and `apps/ui/`
|
||||||
|
- `dotenv` library loads variables from `.env` files
|
||||||
|
- Key env vars:
|
||||||
|
- `ANTHROPIC_API_KEY` - Claude API authentication
|
||||||
|
- `OPENAI_API_KEY` - OpenAI/Codex authentication
|
||||||
|
- `GITHUB_TOKEN` - GitHub API access
|
||||||
|
- `ANTHROPIC_BASE_URL` - Custom Claude endpoint (optional)
|
||||||
|
- `HOST` - Server bind address (default: 0.0.0.0)
|
||||||
|
- `HOSTNAME` - Hostname for URLs (default: localhost)
|
||||||
|
- `PORT` - Server port (default: 3008)
|
||||||
|
- `DATA_DIR` - Data storage directory (default: ./data)
|
||||||
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations
|
||||||
|
- `AUTOMAKER_MOCK_AGENT` - Enable mock agent for testing
|
||||||
|
- `AUTOMAKER_AUTO_LOGIN` - Skip login in dev (disabled in production)
|
||||||
|
- `VITE_HOSTNAME` - Frontend API hostname
|
||||||
|
|
||||||
|
**Build:**
|
||||||
|
|
||||||
|
- `apps/ui/electron-builder.config.json` or `apps/ui/package.json` build config
|
||||||
|
- Electron builder targets:
|
||||||
|
- macOS: DMG and ZIP
|
||||||
|
- Windows: NSIS installer
|
||||||
|
- Linux: AppImage, DEB, RPM
|
||||||
|
- Vite config: `apps/ui/vite.config.ts`, `apps/server/tsconfig.json`
|
||||||
|
- TypeScript config: `tsconfig.json` files in each package
|
||||||
|
|
||||||
|
## Platform Requirements
|
||||||
|
|
||||||
|
**Development:**
|
||||||
|
|
||||||
|
- Node.js 22.x
|
||||||
|
- npm (included with Node.js)
|
||||||
|
- Git (for worktree operations)
|
||||||
|
- Python (optional, for some dev scripts)
|
||||||
|
|
||||||
|
**Production:**
|
||||||
|
|
||||||
|
- Electron desktop app: Windows, macOS, Linux
|
||||||
|
- Web browser: Modern Chromium-based browsers
|
||||||
|
- Server: Any platform supporting Node.js 22.x
|
||||||
|
|
||||||
|
**Deployment Target:**
|
||||||
|
|
||||||
|
- Local desktop (Electron)
|
||||||
|
- Local web server (Express + Vite)
|
||||||
|
- Remote server deployment (Docker, systemd, or other orchestration)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Stack analysis: 2026-01-27_
|
||||||
340
.planning/codebase/STRUCTURE.md
Normal file
340
.planning/codebase/STRUCTURE.md
Normal file
@@ -0,0 +1,340 @@
|
|||||||
|
# Codebase Structure
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Directory Layout
|
||||||
|
|
||||||
|
```
|
||||||
|
automaker/
|
||||||
|
├── apps/ # Application packages
|
||||||
|
│ ├── ui/ # React + Electron frontend (port 3007)
|
||||||
|
│ │ ├── src/
|
||||||
|
│ │ │ ├── main.ts # Electron/Vite entry point
|
||||||
|
│ │ │ ├── app.tsx # Root React component (splash, router)
|
||||||
|
│ │ │ ├── renderer.tsx # Electron renderer entry
|
||||||
|
│ │ │ ├── routes/ # TanStack Router file-based routes
|
||||||
|
│ │ │ ├── components/ # React components (views, dialogs, UI, layout)
|
||||||
|
│ │ │ ├── store/ # Zustand state management
|
||||||
|
│ │ │ ├── hooks/ # Custom React hooks
|
||||||
|
│ │ │ ├── lib/ # Utilities (API client, electron, queries, etc.)
|
||||||
|
│ │ │ ├── electron/ # Electron main & preload process files
|
||||||
|
│ │ │ ├── config/ # UI configuration (fonts, themes, routes)
|
||||||
|
│ │ │ └── styles/ # CSS and theme files
|
||||||
|
│ │ ├── public/ # Static assets
|
||||||
|
│ │ └── tests/ # E2E Playwright tests
|
||||||
|
│ │
|
||||||
|
│ └── server/ # Express backend (port 3008)
|
||||||
|
│ ├── src/
|
||||||
|
│ │ ├── index.ts # Express app initialization, route mounting
|
||||||
|
│ │ ├── routes/ # REST API endpoints (30+ route folders)
|
||||||
|
│ │ ├── services/ # Business logic services
|
||||||
|
│ │ ├── providers/ # AI model provider implementations
|
||||||
|
│ │ ├── lib/ # Utilities (events, auth, helpers, etc.)
|
||||||
|
│ │ ├── middleware/ # Express middleware
|
||||||
|
│ │ └── types/ # Server-specific type definitions
|
||||||
|
│ └── tests/ # Unit tests (Vitest)
|
||||||
|
│
|
||||||
|
├── libs/ # Shared npm packages (@automaker/*)
|
||||||
|
│ ├── types/ # @automaker/types (no dependencies)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # Main export with all type definitions
|
||||||
|
│ │ ├── feature.ts # Feature, FeatureStatus, etc.
|
||||||
|
│ │ ├── provider.ts # Provider interfaces, model definitions
|
||||||
|
│ │ ├── settings.ts # Global and project settings types
|
||||||
|
│ │ ├── event.ts # Event types for real-time updates
|
||||||
|
│ │ ├── session.ts # AgentSession, conversation types
|
||||||
|
│ │ ├── model*.ts # Model-specific types (cursor, codex, gemini, etc.)
|
||||||
|
│ │ └── ... 20+ more type files
|
||||||
|
│ │
|
||||||
|
│ ├── utils/ # @automaker/utils (logging, errors, images, context)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── logger.ts # createLogger() with LogLevel enum
|
||||||
|
│ │ ├── errors.ts # classifyError(), error types
|
||||||
|
│ │ ├── image-utils.ts # Image processing, base64 encoding
|
||||||
|
│ │ ├── context-loader.ts # loadContextFiles() for AI prompts
|
||||||
|
│ │ └── ... more utilities
|
||||||
|
│ │
|
||||||
|
│ ├── platform/ # @automaker/platform (paths, security, OS)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # Path getters (getFeatureDir, getFeaturesDir, etc.)
|
||||||
|
│ │ ├── secure-fs.ts # Secure filesystem operations
|
||||||
|
│ │ └── config/ # Claude auth detection, allowed paths
|
||||||
|
│ │
|
||||||
|
│ ├── prompts/ # @automaker/prompts (AI prompt templates)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # Main prompts export
|
||||||
|
│ │ └── *-prompt.ts # Prompt templates for different features
|
||||||
|
│ │
|
||||||
|
│ ├── model-resolver/ # @automaker/model-resolver
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ └── index.ts # resolveModelString() for model aliases
|
||||||
|
│ │
|
||||||
|
│ ├── dependency-resolver/ # @automaker/dependency-resolver
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ └── index.ts # Resolve feature dependencies
|
||||||
|
│ │
|
||||||
|
│ ├── git-utils/ # @automaker/git-utils (git operations)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # getGitRepositoryDiffs(), worktree management
|
||||||
|
│ │ └── ... git helpers
|
||||||
|
│ │
|
||||||
|
│ ├── spec-parser/ # @automaker/spec-parser
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ └── ... spec parsing utilities
|
||||||
|
│ │
|
||||||
|
│ └── tsconfig.base.json # Base TypeScript config for all packages
|
||||||
|
│
|
||||||
|
├── .automaker/ # Project data directory (created by app)
|
||||||
|
│ ├── features/ # Feature storage
|
||||||
|
│ │ └── {featureId}/
|
||||||
|
│ │ ├── feature.json # Feature metadata and content
|
||||||
|
│ │ ├── agent-output.md # Agent execution results
|
||||||
|
│ │ └── images/ # Feature images
|
||||||
|
│ ├── context/ # Context files (CLAUDE.md, etc.)
|
||||||
|
│ ├── settings.json # Per-project settings
|
||||||
|
│ ├── spec.md # Project specification
|
||||||
|
│ └── analysis.json # Project structure analysis
|
||||||
|
│
|
||||||
|
├── data/ # Global data directory (default, configurable)
|
||||||
|
│ ├── settings.json # Global settings, profiles
|
||||||
|
│ ├── credentials.json # Encrypted API keys
|
||||||
|
│ ├── sessions-metadata.json # Chat session metadata
|
||||||
|
│ └── agent-sessions/ # Conversation histories
|
||||||
|
│
|
||||||
|
├── .planning/ # Generated documentation by GSD orchestrator
|
||||||
|
│ └── codebase/ # Codebase analysis documents
|
||||||
|
│ ├── ARCHITECTURE.md # Architecture patterns and layers
|
||||||
|
│ ├── STRUCTURE.md # This file
|
||||||
|
│ ├── STACK.md # Technology stack
|
||||||
|
│ ├── INTEGRATIONS.md # External API integrations
|
||||||
|
│ ├── CONVENTIONS.md # Code style and naming
|
||||||
|
│ ├── TESTING.md # Testing patterns
|
||||||
|
│ └── CONCERNS.md # Technical debt and issues
|
||||||
|
│
|
||||||
|
├── .github/ # GitHub Actions workflows
|
||||||
|
├── scripts/ # Build and utility scripts
|
||||||
|
├── tests/ # Test data and utilities
|
||||||
|
├── docs/ # Documentation
|
||||||
|
├── package.json # Root workspace config
|
||||||
|
├── package-lock.json # Lock file
|
||||||
|
├── CLAUDE.md # Project instructions for Claude Code
|
||||||
|
├── DEVELOPMENT_WORKFLOW.md # Development guidelines
|
||||||
|
└── README.md # Project overview
|
||||||
|
```
|
||||||
|
|
||||||
|
## Directory Purposes
|
||||||
|
|
||||||
|
**apps/ui/:**
|
||||||
|
|
||||||
|
- Purpose: React frontend for desktop (Electron) and web modes
|
||||||
|
- Build system: Vite 7 with TypeScript
|
||||||
|
- Styling: Tailwind CSS 4
|
||||||
|
- State: Zustand 5 with API persistence
|
||||||
|
- Routing: TanStack Router with file-based structure
|
||||||
|
- Desktop: Electron 39 with preload IPC bridge
|
||||||
|
|
||||||
|
**apps/server/:**
|
||||||
|
|
||||||
|
- Purpose: Express backend API and service layer
|
||||||
|
- Build system: TypeScript → JavaScript
|
||||||
|
- Runtime: Node.js 22.x (see Platform Requirements in STACK.md)
|
||||||
|
- WebSocket: ws library for real-time streaming
|
||||||
|
- Process management: node-pty for terminal isolation
|
||||||
|
|
||||||
|
**libs/types/:**
|
||||||
|
|
||||||
|
- Purpose: Central type definitions (no dependencies, fast import)
|
||||||
|
- Used by: All other packages and apps
|
||||||
|
- Pattern: Single namespace export from index.ts
|
||||||
|
- Build: Compiled to ESM only
|
||||||
|
|
||||||
|
**libs/utils/:**
|
||||||
|
|
||||||
|
- Purpose: Shared utilities for logging, errors, file operations, image processing
|
||||||
|
- Used by: Server, UI, other libraries
|
||||||
|
- Notable: `createLogger()`, `classifyError()`, `loadContextFiles()`, `readImageAsBase64()`
|
||||||
|
|
||||||
|
**libs/platform/:**
|
||||||
|
|
||||||
|
- Purpose: OS-agnostic path management and security enforcement
|
||||||
|
- Used by: Server services for file operations
|
||||||
|
- Notable: Path normalization, allowed directory enforcement, Claude auth detection
|
||||||
|
|
||||||
|
**libs/prompts/:**
|
||||||
|
|
||||||
|
- Purpose: AI prompt templates injected into agent context
|
||||||
|
- Used by: AgentService when executing features
|
||||||
|
- Pattern: Function exports that return prompt strings
|
||||||
|
|
||||||
|
## Key File Locations
|
||||||
|
|
||||||
|
**Entry Points:**
|
||||||
|
|
||||||
|
**Server:**
|
||||||
|
|
||||||
|
- `apps/server/src/index.ts`: Express server initialization, route mounting, WebSocket setup
|
||||||
|
|
||||||
|
**UI (Web):**
|
||||||
|
|
||||||
|
- `apps/ui/src/main.ts`: Vite entry point
|
||||||
|
- `apps/ui/src/app.tsx`: Root React component
|
||||||
|
|
||||||
|
**UI (Electron):**
|
||||||
|
|
||||||
|
- `apps/ui/src/main.ts`: Vite entry point
|
||||||
|
- `apps/ui/src/electron/main-process.ts`: Electron main process
|
||||||
|
- `apps/ui/src/preload.ts`: Electron preload script for IPC bridge
|
||||||
|
|
||||||
|
**Configuration:**
|
||||||
|
|
||||||
|
- `apps/server/src/index.ts`: PORT, HOST, HOSTNAME, DATA_DIR env vars
|
||||||
|
- `apps/ui/src/config/`: Theme options, fonts, model aliases
|
||||||
|
- `libs/types/src/settings.ts`: Settings schema
|
||||||
|
- `.env.local`: Local development overrides (git-ignored)
|
||||||
|
|
||||||
|
**Core Logic:**
|
||||||
|
|
||||||
|
**Server:**
|
||||||
|
|
||||||
|
- `apps/server/src/services/agent-service.ts`: AI agent execution engine (31KB)
|
||||||
|
- `apps/server/src/services/auto-mode-service.ts`: Feature batching and automation (216KB - largest)
|
||||||
|
- `apps/server/src/services/feature-loader.ts`: Feature persistence and loading
|
||||||
|
- `apps/server/src/services/settings-service.ts`: Settings management
|
||||||
|
- `apps/server/src/providers/provider-factory.ts`: AI provider selection
|
||||||
|
|
||||||
|
**UI:**
|
||||||
|
|
||||||
|
- `apps/ui/src/store/app-store.ts`: Global state (84KB - largest frontend file)
|
||||||
|
- `apps/ui/src/lib/http-api-client.ts`: API client with auth (92KB)
|
||||||
|
- `apps/ui/src/components/views/board-view.tsx`: Kanban board (70KB)
|
||||||
|
- `apps/ui/src/routes/__root.tsx`: Root layout with session init (32KB)
|
||||||
|
|
||||||
|
**Testing:**
|
||||||
|
|
||||||
|
**E2E Tests:**
|
||||||
|
|
||||||
|
- `apps/ui/tests/`: Playwright tests organized by feature area
|
||||||
|
- `settings/`, `features/`, `projects/`, `agent/`, `utils/`, `context/`
|
||||||
|
|
||||||
|
**Unit Tests:**
|
||||||
|
|
||||||
|
- `libs/*/tests/`: Package-specific Vitest tests
|
||||||
|
- `apps/server/src/tests/`: Server integration tests
|
||||||
|
|
||||||
|
**Test Config:**
|
||||||
|
|
||||||
|
- `vitest.config.ts`: Root Vitest configuration
|
||||||
|
- `apps/ui/playwright.config.ts`: Playwright configuration
|
||||||
|
|
||||||
|
## Naming Conventions
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
|
||||||
|
- **Components:** kebab-case.tsx (e.g., `board-view.tsx`, `session-manager.tsx`)
|
||||||
|
- **Services:** kebab-case-service.ts (e.g., `agent-service.ts`, `settings-service.ts`)
|
||||||
|
- **Hooks:** use-kebab-case.ts (e.g., `use-auto-mode.ts`, `use-settings-sync.ts`)
|
||||||
|
- **Utilities:** kebab-case.ts (e.g., `api-fetch.ts`, `log-parser.ts`)
|
||||||
|
- **Routes:** kebab-case with index.ts pattern (e.g., `routes/agent/index.ts`)
|
||||||
|
- **Tests:** `*.test.ts` or `*.spec.ts` (co-located with source)
|
||||||
|
|
||||||
|
**Directories:**
|
||||||
|
|
||||||
|
- **Feature domains:** kebab-case (e.g., `auto-mode/`, `event-history/`, `project-settings-view/`)
|
||||||
|
- **Type categories:** kebab-case plural (e.g., `types/`, `services/`, `providers/`, `routes/`)
|
||||||
|
- **Shared utilities:** kebab-case (e.g., `lib/`, `utils/`, `hooks/`)
|
||||||
|
|
||||||
|
**TypeScript:**
|
||||||
|
|
||||||
|
- **Types:** PascalCase (e.g., `Feature`, `AgentSession`, `ProviderMessage`)
|
||||||
|
- **Interfaces:** PascalCase (e.g., `EventEmitter`, `ProviderFactory`)
|
||||||
|
- **Enums:** PascalCase (e.g., `LogLevel`, `FeatureStatus`)
|
||||||
|
- **Functions:** camelCase (e.g., `createLogger()`, `classifyError()`)
|
||||||
|
- **Constants:** UPPER_SNAKE_CASE (e.g., `DEFAULT_TIMEOUT_MS`, `MAX_RETRIES`)
|
||||||
|
- **Variables:** camelCase (e.g., `featureId`, `settingsService`)
|
||||||
|
|
||||||
|
## Where to Add New Code
|
||||||
|
|
||||||
|
**New Feature (end-to-end):**
|
||||||
|
|
||||||
|
- API Route: `apps/server/src/routes/{feature-name}/index.ts`
|
||||||
|
- Service Logic: `apps/server/src/services/{feature-name}-service.ts`
|
||||||
|
- UI Route: `apps/ui/src/routes/{feature-name}.tsx` (simple) or `{feature-name}/` (complex with subdir)
|
||||||
|
- Store: `apps/ui/src/store/{feature-name}-store.ts` (if complex state)
|
||||||
|
- Tests: `apps/ui/tests/{feature-name}/` or `apps/server/src/tests/`
|
||||||
|
|
||||||
|
**New Component/Module:**
|
||||||
|
|
||||||
|
- View Components: `apps/ui/src/components/views/{component-name}/`
|
||||||
|
- Dialog Components: `apps/ui/src/components/dialogs/{dialog-name}.tsx`
|
||||||
|
- Shared Components: `apps/ui/src/components/shared/` or `components/ui/` (shadcn)
|
||||||
|
- Layout Components: `apps/ui/src/components/layout/`
|
||||||
|
|
||||||
|
**Utilities:**
|
||||||
|
|
||||||
|
- New Library: Create in `libs/{package-name}/` with package.json and tsconfig.json
|
||||||
|
- Server Utilities: `apps/server/src/lib/{utility-name}.ts`
|
||||||
|
- Shared Utilities: Extend `libs/utils/src/` or create new lib if self-contained
|
||||||
|
- UI Utilities: `apps/ui/src/lib/{utility-name}.ts`
|
||||||
|
|
||||||
|
**New Provider (AI Model):**
|
||||||
|
|
||||||
|
- Implementation: `apps/server/src/providers/{provider-name}-provider.ts`
|
||||||
|
- Types: Add to `libs/types/src/{provider-name}-models.ts`
|
||||||
|
- Model Resolver: Update `libs/model-resolver/src/index.ts` with model alias mapping
|
||||||
|
- Settings: Update `libs/types/src/settings.ts` for provider-specific config
|
||||||
|
|
||||||
|
## Special Directories
|
||||||
|
|
||||||
|
**apps/ui/electron/:**
|
||||||
|
|
||||||
|
- Purpose: Electron-specific code (main process, IPC handlers, native APIs)
|
||||||
|
- Generated: Yes (preload.ts)
|
||||||
|
- Committed: Yes
|
||||||
|
|
||||||
|
**apps/ui/public/:**
|
||||||
|
|
||||||
|
- Purpose: Static assets (sounds, images, icons)
|
||||||
|
- Generated: No
|
||||||
|
- Committed: Yes
|
||||||
|
|
||||||
|
**apps/ui/dist/:**
|
||||||
|
|
||||||
|
- Purpose: Built web application
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
**apps/ui/dist-electron/:**
|
||||||
|
|
||||||
|
- Purpose: Built Electron app bundle
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
**.automaker/features/{featureId}/:**
|
||||||
|
|
||||||
|
- Purpose: Per-feature persistent storage
|
||||||
|
- Structure: feature.json, agent-output.md, images/
|
||||||
|
- Generated: Yes (at runtime)
|
||||||
|
- Committed: Yes (tracked in project git)
|
||||||
|
|
||||||
|
**data/:**
|
||||||
|
|
||||||
|
- Purpose: Global data directory (global settings, credentials, sessions)
|
||||||
|
- Generated: Yes (created at first run)
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
- Configurable: Via DATA_DIR env var
|
||||||
|
|
||||||
|
**node_modules/:**
|
||||||
|
|
||||||
|
- Purpose: Installed dependencies
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
**dist/**, **build/:**
|
||||||
|
|
||||||
|
- Purpose: Build artifacts
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Structure analysis: 2026-01-27_
|
||||||
389
.planning/codebase/TESTING.md
Normal file
389
.planning/codebase/TESTING.md
Normal file
@@ -0,0 +1,389 @@
|
|||||||
|
# Testing Patterns
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Test Framework
|
||||||
|
|
||||||
|
**Runner:**
|
||||||
|
|
||||||
|
- Vitest 4.0.16 (for unit and integration tests)
|
||||||
|
- Playwright (for E2E tests)
|
||||||
|
- Config: `apps/server/vitest.config.ts`, `libs/*/vitest.config.ts`, `apps/ui/playwright.config.ts`
|
||||||
|
|
||||||
|
**Assertion Library:**
|
||||||
|
|
||||||
|
- Vitest built-in expect assertions
|
||||||
|
- API: `expect().toBe()`, `expect().toEqual()`, `expect().toHaveLength()`, `expect().toHaveProperty()`
|
||||||
|
|
||||||
|
**Run Commands:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm run test # E2E tests (Playwright, headless)
|
||||||
|
npm run test:headed # E2E tests with browser visible
|
||||||
|
npm run test:packages # All shared package unit tests (vitest)
|
||||||
|
npm run test:server # Server unit tests (vitest run)
|
||||||
|
npm run test:server:coverage # Server tests with coverage report
|
||||||
|
npm run test:all # All tests (packages + server)
|
||||||
|
npm run test:unit # Vitest run (all projects)
|
||||||
|
npm run test:unit:watch # Vitest watch mode
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test File Organization
|
||||||
|
|
||||||
|
**Location:**
|
||||||
|
|
||||||
|
- Co-located with source: `src/module.ts` has `tests/unit/module.test.ts`
|
||||||
|
- Server tests: `apps/server/tests/` (separate directory)
|
||||||
|
- Library tests: `libs/*/tests/` (each package)
|
||||||
|
- E2E tests: `apps/ui/tests/` (Playwright)
|
||||||
|
|
||||||
|
**Naming:**
|
||||||
|
|
||||||
|
- Pattern: `{moduleName}.test.ts` for unit tests
|
||||||
|
- Pattern: `{moduleName}.spec.ts` for specification tests
|
||||||
|
- Glob pattern: `tests/**/*.test.ts`, `tests/**/*.spec.ts`
|
||||||
|
|
||||||
|
**Structure:**
|
||||||
|
|
||||||
|
```
|
||||||
|
apps/server/
|
||||||
|
├── tests/
|
||||||
|
│ ├── setup.ts # Global test setup
|
||||||
|
│ ├── unit/
|
||||||
|
│ │ ├── providers/ # Provider tests
|
||||||
|
│ │ │ ├── claude-provider.test.ts
|
||||||
|
│ │ │ ├── codex-provider.test.ts
|
||||||
|
│ │ │ └── base-provider.test.ts
|
||||||
|
│ │ └── services/
|
||||||
|
│ └── utils/
|
||||||
|
│ └── helpers.ts # Test utilities
|
||||||
|
└── src/
|
||||||
|
|
||||||
|
libs/platform/
|
||||||
|
├── tests/
|
||||||
|
│ ├── paths.test.ts
|
||||||
|
│ ├── security.test.ts
|
||||||
|
│ ├── subprocess.test.ts
|
||||||
|
│ └── node-finder.test.ts
|
||||||
|
└── src/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Structure
|
||||||
|
|
||||||
|
**Suite Organization:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { FeatureLoader } from '@/services/feature-loader.js';
|
||||||
|
|
||||||
|
describe('feature-loader.ts', () => {
|
||||||
|
let featureLoader: FeatureLoader;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
featureLoader = new FeatureLoader();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(async () => {
|
||||||
|
// Cleanup resources
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('methodName', () => {
|
||||||
|
it('should do specific thing', () => {
|
||||||
|
expect(result).toBe(expected);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
- Setup pattern: `beforeEach()` initializes test instance, clears mocks
|
||||||
|
- Teardown pattern: `afterEach()` cleans up temp directories, removes created files
|
||||||
|
- Assertion pattern: one logical assertion per test (or multiple closely related)
|
||||||
|
- Test isolation: each test runs with fresh setup
|
||||||
|
|
||||||
|
## Mocking
|
||||||
|
|
||||||
|
**Framework:**
|
||||||
|
|
||||||
|
- Vitest `vi` module: `vi.mock()`, `vi.mocked()`, `vi.clearAllMocks()`
|
||||||
|
- Mock patterns: module mocking, function spying, return value mocking
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
Module mocking:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
vi.mock('@anthropic-ai/claude-agent-sdk');
|
||||||
|
// In test:
|
||||||
|
vi.mocked(sdk.query).mockReturnValue(
|
||||||
|
(async function* () {
|
||||||
|
yield { type: 'text', text: 'Response 1' };
|
||||||
|
})()
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
Async generator mocking (for streaming APIs):
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const generator = provider.executeQuery({
|
||||||
|
prompt: 'Hello',
|
||||||
|
model: 'claude-opus-4-5-20251101',
|
||||||
|
cwd: '/test',
|
||||||
|
});
|
||||||
|
const results = await collectAsyncGenerator(generator);
|
||||||
|
```
|
||||||
|
|
||||||
|
Partial mocking with spies:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const provider = new TestProvider();
|
||||||
|
const spy = vi.spyOn(provider, 'getName');
|
||||||
|
spy.mockReturnValue('mocked-name');
|
||||||
|
```
|
||||||
|
|
||||||
|
**What to Mock:**
|
||||||
|
|
||||||
|
- External APIs (Claude SDK, GitHub SDK, cloud services)
|
||||||
|
- File system operations (use temp directories instead when possible)
|
||||||
|
- Network calls
|
||||||
|
- Process execution
|
||||||
|
- Time-dependent operations
|
||||||
|
|
||||||
|
**What NOT to Mock:**
|
||||||
|
|
||||||
|
- Core business logic (test the actual implementation)
|
||||||
|
- Type definitions
|
||||||
|
- Internal module dependencies (test integration with real services)
|
||||||
|
- Standard library functions (fs, path, etc. - use fixtures instead)
|
||||||
|
|
||||||
|
## Fixtures and Factories
|
||||||
|
|
||||||
|
**Test Data:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Test helper for collecting async generator results
|
||||||
|
async function collectAsyncGenerator<T>(generator: AsyncGenerator<T>): Promise<T[]> {
|
||||||
|
const results: T[] = [];
|
||||||
|
for await (const item of generator) {
|
||||||
|
results.push(item);
|
||||||
|
}
|
||||||
|
return results;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporary directory fixture
|
||||||
|
beforeEach(async () => {
|
||||||
|
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'test-'));
|
||||||
|
projectPath = path.join(tempDir, 'test-project');
|
||||||
|
await fs.mkdir(projectPath, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(async () => {
|
||||||
|
try {
|
||||||
|
await fs.rm(tempDir, { recursive: true, force: true });
|
||||||
|
} catch (error) {
|
||||||
|
// Ignore cleanup errors
|
||||||
|
}
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Location:**
|
||||||
|
|
||||||
|
- Inline in test files for simple fixtures
|
||||||
|
- `tests/utils/helpers.ts` for shared test utilities
|
||||||
|
- Factory functions for complex test objects: `createTestProvider()`, `createMockFeature()`
|
||||||
|
|
||||||
|
## Coverage
|
||||||
|
|
||||||
|
**Requirements (Server):**
|
||||||
|
|
||||||
|
- Lines: 60%
|
||||||
|
- Functions: 75%
|
||||||
|
- Branches: 55%
|
||||||
|
- Statements: 60%
|
||||||
|
- Config: `apps/server/vitest.config.ts` with thresholds
|
||||||
|
|
||||||
|
**Excluded from Coverage:**
|
||||||
|
|
||||||
|
- Route handlers: tested via integration/E2E tests
|
||||||
|
- Type re-exports
|
||||||
|
- Middleware: tested via integration tests
|
||||||
|
- Prompt templates
|
||||||
|
- MCP integration: awaits MCP SDK integration tests
|
||||||
|
- Provider CLI integrations: awaits integration tests
|
||||||
|
|
||||||
|
**View Coverage:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm run test:server:coverage # Generate coverage report
|
||||||
|
# Opens HTML report in: apps/server/coverage/index.html
|
||||||
|
```
|
||||||
|
|
||||||
|
**Coverage Tools:**
|
||||||
|
|
||||||
|
- Provider: v8
|
||||||
|
- Reporters: text, json, html, lcov
|
||||||
|
- File inclusion: `src/**/*.ts`
|
||||||
|
- File exclusion: `src/**/*.d.ts`, specific service files in thresholds
|
||||||
|
|
||||||
|
## Test Types
|
||||||
|
|
||||||
|
**Unit Tests:**
|
||||||
|
|
||||||
|
- Scope: Individual functions and methods
|
||||||
|
- Approach: Test inputs → outputs with mocked dependencies
|
||||||
|
- Location: `apps/server/tests/unit/`
|
||||||
|
- Examples:
|
||||||
|
- Provider executeQuery() with mocked SDK
|
||||||
|
- Path construction functions with assertions
|
||||||
|
- Error classification with different error types
|
||||||
|
- Config validation with various inputs
|
||||||
|
|
||||||
|
**Integration Tests:**
|
||||||
|
|
||||||
|
- Scope: Multiple modules working together
|
||||||
|
- Approach: Test actual service calls with real file system or temp directories
|
||||||
|
- Pattern: Setup data → call method → verify results
|
||||||
|
- Example: Feature loader reading/writing feature.json files
|
||||||
|
- Example: Auto-mode service coordinating with multiple services
|
||||||
|
|
||||||
|
**E2E Tests:**
|
||||||
|
|
||||||
|
- Framework: Playwright
|
||||||
|
- Scope: Full user workflows from UI
|
||||||
|
- Location: `apps/ui/tests/`
|
||||||
|
- Config: `apps/ui/playwright.config.ts`
|
||||||
|
- Setup:
|
||||||
|
- Backend server with mock agent enabled
|
||||||
|
- Frontend Vite dev server
|
||||||
|
- Sequential execution (workers: 1) to avoid auth conflicts
|
||||||
|
- Screenshots/traces on failure
|
||||||
|
- Auth: Global setup authentication in `tests/global-setup.ts`
|
||||||
|
- Fixtures: `tests/e2e-fixtures/` for test project data
|
||||||
|
|
||||||
|
## Common Patterns
|
||||||
|
|
||||||
|
**Async Testing:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
it('should execute async operation', async () => {
|
||||||
|
const result = await featureLoader.loadFeature(projectPath, featureId);
|
||||||
|
expect(result).toBeDefined();
|
||||||
|
expect(result.id).toBe(featureId);
|
||||||
|
});
|
||||||
|
|
||||||
|
// For streams/generators:
|
||||||
|
const generator = provider.executeQuery({ prompt, model, cwd });
|
||||||
|
const results = await collectAsyncGenerator(generator);
|
||||||
|
expect(results).toHaveLength(2);
|
||||||
|
```
|
||||||
|
|
||||||
|
**Error Testing:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
it('should throw error when feature not found', async () => {
|
||||||
|
await expect(featureLoader.getFeature(projectPath, 'nonexistent')).rejects.toThrow('not found');
|
||||||
|
});
|
||||||
|
|
||||||
|
// Testing error classification:
|
||||||
|
const errorInfo = classifyError(new Error('ENOENT'));
|
||||||
|
expect(errorInfo.category).toBe('FileSystem');
|
||||||
|
```
|
||||||
|
|
||||||
|
**Fixture Setup:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
it('should create feature with images', async () => {
|
||||||
|
// Setup: create temp feature directory
|
||||||
|
const featureDir = path.join(projectPath, '.automaker', 'features', featureId);
|
||||||
|
await fs.mkdir(featureDir, { recursive: true });
|
||||||
|
|
||||||
|
// Act: perform operation
|
||||||
|
const result = await featureLoader.updateFeature(projectPath, {
|
||||||
|
id: featureId,
|
||||||
|
imagePaths: ['/temp/image.png'],
|
||||||
|
});
|
||||||
|
|
||||||
|
// Assert: verify file operations
|
||||||
|
const migratedPath = path.join(featureDir, 'images', 'image.png');
|
||||||
|
expect(fs.existsSync(migratedPath)).toBe(true);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Mock Reset Pattern:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// In vitest.config.ts:
|
||||||
|
mockReset: true, // Reset all mocks before each test
|
||||||
|
restoreMocks: true, // Restore original implementations
|
||||||
|
clearMocks: true, // Clear mock call history
|
||||||
|
|
||||||
|
// In test:
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
delete process.env.ANTHROPIC_API_KEY;
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Configuration
|
||||||
|
|
||||||
|
**Vitest Config Patterns:**
|
||||||
|
|
||||||
|
Server config (`apps/server/vitest.config.ts`):
|
||||||
|
|
||||||
|
- Environment: node
|
||||||
|
- Globals: true (describe/it without imports)
|
||||||
|
- Setup files: `./tests/setup.ts`
|
||||||
|
- Alias resolution: resolves `@automaker/*` to source files for mocking
|
||||||
|
|
||||||
|
Library config:
|
||||||
|
|
||||||
|
- Simpler setup: just environment and globals
|
||||||
|
- Coverage with high thresholds (90%+ lines)
|
||||||
|
|
||||||
|
**Global Setup:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// tests/setup.ts
|
||||||
|
import { vi, beforeEach } from 'vitest';
|
||||||
|
|
||||||
|
process.env.NODE_ENV = 'test';
|
||||||
|
process.env.DATA_DIR = '/tmp/test-data';
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing Best Practices
|
||||||
|
|
||||||
|
**Isolation:**
|
||||||
|
|
||||||
|
- Each test is independent (no state sharing)
|
||||||
|
- Cleanup temp files in afterEach
|
||||||
|
- Reset mocks and environment variables in beforeEach
|
||||||
|
|
||||||
|
**Clarity:**
|
||||||
|
|
||||||
|
- Descriptive test names: "should do X when Y condition"
|
||||||
|
- One logical assertion per test
|
||||||
|
- Clear arrange-act-assert structure
|
||||||
|
|
||||||
|
**Speed:**
|
||||||
|
|
||||||
|
- Mock external services
|
||||||
|
- Use in-memory temp directories
|
||||||
|
- Avoid real network calls
|
||||||
|
- Sequential E2E tests to prevent conflicts
|
||||||
|
|
||||||
|
**Maintainability:**
|
||||||
|
|
||||||
|
- Use beforeEach/afterEach for common setup
|
||||||
|
- Extract test helpers to `tests/utils/`
|
||||||
|
- Keep test data simple and local
|
||||||
|
- Mock consistently across tests
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Testing analysis: 2026-01-27_
|
||||||
@@ -161,7 +161,7 @@ Use `resolveModelString()` from `@automaker/model-resolver` to convert model ali
|
|||||||
|
|
||||||
- `haiku` → `claude-haiku-4-5`
|
- `haiku` → `claude-haiku-4-5`
|
||||||
- `sonnet` → `claude-sonnet-4-20250514`
|
- `sonnet` → `claude-sonnet-4-20250514`
|
||||||
- `opus` → `claude-opus-4-5-20251101`
|
- `opus` → `claude-opus-4-6`
|
||||||
|
|
||||||
## Environment Variables
|
## Environment Variables
|
||||||
|
|
||||||
|
|||||||
@@ -1,253 +0,0 @@
|
|||||||
# Development Workflow
|
|
||||||
|
|
||||||
This document defines the standard workflow for keeping a branch in sync with the upstream
|
|
||||||
release candidate (RC) and for shipping feature work. It is paired with `check-sync.sh`.
|
|
||||||
|
|
||||||
## Quick Decision Rule
|
|
||||||
|
|
||||||
1. Ask the user to select a workflow:
|
|
||||||
- **Sync Workflow** → you are maintaining the current RC branch with fixes/improvements
|
|
||||||
and will push the same fixes to both origin and upstream RC when you have local
|
|
||||||
commits to publish.
|
|
||||||
- **PR Workflow** → you are starting new feature work on a new branch; upstream updates
|
|
||||||
happen via PR only.
|
|
||||||
2. After the user selects, run:
|
|
||||||
```bash
|
|
||||||
./check-sync.sh
|
|
||||||
```
|
|
||||||
3. Use the status output to confirm alignment. If it reports **diverged**, default to
|
|
||||||
merging `upstream/<TARGET_RC>` into the current branch and preserving local commits.
|
|
||||||
For Sync Workflow, when the working tree is clean and you are behind upstream RC,
|
|
||||||
proceed with the fetch + merge without asking for additional confirmation.
|
|
||||||
|
|
||||||
## Target RC Resolution
|
|
||||||
|
|
||||||
The target RC is resolved dynamically so the workflow stays current as the RC changes.
|
|
||||||
|
|
||||||
Resolution order:
|
|
||||||
|
|
||||||
1. Latest `upstream/v*rc` branch (auto-detected)
|
|
||||||
2. `upstream/HEAD` (fallback)
|
|
||||||
3. If neither is available, you must pass `--rc <branch>`
|
|
||||||
|
|
||||||
Override for a single run:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./check-sync.sh --rc <rc-branch>
|
|
||||||
```
|
|
||||||
|
|
||||||
## Pre-Flight Checklist
|
|
||||||
|
|
||||||
1. Confirm a clean working tree:
|
|
||||||
```bash
|
|
||||||
git status
|
|
||||||
```
|
|
||||||
2. Confirm the current branch:
|
|
||||||
```bash
|
|
||||||
git branch --show-current
|
|
||||||
```
|
|
||||||
3. Ensure remotes exist (origin + upstream):
|
|
||||||
```bash
|
|
||||||
git remote -v
|
|
||||||
```
|
|
||||||
|
|
||||||
## Sync Workflow (Upstream Sync)
|
|
||||||
|
|
||||||
Use this flow when you are updating the current branch with fixes or improvements and
|
|
||||||
intend to keep origin and upstream RC in lockstep.
|
|
||||||
|
|
||||||
1. **Check sync status**
|
|
||||||
```bash
|
|
||||||
./check-sync.sh
|
|
||||||
```
|
|
||||||
2. **Update from upstream RC before editing (no pulls)**
|
|
||||||
- **Behind upstream RC** → fetch and merge RC into your branch:
|
|
||||||
```bash
|
|
||||||
git fetch upstream
|
|
||||||
git merge upstream/<TARGET_RC> --no-edit
|
|
||||||
```
|
|
||||||
When the working tree is clean and the user selected Sync Workflow, proceed without
|
|
||||||
an extra confirmation prompt.
|
|
||||||
- **Diverged** → stop and resolve manually.
|
|
||||||
3. **Resolve conflicts if needed**
|
|
||||||
- Handle conflicts intelligently: preserve upstream behavior and your local intent.
|
|
||||||
4. **Make changes and commit (if you are delivering fixes)**
|
|
||||||
```bash
|
|
||||||
git add -A
|
|
||||||
git commit -m "type: description"
|
|
||||||
```
|
|
||||||
5. **Build to verify**
|
|
||||||
```bash
|
|
||||||
npm run build:packages
|
|
||||||
npm run build
|
|
||||||
```
|
|
||||||
6. **Push after a successful merge to keep remotes aligned**
|
|
||||||
- If you only merged upstream RC changes, push **origin only** to sync your fork:
|
|
||||||
```bash
|
|
||||||
git push origin <branch>
|
|
||||||
```
|
|
||||||
- If you have local fixes to publish, push **origin + upstream**:
|
|
||||||
```bash
|
|
||||||
git push origin <branch>
|
|
||||||
git push upstream <branch>:<TARGET_RC>
|
|
||||||
```
|
|
||||||
- Always ask the user which push to perform.
|
|
||||||
- Origin (origin-only sync):
|
|
||||||
```bash
|
|
||||||
git push origin <branch>
|
|
||||||
```
|
|
||||||
- Upstream RC (publish the same fixes when you have local commits):
|
|
||||||
```bash
|
|
||||||
git push upstream <branch>:<TARGET_RC>
|
|
||||||
```
|
|
||||||
7. **Re-check sync**
|
|
||||||
```bash
|
|
||||||
./check-sync.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
## PR Workflow (Feature Work)
|
|
||||||
|
|
||||||
Use this flow only for new feature work on a new branch. Do not push to upstream RC.
|
|
||||||
|
|
||||||
1. **Create or switch to a feature branch**
|
|
||||||
```bash
|
|
||||||
git checkout -b <branch>
|
|
||||||
```
|
|
||||||
2. **Make changes and commit**
|
|
||||||
```bash
|
|
||||||
git add -A
|
|
||||||
git commit -m "type: description"
|
|
||||||
```
|
|
||||||
3. **Merge upstream RC before shipping**
|
|
||||||
```bash
|
|
||||||
git merge upstream/<TARGET_RC> --no-edit
|
|
||||||
```
|
|
||||||
4. **Build and/or test**
|
|
||||||
```bash
|
|
||||||
npm run build:packages
|
|
||||||
npm run build
|
|
||||||
```
|
|
||||||
5. **Push to origin**
|
|
||||||
```bash
|
|
||||||
git push -u origin <branch>
|
|
||||||
```
|
|
||||||
6. **Create or update the PR**
|
|
||||||
- Use `gh pr create` or the GitHub UI.
|
|
||||||
7. **Review and follow-up**
|
|
||||||
|
|
||||||
- Apply feedback, commit changes, and push again.
|
|
||||||
- Re-run `./check-sync.sh` if additional upstream sync is needed.
|
|
||||||
|
|
||||||
## Conflict Resolution Checklist
|
|
||||||
|
|
||||||
1. Identify which changes are from upstream vs. local.
|
|
||||||
2. Preserve both behaviors where possible; avoid dropping either side.
|
|
||||||
3. Prefer minimal, safe integrations over refactors.
|
|
||||||
4. Re-run build commands after resolving conflicts.
|
|
||||||
5. Re-run `./check-sync.sh` to confirm status.
|
|
||||||
|
|
||||||
## Build/Test Matrix
|
|
||||||
|
|
||||||
- **Sync Workflow**: `npm run build:packages` and `npm run build`.
|
|
||||||
- **PR Workflow**: `npm run build:packages` and `npm run build` (plus relevant tests).
|
|
||||||
|
|
||||||
## Post-Sync Verification
|
|
||||||
|
|
||||||
1. `git status` should be clean.
|
|
||||||
2. `./check-sync.sh` should show expected alignment.
|
|
||||||
3. Verify recent commits with:
|
|
||||||
```bash
|
|
||||||
git log --oneline -5
|
|
||||||
```
|
|
||||||
|
|
||||||
## check-sync.sh Usage
|
|
||||||
|
|
||||||
- Uses dynamic Target RC resolution (see above).
|
|
||||||
- Override target RC:
|
|
||||||
```bash
|
|
||||||
./check-sync.sh --rc <rc-branch>
|
|
||||||
```
|
|
||||||
- Optional preview limit:
|
|
||||||
```bash
|
|
||||||
./check-sync.sh --preview 10
|
|
||||||
```
|
|
||||||
- The script prints sync status for both origin and upstream and previews recent commits
|
|
||||||
when you are behind.
|
|
||||||
|
|
||||||
## Stop Conditions
|
|
||||||
|
|
||||||
Stop and ask for guidance if any of the following are true:
|
|
||||||
|
|
||||||
- The working tree is dirty and you are about to merge or push.
|
|
||||||
- `./check-sync.sh` reports **diverged** during PR Workflow, or a merge cannot be completed.
|
|
||||||
- The script cannot resolve a target RC and requests `--rc`.
|
|
||||||
- A build fails after sync or conflict resolution.
|
|
||||||
|
|
||||||
## AI Agent Guardrails
|
|
||||||
|
|
||||||
- Always run `./check-sync.sh` before merges or pushes.
|
|
||||||
- Always ask for explicit user approval before any push command.
|
|
||||||
- Do not ask for additional confirmation before a Sync Workflow fetch + merge when the
|
|
||||||
working tree is clean and the user has already selected the Sync Workflow.
|
|
||||||
- Choose Sync vs PR workflow based on intent (RC maintenance vs new feature work), not
|
|
||||||
on the script's workflow hint.
|
|
||||||
- Only use force push when the user explicitly requests a history rewrite.
|
|
||||||
- Ask for explicit approval before dependency installs, branch deletion, or destructive operations.
|
|
||||||
- When resolving merge conflicts, preserve both upstream changes and local intent where possible.
|
|
||||||
- Do not create or switch to new branches unless the user explicitly requests it.
|
|
||||||
|
|
||||||
## AI Agent Decision Guidance
|
|
||||||
|
|
||||||
Agents should provide concrete, task-specific suggestions instead of repeatedly asking
|
|
||||||
open-ended questions. Use the user's stated goal and the `./check-sync.sh` status to
|
|
||||||
propose a default path plus one or two alternatives, and only ask for confirmation when
|
|
||||||
an action requires explicit approval.
|
|
||||||
|
|
||||||
Default behavior:
|
|
||||||
|
|
||||||
- If the intent is RC maintenance, recommend the Sync Workflow and proceed with
|
|
||||||
safe preparation steps (status checks, previews). If the branch is behind upstream RC,
|
|
||||||
fetch and merge without additional confirmation when the working tree is clean, then
|
|
||||||
push to origin to keep the fork aligned. Push upstream only when there are local fixes
|
|
||||||
to publish.
|
|
||||||
- If the intent is new feature work, recommend the PR Workflow and proceed with safe
|
|
||||||
preparation steps (status checks, identifying scope). Ask for approval before merges,
|
|
||||||
pushes, or dependency installs.
|
|
||||||
- If `./check-sync.sh` reports **diverged** during Sync Workflow, merge
|
|
||||||
`upstream/<TARGET_RC>` into the current branch and preserve local commits.
|
|
||||||
- If `./check-sync.sh` reports **diverged** during PR Workflow, stop and ask for guidance
|
|
||||||
with a short explanation of the divergence and the minimal options to resolve it.
|
|
||||||
If the user's intent is RC maintenance, prefer the Sync Workflow regardless of the
|
|
||||||
script hint. When the intent is new feature work, use the PR Workflow and avoid upstream
|
|
||||||
RC pushes.
|
|
||||||
|
|
||||||
Suggestion format (keep it short):
|
|
||||||
|
|
||||||
- **Recommended**: one sentence with the default path and why it fits the task.
|
|
||||||
- **Alternatives**: one or two options with the tradeoff or prerequisite.
|
|
||||||
- **Approval points**: mention any upcoming actions that need explicit approval (exclude sync
|
|
||||||
workflow pushes and merges).
|
|
||||||
|
|
||||||
## Failure Modes and How to Avoid Them
|
|
||||||
|
|
||||||
Sync Workflow:
|
|
||||||
|
|
||||||
- Wrong RC target: verify the auto-detected RC in `./check-sync.sh` output before merging.
|
|
||||||
- Diverged from upstream RC: stop and resolve manually before any merge or push.
|
|
||||||
- Dirty working tree: commit or stash before syncing to avoid accidental merges.
|
|
||||||
- Missing remotes: ensure both `origin` and `upstream` are configured before syncing.
|
|
||||||
- Build breaks after sync: run `npm run build:packages` and `npm run build` before pushing.
|
|
||||||
|
|
||||||
PR Workflow:
|
|
||||||
|
|
||||||
- Branch not synced to current RC: re-run `./check-sync.sh` and merge RC before shipping.
|
|
||||||
- Pushing the wrong branch: confirm `git branch --show-current` before pushing.
|
|
||||||
- Unreviewed changes: always commit and push to origin before opening or updating a PR.
|
|
||||||
- Skipped tests/builds: run the build commands before declaring the PR ready.
|
|
||||||
|
|
||||||
## Notes
|
|
||||||
|
|
||||||
- Avoid merging with uncommitted changes; commit or stash first.
|
|
||||||
- Prefer merge over rebase for PR branches; rebases rewrite history and often require a force push,
|
|
||||||
which should only be done with an explicit user request.
|
|
||||||
- Use clear, conventional commit messages and split unrelated changes into separate commits.
|
|
||||||
10
Dockerfile
10
Dockerfile
@@ -118,6 +118,7 @@ RUN curl -fsSL https://opencode.ai/install | bash && \
|
|||||||
echo "=== Checking OpenCode CLI installation ===" && \
|
echo "=== Checking OpenCode CLI installation ===" && \
|
||||||
ls -la /home/automaker/.local/bin/ && \
|
ls -la /home/automaker/.local/bin/ && \
|
||||||
(which opencode && opencode --version) || echo "opencode installed (may need auth setup)"
|
(which opencode && opencode --version) || echo "opencode installed (may need auth setup)"
|
||||||
|
|
||||||
USER root
|
USER root
|
||||||
|
|
||||||
# Add PATH to profile so it's available in all interactive shells (for login shells)
|
# Add PATH to profile so it's available in all interactive shells (for login shells)
|
||||||
@@ -147,6 +148,15 @@ COPY --from=server-builder /app/apps/server/package*.json ./apps/server/
|
|||||||
# Copy node_modules (includes symlinks to libs)
|
# Copy node_modules (includes symlinks to libs)
|
||||||
COPY --from=server-builder /app/node_modules ./node_modules
|
COPY --from=server-builder /app/node_modules ./node_modules
|
||||||
|
|
||||||
|
# Install Playwright Chromium browser for AI agent verification tests
|
||||||
|
# This adds ~300MB to the image but enables automated testing mode out of the box
|
||||||
|
# Using the locally installed playwright ensures we use the pinned version from package-lock.json
|
||||||
|
USER automaker
|
||||||
|
RUN ./node_modules/.bin/playwright install chromium && \
|
||||||
|
echo "=== Playwright Chromium installed ===" && \
|
||||||
|
ls -la /home/automaker/.cache/ms-playwright/
|
||||||
|
USER root
|
||||||
|
|
||||||
# Create data and projects directories
|
# Create data and projects directories
|
||||||
RUN mkdir -p /data /projects && chown automaker:automaker /data /projects
|
RUN mkdir -p /data /projects && chown automaker:automaker /data /projects
|
||||||
|
|
||||||
|
|||||||
2
OPENCODE_CONFIG_CONTENT
Normal file
2
OPENCODE_CONFIG_CONTENT
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://opencode.ai/config.json",}
|
||||||
40
README.md
40
README.md
@@ -14,10 +14,6 @@
|
|||||||
|
|
||||||
**Stop typing code. Start directing AI agents.**
|
**Stop typing code. Start directing AI agents.**
|
||||||
|
|
||||||
> **[!WARNING]**
|
|
||||||
>
|
|
||||||
> **This project is no longer actively maintained.** The codebase is provided as-is. No bug fixes, security updates, or new features are being developed.
|
|
||||||
|
|
||||||
<details open>
|
<details open>
|
||||||
<summary><h2>Table of Contents</h2></summary>
|
<summary><h2>Table of Contents</h2></summary>
|
||||||
|
|
||||||
@@ -367,6 +363,42 @@ services:
|
|||||||
|
|
||||||
The Docker image supports both AMD64 and ARM64 architectures. The GitHub CLI and Claude CLI are automatically downloaded for the correct architecture during build.
|
The Docker image supports both AMD64 and ARM64 architectures. The GitHub CLI and Claude CLI are automatically downloaded for the correct architecture during build.
|
||||||
|
|
||||||
|
##### Playwright for Automated Testing
|
||||||
|
|
||||||
|
The Docker image includes **Playwright Chromium pre-installed** for AI agent verification tests. When agents implement features in automated testing mode, they use Playwright to verify the implementation works correctly.
|
||||||
|
|
||||||
|
**No additional setup required** - Playwright verification works out of the box.
|
||||||
|
|
||||||
|
#### Optional: Persist browsers for manual updates
|
||||||
|
|
||||||
|
By default, Playwright Chromium is pre-installed in the Docker image. If you need to manually update browsers or want to persist browser installations across container restarts (not image rebuilds), you can mount a volume.
|
||||||
|
|
||||||
|
**Important:** When you first add this volume mount to an existing setup, the empty volume will override the pre-installed browsers. You must re-install them:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# After adding the volume mount for the first time
|
||||||
|
docker exec --user automaker -w /app automaker-server npx playwright install chromium
|
||||||
|
```
|
||||||
|
|
||||||
|
Add this to your `docker-compose.override.yml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
server:
|
||||||
|
volumes:
|
||||||
|
- playwright-cache:/home/automaker/.cache/ms-playwright
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
playwright-cache:
|
||||||
|
name: automaker-playwright-cache
|
||||||
|
```
|
||||||
|
|
||||||
|
**Updating browsers manually:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec --user automaker -w /app automaker-server npx playwright install chromium
|
||||||
|
```
|
||||||
|
|
||||||
### Testing
|
### Testing
|
||||||
|
|
||||||
#### End-to-End Tests (Playwright)
|
#### End-to-End Tests (Playwright)
|
||||||
|
|||||||
74
apps/server/eslint.config.mjs
Normal file
74
apps/server/eslint.config.mjs
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
import { defineConfig, globalIgnores } from 'eslint/config';
|
||||||
|
import js from '@eslint/js';
|
||||||
|
import ts from '@typescript-eslint/eslint-plugin';
|
||||||
|
import tsParser from '@typescript-eslint/parser';
|
||||||
|
|
||||||
|
const eslintConfig = defineConfig([
|
||||||
|
js.configs.recommended,
|
||||||
|
{
|
||||||
|
files: ['**/*.ts'],
|
||||||
|
languageOptions: {
|
||||||
|
parser: tsParser,
|
||||||
|
parserOptions: {
|
||||||
|
ecmaVersion: 'latest',
|
||||||
|
sourceType: 'module',
|
||||||
|
},
|
||||||
|
globals: {
|
||||||
|
// Node.js globals
|
||||||
|
console: 'readonly',
|
||||||
|
process: 'readonly',
|
||||||
|
Buffer: 'readonly',
|
||||||
|
__dirname: 'readonly',
|
||||||
|
__filename: 'readonly',
|
||||||
|
URL: 'readonly',
|
||||||
|
URLSearchParams: 'readonly',
|
||||||
|
AbortController: 'readonly',
|
||||||
|
AbortSignal: 'readonly',
|
||||||
|
fetch: 'readonly',
|
||||||
|
Response: 'readonly',
|
||||||
|
Request: 'readonly',
|
||||||
|
Headers: 'readonly',
|
||||||
|
FormData: 'readonly',
|
||||||
|
RequestInit: 'readonly',
|
||||||
|
// Timers
|
||||||
|
setTimeout: 'readonly',
|
||||||
|
setInterval: 'readonly',
|
||||||
|
clearTimeout: 'readonly',
|
||||||
|
clearInterval: 'readonly',
|
||||||
|
setImmediate: 'readonly',
|
||||||
|
clearImmediate: 'readonly',
|
||||||
|
queueMicrotask: 'readonly',
|
||||||
|
// Node.js types
|
||||||
|
NodeJS: 'readonly',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
plugins: {
|
||||||
|
'@typescript-eslint': ts,
|
||||||
|
},
|
||||||
|
rules: {
|
||||||
|
...ts.configs.recommended.rules,
|
||||||
|
'@typescript-eslint/no-unused-vars': [
|
||||||
|
'warn',
|
||||||
|
{
|
||||||
|
argsIgnorePattern: '^_',
|
||||||
|
varsIgnorePattern: '^_',
|
||||||
|
caughtErrorsIgnorePattern: '^_',
|
||||||
|
ignoreRestSiblings: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
'@typescript-eslint/no-explicit-any': 'warn',
|
||||||
|
// Server code frequently works with terminal output containing ANSI escape codes
|
||||||
|
'no-control-regex': 'off',
|
||||||
|
'@typescript-eslint/ban-ts-comment': [
|
||||||
|
'error',
|
||||||
|
{
|
||||||
|
'ts-nocheck': 'allow-with-description',
|
||||||
|
minimumDescriptionLength: 10,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
globalIgnores(['dist/**', 'node_modules/**']),
|
||||||
|
]);
|
||||||
|
|
||||||
|
export default eslintConfig;
|
||||||
@@ -24,7 +24,7 @@
|
|||||||
"test:unit": "vitest run tests/unit"
|
"test:unit": "vitest run tests/unit"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@anthropic-ai/claude-agent-sdk": "0.1.76",
|
"@anthropic-ai/claude-agent-sdk": "0.2.32",
|
||||||
"@automaker/dependency-resolver": "1.0.0",
|
"@automaker/dependency-resolver": "1.0.0",
|
||||||
"@automaker/git-utils": "1.0.0",
|
"@automaker/git-utils": "1.0.0",
|
||||||
"@automaker/model-resolver": "1.0.0",
|
"@automaker/model-resolver": "1.0.0",
|
||||||
@@ -34,7 +34,7 @@
|
|||||||
"@automaker/utils": "1.0.0",
|
"@automaker/utils": "1.0.0",
|
||||||
"@github/copilot-sdk": "^0.1.16",
|
"@github/copilot-sdk": "^0.1.16",
|
||||||
"@modelcontextprotocol/sdk": "1.25.2",
|
"@modelcontextprotocol/sdk": "1.25.2",
|
||||||
"@openai/codex-sdk": "^0.77.0",
|
"@openai/codex-sdk": "^0.98.0",
|
||||||
"cookie-parser": "1.4.7",
|
"cookie-parser": "1.4.7",
|
||||||
"cors": "2.8.5",
|
"cors": "2.8.5",
|
||||||
"dotenv": "17.2.3",
|
"dotenv": "17.2.3",
|
||||||
@@ -45,6 +45,7 @@
|
|||||||
"yaml": "2.7.0"
|
"yaml": "2.7.0"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
|
"@playwright/test": "1.57.0",
|
||||||
"@types/cookie": "0.6.0",
|
"@types/cookie": "0.6.0",
|
||||||
"@types/cookie-parser": "1.4.10",
|
"@types/cookie-parser": "1.4.10",
|
||||||
"@types/cors": "2.8.19",
|
"@types/cors": "2.8.19",
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ import {
|
|||||||
import { createSettingsRoutes } from './routes/settings/index.js';
|
import { createSettingsRoutes } from './routes/settings/index.js';
|
||||||
import { AgentService } from './services/agent-service.js';
|
import { AgentService } from './services/agent-service.js';
|
||||||
import { FeatureLoader } from './services/feature-loader.js';
|
import { FeatureLoader } from './services/feature-loader.js';
|
||||||
import { AutoModeService } from './services/auto-mode-service.js';
|
import { AutoModeServiceCompat } from './services/auto-mode/index.js';
|
||||||
import { getTerminalService } from './services/terminal-service.js';
|
import { getTerminalService } from './services/terminal-service.js';
|
||||||
import { SettingsService } from './services/settings-service.js';
|
import { SettingsService } from './services/settings-service.js';
|
||||||
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
|
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
|
||||||
@@ -66,6 +66,10 @@ import { createCodexRoutes } from './routes/codex/index.js';
|
|||||||
import { CodexUsageService } from './services/codex-usage-service.js';
|
import { CodexUsageService } from './services/codex-usage-service.js';
|
||||||
import { CodexAppServerService } from './services/codex-app-server-service.js';
|
import { CodexAppServerService } from './services/codex-app-server-service.js';
|
||||||
import { CodexModelCacheService } from './services/codex-model-cache-service.js';
|
import { CodexModelCacheService } from './services/codex-model-cache-service.js';
|
||||||
|
import { createZaiRoutes } from './routes/zai/index.js';
|
||||||
|
import { ZaiUsageService } from './services/zai-usage-service.js';
|
||||||
|
import { createGeminiRoutes } from './routes/gemini/index.js';
|
||||||
|
import { GeminiUsageService } from './services/gemini-usage-service.js';
|
||||||
import { createGitHubRoutes } from './routes/github/index.js';
|
import { createGitHubRoutes } from './routes/github/index.js';
|
||||||
import { createContextRoutes } from './routes/context/index.js';
|
import { createContextRoutes } from './routes/context/index.js';
|
||||||
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
|
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
|
||||||
@@ -121,21 +125,57 @@ const BOX_CONTENT_WIDTH = 67;
|
|||||||
// The Claude Agent SDK can use either ANTHROPIC_API_KEY or Claude Code CLI authentication
|
// The Claude Agent SDK can use either ANTHROPIC_API_KEY or Claude Code CLI authentication
|
||||||
(async () => {
|
(async () => {
|
||||||
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
|
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
|
||||||
|
const hasEnvOAuthToken = !!process.env.CLAUDE_CODE_OAUTH_TOKEN;
|
||||||
|
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Starting credential detection...');
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Environment variables:', {
|
||||||
|
hasAnthropicKey,
|
||||||
|
hasEnvOAuthToken,
|
||||||
|
});
|
||||||
|
|
||||||
if (hasAnthropicKey) {
|
if (hasAnthropicKey) {
|
||||||
logger.info('✓ ANTHROPIC_API_KEY detected');
|
logger.info('✓ ANTHROPIC_API_KEY detected');
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (hasEnvOAuthToken) {
|
||||||
|
logger.info('✓ CLAUDE_CODE_OAUTH_TOKEN detected');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
// Check for Claude Code CLI authentication
|
// Check for Claude Code CLI authentication
|
||||||
|
// Store indicators outside the try block so we can use them in the warning message
|
||||||
|
let cliAuthIndicators: Awaited<ReturnType<typeof getClaudeAuthIndicators>> | null = null;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const indicators = await getClaudeAuthIndicators();
|
cliAuthIndicators = await getClaudeAuthIndicators();
|
||||||
|
const indicators = cliAuthIndicators;
|
||||||
|
|
||||||
|
// Log detailed credential detection results
|
||||||
|
const { checks, ...indicatorSummary } = indicators;
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Claude CLI auth indicators:', indicatorSummary);
|
||||||
|
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] File check details:', checks);
|
||||||
|
|
||||||
const hasCliAuth =
|
const hasCliAuth =
|
||||||
indicators.hasStatsCacheWithActivity ||
|
indicators.hasStatsCacheWithActivity ||
|
||||||
(indicators.hasSettingsFile && indicators.hasProjectsSessions) ||
|
(indicators.hasSettingsFile && indicators.hasProjectsSessions) ||
|
||||||
(indicators.hasCredentialsFile &&
|
(indicators.hasCredentialsFile &&
|
||||||
(indicators.credentials?.hasOAuthToken || indicators.credentials?.hasApiKey));
|
(indicators.credentials?.hasOAuthToken || indicators.credentials?.hasApiKey));
|
||||||
|
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Auth determination:', {
|
||||||
|
hasCliAuth,
|
||||||
|
reason: hasCliAuth
|
||||||
|
? indicators.hasStatsCacheWithActivity
|
||||||
|
? 'stats cache with activity'
|
||||||
|
: indicators.hasSettingsFile && indicators.hasProjectsSessions
|
||||||
|
? 'settings file + project sessions'
|
||||||
|
: indicators.credentials?.hasOAuthToken
|
||||||
|
? 'credentials file with OAuth token'
|
||||||
|
: 'credentials file with API key'
|
||||||
|
: 'no valid credentials found',
|
||||||
|
});
|
||||||
|
|
||||||
if (hasCliAuth) {
|
if (hasCliAuth) {
|
||||||
logger.info('✓ Claude Code CLI authentication detected');
|
logger.info('✓ Claude Code CLI authentication detected');
|
||||||
return;
|
return;
|
||||||
@@ -145,7 +185,7 @@ const BOX_CONTENT_WIDTH = 67;
|
|||||||
logger.warn('Error checking for Claude Code CLI authentication:', error);
|
logger.warn('Error checking for Claude Code CLI authentication:', error);
|
||||||
}
|
}
|
||||||
|
|
||||||
// No authentication found - show warning
|
// No authentication found - show warning with paths that were checked
|
||||||
const wHeader = '⚠️ WARNING: No Claude authentication configured'.padEnd(BOX_CONTENT_WIDTH);
|
const wHeader = '⚠️ WARNING: No Claude authentication configured'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
const w1 = 'The Claude Agent SDK requires authentication to function.'.padEnd(BOX_CONTENT_WIDTH);
|
const w1 = 'The Claude Agent SDK requires authentication to function.'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
const w2 = 'Options:'.padEnd(BOX_CONTENT_WIDTH);
|
const w2 = 'Options:'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
@@ -158,6 +198,33 @@ const BOX_CONTENT_WIDTH = 67;
|
|||||||
BOX_CONTENT_WIDTH
|
BOX_CONTENT_WIDTH
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Build paths checked summary from the indicators (if available)
|
||||||
|
let pathsCheckedInfo = '';
|
||||||
|
if (cliAuthIndicators) {
|
||||||
|
const pathsChecked: string[] = [];
|
||||||
|
|
||||||
|
// Collect paths that were checked (paths are always populated strings)
|
||||||
|
pathsChecked.push(`Settings: ${cliAuthIndicators.checks.settingsFile.path}`);
|
||||||
|
pathsChecked.push(`Stats cache: ${cliAuthIndicators.checks.statsCache.path}`);
|
||||||
|
pathsChecked.push(`Projects dir: ${cliAuthIndicators.checks.projectsDir.path}`);
|
||||||
|
for (const credFile of cliAuthIndicators.checks.credentialFiles) {
|
||||||
|
pathsChecked.push(`Credentials: ${credFile.path}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pathsChecked.length > 0) {
|
||||||
|
pathsCheckedInfo = `
|
||||||
|
║ ║
|
||||||
|
║ ${'Paths checked:'.padEnd(BOX_CONTENT_WIDTH)}║
|
||||||
|
${pathsChecked
|
||||||
|
.map((p) => {
|
||||||
|
const maxLen = BOX_CONTENT_WIDTH - 4;
|
||||||
|
const display = p.length > maxLen ? '...' + p.slice(-(maxLen - 3)) : p;
|
||||||
|
return `║ ${display.padEnd(maxLen)} ║`;
|
||||||
|
})
|
||||||
|
.join('\n')}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
logger.warn(`
|
logger.warn(`
|
||||||
╔═════════════════════════════════════════════════════════════════════╗
|
╔═════════════════════════════════════════════════════════════════════╗
|
||||||
║ ${wHeader}║
|
║ ${wHeader}║
|
||||||
@@ -169,7 +236,7 @@ const BOX_CONTENT_WIDTH = 67;
|
|||||||
║ ${w3}║
|
║ ${w3}║
|
||||||
║ ${w4}║
|
║ ${w4}║
|
||||||
║ ${w5}║
|
║ ${w5}║
|
||||||
║ ${w6}║
|
║ ${w6}║${pathsCheckedInfo}
|
||||||
║ ║
|
║ ║
|
||||||
╚═════════════════════════════════════════════════════════════════════╝
|
╚═════════════════════════════════════════════════════════════════════╝
|
||||||
`);
|
`);
|
||||||
@@ -237,7 +304,7 @@ app.use(
|
|||||||
callback(null, origin);
|
callback(null, origin);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
} catch (err) {
|
} catch {
|
||||||
// Ignore URL parsing errors
|
// Ignore URL parsing errors
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -258,11 +325,15 @@ const events: EventEmitter = createEventEmitter();
|
|||||||
const settingsService = new SettingsService(DATA_DIR);
|
const settingsService = new SettingsService(DATA_DIR);
|
||||||
const agentService = new AgentService(DATA_DIR, events, settingsService);
|
const agentService = new AgentService(DATA_DIR, events, settingsService);
|
||||||
const featureLoader = new FeatureLoader();
|
const featureLoader = new FeatureLoader();
|
||||||
const autoModeService = new AutoModeService(events, settingsService);
|
|
||||||
|
// Auto-mode services: compatibility layer provides old interface while using new architecture
|
||||||
|
const autoModeService = new AutoModeServiceCompat(events, settingsService, featureLoader);
|
||||||
const claudeUsageService = new ClaudeUsageService();
|
const claudeUsageService = new ClaudeUsageService();
|
||||||
const codexAppServerService = new CodexAppServerService();
|
const codexAppServerService = new CodexAppServerService();
|
||||||
const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
|
const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
|
||||||
const codexUsageService = new CodexUsageService(codexAppServerService);
|
const codexUsageService = new CodexUsageService(codexAppServerService);
|
||||||
|
const zaiUsageService = new ZaiUsageService();
|
||||||
|
const geminiUsageService = new GeminiUsageService();
|
||||||
const mcpTestService = new MCPTestService(settingsService);
|
const mcpTestService = new MCPTestService(settingsService);
|
||||||
const ideationService = new IdeationService(events, settingsService, featureLoader);
|
const ideationService = new IdeationService(events, settingsService, featureLoader);
|
||||||
|
|
||||||
@@ -303,24 +374,77 @@ eventHookService.initialize(events, settingsService, eventHistoryService, featur
|
|||||||
logger.warn('Failed to check for legacy settings migration:', err);
|
logger.warn('Failed to check for legacy settings migration:', err);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply logging settings from saved settings
|
// Fetch global settings once and reuse for logging config and feature reconciliation
|
||||||
|
let globalSettings: Awaited<ReturnType<typeof settingsService.getGlobalSettings>> | null = null;
|
||||||
try {
|
try {
|
||||||
const settings = await settingsService.getGlobalSettings();
|
globalSettings = await settingsService.getGlobalSettings();
|
||||||
if (settings.serverLogLevel && LOG_LEVEL_MAP[settings.serverLogLevel] !== undefined) {
|
} catch {
|
||||||
setLogLevel(LOG_LEVEL_MAP[settings.serverLogLevel]);
|
logger.warn('Failed to load global settings, using defaults');
|
||||||
logger.info(`Server log level set to: ${settings.serverLogLevel}`);
|
}
|
||||||
|
|
||||||
|
// Apply logging settings from saved settings
|
||||||
|
if (globalSettings) {
|
||||||
|
try {
|
||||||
|
if (
|
||||||
|
globalSettings.serverLogLevel &&
|
||||||
|
LOG_LEVEL_MAP[globalSettings.serverLogLevel] !== undefined
|
||||||
|
) {
|
||||||
|
setLogLevel(LOG_LEVEL_MAP[globalSettings.serverLogLevel]);
|
||||||
|
logger.info(`Server log level set to: ${globalSettings.serverLogLevel}`);
|
||||||
|
}
|
||||||
|
// Apply request logging setting (default true if not set)
|
||||||
|
const enableRequestLog = globalSettings.enableRequestLogging ?? true;
|
||||||
|
setRequestLoggingEnabled(enableRequestLog);
|
||||||
|
logger.info(`HTTP request logging: ${enableRequestLog ? 'enabled' : 'disabled'}`);
|
||||||
|
} catch {
|
||||||
|
logger.warn('Failed to apply logging settings, using defaults');
|
||||||
}
|
}
|
||||||
// Apply request logging setting (default true if not set)
|
|
||||||
const enableRequestLog = settings.enableRequestLogging ?? true;
|
|
||||||
setRequestLoggingEnabled(enableRequestLog);
|
|
||||||
logger.info(`HTTP request logging: ${enableRequestLog ? 'enabled' : 'disabled'}`);
|
|
||||||
} catch (err) {
|
|
||||||
logger.warn('Failed to load logging settings, using defaults');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
await agentService.initialize();
|
await agentService.initialize();
|
||||||
logger.info('Agent service initialized');
|
logger.info('Agent service initialized');
|
||||||
|
|
||||||
|
// Reconcile feature states on startup
|
||||||
|
// After any type of restart (clean, forced, crash), features may be stuck in
|
||||||
|
// transient states (in_progress, interrupted, pipeline_*) that don't match reality.
|
||||||
|
// Reconcile them back to resting states before the UI is served.
|
||||||
|
if (globalSettings) {
|
||||||
|
try {
|
||||||
|
if (globalSettings.projects && globalSettings.projects.length > 0) {
|
||||||
|
let totalReconciled = 0;
|
||||||
|
for (const project of globalSettings.projects) {
|
||||||
|
const count = await autoModeService.reconcileFeatureStates(project.path);
|
||||||
|
totalReconciled += count;
|
||||||
|
}
|
||||||
|
if (totalReconciled > 0) {
|
||||||
|
logger.info(
|
||||||
|
`[STARTUP] Reconciled ${totalReconciled} feature(s) across ${globalSettings.projects.length} project(s)`
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
logger.info('[STARTUP] Feature state reconciliation complete - no stale states found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume interrupted features in the background after reconciliation.
|
||||||
|
// This uses the saved execution state to identify features that were running
|
||||||
|
// before the restart (their statuses have been reset to ready/backlog by
|
||||||
|
// reconciliation above). Running in background so it doesn't block startup.
|
||||||
|
if (totalReconciled > 0) {
|
||||||
|
for (const project of globalSettings.projects) {
|
||||||
|
autoModeService.resumeInterruptedFeatures(project.path).catch((err) => {
|
||||||
|
logger.warn(
|
||||||
|
`[STARTUP] Failed to resume interrupted features for ${project.path}:`,
|
||||||
|
err
|
||||||
|
);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
logger.info('[STARTUP] Initiated background resume of interrupted features');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
logger.warn('[STARTUP] Failed to reconcile feature states:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Bootstrap Codex model cache in background (don't block server startup)
|
// Bootstrap Codex model cache in background (don't block server startup)
|
||||||
void codexModelCacheService.getModels().catch((err) => {
|
void codexModelCacheService.getModels().catch((err) => {
|
||||||
logger.error('Failed to bootstrap Codex model cache:', err);
|
logger.error('Failed to bootstrap Codex model cache:', err);
|
||||||
@@ -371,6 +495,8 @@ app.use('/api/terminal', createTerminalRoutes());
|
|||||||
app.use('/api/settings', createSettingsRoutes(settingsService));
|
app.use('/api/settings', createSettingsRoutes(settingsService));
|
||||||
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
|
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
|
||||||
app.use('/api/codex', createCodexRoutes(codexUsageService, codexModelCacheService));
|
app.use('/api/codex', createCodexRoutes(codexUsageService, codexModelCacheService));
|
||||||
|
app.use('/api/zai', createZaiRoutes(zaiUsageService, settingsService));
|
||||||
|
app.use('/api/gemini', createGeminiRoutes(geminiUsageService, events));
|
||||||
app.use('/api/github', createGitHubRoutes(events, settingsService));
|
app.use('/api/github', createGitHubRoutes(events, settingsService));
|
||||||
app.use('/api/context', createContextRoutes(settingsService));
|
app.use('/api/context', createContextRoutes(settingsService));
|
||||||
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
|
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
|
||||||
@@ -473,7 +599,7 @@ wss.on('connection', (ws: WebSocket) => {
|
|||||||
logger.info('Sending event to client:', {
|
logger.info('Sending event to client:', {
|
||||||
type,
|
type,
|
||||||
messageLength: message.length,
|
messageLength: message.length,
|
||||||
sessionId: (payload as any)?.sessionId,
|
sessionId: (payload as Record<string, unknown>)?.sessionId,
|
||||||
});
|
});
|
||||||
ws.send(message);
|
ws.send(message);
|
||||||
} else {
|
} else {
|
||||||
@@ -539,8 +665,15 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
|
|||||||
// Check if session exists
|
// Check if session exists
|
||||||
const session = terminalService.getSession(sessionId);
|
const session = terminalService.getSession(sessionId);
|
||||||
if (!session) {
|
if (!session) {
|
||||||
logger.info(`Session ${sessionId} not found`);
|
logger.warn(
|
||||||
ws.close(4004, 'Session not found');
|
`Terminal session ${sessionId} not found. ` +
|
||||||
|
`The session may have exited, been deleted, or was never created. ` +
|
||||||
|
`Active terminal sessions: ${terminalService.getSessionCount()}`
|
||||||
|
);
|
||||||
|
ws.close(
|
||||||
|
4004,
|
||||||
|
'Session not found. The terminal session may have expired or been closed. Please create a new terminal.'
|
||||||
|
);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -8,9 +8,6 @@ import { spawn, execSync } from 'child_process';
|
|||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import * as os from 'os';
|
import * as os from 'os';
|
||||||
import { createLogger } from '@automaker/utils';
|
|
||||||
|
|
||||||
const logger = createLogger('CliDetection');
|
|
||||||
|
|
||||||
export interface CliInfo {
|
export interface CliInfo {
|
||||||
name: string;
|
name: string;
|
||||||
@@ -86,7 +83,7 @@ export async function detectCli(
|
|||||||
options: CliDetectionOptions = {}
|
options: CliDetectionOptions = {}
|
||||||
): Promise<CliDetectionResult> {
|
): Promise<CliDetectionResult> {
|
||||||
const config = CLI_CONFIGS[provider];
|
const config = CLI_CONFIGS[provider];
|
||||||
const { timeout = 5000, includeWsl = false, wslDistribution } = options;
|
const { timeout = 5000 } = options;
|
||||||
const issues: string[] = [];
|
const issues: string[] = [];
|
||||||
|
|
||||||
const cliInfo: CliInfo = {
|
const cliInfo: CliInfo = {
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ export interface ErrorClassification {
|
|||||||
suggestedAction?: string;
|
suggestedAction?: string;
|
||||||
retryable: boolean;
|
retryable: boolean;
|
||||||
provider?: string;
|
provider?: string;
|
||||||
context?: Record<string, any>;
|
context?: Record<string, unknown>;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface ErrorPattern {
|
export interface ErrorPattern {
|
||||||
@@ -180,7 +180,7 @@ const ERROR_PATTERNS: ErrorPattern[] = [
|
|||||||
export function classifyError(
|
export function classifyError(
|
||||||
error: unknown,
|
error: unknown,
|
||||||
provider?: string,
|
provider?: string,
|
||||||
context?: Record<string, any>
|
context?: Record<string, unknown>
|
||||||
): ErrorClassification {
|
): ErrorClassification {
|
||||||
const errorText = getErrorText(error);
|
const errorText = getErrorText(error);
|
||||||
|
|
||||||
@@ -281,18 +281,19 @@ function getErrorText(error: unknown): string {
|
|||||||
|
|
||||||
if (typeof error === 'object' && error !== null) {
|
if (typeof error === 'object' && error !== null) {
|
||||||
// Handle structured error objects
|
// Handle structured error objects
|
||||||
const errorObj = error as any;
|
const errorObj = error as Record<string, unknown>;
|
||||||
|
|
||||||
if (errorObj.message) {
|
if (typeof errorObj.message === 'string') {
|
||||||
return errorObj.message;
|
return errorObj.message;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (errorObj.error?.message) {
|
const nestedError = errorObj.error;
|
||||||
return errorObj.error.message;
|
if (typeof nestedError === 'object' && nestedError !== null && 'message' in nestedError) {
|
||||||
|
return String((nestedError as Record<string, unknown>).message);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (errorObj.error) {
|
if (nestedError) {
|
||||||
return typeof errorObj.error === 'string' ? errorObj.error : JSON.stringify(errorObj.error);
|
return typeof nestedError === 'string' ? nestedError : JSON.stringify(nestedError);
|
||||||
}
|
}
|
||||||
|
|
||||||
return JSON.stringify(error);
|
return JSON.stringify(error);
|
||||||
@@ -307,7 +308,7 @@ function getErrorText(error: unknown): string {
|
|||||||
export function createErrorResponse(
|
export function createErrorResponse(
|
||||||
error: unknown,
|
error: unknown,
|
||||||
provider?: string,
|
provider?: string,
|
||||||
context?: Record<string, any>
|
context?: Record<string, unknown>
|
||||||
): {
|
): {
|
||||||
success: false;
|
success: false;
|
||||||
error: string;
|
error: string;
|
||||||
@@ -335,7 +336,7 @@ export function logError(
|
|||||||
error: unknown,
|
error: unknown,
|
||||||
provider?: string,
|
provider?: string,
|
||||||
operation?: string,
|
operation?: string,
|
||||||
additionalContext?: Record<string, any>
|
additionalContext?: Record<string, unknown>
|
||||||
): void {
|
): void {
|
||||||
const classification = classifyError(error, provider, {
|
const classification = classifyError(error, provider, {
|
||||||
operation,
|
operation,
|
||||||
|
|||||||
62
apps/server/src/lib/git-log-parser.ts
Normal file
62
apps/server/src/lib/git-log-parser.ts
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
export interface CommitFields {
|
||||||
|
hash: string;
|
||||||
|
shortHash: string;
|
||||||
|
author: string;
|
||||||
|
authorEmail: string;
|
||||||
|
date: string;
|
||||||
|
subject: string;
|
||||||
|
body: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function parseGitLogOutput(output: string): CommitFields[] {
|
||||||
|
const commits: CommitFields[] = [];
|
||||||
|
|
||||||
|
// Split by NUL character to separate commits
|
||||||
|
const commitBlocks = output.split('\0').filter((block) => block.trim());
|
||||||
|
|
||||||
|
for (const block of commitBlocks) {
|
||||||
|
const allLines = block.split('\n');
|
||||||
|
|
||||||
|
// Skip leading empty lines that may appear at block boundaries
|
||||||
|
let startIndex = 0;
|
||||||
|
while (startIndex < allLines.length && allLines[startIndex].trim() === '') {
|
||||||
|
startIndex++;
|
||||||
|
}
|
||||||
|
const fields = allLines.slice(startIndex);
|
||||||
|
|
||||||
|
// Validate we have all expected fields (at least hash, shortHash, author, authorEmail, date, subject)
|
||||||
|
if (fields.length < 6) {
|
||||||
|
continue; // Skip malformed blocks
|
||||||
|
}
|
||||||
|
|
||||||
|
const commit: CommitFields = {
|
||||||
|
hash: fields[0].trim(),
|
||||||
|
shortHash: fields[1].trim(),
|
||||||
|
author: fields[2].trim(),
|
||||||
|
authorEmail: fields[3].trim(),
|
||||||
|
date: fields[4].trim(),
|
||||||
|
subject: fields[5].trim(),
|
||||||
|
body: fields.slice(6).join('\n').trim(),
|
||||||
|
};
|
||||||
|
|
||||||
|
commits.push(commit);
|
||||||
|
}
|
||||||
|
|
||||||
|
return commits;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a commit object from parsed fields, matching the expected API response format
|
||||||
|
*/
|
||||||
|
export function createCommitFromFields(fields: CommitFields, files?: string[]) {
|
||||||
|
return {
|
||||||
|
hash: fields.hash,
|
||||||
|
shortHash: fields.shortHash,
|
||||||
|
author: fields.author,
|
||||||
|
authorEmail: fields.authorEmail,
|
||||||
|
date: fields.date,
|
||||||
|
subject: fields.subject,
|
||||||
|
body: fields.body,
|
||||||
|
files: files || [],
|
||||||
|
};
|
||||||
|
}
|
||||||
208
apps/server/src/lib/git.ts
Normal file
208
apps/server/src/lib/git.ts
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
/**
|
||||||
|
* Shared git command execution utilities.
|
||||||
|
*
|
||||||
|
* This module provides the canonical `execGitCommand` helper and common
|
||||||
|
* git utilities used across services and routes. All consumers should
|
||||||
|
* import from here rather than defining their own copy.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs/promises';
|
||||||
|
import path from 'path';
|
||||||
|
import { spawnProcess } from '@automaker/platform';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
|
const logger = createLogger('GitLib');
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Secure Command Execution
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute git command with array arguments to prevent command injection.
|
||||||
|
* Uses spawnProcess from @automaker/platform for secure, cross-platform execution.
|
||||||
|
*
|
||||||
|
* @param args - Array of git command arguments (e.g., ['worktree', 'add', path])
|
||||||
|
* @param cwd - Working directory to execute the command in
|
||||||
|
* @param env - Optional additional environment variables to pass to the git process.
|
||||||
|
* These are merged on top of the current process environment. Pass
|
||||||
|
* `{ LC_ALL: 'C' }` to force git to emit English output regardless of the
|
||||||
|
* system locale so that text-based output parsing remains reliable.
|
||||||
|
* @param abortController - Optional AbortController to cancel the git process.
|
||||||
|
* When the controller is aborted the underlying process is sent SIGTERM and
|
||||||
|
* the returned promise rejects with an Error whose message is 'Process aborted'.
|
||||||
|
* @returns Promise resolving to stdout output
|
||||||
|
* @throws Error with stderr/stdout message if command fails. The thrown error
|
||||||
|
* also has `stdout` and `stderr` string properties for structured access.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* // Safe: no injection possible
|
||||||
|
* await execGitCommand(['branch', '-D', branchName], projectPath);
|
||||||
|
*
|
||||||
|
* // Force English output for reliable text parsing:
|
||||||
|
* await execGitCommand(['rebase', '--', 'main'], worktreePath, { LC_ALL: 'C' });
|
||||||
|
*
|
||||||
|
* // With a process-level timeout:
|
||||||
|
* const controller = new AbortController();
|
||||||
|
* const timerId = setTimeout(() => controller.abort(), 30_000);
|
||||||
|
* try {
|
||||||
|
* await execGitCommand(['fetch', '--all', '--quiet'], cwd, undefined, controller);
|
||||||
|
* } finally {
|
||||||
|
* clearTimeout(timerId);
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* // Instead of unsafe:
|
||||||
|
* // await execAsync(`git branch -D ${branchName}`, { cwd });
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
export async function execGitCommand(
|
||||||
|
args: string[],
|
||||||
|
cwd: string,
|
||||||
|
env?: Record<string, string>,
|
||||||
|
abortController?: AbortController
|
||||||
|
): Promise<string> {
|
||||||
|
const result = await spawnProcess({
|
||||||
|
command: 'git',
|
||||||
|
args,
|
||||||
|
cwd,
|
||||||
|
...(env !== undefined ? { env } : {}),
|
||||||
|
...(abortController !== undefined ? { abortController } : {}),
|
||||||
|
});
|
||||||
|
|
||||||
|
// spawnProcess returns { stdout, stderr, exitCode }
|
||||||
|
if (result.exitCode === 0) {
|
||||||
|
return result.stdout;
|
||||||
|
} else {
|
||||||
|
const errorMessage =
|
||||||
|
result.stderr || result.stdout || `Git command failed with code ${result.exitCode}`;
|
||||||
|
throw Object.assign(new Error(errorMessage), {
|
||||||
|
stdout: result.stdout,
|
||||||
|
stderr: result.stderr,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Common Git Utilities
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the current branch name for the given worktree.
|
||||||
|
*
|
||||||
|
* This is the canonical implementation shared across services. Services
|
||||||
|
* should import this rather than duplicating the logic locally.
|
||||||
|
*
|
||||||
|
* @param worktreePath - Path to the git worktree
|
||||||
|
* @returns The current branch name (trimmed)
|
||||||
|
*/
|
||||||
|
export async function getCurrentBranch(worktreePath: string): Promise<string> {
|
||||||
|
const branchOutput = await execGitCommand(['rev-parse', '--abbrev-ref', 'HEAD'], worktreePath);
|
||||||
|
return branchOutput.trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Index Lock Recovery
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check whether an error message indicates a stale git index lock file.
|
||||||
|
*
|
||||||
|
* Git operations that write to the index (e.g. `git stash push`) will fail
|
||||||
|
* with "could not write index" or "Unable to create ... .lock" when a
|
||||||
|
* `.git/index.lock` file exists from a previously interrupted operation.
|
||||||
|
*
|
||||||
|
* @param errorMessage - The error string from a failed git command
|
||||||
|
* @returns true if the error looks like a stale index lock issue
|
||||||
|
*/
|
||||||
|
export function isIndexLockError(errorMessage: string): boolean {
|
||||||
|
const lower = errorMessage.toLowerCase();
|
||||||
|
return (
|
||||||
|
lower.includes('could not write index') ||
|
||||||
|
(lower.includes('unable to create') && lower.includes('index.lock')) ||
|
||||||
|
lower.includes('index.lock')
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Attempt to remove a stale `.git/index.lock` file for the given worktree.
|
||||||
|
*
|
||||||
|
* Uses `git rev-parse --git-dir` to locate the correct `.git` directory,
|
||||||
|
* which works for both regular repositories and linked worktrees.
|
||||||
|
*
|
||||||
|
* @param worktreePath - Path to the git worktree (or main repo)
|
||||||
|
* @returns true if a lock file was found and removed, false otherwise
|
||||||
|
*/
|
||||||
|
export async function removeStaleIndexLock(worktreePath: string): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
// Resolve the .git directory (handles worktrees correctly)
|
||||||
|
const gitDirRaw = await execGitCommand(['rev-parse', '--git-dir'], worktreePath);
|
||||||
|
const gitDir = path.resolve(worktreePath, gitDirRaw.trim());
|
||||||
|
const lockFilePath = path.join(gitDir, 'index.lock');
|
||||||
|
|
||||||
|
// Check if the lock file exists
|
||||||
|
try {
|
||||||
|
await fs.access(lockFilePath);
|
||||||
|
} catch {
|
||||||
|
// Lock file does not exist — nothing to remove
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the stale lock file
|
||||||
|
await fs.unlink(lockFilePath);
|
||||||
|
logger.info('Removed stale index.lock file', { worktreePath, lockFilePath });
|
||||||
|
return true;
|
||||||
|
} catch (err) {
|
||||||
|
logger.warn('Failed to remove stale index.lock file', {
|
||||||
|
worktreePath,
|
||||||
|
error: err instanceof Error ? err.message : String(err),
|
||||||
|
});
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a git command with automatic retry when a stale index.lock is detected.
|
||||||
|
*
|
||||||
|
* If the command fails with an error indicating a locked index file, this
|
||||||
|
* helper will attempt to remove the stale `.git/index.lock` and retry the
|
||||||
|
* command exactly once.
|
||||||
|
*
|
||||||
|
* This is particularly useful for `git stash push` which writes to the
|
||||||
|
* index and commonly fails when a previous git operation was interrupted.
|
||||||
|
*
|
||||||
|
* @param args - Array of git command arguments
|
||||||
|
* @param cwd - Working directory to execute the command in
|
||||||
|
* @param env - Optional additional environment variables
|
||||||
|
* @returns Promise resolving to stdout output
|
||||||
|
* @throws The original error if retry also fails, or a non-lock error
|
||||||
|
*/
|
||||||
|
export async function execGitCommandWithLockRetry(
|
||||||
|
args: string[],
|
||||||
|
cwd: string,
|
||||||
|
env?: Record<string, string>
|
||||||
|
): Promise<string> {
|
||||||
|
try {
|
||||||
|
return await execGitCommand(args, cwd, env);
|
||||||
|
} catch (error: unknown) {
|
||||||
|
const err = error as { message?: string; stderr?: string };
|
||||||
|
const errorMessage = err.stderr || err.message || '';
|
||||||
|
|
||||||
|
if (!isIndexLockError(errorMessage)) {
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info('Git command failed due to index lock, attempting cleanup and retry', {
|
||||||
|
cwd,
|
||||||
|
args: args.join(' '),
|
||||||
|
});
|
||||||
|
|
||||||
|
const removed = await removeStaleIndexLock(cwd);
|
||||||
|
if (!removed) {
|
||||||
|
// Could not remove the lock file — re-throw the original error
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry the command once after removing the lock file
|
||||||
|
return await execGitCommand(args, cwd, env);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -12,11 +12,18 @@ export interface PermissionCheckResult {
|
|||||||
reason?: string;
|
reason?: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Minimal shape of a Cursor tool call used for permission checking */
|
||||||
|
interface CursorToolCall {
|
||||||
|
shellToolCall?: { args?: { command: string } };
|
||||||
|
readToolCall?: { args?: { path: string } };
|
||||||
|
writeToolCall?: { args?: { path: string } };
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if a tool call is allowed based on permissions
|
* Check if a tool call is allowed based on permissions
|
||||||
*/
|
*/
|
||||||
export function checkToolCallPermission(
|
export function checkToolCallPermission(
|
||||||
toolCall: any,
|
toolCall: CursorToolCall,
|
||||||
permissions: CursorCliConfigFile | null
|
permissions: CursorCliConfigFile | null
|
||||||
): PermissionCheckResult {
|
): PermissionCheckResult {
|
||||||
if (!permissions || !permissions.permissions) {
|
if (!permissions || !permissions.permissions) {
|
||||||
@@ -152,7 +159,11 @@ function matchesRule(toolName: string, rule: string): boolean {
|
|||||||
/**
|
/**
|
||||||
* Log permission violations
|
* Log permission violations
|
||||||
*/
|
*/
|
||||||
export function logPermissionViolation(toolCall: any, reason: string, sessionId?: string): void {
|
export function logPermissionViolation(
|
||||||
|
toolCall: CursorToolCall,
|
||||||
|
reason: string,
|
||||||
|
sessionId?: string
|
||||||
|
): void {
|
||||||
const sessionIdStr = sessionId ? ` [${sessionId}]` : '';
|
const sessionIdStr = sessionId ? ` [${sessionId}]` : '';
|
||||||
|
|
||||||
if (toolCall.shellToolCall?.args?.command) {
|
if (toolCall.shellToolCall?.args?.command) {
|
||||||
|
|||||||
@@ -253,11 +253,27 @@ function buildMcpOptions(config: CreateSdkOptionsConfig): McpOptions {
|
|||||||
/**
|
/**
|
||||||
* Build thinking options for SDK configuration.
|
* Build thinking options for SDK configuration.
|
||||||
* Converts ThinkingLevel to maxThinkingTokens for the Claude SDK.
|
* Converts ThinkingLevel to maxThinkingTokens for the Claude SDK.
|
||||||
|
* For adaptive thinking (Opus 4.6), omits maxThinkingTokens to let the model
|
||||||
|
* decide its own reasoning depth.
|
||||||
*
|
*
|
||||||
* @param thinkingLevel - The thinking level to convert
|
* @param thinkingLevel - The thinking level to convert
|
||||||
* @returns Object with maxThinkingTokens if thinking is enabled
|
* @returns Object with maxThinkingTokens if thinking is enabled with a budget
|
||||||
*/
|
*/
|
||||||
function buildThinkingOptions(thinkingLevel?: ThinkingLevel): Partial<Options> {
|
function buildThinkingOptions(thinkingLevel?: ThinkingLevel): Partial<Options> {
|
||||||
|
if (!thinkingLevel || thinkingLevel === 'none') {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adaptive thinking (Opus 4.6): don't set maxThinkingTokens
|
||||||
|
// The model will use adaptive thinking by default
|
||||||
|
if (thinkingLevel === 'adaptive') {
|
||||||
|
logger.debug(
|
||||||
|
`buildThinkingOptions: thinkingLevel="adaptive" -> no maxThinkingTokens (model decides)`
|
||||||
|
);
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manual budget-based thinking for Haiku/Sonnet
|
||||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
||||||
logger.debug(
|
logger.debug(
|
||||||
`buildThinkingOptions: thinkingLevel="${thinkingLevel}" -> maxThinkingTokens=${maxThinkingTokens}`
|
`buildThinkingOptions: thinkingLevel="${thinkingLevel}" -> maxThinkingTokens=${maxThinkingTokens}`
|
||||||
|
|||||||
@@ -78,7 +78,7 @@ export async function readWorktreeMetadata(
|
|||||||
const metadataPath = getWorktreeMetadataPath(projectPath, branch);
|
const metadataPath = getWorktreeMetadataPath(projectPath, branch);
|
||||||
const content = (await secureFs.readFile(metadataPath, 'utf-8')) as string;
|
const content = (await secureFs.readFile(metadataPath, 'utf-8')) as string;
|
||||||
return JSON.parse(content) as WorktreeMetadata;
|
return JSON.parse(content) as WorktreeMetadata;
|
||||||
} catch (error) {
|
} catch (_error) {
|
||||||
// File doesn't exist or can't be read
|
// File doesn't exist or can't be read
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,11 +5,10 @@
|
|||||||
* with the provider architecture.
|
* with the provider architecture.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { query, type Options } from '@anthropic-ai/claude-agent-sdk';
|
import { query, type Options, type SDKUserMessage } from '@anthropic-ai/claude-agent-sdk';
|
||||||
import { BaseProvider } from './base-provider.js';
|
import { BaseProvider } from './base-provider.js';
|
||||||
import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
|
import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
|
||||||
|
import { getClaudeAuthIndicators } from '@automaker/platform';
|
||||||
const logger = createLogger('ClaudeProvider');
|
|
||||||
import {
|
import {
|
||||||
getThinkingTokenBudget,
|
getThinkingTokenBudget,
|
||||||
validateBareModelId,
|
validateBareModelId,
|
||||||
@@ -17,6 +16,14 @@ import {
|
|||||||
type ClaudeCompatibleProvider,
|
type ClaudeCompatibleProvider,
|
||||||
type Credentials,
|
type Credentials,
|
||||||
} from '@automaker/types';
|
} from '@automaker/types';
|
||||||
|
import type {
|
||||||
|
ExecuteOptions,
|
||||||
|
ProviderMessage,
|
||||||
|
InstallationStatus,
|
||||||
|
ModelDefinition,
|
||||||
|
} from './types.js';
|
||||||
|
|
||||||
|
const logger = createLogger('ClaudeProvider');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ProviderConfig - Union type for provider configuration
|
* ProviderConfig - Union type for provider configuration
|
||||||
@@ -25,37 +32,6 @@ import {
|
|||||||
* Both share the same connection settings structure.
|
* Both share the same connection settings structure.
|
||||||
*/
|
*/
|
||||||
type ProviderConfig = ClaudeApiProfile | ClaudeCompatibleProvider;
|
type ProviderConfig = ClaudeApiProfile | ClaudeCompatibleProvider;
|
||||||
import type {
|
|
||||||
ExecuteOptions,
|
|
||||||
ProviderMessage,
|
|
||||||
InstallationStatus,
|
|
||||||
ModelDefinition,
|
|
||||||
} from './types.js';
|
|
||||||
|
|
||||||
// Explicit allowlist of environment variables to pass to the SDK.
|
|
||||||
// Only these vars are passed - nothing else from process.env leaks through.
|
|
||||||
const ALLOWED_ENV_VARS = [
|
|
||||||
// Authentication
|
|
||||||
'ANTHROPIC_API_KEY',
|
|
||||||
'ANTHROPIC_AUTH_TOKEN',
|
|
||||||
// Endpoint configuration
|
|
||||||
'ANTHROPIC_BASE_URL',
|
|
||||||
'API_TIMEOUT_MS',
|
|
||||||
// Model mappings
|
|
||||||
'ANTHROPIC_DEFAULT_HAIKU_MODEL',
|
|
||||||
'ANTHROPIC_DEFAULT_SONNET_MODEL',
|
|
||||||
'ANTHROPIC_DEFAULT_OPUS_MODEL',
|
|
||||||
// Traffic control
|
|
||||||
'CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC',
|
|
||||||
// System vars (always from process.env)
|
|
||||||
'PATH',
|
|
||||||
'HOME',
|
|
||||||
'SHELL',
|
|
||||||
'TERM',
|
|
||||||
'USER',
|
|
||||||
'LANG',
|
|
||||||
'LC_ALL',
|
|
||||||
];
|
|
||||||
|
|
||||||
// System vars are always passed from process.env regardless of profile
|
// System vars are always passed from process.env regardless of profile
|
||||||
const SYSTEM_ENV_VARS = ['PATH', 'HOME', 'SHELL', 'TERM', 'USER', 'LANG', 'LC_ALL'];
|
const SYSTEM_ENV_VARS = ['PATH', 'HOME', 'SHELL', 'TERM', 'USER', 'LANG', 'LC_ALL'];
|
||||||
@@ -204,7 +180,7 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
model,
|
model,
|
||||||
cwd,
|
cwd,
|
||||||
systemPrompt,
|
systemPrompt,
|
||||||
maxTurns = 20,
|
maxTurns = 100,
|
||||||
allowedTools,
|
allowedTools,
|
||||||
abortController,
|
abortController,
|
||||||
conversationHistory,
|
conversationHistory,
|
||||||
@@ -219,8 +195,11 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
// claudeCompatibleProvider takes precedence over claudeApiProfile
|
// claudeCompatibleProvider takes precedence over claudeApiProfile
|
||||||
const providerConfig = claudeCompatibleProvider || claudeApiProfile;
|
const providerConfig = claudeCompatibleProvider || claudeApiProfile;
|
||||||
|
|
||||||
// Convert thinking level to token budget
|
// Build thinking configuration
|
||||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
// Adaptive thinking (Opus 4.6): don't set maxThinkingTokens, model uses adaptive by default
|
||||||
|
// Manual thinking (Haiku/Sonnet): use budget_tokens
|
||||||
|
const maxThinkingTokens =
|
||||||
|
thinkingLevel === 'adaptive' ? undefined : getThinkingTokenBudget(thinkingLevel);
|
||||||
|
|
||||||
// Build Claude SDK options
|
// Build Claude SDK options
|
||||||
const sdkOptions: Options = {
|
const sdkOptions: Options = {
|
||||||
@@ -255,14 +234,14 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Build prompt payload
|
// Build prompt payload
|
||||||
let promptPayload: string | AsyncIterable<any>;
|
let promptPayload: string | AsyncIterable<SDKUserMessage>;
|
||||||
|
|
||||||
if (Array.isArray(prompt)) {
|
if (Array.isArray(prompt)) {
|
||||||
// Multi-part prompt (with images)
|
// Multi-part prompt (with images)
|
||||||
promptPayload = (async function* () {
|
promptPayload = (async function* () {
|
||||||
const multiPartPrompt = {
|
const multiPartPrompt: SDKUserMessage = {
|
||||||
type: 'user' as const,
|
type: 'user' as const,
|
||||||
session_id: '',
|
session_id: sdkSessionId || '',
|
||||||
message: {
|
message: {
|
||||||
role: 'user' as const,
|
role: 'user' as const,
|
||||||
content: prompt,
|
content: prompt,
|
||||||
@@ -314,12 +293,16 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
? `${userMessage}\n\nTip: If you're running multiple features in auto-mode, consider reducing concurrency (maxConcurrency setting) to avoid hitting rate limits.`
|
? `${userMessage}\n\nTip: If you're running multiple features in auto-mode, consider reducing concurrency (maxConcurrency setting) to avoid hitting rate limits.`
|
||||||
: userMessage;
|
: userMessage;
|
||||||
|
|
||||||
const enhancedError = new Error(message);
|
const enhancedError = new Error(message) as Error & {
|
||||||
(enhancedError as any).originalError = error;
|
originalError: unknown;
|
||||||
(enhancedError as any).type = errorInfo.type;
|
type: string;
|
||||||
|
retryAfter?: number;
|
||||||
|
};
|
||||||
|
enhancedError.originalError = error;
|
||||||
|
enhancedError.type = errorInfo.type;
|
||||||
|
|
||||||
if (errorInfo.isRateLimit) {
|
if (errorInfo.isRateLimit) {
|
||||||
(enhancedError as any).retryAfter = errorInfo.retryAfter;
|
enhancedError.retryAfter = errorInfo.retryAfter;
|
||||||
}
|
}
|
||||||
|
|
||||||
throw enhancedError;
|
throw enhancedError;
|
||||||
@@ -331,13 +314,37 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
*/
|
*/
|
||||||
async detectInstallation(): Promise<InstallationStatus> {
|
async detectInstallation(): Promise<InstallationStatus> {
|
||||||
// Claude SDK is always available since it's a dependency
|
// Claude SDK is always available since it's a dependency
|
||||||
const hasApiKey = !!process.env.ANTHROPIC_API_KEY;
|
// Check all four supported auth methods, mirroring the logic in buildEnv():
|
||||||
|
// 1. ANTHROPIC_API_KEY environment variable
|
||||||
|
// 2. ANTHROPIC_AUTH_TOKEN environment variable
|
||||||
|
// 3. credentials?.apiKeys?.anthropic (credentials file, checked via platform indicators)
|
||||||
|
// 4. Claude Max CLI OAuth (SDK handles this automatically; detected via getClaudeAuthIndicators)
|
||||||
|
const hasEnvApiKey = !!process.env.ANTHROPIC_API_KEY;
|
||||||
|
const hasEnvAuthToken = !!process.env.ANTHROPIC_AUTH_TOKEN;
|
||||||
|
|
||||||
|
// Check credentials file and CLI OAuth indicators (same sources used by buildEnv)
|
||||||
|
let hasCredentialsApiKey = false;
|
||||||
|
let hasCliOAuth = false;
|
||||||
|
try {
|
||||||
|
const indicators = await getClaudeAuthIndicators();
|
||||||
|
hasCredentialsApiKey = !!indicators.credentials?.hasApiKey;
|
||||||
|
hasCliOAuth = !!(
|
||||||
|
indicators.credentials?.hasOAuthToken ||
|
||||||
|
indicators.hasStatsCacheWithActivity ||
|
||||||
|
(indicators.hasSettingsFile && indicators.hasProjectsSessions)
|
||||||
|
);
|
||||||
|
} catch {
|
||||||
|
// If we can't check indicators, fall back to env vars only
|
||||||
|
}
|
||||||
|
|
||||||
|
const hasApiKey = hasEnvApiKey || hasCredentialsApiKey;
|
||||||
|
const authenticated = hasEnvApiKey || hasEnvAuthToken || hasCredentialsApiKey || hasCliOAuth;
|
||||||
|
|
||||||
const status: InstallationStatus = {
|
const status: InstallationStatus = {
|
||||||
installed: true,
|
installed: true,
|
||||||
method: 'sdk',
|
method: 'sdk',
|
||||||
hasApiKey,
|
hasApiKey,
|
||||||
authenticated: hasApiKey,
|
authenticated,
|
||||||
};
|
};
|
||||||
|
|
||||||
return status;
|
return status;
|
||||||
@@ -349,18 +356,30 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
getAvailableModels(): ModelDefinition[] {
|
getAvailableModels(): ModelDefinition[] {
|
||||||
const models = [
|
const models = [
|
||||||
{
|
{
|
||||||
id: 'claude-opus-4-5-20251101',
|
id: 'claude-opus-4-6',
|
||||||
name: 'Claude Opus 4.5',
|
name: 'Claude Opus 4.6',
|
||||||
modelString: 'claude-opus-4-5-20251101',
|
modelString: 'claude-opus-4-6',
|
||||||
provider: 'anthropic',
|
provider: 'anthropic',
|
||||||
description: 'Most capable Claude model',
|
description: 'Most capable Claude model with adaptive thinking',
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
maxOutputTokens: 16000,
|
maxOutputTokens: 128000,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
supportsTools: true,
|
supportsTools: true,
|
||||||
tier: 'premium' as const,
|
tier: 'premium' as const,
|
||||||
default: true,
|
default: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: 'claude-sonnet-4-6',
|
||||||
|
name: 'Claude Sonnet 4.6',
|
||||||
|
modelString: 'claude-sonnet-4-6',
|
||||||
|
provider: 'anthropic',
|
||||||
|
description: 'Balanced performance and cost with enhanced reasoning',
|
||||||
|
contextWindow: 200000,
|
||||||
|
maxOutputTokens: 64000,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: 'claude-sonnet-4-20250514',
|
id: 'claude-sonnet-4-20250514',
|
||||||
name: 'Claude Sonnet 4',
|
name: 'Claude Sonnet 4',
|
||||||
|
|||||||
@@ -19,12 +19,11 @@ const MAX_OUTPUT_16K = 16000;
|
|||||||
export const CODEX_MODELS: ModelDefinition[] = [
|
export const CODEX_MODELS: ModelDefinition[] = [
|
||||||
// ========== Recommended Codex Models ==========
|
// ========== Recommended Codex Models ==========
|
||||||
{
|
{
|
||||||
id: CODEX_MODEL_MAP.gpt52Codex,
|
id: CODEX_MODEL_MAP.gpt53Codex,
|
||||||
name: 'GPT-5.2-Codex',
|
name: 'GPT-5.3-Codex',
|
||||||
modelString: CODEX_MODEL_MAP.gpt52Codex,
|
modelString: CODEX_MODEL_MAP.gpt53Codex,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description:
|
description: 'Latest frontier agentic coding model.',
|
||||||
'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).',
|
|
||||||
contextWindow: CONTEXT_WINDOW_256K,
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
maxOutputTokens: MAX_OUTPUT_32K,
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -33,12 +32,38 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
default: true,
|
default: true,
|
||||||
hasReasoning: true,
|
hasReasoning: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt53CodexSpark,
|
||||||
|
name: 'GPT-5.3-Codex-Spark',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt53CodexSpark,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Near-instant real-time coding model, 1000+ tokens/sec.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'premium' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt52Codex,
|
||||||
|
name: 'GPT-5.2-Codex',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt52Codex,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Frontier agentic coding model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'premium' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: CODEX_MODEL_MAP.gpt51CodexMax,
|
id: CODEX_MODEL_MAP.gpt51CodexMax,
|
||||||
name: 'GPT-5.1-Codex-Max',
|
name: 'GPT-5.1-Codex-Max',
|
||||||
modelString: CODEX_MODEL_MAP.gpt51CodexMax,
|
modelString: CODEX_MODEL_MAP.gpt51CodexMax,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
|
description: 'Codex-optimized flagship for deep and fast reasoning.',
|
||||||
contextWindow: CONTEXT_WINDOW_256K,
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
maxOutputTokens: MAX_OUTPUT_32K,
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -51,7 +76,46 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
name: 'GPT-5.1-Codex-Mini',
|
name: 'GPT-5.1-Codex-Mini',
|
||||||
modelString: CODEX_MODEL_MAP.gpt51CodexMini,
|
modelString: CODEX_MODEL_MAP.gpt51CodexMini,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description: 'Smaller, more cost-effective version for faster workflows.',
|
description: 'Optimized for codex. Cheaper, faster, but less capable.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'basic' as const,
|
||||||
|
hasReasoning: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt51Codex,
|
||||||
|
name: 'GPT-5.1-Codex',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt51Codex,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Original GPT-5.1 Codex agentic coding model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt5Codex,
|
||||||
|
name: 'GPT-5-Codex',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt5Codex,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Original GPT-5 Codex model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt5CodexMini,
|
||||||
|
name: 'GPT-5-Codex-Mini',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt5CodexMini,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Smaller, cheaper GPT-5 Codex variant.',
|
||||||
contextWindow: CONTEXT_WINDOW_128K,
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
maxOutputTokens: MAX_OUTPUT_16K,
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -66,7 +130,7 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
name: 'GPT-5.2',
|
name: 'GPT-5.2',
|
||||||
modelString: CODEX_MODEL_MAP.gpt52,
|
modelString: CODEX_MODEL_MAP.gpt52,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description: 'Best general agentic model for tasks across industries and domains.',
|
description: 'Latest frontier model with improvements across knowledge, reasoning and coding.',
|
||||||
contextWindow: CONTEXT_WINDOW_256K,
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
maxOutputTokens: MAX_OUTPUT_32K,
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -87,6 +151,19 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
tier: 'standard' as const,
|
tier: 'standard' as const,
|
||||||
hasReasoning: true,
|
hasReasoning: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt5,
|
||||||
|
name: 'GPT-5',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt5,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Base GPT-5 model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
];
|
];
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -30,7 +30,6 @@ import type {
|
|||||||
ModelDefinition,
|
ModelDefinition,
|
||||||
} from './types.js';
|
} from './types.js';
|
||||||
import {
|
import {
|
||||||
CODEX_MODEL_MAP,
|
|
||||||
supportsReasoningEffort,
|
supportsReasoningEffort,
|
||||||
validateBareModelId,
|
validateBareModelId,
|
||||||
calculateReasoningTimeout,
|
calculateReasoningTimeout,
|
||||||
@@ -56,15 +55,10 @@ const CODEX_EXEC_SUBCOMMAND = 'exec';
|
|||||||
const CODEX_JSON_FLAG = '--json';
|
const CODEX_JSON_FLAG = '--json';
|
||||||
const CODEX_MODEL_FLAG = '--model';
|
const CODEX_MODEL_FLAG = '--model';
|
||||||
const CODEX_VERSION_FLAG = '--version';
|
const CODEX_VERSION_FLAG = '--version';
|
||||||
const CODEX_SANDBOX_FLAG = '--sandbox';
|
|
||||||
const CODEX_APPROVAL_FLAG = '--ask-for-approval';
|
|
||||||
const CODEX_SEARCH_FLAG = '--search';
|
|
||||||
const CODEX_OUTPUT_SCHEMA_FLAG = '--output-schema';
|
|
||||||
const CODEX_CONFIG_FLAG = '--config';
|
const CODEX_CONFIG_FLAG = '--config';
|
||||||
const CODEX_IMAGE_FLAG = '--image';
|
|
||||||
const CODEX_ADD_DIR_FLAG = '--add-dir';
|
const CODEX_ADD_DIR_FLAG = '--add-dir';
|
||||||
|
const CODEX_OUTPUT_SCHEMA_FLAG = '--output-schema';
|
||||||
const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check';
|
const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check';
|
||||||
const CODEX_RESUME_FLAG = 'resume';
|
|
||||||
const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';
|
const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';
|
||||||
const CODEX_YOLO_FLAG = '--dangerously-bypass-approvals-and-sandbox';
|
const CODEX_YOLO_FLAG = '--dangerously-bypass-approvals-and-sandbox';
|
||||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||||
@@ -106,9 +100,6 @@ const TEXT_ENCODING = 'utf-8';
|
|||||||
*/
|
*/
|
||||||
const CODEX_CLI_TIMEOUT_MS = DEFAULT_TIMEOUT_MS;
|
const CODEX_CLI_TIMEOUT_MS = DEFAULT_TIMEOUT_MS;
|
||||||
const CODEX_FEATURE_GENERATION_BASE_TIMEOUT_MS = 300000; // 5 minutes for feature generation
|
const CODEX_FEATURE_GENERATION_BASE_TIMEOUT_MS = 300000; // 5 minutes for feature generation
|
||||||
const CONTEXT_WINDOW_256K = 256000;
|
|
||||||
const MAX_OUTPUT_32K = 32000;
|
|
||||||
const MAX_OUTPUT_16K = 16000;
|
|
||||||
const SYSTEM_PROMPT_SEPARATOR = '\n\n';
|
const SYSTEM_PROMPT_SEPARATOR = '\n\n';
|
||||||
const CODEX_INSTRUCTIONS_DIR = '.codex';
|
const CODEX_INSTRUCTIONS_DIR = '.codex';
|
||||||
const CODEX_INSTRUCTIONS_SECTION = 'Codex Project Instructions';
|
const CODEX_INSTRUCTIONS_SECTION = 'Codex Project Instructions';
|
||||||
@@ -210,16 +201,42 @@ function isSdkEligible(options: ExecuteOptions): boolean {
|
|||||||
return isNoToolsRequested(options) && !hasMcpServersConfigured(options);
|
return isNoToolsRequested(options) && !hasMcpServersConfigured(options);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function isSdkEligibleWithApiKey(options: ExecuteOptions): boolean {
|
||||||
|
// When using an API key (not CLI OAuth), prefer SDK over CLI to avoid OAuth issues.
|
||||||
|
// SDK mode is used when MCP servers are not configured (MCP requires CLI).
|
||||||
|
// Tool requests are handled by the SDK, so we allow SDK mode even with tools.
|
||||||
|
return !hasMcpServersConfigured(options);
|
||||||
|
}
|
||||||
|
|
||||||
async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
|
async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
const authIndicators = await getCodexAuthIndicators();
|
||||||
const openAiApiKey = await resolveOpenAiApiKey();
|
const openAiApiKey = await resolveOpenAiApiKey();
|
||||||
const hasApiKey = Boolean(openAiApiKey);
|
const hasApiKey = Boolean(openAiApiKey);
|
||||||
const cliAuthenticated = authIndicators.hasOAuthToken || authIndicators.hasApiKey || hasApiKey;
|
|
||||||
const sdkEligible = isSdkEligible(options);
|
|
||||||
const cliAvailable = Boolean(cliPath);
|
const cliAvailable = Boolean(cliPath);
|
||||||
|
// CLI OAuth login takes priority: if the user has logged in via `codex login`,
|
||||||
|
// use the CLI regardless of whether an API key is also stored.
|
||||||
|
// hasOAuthToken = OAuth session from `codex login`
|
||||||
|
// authIndicators.hasApiKey = API key stored in Codex's own auth file (via `codex login --api-key`)
|
||||||
|
// Both are "CLI-native" auth — distinct from an API key stored in Automaker's credentials.
|
||||||
|
const hasCliNativeAuth = authIndicators.hasOAuthToken || authIndicators.hasApiKey;
|
||||||
|
const sdkEligible = isSdkEligible(options);
|
||||||
|
|
||||||
if (hasApiKey) {
|
// If CLI is available and the user authenticated via the CLI (`codex login`),
|
||||||
|
// prefer CLI mode over SDK. This ensures `codex login` sessions take priority
|
||||||
|
// over API keys stored in Automaker's credentials.
|
||||||
|
if (cliAvailable && hasCliNativeAuth) {
|
||||||
|
return {
|
||||||
|
mode: CODEX_EXECUTION_MODE_CLI,
|
||||||
|
cliPath,
|
||||||
|
openAiApiKey,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// No CLI-native auth — prefer SDK when an API key is available.
|
||||||
|
// Using SDK with an API key avoids OAuth issues that can arise with the CLI.
|
||||||
|
// MCP servers still require CLI mode since the SDK doesn't support MCP.
|
||||||
|
if (hasApiKey && isSdkEligibleWithApiKey(options)) {
|
||||||
return {
|
return {
|
||||||
mode: CODEX_EXECUTION_MODE_SDK,
|
mode: CODEX_EXECUTION_MODE_SDK,
|
||||||
cliPath,
|
cliPath,
|
||||||
@@ -227,6 +244,16 @@ async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<Codex
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MCP servers are requested with an API key but no CLI-native auth — use CLI mode
|
||||||
|
// with the API key passed as an environment variable.
|
||||||
|
if (hasApiKey && cliAvailable) {
|
||||||
|
return {
|
||||||
|
mode: CODEX_EXECUTION_MODE_CLI,
|
||||||
|
cliPath,
|
||||||
|
openAiApiKey,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
if (sdkEligible) {
|
if (sdkEligible) {
|
||||||
if (!cliAvailable) {
|
if (!cliAvailable) {
|
||||||
throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
|
throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
|
||||||
@@ -237,15 +264,9 @@ async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<Codex
|
|||||||
throw new Error(ERROR_CODEX_CLI_REQUIRED);
|
throw new Error(ERROR_CODEX_CLI_REQUIRED);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!cliAuthenticated) {
|
// At this point, neither hasCliNativeAuth nor hasApiKey is true,
|
||||||
throw new Error(ERROR_CODEX_AUTH_REQUIRED);
|
// so authentication is required regardless.
|
||||||
}
|
throw new Error(ERROR_CODEX_AUTH_REQUIRED);
|
||||||
|
|
||||||
return {
|
|
||||||
mode: CODEX_EXECUTION_MODE_CLI,
|
|
||||||
cliPath,
|
|
||||||
openAiApiKey,
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function getEventType(event: Record<string, unknown>): string | null {
|
function getEventType(event: Record<string, unknown>): string | null {
|
||||||
@@ -758,15 +779,12 @@ export class CodexProvider extends BaseProvider {
|
|||||||
options.cwd,
|
options.cwd,
|
||||||
codexSettings.sandboxMode !== 'danger-full-access'
|
codexSettings.sandboxMode !== 'danger-full-access'
|
||||||
);
|
);
|
||||||
const resolvedSandboxMode = sandboxCheck.enabled
|
|
||||||
? codexSettings.sandboxMode
|
|
||||||
: 'danger-full-access';
|
|
||||||
if (!sandboxCheck.enabled && sandboxCheck.message) {
|
if (!sandboxCheck.enabled && sandboxCheck.message) {
|
||||||
console.warn(`[CodexProvider] ${sandboxCheck.message}`);
|
console.warn(`[CodexProvider] ${sandboxCheck.message}`);
|
||||||
}
|
}
|
||||||
const searchEnabled =
|
const searchEnabled =
|
||||||
codexSettings.enableWebSearch || resolveSearchEnabled(resolvedAllowedTools, restrictTools);
|
codexSettings.enableWebSearch || resolveSearchEnabled(resolvedAllowedTools, restrictTools);
|
||||||
const outputSchemaPath = await writeOutputSchemaFile(options.cwd, options.outputFormat);
|
const schemaPath = await writeOutputSchemaFile(options.cwd, options.outputFormat);
|
||||||
const imageBlocks = codexSettings.enableImages ? extractImageBlocks(options.prompt) : [];
|
const imageBlocks = codexSettings.enableImages ? extractImageBlocks(options.prompt) : [];
|
||||||
const imagePaths = await writeImageFiles(options.cwd, imageBlocks);
|
const imagePaths = await writeImageFiles(options.cwd, imageBlocks);
|
||||||
const approvalPolicy =
|
const approvalPolicy =
|
||||||
@@ -801,7 +819,7 @@ export class CodexProvider extends BaseProvider {
|
|||||||
overrides.push({ key: 'features.web_search_request', value: true });
|
overrides.push({ key: 'features.web_search_request', value: true });
|
||||||
}
|
}
|
||||||
|
|
||||||
const configOverrides = buildConfigOverrides(overrides);
|
const configOverrideArgs = buildConfigOverrides(overrides);
|
||||||
const preExecArgs: string[] = [];
|
const preExecArgs: string[] = [];
|
||||||
|
|
||||||
// Add additional directories with write access
|
// Add additional directories with write access
|
||||||
@@ -811,6 +829,12 @@ export class CodexProvider extends BaseProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If images were written to disk, add the image directory so the CLI can access them
|
||||||
|
if (imagePaths.length > 0) {
|
||||||
|
const imageDir = path.join(options.cwd, CODEX_INSTRUCTIONS_DIR, IMAGE_TEMP_DIR);
|
||||||
|
preExecArgs.push(CODEX_ADD_DIR_FLAG, imageDir);
|
||||||
|
}
|
||||||
|
|
||||||
// Model is already bare (no prefix) - validated by executeQuery
|
// Model is already bare (no prefix) - validated by executeQuery
|
||||||
const args = [
|
const args = [
|
||||||
CODEX_EXEC_SUBCOMMAND,
|
CODEX_EXEC_SUBCOMMAND,
|
||||||
@@ -820,6 +844,8 @@ export class CodexProvider extends BaseProvider {
|
|||||||
CODEX_MODEL_FLAG,
|
CODEX_MODEL_FLAG,
|
||||||
options.model,
|
options.model,
|
||||||
CODEX_JSON_FLAG,
|
CODEX_JSON_FLAG,
|
||||||
|
...configOverrideArgs,
|
||||||
|
...(schemaPath ? [CODEX_OUTPUT_SCHEMA_FLAG, schemaPath] : []),
|
||||||
'-', // Read prompt from stdin to avoid shell escaping issues
|
'-', // Read prompt from stdin to avoid shell escaping issues
|
||||||
];
|
];
|
||||||
|
|
||||||
@@ -866,16 +892,36 @@ export class CodexProvider extends BaseProvider {
|
|||||||
|
|
||||||
// Enhance error message with helpful context
|
// Enhance error message with helpful context
|
||||||
let enhancedError = errorText;
|
let enhancedError = errorText;
|
||||||
if (errorText.toLowerCase().includes('rate limit')) {
|
const errorLower = errorText.toLowerCase();
|
||||||
|
if (errorLower.includes('rate limit')) {
|
||||||
enhancedError = `${errorText}\n\nTip: You're being rate limited. Try reducing concurrent tasks or waiting a few minutes before retrying.`;
|
enhancedError = `${errorText}\n\nTip: You're being rate limited. Try reducing concurrent tasks or waiting a few minutes before retrying.`;
|
||||||
|
} else if (errorLower.includes('authentication') || errorLower.includes('unauthorized')) {
|
||||||
|
enhancedError = `${errorText}\n\nTip: Check that your OPENAI_API_KEY is set correctly or run 'codex login' to authenticate.`;
|
||||||
} else if (
|
} else if (
|
||||||
errorText.toLowerCase().includes('authentication') ||
|
errorLower.includes('model does not exist') ||
|
||||||
errorText.toLowerCase().includes('unauthorized')
|
errorLower.includes('requested model does not exist') ||
|
||||||
|
errorLower.includes('do not have access') ||
|
||||||
|
errorLower.includes('model_not_found') ||
|
||||||
|
errorLower.includes('invalid_model')
|
||||||
) {
|
) {
|
||||||
enhancedError = `${errorText}\n\nTip: Check that your OPENAI_API_KEY is set correctly or run 'codex auth login' to authenticate.`;
|
enhancedError =
|
||||||
|
`${errorText}\n\nTip: The model '${options.model}' may not be available on your OpenAI plan. ` +
|
||||||
|
`See https://platform.openai.com/docs/models for available models. ` +
|
||||||
|
`Some models require a ChatGPT Pro/Plus subscription—authenticate with 'codex login' instead of an API key.`;
|
||||||
} else if (
|
} else if (
|
||||||
errorText.toLowerCase().includes('not found') ||
|
errorLower.includes('stream disconnected') ||
|
||||||
errorText.toLowerCase().includes('command not found')
|
errorLower.includes('stream ended') ||
|
||||||
|
errorLower.includes('connection reset')
|
||||||
|
) {
|
||||||
|
enhancedError =
|
||||||
|
`${errorText}\n\nTip: The connection to OpenAI was interrupted. This can happen due to:\n` +
|
||||||
|
`- Network instability\n` +
|
||||||
|
`- The model not being available on your plan\n` +
|
||||||
|
`- Server-side timeouts for long-running requests\n` +
|
||||||
|
`Try again, or switch to a different model.`;
|
||||||
|
} else if (
|
||||||
|
errorLower.includes('command not found') ||
|
||||||
|
errorLower.includes('is not recognized as an internal or external command')
|
||||||
) {
|
) {
|
||||||
enhancedError = `${errorText}\n\nTip: Make sure the Codex CLI is installed. Run 'npm install -g @openai/codex-cli' to install.`;
|
enhancedError = `${errorText}\n\nTip: Make sure the Codex CLI is installed. Run 'npm install -g @openai/codex-cli' to install.`;
|
||||||
}
|
}
|
||||||
@@ -1033,7 +1079,6 @@ export class CodexProvider extends BaseProvider {
|
|||||||
async detectInstallation(): Promise<InstallationStatus> {
|
async detectInstallation(): Promise<InstallationStatus> {
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const hasApiKey = Boolean(await resolveOpenAiApiKey());
|
const hasApiKey = Boolean(await resolveOpenAiApiKey());
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
|
||||||
const installed = !!cliPath;
|
const installed = !!cliPath;
|
||||||
|
|
||||||
let version = '';
|
let version = '';
|
||||||
@@ -1045,7 +1090,7 @@ export class CodexProvider extends BaseProvider {
|
|||||||
cwd: process.cwd(),
|
cwd: process.cwd(),
|
||||||
});
|
});
|
||||||
version = result.stdout.trim();
|
version = result.stdout.trim();
|
||||||
} catch (error) {
|
} catch {
|
||||||
version = '';
|
version = '';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,6 +15,9 @@ const SDK_HISTORY_HEADER = 'Current request:\n';
|
|||||||
const DEFAULT_RESPONSE_TEXT = '';
|
const DEFAULT_RESPONSE_TEXT = '';
|
||||||
const SDK_ERROR_DETAILS_LABEL = 'Details:';
|
const SDK_ERROR_DETAILS_LABEL = 'Details:';
|
||||||
|
|
||||||
|
type SdkReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
|
||||||
|
const SDK_REASONING_EFFORTS = new Set<string>(['minimal', 'low', 'medium', 'high', 'xhigh']);
|
||||||
|
|
||||||
type PromptBlock = {
|
type PromptBlock = {
|
||||||
type: string;
|
type: string;
|
||||||
text?: string;
|
text?: string;
|
||||||
@@ -99,38 +102,52 @@ export async function* executeCodexSdkQuery(
|
|||||||
const apiKey = resolveApiKey();
|
const apiKey = resolveApiKey();
|
||||||
const codex = new Codex({ apiKey });
|
const codex = new Codex({ apiKey });
|
||||||
|
|
||||||
|
// Build thread options with model
|
||||||
|
// The model must be passed to startThread/resumeThread so the SDK
|
||||||
|
// knows which model to use for the conversation. Without this,
|
||||||
|
// the SDK may use a default model that the user doesn't have access to.
|
||||||
|
const threadOptions: {
|
||||||
|
model?: string;
|
||||||
|
modelReasoningEffort?: SdkReasoningEffort;
|
||||||
|
} = {};
|
||||||
|
|
||||||
|
if (options.model) {
|
||||||
|
threadOptions.model = options.model;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add reasoning effort to thread options if model supports it
|
||||||
|
if (
|
||||||
|
options.reasoningEffort &&
|
||||||
|
options.model &&
|
||||||
|
supportsReasoningEffort(options.model) &&
|
||||||
|
options.reasoningEffort !== 'none' &&
|
||||||
|
SDK_REASONING_EFFORTS.has(options.reasoningEffort)
|
||||||
|
) {
|
||||||
|
threadOptions.modelReasoningEffort = options.reasoningEffort as SdkReasoningEffort;
|
||||||
|
}
|
||||||
|
|
||||||
// Resume existing thread or start new one
|
// Resume existing thread or start new one
|
||||||
let thread;
|
let thread;
|
||||||
if (options.sdkSessionId) {
|
if (options.sdkSessionId) {
|
||||||
try {
|
try {
|
||||||
thread = codex.resumeThread(options.sdkSessionId);
|
thread = codex.resumeThread(options.sdkSessionId, threadOptions);
|
||||||
} catch {
|
} catch {
|
||||||
// If resume fails, start a new thread
|
// If resume fails, start a new thread
|
||||||
thread = codex.startThread();
|
thread = codex.startThread(threadOptions);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
thread = codex.startThread();
|
thread = codex.startThread(threadOptions);
|
||||||
}
|
}
|
||||||
|
|
||||||
const promptText = buildPromptText(options, systemPrompt);
|
const promptText = buildPromptText(options, systemPrompt);
|
||||||
|
|
||||||
// Build run options with reasoning effort if supported
|
// Build run options
|
||||||
const runOptions: {
|
const runOptions: {
|
||||||
signal?: AbortSignal;
|
signal?: AbortSignal;
|
||||||
reasoning?: { effort: string };
|
|
||||||
} = {
|
} = {
|
||||||
signal: options.abortController?.signal,
|
signal: options.abortController?.signal,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Add reasoning effort if model supports it and reasoningEffort is specified
|
|
||||||
if (
|
|
||||||
options.reasoningEffort &&
|
|
||||||
supportsReasoningEffort(options.model) &&
|
|
||||||
options.reasoningEffort !== 'none'
|
|
||||||
) {
|
|
||||||
runOptions.reasoning = { effort: options.reasoningEffort };
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the query
|
// Run the query
|
||||||
const result = await thread.run(promptText, runOptions);
|
const result = await thread.run(promptText, runOptions);
|
||||||
|
|
||||||
@@ -160,10 +177,42 @@ export async function* executeCodexSdkQuery(
|
|||||||
} catch (error) {
|
} catch (error) {
|
||||||
const errorInfo = classifyError(error);
|
const errorInfo = classifyError(error);
|
||||||
const userMessage = getUserFriendlyErrorMessage(error);
|
const userMessage = getUserFriendlyErrorMessage(error);
|
||||||
const combinedMessage = buildSdkErrorMessage(errorInfo.message, userMessage);
|
let combinedMessage = buildSdkErrorMessage(errorInfo.message, userMessage);
|
||||||
|
|
||||||
|
// Enhance error messages with actionable tips for common Codex issues
|
||||||
|
// Normalize inputs to avoid crashes from nullish values
|
||||||
|
const errorLower = (errorInfo?.message ?? '').toLowerCase();
|
||||||
|
const modelLabel = options?.model ?? '<unknown model>';
|
||||||
|
|
||||||
|
if (
|
||||||
|
errorLower.includes('does not exist') ||
|
||||||
|
errorLower.includes('model_not_found') ||
|
||||||
|
errorLower.includes('invalid_model')
|
||||||
|
) {
|
||||||
|
// Model not found - provide helpful guidance
|
||||||
|
combinedMessage +=
|
||||||
|
`\n\nTip: The model '${modelLabel}' may not be available on your OpenAI plan. ` +
|
||||||
|
`Some models (like gpt-5.3-codex) require a ChatGPT Pro/Plus subscription and OAuth login via 'codex login'. ` +
|
||||||
|
`Try using a different model (e.g., gpt-5.1 or gpt-5.2), or authenticate with 'codex login' instead of an API key.`;
|
||||||
|
} else if (
|
||||||
|
errorLower.includes('stream disconnected') ||
|
||||||
|
errorLower.includes('stream ended') ||
|
||||||
|
errorLower.includes('connection reset') ||
|
||||||
|
errorLower.includes('socket hang up')
|
||||||
|
) {
|
||||||
|
// Stream disconnection - provide helpful guidance
|
||||||
|
combinedMessage +=
|
||||||
|
`\n\nTip: The connection to OpenAI was interrupted. This can happen due to:\n` +
|
||||||
|
`- Network instability\n` +
|
||||||
|
`- The model not being available on your plan (try 'codex login' for OAuth authentication)\n` +
|
||||||
|
`- Server-side timeouts for long-running requests\n` +
|
||||||
|
`Try again, or switch to a different model.`;
|
||||||
|
}
|
||||||
|
|
||||||
console.error('[CodexSDK] executeQuery() error during execution:', {
|
console.error('[CodexSDK] executeQuery() error during execution:', {
|
||||||
type: errorInfo.type,
|
type: errorInfo.type,
|
||||||
message: errorInfo.message,
|
message: errorInfo.message,
|
||||||
|
model: options.model,
|
||||||
isRateLimit: errorInfo.isRateLimit,
|
isRateLimit: errorInfo.isRateLimit,
|
||||||
retryAfter: errorInfo.retryAfter,
|
retryAfter: errorInfo.retryAfter,
|
||||||
stack: error instanceof Error ? error.stack : undefined,
|
stack: error instanceof Error ? error.stack : undefined,
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ import {
|
|||||||
const logger = createLogger('CopilotProvider');
|
const logger = createLogger('CopilotProvider');
|
||||||
|
|
||||||
// Default bare model (without copilot- prefix) for SDK calls
|
// Default bare model (without copilot- prefix) for SDK calls
|
||||||
const DEFAULT_BARE_MODEL = 'claude-sonnet-4.5';
|
const DEFAULT_BARE_MODEL = 'claude-sonnet-4.6';
|
||||||
|
|
||||||
// =============================================================================
|
// =============================================================================
|
||||||
// SDK Event Types (from @github/copilot-sdk)
|
// SDK Event Types (from @github/copilot-sdk)
|
||||||
@@ -85,10 +85,6 @@ interface SdkToolExecutionEndEvent extends SdkEvent {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
interface SdkSessionIdleEvent extends SdkEvent {
|
|
||||||
type: 'session.idle';
|
|
||||||
}
|
|
||||||
|
|
||||||
interface SdkSessionErrorEvent extends SdkEvent {
|
interface SdkSessionErrorEvent extends SdkEvent {
|
||||||
type: 'session.error';
|
type: 'session.error';
|
||||||
data: {
|
data: {
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ import type {
|
|||||||
} from './types.js';
|
} from './types.js';
|
||||||
import { validateBareModelId } from '@automaker/types';
|
import { validateBareModelId } from '@automaker/types';
|
||||||
import { validateApiKey } from '../lib/auth-utils.js';
|
import { validateApiKey } from '../lib/auth-utils.js';
|
||||||
import { getEffectivePermissions } from '../services/cursor-config-service.js';
|
import { getEffectivePermissions, detectProfile } from '../services/cursor-config-service.js';
|
||||||
import {
|
import {
|
||||||
type CursorStreamEvent,
|
type CursorStreamEvent,
|
||||||
type CursorSystemEvent,
|
type CursorSystemEvent,
|
||||||
@@ -69,6 +69,7 @@ interface CursorToolHandler<TArgs = unknown, TResult = unknown> {
|
|||||||
* Registry of Cursor tool handlers
|
* Registry of Cursor tool handlers
|
||||||
* Each handler knows how to normalize its specific tool call type
|
* Each handler knows how to normalize its specific tool call type
|
||||||
*/
|
*/
|
||||||
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- handler registry stores heterogeneous tool type parameters
|
||||||
const CURSOR_TOOL_HANDLERS: Record<string, CursorToolHandler<any, any>> = {
|
const CURSOR_TOOL_HANDLERS: Record<string, CursorToolHandler<any, any>> = {
|
||||||
readToolCall: {
|
readToolCall: {
|
||||||
name: 'Read',
|
name: 'Read',
|
||||||
@@ -877,8 +878,12 @@ export class CursorProvider extends CliProvider {
|
|||||||
|
|
||||||
logger.debug(`CursorProvider.executeQuery called with model: "${options.model}"`);
|
logger.debug(`CursorProvider.executeQuery called with model: "${options.model}"`);
|
||||||
|
|
||||||
// Get effective permissions for this project
|
// Get effective permissions for this project and detect the active profile
|
||||||
const effectivePermissions = await getEffectivePermissions(options.cwd || process.cwd());
|
const effectivePermissions = await getEffectivePermissions(options.cwd || process.cwd());
|
||||||
|
const activeProfile = detectProfile(effectivePermissions);
|
||||||
|
logger.debug(
|
||||||
|
`Active permission profile: ${activeProfile ?? 'none'}, permissions: ${JSON.stringify(effectivePermissions)}`
|
||||||
|
);
|
||||||
|
|
||||||
// Debug: log raw events when AUTOMAKER_DEBUG_RAW_OUTPUT is enabled
|
// Debug: log raw events when AUTOMAKER_DEBUG_RAW_OUTPUT is enabled
|
||||||
const debugRawEvents =
|
const debugRawEvents =
|
||||||
|
|||||||
@@ -20,7 +20,6 @@ import type {
|
|||||||
ProviderMessage,
|
ProviderMessage,
|
||||||
InstallationStatus,
|
InstallationStatus,
|
||||||
ModelDefinition,
|
ModelDefinition,
|
||||||
ContentBlock,
|
|
||||||
} from './types.js';
|
} from './types.js';
|
||||||
import { validateBareModelId } from '@automaker/types';
|
import { validateBareModelId } from '@automaker/types';
|
||||||
import { GEMINI_MODEL_MAP, type GeminiAuthStatus } from '@automaker/types';
|
import { GEMINI_MODEL_MAP, type GeminiAuthStatus } from '@automaker/types';
|
||||||
|
|||||||
@@ -192,6 +192,28 @@ export interface OpenCodeToolErrorEvent extends OpenCodeBaseEvent {
|
|||||||
part?: OpenCodePart & { error: string };
|
part?: OpenCodePart & { error: string };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tool use event - The actual format emitted by OpenCode CLI when a tool is invoked.
|
||||||
|
* Contains the tool name, call ID, and the complete state (input, output, status).
|
||||||
|
* Note: OpenCode CLI emits 'tool_use' (not 'tool_call') as the event type.
|
||||||
|
*/
|
||||||
|
export interface OpenCodeToolUseEvent extends OpenCodeBaseEvent {
|
||||||
|
type: 'tool_use';
|
||||||
|
part: OpenCodePart & {
|
||||||
|
type: 'tool';
|
||||||
|
callID?: string;
|
||||||
|
tool?: string;
|
||||||
|
state?: {
|
||||||
|
status?: string;
|
||||||
|
input?: unknown;
|
||||||
|
output?: string;
|
||||||
|
title?: string;
|
||||||
|
metadata?: unknown;
|
||||||
|
time?: { start: number; end: number };
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Union type of all OpenCode stream events
|
* Union type of all OpenCode stream events
|
||||||
*/
|
*/
|
||||||
@@ -200,6 +222,7 @@ export type OpenCodeStreamEvent =
|
|||||||
| OpenCodeStepStartEvent
|
| OpenCodeStepStartEvent
|
||||||
| OpenCodeStepFinishEvent
|
| OpenCodeStepFinishEvent
|
||||||
| OpenCodeToolCallEvent
|
| OpenCodeToolCallEvent
|
||||||
|
| OpenCodeToolUseEvent
|
||||||
| OpenCodeToolResultEvent
|
| OpenCodeToolResultEvent
|
||||||
| OpenCodeErrorEvent
|
| OpenCodeErrorEvent
|
||||||
| OpenCodeToolErrorEvent;
|
| OpenCodeToolErrorEvent;
|
||||||
@@ -311,8 +334,8 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
* Arguments built:
|
* Arguments built:
|
||||||
* - 'run' subcommand for executing queries
|
* - 'run' subcommand for executing queries
|
||||||
* - '--format', 'json' for JSONL streaming output
|
* - '--format', 'json' for JSONL streaming output
|
||||||
* - '-c', '<cwd>' for working directory (using opencode's -c flag)
|
|
||||||
* - '--model', '<model>' for model selection (if specified)
|
* - '--model', '<model>' for model selection (if specified)
|
||||||
|
* - '--session', '<id>' for continuing an existing session (if sdkSessionId is set)
|
||||||
*
|
*
|
||||||
* The prompt is passed via stdin (piped) to avoid shell escaping issues.
|
* The prompt is passed via stdin (piped) to avoid shell escaping issues.
|
||||||
* OpenCode CLI automatically reads from stdin when input is piped.
|
* OpenCode CLI automatically reads from stdin when input is piped.
|
||||||
@@ -326,6 +349,14 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
// Add JSON output format for JSONL parsing (not 'stream-json')
|
// Add JSON output format for JSONL parsing (not 'stream-json')
|
||||||
args.push('--format', 'json');
|
args.push('--format', 'json');
|
||||||
|
|
||||||
|
// Handle session resumption for conversation continuity.
|
||||||
|
// The opencode CLI supports `--session <id>` to continue an existing session.
|
||||||
|
// The sdkSessionId is captured from the sessionID field in previous stream events
|
||||||
|
// and persisted by AgentService for use in follow-up messages.
|
||||||
|
if (options.sdkSessionId) {
|
||||||
|
args.push('--session', options.sdkSessionId);
|
||||||
|
}
|
||||||
|
|
||||||
// Handle model selection
|
// Handle model selection
|
||||||
// Convert canonical prefix format (opencode-xxx) to CLI slash format (opencode/xxx)
|
// Convert canonical prefix format (opencode-xxx) to CLI slash format (opencode/xxx)
|
||||||
// OpenCode CLI expects provider/model format (e.g., 'opencode/big-model')
|
// OpenCode CLI expects provider/model format (e.g., 'opencode/big-model')
|
||||||
@@ -398,15 +429,225 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
return subprocessOptions;
|
return subprocessOptions;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if an error message indicates a session-not-found condition.
|
||||||
|
*
|
||||||
|
* Centralizes the pattern matching for session errors to avoid duplication.
|
||||||
|
* Strips ANSI escape codes first since opencode CLI uses colored stderr output
|
||||||
|
* (e.g. "\x1b[91m\x1b[1mError: \x1b[0mSession not found").
|
||||||
|
*
|
||||||
|
* IMPORTANT: Patterns must be specific enough to avoid false positives.
|
||||||
|
* Generic patterns like "notfounderror" or "resource not found" match
|
||||||
|
* non-session errors (e.g. "ProviderModelNotFoundError") which would
|
||||||
|
* trigger unnecessary retries that fail identically, producing confusing
|
||||||
|
* error messages like "OpenCode session could not be created".
|
||||||
|
*
|
||||||
|
* @param errorText - Raw error text (may contain ANSI codes)
|
||||||
|
* @returns true if the error indicates the session was not found
|
||||||
|
*/
|
||||||
|
private static isSessionNotFoundError(errorText: string): boolean {
|
||||||
|
const cleaned = OpencodeProvider.stripAnsiCodes(errorText).toLowerCase();
|
||||||
|
|
||||||
|
// Explicit session-related phrases — high confidence
|
||||||
|
if (
|
||||||
|
cleaned.includes('session not found') ||
|
||||||
|
cleaned.includes('session does not exist') ||
|
||||||
|
cleaned.includes('invalid session') ||
|
||||||
|
cleaned.includes('session expired') ||
|
||||||
|
cleaned.includes('no such session')
|
||||||
|
) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generic "NotFoundError" / "resource not found" are only session errors
|
||||||
|
// when the message also references a session path or session ID.
|
||||||
|
// Without this guard, errors like "ProviderModelNotFoundError" or
|
||||||
|
// "Resource not found: /path/to/config.json" would false-positive.
|
||||||
|
if (cleaned.includes('notfounderror') || cleaned.includes('resource not found')) {
|
||||||
|
return cleaned.includes('/session/') || /\bsession\b/.test(cleaned);
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Strip ANSI escape codes from a string.
|
||||||
|
*
|
||||||
|
* The OpenCode CLI uses colored stderr output (e.g. "\x1b[91m\x1b[1mError: \x1b[0m").
|
||||||
|
* These escape codes render as garbled text like "[91m[1mError: [0m" in the UI
|
||||||
|
* when passed through as-is. This utility removes them so error messages are
|
||||||
|
* clean and human-readable.
|
||||||
|
*/
|
||||||
|
private static stripAnsiCodes(text: string): string {
|
||||||
|
return text.replace(/\x1b\[[0-9;]*m/g, '');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clean a CLI error message for display.
|
||||||
|
*
|
||||||
|
* Strips ANSI escape codes AND removes the redundant "Error: " prefix that
|
||||||
|
* the OpenCode CLI prepends to error messages in its colored stderr output
|
||||||
|
* (e.g. "\x1b[91m\x1b[1mError: \x1b[0mSession not found" → "Session not found").
|
||||||
|
*
|
||||||
|
* Without this, consumers that wrap the message in their own "Error: " prefix
|
||||||
|
* (like AgentService or AgentExecutor) produce garbled double-prefixed output:
|
||||||
|
* "Error: Error: Session not found".
|
||||||
|
*/
|
||||||
|
private static cleanErrorMessage(text: string): string {
|
||||||
|
let cleaned = OpencodeProvider.stripAnsiCodes(text).trim();
|
||||||
|
// Remove leading "Error: " prefix (case-insensitive) if present.
|
||||||
|
// The CLI formats errors as: \x1b[91m\x1b[1mError: \x1b[0m<actual message>
|
||||||
|
// After ANSI stripping this becomes: "Error: <actual message>"
|
||||||
|
cleaned = cleaned.replace(/^Error:\s*/i, '').trim();
|
||||||
|
return cleaned || text;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a query with automatic session resumption fallback.
|
||||||
|
*
|
||||||
|
* When a sdkSessionId is provided, the CLI receives `--session <id>`.
|
||||||
|
* If the session no longer exists on disk the CLI will fail with a
|
||||||
|
* "NotFoundError" / "Resource not found" / "Session not found" error.
|
||||||
|
*
|
||||||
|
* The opencode CLI writes this to **stderr** and exits non-zero.
|
||||||
|
* `spawnJSONLProcess` collects stderr and **yields** it as
|
||||||
|
* `{ type: 'error', error: <stderrText> }` — it is NOT thrown.
|
||||||
|
* After `normalizeEvent`, the error becomes a yielded `ProviderMessage`
|
||||||
|
* with `type: 'error'`. A simple try/catch therefore cannot intercept it.
|
||||||
|
*
|
||||||
|
* This override iterates the parent stream, intercepts yielded error
|
||||||
|
* messages that match the session-not-found pattern, and retries the
|
||||||
|
* entire query WITHOUT the `--session` flag so a fresh session is started.
|
||||||
|
*
|
||||||
|
* Session-not-found retry is ONLY attempted when `sdkSessionId` is set.
|
||||||
|
* Without the `--session` flag the CLI always creates a fresh session, so
|
||||||
|
* retrying without it would be identical to the first attempt and would
|
||||||
|
* fail the same way — producing a confusing "session could not be created"
|
||||||
|
* message for what is actually a different error (model not found, auth
|
||||||
|
* failure, etc.).
|
||||||
|
*
|
||||||
|
* All error messages (session or not) are cleaned of ANSI codes and the
|
||||||
|
* CLI's redundant "Error: " prefix before being yielded to consumers.
|
||||||
|
*
|
||||||
|
* After a successful retry, the consumer (AgentService) will receive a new
|
||||||
|
* session_id from the fresh stream events, which it persists to metadata —
|
||||||
|
* replacing the stale sdkSessionId and preventing repeated failures.
|
||||||
|
*/
|
||||||
|
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
|
||||||
|
// When no sdkSessionId is set, there is nothing to "retry without" — just
|
||||||
|
// stream normally and clean error messages as they pass through.
|
||||||
|
if (!options.sdkSessionId) {
|
||||||
|
for await (const msg of super.executeQuery(options)) {
|
||||||
|
// Clean error messages so consumers don't get ANSI or double "Error:" prefix
|
||||||
|
if (msg.type === 'error' && msg.error && typeof msg.error === 'string') {
|
||||||
|
msg.error = OpencodeProvider.cleanErrorMessage(msg.error);
|
||||||
|
}
|
||||||
|
yield msg;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// sdkSessionId IS set — the CLI will receive `--session <id>`.
|
||||||
|
// If that session no longer exists, intercept the error and retry fresh.
|
||||||
|
//
|
||||||
|
// To avoid buffering the entire stream in memory for long-lived sessions,
|
||||||
|
// we only buffer an initial window of messages until we observe a healthy
|
||||||
|
// (non-error) message. Once a healthy message is seen, we flush the buffer
|
||||||
|
// and switch to direct passthrough, while still watching for session errors
|
||||||
|
// via isSessionNotFoundError on any subsequent error messages.
|
||||||
|
const buffered: ProviderMessage[] = [];
|
||||||
|
let sessionError = false;
|
||||||
|
let seenHealthyMessage = false;
|
||||||
|
|
||||||
|
try {
|
||||||
|
for await (const msg of super.executeQuery(options)) {
|
||||||
|
if (msg.type === 'error') {
|
||||||
|
const errorText = msg.error || '';
|
||||||
|
if (OpencodeProvider.isSessionNotFoundError(errorText)) {
|
||||||
|
sessionError = true;
|
||||||
|
opencodeLogger.info(
|
||||||
|
`OpenCode session error detected (session "${options.sdkSessionId}") ` +
|
||||||
|
`— retrying without --session to start fresh`
|
||||||
|
);
|
||||||
|
break; // stop consuming the failed stream
|
||||||
|
}
|
||||||
|
|
||||||
|
// Non-session error — clean it
|
||||||
|
if (msg.error && typeof msg.error === 'string') {
|
||||||
|
msg.error = OpencodeProvider.cleanErrorMessage(msg.error);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// A non-error message is a healthy signal — stop buffering after this
|
||||||
|
seenHealthyMessage = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (seenHealthyMessage && buffered.length > 0) {
|
||||||
|
// Flush the pre-healthy buffer first, then switch to passthrough
|
||||||
|
for (const bufferedMsg of buffered) {
|
||||||
|
yield bufferedMsg;
|
||||||
|
}
|
||||||
|
buffered.length = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (seenHealthyMessage) {
|
||||||
|
// Passthrough mode — yield directly without buffering
|
||||||
|
yield msg;
|
||||||
|
} else {
|
||||||
|
// Still in initial window — buffer until we see a healthy message
|
||||||
|
buffered.push(msg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// Also handle thrown exceptions (e.g. from mapError in cli-provider)
|
||||||
|
const errMsg = error instanceof Error ? error.message : String(error);
|
||||||
|
if (OpencodeProvider.isSessionNotFoundError(errMsg)) {
|
||||||
|
sessionError = true;
|
||||||
|
opencodeLogger.info(
|
||||||
|
`OpenCode session error detected (thrown, session "${options.sdkSessionId}") ` +
|
||||||
|
`— retrying without --session to start fresh`
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sessionError) {
|
||||||
|
// Retry the entire query without the stale session ID.
|
||||||
|
const retryOptions = { ...options, sdkSessionId: undefined };
|
||||||
|
opencodeLogger.info('Retrying OpenCode query without --session flag...');
|
||||||
|
|
||||||
|
// Stream the retry directly to the consumer.
|
||||||
|
// If the retry also fails, it's a genuine error (not session-related)
|
||||||
|
// and should be surfaced as-is rather than masked with a misleading
|
||||||
|
// "session could not be created" message.
|
||||||
|
for await (const retryMsg of super.executeQuery(retryOptions)) {
|
||||||
|
if (retryMsg.type === 'error' && retryMsg.error && typeof retryMsg.error === 'string') {
|
||||||
|
retryMsg.error = OpencodeProvider.cleanErrorMessage(retryMsg.error);
|
||||||
|
}
|
||||||
|
yield retryMsg;
|
||||||
|
}
|
||||||
|
} else if (buffered.length > 0) {
|
||||||
|
// No session error and still have buffered messages (stream ended before
|
||||||
|
// any healthy message was observed) — flush them to the consumer
|
||||||
|
for (const msg of buffered) {
|
||||||
|
yield msg;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If seenHealthyMessage is true, all messages have already been yielded
|
||||||
|
// directly in passthrough mode — nothing left to flush.
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Normalize a raw CLI event to ProviderMessage format
|
* Normalize a raw CLI event to ProviderMessage format
|
||||||
*
|
*
|
||||||
* Maps OpenCode event types to the standard ProviderMessage structure:
|
* Maps OpenCode event types to the standard ProviderMessage structure:
|
||||||
* - text -> type: 'assistant', content with type: 'text'
|
* - text -> type: 'assistant', content with type: 'text'
|
||||||
* - step_start -> null (informational, no message needed)
|
* - step_start -> null (informational, no message needed)
|
||||||
* - step_finish with reason 'stop' -> type: 'result', subtype: 'success'
|
* - step_finish with reason 'stop'/'end_turn' -> type: 'result', subtype: 'success'
|
||||||
|
* - step_finish with reason 'tool-calls' -> null (intermediate step, not final)
|
||||||
* - step_finish with error -> type: 'error'
|
* - step_finish with error -> type: 'error'
|
||||||
* - tool_call -> type: 'assistant', content with type: 'tool_use'
|
* - tool_use -> type: 'assistant', content with type: 'tool_use' (OpenCode CLI format)
|
||||||
|
* - tool_call -> type: 'assistant', content with type: 'tool_use' (legacy format)
|
||||||
* - tool_result -> type: 'assistant', content with type: 'tool_result'
|
* - tool_result -> type: 'assistant', content with type: 'tool_result'
|
||||||
* - error -> type: 'error'
|
* - error -> type: 'error'
|
||||||
*
|
*
|
||||||
@@ -459,7 +700,7 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
return {
|
return {
|
||||||
type: 'error',
|
type: 'error',
|
||||||
session_id: finishEvent.sessionID,
|
session_id: finishEvent.sessionID,
|
||||||
error: finishEvent.part.error,
|
error: OpencodeProvider.cleanErrorMessage(finishEvent.part.error),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -468,15 +709,40 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
return {
|
return {
|
||||||
type: 'error',
|
type: 'error',
|
||||||
session_id: finishEvent.sessionID,
|
session_id: finishEvent.sessionID,
|
||||||
error: 'Step execution failed',
|
error: OpencodeProvider.cleanErrorMessage('Step execution failed'),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
// Successful completion (reason: 'stop' or 'end_turn')
|
// Intermediate step completion (reason: 'tool-calls') — the agent loop
|
||||||
|
// is continuing because the model requested tool calls. Skip these so
|
||||||
|
// consumers don't mistake them for final results.
|
||||||
|
if (finishEvent.part?.reason === 'tool-calls') {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only treat an explicit allowlist of reasons as true success.
|
||||||
|
// Reasons like 'length' (context-window truncation) or 'content-filter'
|
||||||
|
// indicate the model stopped abnormally and must not be surfaced as
|
||||||
|
// successful completions.
|
||||||
|
const SUCCESS_REASONS = new Set(['stop', 'end_turn']);
|
||||||
|
const reason = finishEvent.part?.reason;
|
||||||
|
|
||||||
|
if (reason === undefined || SUCCESS_REASONS.has(reason)) {
|
||||||
|
// Final completion (reason: 'stop', 'end_turn', or unset)
|
||||||
|
return {
|
||||||
|
type: 'result',
|
||||||
|
subtype: 'success',
|
||||||
|
session_id: finishEvent.sessionID,
|
||||||
|
result: (finishEvent.part as OpenCodePart & { result?: string })?.result,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Non-success, non-tool-calls reason (e.g. 'length', 'content-filter')
|
||||||
return {
|
return {
|
||||||
type: 'result',
|
type: 'result',
|
||||||
subtype: 'success',
|
subtype: 'error',
|
||||||
session_id: finishEvent.sessionID,
|
session_id: finishEvent.sessionID,
|
||||||
|
error: `Step finished with non-success reason: ${reason}`,
|
||||||
result: (finishEvent.part as OpenCodePart & { result?: string })?.result,
|
result: (finishEvent.part as OpenCodePart & { result?: string })?.result,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -484,8 +750,10 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
case 'tool_error': {
|
case 'tool_error': {
|
||||||
const toolErrorEvent = openCodeEvent as OpenCodeBaseEvent;
|
const toolErrorEvent = openCodeEvent as OpenCodeBaseEvent;
|
||||||
|
|
||||||
// Extract error message from part.error
|
// Extract error message from part.error and clean ANSI codes
|
||||||
const errorMessage = toolErrorEvent.part?.error || 'Tool execution failed';
|
const errorMessage = OpencodeProvider.cleanErrorMessage(
|
||||||
|
toolErrorEvent.part?.error || 'Tool execution failed'
|
||||||
|
);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
type: 'error',
|
type: 'error',
|
||||||
@@ -494,6 +762,45 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OpenCode CLI emits 'tool_use' events (not 'tool_call') when the model invokes a tool.
|
||||||
|
// The event format includes the tool name, call ID, and state with input/output.
|
||||||
|
// Handle both 'tool_use' (actual CLI format) and 'tool_call' (legacy/alternative) for robustness.
|
||||||
|
case 'tool_use': {
|
||||||
|
const toolUseEvent = openCodeEvent as OpenCodeToolUseEvent;
|
||||||
|
const part = toolUseEvent.part;
|
||||||
|
|
||||||
|
// Generate a tool use ID if not provided
|
||||||
|
const toolUseId = part?.callID || part?.call_id || generateToolUseId();
|
||||||
|
const toolName = part?.tool || part?.name || 'unknown';
|
||||||
|
|
||||||
|
const content: ContentBlock[] = [
|
||||||
|
{
|
||||||
|
type: 'tool_use',
|
||||||
|
name: toolName,
|
||||||
|
tool_use_id: toolUseId,
|
||||||
|
input: part?.state?.input || part?.args,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
// If the tool has already completed (state.status === 'completed'), also emit the result
|
||||||
|
if (part?.state?.status === 'completed' && part?.state?.output) {
|
||||||
|
content.push({
|
||||||
|
type: 'tool_result',
|
||||||
|
tool_use_id: toolUseId,
|
||||||
|
content: part.state.output,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
type: 'assistant',
|
||||||
|
session_id: toolUseEvent.sessionID,
|
||||||
|
message: {
|
||||||
|
role: 'assistant',
|
||||||
|
content,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
case 'tool_call': {
|
case 'tool_call': {
|
||||||
const toolEvent = openCodeEvent as OpenCodeToolCallEvent;
|
const toolEvent = openCodeEvent as OpenCodeToolCallEvent;
|
||||||
|
|
||||||
@@ -560,6 +867,13 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
errorMessage = errorEvent.part.error;
|
errorMessage = errorEvent.part.error;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Clean error messages: strip ANSI escape codes AND the redundant "Error: "
|
||||||
|
// prefix the CLI adds. The OpenCode CLI outputs colored stderr like:
|
||||||
|
// \x1b[91m\x1b[1mError: \x1b[0mSession not found
|
||||||
|
// Without cleaning, consumers that wrap in their own "Error: " prefix
|
||||||
|
// produce "Error: Error: Session not found".
|
||||||
|
errorMessage = OpencodeProvider.cleanErrorMessage(errorMessage);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
type: 'error',
|
type: 'error',
|
||||||
session_id: errorEvent.sessionID,
|
session_id: errorEvent.sessionID,
|
||||||
@@ -623,9 +937,9 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
default: true,
|
default: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'opencode/glm-4.7-free',
|
id: 'opencode/glm-5-free',
|
||||||
name: 'GLM 4.7 Free',
|
name: 'GLM 5 Free',
|
||||||
modelString: 'opencode/glm-4.7-free',
|
modelString: 'opencode/glm-5-free',
|
||||||
provider: 'opencode',
|
provider: 'opencode',
|
||||||
description: 'OpenCode free tier GLM model',
|
description: 'OpenCode free tier GLM model',
|
||||||
supportsTools: true,
|
supportsTools: true,
|
||||||
@@ -643,19 +957,19 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
tier: 'basic',
|
tier: 'basic',
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'opencode/grok-code',
|
id: 'opencode/kimi-k2.5-free',
|
||||||
name: 'Grok Code (Free)',
|
name: 'Kimi K2.5 Free',
|
||||||
modelString: 'opencode/grok-code',
|
modelString: 'opencode/kimi-k2.5-free',
|
||||||
provider: 'opencode',
|
provider: 'opencode',
|
||||||
description: 'OpenCode free tier Grok model for coding',
|
description: 'OpenCode free tier Kimi model for coding',
|
||||||
supportsTools: true,
|
supportsTools: true,
|
||||||
supportsVision: false,
|
supportsVision: false,
|
||||||
tier: 'basic',
|
tier: 'basic',
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'opencode/minimax-m2.1-free',
|
id: 'opencode/minimax-m2.5-free',
|
||||||
name: 'MiniMax M2.1 Free',
|
name: 'MiniMax M2.5 Free',
|
||||||
modelString: 'opencode/minimax-m2.1-free',
|
modelString: 'opencode/minimax-m2.5-free',
|
||||||
provider: 'opencode',
|
provider: 'opencode',
|
||||||
description: 'OpenCode free tier MiniMax model',
|
description: 'OpenCode free tier MiniMax model',
|
||||||
supportsTools: true,
|
supportsTools: true,
|
||||||
@@ -777,7 +1091,7 @@ export class OpencodeProvider extends CliProvider {
|
|||||||
*
|
*
|
||||||
* OpenCode CLI output format (one model per line):
|
* OpenCode CLI output format (one model per line):
|
||||||
* opencode/big-pickle
|
* opencode/big-pickle
|
||||||
* opencode/glm-4.7-free
|
* opencode/glm-5-free
|
||||||
* anthropic/claude-3-5-haiku-20241022
|
* anthropic/claude-3-5-haiku-20241022
|
||||||
* github-copilot/claude-3.5-sonnet
|
* github-copilot/claude-3.5-sonnet
|
||||||
* ...
|
* ...
|
||||||
|
|||||||
@@ -103,7 +103,7 @@ export class ProviderFactory {
|
|||||||
/**
|
/**
|
||||||
* Get the appropriate provider for a given model ID
|
* Get the appropriate provider for a given model ID
|
||||||
*
|
*
|
||||||
* @param modelId Model identifier (e.g., "claude-opus-4-5-20251101", "cursor-gpt-4o", "cursor-auto")
|
* @param modelId Model identifier (e.g., "claude-opus-4-6", "cursor-gpt-4o", "cursor-auto")
|
||||||
* @param options Optional settings
|
* @param options Optional settings
|
||||||
* @param options.throwOnDisconnected Throw error if provider is disconnected (default: true)
|
* @param options.throwOnDisconnected Throw error if provider is disconnected (default: true)
|
||||||
* @returns Provider instance for the model
|
* @returns Provider instance for the model
|
||||||
|
|||||||
@@ -16,8 +16,6 @@
|
|||||||
|
|
||||||
import { ProviderFactory } from './provider-factory.js';
|
import { ProviderFactory } from './provider-factory.js';
|
||||||
import type {
|
import type {
|
||||||
ProviderMessage,
|
|
||||||
ContentBlock,
|
|
||||||
ThinkingLevel,
|
ThinkingLevel,
|
||||||
ReasoningEffort,
|
ReasoningEffort,
|
||||||
ClaudeApiProfile,
|
ClaudeApiProfile,
|
||||||
@@ -96,7 +94,7 @@ export interface StreamingQueryOptions extends SimpleQueryOptions {
|
|||||||
/**
|
/**
|
||||||
* Default model to use when none specified
|
* Default model to use when none specified
|
||||||
*/
|
*/
|
||||||
const DEFAULT_MODEL = 'claude-sonnet-4-20250514';
|
const DEFAULT_MODEL = 'claude-sonnet-4-6';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Execute a simple query and return the text result
|
* Execute a simple query and return the text result
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ export function createHistoryHandler(agentService: AgentService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const result = agentService.getHistory(sessionId);
|
const result = await agentService.getHistory(sessionId);
|
||||||
res.json(result);
|
res.json(result);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'Get history failed');
|
logError(error, 'Get history failed');
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ export function createQueueListHandler(agentService: AgentService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const result = agentService.getQueue(sessionId);
|
const result = await agentService.getQueue(sessionId);
|
||||||
res.json(result);
|
res.json(result);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'List queue failed');
|
logError(error, 'List queue failed');
|
||||||
|
|||||||
@@ -53,7 +53,15 @@ export function createSendHandler(agentService: AgentService) {
|
|||||||
thinkingLevel,
|
thinkingLevel,
|
||||||
})
|
})
|
||||||
.catch((error) => {
|
.catch((error) => {
|
||||||
logger.error('Background error in sendMessage():', error);
|
const errorMsg = (error as Error).message || 'Unknown error';
|
||||||
|
logger.error(`Background error in sendMessage() for session ${sessionId}:`, errorMsg);
|
||||||
|
|
||||||
|
// Emit error via WebSocket so the UI is notified even though
|
||||||
|
// the HTTP response already returned 200. This is critical for
|
||||||
|
// session-not-found errors where sendMessage() throws before it
|
||||||
|
// can emit its own error event (no in-memory session to emit from).
|
||||||
|
agentService.emitSessionError(sessionId, errorMsg);
|
||||||
|
|
||||||
logError(error, 'Send message failed (background)');
|
logError(error, 'Send message failed (background)');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import type { Request, Response } from 'express';
|
|||||||
import { AgentService } from '../../../services/agent-service.js';
|
import { AgentService } from '../../../services/agent-service.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
const logger = createLogger('Agent');
|
const _logger = createLogger('Agent');
|
||||||
|
|
||||||
export function createStartHandler(agentService: AgentService) {
|
export function createStartHandler(agentService: AgentService) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
|||||||
@@ -128,7 +128,7 @@ export function logAuthStatus(context: string): void {
|
|||||||
*/
|
*/
|
||||||
export function logError(error: unknown, context: string): void {
|
export function logError(error: unknown, context: string): void {
|
||||||
logger.error(`❌ ${context}:`);
|
logger.error(`❌ ${context}:`);
|
||||||
logger.error('Error name:', (error as any)?.name);
|
logger.error('Error name:', (error as Error)?.name);
|
||||||
logger.error('Error message:', (error as Error)?.message);
|
logger.error('Error message:', (error as Error)?.message);
|
||||||
logger.error('Error stack:', (error as Error)?.stack);
|
logger.error('Error stack:', (error as Error)?.stack);
|
||||||
logger.error('Full error object:', JSON.stringify(error, Object.getOwnPropertyNames(error), 2));
|
logger.error('Full error object:', JSON.stringify(error, Object.getOwnPropertyNames(error), 2));
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ const DEFAULT_MAX_FEATURES = 50;
|
|||||||
* Timeout for Codex models when generating features (5 minutes).
|
* Timeout for Codex models when generating features (5 minutes).
|
||||||
* Codex models are slower and need more time to generate 50+ features.
|
* Codex models are slower and need more time to generate 50+ features.
|
||||||
*/
|
*/
|
||||||
const CODEX_FEATURE_GENERATION_TIMEOUT_MS = 300000; // 5 minutes
|
const _CODEX_FEATURE_GENERATION_TIMEOUT_MS = 300000; // 5 minutes
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Type for extracted features JSON response
|
* Type for extracted features JSON response
|
||||||
|
|||||||
@@ -29,7 +29,6 @@ import {
|
|||||||
updateTechnologyStack,
|
updateTechnologyStack,
|
||||||
updateRoadmapPhaseStatus,
|
updateRoadmapPhaseStatus,
|
||||||
type ImplementedFeature,
|
type ImplementedFeature,
|
||||||
type RoadmapPhase,
|
|
||||||
} from '../../lib/xml-extractor.js';
|
} from '../../lib/xml-extractor.js';
|
||||||
import { getNotificationService } from '../../services/notification-service.js';
|
import { getNotificationService } from '../../services/notification-service.js';
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,12 @@
|
|||||||
/**
|
/**
|
||||||
* Auto Mode routes - HTTP API for autonomous feature implementation
|
* Auto Mode routes - HTTP API for autonomous feature implementation
|
||||||
*
|
*
|
||||||
* Uses the AutoModeService for real feature execution with Claude Agent SDK
|
* Uses AutoModeServiceCompat which provides the old interface while
|
||||||
|
* delegating to GlobalAutoModeService and per-project facades.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { Router } from 'express';
|
import { Router } from 'express';
|
||||||
import type { AutoModeService } from '../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js';
|
||||||
import { validatePathParams } from '../../middleware/validate-paths.js';
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
import { createStopFeatureHandler } from './routes/stop-feature.js';
|
import { createStopFeatureHandler } from './routes/stop-feature.js';
|
||||||
import { createStatusHandler } from './routes/status.js';
|
import { createStatusHandler } from './routes/status.js';
|
||||||
@@ -20,8 +21,14 @@ import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
|
|||||||
import { createCommitFeatureHandler } from './routes/commit-feature.js';
|
import { createCommitFeatureHandler } from './routes/commit-feature.js';
|
||||||
import { createApprovePlanHandler } from './routes/approve-plan.js';
|
import { createApprovePlanHandler } from './routes/approve-plan.js';
|
||||||
import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
|
import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
|
||||||
|
import { createReconcileHandler } from './routes/reconcile.js';
|
||||||
|
|
||||||
export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
/**
|
||||||
|
* Create auto-mode routes.
|
||||||
|
*
|
||||||
|
* @param autoModeService - AutoModeServiceCompat instance
|
||||||
|
*/
|
||||||
|
export function createAutoModeRoutes(autoModeService: AutoModeServiceCompat): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
// Auto loop control routes
|
// Auto loop control routes
|
||||||
@@ -75,6 +82,11 @@ export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
|||||||
validatePathParams('projectPath'),
|
validatePathParams('projectPath'),
|
||||||
createResumeInterruptedHandler(autoModeService)
|
createResumeInterruptedHandler(autoModeService)
|
||||||
);
|
);
|
||||||
|
router.post(
|
||||||
|
'/reconcile',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createReconcileHandler(autoModeService)
|
||||||
|
);
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createAnalyzeProjectHandler(autoModeService: AutoModeService) {
|
export function createAnalyzeProjectHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath } = req.body as { projectPath: string };
|
const { projectPath } = req.body as { projectPath: string };
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
export function createApprovePlanHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { featureId, approved, editedPlan, feedback, projectPath } = req.body as {
|
const { featureId, approved, editedPlan, feedback, projectPath } = req.body as {
|
||||||
@@ -17,7 +17,7 @@ export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
|||||||
approved: boolean;
|
approved: boolean;
|
||||||
editedPlan?: string;
|
editedPlan?: string;
|
||||||
feedback?: string;
|
feedback?: string;
|
||||||
projectPath?: string;
|
projectPath: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
if (!featureId) {
|
if (!featureId) {
|
||||||
@@ -36,6 +36,14 @@ export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'projectPath is required',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
// Note: We no longer check hasPendingApproval here because resolvePlanApproval
|
// Note: We no longer check hasPendingApproval here because resolvePlanApproval
|
||||||
// can handle recovery when pending approval is not in Map but feature has planSpec.status='generated'
|
// can handle recovery when pending approval is not in Map but feature has planSpec.status='generated'
|
||||||
// This supports cases where the server restarted while waiting for approval
|
// This supports cases where the server restarted while waiting for approval
|
||||||
@@ -48,11 +56,11 @@ export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
|||||||
|
|
||||||
// Resolve the pending approval (with recovery support)
|
// Resolve the pending approval (with recovery support)
|
||||||
const result = await autoModeService.resolvePlanApproval(
|
const result = await autoModeService.resolvePlanApproval(
|
||||||
|
projectPath,
|
||||||
featureId,
|
featureId,
|
||||||
approved,
|
approved,
|
||||||
editedPlan,
|
editedPlan,
|
||||||
feedback,
|
feedback
|
||||||
projectPath
|
|
||||||
);
|
);
|
||||||
|
|
||||||
if (!result.success) {
|
if (!result.success) {
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createCommitFeatureHandler(autoModeService: AutoModeService) {
|
export function createCommitFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, worktreePath } = req.body as {
|
const { projectPath, featureId, worktreePath } = req.body as {
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createContextExistsHandler(autoModeService: AutoModeService) {
|
export function createContextExistsHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId } = req.body as {
|
const { projectPath, featureId } = req.body as {
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
|
export function createFollowUpFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, prompt, imagePaths, useWorktrees } = req.body as {
|
const { projectPath, featureId, prompt, imagePaths, useWorktrees } = req.body as {
|
||||||
@@ -30,16 +30,12 @@ export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
|
|||||||
|
|
||||||
// Start follow-up in background
|
// Start follow-up in background
|
||||||
// followUpFeature derives workDir from feature.branchName
|
// followUpFeature derives workDir from feature.branchName
|
||||||
|
// Default to false to match run-feature/resume-feature behavior.
|
||||||
|
// Worktrees should only be used when explicitly enabled by the user.
|
||||||
autoModeService
|
autoModeService
|
||||||
// Default to false to match run-feature/resume-feature behavior.
|
|
||||||
// Worktrees should only be used when explicitly enabled by the user.
|
|
||||||
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
|
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
|
||||||
.catch((error) => {
|
.catch((error) => {
|
||||||
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
|
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
|
||||||
})
|
|
||||||
.finally(() => {
|
|
||||||
// Release the starting slot when follow-up completes (success or error)
|
|
||||||
// Note: The feature should be in runningFeatures by this point
|
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
|
|||||||
53
apps/server/src/routes/auto-mode/routes/reconcile.ts
Normal file
53
apps/server/src/routes/auto-mode/routes/reconcile.ts
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
/**
|
||||||
|
* Reconcile Feature States Handler
|
||||||
|
*
|
||||||
|
* On-demand endpoint to reconcile all feature states for a project.
|
||||||
|
* Resets features stuck in transient states (in_progress, interrupted, pipeline_*)
|
||||||
|
* back to resting states (ready/backlog) and emits events to update the UI.
|
||||||
|
*
|
||||||
|
* This is useful when:
|
||||||
|
* - The UI reconnects after a server restart
|
||||||
|
* - A client detects stale feature states
|
||||||
|
* - An admin wants to force-reset stuck features
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
|
|
||||||
|
const logger = createLogger('ReconcileFeatures');
|
||||||
|
|
||||||
|
interface ReconcileRequest {
|
||||||
|
projectPath: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createReconcileHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
const { projectPath } = req.body as ReconcileRequest;
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ error: 'Project path is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Reconciling feature states for ${projectPath}`);
|
||||||
|
|
||||||
|
try {
|
||||||
|
const reconciledCount = await autoModeService.reconcileFeatureStates(projectPath);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
reconciledCount,
|
||||||
|
message:
|
||||||
|
reconciledCount > 0
|
||||||
|
? `Reconciled ${reconciledCount} feature(s)`
|
||||||
|
: 'No features needed reconciliation',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error reconciling feature states:', error);
|
||||||
|
res.status(500).json({
|
||||||
|
error: error instanceof Error ? error.message : 'Unknown error',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createResumeFeatureHandler(autoModeService: AutoModeService) {
|
export function createResumeFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, useWorktrees } = req.body as {
|
const { projectPath, featureId, useWorktrees } = req.body as {
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
|
|
||||||
const logger = createLogger('ResumeInterrupted');
|
const logger = createLogger('ResumeInterrupted');
|
||||||
|
|
||||||
@@ -15,7 +15,7 @@ interface ResumeInterruptedRequest {
|
|||||||
projectPath: string;
|
projectPath: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
export function createResumeInterruptedHandler(autoModeService: AutoModeService) {
|
export function createResumeInterruptedHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
const { projectPath } = req.body as ResumeInterruptedRequest;
|
const { projectPath } = req.body as ResumeInterruptedRequest;
|
||||||
|
|
||||||
@@ -28,6 +28,7 @@ export function createResumeInterruptedHandler(autoModeService: AutoModeService)
|
|||||||
|
|
||||||
try {
|
try {
|
||||||
await autoModeService.resumeInterruptedFeatures(projectPath);
|
await autoModeService.resumeInterruptedFeatures(projectPath);
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
message: 'Resume check completed',
|
message: 'Resume check completed',
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createRunFeatureHandler(autoModeService: AutoModeService) {
|
export function createRunFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, useWorktrees } = req.body as {
|
const { projectPath, featureId, useWorktrees } = req.body as {
|
||||||
@@ -26,23 +26,9 @@ export function createRunFeatureHandler(autoModeService: AutoModeService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check per-worktree capacity before starting
|
// Note: No concurrency limit check here. Manual feature starts always run
|
||||||
const capacity = await autoModeService.checkWorktreeCapacity(projectPath, featureId);
|
// immediately and bypass the concurrency limit. Their presence IS counted
|
||||||
if (!capacity.hasCapacity) {
|
// by the auto-loop coordinator when deciding whether to dispatch new auto-mode tasks.
|
||||||
const worktreeDesc = capacity.branchName
|
|
||||||
? `worktree "${capacity.branchName}"`
|
|
||||||
: 'main worktree';
|
|
||||||
res.status(429).json({
|
|
||||||
success: false,
|
|
||||||
error: `Agent limit reached for ${worktreeDesc} (${capacity.currentAgents}/${capacity.maxAgents}). Wait for running tasks to complete or increase the limit.`,
|
|
||||||
details: {
|
|
||||||
currentAgents: capacity.currentAgents,
|
|
||||||
maxAgents: capacity.maxAgents,
|
|
||||||
branchName: capacity.branchName,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start execution in background
|
// Start execution in background
|
||||||
// executeFeature derives workDir from feature.branchName
|
// executeFeature derives workDir from feature.branchName
|
||||||
@@ -50,10 +36,6 @@ export function createRunFeatureHandler(autoModeService: AutoModeService) {
|
|||||||
.executeFeature(projectPath, featureId, useWorktrees ?? false, false)
|
.executeFeature(projectPath, featureId, useWorktrees ?? false, false)
|
||||||
.catch((error) => {
|
.catch((error) => {
|
||||||
logger.error(`Feature ${featureId} error:`, error);
|
logger.error(`Feature ${featureId} error:`, error);
|
||||||
})
|
|
||||||
.finally(() => {
|
|
||||||
// Release the starting slot when execution completes (success or error)
|
|
||||||
// Note: The feature should be in runningFeatures by this point
|
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createStartHandler(autoModeService: AutoModeService) {
|
export function createStartHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, branchName, maxConcurrency } = req.body as {
|
const { projectPath, branchName, maxConcurrency } = req.body as {
|
||||||
|
|||||||
@@ -6,10 +6,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createStatusHandler(autoModeService: AutoModeService) {
|
/**
|
||||||
|
* Create status handler.
|
||||||
|
*/
|
||||||
|
export function createStatusHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, branchName } = req.body as {
|
const { projectPath, branchName } = req.body as {
|
||||||
@@ -21,7 +24,8 @@ export function createStatusHandler(autoModeService: AutoModeService) {
|
|||||||
if (projectPath) {
|
if (projectPath) {
|
||||||
// Normalize branchName: undefined becomes null
|
// Normalize branchName: undefined becomes null
|
||||||
const normalizedBranchName = branchName ?? null;
|
const normalizedBranchName = branchName ?? null;
|
||||||
const projectStatus = autoModeService.getStatusForProject(
|
|
||||||
|
const projectStatus = await autoModeService.getStatusForProject(
|
||||||
projectPath,
|
projectPath,
|
||||||
normalizedBranchName
|
normalizedBranchName
|
||||||
);
|
);
|
||||||
@@ -38,7 +42,7 @@ export function createStatusHandler(autoModeService: AutoModeService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fall back to global status for backward compatibility
|
// Global status for backward compatibility
|
||||||
const status = autoModeService.getStatus();
|
const status = autoModeService.getStatus();
|
||||||
const activeProjects = autoModeService.getActiveAutoLoopProjects();
|
const activeProjects = autoModeService.getActiveAutoLoopProjects();
|
||||||
const activeWorktrees = autoModeService.getActiveAutoLoopWorktrees();
|
const activeWorktrees = autoModeService.getActiveAutoLoopWorktrees();
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createStopFeatureHandler(autoModeService: AutoModeService) {
|
export function createStopFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { featureId } = req.body as { featureId: string };
|
const { featureId } = req.body as { featureId: string };
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createStopHandler(autoModeService: AutoModeService) {
|
export function createStopHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, branchName } = req.body as {
|
const { projectPath, branchName } = req.body as {
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createVerifyFeatureHandler(autoModeService: AutoModeService) {
|
export function createVerifyFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId } = req.body as {
|
const { projectPath, featureId } = req.body as {
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import type { Feature, BacklogPlanResult, BacklogChange, DependencyUpdate } from '@automaker/types';
|
import type { Feature, BacklogPlanResult } from '@automaker/types';
|
||||||
import {
|
import {
|
||||||
DEFAULT_PHASE_MODELS,
|
DEFAULT_PHASE_MODELS,
|
||||||
isCursorModel,
|
isCursorModel,
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { BacklogPlanResult, BacklogChange, Feature } from '@automaker/types';
|
import type { BacklogPlanResult } from '@automaker/types';
|
||||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
import { clearBacklogPlan, getErrorMessage, logError, logger } from '../common.js';
|
import { clearBacklogPlan, getErrorMessage, logError, logger } from '../common.js';
|
||||||
|
|
||||||
@@ -58,6 +58,9 @@ export function createApplyHandler() {
|
|||||||
if (feature.dependencies?.includes(change.featureId)) {
|
if (feature.dependencies?.includes(change.featureId)) {
|
||||||
const newDeps = feature.dependencies.filter((d) => d !== change.featureId);
|
const newDeps = feature.dependencies.filter((d) => d !== change.featureId);
|
||||||
await featureLoader.update(projectPath, feature.id, { dependencies: newDeps });
|
await featureLoader.update(projectPath, feature.id, { dependencies: newDeps });
|
||||||
|
// Mutate the in-memory feature object so subsequent deletions use the updated
|
||||||
|
// dependency list and don't reintroduce already-removed dependency IDs.
|
||||||
|
feature.dependencies = newDeps;
|
||||||
logger.info(
|
logger.info(
|
||||||
`[BacklogPlan] Removed dependency ${change.featureId} from ${feature.id}`
|
`[BacklogPlan] Removed dependency ${change.featureId} from ${feature.id}`
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -219,18 +219,21 @@ export function createEnhanceHandler(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Resolve the model - use provider resolved model, passed model, or default to sonnet
|
// Resolve the model for API call.
|
||||||
const resolvedModel =
|
// CRITICAL: For custom providers (GLM, MiniMax), pass the provider's model ID (e.g. "GLM-4.7")
|
||||||
providerResolvedModel || resolveModelString(model, CLAUDE_MODEL_MAP.sonnet);
|
// to the API, NOT the resolved Claude model - otherwise we get "model not found"
|
||||||
|
const modelForApi = claudeCompatibleProvider
|
||||||
|
? model
|
||||||
|
: providerResolvedModel || resolveModelString(model, CLAUDE_MODEL_MAP.sonnet);
|
||||||
|
|
||||||
logger.debug(`Using model: ${resolvedModel}`);
|
logger.debug(`Using model: ${modelForApi}`);
|
||||||
|
|
||||||
// Use simpleQuery - provider abstraction handles routing to correct provider
|
// Use simpleQuery - provider abstraction handles routing to correct provider
|
||||||
// The system prompt is combined with user prompt since some providers
|
// The system prompt is combined with user prompt since some providers
|
||||||
// don't have a separate system prompt concept
|
// don't have a separate system prompt concept
|
||||||
const result = await simpleQuery({
|
const result = await simpleQuery({
|
||||||
prompt: [systemPrompt, projectContext, userPrompt].filter(Boolean).join('\n\n'),
|
prompt: [systemPrompt, projectContext, userPrompt].filter(Boolean).join('\n\n'),
|
||||||
model: resolvedModel,
|
model: modelForApi,
|
||||||
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
|
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
|
||||||
maxTurns: 1,
|
maxTurns: 1,
|
||||||
allowedTools: [],
|
allowedTools: [],
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
import { Router } from 'express';
|
import { Router } from 'express';
|
||||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import type { AutoModeService } from '../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js';
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import { validatePathParams } from '../../middleware/validate-paths.js';
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
import { createListHandler } from './routes/list.js';
|
import { createListHandler } from './routes/list.js';
|
||||||
@@ -24,7 +24,7 @@ export function createFeaturesRoutes(
|
|||||||
featureLoader: FeatureLoader,
|
featureLoader: FeatureLoader,
|
||||||
settingsService?: SettingsService,
|
settingsService?: SettingsService,
|
||||||
events?: EventEmitter,
|
events?: EventEmitter,
|
||||||
autoModeService?: AutoModeService
|
autoModeService?: AutoModeServiceCompat
|
||||||
): Router {
|
): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
@@ -33,6 +33,11 @@ export function createFeaturesRoutes(
|
|||||||
validatePathParams('projectPath'),
|
validatePathParams('projectPath'),
|
||||||
createListHandler(featureLoader, autoModeService)
|
createListHandler(featureLoader, autoModeService)
|
||||||
);
|
);
|
||||||
|
router.get(
|
||||||
|
'/list',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createListHandler(featureLoader, autoModeService)
|
||||||
|
);
|
||||||
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
|
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
|
||||||
router.post(
|
router.post(
|
||||||
'/create',
|
'/create',
|
||||||
|
|||||||
@@ -24,19 +24,6 @@ export function createCreateHandler(featureLoader: FeatureLoader, events?: Event
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for duplicate title if title is provided
|
|
||||||
if (feature.title && feature.title.trim()) {
|
|
||||||
const duplicate = await featureLoader.findDuplicateTitle(projectPath, feature.title);
|
|
||||||
if (duplicate) {
|
|
||||||
res.status(409).json({
|
|
||||||
success: false,
|
|
||||||
error: `A feature with title "${feature.title}" already exists`,
|
|
||||||
duplicateFeatureId: duplicate.id,
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const created = await featureLoader.create(projectPath, feature);
|
const created = await featureLoader.create(projectPath, feature);
|
||||||
|
|
||||||
// Emit feature_created event for hooks
|
// Emit feature_created event for hooks
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ interface ExportRequest {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
export function createExportHandler(featureLoader: FeatureLoader) {
|
export function createExportHandler(_featureLoader: FeatureLoader) {
|
||||||
const exportService = getFeatureExportService();
|
const exportService = getFeatureExportService();
|
||||||
|
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ export function createGenerateTitleHandler(
|
|||||||
): (req: Request, res: Response) => Promise<void> {
|
): (req: Request, res: Response) => Promise<void> {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { description, projectPath } = req.body as GenerateTitleRequestBody;
|
const { description } = req.body as GenerateTitleRequestBody;
|
||||||
|
|
||||||
if (!description || typeof description !== 'string') {
|
if (!description || typeof description !== 'string') {
|
||||||
const response: GenerateTitleErrorResponse = {
|
const response: GenerateTitleErrorResponse = {
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ interface ConflictInfo {
|
|||||||
hasConflict: boolean;
|
hasConflict: boolean;
|
||||||
}
|
}
|
||||||
|
|
||||||
export function createImportHandler(featureLoader: FeatureLoader) {
|
export function createImportHandler(_featureLoader: FeatureLoader) {
|
||||||
const exportService = getFeatureExportService();
|
const exportService = getFeatureExportService();
|
||||||
|
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
/**
|
/**
|
||||||
* POST /list endpoint - List all features for a project
|
* POST/GET /list endpoint - List all features for a project
|
||||||
|
*
|
||||||
|
* projectPath may come from req.body (POST) or req.query (GET fallback).
|
||||||
*
|
*
|
||||||
* Also performs orphan detection when a project is loaded to identify
|
* Also performs orphan detection when a project is loaded to identify
|
||||||
* features whose branches no longer exist. This runs on every project load/switch.
|
* features whose branches no longer exist. This runs on every project load/switch.
|
||||||
@@ -7,16 +9,29 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
const logger = createLogger('FeaturesListRoute');
|
const logger = createLogger('FeaturesListRoute');
|
||||||
|
|
||||||
export function createListHandler(featureLoader: FeatureLoader, autoModeService?: AutoModeService) {
|
export function createListHandler(
|
||||||
|
featureLoader: FeatureLoader,
|
||||||
|
autoModeService?: AutoModeServiceCompat
|
||||||
|
) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath } = req.body as { projectPath: string };
|
const bodyProjectPath =
|
||||||
|
typeof req.body === 'object' && req.body !== null
|
||||||
|
? (req.body as { projectPath?: unknown }).projectPath
|
||||||
|
: undefined;
|
||||||
|
const queryProjectPath = req.query.projectPath;
|
||||||
|
const projectPath =
|
||||||
|
typeof bodyProjectPath === 'string'
|
||||||
|
? bodyProjectPath
|
||||||
|
: typeof queryProjectPath === 'string'
|
||||||
|
? queryProjectPath
|
||||||
|
: undefined;
|
||||||
|
|
||||||
if (!projectPath) {
|
if (!projectPath) {
|
||||||
res.status(400).json({ success: false, error: 'projectPath is required' });
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
@@ -30,18 +45,23 @@ export function createListHandler(featureLoader: FeatureLoader, autoModeService?
|
|||||||
// We don't await this to keep the list response fast
|
// We don't await this to keep the list response fast
|
||||||
// Note: detectOrphanedFeatures handles errors internally and always resolves
|
// Note: detectOrphanedFeatures handles errors internally and always resolves
|
||||||
if (autoModeService) {
|
if (autoModeService) {
|
||||||
autoModeService.detectOrphanedFeatures(projectPath).then((orphanedFeatures) => {
|
autoModeService
|
||||||
if (orphanedFeatures.length > 0) {
|
.detectOrphanedFeatures(projectPath)
|
||||||
logger.info(
|
.then((orphanedFeatures) => {
|
||||||
`[ProjectLoad] Detected ${orphanedFeatures.length} orphaned feature(s) in ${projectPath}`
|
if (orphanedFeatures.length > 0) {
|
||||||
);
|
|
||||||
for (const { feature, missingBranch } of orphanedFeatures) {
|
|
||||||
logger.info(
|
logger.info(
|
||||||
`[ProjectLoad] Orphaned: ${feature.title || feature.id} - branch "${missingBranch}" no longer exists`
|
`[ProjectLoad] Detected ${orphanedFeatures.length} orphaned feature(s) in ${projectPath}`
|
||||||
);
|
);
|
||||||
|
for (const { feature, missingBranch } of orphanedFeatures) {
|
||||||
|
logger.info(
|
||||||
|
`[ProjectLoad] Orphaned: ${feature.title || feature.id} - branch "${missingBranch}" no longer exists`
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
})
|
||||||
});
|
.catch((error) => {
|
||||||
|
logger.warn(`[ProjectLoad] Orphan detection failed for ${projectPath}:`, error);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
res.json({ success: true, features });
|
res.json({ success: true, features });
|
||||||
|
|||||||
@@ -40,23 +40,6 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for duplicate title if title is being updated
|
|
||||||
if (updates.title && updates.title.trim()) {
|
|
||||||
const duplicate = await featureLoader.findDuplicateTitle(
|
|
||||||
projectPath,
|
|
||||||
updates.title,
|
|
||||||
featureId // Exclude the current feature from duplicate check
|
|
||||||
);
|
|
||||||
if (duplicate) {
|
|
||||||
res.status(409).json({
|
|
||||||
success: false,
|
|
||||||
error: `A feature with title "${updates.title}" already exists`,
|
|
||||||
duplicateFeatureId: duplicate.id,
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the current feature to detect status changes
|
// Get the current feature to detect status changes
|
||||||
const currentFeature = await featureLoader.get(projectPath, featureId);
|
const currentFeature = await featureLoader.get(projectPath, featureId);
|
||||||
const previousStatus = currentFeature?.status as FeatureStatus | undefined;
|
const previousStatus = currentFeature?.status as FeatureStatus | undefined;
|
||||||
|
|||||||
@@ -19,6 +19,10 @@ import { createBrowseHandler } from './routes/browse.js';
|
|||||||
import { createImageHandler } from './routes/image.js';
|
import { createImageHandler } from './routes/image.js';
|
||||||
import { createSaveBoardBackgroundHandler } from './routes/save-board-background.js';
|
import { createSaveBoardBackgroundHandler } from './routes/save-board-background.js';
|
||||||
import { createDeleteBoardBackgroundHandler } from './routes/delete-board-background.js';
|
import { createDeleteBoardBackgroundHandler } from './routes/delete-board-background.js';
|
||||||
|
import { createBrowseProjectFilesHandler } from './routes/browse-project-files.js';
|
||||||
|
import { createCopyHandler } from './routes/copy.js';
|
||||||
|
import { createMoveHandler } from './routes/move.js';
|
||||||
|
import { createDownloadHandler } from './routes/download.js';
|
||||||
|
|
||||||
export function createFsRoutes(_events: EventEmitter): Router {
|
export function createFsRoutes(_events: EventEmitter): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
@@ -37,6 +41,10 @@ export function createFsRoutes(_events: EventEmitter): Router {
|
|||||||
router.get('/image', createImageHandler());
|
router.get('/image', createImageHandler());
|
||||||
router.post('/save-board-background', createSaveBoardBackgroundHandler());
|
router.post('/save-board-background', createSaveBoardBackgroundHandler());
|
||||||
router.post('/delete-board-background', createDeleteBoardBackgroundHandler());
|
router.post('/delete-board-background', createDeleteBoardBackgroundHandler());
|
||||||
|
router.post('/browse-project-files', createBrowseProjectFilesHandler());
|
||||||
|
router.post('/copy', createCopyHandler());
|
||||||
|
router.post('/move', createMoveHandler());
|
||||||
|
router.post('/download', createDownloadHandler());
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
191
apps/server/src/routes/fs/routes/browse-project-files.ts
Normal file
191
apps/server/src/routes/fs/routes/browse-project-files.ts
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
/**
|
||||||
|
* POST /browse-project-files endpoint - Browse files and directories within a project
|
||||||
|
*
|
||||||
|
* Unlike /browse which only lists directories (for project folder selection),
|
||||||
|
* this endpoint lists both files and directories relative to a project root.
|
||||||
|
* Used by the file selector for "Copy files to worktree" settings.
|
||||||
|
*
|
||||||
|
* Features:
|
||||||
|
* - Lists both files and directories
|
||||||
|
* - Hides .git, .worktrees, node_modules, and other build artifacts
|
||||||
|
* - Returns entries relative to the project root
|
||||||
|
* - Supports navigating into subdirectories
|
||||||
|
* - Security: prevents path traversal outside project root
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
|
import path from 'path';
|
||||||
|
import { PathNotAllowedError } from '@automaker/platform';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
// Directories to hide from the listing (build artifacts, caches, etc.)
|
||||||
|
const HIDDEN_DIRECTORIES = new Set([
|
||||||
|
'.git',
|
||||||
|
'.worktrees',
|
||||||
|
'node_modules',
|
||||||
|
'.automaker',
|
||||||
|
'__pycache__',
|
||||||
|
'.cache',
|
||||||
|
'.next',
|
||||||
|
'.nuxt',
|
||||||
|
'.svelte-kit',
|
||||||
|
'.turbo',
|
||||||
|
'.vercel',
|
||||||
|
'.output',
|
||||||
|
'coverage',
|
||||||
|
'.nyc_output',
|
||||||
|
'dist',
|
||||||
|
'build',
|
||||||
|
'out',
|
||||||
|
'.tmp',
|
||||||
|
'tmp',
|
||||||
|
'.venv',
|
||||||
|
'venv',
|
||||||
|
'target',
|
||||||
|
'vendor',
|
||||||
|
'.gradle',
|
||||||
|
'.idea',
|
||||||
|
'.vscode',
|
||||||
|
]);
|
||||||
|
|
||||||
|
interface ProjectFileEntry {
|
||||||
|
name: string;
|
||||||
|
relativePath: string;
|
||||||
|
isDirectory: boolean;
|
||||||
|
isFile: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createBrowseProjectFilesHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, relativePath } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
relativePath?: string; // Relative path within the project to browse (empty = project root)
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const resolvedProjectPath = path.resolve(projectPath);
|
||||||
|
|
||||||
|
// Determine the target directory to browse
|
||||||
|
let targetPath = resolvedProjectPath;
|
||||||
|
let currentRelativePath = '';
|
||||||
|
|
||||||
|
if (relativePath) {
|
||||||
|
// Security: normalize and validate the relative path
|
||||||
|
const normalized = path.normalize(relativePath);
|
||||||
|
if (normalized.startsWith('..') || path.isAbsolute(normalized)) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Invalid relative path - must be within the project directory',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
targetPath = path.join(resolvedProjectPath, normalized);
|
||||||
|
currentRelativePath = normalized;
|
||||||
|
|
||||||
|
// Double-check the resolved path is within the project
|
||||||
|
// Use a separator-terminated prefix to prevent matching sibling dirs
|
||||||
|
// that share the same prefix (e.g. /projects/foo vs /projects/foobar).
|
||||||
|
const resolvedTarget = path.resolve(targetPath);
|
||||||
|
const projectPrefix = resolvedProjectPath.endsWith(path.sep)
|
||||||
|
? resolvedProjectPath
|
||||||
|
: resolvedProjectPath + path.sep;
|
||||||
|
if (!resolvedTarget.startsWith(projectPrefix) && resolvedTarget !== resolvedProjectPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Path traversal detected',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine parent relative path
|
||||||
|
let parentRelativePath: string | null = null;
|
||||||
|
if (currentRelativePath) {
|
||||||
|
const parent = path.dirname(currentRelativePath);
|
||||||
|
parentRelativePath = parent === '.' ? '' : parent;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const stat = await secureFs.stat(targetPath);
|
||||||
|
|
||||||
|
if (!stat.isDirectory()) {
|
||||||
|
res.status(400).json({ success: false, error: 'Path is not a directory' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read directory contents
|
||||||
|
const dirEntries = await secureFs.readdir(targetPath, { withFileTypes: true });
|
||||||
|
|
||||||
|
// Filter and map entries
|
||||||
|
const entries: ProjectFileEntry[] = dirEntries
|
||||||
|
.filter((entry) => {
|
||||||
|
// Skip hidden directories (build artifacts, etc.)
|
||||||
|
if (entry.isDirectory() && HIDDEN_DIRECTORIES.has(entry.name)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Skip entries starting with . (hidden files) except common config files
|
||||||
|
// We keep hidden files visible since users often need .env, .eslintrc, etc.
|
||||||
|
return true;
|
||||||
|
})
|
||||||
|
.map((entry) => {
|
||||||
|
const entryRelativePath = currentRelativePath
|
||||||
|
? path.posix.join(currentRelativePath.replace(/\\/g, '/'), entry.name)
|
||||||
|
: entry.name;
|
||||||
|
|
||||||
|
return {
|
||||||
|
name: entry.name,
|
||||||
|
relativePath: entryRelativePath,
|
||||||
|
isDirectory: entry.isDirectory(),
|
||||||
|
isFile: entry.isFile(),
|
||||||
|
};
|
||||||
|
})
|
||||||
|
// Sort: directories first, then files, alphabetically within each group
|
||||||
|
.sort((a, b) => {
|
||||||
|
if (a.isDirectory !== b.isDirectory) {
|
||||||
|
return a.isDirectory ? -1 : 1;
|
||||||
|
}
|
||||||
|
return a.name.localeCompare(b.name);
|
||||||
|
});
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
currentRelativePath,
|
||||||
|
parentRelativePath,
|
||||||
|
entries,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
const errorMessage = error instanceof Error ? error.message : 'Failed to read directory';
|
||||||
|
const isPermissionError = errorMessage.includes('EPERM') || errorMessage.includes('EACCES');
|
||||||
|
|
||||||
|
if (isPermissionError) {
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
currentRelativePath,
|
||||||
|
parentRelativePath,
|
||||||
|
entries: [],
|
||||||
|
warning: 'Permission denied - unable to read this directory',
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: errorMessage,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
if (error instanceof PathNotAllowedError) {
|
||||||
|
res.status(403).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logError(error, 'Browse project files failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
99
apps/server/src/routes/fs/routes/copy.ts
Normal file
99
apps/server/src/routes/fs/routes/copy.ts
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
/**
|
||||||
|
* POST /copy endpoint - Copy file or directory to a new location
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
|
import path from 'path';
|
||||||
|
import { PathNotAllowedError } from '@automaker/platform';
|
||||||
|
import { mkdirSafe } from '@automaker/utils';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Recursively copy a directory and its contents
|
||||||
|
*/
|
||||||
|
async function copyDirectoryRecursive(src: string, dest: string): Promise<void> {
|
||||||
|
await mkdirSafe(dest);
|
||||||
|
const entries = await secureFs.readdir(src, { withFileTypes: true });
|
||||||
|
|
||||||
|
for (const entry of entries) {
|
||||||
|
const srcPath = path.join(src, entry.name);
|
||||||
|
const destPath = path.join(dest, entry.name);
|
||||||
|
|
||||||
|
if (entry.isDirectory()) {
|
||||||
|
await copyDirectoryRecursive(srcPath, destPath);
|
||||||
|
} else {
|
||||||
|
await secureFs.copyFile(srcPath, destPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createCopyHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { sourcePath, destinationPath, overwrite } = req.body as {
|
||||||
|
sourcePath: string;
|
||||||
|
destinationPath: string;
|
||||||
|
overwrite?: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!sourcePath || !destinationPath) {
|
||||||
|
res
|
||||||
|
.status(400)
|
||||||
|
.json({ success: false, error: 'sourcePath and destinationPath are required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prevent copying a folder into itself or its own descendant (infinite recursion)
|
||||||
|
const resolvedSrc = path.resolve(sourcePath);
|
||||||
|
const resolvedDest = path.resolve(destinationPath);
|
||||||
|
if (resolvedDest === resolvedSrc || resolvedDest.startsWith(resolvedSrc + path.sep)) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Cannot copy a folder into itself or one of its own descendants',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if destination already exists
|
||||||
|
try {
|
||||||
|
await secureFs.stat(destinationPath);
|
||||||
|
// Destination exists
|
||||||
|
if (!overwrite) {
|
||||||
|
res.status(409).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Destination already exists',
|
||||||
|
exists: true,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// If overwrite is true, remove the existing destination first to avoid merging
|
||||||
|
await secureFs.rm(destinationPath, { recursive: true });
|
||||||
|
} catch {
|
||||||
|
// Destination doesn't exist - good to proceed
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure parent directory exists
|
||||||
|
await mkdirSafe(path.dirname(path.resolve(destinationPath)));
|
||||||
|
|
||||||
|
// Check if source is a directory
|
||||||
|
const stats = await secureFs.stat(sourcePath);
|
||||||
|
|
||||||
|
if (stats.isDirectory()) {
|
||||||
|
await copyDirectoryRecursive(sourcePath, destinationPath);
|
||||||
|
} else {
|
||||||
|
await secureFs.copyFile(sourcePath, destinationPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
res.json({ success: true });
|
||||||
|
} catch (error) {
|
||||||
|
if (error instanceof PathNotAllowedError) {
|
||||||
|
res.status(403).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logError(error, 'Copy file failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
142
apps/server/src/routes/fs/routes/download.ts
Normal file
142
apps/server/src/routes/fs/routes/download.ts
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
/**
|
||||||
|
* POST /download endpoint - Download a file, or GET /download for streaming
|
||||||
|
* For folders, creates a zip archive on the fly
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
|
import path from 'path';
|
||||||
|
import { PathNotAllowedError } from '@automaker/platform';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
import { createReadStream } from 'fs';
|
||||||
|
import { execFile } from 'child_process';
|
||||||
|
import { promisify } from 'util';
|
||||||
|
import { tmpdir } from 'os';
|
||||||
|
|
||||||
|
const execFileAsync = promisify(execFile);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get total size of a directory recursively
|
||||||
|
*/
|
||||||
|
async function getDirectorySize(dirPath: string): Promise<number> {
|
||||||
|
let totalSize = 0;
|
||||||
|
const entries = await secureFs.readdir(dirPath, { withFileTypes: true });
|
||||||
|
|
||||||
|
for (const entry of entries) {
|
||||||
|
const entryPath = path.join(dirPath, entry.name);
|
||||||
|
if (entry.isDirectory()) {
|
||||||
|
totalSize += await getDirectorySize(entryPath);
|
||||||
|
} else {
|
||||||
|
const stats = await secureFs.stat(entryPath);
|
||||||
|
totalSize += Number(stats.size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return totalSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * POST /download handler factory - download a file or a zipped directory.
 *
 * Body: { filePath: string }
 * Files are streamed directly as application/octet-stream. Directories are
 * first size-checked (100MB cap), zipped into a temp file via the system
 * `zip` binary, then streamed as application/zip and cleaned up afterwards.
 *
 * NOTE(review): relies on a `zip` executable on PATH (macOS/Linux); verify
 * behavior on Windows hosts.
 */
export function createDownloadHandler() {
  return async (req: Request, res: Response): Promise<void> => {
    try {
      const { filePath } = req.body as { filePath: string };

      if (!filePath) {
        res.status(400).json({ success: false, error: 'filePath is required' });
        return;
      }

      const stats = await secureFs.stat(filePath);
      const fileName = path.basename(filePath);

      if (stats.isDirectory()) {
        // For directories, create a zip archive
        const dirSize = await getDirectorySize(filePath);
        const MAX_DIR_SIZE = 100 * 1024 * 1024; // 100MB limit

        // Refuse oversized directories up front rather than zipping them
        if (dirSize > MAX_DIR_SIZE) {
          res.status(413).json({
            success: false,
            error: `Directory is too large to download (${(dirSize / (1024 * 1024)).toFixed(1)}MB). Maximum size is ${MAX_DIR_SIZE / (1024 * 1024)}MB.`,
            size: dirSize,
          });
          return;
        }

        // Create a temporary zip file (timestamped name to avoid collisions)
        const zipFileName = `${fileName}.zip`;
        const tmpZipPath = path.join(tmpdir(), `automaker-download-${Date.now()}-${zipFileName}`);

        try {
          // Use system zip command (available on macOS and Linux)
          // Use execFile to avoid shell injection via user-provided paths
          await execFileAsync('zip', ['-r', tmpZipPath, fileName], {
            cwd: path.dirname(filePath),
            maxBuffer: 50 * 1024 * 1024,
          });

          const zipStats = await secureFs.stat(tmpZipPath);

          res.setHeader('Content-Type', 'application/zip');
          res.setHeader('Content-Disposition', `attachment; filename="${zipFileName}"`);
          res.setHeader('Content-Length', zipStats.size.toString());
          // Expose the uncompressed size so the client can show it
          res.setHeader('X-Directory-Size', dirSize.toString());

          const stream = createReadStream(tmpZipPath);
          stream.pipe(res);

          // NOTE(review): cleanup fires on 'end' only - if the client aborts
          // mid-stream the temp zip may be left behind; consider 'close'.
          stream.on('end', async () => {
            // Cleanup temp file
            try {
              await secureFs.rm(tmpZipPath);
            } catch {
              // Ignore cleanup errors
            }
          });

          stream.on('error', async (err) => {
            logError(err, 'Download stream error');
            try {
              await secureFs.rm(tmpZipPath);
            } catch {
              // Ignore cleanup errors
            }
            // Only send an error response if headers were not already streamed
            if (!res.headersSent) {
              res.status(500).json({ success: false, error: 'Stream error during download' });
            }
          });
        } catch (zipError) {
          // Cleanup on zip failure, then let the outer catch report it
          try {
            await secureFs.rm(tmpZipPath);
          } catch {
            // Ignore
          }
          throw zipError;
        }
      } else {
        // For individual files, stream directly
        res.setHeader('Content-Type', 'application/octet-stream');
        res.setHeader('Content-Disposition', `attachment; filename="${fileName}"`);
        res.setHeader('Content-Length', stats.size.toString());

        const stream = createReadStream(filePath);
        stream.pipe(res);

        stream.on('error', (err) => {
          logError(err, 'Download stream error');
          if (!res.headersSent) {
            res.status(500).json({ success: false, error: 'Stream error during download' });
          }
        });
      }
    } catch (error) {
      // Path outside allowed roots -> 403
      if (error instanceof PathNotAllowedError) {
        res.status(403).json({ success: false, error: getErrorMessage(error) });
        return;
      }

      logError(error, 'Download failed');
      res.status(500).json({ success: false, error: getErrorMessage(error) });
    }
  };
}
|
||||||
@@ -35,9 +35,9 @@ export function createMkdirHandler() {
|
|||||||
error: 'Path exists and is not a directory',
|
error: 'Path exists and is not a directory',
|
||||||
});
|
});
|
||||||
return;
|
return;
|
||||||
} catch (statError: any) {
|
} catch (statError: unknown) {
|
||||||
// ENOENT means path doesn't exist - we should create it
|
// ENOENT means path doesn't exist - we should create it
|
||||||
if (statError.code !== 'ENOENT') {
|
if ((statError as NodeJS.ErrnoException).code !== 'ENOENT') {
|
||||||
// Some other error (could be ELOOP in parent path)
|
// Some other error (could be ELOOP in parent path)
|
||||||
throw statError;
|
throw statError;
|
||||||
}
|
}
|
||||||
@@ -47,7 +47,7 @@ export function createMkdirHandler() {
|
|||||||
await secureFs.mkdir(resolvedPath, { recursive: true });
|
await secureFs.mkdir(resolvedPath, { recursive: true });
|
||||||
|
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
} catch (error: any) {
|
} catch (error: unknown) {
|
||||||
// Path not allowed - return 403 Forbidden
|
// Path not allowed - return 403 Forbidden
|
||||||
if (error instanceof PathNotAllowedError) {
|
if (error instanceof PathNotAllowedError) {
|
||||||
res.status(403).json({ success: false, error: getErrorMessage(error) });
|
res.status(403).json({ success: false, error: getErrorMessage(error) });
|
||||||
@@ -55,7 +55,7 @@ export function createMkdirHandler() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Handle ELOOP specifically
|
// Handle ELOOP specifically
|
||||||
if (error.code === 'ELOOP') {
|
if ((error as NodeJS.ErrnoException).code === 'ELOOP') {
|
||||||
logError(error, 'Create directory failed - symlink loop detected');
|
logError(error, 'Create directory failed - symlink loop detected');
|
||||||
res.status(400).json({
|
res.status(400).json({
|
||||||
success: false,
|
success: false,
|
||||||
|
|||||||
79
apps/server/src/routes/fs/routes/move.ts
Normal file
79
apps/server/src/routes/fs/routes/move.ts
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
/**
|
||||||
|
* POST /move endpoint - Move (rename) file or directory to a new location
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
|
import path from 'path';
|
||||||
|
import { PathNotAllowedError } from '@automaker/platform';
|
||||||
|
import { mkdirSafe } from '@automaker/utils';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
export function createMoveHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { sourcePath, destinationPath, overwrite } = req.body as {
|
||||||
|
sourcePath: string;
|
||||||
|
destinationPath: string;
|
||||||
|
overwrite?: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!sourcePath || !destinationPath) {
|
||||||
|
res
|
||||||
|
.status(400)
|
||||||
|
.json({ success: false, error: 'sourcePath and destinationPath are required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prevent moving to same location or into its own descendant
|
||||||
|
const resolvedSrc = path.resolve(sourcePath);
|
||||||
|
const resolvedDest = path.resolve(destinationPath);
|
||||||
|
if (resolvedDest === resolvedSrc) {
|
||||||
|
// No-op: source and destination are the same
|
||||||
|
res.json({ success: true });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (resolvedDest.startsWith(resolvedSrc + path.sep)) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Cannot move a folder into one of its own descendants',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if destination already exists
|
||||||
|
try {
|
||||||
|
await secureFs.stat(destinationPath);
|
||||||
|
// Destination exists
|
||||||
|
if (!overwrite) {
|
||||||
|
res.status(409).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Destination already exists',
|
||||||
|
exists: true,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// If overwrite is true, remove the existing destination first
|
||||||
|
await secureFs.rm(destinationPath, { recursive: true });
|
||||||
|
} catch {
|
||||||
|
// Destination doesn't exist - good to proceed
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure parent directory exists
|
||||||
|
await mkdirSafe(path.dirname(path.resolve(destinationPath)));
|
||||||
|
|
||||||
|
// Use rename for the move operation
|
||||||
|
await secureFs.rename(sourcePath, destinationPath);
|
||||||
|
|
||||||
|
res.json({ success: true });
|
||||||
|
} catch (error) {
|
||||||
|
if (error instanceof PathNotAllowedError) {
|
||||||
|
res.status(403).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logError(error, 'Move file failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -10,7 +10,11 @@ import { getErrorMessage, logError } from '../common.js';
|
|||||||
export function createResolveDirectoryHandler() {
|
export function createResolveDirectoryHandler() {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { directoryName, sampleFiles, fileCount } = req.body as {
|
const {
|
||||||
|
directoryName,
|
||||||
|
sampleFiles,
|
||||||
|
fileCount: _fileCount,
|
||||||
|
} = req.body as {
|
||||||
directoryName: string;
|
directoryName: string;
|
||||||
sampleFiles?: string[];
|
sampleFiles?: string[];
|
||||||
fileCount?: number;
|
fileCount?: number;
|
||||||
|
|||||||
@@ -11,10 +11,9 @@ import { getBoardDir } from '@automaker/platform';
|
|||||||
export function createSaveBoardBackgroundHandler() {
|
export function createSaveBoardBackgroundHandler() {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { data, filename, mimeType, projectPath } = req.body as {
|
const { data, filename, projectPath } = req.body as {
|
||||||
data: string;
|
data: string;
|
||||||
filename: string;
|
filename: string;
|
||||||
mimeType: string;
|
|
||||||
projectPath: string;
|
projectPath: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -12,10 +12,9 @@ import { sanitizeFilename } from '@automaker/utils';
|
|||||||
export function createSaveImageHandler() {
|
export function createSaveImageHandler() {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { data, filename, mimeType, projectPath } = req.body as {
|
const { data, filename, projectPath } = req.body as {
|
||||||
data: string;
|
data: string;
|
||||||
filename: string;
|
filename: string;
|
||||||
mimeType: string;
|
|
||||||
projectPath: string;
|
projectPath: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import * as secureFs from '../../../lib/secure-fs.js';
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
import path from 'path';
|
import path from 'path';
|
||||||
import { isPathAllowed, PathNotAllowedError, getAllowedRootDirectory } from '@automaker/platform';
|
import { isPathAllowed, getAllowedRootDirectory } from '@automaker/platform';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createValidatePathHandler() {
|
export function createValidatePathHandler() {
|
||||||
|
|||||||
66
apps/server/src/routes/gemini/index.ts
Normal file
66
apps/server/src/routes/gemini/index.ts
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
import { Router, Request, Response } from 'express';
|
||||||
|
import { GeminiProvider } from '../../providers/gemini-provider.js';
|
||||||
|
import { GeminiUsageService } from '../../services/gemini-usage-service.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
|
|
||||||
|
const logger = createLogger('Gemini');
|
||||||
|
|
||||||
|
export function createGeminiRoutes(
|
||||||
|
usageService: GeminiUsageService,
|
||||||
|
_events: EventEmitter
|
||||||
|
): Router {
|
||||||
|
const router = Router();
|
||||||
|
|
||||||
|
// Get current usage/quota data from Google Cloud API
|
||||||
|
router.get('/usage', async (_req: Request, res: Response) => {
|
||||||
|
try {
|
||||||
|
const usageData = await usageService.fetchUsageData();
|
||||||
|
|
||||||
|
res.json(usageData);
|
||||||
|
} catch (error) {
|
||||||
|
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||||
|
logger.error('Error fetching Gemini usage:', error);
|
||||||
|
|
||||||
|
// Return error in a format the UI expects
|
||||||
|
res.status(200).json({
|
||||||
|
authenticated: false,
|
||||||
|
authMethod: 'none',
|
||||||
|
usedPercent: 0,
|
||||||
|
remainingPercent: 100,
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
error: `Failed to fetch Gemini usage: ${message}`,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check if Gemini is available
|
||||||
|
router.get('/status', async (_req: Request, res: Response) => {
|
||||||
|
try {
|
||||||
|
const provider = new GeminiProvider();
|
||||||
|
const status = await provider.detectInstallation();
|
||||||
|
|
||||||
|
// Derive authMethod from typed InstallationStatus fields
|
||||||
|
const authMethod = status.authenticated
|
||||||
|
? status.hasApiKey
|
||||||
|
? 'api_key'
|
||||||
|
: 'cli_login'
|
||||||
|
: 'none';
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
installed: status.installed,
|
||||||
|
version: status.version || null,
|
||||||
|
path: status.path || null,
|
||||||
|
authenticated: status.authenticated || false,
|
||||||
|
authMethod,
|
||||||
|
hasCredentialsFile: false,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||||
|
res.status(500).json({ success: false, error: message });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return router;
|
||||||
|
}
|
||||||
@@ -6,12 +6,22 @@ import { Router } from 'express';
|
|||||||
import { validatePathParams } from '../../middleware/validate-paths.js';
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
import { createDiffsHandler } from './routes/diffs.js';
|
import { createDiffsHandler } from './routes/diffs.js';
|
||||||
import { createFileDiffHandler } from './routes/file-diff.js';
|
import { createFileDiffHandler } from './routes/file-diff.js';
|
||||||
|
import { createStageFilesHandler } from './routes/stage-files.js';
|
||||||
|
import { createDetailsHandler } from './routes/details.js';
|
||||||
|
import { createEnhancedStatusHandler } from './routes/enhanced-status.js';
|
||||||
|
|
||||||
export function createGitRoutes(): Router {
|
export function createGitRoutes(): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
router.post('/diffs', validatePathParams('projectPath'), createDiffsHandler());
|
router.post('/diffs', validatePathParams('projectPath'), createDiffsHandler());
|
||||||
router.post('/file-diff', validatePathParams('projectPath', 'filePath'), createFileDiffHandler());
|
router.post('/file-diff', validatePathParams('projectPath', 'filePath'), createFileDiffHandler());
|
||||||
|
router.post(
|
||||||
|
'/stage-files',
|
||||||
|
validatePathParams('projectPath', 'files[]'),
|
||||||
|
createStageFilesHandler()
|
||||||
|
);
|
||||||
|
router.post('/details', validatePathParams('projectPath', 'filePath?'), createDetailsHandler());
|
||||||
|
router.post('/enhanced-status', validatePathParams('projectPath'), createEnhancedStatusHandler());
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
248
apps/server/src/routes/git/routes/details.ts
Normal file
248
apps/server/src/routes/git/routes/details.ts
Normal file
@@ -0,0 +1,248 @@
|
|||||||
|
/**
|
||||||
|
* POST /details endpoint - Get detailed git info for a file or project
|
||||||
|
* Returns branch, last commit info, diff stats, and conflict status
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { exec, execFile } from 'child_process';
|
||||||
|
import { promisify } from 'util';
|
||||||
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
const execAsync = promisify(exec);
|
||||||
|
const execFileAsync = promisify(execFile);
|
||||||
|
|
||||||
|
/** Detailed git information for a single file within a project. */
interface GitFileDetails {
  /** Current branch name (from `git rev-parse --abbrev-ref HEAD`). */
  branch: string;
  /** Full hash of the last commit touching the file; '' if none. */
  lastCommitHash: string;
  /** Subject line of the last commit touching the file; '' if none. */
  lastCommitMessage: string;
  /** Author name of the last commit touching the file; '' if none. */
  lastCommitAuthor: string;
  /** ISO-8601 author date of the last commit touching the file; '' if none. */
  lastCommitTimestamp: string;
  /** Lines added: unstaged + staged diffs combined (all lines for untracked files). */
  linesAdded: number;
  /** Lines removed: unstaged + staged diffs combined. */
  linesRemoved: number;
  /** True for unmerged states (U on either side, AA, DD). */
  isConflicted: boolean;
  /** True when the index (staged area) has changes for this file. */
  isStaged: boolean;
  /** True when the work tree has changes for this file. */
  isUnstaged: boolean;
  /** Human-readable status, e.g. 'Modified', 'Staged', 'Conflicted'. */
  statusLabel: string;
}
||||||
|
|
||||||
|
/**
 * POST /details handler factory - detailed git info for a file or project.
 *
 * Body: { projectPath: string; filePath?: string }
 * Without filePath, returns only the current branch. With filePath, also
 * returns last-commit metadata, combined diff stats, and conflict/staging
 * status. Any git failure inside the inner try degrades to an empty-but-
 * successful details payload rather than an error response.
 */
export function createDetailsHandler() {
  return async (req: Request, res: Response): Promise<void> => {
    try {
      const { projectPath, filePath } = req.body as {
        projectPath: string;
        filePath?: string;
      };

      if (!projectPath) {
        res.status(400).json({ success: false, error: 'projectPath required' });
        return;
      }

      try {
        // Get current branch
        const { stdout: branchRaw } = await execAsync('git rev-parse --abbrev-ref HEAD', {
          cwd: projectPath,
        });
        const branch = branchRaw.trim();

        if (!filePath) {
          // Project-level details - just return branch info
          res.json({
            success: true,
            details: { branch },
          });
          return;
        }

        // Get last commit info for this file
        let lastCommitHash = '';
        let lastCommitMessage = '';
        let lastCommitAuthor = '';
        let lastCommitTimestamp = '';

        try {
          // execFile (no shell) keeps the user-supplied filePath safe as an argv
          const { stdout: logOutput } = await execFileAsync(
            'git',
            ['log', '-1', '--format=%H|%s|%an|%aI', '--', filePath],
            { cwd: projectPath }
          );

          if (logOutput.trim()) {
            // NOTE(review): a commit subject containing '|' would shift these
            // fields since the output is split on '|' - confirm acceptable.
            const parts = logOutput.trim().split('|');
            lastCommitHash = parts[0] || '';
            lastCommitMessage = parts[1] || '';
            lastCommitAuthor = parts[2] || '';
            lastCommitTimestamp = parts[3] || '';
          }
        } catch {
          // File may not have any commits yet
        }

        // Get diff stats (lines added/removed)
        let linesAdded = 0;
        let linesRemoved = 0;

        try {
          // Check if file is untracked first
          const { stdout: statusLine } = await execFileAsync(
            'git',
            ['status', '--porcelain', '--', filePath],
            { cwd: projectPath }
          );

          if (statusLine.trim().startsWith('??')) {
            // Untracked file - count all lines as added using Node.js instead of shell
            try {
              const fileContent = (await secureFs.readFile(filePath, 'utf-8')).toString();
              const lines = fileContent.split('\n');
              // Don't count trailing empty line from final newline
              linesAdded =
                lines.length > 0 && lines[lines.length - 1] === ''
                  ? lines.length - 1
                  : lines.length;
            } catch {
              // Ignore
            }
          } else {
            // Unstaged diff stats vs HEAD
            const { stdout: diffStatRaw } = await execFileAsync(
              'git',
              ['diff', '--numstat', 'HEAD', '--', filePath],
              { cwd: projectPath }
            );

            if (diffStatRaw.trim()) {
              const parts = diffStatRaw.trim().split('\t');
              linesAdded = parseInt(parts[0], 10) || 0;
              linesRemoved = parseInt(parts[1], 10) || 0;
            }

            // Also check staged diff stats
            const { stdout: stagedDiffStatRaw } = await execFileAsync(
              'git',
              ['diff', '--numstat', '--cached', '--', filePath],
              { cwd: projectPath }
            );

            if (stagedDiffStatRaw.trim()) {
              const parts = stagedDiffStatRaw.trim().split('\t');
              linesAdded += parseInt(parts[0], 10) || 0;
              linesRemoved += parseInt(parts[1], 10) || 0;
            }
          }
        } catch {
          // Diff might not be available
        }

        // Get conflict and staging status
        let isConflicted = false;
        let isStaged = false;
        let isUnstaged = false;
        let statusLabel = '';

        try {
          // NOTE(review): `git status --porcelain` is run a second time here;
          // the earlier statusLine result could be reused.
          const { stdout: statusOutput } = await execFileAsync(
            'git',
            ['status', '--porcelain', '--', filePath],
            { cwd: projectPath }
          );

          if (statusOutput.trim()) {
            // Porcelain v1: column 0 = index status, column 1 = work-tree status
            const indexStatus = statusOutput[0];
            const workTreeStatus = statusOutput[1];

            // Check for conflicts (both modified, unmerged states)
            if (
              indexStatus === 'U' ||
              workTreeStatus === 'U' ||
              (indexStatus === 'A' && workTreeStatus === 'A') ||
              (indexStatus === 'D' && workTreeStatus === 'D')
            ) {
              isConflicted = true;
              statusLabel = 'Conflicted';
            } else {
              // Staged changes (index has a status)
              if (indexStatus !== ' ' && indexStatus !== '?') {
                isStaged = true;
              }
              // Unstaged changes (work tree has a status)
              if (workTreeStatus !== ' ' && workTreeStatus !== '?') {
                isUnstaged = true;
              }

              // Build status label
              if (isStaged && isUnstaged) {
                statusLabel = 'Staged + Modified';
              } else if (isStaged) {
                statusLabel = 'Staged';
              } else {
                // Prefer the work-tree status char; fall back to the index char
                const statusChar = workTreeStatus !== ' ' ? workTreeStatus : indexStatus;
                switch (statusChar) {
                  case 'M':
                    statusLabel = 'Modified';
                    break;
                  case 'A':
                    statusLabel = 'Added';
                    break;
                  case 'D':
                    statusLabel = 'Deleted';
                    break;
                  case 'R':
                    statusLabel = 'Renamed';
                    break;
                  case 'C':
                    statusLabel = 'Copied';
                    break;
                  case '?':
                    statusLabel = 'Untracked';
                    break;
                  default:
                    statusLabel = statusChar || '';
                }
              }
            }
          }
        } catch {
          // Status might not be available
        }

        const details: GitFileDetails = {
          branch,
          lastCommitHash,
          lastCommitMessage,
          lastCommitAuthor,
          lastCommitTimestamp,
          linesAdded,
          linesRemoved,
          isConflicted,
          isStaged,
          isUnstaged,
          statusLabel,
        };

        res.json({ success: true, details });
      } catch (innerError) {
        // Degrade gracefully: report success with empty details so the UI
        // can render for non-git directories
        logError(innerError, 'Git details failed');
        res.json({
          success: true,
          details: {
            branch: '',
            lastCommitHash: '',
            lastCommitMessage: '',
            lastCommitAuthor: '',
            lastCommitTimestamp: '',
            linesAdded: 0,
            linesRemoved: 0,
            isConflicted: false,
            isStaged: false,
            isUnstaged: false,
            statusLabel: '',
          },
        });
      }
    } catch (error) {
      logError(error, 'Get git details failed');
      res.status(500).json({ success: false, error: getErrorMessage(error) });
    }
  };
}
|
||||||
@@ -23,6 +23,7 @@ export function createDiffsHandler() {
|
|||||||
diff: result.diff,
|
diff: result.diff,
|
||||||
files: result.files,
|
files: result.files,
|
||||||
hasChanges: result.hasChanges,
|
hasChanges: result.hasChanges,
|
||||||
|
...(result.mergeState ? { mergeState: result.mergeState } : {}),
|
||||||
});
|
});
|
||||||
} catch (innerError) {
|
} catch (innerError) {
|
||||||
logError(innerError, 'Git diff failed');
|
logError(innerError, 'Git diff failed');
|
||||||
|
|||||||
176
apps/server/src/routes/git/routes/enhanced-status.ts
Normal file
176
apps/server/src/routes/git/routes/enhanced-status.ts
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
/**
|
||||||
|
* POST /enhanced-status endpoint - Get enhanced git status with diff stats per file
|
||||||
|
* Returns per-file status with lines added/removed and staged/unstaged differentiation
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { exec } from 'child_process';
|
||||||
|
import { promisify } from 'util';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
const execAsync = promisify(exec);
|
||||||
|
|
||||||
|
interface EnhancedFileStatus {
|
||||||
|
path: string;
|
||||||
|
indexStatus: string;
|
||||||
|
workTreeStatus: string;
|
||||||
|
isConflicted: boolean;
|
||||||
|
isStaged: boolean;
|
||||||
|
isUnstaged: boolean;
|
||||||
|
linesAdded: number;
|
||||||
|
linesRemoved: number;
|
||||||
|
statusLabel: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getStatusLabel(indexStatus: string, workTreeStatus: string): string {
|
||||||
|
// Check for conflicts
|
||||||
|
if (
|
||||||
|
indexStatus === 'U' ||
|
||||||
|
workTreeStatus === 'U' ||
|
||||||
|
(indexStatus === 'A' && workTreeStatus === 'A') ||
|
||||||
|
(indexStatus === 'D' && workTreeStatus === 'D')
|
||||||
|
) {
|
||||||
|
return 'Conflicted';
|
||||||
|
}
|
||||||
|
|
||||||
|
const hasStaged = indexStatus !== ' ' && indexStatus !== '?';
|
||||||
|
const hasUnstaged = workTreeStatus !== ' ' && workTreeStatus !== '?';
|
||||||
|
|
||||||
|
if (hasStaged && hasUnstaged) return 'Staged + Modified';
|
||||||
|
if (hasStaged) return 'Staged';
|
||||||
|
|
||||||
|
const statusChar = workTreeStatus !== ' ' ? workTreeStatus : indexStatus;
|
||||||
|
switch (statusChar) {
|
||||||
|
case 'M':
|
||||||
|
return 'Modified';
|
||||||
|
case 'A':
|
||||||
|
return 'Added';
|
||||||
|
case 'D':
|
||||||
|
return 'Deleted';
|
||||||
|
case 'R':
|
||||||
|
return 'Renamed';
|
||||||
|
case 'C':
|
||||||
|
return 'Copied';
|
||||||
|
case '?':
|
||||||
|
return 'Untracked';
|
||||||
|
default:
|
||||||
|
return statusChar || '';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createEnhancedStatusHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body as { projectPath: string };
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Get current branch
|
||||||
|
const { stdout: branchRaw } = await execAsync('git rev-parse --abbrev-ref HEAD', {
|
||||||
|
cwd: projectPath,
|
||||||
|
});
|
||||||
|
const branch = branchRaw.trim();
|
||||||
|
|
||||||
|
// Get porcelain status for all files
|
||||||
|
const { stdout: statusOutput } = await execAsync('git status --porcelain', {
|
||||||
|
cwd: projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get diff numstat for working tree changes
|
||||||
|
let workTreeStats: Record<string, { added: number; removed: number }> = {};
|
||||||
|
try {
|
||||||
|
const { stdout: numstatRaw } = await execAsync('git diff --numstat', {
|
||||||
|
cwd: projectPath,
|
||||||
|
maxBuffer: 10 * 1024 * 1024,
|
||||||
|
});
|
||||||
|
for (const line of numstatRaw.trim().split('\n').filter(Boolean)) {
|
||||||
|
const parts = line.split('\t');
|
||||||
|
if (parts.length >= 3) {
|
||||||
|
const added = parseInt(parts[0], 10) || 0;
|
||||||
|
const removed = parseInt(parts[1], 10) || 0;
|
||||||
|
workTreeStats[parts[2]] = { added, removed };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Ignore
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get diff numstat for staged changes
|
||||||
|
let stagedStats: Record<string, { added: number; removed: number }> = {};
|
||||||
|
try {
|
||||||
|
const { stdout: stagedNumstatRaw } = await execAsync('git diff --numstat --cached', {
|
||||||
|
cwd: projectPath,
|
||||||
|
maxBuffer: 10 * 1024 * 1024,
|
||||||
|
});
|
||||||
|
for (const line of stagedNumstatRaw.trim().split('\n').filter(Boolean)) {
|
||||||
|
const parts = line.split('\t');
|
||||||
|
if (parts.length >= 3) {
|
||||||
|
const added = parseInt(parts[0], 10) || 0;
|
||||||
|
const removed = parseInt(parts[1], 10) || 0;
|
||||||
|
stagedStats[parts[2]] = { added, removed };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Ignore
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse status and build enhanced file list
|
||||||
|
const files: EnhancedFileStatus[] = [];
|
||||||
|
|
||||||
|
for (const line of statusOutput.split('\n').filter(Boolean)) {
|
||||||
|
if (line.length < 4) continue;
|
||||||
|
|
||||||
|
const indexStatus = line[0];
|
||||||
|
const workTreeStatus = line[1];
|
||||||
|
const filePath = line.substring(3).trim();
|
||||||
|
|
||||||
|
// Handle renamed files (format: "R old -> new")
|
||||||
|
const actualPath = filePath.includes(' -> ')
|
||||||
|
? filePath.split(' -> ')[1].trim()
|
||||||
|
: filePath;
|
||||||
|
|
||||||
|
const isConflicted =
|
||||||
|
indexStatus === 'U' ||
|
||||||
|
workTreeStatus === 'U' ||
|
||||||
|
(indexStatus === 'A' && workTreeStatus === 'A') ||
|
||||||
|
(indexStatus === 'D' && workTreeStatus === 'D');
|
||||||
|
|
||||||
|
const isStaged = indexStatus !== ' ' && indexStatus !== '?';
|
||||||
|
const isUnstaged = workTreeStatus !== ' ' && workTreeStatus !== '?';
|
||||||
|
|
||||||
|
// Combine diff stats from both working tree and staged
|
||||||
|
const wtStats = workTreeStats[actualPath] || { added: 0, removed: 0 };
|
||||||
|
const stStats = stagedStats[actualPath] || { added: 0, removed: 0 };
|
||||||
|
|
||||||
|
files.push({
|
||||||
|
path: actualPath,
|
||||||
|
indexStatus,
|
||||||
|
workTreeStatus,
|
||||||
|
isConflicted,
|
||||||
|
isStaged,
|
||||||
|
isUnstaged,
|
||||||
|
linesAdded: wtStats.added + stStats.added,
|
||||||
|
linesRemoved: wtStats.removed + stStats.removed,
|
||||||
|
statusLabel: getStatusLabel(indexStatus, workTreeStatus),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
branch,
|
||||||
|
files,
|
||||||
|
});
|
||||||
|
} catch (innerError) {
|
||||||
|
logError(innerError, 'Git enhanced status failed');
|
||||||
|
res.json({ success: true, branch: '', files: [] });
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Get enhanced status failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
67
apps/server/src/routes/git/routes/stage-files.ts
Normal file
67
apps/server/src/routes/git/routes/stage-files.ts
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
/**
|
||||||
|
* POST /stage-files endpoint - Stage or unstage files in the main project
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
import { stageFiles, StageFilesValidationError } from '../../../services/stage-files-service.js';
|
||||||
|
|
||||||
|
export function createStageFilesHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, files, operation } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
files: string[];
|
||||||
|
operation: 'stage' | 'unstage';
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'projectPath required',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!Array.isArray(files) || files.length === 0) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'files array required and must not be empty',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const file of files) {
|
||||||
|
if (typeof file !== 'string' || file.trim() === '') {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Each element of files must be a non-empty string',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (operation !== 'stage' && operation !== 'unstage') {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'operation must be "stage" or "unstage"',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await stageFiles(projectPath, files, operation);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
result,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
if (error instanceof StageFilesValidationError) {
|
||||||
|
res.status(400).json({ success: false, error: error.message });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
logError(error, `${(req.body as { operation?: string })?.operation ?? 'stage'} files failed`);
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -9,6 +9,8 @@ import { createCheckGitHubRemoteHandler } from './routes/check-github-remote.js'
|
|||||||
import { createListIssuesHandler } from './routes/list-issues.js';
|
import { createListIssuesHandler } from './routes/list-issues.js';
|
||||||
import { createListPRsHandler } from './routes/list-prs.js';
|
import { createListPRsHandler } from './routes/list-prs.js';
|
||||||
import { createListCommentsHandler } from './routes/list-comments.js';
|
import { createListCommentsHandler } from './routes/list-comments.js';
|
||||||
|
import { createListPRReviewCommentsHandler } from './routes/list-pr-review-comments.js';
|
||||||
|
import { createResolvePRCommentHandler } from './routes/resolve-pr-comment.js';
|
||||||
import { createValidateIssueHandler } from './routes/validate-issue.js';
|
import { createValidateIssueHandler } from './routes/validate-issue.js';
|
||||||
import {
|
import {
|
||||||
createValidationStatusHandler,
|
createValidationStatusHandler,
|
||||||
@@ -29,6 +31,16 @@ export function createGitHubRoutes(
|
|||||||
router.post('/issues', validatePathParams('projectPath'), createListIssuesHandler());
|
router.post('/issues', validatePathParams('projectPath'), createListIssuesHandler());
|
||||||
router.post('/prs', validatePathParams('projectPath'), createListPRsHandler());
|
router.post('/prs', validatePathParams('projectPath'), createListPRsHandler());
|
||||||
router.post('/issue-comments', validatePathParams('projectPath'), createListCommentsHandler());
|
router.post('/issue-comments', validatePathParams('projectPath'), createListCommentsHandler());
|
||||||
|
router.post(
|
||||||
|
'/pr-review-comments',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createListPRReviewCommentsHandler()
|
||||||
|
);
|
||||||
|
router.post(
|
||||||
|
'/resolve-pr-comment',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createResolvePRCommentHandler()
|
||||||
|
);
|
||||||
router.post(
|
router.post(
|
||||||
'/validate-issue',
|
'/validate-issue',
|
||||||
validatePathParams('projectPath'),
|
validatePathParams('projectPath'),
|
||||||
|
|||||||
333
apps/server/src/routes/github/routes/list-pr-review-comments.ts
Normal file
333
apps/server/src/routes/github/routes/list-pr-review-comments.ts
Normal file
@@ -0,0 +1,333 @@
|
|||||||
|
/**
|
||||||
|
* POST /pr-review-comments endpoint - Fetch review comments for a GitHub PR
|
||||||
|
*
|
||||||
|
* Fetches both regular PR comments and inline code review comments
|
||||||
|
* for a specific pull request, providing file path and line context.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { spawn } from 'child_process';
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
|
||||||
|
import { checkGitHubRemote } from './check-github-remote.js';
|
||||||
|
|
||||||
|
export interface PRReviewComment {
|
||||||
|
id: string;
|
||||||
|
author: string;
|
||||||
|
avatarUrl?: string;
|
||||||
|
body: string;
|
||||||
|
path?: string;
|
||||||
|
line?: number;
|
||||||
|
createdAt: string;
|
||||||
|
updatedAt?: string;
|
||||||
|
isReviewComment: boolean;
|
||||||
|
/** Whether this is an outdated review comment (code has changed since) */
|
||||||
|
isOutdated?: boolean;
|
||||||
|
/** Whether the review thread containing this comment has been resolved */
|
||||||
|
isResolved?: boolean;
|
||||||
|
/** The GraphQL node ID of the review thread (used for resolve/unresolve mutations) */
|
||||||
|
threadId?: string;
|
||||||
|
/** The diff hunk context for the comment */
|
||||||
|
diffHunk?: string;
|
||||||
|
/** The side of the diff (LEFT or RIGHT) */
|
||||||
|
side?: string;
|
||||||
|
/** The commit ID the comment was made on */
|
||||||
|
commitId?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ListPRReviewCommentsResult {
|
||||||
|
success: boolean;
|
||||||
|
comments?: PRReviewComment[];
|
||||||
|
totalCount?: number;
|
||||||
|
error?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ListPRReviewCommentsRequest {
|
||||||
|
projectPath: string;
|
||||||
|
prNumber: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Timeout for GitHub GraphQL API requests in milliseconds */
|
||||||
|
const GITHUB_API_TIMEOUT_MS = 30000;
|
||||||
|
|
||||||
|
interface GraphQLReviewThreadComment {
|
||||||
|
databaseId: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GraphQLReviewThread {
|
||||||
|
id: string;
|
||||||
|
isResolved: boolean;
|
||||||
|
comments: {
|
||||||
|
nodes: GraphQLReviewThreadComment[];
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GraphQLResponse {
|
||||||
|
data?: {
|
||||||
|
repository?: {
|
||||||
|
pullRequest?: {
|
||||||
|
reviewThreads?: {
|
||||||
|
nodes: GraphQLReviewThread[];
|
||||||
|
};
|
||||||
|
} | null;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
errors?: Array<{ message: string }>;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ReviewThreadInfo {
|
||||||
|
isResolved: boolean;
|
||||||
|
threadId: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetch review thread resolved status and thread IDs using GitHub GraphQL API.
|
||||||
|
* Returns a map of comment ID (string) -> { isResolved, threadId }.
|
||||||
|
*/
|
||||||
|
async function fetchReviewThreadResolvedStatus(
|
||||||
|
projectPath: string,
|
||||||
|
owner: string,
|
||||||
|
repo: string,
|
||||||
|
prNumber: number
|
||||||
|
): Promise<Map<string, ReviewThreadInfo>> {
|
||||||
|
const resolvedMap = new Map<string, ReviewThreadInfo>();
|
||||||
|
|
||||||
|
const query = `
|
||||||
|
query GetPRReviewThreads(
|
||||||
|
$owner: String!
|
||||||
|
$repo: String!
|
||||||
|
$prNumber: Int!
|
||||||
|
) {
|
||||||
|
repository(owner: $owner, name: $repo) {
|
||||||
|
pullRequest(number: $prNumber) {
|
||||||
|
reviewThreads(first: 100) {
|
||||||
|
nodes {
|
||||||
|
id
|
||||||
|
isResolved
|
||||||
|
comments(first: 100) {
|
||||||
|
nodes {
|
||||||
|
databaseId
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`;
|
||||||
|
|
||||||
|
const variables = { owner, repo, prNumber };
|
||||||
|
const requestBody = JSON.stringify({ query, variables });
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await new Promise<GraphQLResponse>((resolve, reject) => {
|
||||||
|
const gh = spawn('gh', ['api', 'graphql', '--input', '-'], {
|
||||||
|
cwd: projectPath,
|
||||||
|
env: execEnv,
|
||||||
|
});
|
||||||
|
|
||||||
|
const timeoutId = setTimeout(() => {
|
||||||
|
gh.kill();
|
||||||
|
reject(new Error('GitHub GraphQL API request timed out'));
|
||||||
|
}, GITHUB_API_TIMEOUT_MS);
|
||||||
|
|
||||||
|
let stdout = '';
|
||||||
|
let stderr = '';
|
||||||
|
gh.stdout.on('data', (data: Buffer) => (stdout += data.toString()));
|
||||||
|
gh.stderr.on('data', (data: Buffer) => (stderr += data.toString()));
|
||||||
|
|
||||||
|
gh.on('close', (code) => {
|
||||||
|
clearTimeout(timeoutId);
|
||||||
|
if (code !== 0) {
|
||||||
|
return reject(new Error(`gh process exited with code ${code}: ${stderr}`));
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
resolve(JSON.parse(stdout));
|
||||||
|
} catch (e) {
|
||||||
|
reject(e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
gh.stdin.write(requestBody);
|
||||||
|
gh.stdin.end();
|
||||||
|
});
|
||||||
|
|
||||||
|
if (response.errors && response.errors.length > 0) {
|
||||||
|
throw new Error(response.errors[0].message);
|
||||||
|
}
|
||||||
|
|
||||||
|
const threads = response.data?.repository?.pullRequest?.reviewThreads?.nodes ?? [];
|
||||||
|
for (const thread of threads) {
|
||||||
|
const info: ReviewThreadInfo = { isResolved: thread.isResolved, threadId: thread.id };
|
||||||
|
for (const comment of thread.comments.nodes) {
|
||||||
|
resolvedMap.set(String(comment.databaseId), info);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// Log but don't fail — resolved status is best-effort
|
||||||
|
logError(error, 'Failed to fetch PR review thread resolved status');
|
||||||
|
}
|
||||||
|
|
||||||
|
return resolvedMap;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetch all comments for a PR (both regular and inline review comments)
|
||||||
|
*/
|
||||||
|
async function fetchPRReviewComments(
|
||||||
|
projectPath: string,
|
||||||
|
owner: string,
|
||||||
|
repo: string,
|
||||||
|
prNumber: number
|
||||||
|
): Promise<PRReviewComment[]> {
|
||||||
|
const allComments: PRReviewComment[] = [];
|
||||||
|
|
||||||
|
// Fetch review thread resolved status in parallel with comment fetching
|
||||||
|
const resolvedStatusPromise = fetchReviewThreadResolvedStatus(projectPath, owner, repo, prNumber);
|
||||||
|
|
||||||
|
// 1. Fetch regular PR comments (issue-level comments)
|
||||||
|
try {
|
||||||
|
const { stdout: commentsOutput } = await execAsync(
|
||||||
|
`gh pr view ${prNumber} -R ${owner}/${repo} --json comments`,
|
||||||
|
{
|
||||||
|
cwd: projectPath,
|
||||||
|
env: execEnv,
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
const commentsData = JSON.parse(commentsOutput);
|
||||||
|
const regularComments = (commentsData.comments || []).map(
|
||||||
|
(c: {
|
||||||
|
id: string;
|
||||||
|
author: { login: string; avatarUrl?: string };
|
||||||
|
body: string;
|
||||||
|
createdAt: string;
|
||||||
|
updatedAt?: string;
|
||||||
|
}) => ({
|
||||||
|
id: String(c.id),
|
||||||
|
author: c.author?.login || 'unknown',
|
||||||
|
avatarUrl: c.author?.avatarUrl,
|
||||||
|
body: c.body,
|
||||||
|
createdAt: c.createdAt,
|
||||||
|
updatedAt: c.updatedAt,
|
||||||
|
isReviewComment: false,
|
||||||
|
isOutdated: false,
|
||||||
|
// Regular PR comments are not part of review threads, so not resolvable
|
||||||
|
isResolved: false,
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
allComments.push(...regularComments);
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Failed to fetch regular PR comments');
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Fetch inline review comments (code-level comments with file/line info)
|
||||||
|
try {
|
||||||
|
const reviewsEndpoint = `repos/${owner}/${repo}/pulls/${prNumber}/comments`;
|
||||||
|
const { stdout: reviewsOutput } = await execAsync(`gh api ${reviewsEndpoint} --paginate`, {
|
||||||
|
cwd: projectPath,
|
||||||
|
env: execEnv,
|
||||||
|
});
|
||||||
|
|
||||||
|
const reviewsData = JSON.parse(reviewsOutput);
|
||||||
|
const reviewComments = (Array.isArray(reviewsData) ? reviewsData : []).map(
|
||||||
|
(c: {
|
||||||
|
id: number;
|
||||||
|
user: { login: string; avatar_url?: string };
|
||||||
|
body: string;
|
||||||
|
path: string;
|
||||||
|
line?: number;
|
||||||
|
original_line?: number;
|
||||||
|
created_at: string;
|
||||||
|
updated_at?: string;
|
||||||
|
diff_hunk?: string;
|
||||||
|
side?: string;
|
||||||
|
commit_id?: string;
|
||||||
|
position?: number | null;
|
||||||
|
}) => ({
|
||||||
|
id: String(c.id),
|
||||||
|
author: c.user?.login || 'unknown',
|
||||||
|
avatarUrl: c.user?.avatar_url,
|
||||||
|
body: c.body,
|
||||||
|
path: c.path,
|
||||||
|
line: c.line || c.original_line,
|
||||||
|
createdAt: c.created_at,
|
||||||
|
updatedAt: c.updated_at,
|
||||||
|
isReviewComment: true,
|
||||||
|
// A review comment is "outdated" if position is null (code has changed)
|
||||||
|
isOutdated: c.position === null && !c.line,
|
||||||
|
// isResolved will be filled in below from GraphQL data
|
||||||
|
isResolved: false,
|
||||||
|
diffHunk: c.diff_hunk,
|
||||||
|
side: c.side,
|
||||||
|
commitId: c.commit_id,
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
allComments.push(...reviewComments);
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Failed to fetch inline review comments');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for resolved status and apply to inline review comments
|
||||||
|
const resolvedMap = await resolvedStatusPromise;
|
||||||
|
if (resolvedMap.size > 0) {
|
||||||
|
for (const comment of allComments) {
|
||||||
|
if (comment.isReviewComment && resolvedMap.has(comment.id)) {
|
||||||
|
const info = resolvedMap.get(comment.id);
|
||||||
|
comment.isResolved = info?.isResolved ?? false;
|
||||||
|
comment.threadId = info?.threadId;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by createdAt descending (newest first)
|
||||||
|
allComments.sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime());
|
||||||
|
|
||||||
|
return allComments;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createListPRReviewCommentsHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, prNumber } = req.body as ListPRReviewCommentsRequest;
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!prNumber || typeof prNumber !== 'number') {
|
||||||
|
res
|
||||||
|
.status(400)
|
||||||
|
.json({ success: false, error: 'prNumber is required and must be a number' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this is a GitHub repo and get owner/repo
|
||||||
|
const remoteStatus = await checkGitHubRemote(projectPath);
|
||||||
|
if (!remoteStatus.hasGitHubRemote || !remoteStatus.owner || !remoteStatus.repo) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Project does not have a GitHub remote',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const comments = await fetchPRReviewComments(
|
||||||
|
projectPath,
|
||||||
|
remoteStatus.owner,
|
||||||
|
remoteStatus.repo,
|
||||||
|
prNumber
|
||||||
|
);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
comments,
|
||||||
|
totalCount: comments.length,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Fetch PR review comments failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
151
apps/server/src/routes/github/routes/resolve-pr-comment.ts
Normal file
151
apps/server/src/routes/github/routes/resolve-pr-comment.ts
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
/**
|
||||||
|
* POST /resolve-pr-comment endpoint - Resolve or unresolve a GitHub PR review thread
|
||||||
|
*
|
||||||
|
* Uses the GitHub GraphQL API to resolve or unresolve a review thread
|
||||||
|
* identified by its GraphQL node ID (threadId).
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { spawn } from 'child_process';
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { execEnv, getErrorMessage, logError } from './common.js';
|
||||||
|
import { checkGitHubRemote } from './check-github-remote.js';
|
||||||
|
|
||||||
|
export interface ResolvePRCommentResult {
|
||||||
|
success: boolean;
|
||||||
|
isResolved?: boolean;
|
||||||
|
error?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ResolvePRCommentRequest {
|
||||||
|
projectPath: string;
|
||||||
|
threadId: string;
|
||||||
|
resolve: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Timeout for GitHub GraphQL API requests in milliseconds */
|
||||||
|
const GITHUB_API_TIMEOUT_MS = 30000;
|
||||||
|
|
||||||
|
interface GraphQLMutationResponse {
|
||||||
|
data?: {
|
||||||
|
resolveReviewThread?: {
|
||||||
|
thread?: { isResolved: boolean; id: string } | null;
|
||||||
|
} | null;
|
||||||
|
unresolveReviewThread?: {
|
||||||
|
thread?: { isResolved: boolean; id: string } | null;
|
||||||
|
} | null;
|
||||||
|
};
|
||||||
|
errors?: Array<{ message: string }>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a GraphQL mutation to resolve or unresolve a review thread.
|
||||||
|
*/
|
||||||
|
async function executeReviewThreadMutation(
|
||||||
|
projectPath: string,
|
||||||
|
threadId: string,
|
||||||
|
resolve: boolean
|
||||||
|
): Promise<{ isResolved: boolean }> {
|
||||||
|
const mutationName = resolve ? 'resolveReviewThread' : 'unresolveReviewThread';
|
||||||
|
|
||||||
|
const mutation = `
|
||||||
|
mutation ${resolve ? 'ResolveThread' : 'UnresolveThread'}($threadId: ID!) {
|
||||||
|
${mutationName}(input: { threadId: $threadId }) {
|
||||||
|
thread {
|
||||||
|
id
|
||||||
|
isResolved
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`;
|
||||||
|
|
||||||
|
const variables = { threadId };
|
||||||
|
const requestBody = JSON.stringify({ query: mutation, variables });
|
||||||
|
|
||||||
|
const response = await new Promise<GraphQLMutationResponse>((res, rej) => {
|
||||||
|
const gh = spawn('gh', ['api', 'graphql', '--input', '-'], {
|
||||||
|
cwd: projectPath,
|
||||||
|
env: execEnv,
|
||||||
|
});
|
||||||
|
|
||||||
|
const timeoutId = setTimeout(() => {
|
||||||
|
gh.kill();
|
||||||
|
rej(new Error('GitHub GraphQL API request timed out'));
|
||||||
|
}, GITHUB_API_TIMEOUT_MS);
|
||||||
|
|
||||||
|
let stdout = '';
|
||||||
|
let stderr = '';
|
||||||
|
gh.stdout.on('data', (data: Buffer) => (stdout += data.toString()));
|
||||||
|
gh.stderr.on('data', (data: Buffer) => (stderr += data.toString()));
|
||||||
|
|
||||||
|
gh.on('close', (code) => {
|
||||||
|
clearTimeout(timeoutId);
|
||||||
|
if (code !== 0) {
|
||||||
|
return rej(new Error(`gh process exited with code ${code}: ${stderr}`));
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
res(JSON.parse(stdout));
|
||||||
|
} catch (e) {
|
||||||
|
rej(e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
gh.stdin.write(requestBody);
|
||||||
|
gh.stdin.end();
|
||||||
|
});
|
||||||
|
|
||||||
|
if (response.errors && response.errors.length > 0) {
|
||||||
|
throw new Error(response.errors[0].message);
|
||||||
|
}
|
||||||
|
|
||||||
|
const threadData = resolve
|
||||||
|
? response.data?.resolveReviewThread?.thread
|
||||||
|
: response.data?.unresolveReviewThread?.thread;
|
||||||
|
|
||||||
|
if (!threadData) {
|
||||||
|
throw new Error('No thread data returned from GitHub API');
|
||||||
|
}
|
||||||
|
|
||||||
|
return { isResolved: threadData.isResolved };
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createResolvePRCommentHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, threadId, resolve } = req.body as ResolvePRCommentRequest;
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!threadId) {
|
||||||
|
res.status(400).json({ success: false, error: 'threadId is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (typeof resolve !== 'boolean') {
|
||||||
|
res.status(400).json({ success: false, error: 'resolve must be a boolean' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this is a GitHub repo
|
||||||
|
const remoteStatus = await checkGitHubRemote(projectPath);
|
||||||
|
if (!remoteStatus.hasGitHubRemote) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Project does not have a GitHub remote',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await executeReviewThreadMutation(projectPath, threadId, resolve);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
isResolved: result.isResolved,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Resolve PR comment failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -25,7 +25,7 @@ import {
|
|||||||
isOpencodeModel,
|
isOpencodeModel,
|
||||||
supportsStructuredOutput,
|
supportsStructuredOutput,
|
||||||
} from '@automaker/types';
|
} from '@automaker/types';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel, resolveModelString } from '@automaker/model-resolver';
|
||||||
import { extractJson } from '../../../lib/json-extractor.js';
|
import { extractJson } from '../../../lib/json-extractor.js';
|
||||||
import { writeValidation } from '../../../lib/validation-storage.js';
|
import { writeValidation } from '../../../lib/validation-storage.js';
|
||||||
import { streamingQuery } from '../../../providers/simple-query-service.js';
|
import { streamingQuery } from '../../../providers/simple-query-service.js';
|
||||||
@@ -188,8 +188,12 @@ ${basePrompt}`;
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use provider resolved model if available, otherwise use original model
|
// CRITICAL: For custom providers (GLM, MiniMax), pass the provider's model ID (e.g. "GLM-4.7")
|
||||||
const effectiveModel = providerResolvedModel || (model as string);
|
// to the API, NOT the resolved Claude model - otherwise we get "model not found"
|
||||||
|
// For standard Claude models, resolve aliases (e.g., 'opus' -> 'claude-opus-4-20250514')
|
||||||
|
const effectiveModel = claudeCompatibleProvider
|
||||||
|
? (model as string)
|
||||||
|
: providerResolvedModel || resolveModelString(model as string);
|
||||||
logger.info(`Using model: ${effectiveModel}`);
|
logger.info(`Using model: ${effectiveModel}`);
|
||||||
|
|
||||||
// Use streamingQuery with event callbacks
|
// Use streamingQuery with event callbacks
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import type { Request, Response } from 'express';
|
|||||||
import type { EventEmitter } from '../../../lib/events.js';
|
import type { EventEmitter } from '../../../lib/events.js';
|
||||||
import type { IssueValidationEvent } from '@automaker/types';
|
import type { IssueValidationEvent } from '@automaker/types';
|
||||||
import {
|
import {
|
||||||
isValidationRunning,
|
|
||||||
getValidationStatus,
|
getValidationStatus,
|
||||||
getRunningValidations,
|
getRunningValidations,
|
||||||
abortValidation,
|
abortValidation,
|
||||||
@@ -15,7 +14,6 @@ import {
|
|||||||
logger,
|
logger,
|
||||||
} from './validation-common.js';
|
} from './validation-common.js';
|
||||||
import {
|
import {
|
||||||
readValidation,
|
|
||||||
getAllValidations,
|
getAllValidations,
|
||||||
getValidationWithFreshness,
|
getValidationWithFreshness,
|
||||||
deleteValidation,
|
deleteValidation,
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ export function createProvidersHandler() {
|
|||||||
// Get installation status from all providers
|
// Get installation status from all providers
|
||||||
const statuses = await ProviderFactory.checkAllProviders();
|
const statuses = await ProviderFactory.checkAllProviders();
|
||||||
|
|
||||||
const providers: Record<string, any> = {
|
const providers: Record<string, Record<string, unknown>> = {
|
||||||
anthropic: {
|
anthropic: {
|
||||||
available: statuses.claude?.installed || false,
|
available: statuses.claude?.installed || false,
|
||||||
hasApiKey: !!process.env.ANTHROPIC_API_KEY,
|
hasApiKey: !!process.env.ANTHROPIC_API_KEY,
|
||||||
|
|||||||
@@ -4,14 +4,14 @@
|
|||||||
|
|
||||||
import { Router } from 'express';
|
import { Router } from 'express';
|
||||||
import type { FeatureLoader } from '../../services/feature-loader.js';
|
import type { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
import type { AutoModeService } from '../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import type { NotificationService } from '../../services/notification-service.js';
|
import type { NotificationService } from '../../services/notification-service.js';
|
||||||
import { createOverviewHandler } from './routes/overview.js';
|
import { createOverviewHandler } from './routes/overview.js';
|
||||||
|
|
||||||
export function createProjectsRoutes(
|
export function createProjectsRoutes(
|
||||||
featureLoader: FeatureLoader,
|
featureLoader: FeatureLoader,
|
||||||
autoModeService: AutoModeService,
|
autoModeService: AutoModeServiceCompat,
|
||||||
settingsService: SettingsService,
|
settingsService: SettingsService,
|
||||||
notificationService: NotificationService
|
notificationService: NotificationService
|
||||||
): Router {
|
): Router {
|
||||||
|
|||||||
@@ -9,7 +9,11 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { FeatureLoader } from '../../../services/feature-loader.js';
|
import type { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type {
|
||||||
|
AutoModeServiceCompat,
|
||||||
|
RunningAgentInfo,
|
||||||
|
ProjectAutoModeStatus,
|
||||||
|
} from '../../../services/auto-mode/index.js';
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import type { NotificationService } from '../../../services/notification-service.js';
|
import type { NotificationService } from '../../../services/notification-service.js';
|
||||||
import type {
|
import type {
|
||||||
@@ -147,7 +151,7 @@ function getLastActivityAt(features: Feature[]): string | undefined {
|
|||||||
|
|
||||||
export function createOverviewHandler(
|
export function createOverviewHandler(
|
||||||
featureLoader: FeatureLoader,
|
featureLoader: FeatureLoader,
|
||||||
autoModeService: AutoModeService,
|
autoModeService: AutoModeServiceCompat,
|
||||||
settingsService: SettingsService,
|
settingsService: SettingsService,
|
||||||
notificationService: NotificationService
|
notificationService: NotificationService
|
||||||
) {
|
) {
|
||||||
@@ -158,7 +162,7 @@ export function createOverviewHandler(
|
|||||||
const projectRefs: ProjectRef[] = settings.projects || [];
|
const projectRefs: ProjectRef[] = settings.projects || [];
|
||||||
|
|
||||||
// Get all running agents once to count live running features per project
|
// Get all running agents once to count live running features per project
|
||||||
const allRunningAgents = await autoModeService.getRunningAgents();
|
const allRunningAgents: RunningAgentInfo[] = await autoModeService.getRunningAgents();
|
||||||
|
|
||||||
// Collect project statuses in parallel
|
// Collect project statuses in parallel
|
||||||
const projectStatusPromises = projectRefs.map(async (projectRef): Promise<ProjectStatus> => {
|
const projectStatusPromises = projectRefs.map(async (projectRef): Promise<ProjectStatus> => {
|
||||||
@@ -169,7 +173,10 @@ export function createOverviewHandler(
|
|||||||
const totalFeatures = features.length;
|
const totalFeatures = features.length;
|
||||||
|
|
||||||
// Get auto-mode status for this project (main worktree, branchName = null)
|
// Get auto-mode status for this project (main worktree, branchName = null)
|
||||||
const autoModeStatus = autoModeService.getStatusForProject(projectRef.path, null);
|
const autoModeStatus: ProjectAutoModeStatus = await autoModeService.getStatusForProject(
|
||||||
|
projectRef.path,
|
||||||
|
null
|
||||||
|
);
|
||||||
const isAutoModeRunning = autoModeStatus.isAutoLoopRunning;
|
const isAutoModeRunning = autoModeStatus.isAutoLoopRunning;
|
||||||
|
|
||||||
// Count live running features for this project (across all branches)
|
// Count live running features for this project (across all branches)
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import { Router } from 'express';
|
import { Router } from 'express';
|
||||||
import type { AutoModeService } from '../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js';
|
||||||
import { createIndexHandler } from './routes/index.js';
|
import { createIndexHandler } from './routes/index.js';
|
||||||
|
|
||||||
export function createRunningAgentsRoutes(autoModeService: AutoModeService): Router {
|
export function createRunningAgentsRoutes(autoModeService: AutoModeServiceCompat): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
router.get('/', createIndexHandler(autoModeService));
|
router.get('/', createIndexHandler(autoModeService));
|
||||||
|
|||||||
@@ -3,16 +3,17 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getBacklogPlanStatus, getRunningDetails } from '../../backlog-plan/common.js';
|
import { getBacklogPlanStatus, getRunningDetails } from '../../backlog-plan/common.js';
|
||||||
import { getAllRunningGenerations } from '../../app-spec/common.js';
|
import { getAllRunningGenerations } from '../../app-spec/common.js';
|
||||||
import path from 'path';
|
import path from 'path';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createIndexHandler(autoModeService: AutoModeService) {
|
export function createIndexHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const runningAgents = [...(await autoModeService.getRunningAgents())];
|
const runningAgents = [...(await autoModeService.getRunningAgents())];
|
||||||
|
|
||||||
const backlogPlanStatus = getBacklogPlanStatus();
|
const backlogPlanStatus = getBacklogPlanStatus();
|
||||||
const backlogPlanDetails = getRunningDetails();
|
const backlogPlanDetails = getRunningDetails();
|
||||||
|
|
||||||
|
|||||||
@@ -46,16 +46,14 @@ export function createUpdateGlobalHandler(settingsService: SettingsService) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Minimal debug logging to help diagnose accidental wipes.
|
// Minimal debug logging to help diagnose accidental wipes.
|
||||||
const projectsLen = Array.isArray((updates as any).projects)
|
const projectsLen = Array.isArray(updates.projects) ? updates.projects.length : undefined;
|
||||||
? (updates as any).projects.length
|
const trashedLen = Array.isArray(updates.trashedProjects)
|
||||||
: undefined;
|
? updates.trashedProjects.length
|
||||||
const trashedLen = Array.isArray((updates as any).trashedProjects)
|
|
||||||
? (updates as any).trashedProjects.length
|
|
||||||
: undefined;
|
: undefined;
|
||||||
logger.info(
|
logger.info(
|
||||||
`[SERVER_SETTINGS_UPDATE] Request received: projects=${projectsLen ?? 'n/a'}, trashedProjects=${trashedLen ?? 'n/a'}, theme=${
|
`[SERVER_SETTINGS_UPDATE] Request received: projects=${projectsLen ?? 'n/a'}, trashedProjects=${trashedLen ?? 'n/a'}, theme=${
|
||||||
(updates as any).theme ?? 'n/a'
|
updates.theme ?? 'n/a'
|
||||||
}, localStorageMigrated=${(updates as any).localStorageMigrated ?? 'n/a'}`
|
}, localStorageMigrated=${updates.localStorageMigrated ?? 'n/a'}`
|
||||||
);
|
);
|
||||||
|
|
||||||
// Get old settings to detect theme changes
|
// Get old settings to detect theme changes
|
||||||
|
|||||||
@@ -4,13 +4,9 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
import { exec } from 'child_process';
|
|
||||||
import { promisify } from 'util';
|
|
||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
|
|
||||||
const execAsync = promisify(exec);
|
|
||||||
|
|
||||||
export function createAuthClaudeHandler() {
|
export function createAuthClaudeHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
|
|||||||
@@ -4,13 +4,9 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { logError, getErrorMessage } from '../common.js';
|
import { logError, getErrorMessage } from '../common.js';
|
||||||
import { exec } from 'child_process';
|
|
||||||
import { promisify } from 'util';
|
|
||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
|
|
||||||
const execAsync = promisify(exec);
|
|
||||||
|
|
||||||
export function createAuthOpencodeHandler() {
|
export function createAuthOpencodeHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
|
|||||||
@@ -10,9 +10,6 @@ import type { Request, Response } from 'express';
|
|||||||
import { CopilotProvider } from '../../../providers/copilot-provider.js';
|
import { CopilotProvider } from '../../../providers/copilot-provider.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
import type { ModelDefinition } from '@automaker/types';
|
import type { ModelDefinition } from '@automaker/types';
|
||||||
import { createLogger } from '@automaker/utils';
|
|
||||||
|
|
||||||
const logger = createLogger('CopilotModelsRoute');
|
|
||||||
|
|
||||||
// Singleton provider instance for caching
|
// Singleton provider instance for caching
|
||||||
let providerInstance: CopilotProvider | null = null;
|
let providerInstance: CopilotProvider | null = null;
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user