Mirror of https://github.com/AutoMaker-Org/automaker.git (synced 2026-01-30 14:22:02 +00:00)

Compare commits: feat/debug...v0.9.0 (123 commits)
Commits in this range (SHA1):
3f2707404c, fdd3a28be7, 21d275c984, cadb19d7ed, a01241fabe, 89a0877bcc, 950a97d72b, 639c1de2e8, 254e4f630c, 5d0fb08651, 7ea64b32f3, 93807c22c1, b2cf17b53b, 7e768b6290, 7bdf5e4261, f6fed612df, 3b3e282df7, 7f69f652fb, 1452232409, 4f0f56a7ba, a695d0db7b, 87c3d766c9, 89248001e4, 41b4869068, be88a07329, f6738ff26c, ae22f781d8, e649c4ced5, 50da1b401c, 33d02d1df8, f3041190fa, 55c2530d5a, 5fbc7dd13e, b2e5ff1460, 0f9232ea33, a815be6a20, 7b4667eba9, 08ccf2632a, 7583598a05, 7e68691e92, 4b2034b834, 4dcf54146c, 8a9715adef, d253d494ba, d1f7794afa, 4d80a93710, d70faf3b28, 271749a5a4, d1bd131cab, 96fe90ca65, fd5f7b873a, 959467de90, 69434fe356, dc264bd164, 8992f667c7, eb627ef323, d8cdb0bf7a, 8c68c24716, 9b302583c4, 47c2d795e0, d608d8c2d4, f737b1f30a, 8b36fce7d7, 30a2a1c921, 763f9832c3, 11b1bbc143, 7176d3e513, 9c3ba34b51, ff3af937da, b9fcb916a6, cfa1f114fd, 761929ea8e, 4d36e66deb, e58e389658, 821827f850, 9d8464cceb, 48a4fa5c6c, 70c04b5a3f, 24ea10e818, 927451013c, 0d206fe75f, 11accac5ae, 2250367ddc, fe305bbc81, 92195340c6, 1316ead8c8, 03b33106e0, 251f0fd88e, 96f154d440, 27c6d5a3bb, a57dcc170d, 5c601ff200, 4d4025ca06, 77f253c7bd, fe13d47b24, 33acf502ed, 66557b2093, c713cef484, 5f7cbd3435, a4290b5863, f9b0a38642, 46636cf385, e24a6894a5, cf9289e21a, fe7bc954ba, 236989bf6e, 8a6a83bf52, 84b582ffa7, bd5176165d, 49f32c4d59, 0af5bc86f4, bc5a36c5f4, 2934d73db2, a4968f7235, b8e0c18c53, d0b3e0d9bb, 2a0719e00c, af394183e6, 5d675561ba, 27fb3f2777, aca84fe16a, 1117afc37a, 06c02de1cb
.claude/.gitignore (vendored, 3 lines changed)

```diff
@@ -1 +1,2 @@
 hans/
+skills/
```

```diff
@@ -1 +1,19 @@
+# Dependencies
 node_modules/
+**/node_modules/
+
+# Build outputs
+dist/
+**/dist/
+dist-electron/
+**/dist-electron/
+build/
+**/build/
+.next/
+**/.next/
+.nuxt/
+**/.nuxt/
+out/
+**/out/
+.cache/
+**/.cache/
```
.github/workflows/e2e-tests.yml (vendored, 35 lines changed)

```diff
@@ -31,24 +31,43 @@ jobs:
       - name: Build server
         run: npm run build --workspace=apps/server

+      - name: Set up Git user
+        run: |
+          git config --global user.name "GitHub CI"
+          git config --global user.email "ci@example.com"
+
       - name: Start backend server
         run: npm run start --workspace=apps/server &
         env:
           PORT: 3008
           NODE_ENV: test
+          # Use a deterministic API key so Playwright can log in reliably
+          AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
+          # Reduce log noise in CI
+          AUTOMAKER_HIDE_API_KEY: 'true'
+          # Avoid real API calls during CI
+          AUTOMAKER_MOCK_AGENT: 'true'
+          # Simulate containerized environment to skip sandbox confirmation dialogs
+          IS_CONTAINERIZED: 'true'

       - name: Wait for backend server
         run: |
           echo "Waiting for backend server to be ready..."
-          for i in {1..30}; do
-            if curl -s http://localhost:3008/api/health > /dev/null 2>&1; then
+          for i in {1..60}; do
+            if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
               echo "Backend server is ready!"
+              curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check response: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
               exit 0
             fi
-            echo "Waiting... ($i/30)"
+            echo "Waiting... ($i/60)"
             sleep 1
           done
           echo "Backend server failed to start!"
+          echo "Checking server status..."
+          ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
+          netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
+          echo "Testing health endpoint..."
+          curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"
           exit 1

       - name: Run E2E tests
@@ -59,6 +78,8 @@
           CI: true
           VITE_SERVER_URL: http://localhost:3008
           VITE_SKIP_SETUP: 'true'
+          # Keep UI-side login/defaults consistent
+          AUTOMAKER_API_KEY: test-api-key-for-e2e-tests

       - name: Upload Playwright report
         uses: actions/upload-artifact@v4
@@ -68,10 +89,12 @@
           path: apps/ui/playwright-report/
           retention-days: 7

-      - name: Upload test results
+      - name: Upload test results (screenshots, traces, videos)
         uses: actions/upload-artifact@v4
-        if: failure()
+        if: always()
         with:
           name: test-results
-          path: apps/ui/test-results/
+          path: |
+            apps/ui/test-results/
           retention-days: 7
+          if-no-files-found: ignore
```
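The readiness wait above is plain shell; the same poll can also live in test code, for example in a Playwright global setup. The sketch below is illustrative only (the endpoint URL and 60 × 1 s retry budget mirror the workflow; nothing here is part of the repository):

```typescript
// Hypothetical readiness poll for the E2E suite; mirrors the workflow's retry budget.
async function waitForBackend(url = 'http://localhost:3008/api/health', attempts = 60): Promise<void> {
  for (let i = 1; i <= attempts; i++) {
    try {
      const res = await fetch(url);
      if (res.ok) return; // server answered the health check
    } catch {
      // server not accepting connections yet; keep retrying
    }
    await new Promise((r) => setTimeout(r, 1000)); // wait 1s between attempts
  }
  throw new Error(`Backend did not become healthy after ${attempts} attempts`);
}
```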
.github/workflows/security-audit.yml (vendored, 2 lines changed)

```diff
@@ -26,5 +26,5 @@ jobs:
           check-lockfile: 'true'

       - name: Run npm audit
-        run: npm audit --audit-level=moderate
+        run: npm audit --audit-level=critical
         continue-on-error: false
```
Dockerfile (89 lines changed)

```diff
@@ -8,10 +8,12 @@
 # =============================================================================
 # BASE STAGE - Common setup for all builds (DRY: defined once, used by all)
 # =============================================================================
-FROM node:22-alpine AS base
+FROM node:22-slim AS base

 # Install build dependencies for native modules (node-pty)
-RUN apk add --no-cache python3 make g++
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3 make g++ \
+    && rm -rf /var/lib/apt/lists/*

 WORKDIR /app

@@ -51,30 +53,63 @@ RUN npm run build:packages && npm run build --workspace=apps/server
 # =============================================================================
 # SERVER PRODUCTION STAGE
 # =============================================================================
-FROM node:22-alpine AS server
+FROM node:22-slim AS server

-# Install git, curl, bash (for terminal), and GitHub CLI (pinned version, multi-arch)
-RUN apk add --no-cache git curl bash && \
-    GH_VERSION="2.63.2" && \
-    ARCH=$(uname -m) && \
-    case "$ARCH" in \
+# Build argument for tracking which commit this image was built from
+ARG GIT_COMMIT_SHA=unknown
+LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
+
+# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    git curl bash gosu ca-certificates openssh-client \
+    && GH_VERSION="2.63.2" \
+    && ARCH=$(uname -m) \
+    && case "$ARCH" in \
       x86_64) GH_ARCH="amd64" ;; \
       aarch64|arm64) GH_ARCH="arm64" ;; \
       *) echo "Unsupported architecture: $ARCH" && exit 1 ;; \
-    esac && \
-    curl -L "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${GH_ARCH}.tar.gz" -o gh.tar.gz && \
-    tar -xzf gh.tar.gz && \
-    mv gh_${GH_VERSION}_linux_${GH_ARCH}/bin/gh /usr/local/bin/gh && \
-    rm -rf gh.tar.gz gh_${GH_VERSION}_linux_${GH_ARCH}
+    esac \
+    && curl -L "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${GH_ARCH}.tar.gz" -o gh.tar.gz \
+    && tar -xzf gh.tar.gz \
+    && mv gh_${GH_VERSION}_linux_${GH_ARCH}/bin/gh /usr/local/bin/gh \
+    && rm -rf gh.tar.gz gh_${GH_VERSION}_linux_${GH_ARCH} \
+    && rm -rf /var/lib/apt/lists/*

-# Install Claude CLI globally
+# Install Claude CLI globally (available to all users via npm global bin)
 RUN npm install -g @anthropic-ai/claude-code

-WORKDIR /app
+# Create non-root user with home directory BEFORE installing Cursor CLI
+RUN groupadd -g 1001 automaker && \
+    useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
+    mkdir -p /home/automaker/.local/bin && \
+    mkdir -p /home/automaker/.cursor && \
+    chown -R automaker:automaker /home/automaker && \
+    chmod 700 /home/automaker/.cursor

-# Create non-root user
-RUN addgroup -g 1001 -S automaker && \
-    adduser -S automaker -u 1001
+# Install Cursor CLI as the automaker user
+# Set HOME explicitly and install to /home/automaker/.local/bin/
+USER automaker
+ENV HOME=/home/automaker
+RUN curl https://cursor.com/install -fsS | bash && \
+    echo "=== Checking Cursor CLI installation ===" && \
+    ls -la /home/automaker/.local/bin/ && \
+    echo "=== PATH is: $PATH ===" && \
+    (which cursor-agent && cursor-agent --version) || echo "cursor-agent installed (may need auth setup)"
+USER root
+
+# Add PATH to profile so it's available in all interactive shells (for login shells)
+RUN mkdir -p /etc/profile.d && \
+    echo 'export PATH="/home/automaker/.local/bin:$PATH"' > /etc/profile.d/cursor-cli.sh && \
+    chmod +x /etc/profile.d/cursor-cli.sh
+
+# Add to automaker's .bashrc for bash interactive shells
+RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /home/automaker/.bashrc && \
+    chown automaker:automaker /home/automaker/.bashrc
+
+# Also add to root's .bashrc since docker exec defaults to root
+RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /root/.bashrc
+
+WORKDIR /app

 # Copy root package.json (needed for workspace resolution)
 COPY --from=server-builder /app/package*.json ./
@@ -98,12 +133,19 @@ RUN git config --system --add safe.directory '*' && \
     # Use gh as credential helper (works with GH_TOKEN env var)
     git config --system credential.helper '!gh auth git-credential'

-# Switch to non-root user
-USER automaker
+# Copy entrypoint script for fixing permissions on mounted volumes
+COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+
+# Note: We stay as root here so entrypoint can fix permissions
+# The entrypoint script will switch to automaker user before running the command

 # Environment variables
 ENV PORT=3008
 ENV DATA_DIR=/data
+ENV HOME=/home/automaker
+# Add user's local bin to PATH for cursor-agent
+ENV PATH="/home/automaker/.local/bin:${PATH}"

 # Expose port
 EXPOSE 3008
@@ -112,6 +154,9 @@ EXPOSE 3008
 HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
   CMD curl -f http://localhost:3008/api/health || exit 1

+# Use entrypoint to fix permissions before starting
+ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
+
 # Start server
 CMD ["node", "apps/server/dist/index.js"]

@@ -143,6 +188,10 @@ RUN npm run build:packages && npm run build --workspace=apps/ui
 # =============================================================================
 FROM nginx:alpine AS ui

+# Build argument for tracking which commit this image was built from
+ARG GIT_COMMIT_SHA=unknown
+LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
+
 # Copy built files
 COPY --from=ui-builder /app/apps/ui/dist /usr/share/nginx/html
```
Dockerfile.dev (new file, 80 lines)

```dockerfile
# Automaker Development Dockerfile
# For development with live reload via volume mounting
# Source code is NOT copied - it's mounted as a volume
#
# Usage:
#   docker compose -f docker-compose.dev.yml up

FROM node:22-slim

# Install build dependencies for native modules (node-pty) and runtime tools
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 make g++ \
    git curl bash gosu ca-certificates openssh-client \
    && GH_VERSION="2.63.2" \
    && ARCH=$(uname -m) \
    && case "$ARCH" in \
      x86_64) GH_ARCH="amd64" ;; \
      aarch64|arm64) GH_ARCH="arm64" ;; \
      *) echo "Unsupported architecture: $ARCH" && exit 1 ;; \
    esac \
    && curl -L "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${GH_ARCH}.tar.gz" -o gh.tar.gz \
    && tar -xzf gh.tar.gz \
    && mv gh_${GH_VERSION}_linux_${GH_ARCH}/bin/gh /usr/local/bin/gh \
    && rm -rf gh.tar.gz gh_${GH_VERSION}_linux_${GH_ARCH} \
    && rm -rf /var/lib/apt/lists/*

# Install Claude CLI globally
RUN npm install -g @anthropic-ai/claude-code

# Create non-root user
RUN groupadd -g 1001 automaker && \
    useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
    mkdir -p /home/automaker/.local/bin && \
    mkdir -p /home/automaker/.cursor && \
    chown -R automaker:automaker /home/automaker && \
    chmod 700 /home/automaker/.cursor

# Install Cursor CLI as automaker user
USER automaker
ENV HOME=/home/automaker
RUN curl https://cursor.com/install -fsS | bash || true
USER root

# Add PATH to profile for Cursor CLI
RUN mkdir -p /etc/profile.d && \
    echo 'export PATH="/home/automaker/.local/bin:$PATH"' > /etc/profile.d/cursor-cli.sh && \
    chmod +x /etc/profile.d/cursor-cli.sh

# Add to user bashrc files
RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /home/automaker/.bashrc && \
    chown automaker:automaker /home/automaker/.bashrc
RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /root/.bashrc

WORKDIR /app

# Create directories with proper permissions
RUN mkdir -p /data /projects && chown automaker:automaker /data /projects

# Configure git for mounted volumes
RUN git config --system --add safe.directory '*' && \
    git config --system credential.helper '!gh auth git-credential'

# Copy entrypoint script
COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

# Environment variables
ENV PORT=3008
ENV DATA_DIR=/data
ENV HOME=/home/automaker
ENV PATH="/home/automaker/.local/bin:${PATH}"

# Expose both dev ports
EXPOSE 3007 3008

# Use entrypoint for permission handling
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]

# Default command - will be overridden by docker-compose
CMD ["npm", "run", "dev:web"]
```
README.md (19 lines changed)

````diff
@@ -117,24 +117,16 @@ cd automaker
 # 2. Install dependencies
 npm install

-# 3. Build shared packages (Now can be skipped npm install / run dev does it automaticly)
+# 3. Build shared packages (can be skipped - npm run dev does it automatically)
 npm run build:packages

-# 4. Start Automaker (production mode)
-npm run start
+# 4. Start Automaker
+npm run dev
 # Choose between:
 # 1. Web Application (browser at localhost:3007)
 # 2. Desktop Application (Electron - recommended)
 ```

-**Note:** The `npm run start` command will:
-
-- Check for dependencies and install if needed
-- Build the application if needed
-- Kill any processes on ports 3007/3008
-- Present an interactive menu to choose your run mode
-- Run in production mode (no hot reload)
-
 **Authentication Setup:** On first run, Automaker will automatically show a setup wizard where you can configure authentication. You can choose to:

 - Use **Claude Code CLI** (recommended) - Automaker will detect your CLI credentials automatically
@@ -150,7 +142,7 @@ export ANTHROPIC_API_KEY="sk-ant-..."
 echo "ANTHROPIC_API_KEY=sk-ant-..." > .env
 ```

-**For Development:** If you want to develop on Automaker with Vite live reload and hot module replacement, use `npm run dev` instead. This will start the development server with fast refresh and instant updates as you make changes.
+**For Development:** `npm run dev` starts the development server with Vite live reload and hot module replacement for fast refresh and instant updates as you make changes.

 ## How to Run

@@ -194,9 +186,6 @@ npm run dev:web
 ```bash
 # Build for web deployment (uses Vite)
 npm run build
-
-# Run production build
-npm run start
 ```

 #### Desktop Application
````
```diff
@@ -8,6 +8,20 @@
 # Your Anthropic API key for Claude models
 ANTHROPIC_API_KEY=sk-ant-...

+# ============================================
+# OPTIONAL - Additional API Keys
+# ============================================
+
+# OpenAI API key for Codex/GPT models
+OPENAI_API_KEY=sk-...
+
+# Cursor API key for Cursor models
+CURSOR_API_KEY=...
+
+# OAuth credentials for CLI authentication (extracted automatically)
+CLAUDE_OAUTH_CREDENTIALS=
+CURSOR_AUTH_TOKEN=
+
 # ============================================
 # OPTIONAL - Security
 # ============================================
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@automaker/server",
-  "version": "0.7.3",
+  "version": "0.9.0",
   "description": "Backend server for Automaker - provides API for both web and Electron modes",
   "author": "AutoMaker Team",
   "license": "SEE LICENSE IN LICENSE",
@@ -33,6 +33,7 @@
     "@automaker/types": "1.0.0",
     "@automaker/utils": "1.0.0",
     "@modelcontextprotocol/sdk": "1.25.1",
+    "@openai/codex-sdk": "^0.77.0",
     "cookie-parser": "1.4.7",
     "cors": "2.8.5",
     "dotenv": "17.2.3",
```
```diff
@@ -53,6 +53,8 @@ import { SettingsService } from './services/settings-service.js';
 import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
 import { createClaudeRoutes } from './routes/claude/index.js';
 import { ClaudeUsageService } from './services/claude-usage-service.js';
+import { createCodexRoutes } from './routes/codex/index.js';
+import { CodexUsageService } from './services/codex-usage-service.js';
 import { createGitHubRoutes } from './routes/github/index.js';
 import { createContextRoutes } from './routes/context/index.js';
 import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
@@ -166,6 +168,7 @@ const agentService = new AgentService(DATA_DIR, events, settingsService);
 const featureLoader = new FeatureLoader();
 const autoModeService = new AutoModeService(events, settingsService);
 const claudeUsageService = new ClaudeUsageService();
+const codexUsageService = new CodexUsageService();
 const mcpTestService = new MCPTestService(settingsService);
 const ideationService = new IdeationService(events, settingsService, featureLoader);
@@ -188,9 +191,10 @@ setInterval(() => {
 // This helps prevent CSRF and content-type confusion attacks
 app.use('/api', requireJsonContentType);

-// Mount API routes - health and auth are unauthenticated
+// Mount API routes - health, auth, and setup are unauthenticated
 app.use('/api/health', createHealthRoutes());
 app.use('/api/auth', createAuthRoutes());
+app.use('/api/setup', createSetupRoutes());

 // Apply authentication to all other routes
 app.use('/api', authMiddleware);
@@ -206,7 +210,6 @@ app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
 app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
 app.use('/api/worktree', createWorktreeRoutes());
 app.use('/api/git', createGitRoutes());
-app.use('/api/setup', createSetupRoutes());
 app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
 app.use('/api/models', createModelsRoutes());
 app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events, settingsService));
@@ -216,6 +219,7 @@ app.use('/api/templates', createTemplatesRoutes());
 app.use('/api/terminal', createTerminalRoutes());
 app.use('/api/settings', createSettingsRoutes(settingsService));
 app.use('/api/claude', createClaudeRoutes(claudeUsageService));
+app.use('/api/codex', createCodexRoutes(codexUsageService));
 app.use('/api/github', createGitHubRoutes(events, settingsService));
 app.use('/api/context', createContextRoutes(settingsService));
 app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
```
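The setup-route move matters because Express runs middleware in registration order: anything mounted before `authMiddleware` is reachable without credentials. A minimal sketch of that pattern (illustrative only; the handlers and middleware below stand in for the real factories in this repository):

```typescript
import express from 'express';

const app = express();

// Illustrative stand-ins for the repository's route factories and auth middleware.
const requireAuth: express.RequestHandler = (req, res, next) =>
  req.headers['x-api-key'] ? next() : res.status(401).json({ error: 'unauthorized' });
const publicRouter = express.Router().get('/', (_req, res) => res.json({ ok: true }));
const protectedRouter = express.Router().get('/', (_req, res) => res.json({ secret: true }));

// Public routes must be mounted BEFORE the auth middleware...
app.use('/api/health', publicRouter);
app.use('/api/setup', publicRouter);
// ...because everything mounted after this line requires authentication.
app.use('/api', requireAuth);
app.use('/api/settings', protectedRouter);
```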
apps/server/src/lib/agent-discovery.ts (new file, 257 lines)

```typescript
/**
 * Agent Discovery - Scans filesystem for AGENT.md files
 *
 * Discovers agents from:
 * - ~/.claude/agents/ (user-level, global)
 * - .claude/agents/ (project-level)
 *
 * Similar to Skills, but for custom subagents defined in AGENT.md files.
 */

import path from 'path';
import os from 'os';
import { createLogger } from '@automaker/utils';
import { secureFs, systemPaths } from '@automaker/platform';
import type { AgentDefinition } from '@automaker/types';

const logger = createLogger('AgentDiscovery');

export interface FilesystemAgent {
  name: string; // Directory name (e.g., 'code-reviewer')
  definition: AgentDefinition;
  source: 'user' | 'project';
  filePath: string; // Full path to AGENT.md
}

/**
 * Parse agent content string into AgentDefinition
 * Format:
 * ---
 * name: agent-name              # Optional
 * description: When to use this agent
 * tools: tool1, tool2, tool3    # Optional (comma or space separated list)
 * model: sonnet                 # Optional: sonnet, opus, haiku
 * ---
 * System prompt content here...
 */
function parseAgentContent(content: string, filePath: string): AgentDefinition | null {
  // Extract frontmatter
  const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---\n([\s\S]*)$/);
  if (!frontmatterMatch) {
    logger.warn(`Invalid agent file format (missing frontmatter): ${filePath}`);
    return null;
  }

  const [, frontmatter, prompt] = frontmatterMatch;

  // Parse description (required)
  const description = frontmatter.match(/description:\s*(.+)/)?.[1]?.trim();
  if (!description) {
    logger.warn(`Missing description in agent file: ${filePath}`);
    return null;
  }

  // Parse tools (optional) - supports both comma-separated and space-separated
  const toolsMatch = frontmatter.match(/tools:\s*(.+)/);
  const tools = toolsMatch
    ? toolsMatch[1]
        .split(/[,\s]+/) // Split by comma or whitespace
        .map((t) => t.trim())
        .filter((t) => t && t !== '')
    : undefined;

  // Parse model (optional) - validate against allowed values
  const modelMatch = frontmatter.match(/model:\s*(\w+)/);
  const modelValue = modelMatch?.[1]?.trim();
  const validModels = ['sonnet', 'opus', 'haiku', 'inherit'] as const;
  const model =
    modelValue && validModels.includes(modelValue as (typeof validModels)[number])
      ? (modelValue as 'sonnet' | 'opus' | 'haiku' | 'inherit')
      : undefined;

  if (modelValue && !model) {
    logger.warn(
      `Invalid model "${modelValue}" in agent file: ${filePath}. Expected one of: ${validModels.join(', ')}`
    );
  }

  return {
    description,
    prompt: prompt.trim(),
    tools,
    model,
  };
}

/**
 * Directory entry with type information
 */
interface DirEntry {
  name: string;
  isFile: boolean;
  isDirectory: boolean;
}

/**
 * Filesystem adapter interface for abstracting systemPaths vs secureFs
 */
interface FsAdapter {
  exists: (filePath: string) => Promise<boolean>;
  readdir: (dirPath: string) => Promise<DirEntry[]>;
  readFile: (filePath: string) => Promise<string>;
}

/**
 * Create a filesystem adapter for system paths (user directory)
 */
function createSystemPathAdapter(): FsAdapter {
  return {
    exists: (filePath) => Promise.resolve(systemPaths.systemPathExists(filePath)),
    readdir: async (dirPath) => {
      const entryNames = await systemPaths.systemPathReaddir(dirPath);
      const entries: DirEntry[] = [];
      for (const name of entryNames) {
        const stat = await systemPaths.systemPathStat(path.join(dirPath, name));
        entries.push({
          name,
          isFile: stat.isFile(),
          isDirectory: stat.isDirectory(),
        });
      }
      return entries;
    },
    readFile: (filePath) => systemPaths.systemPathReadFile(filePath, 'utf-8') as Promise<string>,
  };
}

/**
 * Create a filesystem adapter for project paths (secureFs)
 */
function createSecureFsAdapter(): FsAdapter {
  return {
    exists: (filePath) =>
      secureFs
        .access(filePath)
        .then(() => true)
        .catch(() => false),
    readdir: async (dirPath) => {
      const entries = await secureFs.readdir(dirPath, { withFileTypes: true });
      return entries.map((entry) => ({
        name: entry.name,
        isFile: entry.isFile(),
        isDirectory: entry.isDirectory(),
      }));
    },
    readFile: (filePath) => secureFs.readFile(filePath, 'utf-8') as Promise<string>,
  };
}

/**
 * Parse agent file using the provided filesystem adapter
 */
async function parseAgentFileWithAdapter(
  filePath: string,
  fsAdapter: FsAdapter
): Promise<AgentDefinition | null> {
  try {
    const content = await fsAdapter.readFile(filePath);
    return parseAgentContent(content, filePath);
  } catch (error) {
    logger.error(`Failed to parse agent file: ${filePath}`, error);
    return null;
  }
}

/**
 * Scan a directory for agent .md files
 * Agents can be in two formats:
 * 1. Flat: agent-name.md (file directly in agents/)
 * 2. Subdirectory: agent-name/AGENT.md (folder + file, similar to Skills)
 */
async function scanAgentsDirectory(
  baseDir: string,
  source: 'user' | 'project'
): Promise<FilesystemAgent[]> {
  const agents: FilesystemAgent[] = [];
  const fsAdapter = source === 'user' ? createSystemPathAdapter() : createSecureFsAdapter();

  try {
    // Check if directory exists
    const exists = await fsAdapter.exists(baseDir);
    if (!exists) {
      logger.debug(`Directory does not exist: ${baseDir}`);
      return agents;
    }

    // Read all entries in the directory
    const entries = await fsAdapter.readdir(baseDir);

    for (const entry of entries) {
      // Check for flat .md file format (agent-name.md)
      if (entry.isFile && entry.name.endsWith('.md')) {
        const agentName = entry.name.slice(0, -3); // Remove .md extension
        const agentFilePath = path.join(baseDir, entry.name);
        const definition = await parseAgentFileWithAdapter(agentFilePath, fsAdapter);
        if (definition) {
          agents.push({
            name: agentName,
            definition,
            source,
            filePath: agentFilePath,
          });
          logger.debug(`Discovered ${source} agent (flat): ${agentName}`);
        }
      }
      // Check for subdirectory format (agent-name/AGENT.md)
      else if (entry.isDirectory) {
        const agentFilePath = path.join(baseDir, entry.name, 'AGENT.md');
        const agentFileExists = await fsAdapter.exists(agentFilePath);

        if (agentFileExists) {
          const definition = await parseAgentFileWithAdapter(agentFilePath, fsAdapter);
          if (definition) {
            agents.push({
              name: entry.name,
              definition,
              source,
              filePath: agentFilePath,
            });
            logger.debug(`Discovered ${source} agent (subdirectory): ${entry.name}`);
          }
        }
      }
    }
  } catch (error) {
    logger.error(`Failed to scan agents directory: ${baseDir}`, error);
  }

  return agents;
}

/**
 * Discover all filesystem-based agents from user and project sources
 */
export async function discoverFilesystemAgents(
  projectPath?: string,
  sources: Array<'user' | 'project'> = ['user', 'project']
): Promise<FilesystemAgent[]> {
  const agents: FilesystemAgent[] = [];

  // Discover user-level agents from ~/.claude/agents/
  if (sources.includes('user')) {
    const userAgentsDir = path.join(os.homedir(), '.claude', 'agents');
    const userAgents = await scanAgentsDirectory(userAgentsDir, 'user');
    agents.push(...userAgents);
    logger.info(`Discovered ${userAgents.length} user-level agents from ${userAgentsDir}`);
  }

  // Discover project-level agents from .claude/agents/
  if (sources.includes('project') && projectPath) {
    const projectAgentsDir = path.join(projectPath, '.claude', 'agents');
    const projectAgents = await scanAgentsDirectory(projectAgentsDir, 'project');
    agents.push(...projectAgents);
    logger.info(`Discovered ${projectAgents.length} project-level agents from ${projectAgentsDir}`);
  }

  return agents;
}
```
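A quick usage sketch of the exported entry point (illustrative only; the call shape follows the signature above and the project path is a placeholder):

```typescript
import { discoverFilesystemAgents } from './agent-discovery.js';

// Scans ~/.claude/agents/ and <project>/.claude/agents/ for AGENT.md definitions.
const agents = await discoverFilesystemAgents('/path/to/project');
for (const agent of agents) {
  console.log(`[${agent.source}] ${agent.name}: ${agent.definition.description}`);
}
```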
apps/server/src/lib/auth-utils.ts (new file, 263 lines)

```typescript
/**
 * Secure authentication utilities that avoid environment variable race conditions
 */

import { spawn } from 'child_process';
import { createLogger } from '@automaker/utils';

const logger = createLogger('AuthUtils');

export interface SecureAuthEnv {
  [key: string]: string | undefined;
}

export interface AuthValidationResult {
  isValid: boolean;
  error?: string;
  normalizedKey?: string;
}

/**
 * Validates API key format without modifying process.env
 */
export function validateApiKey(
  key: string,
  provider: 'anthropic' | 'openai' | 'cursor'
): AuthValidationResult {
  if (!key || typeof key !== 'string' || key.trim().length === 0) {
    return { isValid: false, error: 'API key is required' };
  }

  const trimmedKey = key.trim();

  switch (provider) {
    case 'anthropic':
      if (!trimmedKey.startsWith('sk-ant-')) {
        return {
          isValid: false,
          error: 'Invalid Anthropic API key format. Should start with "sk-ant-"',
        };
      }
      if (trimmedKey.length < 20) {
        return { isValid: false, error: 'Anthropic API key too short' };
      }
      break;

    case 'openai':
      if (!trimmedKey.startsWith('sk-')) {
        return { isValid: false, error: 'Invalid OpenAI API key format. Should start with "sk-"' };
      }
      if (trimmedKey.length < 20) {
        return { isValid: false, error: 'OpenAI API key too short' };
      }
      break;

    case 'cursor':
      // Cursor API keys might have different format
      if (trimmedKey.length < 10) {
        return { isValid: false, error: 'Cursor API key too short' };
      }
      break;
  }

  return { isValid: true, normalizedKey: trimmedKey };
}

/**
 * Creates a secure environment object for authentication testing
 * without modifying the global process.env
 */
export function createSecureAuthEnv(
  authMethod: 'cli' | 'api_key',
  apiKey?: string,
  provider: 'anthropic' | 'openai' | 'cursor' = 'anthropic'
): SecureAuthEnv {
  const env: SecureAuthEnv = { ...process.env };

  if (authMethod === 'cli') {
    // For CLI auth, remove the API key to force CLI authentication
    const envKey = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
    delete env[envKey];
  } else if (authMethod === 'api_key' && apiKey) {
    // For API key auth, validate and set the provided key
    const validation = validateApiKey(apiKey, provider);
    if (!validation.isValid) {
      throw new Error(validation.error);
    }
    const envKey = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
    env[envKey] = validation.normalizedKey;
  }

  return env;
}

/**
 * Creates a temporary environment override for the current process
 * WARNING: This should only be used in isolated contexts and immediately cleaned up
 */
export function createTempEnvOverride(authEnv: SecureAuthEnv): () => void {
  const originalEnv = { ...process.env };

  // Apply the auth environment
  Object.assign(process.env, authEnv);

  // Return cleanup function
  return () => {
    // Restore original environment
    Object.keys(process.env).forEach((key) => {
      if (!(key in originalEnv)) {
        delete process.env[key];
      }
    });
    Object.assign(process.env, originalEnv);
  };
}

/**
 * Spawns a process with secure environment isolation
 */
export function spawnSecureAuth(
  command: string,
  args: string[],
  authEnv: SecureAuthEnv,
  options: {
    cwd?: string;
    timeout?: number;
  } = {}
): Promise<{ stdout: string; stderr: string; exitCode: number | null }> {
  return new Promise((resolve, reject) => {
    const { cwd = process.cwd(), timeout = 30000 } = options;

    logger.debug(`Spawning secure auth process: ${command} ${args.join(' ')}`);

    const child = spawn(command, args, {
      cwd,
      env: authEnv,
      stdio: 'pipe',
      shell: false,
    });

    let stdout = '';
    let stderr = '';
    let isResolved = false;

    const timeoutId = setTimeout(() => {
      if (!isResolved) {
        child.kill('SIGTERM');
        isResolved = true;
        reject(new Error('Authentication process timed out'));
      }
    }, timeout);

    child.stdout?.on('data', (data) => {
      stdout += data.toString();
    });

    child.stderr?.on('data', (data) => {
      stderr += data.toString();
    });

    child.on('close', (code) => {
      clearTimeout(timeoutId);
      if (!isResolved) {
        isResolved = true;
        resolve({ stdout, stderr, exitCode: code });
      }
    });

    child.on('error', (error) => {
      clearTimeout(timeoutId);
      if (!isResolved) {
        isResolved = true;
        reject(error);
      }
    });
  });
}

/**
 * Safely extracts environment variable without race conditions
 */
export function safeGetEnv(key: string): string | undefined {
  return process.env[key];
}

/**
 * Checks if an environment variable would be modified without actually modifying it
 */
export function wouldModifyEnv(key: string, newValue: string): boolean {
  const currentValue = safeGetEnv(key);
  return currentValue !== newValue;
}

/**
 * Secure auth session management
 */
export class AuthSessionManager {
  private static activeSessions = new Map<string, SecureAuthEnv>();

  static createSession(
    sessionId: string,
    authMethod: 'cli' | 'api_key',
    apiKey?: string,
    provider: 'anthropic' | 'openai' | 'cursor' = 'anthropic'
  ): SecureAuthEnv {
    const env = createSecureAuthEnv(authMethod, apiKey, provider);
    this.activeSessions.set(sessionId, env);
    return env;
  }

  static getSession(sessionId: string): SecureAuthEnv | undefined {
    return this.activeSessions.get(sessionId);
  }

  static destroySession(sessionId: string): void {
    this.activeSessions.delete(sessionId);
  }

  static cleanup(): void {
    this.activeSessions.clear();
  }
}

/**
 * Rate limiting for auth attempts to prevent abuse
 */
export class AuthRateLimiter {
  private attempts = new Map<string, { count: number; lastAttempt: number }>();

  constructor(
    private maxAttempts = 5,
    private windowMs = 60000
  ) {}

  canAttempt(identifier: string): boolean {
    const now = Date.now();
    const record = this.attempts.get(identifier);

    if (!record || now - record.lastAttempt > this.windowMs) {
      this.attempts.set(identifier, { count: 1, lastAttempt: now });
      return true;
    }

    if (record.count >= this.maxAttempts) {
      return false;
    }

    record.count++;
    record.lastAttempt = now;
    return true;
  }

  getRemainingAttempts(identifier: string): number {
    const record = this.attempts.get(identifier);
    if (!record) return this.maxAttempts;
    return Math.max(0, this.maxAttempts - record.count);
  }

  getResetTime(identifier: string): Date | null {
    const record = this.attempts.get(identifier);
    if (!record) return null;
    return new Date(record.lastAttempt + this.windowMs);
  }
}
```
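A minimal usage sketch of the environment-isolation helpers (illustrative only; the CLI command, arguments, and identifier are placeholders, not the repository's actual call sites):

```typescript
import { createSecureAuthEnv, spawnSecureAuth, AuthRateLimiter } from './auth-utils.js';

const limiter = new AuthRateLimiter(5, 60_000);

if (limiter.canAttempt('user-123')) {
  // Build an env copy with ANTHROPIC_API_KEY removed so the CLI's own login is exercised,
  // then run the check in a child process instead of mutating process.env.
  const env = createSecureAuthEnv('cli', undefined, 'anthropic');
  const { exitCode, stdout } = await spawnSecureAuth('claude', ['--version'], env, { timeout: 10_000 });
  console.log(exitCode === 0 ? stdout.trim() : 'CLI check failed');
}
```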
```diff
@@ -262,7 +262,7 @@ export function getSessionCookieOptions(): {
   return {
     httpOnly: true, // JavaScript cannot access this cookie
     secure: process.env.NODE_ENV === 'production', // HTTPS only in production
-    sameSite: 'strict', // Only sent for same-site requests (CSRF protection)
+    sameSite: 'lax', // Sent for same-site requests and top-level navigations, but not cross-origin fetch/XHR
     maxAge: SESSION_MAX_AGE_MS,
     path: '/',
   };
```
apps/server/src/lib/cli-detection.ts (new file, 447 lines)

```typescript
/**
 * Unified CLI Detection Framework
 *
 * Provides consistent CLI detection and management across all providers
 */

import { spawn, execSync } from 'child_process';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import { createLogger } from '@automaker/utils';

const logger = createLogger('CliDetection');

export interface CliInfo {
  name: string;
  command: string;
  version?: string;
  path?: string;
  installed: boolean;
  authenticated: boolean;
  authMethod: 'cli' | 'api_key' | 'none';
  platform?: string;
  architectures?: string[];
}

export interface CliDetectionOptions {
  timeout?: number;
  includeWsl?: boolean;
  wslDistribution?: string;
}

export interface CliDetectionResult {
  cli: CliInfo;
  detected: boolean;
  issues: string[];
}

export interface UnifiedCliDetection {
  claude?: CliDetectionResult;
  codex?: CliDetectionResult;
  cursor?: CliDetectionResult;
}

/**
 * CLI Configuration for different providers
 */
const CLI_CONFIGS = {
  claude: {
    name: 'Claude CLI',
    commands: ['claude'],
    versionArgs: ['--version'],
    installCommands: {
      darwin: 'brew install anthropics/claude/claude',
      linux: 'curl -fsSL https://claude.ai/install.sh | sh',
      win32: 'iwr https://claude.ai/install.ps1 -UseBasicParsing | iex',
    },
  },
  codex: {
    name: 'Codex CLI',
    commands: ['codex', 'openai'],
    versionArgs: ['--version'],
    installCommands: {
      darwin: 'npm install -g @openai/codex-cli',
      linux: 'npm install -g @openai/codex-cli',
      win32: 'npm install -g @openai/codex-cli',
    },
  },
  cursor: {
    name: 'Cursor CLI',
    commands: ['cursor-agent', 'cursor'],
    versionArgs: ['--version'],
    installCommands: {
      darwin: 'brew install cursor/cursor/cursor-agent',
      linux: 'curl -fsSL https://cursor.sh/install.sh | sh',
      win32: 'iwr https://cursor.sh/install.ps1 -UseBasicParsing | iex',
    },
  },
} as const;

/**
 * Detect if a CLI is installed and available
 */
export async function detectCli(
  provider: keyof typeof CLI_CONFIGS,
  options: CliDetectionOptions = {}
): Promise<CliDetectionResult> {
  const config = CLI_CONFIGS[provider];
  const { timeout = 5000, includeWsl = false, wslDistribution } = options;
  const issues: string[] = [];

  const cliInfo: CliInfo = {
    name: config.name,
    command: '',
    installed: false,
    authenticated: false,
    authMethod: 'none',
  };

  try {
    // Find the command in PATH
    const command = await findCommand([...config.commands]);
    if (command) {
      cliInfo.command = command;
    }

    if (!cliInfo.command) {
      issues.push(`${config.name} not found in PATH`);
      return { cli: cliInfo, detected: false, issues };
    }

    cliInfo.path = cliInfo.command;
    cliInfo.installed = true;

    // Get version
    try {
      cliInfo.version = await getCliVersion(cliInfo.command, [...config.versionArgs], timeout);
    } catch (error) {
      issues.push(`Failed to get ${config.name} version: ${error}`);
    }

    // Check authentication
    cliInfo.authMethod = await checkCliAuth(provider, cliInfo.command);
    cliInfo.authenticated = cliInfo.authMethod !== 'none';

    return { cli: cliInfo, detected: true, issues };
  } catch (error) {
    issues.push(`Error detecting ${config.name}: ${error}`);
    return { cli: cliInfo, detected: false, issues };
  }
}

/**
 * Detect all CLIs in the system
 */
export async function detectAllCLis(
  options: CliDetectionOptions = {}
): Promise<UnifiedCliDetection> {
  const results: UnifiedCliDetection = {};

  // Detect all providers in parallel
  const providers = Object.keys(CLI_CONFIGS) as Array<keyof typeof CLI_CONFIGS>;
  const detectionPromises = providers.map(async (provider) => {
    const result = await detectCli(provider, options);
    return { provider, result };
  });

  const detections = await Promise.all(detectionPromises);

  for (const { provider, result } of detections) {
    results[provider] = result;
  }

  return results;
}

/**
 * Find the first available command from a list of alternatives
 */
export async function findCommand(commands: string[]): Promise<string | null> {
  for (const command of commands) {
    try {
      const whichCommand = process.platform === 'win32' ? 'where' : 'which';
      const result = execSync(`${whichCommand} ${command}`, {
        encoding: 'utf8',
        timeout: 2000,
      }).trim();

      if (result) {
        return result.split('\n')[0]; // Take first result on Windows
      }
    } catch {
      // Command not found, try next
    }
  }
  return null;
}

/**
 * Get CLI version
 */
export async function getCliVersion(
  command: string,
  args: string[],
  timeout: number = 5000
): Promise<string> {
  return new Promise((resolve, reject) => {
    const child = spawn(command, args, {
      stdio: 'pipe',
      timeout,
    });

    let stdout = '';
    let stderr = '';

    child.stdout?.on('data', (data) => {
      stdout += data.toString();
    });

    child.stderr?.on('data', (data) => {
      stderr += data.toString();
    });

    child.on('close', (code) => {
      if (code === 0 && stdout) {
        resolve(stdout.trim());
      } else if (stderr) {
        reject(stderr.trim());
      } else {
        reject(`Command exited with code ${code}`);
      }
    });

    child.on('error', reject);
  });
}

/**
 * Check authentication status for a CLI
 */
export async function checkCliAuth(
  provider: keyof typeof CLI_CONFIGS,
  command: string
): Promise<'cli' | 'api_key' | 'none'> {
  try {
    switch (provider) {
      case 'claude':
        return await checkClaudeAuth(command);
      case 'codex':
        return await checkCodexAuth(command);
      case 'cursor':
        return await checkCursorAuth(command);
      default:
        return 'none';
    }
  } catch {
    return 'none';
  }
}

/**
 * Check Claude CLI authentication
 */
async function checkClaudeAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
  try {
    // Check for environment variable
    if (process.env.ANTHROPIC_API_KEY) {
      return 'api_key';
    }

    // Try running a simple command to check CLI auth
    const result = await getCliVersion(command, ['--version'], 3000);
    if (result) {
      return 'cli'; // If version works, assume CLI is authenticated
    }
  } catch {
    // Version command might work even without auth, so we need a better check
  }

  // Try a more specific auth check
  return new Promise((resolve) => {
    const child = spawn(command, ['whoami'], {
      stdio: 'pipe',
      timeout: 3000,
    });

    let stdout = '';
    let stderr = '';

    child.stdout?.on('data', (data) => {
      stdout += data.toString();
    });

    child.stderr?.on('data', (data) => {
      stderr += data.toString();
    });

    child.on('close', (code) => {
      if (code === 0 && stdout && !stderr.includes('not authenticated')) {
        resolve('cli');
      } else {
        resolve('none');
      }
    });

    child.on('error', () => {
      resolve('none');
    });
  });
}

/**
 * Check Codex CLI authentication
 */
async function checkCodexAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
  // Check for environment variable
  if (process.env.OPENAI_API_KEY) {
    return 'api_key';
  }

  try {
    // Try a simple auth check
    const result = await getCliVersion(command, ['--version'], 3000);
    if (result) {
      return 'cli';
    }
  } catch {
    // Version check failed
  }

  return 'none';
}

/**
 * Check Cursor CLI authentication
 */
async function checkCursorAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
  // Check for environment variable
  if (process.env.CURSOR_API_KEY) {
    return 'api_key';
  }

  // Check for credentials files
  const credentialPaths = [
    path.join(os.homedir(), '.cursor', 'credentials.json'),
    path.join(os.homedir(), '.config', 'cursor', 'credentials.json'),
    path.join(os.homedir(), '.cursor', 'auth.json'),
    path.join(os.homedir(), '.config', 'cursor', 'auth.json'),
  ];

  for (const credPath of credentialPaths) {
    try {
      if (fs.existsSync(credPath)) {
        const content = fs.readFileSync(credPath, 'utf8');
        const creds = JSON.parse(content);
        if (creds.accessToken || creds.token || creds.apiKey) {
          return 'cli';
        }
      }
    } catch {
      // Invalid credentials file
    }
  }

  // Try a simple command
  try {
    const result = await getCliVersion(command, ['--version'], 3000);
    if (result) {
      return 'cli';
    }
  } catch {
    // Version check failed
  }

  return 'none';
}

/**
 * Get installation instructions for a provider
 */
export function getInstallInstructions(
  provider: keyof typeof CLI_CONFIGS,
  platform: NodeJS.Platform = process.platform
): string {
  const config = CLI_CONFIGS[provider];
  const command = config.installCommands[platform as keyof typeof config.installCommands];

  if (!command) {
    return `No installation instructions available for ${provider} on ${platform}`;
  }

  return command;
}

/**
 * Get platform-specific CLI paths and versions
 */
export function getPlatformCliPaths(provider: keyof typeof CLI_CONFIGS): string[] {
  const config = CLI_CONFIGS[provider];
  const platform = process.platform;

  switch (platform) {
    case 'darwin':
      return [
        `/usr/local/bin/${config.commands[0]}`,
        `/opt/homebrew/bin/${config.commands[0]}`,
        path.join(os.homedir(), '.local', 'bin', config.commands[0]),
      ];

    case 'linux':
      return [
        `/usr/bin/${config.commands[0]}`,
        `/usr/local/bin/${config.commands[0]}`,
        path.join(os.homedir(), '.local', 'bin', config.commands[0]),
        path.join(os.homedir(), '.npm', 'global', 'bin', config.commands[0]),
      ];

    case 'win32':
      return [
        path.join(
          os.homedir(),
          'AppData',
          'Local',
          'Programs',
          config.commands[0],
          `${config.commands[0]}.exe`
        ),
        path.join(process.env.ProgramFiles || '', config.commands[0], `${config.commands[0]}.exe`),
        path.join(
          process.env.ProgramFiles || '',
          config.commands[0],
          'bin',
          `${config.commands[0]}.exe`
        ),
      ];

    default:
      return [];
  }
}

/**
 * Validate CLI installation
 */
export function validateCliInstallation(cliInfo: CliInfo): {
  valid: boolean;
  issues: string[];
} {
  const issues: string[] = [];

  if (!cliInfo.installed) {
    issues.push('CLI is not installed');
  }

  if (cliInfo.installed && !cliInfo.version) {
    issues.push('Could not determine CLI version');
  }

  if (cliInfo.installed && cliInfo.authMethod === 'none') {
    issues.push('CLI is not authenticated');
  }

  return {
    valid: issues.length === 0,
    issues,
  };
}
```
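A small usage sketch of the detection API (illustrative only; the reporting logic around the exported functions is not from the repository):

```typescript
import { detectAllCLis, getInstallInstructions } from './cli-detection.js';

// Probe Claude, Codex, and Cursor CLIs in parallel and report what is missing.
const detection = await detectAllCLis({ timeout: 5000 });
for (const [provider, result] of Object.entries(detection)) {
  if (!result?.detected) {
    console.log(`${provider} missing; install with: ${getInstallInstructions(provider as 'claude' | 'codex' | 'cursor')}`);
  } else {
    console.log(`${provider}: ${result.cli.version ?? 'unknown version'} (auth: ${result.cli.authMethod})`);
  }
}
```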
apps/server/src/lib/codex-auth.ts (new file, 98 lines)

```typescript
/**
 * Shared utility for checking Codex CLI authentication status
 *
 * Uses 'codex login status' command to verify authentication.
 * Never assumes authenticated - only returns true if CLI confirms.
 */

import { spawnProcess, getCodexAuthPath } from '@automaker/platform';
import { findCodexCliPath } from '@automaker/platform';
import * as fs from 'fs';

const CODEX_COMMAND = 'codex';
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';

export interface CodexAuthCheckResult {
  authenticated: boolean;
  method: 'api_key_env' | 'cli_authenticated' | 'none';
}

/**
 * Check Codex authentication status using 'codex login status' command
 *
 * @param cliPath Optional CLI path. If not provided, will attempt to find it.
 * @returns Authentication status and method
 */
export async function checkCodexAuthentication(
  cliPath?: string | null
): Promise<CodexAuthCheckResult> {
  console.log('[CodexAuth] checkCodexAuthentication called with cliPath:', cliPath);

  const resolvedCliPath = cliPath || (await findCodexCliPath());
  const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];

  console.log('[CodexAuth] resolvedCliPath:', resolvedCliPath);
  console.log('[CodexAuth] hasApiKey:', hasApiKey);

  // Debug: Check auth file
  const authFilePath = getCodexAuthPath();
  console.log('[CodexAuth] Auth file path:', authFilePath);
  try {
    const authFileExists = fs.existsSync(authFilePath);
    console.log('[CodexAuth] Auth file exists:', authFileExists);
    if (authFileExists) {
      const authContent = fs.readFileSync(authFilePath, 'utf-8');
      console.log('[CodexAuth] Auth file content:', authContent.substring(0, 500)); // First 500 chars
    }
  } catch (error) {
    console.log('[CodexAuth] Error reading auth file:', error);
  }

  // If CLI is not installed, cannot be authenticated
  if (!resolvedCliPath) {
    console.log('[CodexAuth] No CLI path found, returning not authenticated');
    return { authenticated: false, method: 'none' };
  }

  try {
    console.log('[CodexAuth] Running: ' + resolvedCliPath + ' login status');
    const result = await spawnProcess({
      command: resolvedCliPath || CODEX_COMMAND,
      args: ['login', 'status'],
      cwd: process.cwd(),
      env: {
        ...process.env,
        TERM: 'dumb', // Avoid interactive output
      },
    });

    console.log('[CodexAuth] Command result:');
    console.log('[CodexAuth] exitCode:', result.exitCode);
    console.log('[CodexAuth] stdout:', JSON.stringify(result.stdout));
    console.log('[CodexAuth] stderr:', JSON.stringify(result.stderr));

    // Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
    const combinedOutput = (result.stdout + result.stderr).toLowerCase();
    const isLoggedIn = combinedOutput.includes('logged in');
    console.log('[CodexAuth] isLoggedIn (contains "logged in" in stdout or stderr):', isLoggedIn);

    if (result.exitCode === 0 && isLoggedIn) {
      // Determine auth method based on what we know
      const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
      console.log('[CodexAuth] Authenticated! method:', method);
      return { authenticated: true, method };
    }

    console.log(
      '[CodexAuth] Not authenticated. exitCode:',
      result.exitCode,
      'isLoggedIn:',
      isLoggedIn
    );
  } catch (error) {
    console.log('[CodexAuth] Error running command:', error);
  }

  console.log('[CodexAuth] Returning not authenticated');
  return { authenticated: false, method: 'none' };
}
```
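A typical call site would look roughly like this (illustrative only; the status helper shown is not from the repository):

```typescript
import { checkCodexAuthentication } from './codex-auth.js';

// Hypothetical status helper: reports whether the Codex CLI is usable right now.
export async function getCodexStatus() {
  const auth = await checkCodexAuthentication(); // resolves the CLI path itself when none is passed
  return {
    ready: auth.authenticated,
    method: auth.method, // 'api_key_env' | 'cli_authenticated' | 'none'
  };
}
```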
apps/server/src/lib/error-handler.ts (new file, 414 lines)

```typescript
/**
 * Unified Error Handling System for CLI Providers
 *
 * Provides consistent error classification, user-friendly messages, and debugging support
 * across all AI providers (Claude, Codex, Cursor)
 */

import { createLogger } from '@automaker/utils';

const logger = createLogger('ErrorHandler');

export enum ErrorType {
  AUTHENTICATION = 'authentication',
  BILLING = 'billing',
  RATE_LIMIT = 'rate_limit',
  NETWORK = 'network',
  TIMEOUT = 'timeout',
  VALIDATION = 'validation',
  PERMISSION = 'permission',
  CLI_NOT_FOUND = 'cli_not_found',
  CLI_NOT_INSTALLED = 'cli_not_installed',
  MODEL_NOT_SUPPORTED = 'model_not_supported',
  INVALID_REQUEST = 'invalid_request',
  SERVER_ERROR = 'server_error',
  UNKNOWN = 'unknown',
}

export enum ErrorSeverity {
  LOW = 'low',
  MEDIUM = 'medium',
  HIGH = 'high',
  CRITICAL = 'critical',
}

export interface ErrorClassification {
  type: ErrorType;
  severity: ErrorSeverity;
  userMessage: string;
  technicalMessage: string;
  suggestedAction?: string;
  retryable: boolean;
  provider?: string;
  context?: Record<string, any>;
}

export interface ErrorPattern {
  type: ErrorType;
  severity: ErrorSeverity;
  patterns: RegExp[];
  userMessage: string;
  suggestedAction?: string;
  retryable: boolean;
}

/**
 * Error patterns for different types of errors
 */
const ERROR_PATTERNS: ErrorPattern[] = [
  // Authentication errors
  {
    type: ErrorType.AUTHENTICATION,
    severity: ErrorSeverity.HIGH,
    patterns: [
      /unauthorized/i,
      /authentication.*fail/i,
      /invalid_api_key/i,
      /invalid api key/i,
      /not authenticated/i,
      /please.*log/i,
      /token.*revoked/i,
      /oauth.*error/i,
      /credentials.*invalid/i,
    ],
    userMessage: 'Authentication failed. Please check your API key or login credentials.',
    suggestedAction:
      "Verify your API key is correct and hasn't expired, or run the CLI login command.",
    retryable: false,
  },

  // Billing errors
  {
    type: ErrorType.BILLING,
    severity: ErrorSeverity.HIGH,
    patterns: [
      /credit.*balance.*low/i,
      /insufficient.*credit/i,
      /billing.*issue/i,
      /payment.*required/i,
      /usage.*exceeded/i,
      /quota.*exceeded/i,
      /add.*credit/i,
    ],
    userMessage: 'Account has insufficient credits or billing issues.',
    suggestedAction: 'Please add credits to your account or check your billing settings.',
    retryable: false,
  },

  // Rate limit errors
  {
    type: ErrorType.RATE_LIMIT,
    severity: ErrorSeverity.MEDIUM,
    patterns: [
      /rate.*limit/i,
      /too.*many.*request/i,
      /limit.*reached/i,
      /try.*later/i,
      /429/i,
      /reset.*time/i,
      /upgrade.*plan/i,
    ],
    userMessage: 'Rate limit reached. Please wait before trying again.',
    suggestedAction: 'Wait a few minutes before retrying, or consider upgrading your plan.',
    retryable: true,
  },

  // Network errors
  {
    type: ErrorType.NETWORK,
    severity: ErrorSeverity.MEDIUM,
    patterns: [/network/i, /connection/i, /dns/i, /timeout/i, /econnrefused/i, /enotfound/i],
    userMessage: 'Network connection issue.',
```
|
||||
suggestedAction: 'Check your internet connection and try again.',
|
||||
retryable: true,
|
||||
},
|
||||
|
||||
// Timeout errors
|
||||
{
|
||||
type: ErrorType.TIMEOUT,
|
||||
severity: ErrorSeverity.MEDIUM,
|
||||
patterns: [/timeout/i, /aborted/i, /time.*out/i],
|
||||
userMessage: 'Operation timed out.',
|
||||
suggestedAction: 'Try again with a simpler request or check your connection.',
|
||||
retryable: true,
|
||||
},
|
||||
|
||||
// Permission errors
|
||||
{
|
||||
type: ErrorType.PERMISSION,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/permission.*denied/i, /access.*denied/i, /forbidden/i, /403/i, /not.*authorized/i],
|
||||
userMessage: 'Permission denied.',
|
||||
suggestedAction: 'Check if you have the required permissions for this operation.',
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// CLI not found
|
||||
{
|
||||
type: ErrorType.CLI_NOT_FOUND,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/command not found/i, /not recognized/i, /not.*installed/i, /ENOENT/i],
|
||||
userMessage: 'CLI tool not found.',
|
||||
suggestedAction: "Please install the required CLI tool and ensure it's in your PATH.",
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// Model not supported
|
||||
{
|
||||
type: ErrorType.MODEL_NOT_SUPPORTED,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/model.*not.*support/i, /unknown.*model/i, /invalid.*model/i],
|
||||
userMessage: 'Model not supported.',
|
||||
suggestedAction: 'Check available models and use a supported one.',
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// Server errors
|
||||
{
|
||||
type: ErrorType.SERVER_ERROR,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/internal.*server/i, /server.*error/i, /500/i, /502/i, /503/i, /504/i],
|
||||
userMessage: 'Server error occurred.',
|
||||
suggestedAction: 'Try again in a few minutes or contact support if the issue persists.',
|
||||
retryable: true,
|
||||
},
|
||||
];
|
||||
|
||||
/**
|
||||
* Classify an error into a specific type with user-friendly message
|
||||
*/
|
||||
export function classifyError(
|
||||
error: unknown,
|
||||
provider?: string,
|
||||
context?: Record<string, any>
|
||||
): ErrorClassification {
|
||||
const errorText = getErrorText(error);
|
||||
|
||||
// Try to match against known patterns
|
||||
for (const pattern of ERROR_PATTERNS) {
|
||||
for (const regex of pattern.patterns) {
|
||||
if (regex.test(errorText)) {
|
||||
return {
|
||||
type: pattern.type,
|
||||
severity: pattern.severity,
|
||||
userMessage: pattern.userMessage,
|
||||
technicalMessage: errorText,
|
||||
suggestedAction: pattern.suggestedAction,
|
||||
retryable: pattern.retryable,
|
||||
provider,
|
||||
context,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Unknown error
|
||||
return {
|
||||
type: ErrorType.UNKNOWN,
|
||||
severity: ErrorSeverity.MEDIUM,
|
||||
userMessage: 'An unexpected error occurred.',
|
||||
technicalMessage: errorText,
|
||||
suggestedAction: 'Please try again or contact support if the issue persists.',
|
||||
retryable: true,
|
||||
provider,
|
||||
context,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a user-friendly error message
|
||||
*/
|
||||
export function getUserFriendlyErrorMessage(error: unknown, provider?: string): string {
|
||||
const classification = classifyError(error, provider);
|
||||
|
||||
let message = classification.userMessage;
|
||||
|
||||
if (classification.suggestedAction) {
|
||||
message += ` ${classification.suggestedAction}`;
|
||||
}
|
||||
|
||||
// Add provider-specific context if available
|
||||
if (provider) {
|
||||
message = `[${provider.toUpperCase()}] ${message}`;
|
||||
}
|
||||
|
||||
return message;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is retryable
|
||||
*/
|
||||
export function isRetryableError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.retryable;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is authentication-related
|
||||
*/
|
||||
export function isAuthenticationError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.type === ErrorType.AUTHENTICATION;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is billing-related
|
||||
*/
|
||||
export function isBillingError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.type === ErrorType.BILLING;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is rate limit related
|
||||
*/
|
||||
export function isRateLimitError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.type === ErrorType.RATE_LIMIT;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get error text from various error types
|
||||
*/
|
||||
function getErrorText(error: unknown): string {
|
||||
if (typeof error === 'string') {
|
||||
return error;
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
if (typeof error === 'object' && error !== null) {
|
||||
// Handle structured error objects
|
||||
const errorObj = error as any;
|
||||
|
||||
if (errorObj.message) {
|
||||
return errorObj.message;
|
||||
}
|
||||
|
||||
if (errorObj.error?.message) {
|
||||
return errorObj.error.message;
|
||||
}
|
||||
|
||||
if (errorObj.error) {
|
||||
return typeof errorObj.error === 'string' ? errorObj.error : JSON.stringify(errorObj.error);
|
||||
}
|
||||
|
||||
return JSON.stringify(error);
|
||||
}
|
||||
|
||||
return String(error);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a standardized error response
|
||||
*/
|
||||
export function createErrorResponse(
|
||||
error: unknown,
|
||||
provider?: string,
|
||||
context?: Record<string, any>
|
||||
): {
|
||||
success: false;
|
||||
error: string;
|
||||
errorType: ErrorType;
|
||||
severity: ErrorSeverity;
|
||||
retryable: boolean;
|
||||
suggestedAction?: string;
|
||||
} {
|
||||
const classification = classifyError(error, provider, context);
|
||||
|
||||
return {
|
||||
success: false,
|
||||
error: classification.userMessage,
|
||||
errorType: classification.type,
|
||||
severity: classification.severity,
|
||||
retryable: classification.retryable,
|
||||
suggestedAction: classification.suggestedAction,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Log error with full context
|
||||
*/
|
||||
export function logError(
|
||||
error: unknown,
|
||||
provider?: string,
|
||||
operation?: string,
|
||||
additionalContext?: Record<string, any>
|
||||
): void {
|
||||
const classification = classifyError(error, provider, {
|
||||
operation,
|
||||
...additionalContext,
|
||||
});
|
||||
|
||||
logger.error(`Error in ${provider || 'unknown'}${operation ? ` during ${operation}` : ''}`, {
|
||||
type: classification.type,
|
||||
severity: classification.severity,
|
||||
message: classification.userMessage,
|
||||
technicalMessage: classification.technicalMessage,
|
||||
retryable: classification.retryable,
|
||||
suggestedAction: classification.suggestedAction,
|
||||
context: classification.context,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Provider-specific error handlers
|
||||
*/
|
||||
export const ProviderErrorHandler = {
|
||||
claude: {
|
||||
classify: (error: unknown) => classifyError(error, 'claude'),
|
||||
getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'claude'),
|
||||
isAuth: (error: unknown) => isAuthenticationError(error),
|
||||
isBilling: (error: unknown) => isBillingError(error),
|
||||
isRateLimit: (error: unknown) => isRateLimitError(error),
|
||||
},
|
||||
|
||||
codex: {
|
||||
classify: (error: unknown) => classifyError(error, 'codex'),
|
||||
getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'codex'),
|
||||
isAuth: (error: unknown) => isAuthenticationError(error),
|
||||
isBilling: (error: unknown) => isBillingError(error),
|
||||
isRateLimit: (error: unknown) => isRateLimitError(error),
|
||||
},
|
||||
|
||||
cursor: {
|
||||
classify: (error: unknown) => classifyError(error, 'cursor'),
|
||||
getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'cursor'),
|
||||
isAuth: (error: unknown) => isAuthenticationError(error),
|
||||
isBilling: (error: unknown) => isBillingError(error),
|
||||
isRateLimit: (error: unknown) => isRateLimitError(error),
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a retry handler for retryable errors
|
||||
*/
|
||||
export function createRetryHandler(maxRetries: number = 3, baseDelay: number = 1000) {
|
||||
return async function <T>(
|
||||
operation: () => Promise<T>,
|
||||
shouldRetry: (error: unknown) => boolean = isRetryableError
|
||||
): Promise<T> {
|
||||
let lastError: unknown;
|
||||
|
||||
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
||||
try {
|
||||
return await operation();
|
||||
} catch (error) {
|
||||
lastError = error;
|
||||
|
||||
if (attempt === maxRetries || !shouldRetry(error)) {
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Exponential backoff with jitter
|
||||
const delay = baseDelay * Math.pow(2, attempt) + Math.random() * 1000;
|
||||
logger.debug(`Retrying operation in ${delay}ms (attempt ${attempt + 1}/${maxRetries})`);
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
}
|
||||
}
|
||||
|
||||
throw lastError;
|
||||
};
|
||||
}
|
||||
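To illustrate how these helpers compose, a rough sketch that retries a flaky call and inspects the classification (the endpoint URL and the surrounding function are placeholders; only the imported names come from error-handler.ts):

import { classifyError, createRetryHandler, ErrorType } from './error-handler.js';

const withRetry = createRetryHandler(3, 1000); // shouldRetry defaults to isRetryableError

async function fetchCompletion(): Promise<string> {
  try {
    return await withRetry(async () => {
      const res = await fetch('https://example.invalid/v1/complete'); // placeholder endpoint
      if (!res.ok) throw new Error(`server error ${res.status}`);
      return res.text();
    });
  } catch (error) {
    const classified = classifyError(error, 'codex');
    if (classified.type === ErrorType.RATE_LIMIT) {
      console.warn(classified.suggestedAction); // e.g. back off longer or surface this to the user
    }
    throw error;
  }
}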
173 apps/server/src/lib/permission-enforcer.ts (Normal file)
@@ -0,0 +1,173 @@
|
||||
/**
|
||||
* Permission enforcement utilities for Cursor provider
|
||||
*/
|
||||
|
||||
import type { CursorCliConfigFile } from '@automaker/types';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('PermissionEnforcer');
|
||||
|
||||
export interface PermissionCheckResult {
|
||||
allowed: boolean;
|
||||
reason?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool call is allowed based on permissions
|
||||
*/
|
||||
export function checkToolCallPermission(
|
||||
toolCall: any,
|
||||
permissions: CursorCliConfigFile | null
|
||||
): PermissionCheckResult {
|
||||
if (!permissions || !permissions.permissions) {
|
||||
// If no permissions are configured, allow everything (backward compatibility)
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
const { allow = [], deny = [] } = permissions.permissions;
|
||||
|
||||
// Check shell tool calls
|
||||
if (toolCall.shellToolCall?.args?.command) {
|
||||
const command = toolCall.shellToolCall.args.command;
|
||||
const toolName = `Shell(${extractCommandName(command)})`;
|
||||
|
||||
// Check deny list first (deny takes precedence)
|
||||
for (const denyRule of deny) {
|
||||
if (matchesRule(toolName, denyRule)) {
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Operation blocked by permission rule: ${denyRule}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Then check allow list
|
||||
for (const allowRule of allow) {
|
||||
if (matchesRule(toolName, allowRule)) {
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Operation not in allow list: ${toolName}`,
|
||||
};
|
||||
}
|
||||
|
||||
// Check read tool calls
|
||||
if (toolCall.readToolCall?.args?.path) {
|
||||
const path = toolCall.readToolCall.args.path;
|
||||
const toolName = `Read(${path})`;
|
||||
|
||||
// Check deny list first
|
||||
for (const denyRule of deny) {
|
||||
if (matchesRule(toolName, denyRule)) {
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Read operation blocked by permission rule: ${denyRule}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Then check allow list
|
||||
for (const allowRule of allow) {
|
||||
if (matchesRule(toolName, allowRule)) {
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Read operation not in allow list: ${toolName}`,
|
||||
};
|
||||
}
|
||||
|
||||
// Check write tool calls
|
||||
if (toolCall.writeToolCall?.args?.path) {
|
||||
const path = toolCall.writeToolCall.args.path;
|
||||
const toolName = `Write(${path})`;
|
||||
|
||||
// Check deny list first
|
||||
for (const denyRule of deny) {
|
||||
if (matchesRule(toolName, denyRule)) {
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Write operation blocked by permission rule: ${denyRule}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Then check allow list
|
||||
for (const allowRule of allow) {
|
||||
if (matchesRule(toolName, allowRule)) {
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Write operation not in allow list: ${toolName}`,
|
||||
};
|
||||
}
|
||||
|
||||
// For other tool types, allow by default for now
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the base command name from a shell command
|
||||
*/
|
||||
function extractCommandName(command: string): string {
|
||||
// Remove leading spaces and get the first word
|
||||
const trimmed = command.trim();
|
||||
const firstWord = trimmed.split(/\s+/)[0];
|
||||
return firstWord || 'unknown';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool name matches a permission rule
|
||||
*/
|
||||
function matchesRule(toolName: string, rule: string): boolean {
|
||||
// Exact match
|
||||
if (toolName === rule) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Wildcard patterns
|
||||
if (rule.includes('*')) {
|
||||
const regex = new RegExp(rule.replace(/\*/g, '.*'));
|
||||
return regex.test(toolName);
|
||||
}
|
||||
|
||||
// Prefix match for shell commands (e.g., "Shell(git)" matches "Shell(git status)")
|
||||
if (rule.startsWith('Shell(') && toolName.startsWith('Shell(')) {
|
||||
const ruleCommand = rule.slice(6, -1); // Remove "Shell(" and ")"
|
||||
const toolCommand = extractCommandName(toolName.slice(6, -1)); // Remove "Shell(" and ")"
|
||||
return toolCommand.startsWith(ruleCommand);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Log permission violations
|
||||
*/
|
||||
export function logPermissionViolation(toolCall: any, reason: string, sessionId?: string): void {
|
||||
const sessionIdStr = sessionId ? ` [${sessionId}]` : '';
|
||||
|
||||
if (toolCall.shellToolCall?.args?.command) {
|
||||
logger.warn(
|
||||
`Permission violation${sessionIdStr}: Shell command blocked - ${toolCall.shellToolCall.args.command} (${reason})`
|
||||
);
|
||||
} else if (toolCall.readToolCall?.args?.path) {
|
||||
logger.warn(
|
||||
`Permission violation${sessionIdStr}: Read operation blocked - ${toolCall.readToolCall.args.path} (${reason})`
|
||||
);
|
||||
} else if (toolCall.writeToolCall?.args?.path) {
|
||||
logger.warn(
|
||||
`Permission violation${sessionIdStr}: Write operation blocked - ${toolCall.writeToolCall.args.path} (${reason})`
|
||||
);
|
||||
} else {
|
||||
logger.warn(`Permission violation${sessionIdStr}: Tool call blocked (${reason})`, { toolCall });
|
||||
}
|
||||
}
|
||||
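A small sketch of wiring checkToolCallPermission into a tool-call loop (the permissions object is hand-written and loosely typed here because the full CursorCliConfigFile shape is not reproduced in this diff):

import { checkToolCallPermission, logPermissionViolation } from './permission-enforcer.js';

// Hand-written config for illustration; real configs come from the Cursor CLI config file.
const permissions = {
  permissions: {
    allow: ['Shell(git)', 'Read(*)'],
    deny: ['Shell(rm)'],
  },
} as any;

const toolCall = { shellToolCall: { args: { command: 'rm -rf dist' } } };
const result = checkToolCallPermission(toolCall, permissions);
if (!result.allowed) {
  // 'Shell(rm)' is on the deny list, so the call is blocked and logged
  logPermissionViolation(toolCall, result.reason ?? 'blocked', 'session-123');
}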
@@ -16,7 +16,6 @@
|
||||
*/
|
||||
|
||||
import type { Options } from '@anthropic-ai/claude-agent-sdk';
|
||||
import os from 'os';
|
||||
import path from 'path';
|
||||
import { resolveModelString } from '@automaker/model-resolver';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
@@ -31,6 +30,68 @@ import {
|
||||
} from '@automaker/types';
|
||||
import { isPathAllowed, PathNotAllowedError, getAllowedRootDirectory } from '@automaker/platform';
|
||||
|
||||
/**
|
||||
* Result of sandbox compatibility check
|
||||
*/
|
||||
export interface SandboxCompatibilityResult {
|
||||
/** Whether sandbox mode can be enabled for this path */
|
||||
enabled: boolean;
|
||||
/** Optional message explaining why sandbox is disabled */
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a working directory is compatible with sandbox mode.
|
||||
* Some paths (like cloud storage mounts) may not work with sandboxed execution.
|
||||
*
|
||||
* @param cwd - The working directory to check
|
||||
* @param sandboxRequested - Whether sandbox mode was requested by settings
|
||||
* @returns Object indicating if sandbox can be enabled and why not if disabled
|
||||
*/
|
||||
export function checkSandboxCompatibility(
|
||||
cwd: string,
|
||||
sandboxRequested: boolean
|
||||
): SandboxCompatibilityResult {
|
||||
if (!sandboxRequested) {
|
||||
return { enabled: false };
|
||||
}
|
||||
|
||||
const resolvedCwd = path.resolve(cwd);
|
||||
|
||||
// Check for cloud storage paths that may not be compatible with sandbox
|
||||
const cloudStoragePatterns = [
|
||||
// macOS mounted volumes
|
||||
/^\/Volumes\/GoogleDrive/i,
|
||||
/^\/Volumes\/Dropbox/i,
|
||||
/^\/Volumes\/OneDrive/i,
|
||||
/^\/Volumes\/iCloud/i,
|
||||
// macOS home directory
|
||||
/^\/Users\/[^/]+\/Google Drive/i,
|
||||
/^\/Users\/[^/]+\/Dropbox/i,
|
||||
/^\/Users\/[^/]+\/OneDrive/i,
|
||||
/^\/Users\/[^/]+\/Library\/Mobile Documents/i, // iCloud
|
||||
// Linux home directory
|
||||
/^\/home\/[^/]+\/Google Drive/i,
|
||||
/^\/home\/[^/]+\/Dropbox/i,
|
||||
/^\/home\/[^/]+\/OneDrive/i,
|
||||
// Windows
|
||||
/^C:\\Users\\[^\\]+\\Google Drive/i,
|
||||
/^C:\\Users\\[^\\]+\\Dropbox/i,
|
||||
/^C:\\Users\\[^\\]+\\OneDrive/i,
|
||||
];
|
||||
|
||||
for (const pattern of cloudStoragePatterns) {
|
||||
if (pattern.test(resolvedCwd)) {
|
||||
return {
|
||||
enabled: false,
|
||||
message: `Sandbox disabled: Cloud storage path detected (${resolvedCwd}). Sandbox mode may not work correctly with cloud-synced directories.`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return { enabled: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate that a working directory is allowed by ALLOWED_ROOT_DIRECTORY.
|
||||
* This is the centralized security check for ALL AI model invocations.
|
||||
@@ -57,139 +118,6 @@ export function validateWorkingDirectory(cwd: string): void {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Known cloud storage path patterns where sandbox mode is incompatible.
|
||||
*
|
||||
* The Claude CLI sandbox feature uses filesystem isolation that conflicts with
|
||||
* cloud storage providers' virtual filesystem implementations. This causes the
|
||||
* Claude process to exit with code 1 when sandbox is enabled for these paths.
|
||||
*
|
||||
* Affected providers (macOS paths):
|
||||
* - Dropbox: ~/Library/CloudStorage/Dropbox-*
|
||||
* - Google Drive: ~/Library/CloudStorage/GoogleDrive-*
|
||||
* - OneDrive: ~/Library/CloudStorage/OneDrive-*
|
||||
* - iCloud Drive: ~/Library/Mobile Documents/
|
||||
* - Box: ~/Library/CloudStorage/Box-*
|
||||
*
|
||||
* Note: This is a known limitation when using cloud storage paths.
|
||||
*/
|
||||
|
||||
/**
|
||||
* macOS-specific cloud storage patterns that appear under ~/Library/
|
||||
* These are specific enough to use with includes() safely.
|
||||
*/
|
||||
const MACOS_CLOUD_STORAGE_PATTERNS = [
|
||||
'/Library/CloudStorage/', // Dropbox, Google Drive, OneDrive, Box on macOS
|
||||
'/Library/Mobile Documents/', // iCloud Drive on macOS
|
||||
] as const;
|
||||
|
||||
/**
|
||||
* Generic cloud storage folder names that need to be anchored to the home directory
|
||||
* to avoid false positives (e.g., /home/user/my-project-about-dropbox/).
|
||||
*/
|
||||
const HOME_ANCHORED_CLOUD_FOLDERS = [
|
||||
'Google Drive', // Google Drive on some systems
|
||||
'Dropbox', // Dropbox on Linux/alternative installs
|
||||
'OneDrive', // OneDrive on Linux/alternative installs
|
||||
] as const;
|
||||
|
||||
/**
|
||||
* Check if a path is within a cloud storage location.
|
||||
*
|
||||
* Cloud storage providers use virtual filesystem implementations that are
|
||||
* incompatible with the Claude CLI sandbox feature, causing process crashes.
|
||||
*
|
||||
* Uses two detection strategies:
|
||||
* 1. macOS-specific patterns (under ~/Library/) - checked via includes()
|
||||
* 2. Generic folder names - anchored to home directory to avoid false positives
|
||||
*
|
||||
* @param cwd - The working directory path to check
|
||||
* @returns true if the path is in a cloud storage location
|
||||
*/
|
||||
export function isCloudStoragePath(cwd: string): boolean {
|
||||
const resolvedPath = path.resolve(cwd);
|
||||
// Normalize to forward slashes for consistent pattern matching across platforms
|
||||
let normalizedPath = resolvedPath.split(path.sep).join('/');
|
||||
// Remove Windows drive letter if present (e.g., "C:/Users" -> "/Users")
|
||||
// This ensures Unix paths in tests work the same on Windows
|
||||
normalizedPath = normalizedPath.replace(/^[A-Za-z]:/, '');
|
||||
|
||||
// Check macOS-specific patterns (these are specific enough to use includes)
|
||||
if (MACOS_CLOUD_STORAGE_PATTERNS.some((pattern) => normalizedPath.includes(pattern))) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check home-anchored patterns to avoid false positives
|
||||
// e.g., /home/user/my-project-about-dropbox/ should NOT match
|
||||
const home = os.homedir();
|
||||
for (const folder of HOME_ANCHORED_CLOUD_FOLDERS) {
|
||||
const cloudPath = path.join(home, folder);
|
||||
let normalizedCloudPath = cloudPath.split(path.sep).join('/');
|
||||
// Remove Windows drive letter if present
|
||||
normalizedCloudPath = normalizedCloudPath.replace(/^[A-Za-z]:/, '');
|
||||
// Check if resolved path starts with the cloud storage path followed by a separator
|
||||
// This ensures we match ~/Dropbox/project but not ~/Dropbox-archive or ~/my-dropbox-tool
|
||||
if (
|
||||
normalizedPath === normalizedCloudPath ||
|
||||
normalizedPath.startsWith(normalizedCloudPath + '/')
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Result of sandbox compatibility check
|
||||
*/
|
||||
export interface SandboxCheckResult {
|
||||
/** Whether sandbox should be enabled */
|
||||
enabled: boolean;
|
||||
/** If disabled, the reason why */
|
||||
disabledReason?: 'cloud_storage' | 'user_setting';
|
||||
/** Human-readable message for logging/UI */
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if sandbox mode should be enabled for a given configuration.
|
||||
*
|
||||
* Sandbox mode is automatically disabled for cloud storage paths because the
|
||||
* Claude CLI sandbox feature is incompatible with virtual filesystem
|
||||
* implementations used by cloud storage providers (Dropbox, Google Drive, etc.).
|
||||
*
|
||||
* @param cwd - The working directory
|
||||
* @param enableSandboxMode - User's sandbox mode setting
|
||||
* @returns SandboxCheckResult with enabled status and reason if disabled
|
||||
*/
|
||||
export function checkSandboxCompatibility(
|
||||
cwd: string,
|
||||
enableSandboxMode?: boolean
|
||||
): SandboxCheckResult {
|
||||
// User has explicitly disabled sandbox mode
|
||||
if (enableSandboxMode === false) {
|
||||
return {
|
||||
enabled: false,
|
||||
disabledReason: 'user_setting',
|
||||
};
|
||||
}
|
||||
|
||||
// Check for cloud storage incompatibility (applies when enabled or undefined)
|
||||
if (isCloudStoragePath(cwd)) {
|
||||
return {
|
||||
enabled: false,
|
||||
disabledReason: 'cloud_storage',
|
||||
message: `Sandbox mode auto-disabled: Project is in a cloud storage location (${cwd}). The Claude CLI sandbox feature is incompatible with cloud storage filesystems. To use sandbox mode, move your project to a local directory.`,
|
||||
};
|
||||
}
|
||||
|
||||
// Sandbox is compatible and enabled (true or undefined defaults to enabled)
|
||||
return {
|
||||
enabled: true,
|
||||
};
|
||||
}
|
||||
|
||||
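A quick sketch of the behaviour of the SandboxCheckResult variant of checkSandboxCompatibility above (paths are examples, the module path is assumed, and the last comment assumes the home directory is /Users/dev):

import { checkSandboxCompatibility, isCloudStoragePath } from './sdk-options.js'; // path assumed

// Cloud-synced project: sandbox is auto-disabled with a reason the UI can surface.
const dropbox = checkSandboxCompatibility('/Users/dev/Library/CloudStorage/Dropbox-Personal/app', true);
// dropbox.enabled === false, dropbox.disabledReason === 'cloud_storage'

// Local project with the setting left undefined: defaults to enabled.
const local = checkSandboxCompatibility('/Users/dev/projects/app', undefined);
// local.enabled === true

isCloudStoragePath('/Users/dev/my-dropbox-tools'); // false: not anchored at ~/Dropbox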
/**
|
||||
* Tool presets for different use cases
|
||||
*/
|
||||
@@ -272,55 +200,31 @@ export function getModelForUseCase(
|
||||
|
||||
/**
|
||||
* Base options that apply to all SDK calls
|
||||
* AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||
*/
|
||||
function getBaseOptions(): Partial<Options> {
|
||||
return {
|
||||
permissionMode: 'acceptEdits',
|
||||
permissionMode: 'bypassPermissions',
|
||||
allowDangerouslySkipPermissions: true,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* MCP permission options result
|
||||
* MCP options result
|
||||
*/
|
||||
interface McpPermissionOptions {
|
||||
/** Whether tools should be restricted to a preset */
|
||||
shouldRestrictTools: boolean;
|
||||
/** Options to spread when MCP bypass is enabled */
|
||||
bypassOptions: Partial<Options>;
|
||||
interface McpOptions {
|
||||
/** Options to spread for MCP servers */
|
||||
mcpServerOptions: Partial<Options>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build MCP-related options based on configuration.
|
||||
* Centralizes the logic for determining permission modes and tool restrictions
|
||||
* when MCP servers are configured.
|
||||
*
|
||||
* @param config - The SDK options config
|
||||
* @returns Object with MCP permission settings to spread into final options
|
||||
* @returns Object with MCP server settings to spread into final options
|
||||
*/
|
||||
function buildMcpOptions(config: CreateSdkOptionsConfig): McpPermissionOptions {
|
||||
const hasMcpServers = config.mcpServers && Object.keys(config.mcpServers).length > 0;
|
||||
// Default to true for autonomous workflow. Security is enforced when adding servers
|
||||
// via the security warning dialog that explains the risks.
|
||||
const mcpAutoApprove = config.mcpAutoApproveTools ?? true;
|
||||
const mcpUnrestricted = config.mcpUnrestrictedTools ?? true;
|
||||
|
||||
// Determine if we should bypass permissions based on settings
|
||||
const shouldBypassPermissions = hasMcpServers && mcpAutoApprove;
|
||||
// Determine if we should restrict tools (only when no MCP or unrestricted is disabled)
|
||||
const shouldRestrictTools = !hasMcpServers || !mcpUnrestricted;
|
||||
|
||||
function buildMcpOptions(config: CreateSdkOptionsConfig): McpOptions {
|
||||
return {
|
||||
shouldRestrictTools,
|
||||
// Only include bypass options when MCP is configured and auto-approve is enabled
|
||||
bypassOptions: shouldBypassPermissions
|
||||
? {
|
||||
permissionMode: 'bypassPermissions' as const,
|
||||
// Required flag when using bypassPermissions mode
|
||||
allowDangerouslySkipPermissions: true,
|
||||
}
|
||||
: {},
|
||||
// Include MCP servers if configured
|
||||
mcpServerOptions: config.mcpServers ? { mcpServers: config.mcpServers } : {},
|
||||
};
|
||||
@@ -422,18 +326,9 @@ export interface CreateSdkOptionsConfig {
|
||||
/** Enable auto-loading of CLAUDE.md files via SDK's settingSources */
|
||||
autoLoadClaudeMd?: boolean;
|
||||
|
||||
/** Enable sandbox mode for bash command isolation */
|
||||
enableSandboxMode?: boolean;
|
||||
|
||||
/** MCP servers to make available to the agent */
|
||||
mcpServers?: Record<string, McpServerConfig>;
|
||||
|
||||
/** Auto-approve MCP tool calls without permission prompts */
|
||||
mcpAutoApproveTools?: boolean;
|
||||
|
||||
/** Allow unrestricted tools when MCP servers are enabled */
|
||||
mcpUnrestrictedTools?: boolean;
|
||||
|
||||
/** Extended thinking level for Claude models */
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
}
|
||||
@@ -554,7 +449,6 @@ export function createSuggestionsOptions(config: CreateSdkOptionsConfig): Option
|
||||
* - Full tool access for code modification
|
||||
* - Standard turns for interactive sessions
|
||||
* - Model priority: explicit model > session model > chat default
|
||||
* - Sandbox mode controlled by enableSandboxMode setting (auto-disabled for cloud storage)
|
||||
* - When autoLoadClaudeMd is true, uses preset mode and settingSources for CLAUDE.md loading
|
||||
*/
|
||||
export function createChatOptions(config: CreateSdkOptionsConfig): Options {
|
||||
@@ -573,24 +467,12 @@ export function createChatOptions(config: CreateSdkOptionsConfig): Options {
|
||||
// Build thinking options
|
||||
const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
|
||||
|
||||
// Check sandbox compatibility (auto-disables for cloud storage paths)
|
||||
const sandboxCheck = checkSandboxCompatibility(config.cwd, config.enableSandboxMode);
|
||||
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('chat', effectiveModel),
|
||||
maxTurns: MAX_TURNS.standard,
|
||||
cwd: config.cwd,
|
||||
// Only restrict tools if no MCP servers configured or unrestricted is disabled
|
||||
...(mcpOptions.shouldRestrictTools && { allowedTools: [...TOOL_PRESETS.chat] }),
|
||||
// Apply MCP bypass options if configured
|
||||
...mcpOptions.bypassOptions,
|
||||
...(sandboxCheck.enabled && {
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
}),
|
||||
allowedTools: [...TOOL_PRESETS.chat],
|
||||
...claudeMdOptions,
|
||||
...thinkingOptions,
|
||||
...(config.abortController && { abortController: config.abortController }),
|
||||
@@ -605,7 +487,6 @@ export function createChatOptions(config: CreateSdkOptionsConfig): Options {
|
||||
* - Full tool access for code modification and implementation
|
||||
* - Extended turns for thorough feature implementation
|
||||
* - Uses default model (can be overridden)
|
||||
* - Sandbox mode controlled by enableSandboxMode setting (auto-disabled for cloud storage)
|
||||
* - When autoLoadClaudeMd is true, uses preset mode and settingSources for CLAUDE.md loading
|
||||
*/
|
||||
export function createAutoModeOptions(config: CreateSdkOptionsConfig): Options {
|
||||
@@ -621,24 +502,12 @@ export function createAutoModeOptions(config: CreateSdkOptionsConfig): Options {
|
||||
// Build thinking options
|
||||
const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
|
||||
|
||||
// Check sandbox compatibility (auto-disables for cloud storage paths)
|
||||
const sandboxCheck = checkSandboxCompatibility(config.cwd, config.enableSandboxMode);
|
||||
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('auto', config.model),
|
||||
maxTurns: MAX_TURNS.maximum,
|
||||
cwd: config.cwd,
|
||||
// Only restrict tools if no MCP servers configured or unrestricted is disabled
|
||||
...(mcpOptions.shouldRestrictTools && { allowedTools: [...TOOL_PRESETS.fullAccess] }),
|
||||
// Apply MCP bypass options if configured
|
||||
...mcpOptions.bypassOptions,
|
||||
...(sandboxCheck.enabled && {
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
}),
|
||||
allowedTools: [...TOOL_PRESETS.fullAccess],
|
||||
...claudeMdOptions,
|
||||
...thinkingOptions,
|
||||
...(config.abortController && { abortController: config.abortController }),
|
||||
@@ -656,7 +525,6 @@ export function createCustomOptions(
|
||||
config: CreateSdkOptionsConfig & {
|
||||
maxTurns?: number;
|
||||
allowedTools?: readonly string[];
|
||||
sandbox?: { enabled: boolean; autoAllowBashIfSandboxed?: boolean };
|
||||
}
|
||||
): Options {
|
||||
// Validate working directory before creating options
|
||||
@@ -671,22 +539,17 @@ export function createCustomOptions(
|
||||
// Build thinking options
|
||||
const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
|
||||
|
||||
// For custom options: use explicit allowedTools if provided, otherwise use preset based on MCP settings
|
||||
// For custom options: use explicit allowedTools if provided, otherwise default to readOnly
|
||||
const effectiveAllowedTools = config.allowedTools
|
||||
? [...config.allowedTools]
|
||||
: mcpOptions.shouldRestrictTools
|
||||
? [...TOOL_PRESETS.readOnly]
|
||||
: undefined;
|
||||
: [...TOOL_PRESETS.readOnly];
|
||||
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('default', config.model),
|
||||
maxTurns: config.maxTurns ?? MAX_TURNS.maximum,
|
||||
cwd: config.cwd,
|
||||
...(effectiveAllowedTools && { allowedTools: effectiveAllowedTools }),
|
||||
...(config.sandbox && { sandbox: config.sandbox }),
|
||||
// Apply MCP bypass options if configured
|
||||
...mcpOptions.bypassOptions,
|
||||
allowedTools: effectiveAllowedTools,
|
||||
...claudeMdOptions,
|
||||
...thinkingOptions,
|
||||
...(config.abortController && { abortController: config.abortController }),
|
||||
|
||||
@@ -55,34 +55,6 @@ export async function getAutoLoadClaudeMdSetting(
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the enableSandboxMode setting from global settings.
|
||||
* Returns false if settings service is not available.
|
||||
*
|
||||
* @param settingsService - Optional settings service instance
|
||||
* @param logPrefix - Prefix for log messages (e.g., '[AgentService]')
|
||||
* @returns Promise resolving to the enableSandboxMode setting value
|
||||
*/
|
||||
export async function getEnableSandboxModeSetting(
|
||||
settingsService?: SettingsService | null,
|
||||
logPrefix = '[SettingsHelper]'
|
||||
): Promise<boolean> {
|
||||
if (!settingsService) {
|
||||
logger.info(`${logPrefix} SettingsService not available, sandbox mode disabled`);
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const globalSettings = await settingsService.getGlobalSettings();
|
||||
const result = globalSettings.enableSandboxMode ?? false;
|
||||
logger.info(`${logPrefix} enableSandboxMode from global settings: ${result}`);
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error(`${logPrefix} Failed to load enableSandboxMode setting:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Filters out CLAUDE.md from context files when autoLoadClaudeMd is enabled
|
||||
* and rebuilds the formatted prompt without it.
|
||||
@@ -269,3 +241,83 @@ export async function getPromptCustomization(
|
||||
enhancement: mergeEnhancementPrompts(customization.enhancement),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Skills configuration from settings.
|
||||
* Returns configuration for enabling skills and which sources to load from.
|
||||
*
|
||||
* @param settingsService - Settings service instance
|
||||
* @returns Skills configuration with enabled state, sources, and tool inclusion flag
|
||||
*/
|
||||
export async function getSkillsConfiguration(settingsService: SettingsService): Promise<{
|
||||
enabled: boolean;
|
||||
sources: Array<'user' | 'project'>;
|
||||
shouldIncludeInTools: boolean;
|
||||
}> {
|
||||
const settings = await settingsService.getGlobalSettings();
|
||||
const enabled = settings.enableSkills ?? true; // Default enabled
|
||||
const sources = settings.skillsSources ?? ['user', 'project']; // Default both sources
|
||||
|
||||
return {
|
||||
enabled,
|
||||
sources,
|
||||
shouldIncludeInTools: enabled && sources.length > 0,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Subagents configuration from settings.
|
||||
* Returns configuration for enabling subagents and which sources to load from.
|
||||
*
|
||||
* @param settingsService - Settings service instance
|
||||
* @returns Subagents configuration with enabled state, sources, and tool inclusion flag
|
||||
*/
|
||||
export async function getSubagentsConfiguration(settingsService: SettingsService): Promise<{
|
||||
enabled: boolean;
|
||||
sources: Array<'user' | 'project'>;
|
||||
shouldIncludeInTools: boolean;
|
||||
}> {
|
||||
const settings = await settingsService.getGlobalSettings();
|
||||
const enabled = settings.enableSubagents ?? true; // Default enabled
|
||||
const sources = settings.subagentsSources ?? ['user', 'project']; // Default both sources
|
||||
|
||||
return {
|
||||
enabled,
|
||||
sources,
|
||||
shouldIncludeInTools: enabled && sources.length > 0,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get custom subagents from settings, merging global and project-level definitions.
|
||||
* Project-level subagents take precedence over global ones with the same name.
|
||||
*
|
||||
* @param settingsService - Settings service instance
|
||||
* @param projectPath - Path to the project for loading project-specific subagents
|
||||
* @returns Record of agent names to definitions, or undefined if none configured
|
||||
*/
|
||||
export async function getCustomSubagents(
|
||||
settingsService: SettingsService,
|
||||
projectPath?: string
|
||||
): Promise<Record<string, import('@automaker/types').AgentDefinition> | undefined> {
|
||||
// Get global subagents
|
||||
const globalSettings = await settingsService.getGlobalSettings();
|
||||
const globalSubagents = globalSettings.customSubagents || {};
|
||||
|
||||
// If no project path, return only global subagents
|
||||
if (!projectPath) {
|
||||
return Object.keys(globalSubagents).length > 0 ? globalSubagents : undefined;
|
||||
}
|
||||
|
||||
// Get project-specific subagents
|
||||
const projectSettings = await settingsService.getProjectSettings(projectPath);
|
||||
const projectSubagents = projectSettings.customSubagents || {};
|
||||
|
||||
// Merge: project-level takes precedence
|
||||
const merged = {
|
||||
...globalSubagents,
|
||||
...projectSubagents,
|
||||
};
|
||||
|
||||
return Object.keys(merged).length > 0 ? merged : undefined;
|
||||
}
|
||||
|
||||
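For orientation, a sketch of combining these settings helpers when assembling provider options (both import paths and the wrapper function are assumptions):

import type { SettingsService } from '../services/settings-service.js'; // path assumed
import {
  getSkillsConfiguration,
  getSubagentsConfiguration,
  getCustomSubagents,
} from './settings-helpers.js'; // path assumed

async function buildAgentConfig(settingsService: SettingsService, projectPath?: string) {
  const skills = await getSkillsConfiguration(settingsService);
  const subagents = await getSubagentsConfiguration(settingsService);
  const agents = await getCustomSubagents(settingsService, projectPath);

  return {
    includeSkillTools: skills.shouldIncludeInTools, // false when disabled or no sources
    includeSubagentTools: subagents.shouldIncludeInTools,
    agents, // project-level definitions win over global ones
  };
}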
@@ -10,7 +10,7 @@ import { BaseProvider } from './base-provider.js';
|
||||
import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('ClaudeProvider');
|
||||
import { getThinkingTokenBudget } from '@automaker/types';
|
||||
import { getThinkingTokenBudget, validateBareModelId } from '@automaker/types';
|
||||
import type {
|
||||
ExecuteOptions,
|
||||
ProviderMessage,
|
||||
@@ -53,6 +53,10 @@ export class ClaudeProvider extends BaseProvider {
|
||||
* Execute a query using Claude Agent SDK
|
||||
*/
|
||||
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
|
||||
// Validate that model doesn't have a provider prefix
|
||||
// AgentService should strip prefixes before passing to providers
|
||||
validateBareModelId(options.model, 'ClaudeProvider');
|
||||
|
||||
const {
|
||||
prompt,
|
||||
model,
|
||||
@@ -70,14 +74,6 @@ export class ClaudeProvider extends BaseProvider {
|
||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
||||
|
||||
// Build Claude SDK options
|
||||
// AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||
const hasMcpServers = options.mcpServers && Object.keys(options.mcpServers).length > 0;
|
||||
const defaultTools = ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'];
|
||||
|
||||
// AUTONOMOUS MODE: Always bypass permissions and allow unrestricted tools
|
||||
// Only restrict tools when no MCP servers are configured
|
||||
const shouldRestrictTools = !hasMcpServers;
|
||||
|
||||
const sdkOptions: Options = {
|
||||
model,
|
||||
systemPrompt,
|
||||
@@ -85,10 +81,9 @@ export class ClaudeProvider extends BaseProvider {
|
||||
cwd,
|
||||
// Pass only explicitly allowed environment variables to SDK
|
||||
env: buildEnv(),
|
||||
// Only restrict tools if explicitly set OR (no MCP / unrestricted disabled)
|
||||
...(allowedTools && shouldRestrictTools && { allowedTools }),
|
||||
...(!allowedTools && shouldRestrictTools && { allowedTools: defaultTools }),
|
||||
// AUTONOMOUS MODE: Always bypass permissions and allow dangerous operations
|
||||
// Pass through allowedTools if provided by caller (decided by sdk-options.ts)
|
||||
...(allowedTools && { allowedTools }),
|
||||
// AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||
permissionMode: 'bypassPermissions',
|
||||
allowDangerouslySkipPermissions: true,
|
||||
abortController,
|
||||
@@ -98,12 +93,12 @@ export class ClaudeProvider extends BaseProvider {
|
||||
: {}),
|
||||
// Forward settingSources for CLAUDE.md file loading
|
||||
...(options.settingSources && { settingSources: options.settingSources }),
|
||||
// Forward sandbox configuration
|
||||
...(options.sandbox && { sandbox: options.sandbox }),
|
||||
// Forward MCP servers configuration
|
||||
...(options.mcpServers && { mcpServers: options.mcpServers }),
|
||||
// Extended thinking configuration
|
||||
...(maxThinkingTokens && { maxThinkingTokens }),
|
||||
// Subagents configuration for specialized task delegation
|
||||
...(options.agents && { agents: options.agents }),
|
||||
};
|
||||
|
||||
// Build prompt payload
|
||||
|
||||
85 apps/server/src/providers/codex-config-manager.ts (Normal file)
@@ -0,0 +1,85 @@
|
||||
/**
|
||||
* Codex Config Manager - Writes MCP server configuration for Codex CLI
|
||||
*/
|
||||
|
||||
import path from 'path';
|
||||
import type { McpServerConfig } from '@automaker/types';
|
||||
import * as secureFs from '../lib/secure-fs.js';
|
||||
|
||||
const CODEX_CONFIG_DIR = '.codex';
|
||||
const CODEX_CONFIG_FILENAME = 'config.toml';
|
||||
const CODEX_MCP_SECTION = 'mcp_servers';
|
||||
|
||||
function formatTomlString(value: string): string {
|
||||
return JSON.stringify(value);
|
||||
}
|
||||
|
||||
function formatTomlArray(values: string[]): string {
|
||||
const formatted = values.map((value) => formatTomlString(value)).join(', ');
|
||||
return `[${formatted}]`;
|
||||
}
|
||||
|
||||
function formatTomlInlineTable(values: Record<string, string>): string {
|
||||
const entries = Object.entries(values).map(
|
||||
([key, value]) => `${key} = ${formatTomlString(value)}`
|
||||
);
|
||||
return `{ ${entries.join(', ')} }`;
|
||||
}
|
||||
|
||||
function formatTomlKey(key: string): string {
|
||||
return `"${key.replace(/"/g, '\\"')}"`;
|
||||
}
|
||||
|
||||
function buildServerBlock(name: string, server: McpServerConfig): string[] {
|
||||
const lines: string[] = [];
|
||||
const section = `${CODEX_MCP_SECTION}.${formatTomlKey(name)}`;
|
||||
lines.push(`[${section}]`);
|
||||
|
||||
if (server.type) {
|
||||
lines.push(`type = ${formatTomlString(server.type)}`);
|
||||
}
|
||||
|
||||
if ('command' in server && server.command) {
|
||||
lines.push(`command = ${formatTomlString(server.command)}`);
|
||||
}
|
||||
|
||||
if ('args' in server && server.args && server.args.length > 0) {
|
||||
lines.push(`args = ${formatTomlArray(server.args)}`);
|
||||
}
|
||||
|
||||
if ('env' in server && server.env && Object.keys(server.env).length > 0) {
|
||||
lines.push(`env = ${formatTomlInlineTable(server.env)}`);
|
||||
}
|
||||
|
||||
if ('url' in server && server.url) {
|
||||
lines.push(`url = ${formatTomlString(server.url)}`);
|
||||
}
|
||||
|
||||
if ('headers' in server && server.headers && Object.keys(server.headers).length > 0) {
|
||||
lines.push(`headers = ${formatTomlInlineTable(server.headers)}`);
|
||||
}
|
||||
|
||||
return lines;
|
||||
}
|
||||
|
||||
export class CodexConfigManager {
|
||||
async configureMcpServers(
|
||||
cwd: string,
|
||||
mcpServers: Record<string, McpServerConfig>
|
||||
): Promise<void> {
|
||||
const configDir = path.join(cwd, CODEX_CONFIG_DIR);
|
||||
const configPath = path.join(configDir, CODEX_CONFIG_FILENAME);
|
||||
|
||||
await secureFs.mkdir(configDir, { recursive: true });
|
||||
|
||||
const blocks: string[] = [];
|
||||
for (const [name, server] of Object.entries(mcpServers)) {
|
||||
blocks.push(...buildServerBlock(name, server), '');
|
||||
}
|
||||
|
||||
const content = blocks.join('\n').trim();
|
||||
if (content) {
|
||||
await secureFs.writeFile(configPath, content + '\n', 'utf-8');
|
||||
}
|
||||
}
|
||||
}
|
||||
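As a concrete illustration of the serialization above, an invented server entry and the config.toml it would produce (names and values are examples only):

const manager = new CodexConfigManager();
await manager.configureMcpServers('/tmp/example-project', {
  'docs-search': {
    type: 'stdio',
    command: 'npx',
    args: ['-y', 'docs-mcp-server'],
    env: { DOCS_TOKEN: 'secret' },
  } as any, // cast because the exact McpServerConfig union is not reproduced here
});

// Resulting /tmp/example-project/.codex/config.toml:
// [mcp_servers."docs-search"]
// type = "stdio"
// command = "npx"
// args = ["-y", "docs-mcp-server"]
// env = { DOCS_TOKEN = "secret" }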
111 apps/server/src/providers/codex-models.ts (Normal file)
@@ -0,0 +1,111 @@
|
||||
/**
|
||||
* Codex Model Definitions
|
||||
*
|
||||
* Official Codex CLI models as documented at https://developers.openai.com/codex/models/
|
||||
*/
|
||||
|
||||
import { CODEX_MODEL_MAP } from '@automaker/types';
|
||||
import type { ModelDefinition } from './types.js';
|
||||
|
||||
const CONTEXT_WINDOW_256K = 256000;
|
||||
const CONTEXT_WINDOW_128K = 128000;
|
||||
const MAX_OUTPUT_32K = 32000;
|
||||
const MAX_OUTPUT_16K = 16000;
|
||||
|
||||
/**
|
||||
* All available Codex models with their specifications
|
||||
* Based on https://developers.openai.com/codex/models/
|
||||
*/
|
||||
export const CODEX_MODELS: ModelDefinition[] = [
|
||||
// ========== Recommended Codex Models ==========
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt52Codex,
|
||||
name: 'GPT-5.2-Codex',
|
||||
modelString: CODEX_MODEL_MAP.gpt52Codex,
|
||||
provider: 'openai',
|
||||
description:
|
||||
'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).',
|
||||
contextWindow: CONTEXT_WINDOW_256K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'premium' as const,
|
||||
default: true,
|
||||
hasReasoning: true,
|
||||
},
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt51CodexMax,
|
||||
name: 'GPT-5.1-Codex-Max',
|
||||
modelString: CODEX_MODEL_MAP.gpt51CodexMax,
|
||||
provider: 'openai',
|
||||
description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
|
||||
contextWindow: CONTEXT_WINDOW_256K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'premium' as const,
|
||||
hasReasoning: true,
|
||||
},
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt51CodexMini,
|
||||
name: 'GPT-5.1-Codex-Mini',
|
||||
modelString: CODEX_MODEL_MAP.gpt51CodexMini,
|
||||
provider: 'openai',
|
||||
description: 'Smaller, more cost-effective version for faster workflows.',
|
||||
contextWindow: CONTEXT_WINDOW_128K,
|
||||
maxOutputTokens: MAX_OUTPUT_16K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'basic' as const,
|
||||
hasReasoning: false,
|
||||
},
|
||||
|
||||
// ========== General-Purpose GPT Models ==========
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt52,
|
||||
name: 'GPT-5.2',
|
||||
modelString: CODEX_MODEL_MAP.gpt52,
|
||||
provider: 'openai',
|
||||
description: 'Best general agentic model for tasks across industries and domains.',
|
||||
contextWindow: CONTEXT_WINDOW_256K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'standard' as const,
|
||||
hasReasoning: true,
|
||||
},
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt51,
|
||||
name: 'GPT-5.1',
|
||||
modelString: CODEX_MODEL_MAP.gpt51,
|
||||
provider: 'openai',
|
||||
description: 'Great for coding and agentic tasks across domains.',
|
||||
contextWindow: CONTEXT_WINDOW_256K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'standard' as const,
|
||||
hasReasoning: true,
|
||||
},
|
||||
];
|
||||
|
||||
/**
|
||||
* Get model definition by ID
|
||||
*/
|
||||
export function getCodexModelById(modelId: string): ModelDefinition | undefined {
|
||||
return CODEX_MODELS.find((m) => m.id === modelId || m.modelString === modelId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all models that support reasoning
|
||||
*/
|
||||
export function getReasoningModels(): ModelDefinition[] {
|
||||
return CODEX_MODELS.filter((m) => m.hasReasoning);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get models by tier
|
||||
*/
|
||||
export function getModelsByTier(tier: 'premium' | 'standard' | 'basic'): ModelDefinition[] {
|
||||
return CODEX_MODELS.filter((m) => m.tier === tier);
|
||||
}
|
||||
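A short sketch of querying these model helpers (assumes the same CODEX_MODEL_MAP import used above; the local module path is illustrative):

import { CODEX_MODEL_MAP } from '@automaker/types';
import { CODEX_MODELS, getCodexModelById, getReasoningModels, getModelsByTier } from './codex-models.js';

const defaultModel = CODEX_MODELS.find((m) => m.default); // the GPT-5.2-Codex entry
const byId = getCodexModelById(CODEX_MODEL_MAP.gpt52Codex); // same entry, looked up by ID
const premium = getModelsByTier('premium'); // GPT-5.2-Codex and GPT-5.1-Codex-Max
const reasoning = getReasoningModels(); // every model except GPT-5.1-Codex-Mini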
1114 apps/server/src/providers/codex-provider.ts (Normal file)
File diff suppressed because it is too large
173 apps/server/src/providers/codex-sdk-client.ts (Normal file)
@@ -0,0 +1,173 @@
|
||||
/**
|
||||
* Codex SDK client - Executes Codex queries via official @openai/codex-sdk
|
||||
*
|
||||
* Used for programmatic control of Codex from within the application.
|
||||
* Provides cleaner integration than spawning CLI processes.
|
||||
*/
|
||||
|
||||
import { Codex } from '@openai/codex-sdk';
|
||||
import { formatHistoryAsText, classifyError, getUserFriendlyErrorMessage } from '@automaker/utils';
|
||||
import { supportsReasoningEffort } from '@automaker/types';
|
||||
import type { ExecuteOptions, ProviderMessage } from './types.js';
|
||||
|
||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||
const SDK_HISTORY_HEADER = 'Current request:\n';
|
||||
const DEFAULT_RESPONSE_TEXT = '';
|
||||
const SDK_ERROR_DETAILS_LABEL = 'Details:';
|
||||
|
||||
type PromptBlock = {
|
||||
type: string;
|
||||
text?: string;
|
||||
source?: {
|
||||
type?: string;
|
||||
media_type?: string;
|
||||
data?: string;
|
||||
};
|
||||
};
|
||||
|
||||
function resolveApiKey(): string {
|
||||
const apiKey = process.env[OPENAI_API_KEY_ENV];
|
||||
if (!apiKey) {
|
||||
throw new Error('OPENAI_API_KEY is not set.');
|
||||
}
|
||||
return apiKey;
|
||||
}
|
||||
|
||||
function normalizePromptBlocks(prompt: ExecuteOptions['prompt']): PromptBlock[] {
|
||||
if (Array.isArray(prompt)) {
|
||||
return prompt as PromptBlock[];
|
||||
}
|
||||
return [{ type: 'text', text: prompt }];
|
||||
}
|
||||
|
||||
function buildPromptText(options: ExecuteOptions, systemPrompt: string | null): string {
|
||||
const historyText =
|
||||
options.conversationHistory && options.conversationHistory.length > 0
|
||||
? formatHistoryAsText(options.conversationHistory)
|
||||
: '';
|
||||
|
||||
const promptBlocks = normalizePromptBlocks(options.prompt);
|
||||
const promptTexts: string[] = [];
|
||||
|
||||
for (const block of promptBlocks) {
|
||||
if (block.type === 'text' && typeof block.text === 'string' && block.text.trim()) {
|
||||
promptTexts.push(block.text);
|
||||
}
|
||||
}
|
||||
|
||||
const promptContent = promptTexts.join('\n\n');
|
||||
if (!promptContent.trim()) {
|
||||
throw new Error('Codex SDK prompt is empty.');
|
||||
}
|
||||
|
||||
const parts: string[] = [];
|
||||
if (systemPrompt) {
|
||||
parts.push(`System: ${systemPrompt}`);
|
||||
}
|
||||
if (historyText) {
|
||||
parts.push(historyText);
|
||||
}
|
||||
parts.push(`${SDK_HISTORY_HEADER}${promptContent}`);
|
||||
|
||||
return parts.join('\n\n');
|
||||
}
|
||||
|
||||
function buildSdkErrorMessage(rawMessage: string, userMessage: string): string {
|
||||
if (!rawMessage) {
|
||||
return userMessage;
|
||||
}
|
||||
if (!userMessage || rawMessage === userMessage) {
|
||||
return rawMessage;
|
||||
}
|
||||
return `${userMessage}\n\n${SDK_ERROR_DETAILS_LABEL} ${rawMessage}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a query using the official Codex SDK
|
||||
*
|
||||
* The SDK provides a cleaner interface than spawning CLI processes:
|
||||
* - Handles authentication automatically
|
||||
* - Provides TypeScript types
|
||||
* - Supports thread management and resumption
|
||||
* - Better error handling
|
||||
*/
|
||||
export async function* executeCodexSdkQuery(
|
||||
options: ExecuteOptions,
|
||||
systemPrompt: string | null
|
||||
): AsyncGenerator<ProviderMessage> {
|
||||
try {
|
||||
const apiKey = resolveApiKey();
|
||||
const codex = new Codex({ apiKey });
|
||||
|
||||
// Resume existing thread or start new one
|
||||
let thread;
|
||||
if (options.sdkSessionId) {
|
||||
try {
|
||||
thread = codex.resumeThread(options.sdkSessionId);
|
||||
} catch {
|
||||
// If resume fails, start a new thread
|
||||
thread = codex.startThread();
|
||||
}
|
||||
} else {
|
||||
thread = codex.startThread();
|
||||
}
|
||||
|
||||
const promptText = buildPromptText(options, systemPrompt);
|
||||
|
||||
// Build run options with reasoning effort if supported
|
||||
const runOptions: {
|
||||
signal?: AbortSignal;
|
||||
reasoning?: { effort: string };
|
||||
} = {
|
||||
signal: options.abortController?.signal,
|
||||
};
|
||||
|
||||
// Add reasoning effort if model supports it and reasoningEffort is specified
|
||||
if (
|
||||
options.reasoningEffort &&
|
||||
supportsReasoningEffort(options.model) &&
|
||||
options.reasoningEffort !== 'none'
|
||||
) {
|
||||
runOptions.reasoning = { effort: options.reasoningEffort };
|
||||
}
|
||||
|
||||
// Run the query
|
||||
const result = await thread.run(promptText, runOptions);
|
||||
|
||||
// Extract response text (from finalResponse property)
|
||||
const outputText = result.finalResponse ?? DEFAULT_RESPONSE_TEXT;
|
||||
|
||||
// Get thread ID (may be null if not populated yet)
|
||||
const threadId = thread.id ?? undefined;
|
||||
|
||||
// Yield assistant message
|
||||
yield {
|
||||
type: 'assistant',
|
||||
session_id: threadId,
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: outputText }],
|
||||
},
|
||||
};
|
||||
|
||||
// Yield result
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
session_id: threadId,
|
||||
result: outputText,
|
||||
};
|
||||
} catch (error) {
|
||||
const errorInfo = classifyError(error);
|
||||
const userMessage = getUserFriendlyErrorMessage(error);
|
||||
const combinedMessage = buildSdkErrorMessage(errorInfo.message, userMessage);
|
||||
console.error('[CodexSDK] executeQuery() error during execution:', {
|
||||
type: errorInfo.type,
|
||||
message: errorInfo.message,
|
||||
isRateLimit: errorInfo.isRateLimit,
|
||||
retryAfter: errorInfo.retryAfter,
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
});
|
||||
yield { type: 'error', error: combinedMessage };
|
||||
}
|
||||
}
|
||||
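A sketch of consuming the generator above (the options object is trimmed to the fields this sketch needs and cast for brevity; OPENAI_API_KEY must be set or the generator yields an error message):

import { CODEX_MODEL_MAP } from '@automaker/types';
import { executeCodexSdkQuery } from './codex-sdk-client.js';
import type { ExecuteOptions } from './types.js';

async function runOnce(): Promise<void> {
  const options = {
    prompt: 'Summarize the failing tests in this repo',
    model: CODEX_MODEL_MAP.gpt52Codex,
  } as ExecuteOptions; // cast: most ExecuteOptions fields are omitted in this sketch

  for await (const message of executeCodexSdkQuery(options, 'You are a concise assistant.')) {
    if (message.type === 'error') {
      console.error(message.error);
    } else if (message.type === 'result') {
      console.log(message.result);
    }
  }
}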
436 apps/server/src/providers/codex-tool-mapping.ts (Normal file)
@@ -0,0 +1,436 @@
|
||||
export type CodexToolResolution = {
|
||||
name: string;
|
||||
input: Record<string, unknown>;
|
||||
};
|
||||
|
||||
export type CodexTodoItem = {
|
||||
content: string;
|
||||
status: 'pending' | 'in_progress' | 'completed';
|
||||
activeForm?: string;
|
||||
};
|
||||
|
||||
const TOOL_NAME_BASH = 'Bash';
|
||||
const TOOL_NAME_READ = 'Read';
|
||||
const TOOL_NAME_EDIT = 'Edit';
|
||||
const TOOL_NAME_WRITE = 'Write';
|
||||
const TOOL_NAME_GREP = 'Grep';
|
||||
const TOOL_NAME_GLOB = 'Glob';
|
||||
const TOOL_NAME_TODO = 'TodoWrite';
|
||||
const TOOL_NAME_DELETE = 'Delete';
|
||||
const TOOL_NAME_LS = 'Ls';
|
||||
|
||||
const INPUT_KEY_COMMAND = 'command';
|
||||
const INPUT_KEY_FILE_PATH = 'file_path';
|
||||
const INPUT_KEY_PATTERN = 'pattern';
|
||||
|
||||
const SHELL_WRAPPER_PATTERNS = [
|
||||
/^\/bin\/bash\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^bash\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^\/bin\/sh\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^sh\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^cmd\.exe\s+\/c\s+["']?([\s\S]+)["']?$/i,
|
||||
/^powershell(?:\.exe)?\s+-Command\s+["']?([\s\S]+)["']?$/i,
|
||||
/^pwsh(?:\.exe)?\s+-Command\s+["']?([\s\S]+)["']?$/i,
|
||||
] as const;
|
||||
|
||||
const COMMAND_SEPARATOR_PATTERN = /\s*(?:&&|\|\||;)\s*/;
|
||||
const SEGMENT_SKIP_PREFIXES = ['cd ', 'export ', 'set ', 'pushd '] as const;
|
||||
const WRAPPER_COMMANDS = new Set(['sudo', 'env', 'command']);
|
||||
const READ_COMMANDS = new Set(['cat', 'sed', 'head', 'tail', 'less', 'more', 'bat', 'stat', 'wc']);
|
||||
const SEARCH_COMMANDS = new Set(['rg', 'grep', 'ag', 'ack']);
|
||||
const GLOB_COMMANDS = new Set(['ls', 'find', 'fd', 'tree']);
|
||||
const DELETE_COMMANDS = new Set(['rm', 'del', 'erase', 'remove', 'unlink']);
|
||||
const LIST_COMMANDS = new Set(['ls', 'dir', 'll', 'la']);
|
||||
const WRITE_COMMANDS = new Set(['tee', 'touch', 'mkdir']);
|
||||
const APPLY_PATCH_COMMAND = 'apply_patch';
|
||||
const APPLY_PATCH_PATTERN = /\bapply_patch\b/;
|
||||
const REDIRECTION_TARGET_PATTERN = /(?:>>|>)\s*([^\s]+)/;
|
||||
const SED_IN_PLACE_FLAGS = new Set(['-i', '--in-place']);
|
||||
const PERL_IN_PLACE_FLAG = /-.*i/;
|
||||
const SEARCH_PATTERN_FLAGS = new Set(['-e', '--regexp']);
|
||||
const SEARCH_VALUE_FLAGS = new Set([
|
||||
'-g',
|
||||
'--glob',
|
||||
'--iglob',
|
||||
'--type',
|
||||
'--type-add',
|
||||
'--type-clear',
|
||||
'--encoding',
|
||||
]);
|
||||
const SEARCH_FILE_LIST_FLAGS = new Set(['--files']);
|
||||
const TODO_LINE_PATTERN = /^[-*]\s*(?:\[(?<status>[ x~])\]\s*)?(?<content>.+)$/;
|
||||
const TODO_STATUS_COMPLETED = 'completed';
|
||||
const TODO_STATUS_IN_PROGRESS = 'in_progress';
|
||||
const TODO_STATUS_PENDING = 'pending';
|
||||
const PATCH_FILE_MARKERS = [
|
||||
'*** Update File: ',
|
||||
'*** Add File: ',
|
||||
'*** Delete File: ',
|
||||
'*** Move to: ',
|
||||
] as const;
|
||||
|
||||
function stripShellWrapper(command: string): string {
|
||||
const trimmed = command.trim();
|
||||
for (const pattern of SHELL_WRAPPER_PATTERNS) {
|
||||
const match = trimmed.match(pattern);
|
||||
if (match && match[1]) {
|
||||
return unescapeCommand(match[1].trim());
|
||||
}
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function unescapeCommand(command: string): string {
|
||||
return command.replace(/\\(["'])/g, '$1');
|
||||
}
|
||||
|
||||
function extractPrimarySegment(command: string): string {
|
||||
const segments = command
|
||||
.split(COMMAND_SEPARATOR_PATTERN)
|
||||
.map((segment) => segment.trim())
|
||||
.filter(Boolean);
|
||||
|
||||
for (const segment of segments) {
|
||||
const shouldSkip = SEGMENT_SKIP_PREFIXES.some((prefix) => segment.startsWith(prefix));
|
||||
if (!shouldSkip) {
|
||||
return segment;
|
||||
}
|
||||
}
|
||||
|
||||
return command.trim();
|
||||
}
|
||||
|
||||
function tokenizeCommand(command: string): string[] {
|
||||
const tokens: string[] = [];
|
||||
let current = '';
|
||||
let inSingleQuote = false;
|
||||
let inDoubleQuote = false;
|
||||
let isEscaped = false;
|
||||
|
||||
for (const char of command) {
|
||||
if (isEscaped) {
|
||||
current += char;
|
||||
isEscaped = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '\\') {
|
||||
isEscaped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === "'" && !inDoubleQuote) {
|
||||
inSingleQuote = !inSingleQuote;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '"' && !inSingleQuote) {
|
||||
inDoubleQuote = !inDoubleQuote;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!inSingleQuote && !inDoubleQuote && /\s/.test(char)) {
|
||||
if (current) {
|
||||
tokens.push(current);
|
||||
current = '';
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
current += char;
|
||||
}
|
||||
|
||||
if (current) {
|
||||
tokens.push(current);
|
||||
}
|
||||
|
||||
return tokens;
|
||||
}
|
||||
|
||||
function stripWrapperTokens(tokens: string[]): string[] {
|
||||
let index = 0;
|
||||
while (index < tokens.length && WRAPPER_COMMANDS.has(tokens[index].toLowerCase())) {
|
||||
index += 1;
|
||||
}
|
||||
return tokens.slice(index);
|
||||
}
|
||||
|
||||
function extractFilePathFromTokens(tokens: string[]): string | null {
|
||||
const candidates = tokens.slice(1).filter((token) => token && !token.startsWith('-'));
|
||||
if (candidates.length === 0) return null;
|
||||
return candidates[candidates.length - 1];
|
||||
}
|
||||
|
||||
function extractSearchPattern(tokens: string[]): string | null {
|
||||
const remaining = tokens.slice(1);
|
||||
|
||||
for (let index = 0; index < remaining.length; index += 1) {
|
||||
const token = remaining[index];
|
||||
if (token === '--') {
|
||||
return remaining[index + 1] ?? null;
|
||||
}
|
||||
if (SEARCH_PATTERN_FLAGS.has(token)) {
|
||||
return remaining[index + 1] ?? null;
|
||||
}
|
||||
if (SEARCH_VALUE_FLAGS.has(token)) {
|
||||
index += 1;
|
||||
continue;
|
||||
}
|
||||
if (token.startsWith('-')) {
|
||||
continue;
|
||||
}
|
||||
return token;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function extractTeeTarget(tokens: string[]): string | null {
|
||||
const teeIndex = tokens.findIndex((token) => token === 'tee');
|
||||
if (teeIndex < 0) return null;
|
||||
const candidate = tokens[teeIndex + 1];
|
||||
return candidate && !candidate.startsWith('-') ? candidate : null;
|
||||
}
|
||||
|
||||
function extractRedirectionTarget(command: string): string | null {
|
||||
const match = command.match(REDIRECTION_TARGET_PATTERN);
|
||||
return match?.[1] ?? null;
|
||||
}
|
||||
|
||||
function extractFilePathFromDeleteTokens(tokens: string[]): string | null {
|
||||
// rm file.txt or rm /path/to/file.txt
|
||||
// Skip flags and get the first non-flag argument
|
||||
for (let i = 1; i < tokens.length; i++) {
|
||||
const token = tokens[i];
|
||||
if (token && !token.startsWith('-')) {
|
||||
return token;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function hasSedInPlaceFlag(tokens: string[]): boolean {
|
||||
return tokens.some((token) => SED_IN_PLACE_FLAGS.has(token) || token.startsWith('-i'));
|
||||
}
|
||||
|
||||
function hasPerlInPlaceFlag(tokens: string[]): boolean {
|
||||
return tokens.some((token) => PERL_IN_PLACE_FLAG.test(token));
|
||||
}
|
||||
|
||||
function extractPatchFilePath(command: string): string | null {
|
||||
for (const marker of PATCH_FILE_MARKERS) {
|
||||
const index = command.indexOf(marker);
|
||||
if (index < 0) continue;
|
||||
const start = index + marker.length;
|
||||
const end = command.indexOf('\n', start);
|
||||
const rawPath = (end === -1 ? command.slice(start) : command.slice(start, end)).trim();
|
||||
if (rawPath) return rawPath;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function buildInputWithFilePath(filePath: string | null): Record<string, unknown> {
|
||||
return filePath ? { [INPUT_KEY_FILE_PATH]: filePath } : {};
|
||||
}
|
||||
|
||||
function buildInputWithPattern(pattern: string | null): Record<string, unknown> {
|
||||
return pattern ? { [INPUT_KEY_PATTERN]: pattern } : {};
|
||||
}
|
||||
|
||||
export function resolveCodexToolCall(command: string): CodexToolResolution {
|
||||
const normalized = stripShellWrapper(command);
|
||||
const primarySegment = extractPrimarySegment(normalized);
|
||||
const tokens = stripWrapperTokens(tokenizeCommand(primarySegment));
|
||||
const commandToken = tokens[0]?.toLowerCase() ?? '';
|
||||
|
||||
const redirectionTarget = extractRedirectionTarget(primarySegment);
|
||||
if (redirectionTarget) {
|
||||
return {
|
||||
name: TOOL_NAME_WRITE,
|
||||
input: buildInputWithFilePath(redirectionTarget),
|
||||
};
|
||||
}
|
||||
|
||||
if (commandToken === APPLY_PATCH_COMMAND || APPLY_PATCH_PATTERN.test(primarySegment)) {
|
||||
return {
|
||||
name: TOOL_NAME_EDIT,
|
||||
input: buildInputWithFilePath(extractPatchFilePath(primarySegment)),
|
||||
};
|
||||
}
|
||||
|
||||
if (commandToken === 'sed' && hasSedInPlaceFlag(tokens)) {
|
||||
return {
|
||||
name: TOOL_NAME_EDIT,
|
||||
input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
if (commandToken === 'perl' && hasPerlInPlaceFlag(tokens)) {
|
||||
return {
|
||||
name: TOOL_NAME_EDIT,
|
||||
input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
if (WRITE_COMMANDS.has(commandToken)) {
|
||||
const filePath =
|
||||
commandToken === 'tee' ? extractTeeTarget(tokens) : extractFilePathFromTokens(tokens);
|
||||
return {
|
||||
name: TOOL_NAME_WRITE,
|
||||
input: buildInputWithFilePath(filePath),
|
||||
};
|
||||
}
|
||||
|
||||
if (SEARCH_COMMANDS.has(commandToken)) {
|
||||
if (tokens.some((token) => SEARCH_FILE_LIST_FLAGS.has(token))) {
|
||||
return {
|
||||
name: TOOL_NAME_GLOB,
|
||||
input: buildInputWithPattern(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
name: TOOL_NAME_GREP,
|
||||
input: buildInputWithPattern(extractSearchPattern(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
// Handle Delete commands (rm, del, erase, remove, unlink)
|
||||
if (DELETE_COMMANDS.has(commandToken)) {
|
||||
// Skip if -r, -rf, or -f flags (recursive or forced deletes should go to Bash)
if (tokens.some((token) => token === '-r' || token === '-rf' || token === '-f')) {
|
||||
return {
|
||||
name: TOOL_NAME_BASH,
|
||||
input: { [INPUT_KEY_COMMAND]: normalized },
|
||||
};
|
||||
}
|
||||
// Simple file deletion - extract the file path
|
||||
const filePath = extractFilePathFromDeleteTokens(tokens);
|
||||
if (filePath) {
|
||||
return {
|
||||
name: TOOL_NAME_DELETE,
|
||||
input: { path: filePath },
|
||||
};
|
||||
}
|
||||
// Fall back to bash if we can't determine the file path
|
||||
return {
|
||||
name: TOOL_NAME_BASH,
|
||||
input: { [INPUT_KEY_COMMAND]: normalized },
|
||||
};
|
||||
}
|
||||
|
||||
// Handle simple Ls commands (just listing, not find/glob)
|
||||
if (LIST_COMMANDS.has(commandToken)) {
|
||||
const filePath = extractFilePathFromTokens(tokens);
|
||||
return {
|
||||
name: TOOL_NAME_LS,
|
||||
input: { path: filePath || '.' },
|
||||
};
|
||||
}
|
||||
|
||||
if (GLOB_COMMANDS.has(commandToken)) {
|
||||
return {
|
||||
name: TOOL_NAME_GLOB,
|
||||
input: buildInputWithPattern(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
if (READ_COMMANDS.has(commandToken)) {
|
||||
return {
|
||||
name: TOOL_NAME_READ,
|
||||
input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
name: TOOL_NAME_BASH,
|
||||
input: { [INPUT_KEY_COMMAND]: normalized },
|
||||
};
|
||||
}
|
||||
|
||||
function parseTodoLines(lines: string[]): CodexTodoItem[] {
|
||||
const todos: CodexTodoItem[] = [];
|
||||
|
||||
for (const line of lines) {
|
||||
const match = line.match(TODO_LINE_PATTERN);
|
||||
if (!match?.groups?.content) continue;
|
||||
|
||||
const statusToken = match.groups.status;
|
||||
const status =
|
||||
statusToken === 'x'
|
||||
? TODO_STATUS_COMPLETED
|
||||
: statusToken === '~'
|
||||
? TODO_STATUS_IN_PROGRESS
|
||||
: TODO_STATUS_PENDING;
|
||||
|
||||
todos.push({ content: match.groups.content.trim(), status });
|
||||
}
|
||||
|
||||
return todos;
|
||||
}
|
||||
|
||||
function extractTodoFromArray(value: unknown[]): CodexTodoItem[] {
|
||||
return value
|
||||
.map((entry) => {
|
||||
if (typeof entry === 'string') {
|
||||
return { content: entry, status: TODO_STATUS_PENDING };
|
||||
}
|
||||
if (entry && typeof entry === 'object') {
|
||||
const record = entry as Record<string, unknown>;
|
||||
const content =
|
||||
typeof record.content === 'string'
|
||||
? record.content
|
||||
: typeof record.text === 'string'
|
||||
? record.text
|
||||
: typeof record.title === 'string'
|
||||
? record.title
|
||||
: null;
|
||||
if (!content) return null;
|
||||
const status =
|
||||
record.status === TODO_STATUS_COMPLETED ||
|
||||
record.status === TODO_STATUS_IN_PROGRESS ||
|
||||
record.status === TODO_STATUS_PENDING
|
||||
? (record.status as CodexTodoItem['status'])
|
||||
: TODO_STATUS_PENDING;
|
||||
const activeForm = typeof record.activeForm === 'string' ? record.activeForm : undefined;
|
||||
return { content, status, activeForm };
|
||||
}
|
||||
return null;
|
||||
})
|
||||
.filter((item): item is CodexTodoItem => Boolean(item));
|
||||
}
|
||||
|
||||
export function extractCodexTodoItems(item: Record<string, unknown>): CodexTodoItem[] | null {
|
||||
const todosValue = item.todos;
|
||||
if (Array.isArray(todosValue)) {
|
||||
const todos = extractTodoFromArray(todosValue);
|
||||
return todos.length > 0 ? todos : null;
|
||||
}
|
||||
|
||||
const itemsValue = item.items;
|
||||
if (Array.isArray(itemsValue)) {
|
||||
const todos = extractTodoFromArray(itemsValue);
|
||||
return todos.length > 0 ? todos : null;
|
||||
}
|
||||
|
||||
const textValue =
|
||||
typeof item.text === 'string'
|
||||
? item.text
|
||||
: typeof item.content === 'string'
|
||||
? item.content
|
||||
: null;
|
||||
if (!textValue) return null;
|
||||
|
||||
const lines = textValue
|
||||
.split('\n')
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean);
|
||||
const todos = parseTodoLines(lines);
|
||||
return todos.length > 0 ? todos : null;
|
||||
}
|
||||
|
||||
export function getCodexTodoToolName(): string {
|
||||
return TOOL_NAME_TODO;
|
||||
}
|
||||
@@ -28,7 +28,9 @@ import type {
|
||||
ModelDefinition,
|
||||
ContentBlock,
|
||||
} from './types.js';
|
||||
import { stripProviderPrefix } from '@automaker/types';
|
||||
import { validateBareModelId } from '@automaker/types';
|
||||
import { validateApiKey } from '../lib/auth-utils.js';
|
||||
import { getEffectivePermissions } from '../services/cursor-config-service.js';
|
||||
import {
|
||||
type CursorStreamEvent,
|
||||
type CursorSystemEvent,
|
||||
@@ -315,18 +317,25 @@ export class CursorProvider extends CliProvider {
|
||||
}
|
||||
|
||||
buildCliArgs(options: ExecuteOptions): string[] {
|
||||
// Extract model (strip 'cursor-' prefix if present)
|
||||
const model = stripProviderPrefix(options.model || 'auto');
|
||||
// Model is already bare (no prefix) - validated by executeQuery
|
||||
const model = options.model || 'auto';
|
||||
|
||||
// Build CLI arguments for cursor-agent
|
||||
// NOTE: Prompt is NOT included here - it's passed via stdin to avoid
|
||||
// shell escaping issues when content contains $(), backticks, etc.
|
||||
const cliArgs: string[] = [
|
||||
const cliArgs: string[] = [];
|
||||
|
||||
// If using Cursor IDE (cliPath is 'cursor' not 'cursor-agent'), add 'agent' subcommand
|
||||
if (this.cliPath && !this.cliPath.includes('cursor-agent')) {
|
||||
cliArgs.push('agent');
|
||||
}
|
||||
|
||||
cliArgs.push(
|
||||
'-p', // Print mode (non-interactive)
|
||||
'--output-format',
|
||||
'stream-json',
|
||||
'--stream-partial-output', // Real-time streaming
|
||||
];
|
||||
'--stream-partial-output' // Real-time streaming
|
||||
);
|
||||
|
||||
// Only add --force if NOT in read-only mode
|
||||
// Without --force, Cursor CLI suggests changes but doesn't apply them
|
||||
@@ -472,7 +481,9 @@ export class CursorProvider extends CliProvider {
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Override CLI detection to add Cursor-specific versions directory check
|
||||
* Override CLI detection to add Cursor-specific checks:
|
||||
* 1. Versions directory for cursor-agent installations
|
||||
* 2. Cursor IDE with 'cursor agent' subcommand support
|
||||
*/
|
||||
protected detectCli(): CliDetectionResult {
|
||||
// First try standard detection (PATH, common paths, WSL)
|
||||
@@ -507,6 +518,39 @@ export class CursorProvider extends CliProvider {
|
||||
}
|
||||
}
|
||||
|
||||
// If cursor-agent not found, try to find 'cursor' IDE and use 'cursor agent' subcommand
|
||||
// The Cursor IDE includes the agent as a subcommand: cursor agent
|
||||
if (process.platform !== 'win32') {
|
||||
const cursorPaths = [
|
||||
'/usr/bin/cursor',
|
||||
'/usr/local/bin/cursor',
|
||||
path.join(os.homedir(), '.local/bin/cursor'),
|
||||
'/opt/cursor/cursor',
|
||||
];
|
||||
|
||||
for (const cursorPath of cursorPaths) {
|
||||
if (fs.existsSync(cursorPath)) {
|
||||
// Verify cursor agent subcommand works
|
||||
try {
|
||||
execSync(`"${cursorPath}" agent --version`, {
|
||||
encoding: 'utf8',
|
||||
timeout: 5000,
|
||||
stdio: 'pipe',
|
||||
});
|
||||
logger.debug(`Using cursor agent via Cursor IDE: ${cursorPath}`);
|
||||
// Return cursor path but we'll use 'cursor agent' subcommand
|
||||
return {
|
||||
cliPath: cursorPath,
|
||||
useWsl: false,
|
||||
strategy: 'native',
|
||||
};
|
||||
} catch {
|
||||
// cursor agent subcommand doesn't work, try next path
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -605,6 +649,10 @@ export class CursorProvider extends CliProvider {
|
||||
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
|
||||
this.ensureCliDetected();
|
||||
|
||||
// Validate that model doesn't have a provider prefix
|
||||
// AgentService should strip prefixes before passing to providers
|
||||
validateBareModelId(options.model, 'CursorProvider');
|
||||
|
||||
if (!this.cliPath) {
|
||||
throw this.createError(
|
||||
CursorErrorCode.NOT_INSTALLED,
|
||||
@@ -642,6 +690,9 @@ export class CursorProvider extends CliProvider {
|
||||
|
||||
logger.debug(`CursorProvider.executeQuery called with model: "${options.model}"`);
|
||||
|
||||
// Get effective permissions for this project
|
||||
const effectivePermissions = await getEffectivePermissions(options.cwd || process.cwd());
|
||||
|
||||
// Debug: log raw events when AUTOMAKER_DEBUG_RAW_OUTPUT is enabled
|
||||
const debugRawEvents =
|
||||
process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === 'true' ||
|
||||
@@ -838,9 +889,16 @@ export class CursorProvider extends CliProvider {
|
||||
});
|
||||
return result;
|
||||
}
|
||||
const result = execSync(`"${this.cliPath}" --version`, {
|
||||
|
||||
// If using Cursor IDE, use 'cursor agent --version'
|
||||
const versionCmd = this.cliPath.includes('cursor-agent')
|
||||
? `"${this.cliPath}" --version`
|
||||
: `"${this.cliPath}" agent --version`;
|
||||
|
||||
const result = execSync(versionCmd, {
|
||||
encoding: 'utf8',
|
||||
timeout: 5000,
|
||||
stdio: 'pipe',
|
||||
}).trim();
|
||||
return result;
|
||||
} catch {
|
||||
@@ -857,8 +915,13 @@ export class CursorProvider extends CliProvider {
|
||||
return { authenticated: false, method: 'none' };
|
||||
}
|
||||
|
||||
// Check for API key in environment
|
||||
// Check for API key in environment with validation
|
||||
if (process.env.CURSOR_API_KEY) {
|
||||
const validation = validateApiKey(process.env.CURSOR_API_KEY, 'cursor');
|
||||
if (!validation.isValid) {
|
||||
logger.warn('Cursor API key validation failed:', validation.error);
|
||||
return { authenticated: false, method: 'api_key', error: validation.error };
|
||||
}
|
||||
return { authenticated: true, method: 'api_key' };
|
||||
}
|
||||
|
||||
|
||||
@@ -25,5 +25,8 @@ export { ClaudeProvider } from './claude-provider.js';
export { CursorProvider, CursorErrorCode, CursorError } from './cursor-provider.js';
export { CursorConfigManager } from './cursor-config-manager.js';

// OpenCode provider
export { OpencodeProvider } from './opencode-provider.js';

// Provider factory
export { ProviderFactory } from './provider-factory.js';
apps/server/src/providers/opencode-provider.ts (new file, 666 lines)
@@ -0,0 +1,666 @@
|
||||
/**
|
||||
* OpenCode Provider - Executes queries using opencode CLI
|
||||
*
|
||||
* Extends CliProvider with OpenCode-specific configuration:
|
||||
* - Event normalization for OpenCode's stream-json format
|
||||
* - Model definitions for anthropic, openai, and google models
|
||||
* - NPX-based Windows execution strategy
|
||||
* - Platform-specific npm global installation paths
|
||||
*
|
||||
* Spawns the opencode CLI with --output-format stream-json for streaming responses.
|
||||
*/
|
||||
|
||||
import * as path from 'path';
|
||||
import * as os from 'os';
|
||||
import { CliProvider, type CliSpawnConfig } from './cli-provider.js';
|
||||
import type {
|
||||
ProviderConfig,
|
||||
ExecuteOptions,
|
||||
ProviderMessage,
|
||||
ModelDefinition,
|
||||
InstallationStatus,
|
||||
ContentBlock,
|
||||
} from '@automaker/types';
|
||||
import { stripProviderPrefix } from '@automaker/types';
|
||||
import { type SubprocessOptions, getOpenCodeAuthIndicators } from '@automaker/platform';
|
||||
|
||||
// =============================================================================
|
||||
// OpenCode Auth Types
|
||||
// =============================================================================
|
||||
|
||||
export interface OpenCodeAuthStatus {
|
||||
authenticated: boolean;
|
||||
method: 'api_key' | 'oauth' | 'none';
|
||||
hasOAuthToken?: boolean;
|
||||
hasApiKey?: boolean;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// OpenCode Stream Event Types
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* Base interface for all OpenCode stream events
|
||||
*/
|
||||
interface OpenCodeBaseEvent {
|
||||
/** Event type identifier */
|
||||
type: string;
|
||||
/** Optional session identifier */
|
||||
session_id?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Text delta event - Incremental text output from the model
|
||||
*/
|
||||
export interface OpenCodeTextDeltaEvent extends OpenCodeBaseEvent {
|
||||
type: 'text-delta';
|
||||
/** The incremental text content */
|
||||
text: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Text end event - Signals completion of text generation
|
||||
*/
|
||||
export interface OpenCodeTextEndEvent extends OpenCodeBaseEvent {
|
||||
type: 'text-end';
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool call event - Request to execute a tool
|
||||
*/
|
||||
export interface OpenCodeToolCallEvent extends OpenCodeBaseEvent {
|
||||
type: 'tool-call';
|
||||
/** Unique identifier for this tool call */
|
||||
call_id?: string;
|
||||
/** Tool name to invoke */
|
||||
name: string;
|
||||
/** Arguments to pass to the tool */
|
||||
args: unknown;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool result event - Output from a tool execution
|
||||
*/
|
||||
export interface OpenCodeToolResultEvent extends OpenCodeBaseEvent {
|
||||
type: 'tool-result';
|
||||
/** The tool call ID this result corresponds to */
|
||||
call_id?: string;
|
||||
/** Output from the tool execution */
|
||||
output: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool error event - Tool execution failed
|
||||
*/
|
||||
export interface OpenCodeToolErrorEvent extends OpenCodeBaseEvent {
|
||||
type: 'tool-error';
|
||||
/** The tool call ID that failed */
|
||||
call_id?: string;
|
||||
/** Error message describing the failure */
|
||||
error: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Start step event - Begins an agentic loop iteration
|
||||
*/
|
||||
export interface OpenCodeStartStepEvent extends OpenCodeBaseEvent {
|
||||
type: 'start-step';
|
||||
/** Step number in the agentic loop */
|
||||
step?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Finish step event - Completes an agentic loop iteration
|
||||
*/
|
||||
export interface OpenCodeFinishStepEvent extends OpenCodeBaseEvent {
|
||||
type: 'finish-step';
|
||||
/** Step number that completed */
|
||||
step?: number;
|
||||
/** Whether the step completed successfully */
|
||||
success?: boolean;
|
||||
/** Optional result data */
|
||||
result?: string;
|
||||
/** Optional error if step failed */
|
||||
error?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Union type of all OpenCode stream events
|
||||
*/
|
||||
export type OpenCodeStreamEvent =
|
||||
| OpenCodeTextDeltaEvent
|
||||
| OpenCodeTextEndEvent
|
||||
| OpenCodeToolCallEvent
|
||||
| OpenCodeToolResultEvent
|
||||
| OpenCodeToolErrorEvent
|
||||
| OpenCodeStartStepEvent
|
||||
| OpenCodeFinishStepEvent;
|
||||
|
||||
// =============================================================================
|
||||
// Tool Use ID Generation
|
||||
// =============================================================================
|
||||
|
||||
/** Counter for generating unique tool use IDs when call_id is not provided */
|
||||
let toolUseIdCounter = 0;
|
||||
|
||||
/**
|
||||
* Generate a unique tool use ID for tool calls without explicit IDs
|
||||
*/
|
||||
function generateToolUseId(): string {
|
||||
toolUseIdCounter += 1;
|
||||
return `opencode-tool-${toolUseIdCounter}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the tool use ID counter (useful for testing)
|
||||
*/
|
||||
export function resetToolUseIdCounter(): void {
|
||||
toolUseIdCounter = 0;
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Provider Implementation
|
||||
// =============================================================================
|
||||
|
||||
/**
|
||||
* OpencodeProvider - Integrates opencode CLI as an AI provider
|
||||
*
|
||||
* OpenCode is an npm-distributed CLI tool that provides access to
|
||||
* multiple AI model providers through a unified interface.
|
||||
*/
|
||||
export class OpencodeProvider extends CliProvider {
|
||||
constructor(config: ProviderConfig = {}) {
|
||||
super(config);
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
// CliProvider Abstract Method Implementations
|
||||
// ==========================================================================
|
||||
|
||||
getName(): string {
|
||||
return 'opencode';
|
||||
}
|
||||
|
||||
getCliName(): string {
|
||||
return 'opencode';
|
||||
}
|
||||
|
||||
getSpawnConfig(): CliSpawnConfig {
|
||||
return {
|
||||
windowsStrategy: 'npx',
|
||||
npxPackage: 'opencode-ai@latest',
|
||||
commonPaths: {
|
||||
linux: [
|
||||
path.join(os.homedir(), '.opencode/bin/opencode'),
|
||||
path.join(os.homedir(), '.npm-global/bin/opencode'),
|
||||
'/usr/local/bin/opencode',
|
||||
'/usr/bin/opencode',
|
||||
path.join(os.homedir(), '.local/bin/opencode'),
|
||||
],
|
||||
darwin: [
|
||||
path.join(os.homedir(), '.opencode/bin/opencode'),
|
||||
path.join(os.homedir(), '.npm-global/bin/opencode'),
|
||||
'/usr/local/bin/opencode',
|
||||
'/opt/homebrew/bin/opencode',
|
||||
path.join(os.homedir(), '.local/bin/opencode'),
|
||||
],
|
||||
win32: [
|
||||
path.join(os.homedir(), '.opencode', 'bin', 'opencode.exe'),
|
||||
path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'opencode.cmd'),
|
||||
path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'opencode'),
|
||||
path.join(process.env.APPDATA || '', 'npm', 'opencode.cmd'),
|
||||
],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Build CLI arguments for the `opencode run` command
|
||||
*
|
||||
* Arguments built:
|
||||
* - 'run' subcommand for executing queries
|
||||
* - '--format', 'stream-json' for JSONL streaming output
|
||||
* - '-q' / '--quiet' to suppress spinner and interactive elements
|
||||
* - '-c', '<cwd>' for working directory
|
||||
* - '--model', '<model>' for model selection (if specified)
|
||||
* - '-' as final arg to read prompt from stdin
|
||||
*
|
||||
* The prompt is NOT included in CLI args - it's passed via stdin to avoid
|
||||
* shell escaping issues with special characters in content.
|
||||
*
|
||||
* @param options - Execution options containing model, cwd, etc.
|
||||
* @returns Array of CLI arguments for opencode run
|
||||
*/
|
||||
buildCliArgs(options: ExecuteOptions): string[] {
|
||||
const args: string[] = ['run'];
|
||||
|
||||
// Add streaming JSON output format for JSONL parsing
|
||||
args.push('--format', 'stream-json');
|
||||
|
||||
// Suppress spinner and interactive elements for non-TTY usage
|
||||
args.push('-q');
|
||||
|
||||
// Set working directory
|
||||
if (options.cwd) {
|
||||
args.push('-c', options.cwd);
|
||||
}
|
||||
|
||||
// Handle model selection
|
||||
// Strip 'opencode-' prefix if present, OpenCode uses format like 'anthropic/claude-sonnet-4-5'
|
||||
if (options.model) {
|
||||
const model = stripProviderPrefix(options.model);
|
||||
args.push('--model', model);
|
||||
}
|
||||
|
||||
// Use '-' to indicate reading prompt from stdin
|
||||
// This avoids shell escaping issues with special characters
|
||||
args.push('-');
|
||||
|
||||
return args;
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
// Prompt Handling
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Extract prompt text from ExecuteOptions for passing via stdin
|
||||
*
|
||||
* Handles both string prompts and array-based prompts with content blocks.
|
||||
* For array prompts with images, extracts only text content (images would
|
||||
* need separate handling via file paths if OpenCode supports them).
|
||||
*
|
||||
* @param options - Execution options containing the prompt
|
||||
* @returns Plain text prompt string
|
||||
*/
|
||||
private extractPromptText(options: ExecuteOptions): string {
|
||||
if (typeof options.prompt === 'string') {
|
||||
return options.prompt;
|
||||
}
|
||||
|
||||
// Array-based prompt - extract text content
|
||||
if (Array.isArray(options.prompt)) {
|
||||
return options.prompt
|
||||
.filter((block) => block.type === 'text' && block.text)
|
||||
.map((block) => block.text)
|
||||
.join('\n');
|
||||
}
|
||||
|
||||
throw new Error('Invalid prompt format: expected string or content block array');
|
||||
}
|
||||
|
||||
/**
|
||||
* Build subprocess options with stdin data for prompt
|
||||
*
|
||||
* Extends the base class method to add stdinData containing the prompt.
|
||||
* This allows passing prompts via stdin instead of CLI arguments,
|
||||
* avoiding shell escaping issues with special characters.
|
||||
*
|
||||
* @param options - Execution options
|
||||
* @param cliArgs - CLI arguments from buildCliArgs
|
||||
* @returns SubprocessOptions with stdinData set
|
||||
*/
|
||||
protected buildSubprocessOptions(options: ExecuteOptions, cliArgs: string[]): SubprocessOptions {
|
||||
const subprocessOptions = super.buildSubprocessOptions(options, cliArgs);
|
||||
|
||||
// Pass prompt via stdin to avoid shell interpretation of special characters
|
||||
// like $(), backticks, quotes, etc. that may appear in prompts or file content
|
||||
subprocessOptions.stdinData = this.extractPromptText(options);
|
||||
|
||||
return subprocessOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize a raw CLI event to ProviderMessage format
|
||||
*
|
||||
* Maps OpenCode event types to the standard ProviderMessage structure:
|
||||
* - text-delta -> type: 'assistant', content with type: 'text'
|
||||
* - text-end -> null (informational, no message needed)
|
||||
* - tool-call -> type: 'assistant', content with type: 'tool_use'
|
||||
* - tool-result -> type: 'assistant', content with type: 'tool_result'
|
||||
* - tool-error -> type: 'error'
|
||||
* - start-step -> null (informational, no message needed)
|
||||
* - finish-step with success -> type: 'result', subtype: 'success'
|
||||
* - finish-step with error -> type: 'error'
|
||||
*
|
||||
* @param event - Raw event from OpenCode CLI JSONL output
|
||||
* @returns Normalized ProviderMessage or null to skip the event
|
||||
*/
|
||||
normalizeEvent(event: unknown): ProviderMessage | null {
|
||||
if (!event || typeof event !== 'object') {
|
||||
return null;
|
||||
}
|
||||
|
||||
const openCodeEvent = event as OpenCodeStreamEvent;
|
||||
|
||||
switch (openCodeEvent.type) {
|
||||
case 'text-delta': {
|
||||
const textEvent = openCodeEvent as OpenCodeTextDeltaEvent;
|
||||
|
||||
// Skip empty text deltas
|
||||
if (!textEvent.text) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const content: ContentBlock[] = [
|
||||
{
|
||||
type: 'text',
|
||||
text: textEvent.text,
|
||||
},
|
||||
];
|
||||
|
||||
return {
|
||||
type: 'assistant',
|
||||
session_id: textEvent.session_id,
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
case 'text-end': {
|
||||
// Text end is informational - no message needed
|
||||
return null;
|
||||
}
|
||||
|
||||
case 'tool-call': {
|
||||
const toolEvent = openCodeEvent as OpenCodeToolCallEvent;
|
||||
|
||||
// Generate a tool use ID if not provided
|
||||
const toolUseId = toolEvent.call_id || generateToolUseId();
|
||||
|
||||
const content: ContentBlock[] = [
|
||||
{
|
||||
type: 'tool_use',
|
||||
name: toolEvent.name,
|
||||
tool_use_id: toolUseId,
|
||||
input: toolEvent.args,
|
||||
},
|
||||
];
|
||||
|
||||
return {
|
||||
type: 'assistant',
|
||||
session_id: toolEvent.session_id,
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
case 'tool-result': {
|
||||
const resultEvent = openCodeEvent as OpenCodeToolResultEvent;
|
||||
|
||||
const content: ContentBlock[] = [
|
||||
{
|
||||
type: 'tool_result',
|
||||
tool_use_id: resultEvent.call_id,
|
||||
content: resultEvent.output,
|
||||
},
|
||||
];
|
||||
|
||||
return {
|
||||
type: 'assistant',
|
||||
session_id: resultEvent.session_id,
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
case 'tool-error': {
|
||||
const errorEvent = openCodeEvent as OpenCodeToolErrorEvent;
|
||||
|
||||
return {
|
||||
type: 'error',
|
||||
session_id: errorEvent.session_id,
|
||||
error: errorEvent.error || 'Tool execution failed',
|
||||
};
|
||||
}
|
||||
|
||||
case 'start-step': {
|
||||
// Start step is informational - no message needed
|
||||
return null;
|
||||
}
|
||||
|
||||
case 'finish-step': {
|
||||
const finishEvent = openCodeEvent as OpenCodeFinishStepEvent;
|
||||
|
||||
// Check if the step failed
|
||||
if (finishEvent.success === false || finishEvent.error) {
|
||||
return {
|
||||
type: 'error',
|
||||
session_id: finishEvent.session_id,
|
||||
error: finishEvent.error || 'Step execution failed',
|
||||
};
|
||||
}
|
||||
|
||||
// Successful completion
|
||||
return {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
session_id: finishEvent.session_id,
|
||||
result: finishEvent.result,
|
||||
};
|
||||
}
|
||||
|
||||
default: {
|
||||
// Unknown event type - skip it
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
// Model Configuration
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Get available models for OpenCode
|
||||
*
|
||||
* Returns model definitions for supported AI providers:
|
||||
* - Anthropic Claude models (Sonnet, Opus, Haiku)
|
||||
* - OpenAI GPT-4o
|
||||
* - Google Gemini 2.5 Pro
|
||||
*/
|
||||
getAvailableModels(): ModelDefinition[] {
|
||||
return [
|
||||
// OpenCode Free Tier Models
|
||||
{
|
||||
id: 'opencode/big-pickle',
|
||||
name: 'Big Pickle (Free)',
|
||||
modelString: 'opencode/big-pickle',
|
||||
provider: 'opencode',
|
||||
description: 'OpenCode free tier model - great for general coding',
|
||||
supportsTools: true,
|
||||
supportsVision: false,
|
||||
tier: 'basic',
|
||||
},
|
||||
{
|
||||
id: 'opencode/gpt-5-nano',
|
||||
name: 'GPT-5 Nano (Free)',
|
||||
modelString: 'opencode/gpt-5-nano',
|
||||
provider: 'opencode',
|
||||
description: 'Fast and lightweight free tier model',
|
||||
supportsTools: true,
|
||||
supportsVision: false,
|
||||
tier: 'basic',
|
||||
},
|
||||
{
|
||||
id: 'opencode/grok-code',
|
||||
name: 'Grok Code (Free)',
|
||||
modelString: 'opencode/grok-code',
|
||||
provider: 'opencode',
|
||||
description: 'OpenCode free tier Grok model for coding',
|
||||
supportsTools: true,
|
||||
supportsVision: false,
|
||||
tier: 'basic',
|
||||
},
|
||||
// Amazon Bedrock - Claude Models
|
||||
{
|
||||
id: 'amazon-bedrock/anthropic.claude-sonnet-4-5-20250929-v1:0',
|
||||
name: 'Claude Sonnet 4.5 (Bedrock)',
|
||||
modelString: 'amazon-bedrock/anthropic.claude-sonnet-4-5-20250929-v1:0',
|
||||
provider: 'opencode',
|
||||
description: 'Latest Claude Sonnet via AWS Bedrock - fast and intelligent',
|
||||
supportsTools: true,
|
||||
supportsVision: true,
|
||||
tier: 'premium',
|
||||
default: true,
|
||||
},
|
||||
{
|
||||
id: 'amazon-bedrock/anthropic.claude-opus-4-5-20251101-v1:0',
|
||||
name: 'Claude Opus 4.5 (Bedrock)',
|
||||
modelString: 'amazon-bedrock/anthropic.claude-opus-4-5-20251101-v1:0',
|
||||
provider: 'opencode',
|
||||
description: 'Most capable Claude model via AWS Bedrock',
|
||||
supportsTools: true,
|
||||
supportsVision: true,
|
||||
tier: 'premium',
|
||||
},
|
||||
{
|
||||
id: 'amazon-bedrock/anthropic.claude-haiku-4-5-20251001-v1:0',
|
||||
name: 'Claude Haiku 4.5 (Bedrock)',
|
||||
modelString: 'amazon-bedrock/anthropic.claude-haiku-4-5-20251001-v1:0',
|
||||
provider: 'opencode',
|
||||
description: 'Fastest Claude model via AWS Bedrock',
|
||||
supportsTools: true,
|
||||
supportsVision: true,
|
||||
tier: 'standard',
|
||||
},
|
||||
// Amazon Bedrock - DeepSeek Models
|
||||
{
|
||||
id: 'amazon-bedrock/deepseek.r1-v1:0',
|
||||
name: 'DeepSeek R1 (Bedrock)',
|
||||
modelString: 'amazon-bedrock/deepseek.r1-v1:0',
|
||||
provider: 'opencode',
|
||||
description: 'DeepSeek R1 reasoning model - excellent for coding',
|
||||
supportsTools: true,
|
||||
supportsVision: false,
|
||||
tier: 'premium',
|
||||
},
|
||||
// Amazon Bedrock - Amazon Nova Models
|
||||
{
|
||||
id: 'amazon-bedrock/amazon.nova-pro-v1:0',
|
||||
name: 'Amazon Nova Pro (Bedrock)',
|
||||
modelString: 'amazon-bedrock/amazon.nova-pro-v1:0',
|
||||
provider: 'opencode',
|
||||
description: 'Amazon Nova Pro - balanced performance',
|
||||
supportsTools: true,
|
||||
supportsVision: true,
|
||||
tier: 'standard',
|
||||
},
|
||||
// Amazon Bedrock - Meta Llama Models
|
||||
{
|
||||
id: 'amazon-bedrock/meta.llama4-maverick-17b-instruct-v1:0',
|
||||
name: 'Llama 4 Maverick 17B (Bedrock)',
|
||||
modelString: 'amazon-bedrock/meta.llama4-maverick-17b-instruct-v1:0',
|
||||
provider: 'opencode',
|
||||
description: 'Meta Llama 4 Maverick via AWS Bedrock',
|
||||
supportsTools: true,
|
||||
supportsVision: false,
|
||||
tier: 'standard',
|
||||
},
|
||||
// Amazon Bedrock - Qwen Models
|
||||
{
|
||||
id: 'amazon-bedrock/qwen.qwen3-coder-480b-a35b-v1:0',
|
||||
name: 'Qwen3 Coder 480B (Bedrock)',
|
||||
modelString: 'amazon-bedrock/qwen.qwen3-coder-480b-a35b-v1:0',
|
||||
provider: 'opencode',
|
||||
description: 'Qwen3 Coder 480B - excellent for coding',
|
||||
supportsTools: true,
|
||||
supportsVision: false,
|
||||
tier: 'premium',
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
// Feature Support
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Check if a feature is supported by OpenCode
|
||||
*
|
||||
* Supported features:
|
||||
* - tools: Function calling / tool use
|
||||
* - text: Text generation
|
||||
* - vision: Image understanding
|
||||
*/
|
||||
supportsFeature(feature: string): boolean {
|
||||
const supportedFeatures = ['tools', 'text', 'vision'];
|
||||
return supportedFeatures.includes(feature);
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
// Authentication
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Check authentication status for OpenCode CLI
|
||||
*
|
||||
* Checks for authentication via:
|
||||
* - OAuth token in auth file
|
||||
* - API key in auth file
|
||||
*/
|
||||
async checkAuth(): Promise<OpenCodeAuthStatus> {
|
||||
const authIndicators = await getOpenCodeAuthIndicators();
|
||||
|
||||
// Check for OAuth token
|
||||
if (authIndicators.hasOAuthToken) {
|
||||
return {
|
||||
authenticated: true,
|
||||
method: 'oauth',
|
||||
hasOAuthToken: true,
|
||||
hasApiKey: authIndicators.hasApiKey,
|
||||
};
|
||||
}
|
||||
|
||||
// Check for API key
|
||||
if (authIndicators.hasApiKey) {
|
||||
return {
|
||||
authenticated: true,
|
||||
method: 'api_key',
|
||||
hasOAuthToken: false,
|
||||
hasApiKey: true,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
hasOAuthToken: false,
|
||||
hasApiKey: false,
|
||||
};
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
// Installation Detection
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Detect OpenCode installation status
|
||||
*
|
||||
* Checks if the opencode CLI is available either through:
|
||||
* - Direct installation (npm global)
|
||||
* - NPX (fallback on Windows)
|
||||
* Also checks authentication status.
|
||||
*/
|
||||
async detectInstallation(): Promise<InstallationStatus> {
|
||||
this.ensureCliDetected();
|
||||
|
||||
const installed = await this.isInstalled();
|
||||
const auth = await this.checkAuth();
|
||||
|
||||
return {
|
||||
installed,
|
||||
path: this.cliPath || undefined,
|
||||
method: this.detectedStrategy === 'npx' ? 'npm' : 'cli',
|
||||
authenticated: auth.authenticated,
|
||||
hasApiKey: auth.hasApiKey,
|
||||
hasOAuthToken: auth.hasOAuthToken,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,27 @@
|
||||
|
||||
import { BaseProvider } from './base-provider.js';
|
||||
import type { InstallationStatus, ModelDefinition } from './types.js';
|
||||
import { isCursorModel, type ModelProvider } from '@automaker/types';
|
||||
import { isCursorModel, isCodexModel, isOpencodeModel, type ModelProvider } from '@automaker/types';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
const DISCONNECTED_MARKERS: Record<string, string> = {
|
||||
claude: '.claude-disconnected',
|
||||
codex: '.codex-disconnected',
|
||||
cursor: '.cursor-disconnected',
|
||||
opencode: '.opencode-disconnected',
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a provider CLI is disconnected from the app
|
||||
*/
|
||||
export function isProviderDisconnected(providerName: string): boolean {
|
||||
const markerFile = DISCONNECTED_MARKERS[providerName.toLowerCase()];
|
||||
if (!markerFile) return false;
|
||||
|
||||
const markerPath = path.join(process.cwd(), '.automaker', markerFile);
|
||||
return fs.existsSync(markerPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Provider registration entry
|
||||
@@ -75,10 +95,26 @@ export class ProviderFactory {
|
||||
* Get the appropriate provider for a given model ID
|
||||
*
|
||||
* @param modelId Model identifier (e.g., "claude-opus-4-5-20251101", "cursor-gpt-4o", "cursor-auto")
|
||||
* @param options Optional settings
|
||||
* @param options.throwOnDisconnected Throw error if provider is disconnected (default: true)
|
||||
* @returns Provider instance for the model
|
||||
* @throws Error if provider is disconnected and throwOnDisconnected is true
|
||||
*/
|
||||
static getProviderForModel(modelId: string): BaseProvider {
|
||||
const providerName = this.getProviderNameForModel(modelId);
|
||||
static getProviderForModel(
|
||||
modelId: string,
|
||||
options: { throwOnDisconnected?: boolean } = {}
|
||||
): BaseProvider {
|
||||
const { throwOnDisconnected = true } = options;
|
||||
const providerName = this.getProviderForModelName(modelId);
|
||||
|
||||
// Check if provider is disconnected
|
||||
if (throwOnDisconnected && isProviderDisconnected(providerName)) {
|
||||
throw new Error(
|
||||
`${providerName.charAt(0).toUpperCase() + providerName.slice(1)} CLI is disconnected from the app. ` +
|
||||
`Please go to Settings > Providers and click "Sign In" to reconnect.`
|
||||
);
|
||||
}
|
||||
|
||||
const provider = this.getProviderByName(providerName);
|
||||
|
||||
if (!provider) {
|
||||
@@ -93,6 +129,35 @@ export class ProviderFactory {
|
||||
return provider;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the provider name for a given model ID (without creating provider instance)
|
||||
*/
|
||||
static getProviderForModelName(modelId: string): string {
|
||||
const lowerModel = modelId.toLowerCase();
|
||||
|
||||
// Get all registered providers sorted by priority (descending)
|
||||
const registrations = Array.from(providerRegistry.entries()).sort(
|
||||
([, a], [, b]) => (b.priority ?? 0) - (a.priority ?? 0)
|
||||
);
|
||||
|
||||
// Check each provider's canHandleModel function
|
||||
for (const [name, reg] of registrations) {
|
||||
if (reg.canHandleModel?.(lowerModel)) {
|
||||
return name;
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: Check for explicit prefixes
|
||||
for (const [name] of registrations) {
|
||||
if (lowerModel.startsWith(`${name}-`)) {
|
||||
return name;
|
||||
}
|
||||
}
|
||||
|
||||
// Default to claude (first registered provider or claude)
|
||||
return 'claude';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all available providers
|
||||
*/
|
||||
@@ -156,6 +221,41 @@ export class ProviderFactory {
|
||||
static getRegisteredProviderNames(): string[] {
|
||||
return Array.from(providerRegistry.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a specific model supports vision/image input
|
||||
*
|
||||
* @param modelId Model identifier
|
||||
* @returns Whether the model supports vision (defaults to true if model not found)
|
||||
*/
|
||||
static modelSupportsVision(modelId: string): boolean {
|
||||
const provider = this.getProviderForModel(modelId);
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
// Find the model in the available models list
|
||||
for (const model of models) {
|
||||
if (
|
||||
model.id === modelId ||
|
||||
model.modelString === modelId ||
|
||||
model.id.endsWith(`-${modelId}`) ||
|
||||
model.modelString.endsWith(`-${modelId}`) ||
|
||||
model.modelString === modelId.replace(/^(claude|cursor|codex)-/, '') ||
|
||||
model.modelString === modelId.replace(/-(claude|cursor|codex)$/, '')
|
||||
) {
|
||||
return model.supportsVision ?? true;
|
||||
}
|
||||
}
|
||||
|
||||
// Also try exact match with model string from provider's model map
|
||||
for (const model of models) {
|
||||
if (model.modelString === modelId || model.id === modelId) {
|
||||
return model.supportsVision ?? true;
|
||||
}
|
||||
}
|
||||
|
||||
// Default to true (Claude SDK supports vision by default)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
@@ -165,6 +265,8 @@ export class ProviderFactory {
|
||||
// Import providers for registration side-effects
|
||||
import { ClaudeProvider } from './claude-provider.js';
|
||||
import { CursorProvider } from './cursor-provider.js';
|
||||
import { CodexProvider } from './codex-provider.js';
|
||||
import { OpencodeProvider } from './opencode-provider.js';
|
||||
|
||||
// Register Claude provider
|
||||
registerProvider('claude', {
|
||||
@@ -184,3 +286,18 @@ registerProvider('cursor', {
|
||||
canHandleModel: (model: string) => isCursorModel(model),
|
||||
priority: 10, // Higher priority - check Cursor models first
|
||||
});
|
||||
|
||||
// Register Codex provider
|
||||
registerProvider('codex', {
|
||||
factory: () => new CodexProvider(),
|
||||
aliases: ['openai'],
|
||||
canHandleModel: (model: string) => isCodexModel(model),
|
||||
priority: 5, // Medium priority - check after Cursor but before Claude
|
||||
});
|
||||
|
||||
// Register OpenCode provider
|
||||
registerProvider('opencode', {
|
||||
factory: () => new OpencodeProvider(),
|
||||
canHandleModel: (model: string) => isOpencodeModel(model),
|
||||
priority: 3, // Between codex (5) and claude (0)
|
||||
});
|
||||
|
||||
@@ -9,7 +9,7 @@ import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import * as secureFs from '../../lib/secure-fs.js';
|
||||
import type { EventEmitter } from '../../lib/events.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createFeatureGenerationOptions } from '../../lib/sdk-options.js';
|
||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||
@@ -124,6 +124,8 @@ IMPORTANT: Do not ask for clarification. The specification is provided above. Ge
|
||||
logger.info('[FeatureGeneration] Using Cursor provider');
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// Add explicit instructions for Cursor to return JSON in response
|
||||
const cursorPrompt = `${prompt}
|
||||
@@ -135,7 +137,7 @@ CRITICAL INSTRUCTIONS:
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
|
||||
@@ -16,7 +16,7 @@ import {
|
||||
type SpecOutput,
|
||||
} from '../../lib/app-spec-format.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createSpecGenerationOptions } from '../../lib/sdk-options.js';
|
||||
import { extractJson } from '../../lib/json-extractor.js';
|
||||
@@ -118,6 +118,8 @@ ${getStructuredSpecPromptInstruction()}`;
|
||||
logger.info('[SpecGeneration] Using Cursor provider');
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// For Cursor, include the JSON schema in the prompt with clear instructions
|
||||
// to return JSON in the response (not write to a file)
|
||||
@@ -134,7 +136,7 @@ Your entire response should be valid JSON starting with { and ending with }. No
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
|
||||
@@ -229,12 +229,13 @@ export function createAuthRoutes(): Router {
|
||||
await invalidateSession(sessionToken);
|
||||
}
|
||||
|
||||
// Clear the cookie
|
||||
res.clearCookie(cookieName, {
|
||||
httpOnly: true,
|
||||
secure: process.env.NODE_ENV === 'production',
|
||||
sameSite: 'strict',
|
||||
path: '/',
|
||||
// Clear the cookie by setting it to empty with immediate expiration
|
||||
// Using res.cookie() with maxAge: 0 is more reliable than clearCookie()
|
||||
// in cross-origin development environments
|
||||
res.cookie(cookieName, '', {
|
||||
...getSessionCookieOptions(),
|
||||
maxAge: 0,
|
||||
expires: new Date(0),
|
||||
});
|
||||
|
||||
res.json({
|
||||
|
||||
@@ -31,7 +31,9 @@ export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
|
||||
// Start follow-up in background
|
||||
// followUpFeature derives workDir from feature.branchName
|
||||
autoModeService
|
||||
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? true)
|
||||
// Default to false to match run-feature/resume-feature behavior.
|
||||
// Worktrees should only be used when explicitly enabled by the user.
|
||||
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
|
||||
.catch((error) => {
|
||||
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
|
||||
})
|
||||
|
||||
@@ -7,7 +7,12 @@
|
||||
|
||||
import type { EventEmitter } from '../../lib/events.js';
|
||||
import type { Feature, BacklogPlanResult, BacklogChange, DependencyUpdate } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, type ThinkingLevel } from '@automaker/types';
|
||||
import {
|
||||
DEFAULT_PHASE_MODELS,
|
||||
isCursorModel,
|
||||
stripProviderPrefix,
|
||||
type ThinkingLevel,
|
||||
} from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||
@@ -120,6 +125,8 @@ export async function generateBacklogPlan(
|
||||
logger.info('[BacklogPlan] Using model:', effectiveModel);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(effectiveModel);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(effectiveModel);
|
||||
|
||||
// Get autoLoadClaudeMd setting
|
||||
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
||||
@@ -151,7 +158,7 @@ ${userPrompt}`;
|
||||
// Execute the query
|
||||
const stream = provider.executeQuery({
|
||||
prompt: finalPrompt,
|
||||
model: effectiveModel,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
systemPrompt: finalSystemPrompt,
|
||||
maxTurns: 1,
|
||||
|
||||
@@ -13,7 +13,10 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
|
||||
// Check if Claude CLI is available first
|
||||
const isAvailable = await service.isAvailable();
|
||||
if (!isAvailable) {
|
||||
res.status(503).json({
|
||||
// IMPORTANT: This endpoint is behind Automaker session auth already.
|
||||
// Use a 200 + error payload for Claude CLI issues so the UI doesn't
|
||||
// interpret it as an invalid Automaker session (401/403 triggers logout).
|
||||
res.status(200).json({
|
||||
error: 'Claude CLI not found',
|
||||
message: "Please install Claude Code CLI and run 'claude login' to authenticate",
|
||||
});
|
||||
@@ -26,12 +29,13 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
|
||||
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||
|
||||
if (message.includes('Authentication required') || message.includes('token_expired')) {
|
||||
res.status(401).json({
|
||||
// Do NOT use 401/403 here: that status code is reserved for Automaker session auth.
|
||||
res.status(200).json({
|
||||
error: 'Authentication required',
|
||||
message: "Please run 'claude login' to authenticate",
|
||||
});
|
||||
} else if (message.includes('timed out')) {
|
||||
res.status(504).json({
|
||||
res.status(200).json({
|
||||
error: 'Command timed out',
|
||||
message: 'The Claude CLI took too long to respond',
|
||||
});
|
||||
|
||||
apps/server/src/routes/codex/index.ts (new file, 56 lines)
@@ -0,0 +1,56 @@
import { Router, Request, Response } from 'express';
import { CodexUsageService } from '../../services/codex-usage-service.js';
import { createLogger } from '@automaker/utils';

const logger = createLogger('Codex');

export function createCodexRoutes(service: CodexUsageService): Router {
  const router = Router();

  // Get current usage (attempts to fetch from Codex CLI)
  router.get('/usage', async (req: Request, res: Response) => {
    try {
      // Check if Codex CLI is available first
      const isAvailable = await service.isAvailable();
      if (!isAvailable) {
        // IMPORTANT: This endpoint is behind Automaker session auth already.
        // Use a 200 + error payload for Codex CLI issues so the UI doesn't
        // interpret it as an invalid Automaker session (401/403 triggers logout).
        res.status(200).json({
          error: 'Codex CLI not found',
          message: "Please install Codex CLI and run 'codex login' to authenticate",
        });
        return;
      }

      const usage = await service.fetchUsageData();
      res.json(usage);
    } catch (error) {
      const message = error instanceof Error ? error.message : 'Unknown error';

      if (message.includes('not authenticated') || message.includes('login')) {
        // Do NOT use 401/403 here: that status code is reserved for Automaker session auth.
        res.status(200).json({
          error: 'Authentication required',
          message: "Please run 'codex login' to authenticate",
        });
      } else if (message.includes('not available') || message.includes('does not provide')) {
        // This is the expected case - Codex doesn't provide usage stats
        res.status(200).json({
          error: 'Usage statistics not available',
          message: message,
        });
      } else if (message.includes('timed out')) {
        res.status(200).json({
          error: 'Command timed out',
          message: 'The Codex CLI took too long to respond',
        });
      } else {
        logger.error('Error fetching usage:', error);
        res.status(500).json({ error: message });
      }
    }
  });

  return router;
}
@@ -13,7 +13,7 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { PathNotAllowedError } from '@automaker/platform';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
||||
@@ -198,6 +198,8 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
||||
logger.info(`Using Cursor provider for model: ${model}`);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// Build a simple text prompt for Cursor (no multi-part content blocks)
|
||||
const cursorPrompt = `${instructionText}\n\n--- FILE CONTENT ---\n${contentToAnalyze}`;
|
||||
@@ -205,7 +207,7 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
||||
let responseText = '';
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model,
|
||||
model: bareModel,
|
||||
cwd,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
@@ -232,7 +234,6 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
autoLoadClaudeMd,
|
||||
sandbox: { enabled: true, autoAllowBashIfSandboxed: true },
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
});
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger, readImageAsBase64 } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
||||
@@ -357,6 +357,8 @@ export function createDescribeImageHandler(
|
||||
logger.info(`[${requestId}] Using Cursor provider for model: ${model}`);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// Build prompt with image reference for Cursor
|
||||
// Note: Cursor CLI may not support base64 image blocks directly,
|
||||
@@ -367,7 +369,7 @@ export function createDescribeImageHandler(
|
||||
const queryStart = Date.now();
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model,
|
||||
model: bareModel,
|
||||
cwd,
|
||||
maxTurns: 1,
|
||||
allowedTools: ['Read'], // Allow Read tool so Cursor can read the image if needed
|
||||
@@ -394,14 +396,13 @@ export function createDescribeImageHandler(
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
autoLoadClaudeMd,
|
||||
sandbox: { enabled: true, autoAllowBashIfSandboxed: true },
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
});
|
||||
|
||||
logger.info(
|
||||
`[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
|
||||
sdkOptions.allowedTools
|
||||
)} sandbox=${JSON.stringify(sdkOptions.sandbox)}`
|
||||
)}`
|
||||
);
|
||||
|
||||
const promptGenerator = (async function* () {
|
||||
|
||||
@@ -12,6 +12,7 @@ import { resolveModelString } from '@automaker/model-resolver';
|
||||
import {
|
||||
CLAUDE_MODEL_MAP,
|
||||
isCursorModel,
|
||||
stripProviderPrefix,
|
||||
ThinkingLevel,
|
||||
getThinkingTokenBudget,
|
||||
} from '@automaker/types';
|
||||
@@ -98,12 +99,14 @@ async function extractTextFromStream(
|
||||
*/
|
||||
async function executeWithCursor(prompt: string, model: string): Promise<string> {
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
let responseText = '';
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt,
|
||||
model,
|
||||
model: bareModel,
|
||||
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
|
||||
readOnly: true, // Prompt enhancement only generates text, doesn't write files
|
||||
})) {
|
||||
|
||||
@@ -9,6 +9,7 @@ import { createListHandler } from './routes/list.js';
|
||||
import { createGetHandler } from './routes/get.js';
|
||||
import { createCreateHandler } from './routes/create.js';
|
||||
import { createUpdateHandler } from './routes/update.js';
|
||||
import { createBulkUpdateHandler } from './routes/bulk-update.js';
|
||||
import { createDeleteHandler } from './routes/delete.js';
|
||||
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
|
||||
import { createGenerateTitleHandler } from './routes/generate-title.js';
|
||||
@@ -20,6 +21,11 @@ export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
|
||||
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
|
||||
router.post('/create', validatePathParams('projectPath'), createCreateHandler(featureLoader));
|
||||
router.post('/update', validatePathParams('projectPath'), createUpdateHandler(featureLoader));
|
||||
router.post(
|
||||
'/bulk-update',
|
||||
validatePathParams('projectPath'),
|
||||
createBulkUpdateHandler(featureLoader)
|
||||
);
|
||||
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
|
||||
router.post('/agent-output', createAgentOutputHandler(featureLoader));
|
||||
router.post('/raw-output', createRawOutputHandler(featureLoader));
|
||||
|
||||
75
apps/server/src/routes/features/routes/bulk-update.ts
Normal file
75
apps/server/src/routes/features/routes/bulk-update.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
/**
|
||||
* POST /bulk-update endpoint - Update multiple features at once
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||
import type { Feature } from '@automaker/types';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
interface BulkUpdateRequest {
|
||||
projectPath: string;
|
||||
featureIds: string[];
|
||||
updates: Partial<Feature>;
|
||||
}
|
||||
|
||||
interface BulkUpdateResult {
|
||||
featureId: string;
|
||||
success: boolean;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export function createBulkUpdateHandler(featureLoader: FeatureLoader) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureIds, updates } = req.body as BulkUpdateRequest;
|
||||
|
||||
if (!projectPath || !featureIds || !Array.isArray(featureIds) || featureIds.length === 0) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath and featureIds (non-empty array) are required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!updates || Object.keys(updates).length === 0) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'updates object with at least one field is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const results: BulkUpdateResult[] = [];
|
||||
const updatedFeatures: Feature[] = [];
|
||||
|
||||
for (const featureId of featureIds) {
|
||||
try {
|
||||
const updated = await featureLoader.update(projectPath, featureId, updates);
|
||||
results.push({ featureId, success: true });
|
||||
updatedFeatures.push(updated);
|
||||
} catch (error) {
|
||||
results.push({
|
||||
featureId,
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const successCount = results.filter((r) => r.success).length;
|
||||
const failureCount = results.filter((r) => !r.success).length;
|
||||
|
||||
res.json({
|
||||
success: failureCount === 0,
|
||||
updatedCount: successCount,
|
||||
failedCount: failureCount,
|
||||
results,
|
||||
features: updatedFeatures,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Bulk update features failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -10,11 +10,14 @@ import { getErrorMessage, logError } from '../common.js';
|
||||
export function createUpdateHandler(featureLoader: FeatureLoader) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureId, updates } = req.body as {
|
||||
projectPath: string;
|
||||
featureId: string;
|
||||
updates: Partial<Feature>;
|
||||
};
|
||||
const { projectPath, featureId, updates, descriptionHistorySource, enhancementMode } =
|
||||
req.body as {
|
||||
projectPath: string;
|
||||
featureId: string;
|
||||
updates: Partial<Feature>;
|
||||
descriptionHistorySource?: 'enhance' | 'edit';
|
||||
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance';
|
||||
};
|
||||
|
||||
if (!projectPath || !featureId || !updates) {
|
||||
res.status(400).json({
|
||||
@@ -24,7 +27,13 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
|
||||
return;
|
||||
}
|
||||
|
||||
const updated = await featureLoader.update(projectPath, featureId, updates);
|
||||
const updated = await featureLoader.update(
|
||||
projectPath,
|
||||
featureId,
|
||||
updates,
|
||||
descriptionHistorySource,
|
||||
enhancementMode
|
||||
);
|
||||
res.json({ success: true, feature: updated });
|
||||
} catch (error) {
|
||||
logError(error, 'Update feature failed');
|
||||
|
||||
@@ -18,7 +18,7 @@ import type {
|
||||
LinkedPRInfo,
|
||||
ThinkingLevel,
|
||||
} from '@automaker/types';
|
||||
import { isCursorModel, DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||
import { isCursorModel, DEFAULT_PHASE_MODELS, stripProviderPrefix } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createSuggestionsOptions } from '../../../lib/sdk-options.js';
|
||||
import { extractJson } from '../../../lib/json-extractor.js';
|
||||
@@ -120,6 +120,8 @@ async function runValidation(
|
||||
logger.info(`Using Cursor provider for validation with model: ${model}`);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// For Cursor, include the system prompt and schema in the user prompt
|
||||
const cursorPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
|
||||
@@ -137,7 +139,7 @@ ${prompt}`;
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
readOnly: true, // Issue validation only reads code, doesn't write
|
||||
})) {
|
||||
|
||||
@@ -23,6 +23,7 @@ import { createGetProjectHandler } from './routes/get-project.js';
|
||||
import { createUpdateProjectHandler } from './routes/update-project.js';
|
||||
import { createMigrateHandler } from './routes/migrate.js';
|
||||
import { createStatusHandler } from './routes/status.js';
|
||||
import { createDiscoverAgentsHandler } from './routes/discover-agents.js';
|
||||
|
||||
/**
|
||||
* Create settings router with all endpoints
|
||||
@@ -39,6 +40,7 @@ import { createStatusHandler } from './routes/status.js';
|
||||
* - POST /project - Get project settings (requires projectPath in body)
|
||||
* - PUT /project - Update project settings
|
||||
* - POST /migrate - Migrate settings from localStorage
|
||||
* - POST /agents/discover - Discover filesystem agents from .claude/agents/ (read-only)
|
||||
*
|
||||
* @param settingsService - Instance of SettingsService for file I/O
|
||||
* @returns Express Router configured with all settings endpoints
|
||||
@@ -72,5 +74,8 @@ export function createSettingsRoutes(settingsService: SettingsService): Router {
|
||||
// Migration from localStorage
|
||||
router.post('/migrate', createMigrateHandler(settingsService));
|
||||
|
||||
// Filesystem agents discovery (read-only)
|
||||
router.post('/agents/discover', createDiscoverAgentsHandler());
|
||||
|
||||
return router;
|
||||
}
|
||||
|
||||
61
apps/server/src/routes/settings/routes/discover-agents.ts
Normal file
61
apps/server/src/routes/settings/routes/discover-agents.ts
Normal file
@@ -0,0 +1,61 @@
|
||||
/**
|
||||
* Discover Agents Route - Returns filesystem-based agents from .claude/agents/
|
||||
*
|
||||
* Scans both user-level (~/.claude/agents/) and project-level (.claude/agents/)
|
||||
* directories for AGENT.md files and returns parsed agent definitions.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { discoverFilesystemAgents } from '../../../lib/agent-discovery.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('DiscoverAgentsRoute');
|
||||
|
||||
interface DiscoverAgentsRequest {
|
||||
projectPath?: string;
|
||||
sources?: Array<'user' | 'project'>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create handler for discovering filesystem agents
|
||||
*
|
||||
* POST /api/settings/agents/discover
|
||||
* Body: { projectPath?: string, sources?: ['user', 'project'] }
|
||||
*
|
||||
* Returns:
|
||||
* {
|
||||
* success: true,
|
||||
* agents: Array<{
|
||||
* name: string,
|
||||
* definition: AgentDefinition,
|
||||
* source: 'user' | 'project',
|
||||
* filePath: string
|
||||
* }>
|
||||
* }
|
||||
*/
|
||||
export function createDiscoverAgentsHandler() {
|
||||
return async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { projectPath, sources = ['user', 'project'] } = req.body as DiscoverAgentsRequest;
|
||||
|
||||
logger.info(
|
||||
`Discovering agents from sources: ${sources.join(', ')}${projectPath ? ` (project: ${projectPath})` : ''}`
|
||||
);
|
||||
|
||||
const agents = await discoverFilesystemAgents(projectPath, sources);
|
||||
|
||||
logger.info(`Discovered ${agents.length} filesystem agents`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
agents,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to discover agents:', error);
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to discover agents',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -11,7 +11,7 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import type { SettingsService } from '../../../services/settings-service.js';
|
||||
import type { GlobalSettings } from '../../../types/settings.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import { getErrorMessage, logError, logger } from '../common.js';
|
||||
|
||||
/**
|
||||
* Create handler factory for PUT /api/settings/global
|
||||
@@ -32,6 +32,18 @@ export function createUpdateGlobalHandler(settingsService: SettingsService) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Minimal debug logging to help diagnose accidental wipes.
|
||||
if ('projects' in updates || 'theme' in updates || 'localStorageMigrated' in updates) {
|
||||
const projectsLen = Array.isArray((updates as any).projects)
|
||||
? (updates as any).projects.length
|
||||
: undefined;
|
||||
logger.info(
|
||||
`Update global settings request: projects=${projectsLen ?? 'n/a'}, theme=${
|
||||
(updates as any).theme ?? 'n/a'
|
||||
}, localStorageMigrated=${(updates as any).localStorageMigrated ?? 'n/a'}`
|
||||
);
|
||||
}
|
||||
|
||||
const settings = await settingsService.updateGlobalSettings(updates);
|
||||
|
||||
res.json({
|
||||
|
||||
@@ -6,9 +6,24 @@ import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { getClaudeCliPaths, getClaudeAuthIndicators, systemPathAccess } from '@automaker/platform';
|
||||
import { getApiKey } from './common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
const DISCONNECTED_MARKER_FILE = '.claude-disconnected';
|
||||
|
||||
function isDisconnectedFromApp(): boolean {
|
||||
try {
|
||||
// Check if we're in a project directory
|
||||
const projectRoot = process.cwd();
|
||||
const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
|
||||
return fs.existsSync(markerPath);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
export async function getClaudeStatus() {
|
||||
let installed = false;
|
||||
let version = '';
|
||||
@@ -60,6 +75,30 @@ export async function getClaudeStatus() {
|
||||
}
|
||||
}
|
||||
|
||||
// Check if user has manually disconnected from the app
|
||||
if (isDisconnectedFromApp()) {
|
||||
return {
|
||||
status: installed ? 'installed' : 'not_installed',
|
||||
installed,
|
||||
method,
|
||||
version,
|
||||
path: cliPath,
|
||||
auth: {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
hasCredentialsFile: false,
|
||||
hasToken: false,
|
||||
hasStoredOAuthToken: false,
|
||||
hasStoredApiKey: false,
|
||||
hasEnvApiKey: false,
|
||||
oauthTokenValid: false,
|
||||
apiKeyValid: false,
|
||||
hasCliAuth: false,
|
||||
hasRecentActivity: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// Check authentication - detect all possible auth methods
|
||||
// Note: apiKeys.anthropic_oauth_token stores OAuth tokens from subscription auth
|
||||
// apiKeys.anthropic stores direct API keys for pay-per-use
|
||||
|
||||
@@ -11,8 +11,19 @@ import { createDeleteApiKeyHandler } from './routes/delete-api-key.js';
|
||||
import { createApiKeysHandler } from './routes/api-keys.js';
|
||||
import { createPlatformHandler } from './routes/platform.js';
|
||||
import { createVerifyClaudeAuthHandler } from './routes/verify-claude-auth.js';
|
||||
import { createVerifyCodexAuthHandler } from './routes/verify-codex-auth.js';
|
||||
import { createGhStatusHandler } from './routes/gh-status.js';
|
||||
import { createCursorStatusHandler } from './routes/cursor-status.js';
|
||||
import { createCodexStatusHandler } from './routes/codex-status.js';
|
||||
import { createInstallCodexHandler } from './routes/install-codex.js';
|
||||
import { createAuthCodexHandler } from './routes/auth-codex.js';
|
||||
import { createAuthCursorHandler } from './routes/auth-cursor.js';
|
||||
import { createDeauthClaudeHandler } from './routes/deauth-claude.js';
|
||||
import { createDeauthCodexHandler } from './routes/deauth-codex.js';
|
||||
import { createDeauthCursorHandler } from './routes/deauth-cursor.js';
|
||||
import { createAuthOpencodeHandler } from './routes/auth-opencode.js';
|
||||
import { createDeauthOpencodeHandler } from './routes/deauth-opencode.js';
|
||||
import { createOpencodeStatusHandler } from './routes/opencode-status.js';
|
||||
import {
|
||||
createGetCursorConfigHandler,
|
||||
createSetCursorDefaultModelHandler,
|
||||
@@ -30,15 +41,30 @@ export function createSetupRoutes(): Router {
|
||||
router.get('/claude-status', createClaudeStatusHandler());
|
||||
router.post('/install-claude', createInstallClaudeHandler());
|
||||
router.post('/auth-claude', createAuthClaudeHandler());
|
||||
router.post('/deauth-claude', createDeauthClaudeHandler());
|
||||
router.post('/store-api-key', createStoreApiKeyHandler());
|
||||
router.post('/delete-api-key', createDeleteApiKeyHandler());
|
||||
router.get('/api-keys', createApiKeysHandler());
|
||||
router.get('/platform', createPlatformHandler());
|
||||
router.post('/verify-claude-auth', createVerifyClaudeAuthHandler());
|
||||
router.post('/verify-codex-auth', createVerifyCodexAuthHandler());
|
||||
router.get('/gh-status', createGhStatusHandler());
|
||||
|
||||
// Cursor CLI routes
|
||||
router.get('/cursor-status', createCursorStatusHandler());
|
||||
router.post('/auth-cursor', createAuthCursorHandler());
|
||||
router.post('/deauth-cursor', createDeauthCursorHandler());
|
||||
|
||||
// Codex CLI routes
|
||||
router.get('/codex-status', createCodexStatusHandler());
|
||||
router.post('/install-codex', createInstallCodexHandler());
|
||||
router.post('/auth-codex', createAuthCodexHandler());
|
||||
router.post('/deauth-codex', createDeauthCodexHandler());
|
||||
|
||||
// OpenCode CLI routes
|
||||
router.get('/opencode-status', createOpencodeStatusHandler());
|
||||
router.post('/auth-opencode', createAuthOpencodeHandler());
|
||||
router.post('/deauth-opencode', createDeauthOpencodeHandler());
|
||||
router.get('/cursor-config', createGetCursorConfigHandler());
|
||||
router.post('/cursor-config/default-model', createSetCursorDefaultModelHandler());
|
||||
router.post('/cursor-config/models', createSetCursorModelsHandler());
|
||||
|
||||
@@ -11,6 +11,7 @@ export function createApiKeysHandler() {
|
||||
res.json({
|
||||
success: true,
|
||||
hasAnthropicKey: !!getApiKey('anthropic') || !!process.env.ANTHROPIC_API_KEY,
|
||||
hasOpenaiKey: !!getApiKey('openai') || !!process.env.OPENAI_API_KEY,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Get API keys failed');
|
||||
|
||||
@@ -4,19 +4,54 @@
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
export function createAuthClaudeHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
res.json({
|
||||
success: true,
|
||||
requiresManualAuth: true,
|
||||
command: 'claude login',
|
||||
message: "Please run 'claude login' in your terminal to authenticate",
|
||||
});
|
||||
// Remove the disconnected marker file to reconnect the app to the CLI
|
||||
const markerPath = path.join(process.cwd(), '.automaker', '.claude-disconnected');
|
||||
if (fs.existsSync(markerPath)) {
|
||||
fs.unlinkSync(markerPath);
|
||||
}
|
||||
|
||||
// Check if CLI is already authenticated by checking auth indicators
|
||||
const { getClaudeAuthIndicators } = await import('@automaker/platform');
|
||||
const indicators = await getClaudeAuthIndicators();
|
||||
const isAlreadyAuthenticated =
|
||||
indicators.hasStatsCacheWithActivity ||
|
||||
(indicators.hasSettingsFile && indicators.hasProjectsSessions) ||
|
||||
indicators.hasCredentialsFile;
|
||||
|
||||
if (isAlreadyAuthenticated) {
|
||||
// CLI is already authenticated, just reconnect
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Claude CLI is now linked with the app',
|
||||
wasAlreadyAuthenticated: true,
|
||||
});
|
||||
} else {
|
||||
// CLI needs authentication - but we can't run claude login here
|
||||
// because it requires browser OAuth. Just reconnect and let the user authenticate if needed.
|
||||
res.json({
|
||||
success: true,
|
||||
message:
|
||||
'Claude CLI is now linked with the app. If prompted, please authenticate with "claude login" in your terminal.',
|
||||
requiresManualAuth: true,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Auth Claude failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to link Claude CLI with the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
50
apps/server/src/routes/setup/routes/auth-codex.ts
Normal file
50
apps/server/src/routes/setup/routes/auth-codex.ts
Normal file
@@ -0,0 +1,50 @@
|
||||
/**
|
||||
* POST /auth-codex endpoint - Authenticate Codex CLI
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { logError, getErrorMessage } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
export function createAuthCodexHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Remove the disconnected marker file to reconnect the app to the CLI
|
||||
const markerPath = path.join(process.cwd(), '.automaker', '.codex-disconnected');
|
||||
if (fs.existsSync(markerPath)) {
|
||||
fs.unlinkSync(markerPath);
|
||||
}
|
||||
|
||||
// Use the same detection logic as the Codex provider
|
||||
const { getCodexAuthIndicators } = await import('@automaker/platform');
|
||||
const indicators = await getCodexAuthIndicators();
|
||||
|
||||
const isAlreadyAuthenticated =
|
||||
indicators.hasApiKey || indicators.hasAuthFile || indicators.hasOAuthToken;
|
||||
|
||||
if (isAlreadyAuthenticated) {
|
||||
// Already has authentication, just reconnect
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Codex CLI is now linked with the app',
|
||||
wasAlreadyAuthenticated: true,
|
||||
});
|
||||
} else {
|
||||
res.json({
|
||||
success: true,
|
||||
message:
|
||||
'Codex CLI is now linked with the app. If prompted, please authenticate with "codex login" in your terminal.',
|
||||
requiresManualAuth: true,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Auth Codex failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to link Codex CLI with the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
73
apps/server/src/routes/setup/routes/auth-cursor.ts
Normal file
73
apps/server/src/routes/setup/routes/auth-cursor.ts
Normal file
@@ -0,0 +1,73 @@
|
||||
/**
|
||||
* POST /auth-cursor endpoint - Authenticate Cursor CLI
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { logError, getErrorMessage } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import os from 'os';
|
||||
|
||||
export function createAuthCursorHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Remove the disconnected marker file to reconnect the app to the CLI
|
||||
const markerPath = path.join(process.cwd(), '.automaker', '.cursor-disconnected');
|
||||
if (fs.existsSync(markerPath)) {
|
||||
fs.unlinkSync(markerPath);
|
||||
}
|
||||
|
||||
// Check if Cursor is already authenticated using the same logic as CursorProvider
|
||||
const isAlreadyAuthenticated = (): boolean => {
|
||||
// Check for API key in environment
|
||||
if (process.env.CURSOR_API_KEY) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for credentials files
|
||||
const credentialPaths = [
|
||||
path.join(os.homedir(), '.cursor', 'credentials.json'),
|
||||
path.join(os.homedir(), '.config', 'cursor', 'credentials.json'),
|
||||
];
|
||||
|
||||
for (const credPath of credentialPaths) {
|
||||
if (fs.existsSync(credPath)) {
|
||||
try {
|
||||
const content = fs.readFileSync(credPath, 'utf8');
|
||||
const creds = JSON.parse(content);
|
||||
if (creds.accessToken || creds.token) {
|
||||
return true;
|
||||
}
|
||||
} catch {
|
||||
// Invalid credentials file, continue checking
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
|
||||
if (isAlreadyAuthenticated()) {
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Cursor CLI is now linked with the app',
|
||||
wasAlreadyAuthenticated: true,
|
||||
});
|
||||
} else {
|
||||
res.json({
|
||||
success: true,
|
||||
message:
|
||||
'Cursor CLI is now linked with the app. If prompted, please authenticate with "cursor auth" in your terminal.',
|
||||
requiresManualAuth: true,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Auth Cursor failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to link Cursor CLI with the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
51
apps/server/src/routes/setup/routes/auth-opencode.ts
Normal file
51
apps/server/src/routes/setup/routes/auth-opencode.ts
Normal file
@@ -0,0 +1,51 @@
|
||||
/**
|
||||
* POST /auth-opencode endpoint - Authenticate OpenCode CLI
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { logError, getErrorMessage } from '../common.js';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
export function createAuthOpencodeHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Remove the disconnected marker file to reconnect the app to the CLI
|
||||
const markerPath = path.join(process.cwd(), '.automaker', '.opencode-disconnected');
|
||||
if (fs.existsSync(markerPath)) {
|
||||
fs.unlinkSync(markerPath);
|
||||
}
|
||||
|
||||
// Check if OpenCode is already authenticated
|
||||
// For OpenCode, check if there's an auth token or API key
|
||||
const hasApiKey = !!process.env.OPENCODE_API_KEY;
|
||||
|
||||
if (hasApiKey) {
|
||||
// Already has authentication, just reconnect
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'OpenCode CLI is now linked with the app',
|
||||
wasAlreadyAuthenticated: true,
|
||||
});
|
||||
} else {
|
||||
res.json({
|
||||
success: true,
|
||||
message:
|
||||
'OpenCode CLI is now linked with the app. If prompted, please authenticate with OpenCode.',
|
||||
requiresManualAuth: true,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Auth OpenCode failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to link OpenCode CLI with the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
81
apps/server/src/routes/setup/routes/codex-status.ts
Normal file
81
apps/server/src/routes/setup/routes/codex-status.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* GET /codex-status endpoint - Get Codex CLI installation and auth status
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { CodexProvider } from '../../../providers/codex-provider.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
const DISCONNECTED_MARKER_FILE = '.codex-disconnected';
|
||||
|
||||
function isCodexDisconnectedFromApp(): boolean {
|
||||
try {
|
||||
const projectRoot = process.cwd();
|
||||
const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
|
||||
return fs.existsSync(markerPath);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates handler for GET /api/setup/codex-status
|
||||
* Returns Codex CLI installation and authentication status
|
||||
*/
|
||||
export function createCodexStatusHandler() {
|
||||
const installCommand = 'npm install -g @openai/codex';
|
||||
const loginCommand = 'codex login';
|
||||
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Check if user has manually disconnected from the app
|
||||
if (isCodexDisconnectedFromApp()) {
|
||||
res.json({
|
||||
success: true,
|
||||
installed: true,
|
||||
version: null,
|
||||
path: null,
|
||||
auth: {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
hasApiKey: false,
|
||||
},
|
||||
installCommand,
|
||||
loginCommand,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const provider = new CodexProvider();
|
||||
const status = await provider.detectInstallation();
|
||||
|
||||
// Derive auth method from authenticated status and API key presence
|
||||
let authMethod = 'none';
|
||||
if (status.authenticated) {
|
||||
authMethod = status.hasApiKey ? 'api_key_env' : 'cli_authenticated';
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
installed: status.installed,
|
||||
version: status.version || null,
|
||||
path: status.path || null,
|
||||
auth: {
|
||||
authenticated: status.authenticated || false,
|
||||
method: authMethod,
|
||||
hasApiKey: status.hasApiKey || false,
|
||||
},
|
||||
installCommand,
|
||||
loginCommand,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Get Codex status failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -5,6 +5,20 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { CursorProvider } from '../../../providers/cursor-provider.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
const DISCONNECTED_MARKER_FILE = '.cursor-disconnected';
|
||||
|
||||
function isCursorDisconnectedFromApp(): boolean {
|
||||
try {
|
||||
const projectRoot = process.cwd();
|
||||
const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
|
||||
return fs.existsSync(markerPath);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates handler for GET /api/setup/cursor-status
|
||||
@@ -16,6 +30,30 @@ export function createCursorStatusHandler() {
|
||||
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Check if user has manually disconnected from the app
|
||||
if (isCursorDisconnectedFromApp()) {
|
||||
const provider = new CursorProvider();
|
||||
const [installed, version] = await Promise.all([
|
||||
provider.isInstalled(),
|
||||
provider.getVersion(),
|
||||
]);
|
||||
const cliPath = installed ? provider.getCliPath() : null;
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
installed,
|
||||
version: version || null,
|
||||
path: cliPath,
|
||||
auth: {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
},
|
||||
installCommand,
|
||||
loginCommand,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const provider = new CursorProvider();
|
||||
|
||||
const [installed, version, auth] = await Promise.all([
|
||||
|
||||
44
apps/server/src/routes/setup/routes/deauth-claude.ts
Normal file
44
apps/server/src/routes/setup/routes/deauth-claude.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
/**
|
||||
* POST /deauth-claude endpoint - Sign out from Claude CLI
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
export function createDeauthClaudeHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Create a marker file to indicate the CLI is disconnected from the app
|
||||
const automakerDir = path.join(process.cwd(), '.automaker');
|
||||
const markerPath = path.join(automakerDir, '.claude-disconnected');
|
||||
|
||||
// Ensure .automaker directory exists
|
||||
if (!fs.existsSync(automakerDir)) {
|
||||
fs.mkdirSync(automakerDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Create the marker file with timestamp
|
||||
fs.writeFileSync(
|
||||
markerPath,
|
||||
JSON.stringify({
|
||||
disconnectedAt: new Date().toISOString(),
|
||||
message: 'Claude CLI is disconnected from the app',
|
||||
})
|
||||
);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Claude CLI is now disconnected from the app',
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Deauth Claude failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to disconnect Claude CLI from the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
44
apps/server/src/routes/setup/routes/deauth-codex.ts
Normal file
44
apps/server/src/routes/setup/routes/deauth-codex.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
/**
|
||||
* POST /deauth-codex endpoint - Sign out from Codex CLI
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { logError, getErrorMessage } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
export function createDeauthCodexHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Create a marker file to indicate the CLI is disconnected from the app
|
||||
const automakerDir = path.join(process.cwd(), '.automaker');
|
||||
const markerPath = path.join(automakerDir, '.codex-disconnected');
|
||||
|
||||
// Ensure .automaker directory exists
|
||||
if (!fs.existsSync(automakerDir)) {
|
||||
fs.mkdirSync(automakerDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Create the marker file with timestamp
|
||||
fs.writeFileSync(
|
||||
markerPath,
|
||||
JSON.stringify({
|
||||
disconnectedAt: new Date().toISOString(),
|
||||
message: 'Codex CLI is disconnected from the app',
|
||||
})
|
||||
);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Codex CLI is now disconnected from the app',
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Deauth Codex failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to disconnect Codex CLI from the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
44
apps/server/src/routes/setup/routes/deauth-cursor.ts
Normal file
44
apps/server/src/routes/setup/routes/deauth-cursor.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
/**
|
||||
* POST /deauth-cursor endpoint - Sign out from Cursor CLI
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { logError, getErrorMessage } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
export function createDeauthCursorHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Create a marker file to indicate the CLI is disconnected from the app
|
||||
const automakerDir = path.join(process.cwd(), '.automaker');
|
||||
const markerPath = path.join(automakerDir, '.cursor-disconnected');
|
||||
|
||||
// Ensure .automaker directory exists
|
||||
if (!fs.existsSync(automakerDir)) {
|
||||
fs.mkdirSync(automakerDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Create the marker file with timestamp
|
||||
fs.writeFileSync(
|
||||
markerPath,
|
||||
JSON.stringify({
|
||||
disconnectedAt: new Date().toISOString(),
|
||||
message: 'Cursor CLI is disconnected from the app',
|
||||
})
|
||||
);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Cursor CLI is now disconnected from the app',
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Deauth Cursor failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to disconnect Cursor CLI from the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
40
apps/server/src/routes/setup/routes/deauth-opencode.ts
Normal file
40
apps/server/src/routes/setup/routes/deauth-opencode.ts
Normal file
@@ -0,0 +1,40 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { logError, getErrorMessage } from '../common.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
export function createDeauthOpencodeHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Create a marker file to indicate the CLI is disconnected from the app
|
||||
const automakerDir = path.join(process.cwd(), '.automaker');
|
||||
const markerPath = path.join(automakerDir, '.opencode-disconnected');
|
||||
|
||||
// Ensure .automaker directory exists
|
||||
if (!fs.existsSync(automakerDir)) {
|
||||
fs.mkdirSync(automakerDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Create the marker file with timestamp
|
||||
fs.writeFileSync(
|
||||
markerPath,
|
||||
JSON.stringify({
|
||||
disconnectedAt: new Date().toISOString(),
|
||||
message: 'OpenCode CLI is disconnected from the app',
|
||||
})
|
||||
);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'OpenCode CLI is now disconnected from the app',
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Deauth OpenCode failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
message: 'Failed to disconnect OpenCode CLI from the app',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -46,13 +46,14 @@ export function createDeleteApiKeyHandler() {
|
||||
// Map provider to env key name
|
||||
const envKeyMap: Record<string, string> = {
|
||||
anthropic: 'ANTHROPIC_API_KEY',
|
||||
openai: 'OPENAI_API_KEY',
|
||||
};
|
||||
|
||||
const envKey = envKeyMap[provider];
|
||||
if (!envKey) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: `Unknown provider: ${provider}. Only anthropic is supported.`,
|
||||
error: `Unknown provider: ${provider}. Only anthropic and openai are supported.`,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
33
apps/server/src/routes/setup/routes/install-codex.ts
Normal file
33
apps/server/src/routes/setup/routes/install-codex.ts
Normal file
@@ -0,0 +1,33 @@
|
||||
/**
|
||||
* POST /install-codex endpoint - Install Codex CLI
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { logError, getErrorMessage } from '../common.js';
|
||||
|
||||
/**
|
||||
* Creates handler for POST /api/setup/install-codex
|
||||
* Installs Codex CLI (currently returns instructions for manual install)
|
||||
*/
|
||||
export function createInstallCodexHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// For now, return manual installation instructions
|
||||
// In the future, this could potentially trigger npm global install
|
||||
const installCommand = 'npm install -g @openai/codex';
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Please install Codex CLI manually by running: ${installCommand}`,
|
||||
requiresManualInstall: true,
|
||||
installCommand,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Install Codex failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
59
apps/server/src/routes/setup/routes/opencode-status.ts
Normal file
59
apps/server/src/routes/setup/routes/opencode-status.ts
Normal file
@@ -0,0 +1,59 @@
|
||||
/**
|
||||
* GET /opencode-status endpoint - Get OpenCode CLI installation and auth status
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { OpencodeProvider } from '../../../providers/opencode-provider.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
/**
|
||||
* Creates handler for GET /api/setup/opencode-status
|
||||
* Returns OpenCode CLI installation and authentication status
|
||||
*/
|
||||
export function createOpencodeStatusHandler() {
|
||||
const installCommand = 'curl -fsSL https://opencode.ai/install | bash';
|
||||
const loginCommand = 'opencode auth login';
|
||||
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const provider = new OpencodeProvider();
|
||||
const status = await provider.detectInstallation();
|
||||
|
||||
// Derive auth method from authenticated status and API key presence
|
||||
let authMethod = 'none';
|
||||
if (status.authenticated) {
|
||||
authMethod = status.hasApiKey ? 'api_key_env' : 'cli_authenticated';
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
installed: status.installed,
|
||||
version: status.version || null,
|
||||
path: status.path || null,
|
||||
auth: {
|
||||
authenticated: status.authenticated || false,
|
||||
method: authMethod,
|
||||
hasApiKey: status.hasApiKey || false,
|
||||
hasEnvApiKey: !!process.env.ANTHROPIC_API_KEY || !!process.env.OPENAI_API_KEY,
|
||||
hasOAuthToken: status.hasOAuthToken || false,
|
||||
},
|
||||
recommendation: status.installed
|
||||
? undefined
|
||||
: 'Install OpenCode CLI to use multi-provider AI models.',
|
||||
installCommand,
|
||||
loginCommand,
|
||||
installCommands: {
|
||||
macos: installCommand,
|
||||
linux: installCommand,
|
||||
npm: 'npm install -g opencode-ai',
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Get OpenCode status failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -7,8 +7,16 @@ import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { getApiKey } from '../common.js';
|
||||
import {
|
||||
createSecureAuthEnv,
|
||||
AuthSessionManager,
|
||||
AuthRateLimiter,
|
||||
validateApiKey,
|
||||
createTempEnvOverride,
|
||||
} from '../../../lib/auth-utils.js';
|
||||
|
||||
const logger = createLogger('Setup');
|
||||
const rateLimiter = new AuthRateLimiter();
|
||||
|
||||
// Known error patterns that indicate auth failure
|
||||
const AUTH_ERROR_PATTERNS = [
|
||||
@@ -77,6 +85,19 @@ export function createVerifyClaudeAuthHandler() {
|
||||
apiKey?: string;
|
||||
};
|
||||
|
||||
// Rate limiting to prevent abuse
|
||||
const clientIp = req.ip || req.socket.remoteAddress || 'unknown';
|
||||
if (!rateLimiter.canAttempt(clientIp)) {
|
||||
const resetTime = rateLimiter.getResetTime(clientIp);
|
||||
res.status(429).json({
|
||||
success: false,
|
||||
authenticated: false,
|
||||
error: 'Too many authentication attempts. Please try again later.',
|
||||
resetTime,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`[Setup] Verifying Claude authentication using method: ${authMethod || 'auto'}${apiKey ? ' (with provided key)' : ''}`
|
||||
);
|
||||
@@ -89,37 +110,48 @@ export function createVerifyClaudeAuthHandler() {
|
||||
let errorMessage = '';
|
||||
let receivedAnyContent = false;
|
||||
|
||||
// Save original env values
|
||||
const originalAnthropicKey = process.env.ANTHROPIC_API_KEY;
|
||||
// Create secure auth session
|
||||
const sessionId = `claude-auth-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
||||
|
||||
try {
|
||||
// Configure environment based on auth method
|
||||
if (authMethod === 'cli') {
|
||||
// For CLI verification, remove any API key so it uses CLI credentials only
|
||||
delete process.env.ANTHROPIC_API_KEY;
|
||||
logger.info('[Setup] Cleared API key environment for CLI verification');
|
||||
} else if (authMethod === 'api_key') {
|
||||
// For API key verification, use provided key, stored key, or env var (in order of priority)
|
||||
if (apiKey) {
|
||||
// Use the provided API key (allows testing unsaved keys)
|
||||
process.env.ANTHROPIC_API_KEY = apiKey;
|
||||
logger.info('[Setup] Using provided API key for verification');
|
||||
} else {
|
||||
const storedApiKey = getApiKey('anthropic');
|
||||
if (storedApiKey) {
|
||||
process.env.ANTHROPIC_API_KEY = storedApiKey;
|
||||
logger.info('[Setup] Using stored API key for verification');
|
||||
} else if (!process.env.ANTHROPIC_API_KEY) {
|
||||
res.json({
|
||||
success: true,
|
||||
authenticated: false,
|
||||
error: 'No API key configured. Please enter an API key first.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
// For API key verification, validate the key first
|
||||
if (authMethod === 'api_key' && apiKey) {
|
||||
const validation = validateApiKey(apiKey, 'anthropic');
|
||||
if (!validation.isValid) {
|
||||
res.json({
|
||||
success: true,
|
||||
authenticated: false,
|
||||
error: validation.error,
|
||||
});
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Create secure environment without modifying process.env
|
||||
const authEnv = createSecureAuthEnv(authMethod || 'api_key', apiKey, 'anthropic');
|
||||
|
||||
// For API key verification without provided key, use stored key or env var
|
||||
if (authMethod === 'api_key' && !apiKey) {
|
||||
const storedApiKey = getApiKey('anthropic');
|
||||
if (storedApiKey) {
|
||||
authEnv.ANTHROPIC_API_KEY = storedApiKey;
|
||||
logger.info('[Setup] Using stored API key for verification');
|
||||
} else if (!authEnv.ANTHROPIC_API_KEY) {
|
||||
res.json({
|
||||
success: true,
|
||||
authenticated: false,
|
||||
error: 'No API key configured. Please enter an API key first.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Store the secure environment in session manager
|
||||
AuthSessionManager.createSession(sessionId, authMethod || 'api_key', apiKey, 'anthropic');
|
||||
|
||||
// Create temporary environment override for SDK call
|
||||
const cleanupEnv = createTempEnvOverride(authEnv);
|
||||
|
||||
// Run a minimal query to verify authentication
|
||||
const stream = query({
|
||||
prompt: "Reply with only the word 'ok'",
|
||||
@@ -278,13 +310,8 @@ export function createVerifyClaudeAuthHandler() {
|
||||
}
|
||||
} finally {
|
||||
clearTimeout(timeoutId);
|
||||
// Restore original environment
|
||||
if (originalAnthropicKey !== undefined) {
|
||||
process.env.ANTHROPIC_API_KEY = originalAnthropicKey;
|
||||
} else if (authMethod === 'cli') {
|
||||
// If we cleared it and there was no original, keep it cleared
|
||||
delete process.env.ANTHROPIC_API_KEY;
|
||||
}
|
||||
// Clean up the auth session
|
||||
AuthSessionManager.destroySession(sessionId);
|
||||
}
|
||||
|
||||
logger.info('[Setup] Verification result:', {
|
||||
|
||||
282
apps/server/src/routes/setup/routes/verify-codex-auth.ts
Normal file
282
apps/server/src/routes/setup/routes/verify-codex-auth.ts
Normal file
@@ -0,0 +1,282 @@
|
||||
/**
|
||||
* POST /verify-codex-auth endpoint - Verify Codex authentication
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { CODEX_MODEL_MAP } from '@automaker/types';
|
||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
||||
import { getApiKey } from '../common.js';
|
||||
import { getCodexAuthIndicators } from '@automaker/platform';
|
||||
import {
|
||||
createSecureAuthEnv,
|
||||
AuthSessionManager,
|
||||
AuthRateLimiter,
|
||||
validateApiKey,
|
||||
createTempEnvOverride,
|
||||
} from '../../../lib/auth-utils.js';
|
||||
|
||||
const logger = createLogger('Setup');
|
||||
const rateLimiter = new AuthRateLimiter();
|
||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||
const AUTH_PROMPT = "Reply with only the word 'ok'";
|
||||
const AUTH_TIMEOUT_MS = 30000;
|
||||
const ERROR_BILLING_MESSAGE =
|
||||
'Credit balance is too low. Please add credits to your OpenAI account.';
|
||||
const ERROR_RATE_LIMIT_MESSAGE =
|
||||
'Rate limit reached. Please wait a while before trying again or upgrade your plan.';
|
||||
const ERROR_CLI_AUTH_REQUIRED =
|
||||
"CLI authentication failed. Please run 'codex login' to authenticate.";
|
||||
const ERROR_API_KEY_REQUIRED = 'No API key configured. Please enter an API key first.';
|
||||
const AUTH_ERROR_PATTERNS = [
|
||||
'authentication',
|
||||
'unauthorized',
|
||||
'invalid_api_key',
|
||||
'invalid api key',
|
||||
'api key is invalid',
|
||||
'not authenticated',
|
||||
'login',
|
||||
'auth(',
|
||||
'token refresh',
|
||||
'tokenrefresh',
|
||||
'failed to parse server response',
|
||||
'transport channel closed',
|
||||
];
|
||||
const BILLING_ERROR_PATTERNS = [
|
||||
'credit balance is too low',
|
||||
'credit balance too low',
|
||||
'insufficient credits',
|
||||
'insufficient balance',
|
||||
'no credits',
|
||||
'out of credits',
|
||||
'billing',
|
||||
'payment required',
|
||||
'add credits',
|
||||
];
|
||||
const RATE_LIMIT_PATTERNS = [
|
||||
'limit reached',
|
||||
'rate limit',
|
||||
'rate_limit',
|
||||
'too many requests',
|
||||
'resets',
|
||||
'429',
|
||||
];
|
||||
|
||||
function containsAuthError(text: string): boolean {
|
||||
const lowerText = text.toLowerCase();
|
||||
return AUTH_ERROR_PATTERNS.some((pattern) => lowerText.includes(pattern));
|
||||
}
|
||||
|
||||
function isBillingError(text: string): boolean {
|
||||
const lowerText = text.toLowerCase();
|
||||
return BILLING_ERROR_PATTERNS.some((pattern) => lowerText.includes(pattern));
|
||||
}
|
||||
|
||||
function isRateLimitError(text: string): boolean {
|
||||
if (isBillingError(text)) {
|
||||
return false;
|
||||
}
|
||||
const lowerText = text.toLowerCase();
|
||||
return RATE_LIMIT_PATTERNS.some((pattern) => lowerText.includes(pattern));
|
||||
}
|
||||
|
||||
export function createVerifyCodexAuthHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
const { authMethod, apiKey } = req.body as {
|
||||
authMethod?: 'cli' | 'api_key';
|
||||
apiKey?: string;
|
||||
};
|
||||
|
||||
// Create session ID for cleanup
|
||||
const sessionId = `codex-auth-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
|
||||
|
||||
// Rate limiting
|
||||
const clientIp = req.ip || req.socket.remoteAddress || 'unknown';
|
||||
if (!rateLimiter.canAttempt(clientIp)) {
|
||||
const resetTime = rateLimiter.getResetTime(clientIp);
|
||||
res.status(429).json({
|
||||
success: false,
|
||||
authenticated: false,
|
||||
error: 'Too many authentication attempts. Please try again later.',
|
||||
resetTime,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const abortController = new AbortController();
|
||||
const timeoutId = setTimeout(() => abortController.abort(), AUTH_TIMEOUT_MS);
|
||||
|
||||
try {
|
||||
// Create secure environment without modifying process.env
|
||||
const authEnv = createSecureAuthEnv(authMethod || 'api_key', apiKey, 'openai');
|
||||
|
||||
// For API key auth, validate and use the provided key or stored key
|
||||
if (authMethod === 'api_key') {
|
||||
if (apiKey) {
|
||||
// Use the provided API key
|
||||
const validation = validateApiKey(apiKey, 'openai');
|
||||
if (!validation.isValid) {
|
||||
res.json({ success: true, authenticated: false, error: validation.error });
|
||||
return;
|
||||
}
|
||||
authEnv[OPENAI_API_KEY_ENV] = validation.normalizedKey;
|
||||
} else {
|
||||
// Try stored key
|
||||
const storedApiKey = getApiKey('openai');
|
||||
if (storedApiKey) {
|
||||
const validation = validateApiKey(storedApiKey, 'openai');
|
||||
if (!validation.isValid) {
|
||||
res.json({ success: true, authenticated: false, error: validation.error });
|
||||
return;
|
||||
}
|
||||
authEnv[OPENAI_API_KEY_ENV] = validation.normalizedKey;
|
||||
} else if (!authEnv[OPENAI_API_KEY_ENV]) {
|
||||
res.json({ success: true, authenticated: false, error: ERROR_API_KEY_REQUIRED });
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create session and temporary environment override
|
||||
AuthSessionManager.createSession(sessionId, authMethod || 'api_key', undefined, 'openai');
|
||||
const cleanupEnv = createTempEnvOverride(authEnv);
|
||||
|
||||
try {
|
||||
if (authMethod === 'cli') {
|
||||
const authIndicators = await getCodexAuthIndicators();
|
||||
if (!authIndicators.hasOAuthToken && !authIndicators.hasApiKey) {
|
||||
res.json({
|
||||
success: true,
|
||||
authenticated: false,
|
||||
error: ERROR_CLI_AUTH_REQUIRED,
|
||||
});
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Use Codex provider explicitly (not ProviderFactory.getProviderForModel)
|
||||
// because Cursor also supports GPT models and has higher priority
|
||||
const provider = ProviderFactory.getProviderByName('codex');
|
||||
if (!provider) {
|
||||
throw new Error('Codex provider not available');
|
||||
}
|
||||
const stream = provider.executeQuery({
|
||||
prompt: AUTH_PROMPT,
|
||||
model: CODEX_MODEL_MAP.gpt52Codex,
|
||||
cwd: process.cwd(),
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
abortController,
|
||||
});
|
||||
|
||||
let receivedAnyContent = false;
|
||||
let errorMessage = '';
|
||||
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'error' && msg.error) {
|
||||
if (isBillingError(msg.error)) {
|
||||
errorMessage = ERROR_BILLING_MESSAGE;
|
||||
} else if (isRateLimitError(msg.error)) {
|
||||
errorMessage = ERROR_RATE_LIMIT_MESSAGE;
|
||||
} else {
|
||||
errorMessage = msg.error;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
receivedAnyContent = true;
|
||||
if (isBillingError(block.text)) {
|
||||
errorMessage = ERROR_BILLING_MESSAGE;
|
||||
break;
|
||||
}
|
||||
if (isRateLimitError(block.text)) {
|
||||
errorMessage = ERROR_RATE_LIMIT_MESSAGE;
|
||||
break;
|
||||
}
|
||||
if (containsAuthError(block.text)) {
|
||||
errorMessage = block.text;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (msg.type === 'result' && msg.result) {
|
||||
receivedAnyContent = true;
|
||||
if (isBillingError(msg.result)) {
|
||||
errorMessage = ERROR_BILLING_MESSAGE;
|
||||
} else if (isRateLimitError(msg.result)) {
|
||||
errorMessage = ERROR_RATE_LIMIT_MESSAGE;
|
||||
} else if (containsAuthError(msg.result)) {
|
||||
errorMessage = msg.result;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (errorMessage) {
|
||||
// Rate limit and billing errors mean auth succeeded but usage is limited
|
||||
const isUsageLimitError =
|
||||
errorMessage === ERROR_BILLING_MESSAGE || errorMessage === ERROR_RATE_LIMIT_MESSAGE;
|
||||
|
||||
const response: {
|
||||
success: boolean;
|
||||
authenticated: boolean;
|
||||
error: string;
|
||||
details?: string;
|
||||
} = {
|
||||
success: true,
|
||||
authenticated: isUsageLimitError ? true : false,
|
||||
error: isUsageLimitError
|
||||
? errorMessage
|
||||
: authMethod === 'cli'
|
||||
? ERROR_CLI_AUTH_REQUIRED
|
||||
: 'API key is invalid or has been revoked.',
|
||||
};
|
||||
|
||||
// Include detailed error for auth failures so users can debug
|
||||
if (!isUsageLimitError && errorMessage !== response.error) {
|
||||
response.details = errorMessage;
|
||||
}
|
||||
|
||||
res.json(response);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!receivedAnyContent) {
|
||||
res.json({
|
||||
success: true,
|
||||
authenticated: false,
|
||||
error: 'No response received from Codex. Please check your authentication.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
res.json({ success: true, authenticated: true });
|
||||
} finally {
|
||||
// Clean up environment override
|
||||
cleanupEnv();
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
const errMessage = error instanceof Error ? error.message : String(error);
|
||||
logger.error('[Setup] Codex auth verification error:', errMessage);
|
||||
const normalizedError = isBillingError(errMessage)
|
||||
? ERROR_BILLING_MESSAGE
|
||||
: isRateLimitError(errMessage)
|
||||
? ERROR_RATE_LIMIT_MESSAGE
|
||||
: errMessage;
|
||||
res.json({
|
||||
success: true,
|
||||
authenticated: false,
|
||||
error: normalizedError,
|
||||
});
|
||||
} finally {
|
||||
clearTimeout(timeoutId);
|
||||
// Clean up session
|
||||
AuthSessionManager.destroySession(sessionId);
|
||||
}
|
||||
};
|
||||
}
|
||||
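// Illustrative sketch of the JSON contract implied by the handler above (assumption: the HTTP
// verb/route shown here is hypothetical; only the field shapes follow from the code).
type VerifyCodexAuthResponse = {
  success: boolean; // false only for the local rate-limiter 429 response
  authenticated: boolean; // true when the probe query succeeded, or the failure was only a usage/billing limit
  error?: string; // user-facing message (e.g. ERROR_CLI_AUTH_REQUIRED or ERROR_BILLING_MESSAGE)
  details?: string; // raw provider error, included for non-usage auth failures to aid debugging
  resetTime?: number; // present on the 429 "too many attempts" response
};

// Hypothetical client call:
async function verifyCodexAuth(apiKey?: string): Promise<VerifyCodexAuthResponse> {
  const res = await fetch('/api/setup/verify-codex-auth', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ authMethod: apiKey ? 'api_key' : 'cli', apiKey }),
  });
  return (await res.json()) as VerifyCodexAuthResponse;
}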
@@ -8,7 +8,12 @@
import { query } from '@anthropic-ai/claude-agent-sdk';
import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel, type ThinkingLevel } from '@automaker/types';
import {
  DEFAULT_PHASE_MODELS,
  isCursorModel,
  stripProviderPrefix,
  type ThinkingLevel,
} from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { createSuggestionsOptions } from '../../lib/sdk-options.js';
import { extractJsonWithArray } from '../../lib/json-extractor.js';
@@ -207,6 +212,8 @@ The response will be automatically formatted as structured JSON.`;
      logger.info('[Suggestions] Using Cursor provider');

      const provider = ProviderFactory.getProviderForModel(model);
      // Strip provider prefix - providers expect bare model IDs
      const bareModel = stripProviderPrefix(model);

      // For Cursor, include the JSON schema in the prompt with clear instructions
      const cursorPrompt = `${prompt}
@@ -222,7 +229,7 @@ Your entire response should be valid JSON starting with { and ending with }. No

      for await (const msg of provider.executeQuery({
        prompt: cursorPrompt,
        model,
        model: bareModel,
        cwd: projectPath,
        maxTurns: 250,
        allowedTools: ['Read', 'Glob', 'Grep'],

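// Sketch of the prefix-stripping idea used above (assumption: the real stripProviderPrefix in
// @automaker/types may be implemented differently; the prefix list below is hypothetical).
// Model IDs carry a provider prefix for routing (e.g. "codex-gpt-5.1-codex-max"), while the
// providers themselves expect the bare ID (e.g. "gpt-5.1-codex-max").
const PROVIDER_PREFIXES = ['codex-', 'cursor-']; // hypothetical
function stripProviderPrefixSketch(model: string): string {
  const prefix = PROVIDER_PREFIXES.find((p) => model.startsWith(p));
  return prefix ? model.slice(prefix.length) : model;
}
// stripProviderPrefixSketch('codex-gpt-5.1-codex-max') -> 'gpt-5.1-codex-max'
// stripProviderPrefixSketch('claude-sonnet-4-20250514') -> unchanged (no known prefix)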
@@ -11,9 +11,10 @@ import { getGitRepositoryDiffs } from '../../common.js';
|
||||
export function createDiffsHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureId } = req.body as {
|
||||
const { projectPath, featureId, useWorktrees } = req.body as {
|
||||
projectPath: string;
|
||||
featureId: string;
|
||||
useWorktrees?: boolean;
|
||||
};
|
||||
|
||||
if (!projectPath || !featureId) {
|
||||
@@ -24,6 +25,19 @@ export function createDiffsHandler() {
|
||||
return;
|
||||
}
|
||||
|
||||
// If worktrees aren't enabled, don't probe .worktrees at all.
|
||||
// This avoids noisy logs that make it look like features are "running in worktrees".
|
||||
if (useWorktrees === false) {
|
||||
const result = await getGitRepositoryDiffs(projectPath);
|
||||
res.json({
|
||||
success: true,
|
||||
diff: result.diff,
|
||||
files: result.files,
|
||||
hasChanges: result.hasChanges,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Git worktrees are stored in project directory
|
||||
const worktreePath = path.join(projectPath, '.worktrees', featureId);
|
||||
|
||||
@@ -41,7 +55,11 @@ export function createDiffsHandler() {
|
||||
});
|
||||
} catch (innerError) {
|
||||
// Worktree doesn't exist - fallback to main project path
|
||||
logError(innerError, 'Worktree access failed, falling back to main project');
|
||||
const code = (innerError as NodeJS.ErrnoException | undefined)?.code;
|
||||
// ENOENT is expected when a feature has no worktree; don't log as an error.
|
||||
if (code && code !== 'ENOENT') {
|
||||
logError(innerError, 'Worktree access failed, falling back to main project');
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await getGitRepositoryDiffs(projectPath);
|
||||
|
||||
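// Illustrative request bodies for the handler above (values are hypothetical). With
// useWorktrees === false the handler skips the .worktrees probe entirely and diffs the main
// repository, which removes the noisy "worktree access failed" fallback path.
const diffsWithWorktreesDisabled = {
  projectPath: '/path/to/project',
  featureId: 'feature-123',
  useWorktrees: false, // -> getGitRepositoryDiffs(projectPath) directly
};
const diffsWithWorktreesEnabled = {
  projectPath: '/path/to/project',
  featureId: 'feature-123',
  useWorktrees: true, // -> try <projectPath>/.worktrees/feature-123, fall back silently on ENOENT
};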
@@ -15,10 +15,11 @@ const execAsync = promisify(exec);
|
||||
export function createFileDiffHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureId, filePath } = req.body as {
|
||||
const { projectPath, featureId, filePath, useWorktrees } = req.body as {
|
||||
projectPath: string;
|
||||
featureId: string;
|
||||
filePath: string;
|
||||
useWorktrees?: boolean;
|
||||
};
|
||||
|
||||
if (!projectPath || !featureId || !filePath) {
|
||||
@@ -29,6 +30,12 @@ export function createFileDiffHandler() {
|
||||
return;
|
||||
}
|
||||
|
||||
// If worktrees aren't enabled, don't probe .worktrees at all.
|
||||
if (useWorktrees === false) {
|
||||
res.json({ success: true, diff: '', filePath });
|
||||
return;
|
||||
}
|
||||
|
||||
// Git worktrees are stored in project directory
|
||||
const worktreePath = path.join(projectPath, '.worktrees', featureId);
|
||||
|
||||
@@ -57,7 +64,11 @@ export function createFileDiffHandler() {
|
||||
|
||||
res.json({ success: true, diff, filePath });
|
||||
} catch (innerError) {
|
||||
logError(innerError, 'Worktree file diff failed');
|
||||
const code = (innerError as NodeJS.ErrnoException | undefined)?.code;
|
||||
// ENOENT is expected when a feature has no worktree; don't log as an error.
|
||||
if (code && code !== 'ENOENT') {
|
||||
logError(innerError, 'Worktree file diff failed');
|
||||
}
|
||||
res.json({ success: true, diff: '', filePath });
|
||||
}
|
||||
} catch (error) {
|
||||
|
||||
@@ -6,13 +6,16 @@
|
||||
import path from 'path';
|
||||
import * as secureFs from '../lib/secure-fs.js';
|
||||
import type { EventEmitter } from '../lib/events.js';
|
||||
import type { ExecuteOptions, ThinkingLevel } from '@automaker/types';
|
||||
import type { ExecuteOptions, ThinkingLevel, ReasoningEffort } from '@automaker/types';
|
||||
import { stripProviderPrefix } from '@automaker/types';
|
||||
import {
|
||||
readImageAsBase64,
|
||||
buildPromptWithImages,
|
||||
isAbortError,
|
||||
loadContextFiles,
|
||||
createLogger,
|
||||
classifyError,
|
||||
getUserFriendlyErrorMessage,
|
||||
} from '@automaker/utils';
|
||||
import { ProviderFactory } from '../providers/provider-factory.js';
|
||||
import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js';
|
||||
@@ -20,10 +23,12 @@ import { PathNotAllowedError } from '@automaker/platform';
|
||||
import type { SettingsService } from './settings-service.js';
|
||||
import {
|
||||
getAutoLoadClaudeMdSetting,
|
||||
getEnableSandboxModeSetting,
|
||||
filterClaudeMdFromContext,
|
||||
getMCPServersFromSettings,
|
||||
getPromptCustomization,
|
||||
getSkillsConfiguration,
|
||||
getSubagentsConfiguration,
|
||||
getCustomSubagents,
|
||||
} from '../lib/settings-helpers.js';
|
||||
|
||||
interface Message {
|
||||
@@ -55,6 +60,7 @@ interface Session {
|
||||
workingDirectory: string;
|
||||
model?: string;
|
||||
thinkingLevel?: ThinkingLevel; // Thinking level for Claude models
|
||||
reasoningEffort?: ReasoningEffort; // Reasoning effort for Codex models
|
||||
sdkSessionId?: string; // Claude SDK session ID for conversation continuity
|
||||
promptQueue: QueuedPrompt[]; // Queue of prompts to auto-run after current task
|
||||
}
|
||||
@@ -144,6 +150,7 @@ export class AgentService {
|
||||
imagePaths,
|
||||
model,
|
||||
thinkingLevel,
|
||||
reasoningEffort,
|
||||
}: {
|
||||
sessionId: string;
|
||||
message: string;
|
||||
@@ -151,6 +158,7 @@ export class AgentService {
|
||||
imagePaths?: string[];
|
||||
model?: string;
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
reasoningEffort?: ReasoningEffort;
|
||||
}) {
|
||||
const session = this.sessions.get(sessionId);
|
||||
if (!session) {
|
||||
@@ -163,7 +171,7 @@ export class AgentService {
|
||||
throw new Error('Agent is already processing a message');
|
||||
}
|
||||
|
||||
// Update session model and thinking level if provided
|
||||
// Update session model, thinking level, and reasoning effort if provided
|
||||
if (model) {
|
||||
session.model = model;
|
||||
await this.updateSession(sessionId, { model });
|
||||
@@ -171,6 +179,21 @@ export class AgentService {
|
||||
if (thinkingLevel !== undefined) {
|
||||
session.thinkingLevel = thinkingLevel;
|
||||
}
|
||||
if (reasoningEffort !== undefined) {
|
||||
session.reasoningEffort = reasoningEffort;
|
||||
}
|
||||
|
||||
// Validate vision support before processing images
|
||||
const effectiveModel = model || session.model;
|
||||
if (imagePaths && imagePaths.length > 0 && effectiveModel) {
|
||||
const supportsVision = ProviderFactory.modelSupportsVision(effectiveModel);
|
||||
if (!supportsVision) {
|
||||
throw new Error(
|
||||
`This model (${effectiveModel}) does not support image input. ` +
|
||||
`Please switch to a model that supports vision, or remove the images and try again.`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Read images and convert to base64
|
||||
const images: Message['images'] = [];
|
||||
@@ -232,19 +255,34 @@ export class AgentService {
|
||||
'[AgentService]'
|
||||
);
|
||||
|
||||
// Load enableSandboxMode setting (global setting only)
|
||||
const enableSandboxMode = await getEnableSandboxModeSetting(
|
||||
this.settingsService,
|
||||
'[AgentService]'
|
||||
);
|
||||
|
||||
// Load MCP servers from settings (global setting only)
|
||||
const mcpServers = await getMCPServersFromSettings(this.settingsService, '[AgentService]');
|
||||
|
||||
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.)
|
||||
// Get Skills configuration from settings
|
||||
const skillsConfig = this.settingsService
|
||||
? await getSkillsConfiguration(this.settingsService)
|
||||
: { enabled: false, sources: [] as Array<'user' | 'project'>, shouldIncludeInTools: false };
|
||||
|
||||
// Get Subagents configuration from settings
|
||||
const subagentsConfig = this.settingsService
|
||||
? await getSubagentsConfiguration(this.settingsService)
|
||||
: { enabled: false, sources: [] as Array<'user' | 'project'>, shouldIncludeInTools: false };
|
||||
|
||||
// Get custom subagents from settings (merge global + project-level) only if enabled
|
||||
const customSubagents =
|
||||
this.settingsService && subagentsConfig.enabled
|
||||
? await getCustomSubagents(this.settingsService, effectiveWorkDir)
|
||||
: undefined;
|
||||
|
||||
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
|
||||
// Use the user's message as task context for smart memory selection
|
||||
const contextResult = await loadContextFiles({
|
||||
projectPath: effectiveWorkDir,
|
||||
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
|
||||
taskContext: {
|
||||
title: message.substring(0, 200), // Use first 200 chars as title
|
||||
description: message,
|
||||
},
|
||||
});
|
||||
|
||||
// When autoLoadClaudeMd is enabled, filter out CLAUDE.md to avoid duplication
|
||||
@@ -258,8 +296,9 @@ export class AgentService {
|
||||
: baseSystemPrompt;
|
||||
|
||||
// Build SDK options using centralized configuration
|
||||
// Use thinking level from request, or fall back to session's stored thinking level
|
||||
// Use thinking level and reasoning effort from request, or fall back to session's stored values
|
||||
const effectiveThinkingLevel = thinkingLevel ?? session.thinkingLevel;
|
||||
const effectiveReasoningEffort = reasoningEffort ?? session.reasoningEffort;
|
||||
const sdkOptions = createChatOptions({
|
||||
cwd: effectiveWorkDir,
|
||||
model: model,
|
||||
@@ -267,7 +306,6 @@ export class AgentService {
|
||||
systemPrompt: combinedSystemPrompt,
|
||||
abortController: session.abortController!,
|
||||
autoLoadClaudeMd,
|
||||
enableSandboxMode,
|
||||
thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
|
||||
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
|
||||
});
|
||||
@@ -275,25 +313,71 @@ export class AgentService {
|
||||
// Extract model, maxTurns, and allowedTools from SDK options
|
||||
const effectiveModel = sdkOptions.model!;
|
||||
const maxTurns = sdkOptions.maxTurns;
|
||||
const allowedTools = sdkOptions.allowedTools as string[] | undefined;
|
||||
let allowedTools = sdkOptions.allowedTools as string[] | undefined;
|
||||
|
||||
// Get provider for this model
|
||||
// Build merged settingSources array using Set for automatic deduplication
|
||||
const sdkSettingSources = (sdkOptions.settingSources ?? []).filter(
|
||||
(source): source is 'user' | 'project' => source === 'user' || source === 'project'
|
||||
);
|
||||
const skillSettingSources = skillsConfig.enabled ? skillsConfig.sources : [];
|
||||
const settingSources = [...new Set([...sdkSettingSources, ...skillSettingSources])];
|
||||
|
||||
// Enhance allowedTools with Skills and Subagents tools
|
||||
// These tools are not in the provider's default set - they're added dynamically based on settings
|
||||
const needsSkillTool = skillsConfig.shouldIncludeInTools;
|
||||
const needsTaskTool =
|
||||
subagentsConfig.shouldIncludeInTools &&
|
||||
customSubagents &&
|
||||
Object.keys(customSubagents).length > 0;
|
||||
|
||||
// Base tools that match the provider's default set
|
||||
const baseTools = ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'];
|
||||
|
||||
if (allowedTools) {
|
||||
allowedTools = [...allowedTools]; // Create a copy to avoid mutating SDK options
|
||||
// Add Skill tool if skills are enabled
|
||||
if (needsSkillTool && !allowedTools.includes('Skill')) {
|
||||
allowedTools.push('Skill');
|
||||
}
|
||||
// Add Task tool if custom subagents are configured
|
||||
if (needsTaskTool && !allowedTools.includes('Task')) {
|
||||
allowedTools.push('Task');
|
||||
}
|
||||
} else if (needsSkillTool || needsTaskTool) {
|
||||
// If no allowedTools specified but we need to add Skill/Task tools,
|
||||
// build the full list including base tools
|
||||
allowedTools = [...baseTools];
|
||||
if (needsSkillTool) {
|
||||
allowedTools.push('Skill');
|
||||
}
|
||||
if (needsTaskTool) {
|
||||
allowedTools.push('Task');
|
||||
}
|
||||
}
|
||||
|
||||
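      // Worked example of the two merges above (illustrative inputs, not part of the diff):
      //   sdkOptions.settingSources = ['project'], skillsConfig = { enabled: true, sources: ['user', 'project'] }
      //     => settingSources = ['project', 'user']         (the Set drops the duplicate 'project')
      //   sdkOptions.allowedTools = ['Read', 'Grep'], needsSkillTool = true, needsTaskTool = false
      //     => allowedTools = ['Read', 'Grep', 'Skill']     (copied first, so sdkOptions is not mutated)
      //   sdkOptions.allowedTools = undefined, needsTaskTool = true, needsSkillTool = false
      //     => allowedTools = [...baseTools, 'Task']        (default tool set plus the Task tool)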
// Get provider for this model (with prefix)
|
||||
const provider = ProviderFactory.getProviderForModel(effectiveModel);
|
||||
|
||||
// Strip provider prefix - providers should receive bare model IDs
|
||||
const bareModel = stripProviderPrefix(effectiveModel);
|
||||
|
||||
// Build options for provider
|
||||
const options: ExecuteOptions = {
|
||||
prompt: '', // Will be set below based on images
|
||||
model: effectiveModel,
|
||||
model: bareModel, // Bare model ID (e.g., "gpt-5.1-codex-max", "composer-1")
|
||||
originalModel: effectiveModel, // Original with prefix for logging (e.g., "codex-gpt-5.1-codex-max")
|
||||
cwd: effectiveWorkDir,
|
||||
systemPrompt: sdkOptions.systemPrompt,
|
||||
maxTurns: maxTurns,
|
||||
allowedTools: allowedTools,
|
||||
abortController: session.abortController!,
|
||||
conversationHistory: conversationHistory.length > 0 ? conversationHistory : undefined,
|
||||
settingSources: sdkOptions.settingSources,
|
||||
sandbox: sdkOptions.sandbox, // Pass sandbox configuration
|
||||
settingSources: settingSources.length > 0 ? settingSources : undefined,
|
||||
sdkSessionId: session.sdkSessionId, // Pass SDK session ID for resuming
|
||||
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
|
||||
agents: customSubagents, // Pass custom subagents for task delegation
|
||||
thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
|
||||
reasoningEffort: effectiveReasoningEffort, // Pass reasoning effort for Codex models
|
||||
};
|
||||
|
||||
// Build prompt content with images
|
||||
@@ -374,6 +458,53 @@ export class AgentService {
|
||||
content: responseText,
|
||||
toolUses,
|
||||
});
|
||||
} else if (msg.type === 'error') {
|
||||
// Some providers (like Codex CLI/SaaS or Cursor CLI) surface failures as
|
||||
// streamed error messages instead of throwing. Handle these here so the
|
||||
// Agent Runner UX matches the Claude/Cursor behavior without changing
|
||||
// their provider implementations.
|
||||
const rawErrorText =
|
||||
(typeof msg.error === 'string' && msg.error.trim()) ||
|
||||
'Unexpected error from provider during agent execution.';
|
||||
|
||||
const errorInfo = classifyError(new Error(rawErrorText));
|
||||
|
||||
// Keep the provider-supplied text intact (Codex already includes helpful tips),
|
||||
// only add a small rate-limit hint when we can detect it.
|
||||
const enhancedText = errorInfo.isRateLimit
|
||||
? `${rawErrorText}\n\nTip: It looks like you hit a rate limit. Try waiting a bit or reducing concurrent Agent Runner / Auto Mode tasks.`
|
||||
: rawErrorText;
|
||||
|
||||
this.logger.error('Provider error during agent execution:', {
|
||||
type: errorInfo.type,
|
||||
message: errorInfo.message,
|
||||
});
|
||||
|
||||
// Mark session as no longer running so the UI and queue stay in sync
|
||||
session.isRunning = false;
|
||||
session.abortController = null;
|
||||
|
||||
const errorMessage: Message = {
|
||||
id: this.generateId(),
|
||||
role: 'assistant',
|
||||
content: `Error: ${enhancedText}`,
|
||||
timestamp: new Date().toISOString(),
|
||||
isError: true,
|
||||
};
|
||||
|
||||
session.messages.push(errorMessage);
|
||||
await this.saveSession(sessionId, session.messages);
|
||||
|
||||
this.emitAgentEvent(sessionId, {
|
||||
type: 'error',
|
||||
error: enhancedText,
|
||||
message: errorMessage,
|
||||
});
|
||||
|
||||
// Don't continue streaming after an error message
|
||||
return {
|
||||
success: false,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,17 +14,17 @@ import type {
|
||||
ExecuteOptions,
|
||||
Feature,
|
||||
ModelProvider,
|
||||
PipelineConfig,
|
||||
PipelineStep,
|
||||
ThinkingLevel,
|
||||
PlanningMode,
|
||||
} from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, stripProviderPrefix } from '@automaker/types';
|
||||
import {
|
||||
buildPromptWithImages,
|
||||
isAbortError,
|
||||
classifyError,
|
||||
loadContextFiles,
|
||||
appendLearning,
|
||||
recordMemoryUsage,
|
||||
createLogger,
|
||||
} from '@automaker/utils';
|
||||
|
||||
@@ -47,7 +47,6 @@ import type { SettingsService } from './settings-service.js';
|
||||
import { pipelineService, PipelineService } from './pipeline-service.js';
|
||||
import {
|
||||
getAutoLoadClaudeMdSetting,
|
||||
getEnableSandboxModeSetting,
|
||||
filterClaudeMdFromContext,
|
||||
getMCPServersFromSettings,
|
||||
getPromptCustomization,
|
||||
@@ -323,6 +322,8 @@ export class AutoModeService {
|
||||
projectPath,
|
||||
});
|
||||
|
||||
// Note: Memory folder initialization is now handled by loadContextFiles
|
||||
|
||||
// Run the loop in the background
|
||||
this.runAutoLoop().catch((error) => {
|
||||
logger.error('Loop error:', error);
|
||||
@@ -514,15 +515,21 @@ export class AutoModeService {
|
||||
|
||||
// Build the prompt - use continuation prompt if provided (for recovery after plan approval)
|
||||
let prompt: string;
|
||||
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) - passed as system prompt
|
||||
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
|
||||
// Context loader uses task context to select relevant memory files
|
||||
const contextResult = await loadContextFiles({
|
||||
projectPath,
|
||||
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
|
||||
taskContext: {
|
||||
title: feature.title ?? '',
|
||||
description: feature.description ?? '',
|
||||
},
|
||||
});
|
||||
|
||||
// When autoLoadClaudeMd is enabled, filter out CLAUDE.md to avoid duplication
|
||||
// (SDK handles CLAUDE.md via settingSources), but keep other context files like CODE_QUALITY.md
|
||||
const contextFilesPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
|
||||
// Note: contextResult.formattedPrompt now includes both context AND memory
|
||||
const combinedSystemPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
|
||||
|
||||
if (options?.continuationPrompt) {
|
||||
// Continuation prompt is used when recovering from a plan approval
|
||||
@@ -575,7 +582,7 @@ export class AutoModeService {
|
||||
projectPath,
|
||||
planningMode: feature.planningMode,
|
||||
requirePlanApproval: feature.requirePlanApproval,
|
||||
systemPrompt: contextFilesPrompt || undefined,
|
||||
systemPrompt: combinedSystemPrompt || undefined,
|
||||
autoLoadClaudeMd,
|
||||
thinkingLevel: feature.thinkingLevel,
|
||||
}
|
||||
@@ -607,6 +614,36 @@ export class AutoModeService {
|
||||
// Record success to reset consecutive failure tracking
|
||||
this.recordSuccess();
|
||||
|
||||
// Record learnings and memory usage after successful feature completion
|
||||
try {
|
||||
const featureDir = getFeatureDir(projectPath, featureId);
|
||||
const outputPath = path.join(featureDir, 'agent-output.md');
|
||||
let agentOutput = '';
|
||||
try {
|
||||
const outputContent = await secureFs.readFile(outputPath, 'utf-8');
|
||||
agentOutput =
|
||||
typeof outputContent === 'string' ? outputContent : outputContent.toString();
|
||||
} catch {
|
||||
// Agent output might not exist yet
|
||||
}
|
||||
|
||||
// Record memory usage if we loaded any memory files
|
||||
if (contextResult.memoryFiles.length > 0 && agentOutput) {
|
||||
await recordMemoryUsage(
|
||||
projectPath,
|
||||
contextResult.memoryFiles,
|
||||
agentOutput,
|
||||
true, // success
|
||||
secureFs as Parameters<typeof recordMemoryUsage>[4]
|
||||
);
|
||||
}
|
||||
|
||||
// Extract and record learnings from the agent output
|
||||
await this.recordLearningsFromFeature(projectPath, feature, agentOutput);
|
||||
} catch (learningError) {
|
||||
console.warn('[AutoMode] Failed to record learnings:', learningError);
|
||||
}
|
||||
|
||||
this.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
passes: true,
|
||||
@@ -675,10 +712,14 @@ export class AutoModeService {
|
||||
): Promise<void> {
|
||||
logger.info(`Executing ${steps.length} pipeline step(s) for feature ${featureId}`);
|
||||
|
||||
// Load context files once
|
||||
// Load context files once with feature context for smart memory selection
|
||||
const contextResult = await loadContextFiles({
|
||||
projectPath,
|
||||
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
|
||||
taskContext: {
|
||||
title: feature.title ?? '',
|
||||
description: feature.description ?? '',
|
||||
},
|
||||
});
|
||||
const contextFilesPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
|
||||
|
||||
@@ -911,6 +952,10 @@ Complete the pipeline step instructions above. Review the previous work and appl
|
||||
const contextResult = await loadContextFiles({
|
||||
projectPath,
|
||||
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
|
||||
taskContext: {
|
||||
title: feature?.title ?? prompt.substring(0, 200),
|
||||
description: feature?.description ?? prompt,
|
||||
},
|
||||
});
|
||||
|
||||
// When autoLoadClaudeMd is enabled, filter out CLAUDE.md to avoid duplication
|
||||
@@ -1314,7 +1359,6 @@ Format your response as a structured markdown document.`;
|
||||
allowedTools: sdkOptions.allowedTools as string[],
|
||||
abortController,
|
||||
settingSources: sdkOptions.settingSources,
|
||||
sandbox: sdkOptions.sandbox, // Pass sandbox configuration
|
||||
thinkingLevel: analysisThinkingLevel, // Pass thinking level
|
||||
};
|
||||
|
||||
@@ -1784,9 +1828,13 @@ Format your response as a structured markdown document.`;
|
||||
// Apply dependency-aware ordering
|
||||
const { orderedFeatures } = resolveDependencies(pendingFeatures);
|
||||
|
||||
// Get skipVerificationInAutoMode setting
|
||||
const settings = await this.settingsService?.getGlobalSettings();
|
||||
const skipVerification = settings?.skipVerificationInAutoMode ?? false;
|
||||
|
||||
// Filter to only features with satisfied dependencies
|
||||
const readyFeatures = orderedFeatures.filter((feature: Feature) =>
|
||||
areDependenciesSatisfied(feature, allFeatures)
|
||||
areDependenciesSatisfied(feature, allFeatures, { skipVerification })
|
||||
);
|
||||
|
||||
return readyFeatures;
|
||||
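// Sketch of the intent behind the new { skipVerification } option (assumption: the real
// areDependenciesSatisfied helper and its status names may differ; the values below are hypothetical).
// With skipVerificationInAutoMode enabled, a dependency that finished implementation but has not
// been verified yet should still unblock its dependents.
type DependencyCheckOptions = { skipVerification?: boolean };
function dependencySatisfiedSketch(depStatus: string, opts: DependencyCheckOptions = {}): boolean {
  if (depStatus === 'verified') return true;
  // Treat "implemented but awaiting verification" as satisfied only when verification is skipped.
  return opts.skipVerification === true && depStatus === 'waiting_for_verification';
}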
@@ -1989,6 +2037,18 @@ This helps parse your summary correctly in the output logs.`;
|
||||
const planningMode = options?.planningMode || 'skip';
|
||||
const previousContent = options?.previousContent;
|
||||
|
||||
// Validate vision support before processing images
|
||||
const effectiveModel = model || 'claude-sonnet-4-20250514';
|
||||
if (imagePaths && imagePaths.length > 0) {
|
||||
const supportsVision = ProviderFactory.modelSupportsVision(effectiveModel);
|
||||
if (!supportsVision) {
|
||||
throw new Error(
|
||||
`This model (${effectiveModel}) does not support image input. ` +
|
||||
`Please switch to a model that supports vision (like Claude models), or remove the images and try again.`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this planning mode can generate a spec/plan that needs approval
|
||||
// - spec and full always generate specs
|
||||
// - lite only generates approval-ready content when requirePlanApproval is true
|
||||
@@ -2062,9 +2122,6 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
? options.autoLoadClaudeMd
|
||||
: await getAutoLoadClaudeMdSetting(finalProjectPath, this.settingsService, '[AutoMode]');
|
||||
|
||||
// Load enableSandboxMode setting (global setting only)
|
||||
const enableSandboxMode = await getEnableSandboxModeSetting(this.settingsService, '[AutoMode]');
|
||||
|
||||
// Load MCP servers from settings (global setting only)
|
||||
const mcpServers = await getMCPServersFromSettings(this.settingsService, '[AutoMode]');
|
||||
|
||||
@@ -2076,7 +2133,6 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
model: model,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
enableSandboxMode,
|
||||
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
|
||||
thinkingLevel: options?.thinkingLevel,
|
||||
});
|
||||
@@ -2093,7 +2149,12 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
// Get provider for this model
|
||||
const provider = ProviderFactory.getProviderForModel(finalModel);
|
||||
|
||||
logger.info(`Using provider "${provider.getName()}" for model "${finalModel}"`);
|
||||
// Strip provider prefix - providers should receive bare model IDs
|
||||
const bareModel = stripProviderPrefix(finalModel);
|
||||
|
||||
logger.info(
|
||||
`Using provider "${provider.getName()}" for model "${finalModel}" (bare: ${bareModel})`
|
||||
);
|
||||
|
||||
// Build prompt content with images using utility
|
||||
const { content: promptContent } = await buildPromptWithImages(
|
||||
@@ -2112,14 +2173,13 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
|
||||
const executeOptions: ExecuteOptions = {
|
||||
prompt: promptContent,
|
||||
model: finalModel,
|
||||
model: bareModel,
|
||||
maxTurns: maxTurns,
|
||||
cwd: workDir,
|
||||
allowedTools: allowedTools,
|
||||
abortController,
|
||||
systemPrompt: sdkOptions.systemPrompt,
|
||||
settingSources: sdkOptions.settingSources,
|
||||
sandbox: sdkOptions.sandbox, // Pass sandbox configuration
|
||||
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
|
||||
thinkingLevel: options?.thinkingLevel, // Pass thinking level for extended thinking
|
||||
};
|
||||
@@ -2202,9 +2262,23 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
|
||||
}, WRITE_DEBOUNCE_MS);
|
||||
};
|
||||
|
||||
// Heartbeat logging so "silent" model calls are visible.
|
||||
// Some runs can take a while before the first streamed message arrives.
|
||||
const streamStartTime = Date.now();
|
||||
let receivedAnyStreamMessage = false;
|
||||
const STREAM_HEARTBEAT_MS = 15_000;
|
||||
const streamHeartbeat = setInterval(() => {
|
||||
if (receivedAnyStreamMessage) return;
|
||||
const elapsedSeconds = Math.round((Date.now() - streamStartTime) / 1000);
|
||||
logger.info(
|
||||
`Waiting for first model response for feature ${featureId} (${elapsedSeconds}s elapsed)...`
|
||||
);
|
||||
}, STREAM_HEARTBEAT_MS);
|
||||
|
||||
// Wrap stream processing in try/finally to ensure timeout cleanup on any error/abort
|
||||
try {
|
||||
streamLoop: for await (const msg of stream) {
|
||||
receivedAnyStreamMessage = true;
|
||||
// Log raw stream event for debugging
|
||||
appendRawEvent(msg);
|
||||
|
||||
@@ -2404,7 +2478,7 @@ After generating the revised spec, output:
|
||||
// Make revision call
|
||||
const revisionStream = provider.executeQuery({
|
||||
prompt: revisionPrompt,
|
||||
model: finalModel,
|
||||
model: bareModel,
|
||||
maxTurns: maxTurns || 100,
|
||||
cwd: workDir,
|
||||
allowedTools: allowedTools,
|
||||
@@ -2542,7 +2616,7 @@ After generating the revised spec, output:
|
||||
// Execute task with dedicated agent
|
||||
const taskStream = provider.executeQuery({
|
||||
prompt: taskPrompt,
|
||||
model: finalModel,
|
||||
model: bareModel,
|
||||
maxTurns: Math.min(maxTurns || 100, 50), // Limit turns per task
|
||||
cwd: workDir,
|
||||
allowedTools: allowedTools,
|
||||
@@ -2630,7 +2704,7 @@ Implement all the changes described in the plan above.`;
|
||||
|
||||
const continuationStream = provider.executeQuery({
|
||||
prompt: continuationPrompt,
|
||||
model: finalModel,
|
||||
model: bareModel,
|
||||
maxTurns: maxTurns,
|
||||
cwd: workDir,
|
||||
allowedTools: allowedTools,
|
||||
@@ -2721,6 +2795,7 @@ Implement all the changes described in the plan above.`;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
clearInterval(streamHeartbeat);
|
||||
// ALWAYS clear pending timeouts to prevent memory leaks
|
||||
// This runs on success, error, or abort
|
||||
if (writeTimeout) {
|
||||
@@ -2874,4 +2949,207 @@ Begin implementing task ${task.id} now.`;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract and record learnings from a completed feature
|
||||
* Uses a quick Claude call to identify important decisions and patterns
|
||||
*/
|
||||
private async recordLearningsFromFeature(
|
||||
projectPath: string,
|
||||
feature: Feature,
|
||||
agentOutput: string
|
||||
): Promise<void> {
|
||||
if (!agentOutput || agentOutput.length < 100) {
|
||||
// Not enough output to extract learnings from
|
||||
console.log(
|
||||
`[AutoMode] Skipping learning extraction - output too short (${agentOutput?.length || 0} chars)`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[AutoMode] Extracting learnings from feature "${feature.title}" (${agentOutput.length} chars)`
|
||||
);
|
||||
|
||||
// Limit output to avoid token limits
|
||||
const truncatedOutput = agentOutput.length > 10000 ? agentOutput.slice(-10000) : agentOutput;
|
||||
|
||||
const userPrompt = `You are an Architecture Decision Record (ADR) extractor. Analyze this implementation and return ONLY JSON with learnings. No explanations.
|
||||
|
||||
Feature: "${feature.title}"
|
||||
|
||||
Implementation log:
|
||||
${truncatedOutput}
|
||||
|
||||
Extract MEANINGFUL learnings - not obvious things. For each, capture:
|
||||
- DECISIONS: Why this approach vs alternatives? What would break if changed?
|
||||
- GOTCHAS: What was unexpected? What's the root cause? How to avoid?
|
||||
- PATTERNS: Why this pattern? What problem does it solve? Trade-offs?
|
||||
|
||||
JSON format ONLY (no markdown, no text):
|
||||
{"learnings": [{
|
||||
"category": "architecture|api|ui|database|auth|testing|performance|security|gotchas",
|
||||
"type": "decision|gotcha|pattern",
|
||||
"content": "What was done/learned",
|
||||
"context": "Problem being solved or situation faced",
|
||||
"why": "Reasoning - why this approach",
|
||||
"rejected": "Alternative considered and why rejected",
|
||||
"tradeoffs": "What became easier/harder",
|
||||
"breaking": "What breaks if this is changed/removed"
|
||||
}]}
|
||||
|
||||
IMPORTANT: Only include NON-OBVIOUS learnings with real reasoning. Skip trivial patterns.
|
||||
If nothing notable: {"learnings": []}`;
|
||||
|
||||
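      // Example of the JSON shape the prompt above asks for (illustrative values only):
      // {
      //   "learnings": [{
      //     "category": "database",
      //     "type": "decision",
      //     "content": "Stored feature metadata as one JSON file per feature instead of a shared index",
      //     "context": "Concurrent Auto Mode runs were clobbering a single index file",
      //     "why": "Per-feature files avoid write contention without introducing a database",
      //     "rejected": "Single features.json index - simpler reads but frequent merge conflicts",
      //     "tradeoffs": "Listing features now requires a directory scan",
      //     "breaking": "Anything that expects a single features.json stops finding features"
      //   }]
      // }
      // An empty result is simply {"learnings": []}.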
try {
|
||||
// Import query dynamically to avoid circular dependencies
|
||||
const { query } = await import('@anthropic-ai/claude-agent-sdk');
|
||||
|
||||
// Get model from phase settings
|
||||
const settings = await this.settingsService?.getGlobalSettings();
|
||||
const phaseModelEntry =
|
||||
settings?.phaseModels?.memoryExtractionModel || DEFAULT_PHASE_MODELS.memoryExtractionModel;
|
||||
const { model } = resolvePhaseModel(phaseModelEntry);
|
||||
|
||||
const stream = query({
|
||||
prompt: userPrompt,
|
||||
options: {
|
||||
model,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
permissionMode: 'acceptEdits',
|
||||
systemPrompt:
|
||||
'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
|
||||
},
|
||||
});
|
||||
|
||||
// Extract text from stream
|
||||
let responseText = '';
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
responseText = msg.result || responseText;
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`[AutoMode] Learning extraction response: ${responseText.length} chars`);
|
||||
console.log(`[AutoMode] Response preview: ${responseText.substring(0, 300)}`);
|
||||
|
||||
// Parse the response - handle JSON in markdown code blocks or raw
|
||||
let jsonStr: string | null = null;
|
||||
|
||||
// First try to find JSON in markdown code blocks
|
||||
const codeBlockMatch = responseText.match(/```(?:json)?\s*(\{[\s\S]*?\})\s*```/);
|
||||
if (codeBlockMatch) {
|
||||
console.log('[AutoMode] Found JSON in code block');
|
||||
jsonStr = codeBlockMatch[1];
|
||||
} else {
|
||||
// Fall back to finding balanced braces containing "learnings"
|
||||
// Use a more precise approach: find the opening brace before "learnings"
|
||||
const learningsIndex = responseText.indexOf('"learnings"');
|
||||
if (learningsIndex !== -1) {
|
||||
// Find the opening brace before "learnings"
|
||||
let braceStart = responseText.lastIndexOf('{', learningsIndex);
|
||||
if (braceStart !== -1) {
|
||||
// Find matching closing brace
|
||||
let braceCount = 0;
|
||||
let braceEnd = -1;
|
||||
for (let i = braceStart; i < responseText.length; i++) {
|
||||
if (responseText[i] === '{') braceCount++;
|
||||
if (responseText[i] === '}') braceCount--;
|
||||
if (braceCount === 0) {
|
||||
braceEnd = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (braceEnd !== -1) {
|
||||
jsonStr = responseText.substring(braceStart, braceEnd + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!jsonStr) {
|
||||
console.log('[AutoMode] Could not extract JSON from response');
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`[AutoMode] Extracted JSON: ${jsonStr.substring(0, 200)}`);
|
||||
|
||||
let parsed: { learnings?: unknown[] };
|
||||
try {
|
||||
parsed = JSON.parse(jsonStr);
|
||||
} catch {
|
||||
console.warn('[AutoMode] Failed to parse learnings JSON:', jsonStr.substring(0, 200));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!parsed.learnings || !Array.isArray(parsed.learnings)) {
|
||||
console.log('[AutoMode] No learnings array in parsed response');
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`[AutoMode] Found ${parsed.learnings.length} potential learnings`);
|
||||
|
||||
// Valid learning types
|
||||
const validTypes = new Set(['decision', 'learning', 'pattern', 'gotcha']);
|
||||
|
||||
// Record each learning
|
||||
for (const item of parsed.learnings) {
|
||||
// Validate required fields with proper type narrowing
|
||||
if (!item || typeof item !== 'object') continue;
|
||||
|
||||
const learning = item as Record<string, unknown>;
|
||||
if (
|
||||
!learning.category ||
|
||||
typeof learning.category !== 'string' ||
|
||||
!learning.content ||
|
||||
typeof learning.content !== 'string' ||
|
||||
!learning.content.trim()
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate and normalize type
|
||||
const typeStr = typeof learning.type === 'string' ? learning.type : 'learning';
|
||||
const learningType = validTypes.has(typeStr)
|
||||
? (typeStr as 'decision' | 'learning' | 'pattern' | 'gotcha')
|
||||
: 'learning';
|
||||
|
||||
console.log(
|
||||
`[AutoMode] Appending learning: category=${learning.category}, type=${learningType}`
|
||||
);
|
||||
await appendLearning(
|
||||
projectPath,
|
||||
{
|
||||
category: learning.category,
|
||||
type: learningType,
|
||||
content: learning.content.trim(),
|
||||
context: typeof learning.context === 'string' ? learning.context : undefined,
|
||||
why: typeof learning.why === 'string' ? learning.why : undefined,
|
||||
rejected: typeof learning.rejected === 'string' ? learning.rejected : undefined,
|
||||
tradeoffs: typeof learning.tradeoffs === 'string' ? learning.tradeoffs : undefined,
|
||||
breaking: typeof learning.breaking === 'string' ? learning.breaking : undefined,
|
||||
},
|
||||
secureFs as Parameters<typeof appendLearning>[2]
|
||||
);
|
||||
}
|
||||
|
||||
const validLearnings = parsed.learnings.filter(
|
||||
(l) => l && typeof l === 'object' && (l as Record<string, unknown>).content
|
||||
);
|
||||
if (validLearnings.length > 0) {
|
||||
console.log(
|
||||
`[AutoMode] Recorded ${parsed.learnings.length} learning(s) from feature ${feature.id}`
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn(`[AutoMode] Failed to extract learnings from feature ${feature.id}:`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
404
apps/server/src/services/codex-usage-service.ts
Normal file
@@ -0,0 +1,404 @@
import {
  findCodexCliPath,
  spawnProcess,
  getCodexAuthPath,
  systemPathExists,
  systemPathReadFile,
} from '@automaker/platform';
import { createLogger } from '@automaker/utils';

const logger = createLogger('CodexUsage');

export interface CodexRateLimitWindow {
  limit: number;
  used: number;
  remaining: number;
  usedPercent: number;
  windowDurationMins: number;
  resetsAt: number;
}

export interface CodexCreditsSnapshot {
  balance?: string;
  unlimited?: boolean;
  hasCredits?: boolean;
}

export type CodexPlanType = 'free' | 'plus' | 'pro' | 'team' | 'enterprise' | 'edu' | 'unknown';

export interface CodexUsageData {
  rateLimits: {
    primary?: CodexRateLimitWindow;
    secondary?: CodexRateLimitWindow;
    credits?: CodexCreditsSnapshot;
    planType?: CodexPlanType;
  } | null;
  lastUpdated: string;
}

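// Example of a populated CodexUsageData value (illustrative numbers, not real CLI output):
// {
//   rateLimits: {
//     planType: 'plus',
//     primary: {
//       limit: 0, used: 0, remaining: 0, usedPercent: 100,
//       windowDurationMins: 180,   // minutes until the limit window resets
//       resetsAt: 1735689600,      // epoch value reported by the rate-limit error
//     },
//     credits: { hasCredits: true, unlimited: true },
//   },
//   lastUpdated: '2025-01-01T00:00:00.000Z',
// }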
/**
|
||||
* Codex Usage Service
|
||||
*
|
||||
* Attempts to fetch usage data from Codex CLI and OpenAI API.
|
||||
* Codex CLI doesn't provide a direct usage command, but we can:
|
||||
* 1. Parse usage info from error responses (rate limit errors contain plan info)
|
||||
* 2. Check for OpenAI API usage if API key is available
|
||||
*/
|
||||
export class CodexUsageService {
|
||||
private cachedCliPath: string | null = null;
|
||||
|
||||
/**
|
||||
* Check if Codex CLI is available on the system
|
||||
*/
|
||||
async isAvailable(): Promise<boolean> {
|
||||
this.cachedCliPath = await findCodexCliPath();
|
||||
return Boolean(this.cachedCliPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to fetch usage data
|
||||
*
|
||||
* Tries multiple approaches:
|
||||
* 1. Always try to get plan type from auth file first (authoritative source)
|
||||
* 2. Check for OpenAI API key in environment for API usage
|
||||
* 3. Make a test request to capture rate limit headers from CLI
|
||||
* 4. Combine results from auth file and CLI
|
||||
*/
|
||||
async fetchUsageData(): Promise<CodexUsageData> {
|
||||
const cliPath = this.cachedCliPath || (await findCodexCliPath());
|
||||
|
||||
if (!cliPath) {
|
||||
throw new Error('Codex CLI not found. Please install it with: npm install -g @openai/codex');
|
||||
}
|
||||
|
||||
// Always try to get plan type from auth file first - this is the authoritative source
|
||||
const authPlanType = await this.getPlanTypeFromAuthFile();
|
||||
|
||||
// Check if user has an API key that we can use
|
||||
const hasApiKey = !!process.env.OPENAI_API_KEY;
|
||||
|
||||
if (hasApiKey) {
|
||||
// Try to get usage from OpenAI API
|
||||
const openaiUsage = await this.fetchOpenAIUsage();
|
||||
if (openaiUsage) {
|
||||
// Merge with auth file plan type if available
|
||||
if (authPlanType && openaiUsage.rateLimits) {
|
||||
openaiUsage.rateLimits.planType = authPlanType;
|
||||
}
|
||||
return openaiUsage;
|
||||
}
|
||||
}
|
||||
|
||||
// Try to get usage from Codex CLI by making a simple request
|
||||
const codexUsage = await this.fetchCodexUsage(cliPath, authPlanType);
|
||||
if (codexUsage) {
|
||||
return codexUsage;
|
||||
}
|
||||
|
||||
// Fallback: try to parse full usage from auth file
|
||||
const authUsage = await this.fetchFromAuthFile();
|
||||
if (authUsage) {
|
||||
return authUsage;
|
||||
}
|
||||
|
||||
// If all else fails, return a message with helpful information
|
||||
throw new Error(
|
||||
'Codex usage statistics require additional configuration. ' +
|
||||
'To enable usage tracking:\n\n' +
|
||||
'1. Set your OpenAI API key in the environment:\n' +
|
||||
' export OPENAI_API_KEY=sk-...\n\n' +
|
||||
'2. Or check your usage at:\n' +
|
||||
' https://platform.openai.com/usage\n\n' +
|
||||
'Note: If using Codex CLI with ChatGPT OAuth authentication, ' +
|
||||
'usage data must be queried through your OpenAI account.'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract plan type from auth file JWT token
|
||||
* Returns the actual plan type or 'unknown' if not available
|
||||
*/
|
||||
private async getPlanTypeFromAuthFile(): Promise<CodexPlanType> {
|
||||
try {
|
||||
const authFilePath = getCodexAuthPath();
|
||||
const exists = await systemPathExists(authFilePath);
|
||||
|
||||
if (!exists) {
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
const authContent = await systemPathReadFile(authFilePath);
|
||||
const authData = JSON.parse(authContent);
|
||||
|
||||
if (!authData.tokens?.id_token) {
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
const claims = this.parseJwt(authData.tokens.id_token);
|
||||
if (!claims) {
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
// Extract plan type from nested OpenAI auth object with type validation
|
||||
const openaiAuthClaim = claims['https://api.openai.com/auth'];
|
||||
|
||||
let accountType: string | undefined;
|
||||
let isSubscriptionExpired = false;
|
||||
|
||||
if (
|
||||
openaiAuthClaim &&
|
||||
typeof openaiAuthClaim === 'object' &&
|
||||
!Array.isArray(openaiAuthClaim)
|
||||
) {
|
||||
const openaiAuth = openaiAuthClaim as Record<string, unknown>;
|
||||
|
||||
if (typeof openaiAuth.chatgpt_plan_type === 'string') {
|
||||
accountType = openaiAuth.chatgpt_plan_type;
|
||||
}
|
||||
|
||||
// Check if subscription has expired
|
||||
if (typeof openaiAuth.chatgpt_subscription_active_until === 'string') {
|
||||
const expiryDate = new Date(openaiAuth.chatgpt_subscription_active_until);
|
||||
if (!isNaN(expiryDate.getTime())) {
|
||||
isSubscriptionExpired = expiryDate < new Date();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Fallback: try top-level claim names
|
||||
const possibleClaimNames = [
|
||||
'https://chatgpt.com/account_type',
|
||||
'account_type',
|
||||
'plan',
|
||||
'plan_type',
|
||||
];
|
||||
|
||||
for (const claimName of possibleClaimNames) {
|
||||
const claimValue = claims[claimName];
|
||||
if (claimValue && typeof claimValue === 'string') {
|
||||
accountType = claimValue;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If subscription is expired, treat as free plan
|
||||
if (isSubscriptionExpired && accountType && accountType !== 'free') {
|
||||
logger.info(`Subscription expired, using "free" instead of "${accountType}"`);
|
||||
accountType = 'free';
|
||||
}
|
||||
|
||||
if (accountType) {
|
||||
const normalizedType = accountType.toLowerCase();
|
||||
if (['free', 'plus', 'pro', 'team', 'enterprise', 'edu'].includes(normalizedType)) {
|
||||
return normalizedType as CodexPlanType;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to get plan type from auth file:', error);
|
||||
}
|
||||
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
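  // Shape of the decoded id_token claims that getPlanTypeFromAuthFile() looks for
  // (illustrative example; real tokens contain more fields):
  // {
  //   "https://api.openai.com/auth": {
  //     "chatgpt_plan_type": "plus",
  //     "chatgpt_subscription_active_until": "2025-06-01T00:00:00Z"
  //   }
  // }
  // If the subscription date is in the past the plan is downgraded to "free"; if the nested object
  // is missing, the top-level fallback claim names listed above are tried instead.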
/**
|
||||
* Try to fetch usage from OpenAI API using the API key
|
||||
*/
|
||||
private async fetchOpenAIUsage(): Promise<CodexUsageData | null> {
|
||||
const apiKey = process.env.OPENAI_API_KEY;
|
||||
if (!apiKey) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
const endTime = Math.floor(Date.now() / 1000);
|
||||
const startTime = endTime - 7 * 24 * 60 * 60; // Last 7 days
|
||||
|
||||
const response = await fetch(
|
||||
`https://api.openai.com/v1/organization/usage/completions?start_time=${startTime}&end_time=${endTime}&limit=1`,
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
return this.parseOpenAIUsage(data);
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to fetch from OpenAI API:', error);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse OpenAI usage API response
|
||||
*/
|
||||
private parseOpenAIUsage(data: any): CodexUsageData {
|
||||
let totalInputTokens = 0;
|
||||
let totalOutputTokens = 0;
|
||||
|
||||
if (data.data && Array.isArray(data.data)) {
|
||||
for (const bucket of data.data) {
|
||||
if (bucket.results && Array.isArray(bucket.results)) {
|
||||
for (const result of bucket.results) {
|
||||
totalInputTokens += result.input_tokens || 0;
|
||||
totalOutputTokens += result.output_tokens || 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
rateLimits: {
|
||||
planType: 'unknown',
|
||||
credits: {
|
||||
hasCredits: true,
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to fetch usage by making a test request to Codex CLI
|
||||
* and parsing rate limit information from the response
|
||||
*/
|
||||
private async fetchCodexUsage(
|
||||
cliPath: string,
|
||||
authPlanType: CodexPlanType
|
||||
): Promise<CodexUsageData | null> {
|
||||
try {
|
||||
// Make a simple request to trigger rate limit info if at limit
|
||||
const result = await spawnProcess({
|
||||
command: cliPath,
|
||||
args: ['exec', '--', 'echo', 'test'],
|
||||
cwd: process.cwd(),
|
||||
env: {
|
||||
...process.env,
|
||||
TERM: 'dumb',
|
||||
},
|
||||
timeout: 10000,
|
||||
});
|
||||
|
||||
// Parse the output for rate limit information
|
||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||
|
||||
// Check if we got a rate limit error
|
||||
const rateLimitMatch = combinedOutput.match(
|
||||
/usage_limit_reached.*?"plan_type":"([^"]+)".*?"resets_at":(\d+).*?"resets_in_seconds":(\d+)/
|
||||
);
|
||||
|
||||
if (rateLimitMatch) {
|
||||
// Rate limit error contains the plan type - use that as it's the most authoritative
|
||||
const planType = rateLimitMatch[1] as CodexPlanType;
|
||||
const resetsAt = parseInt(rateLimitMatch[2], 10);
|
||||
const resetsInSeconds = parseInt(rateLimitMatch[3], 10);
|
||||
|
||||
logger.info(
|
||||
`Rate limit hit - plan: ${planType}, resets in ${Math.ceil(resetsInSeconds / 60)} mins`
|
||||
);
|
||||
|
||||
return {
|
||||
rateLimits: {
|
||||
planType,
|
||||
primary: {
|
||||
limit: 0,
|
||||
used: 0,
|
||||
remaining: 0,
|
||||
usedPercent: 100,
|
||||
windowDurationMins: Math.ceil(resetsInSeconds / 60),
|
||||
resetsAt,
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
// No rate limit error - use the plan type from auth file
|
||||
const isFreePlan = authPlanType === 'free';
|
||||
|
||||
return {
|
||||
rateLimits: {
|
||||
planType: authPlanType,
|
||||
credits: {
|
||||
hasCredits: true,
|
||||
unlimited: !isFreePlan && authPlanType !== 'unknown',
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Failed to fetch from Codex CLI:', error);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to extract usage info from the Codex auth file
|
||||
* Reuses getPlanTypeFromAuthFile to avoid code duplication
|
||||
*/
|
||||
private async fetchFromAuthFile(): Promise<CodexUsageData | null> {
|
||||
try {
|
||||
const planType = await this.getPlanTypeFromAuthFile();
|
||||
|
||||
if (planType === 'unknown') {
|
||||
return null;
|
||||
}
|
||||
|
||||
const isFreePlan = planType === 'free';
|
||||
|
||||
return {
|
||||
rateLimits: {
|
||||
planType,
|
||||
credits: {
|
||||
hasCredits: true,
|
||||
unlimited: !isFreePlan,
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Failed to parse auth file:', error);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
  /**
   * Parse JWT token to extract claims
   */
  private parseJwt(token: string): any {
    try {
      const parts = token.split('.');

      if (parts.length !== 3) {
        return null;
      }

      const base64Url = parts[1];
      const base64 = base64Url.replace(/-/g, '+').replace(/_/g, '/');

      // Use Buffer for Node.js environment instead of atob
      let jsonPayload: string;
      if (typeof Buffer !== 'undefined') {
        jsonPayload = Buffer.from(base64, 'base64').toString('utf-8');
      } else {
        jsonPayload = decodeURIComponent(
          atob(base64)
            .split('')
            .map((c) => '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2))
            .join('')
        );
      }

      return JSON.parse(jsonPayload);
    } catch {
      return null;
    }
  }
}
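// Minimal usage sketch for the new service (assumption: how callers wire this up elsewhere in the
// server is not shown in this diff; the logging below is illustrative only):
async function logCodexUsageSketch(): Promise<void> {
  const service = new CodexUsageService();
  if (!(await service.isAvailable())) {
    console.log('Codex CLI not installed - skipping usage check');
    return;
  }
  try {
    const usage = await service.fetchUsageData();
    console.log('Codex plan:', usage.rateLimits?.planType ?? 'unknown');
    if (usage.rateLimits?.primary) {
      console.log('Primary window resets at (epoch):', usage.rateLimits.primary.resetsAt);
    }
  } catch (err) {
    // fetchUsageData throws with setup guidance when no usage source is available
    console.warn(err instanceof Error ? err.message : String(err));
  }
}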
@@ -4,7 +4,7 @@
 */

import path from 'path';
import type { Feature } from '@automaker/types';
import type { Feature, DescriptionHistoryEntry } from '@automaker/types';
import { createLogger } from '@automaker/utils';
import * as secureFs from '../lib/secure-fs.js';
import {
@@ -274,6 +274,16 @@ export class FeatureLoader {
      featureData.imagePaths
    );

    // Initialize description history with the initial description
    const initialHistory: DescriptionHistoryEntry[] = [];
    if (featureData.description && featureData.description.trim()) {
      initialHistory.push({
        description: featureData.description,
        timestamp: new Date().toISOString(),
        source: 'initial',
      });
    }

    // Ensure feature has required fields
    const feature: Feature = {
      category: featureData.category || 'Uncategorized',
@@ -281,6 +291,7 @@ export class FeatureLoader {
      ...featureData,
      id: featureId,
      imagePaths: migratedImagePaths,
      descriptionHistory: initialHistory,
    };

    // Write feature.json
@@ -292,11 +303,18 @@ export class FeatureLoader {

  /**
   * Update a feature (partial updates supported)
   * @param projectPath - Path to the project
   * @param featureId - ID of the feature to update
   * @param updates - Partial feature updates
   * @param descriptionHistorySource - Source of description change ('enhance' or 'edit')
   * @param enhancementMode - Enhancement mode if source is 'enhance'
   */
  async update(
    projectPath: string,
    featureId: string,
    updates: Partial<Feature>
    updates: Partial<Feature>,
    descriptionHistorySource?: 'enhance' | 'edit',
    enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance'
  ): Promise<Feature> {
    const feature = await this.get(projectPath, featureId);
    if (!feature) {
@@ -313,11 +331,28 @@ export class FeatureLoader {
      updatedImagePaths = await this.migrateImages(projectPath, featureId, updates.imagePaths);
    }

    // Track description history if description changed
    let updatedHistory = feature.descriptionHistory || [];
    if (
      updates.description !== undefined &&
      updates.description !== feature.description &&
      updates.description.trim()
    ) {
      const historyEntry: DescriptionHistoryEntry = {
        description: updates.description,
        timestamp: new Date().toISOString(),
        source: descriptionHistorySource || 'edit',
        ...(descriptionHistorySource === 'enhance' && enhancementMode ? { enhancementMode } : {}),
      };
      updatedHistory = [...updatedHistory, historyEntry];
    }

    // Merge updates
    const updatedFeature: Feature = {
      ...feature,
      ...updates,
      ...(updatedImagePaths !== undefined ? { imagePaths: updatedImagePaths } : {}),
      descriptionHistory: updatedHistory,
    };

    // Write back to file
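For reference, a minimal sketch of the history entry this diff appends. The canonical DescriptionHistoryEntry lives in @automaker/types; the shape below is inferred from the fields used above and may omit fields the real type defines.

```ts
// Assumed shape of DescriptionHistoryEntry, inferred from the fields used in the diff
// (the canonical definition lives in @automaker/types and may differ).
interface DescriptionHistoryEntry {
  description: string;
  timestamp: string; // ISO 8601, e.g. new Date().toISOString()
  source: 'initial' | 'edit' | 'enhance';
  enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance';
}

// History is append-only: each accepted description change adds one entry.
const history: DescriptionHistoryEntry[] = [];
history.push({
  description: 'First draft',
  timestamp: new Date().toISOString(),
  source: 'initial',
});
history.push({
  description: 'Refined draft',
  timestamp: new Date().toISOString(),
  source: 'enhance',
  enhancementMode: 'improve',
});
```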
@@ -40,6 +40,7 @@ import type { SettingsService } from './settings-service.js';
import type { FeatureLoader } from './feature-loader.js';
import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js';
import { resolveModelString } from '@automaker/model-resolver';
import { stripProviderPrefix } from '@automaker/types';

const logger = createLogger('IdeationService');

@@ -201,7 +202,7 @@ export class IdeationService {
      existingWorkContext
    );

    // Resolve model alias to canonical identifier
    // Resolve model alias to canonical identifier (with prefix)
    const modelId = resolveModelString(options?.model ?? 'sonnet');

    // Create SDK options
@@ -214,9 +215,13 @@ export class IdeationService {

    const provider = ProviderFactory.getProviderForModel(modelId);

    // Strip provider prefix - providers need bare model IDs
    const bareModel = stripProviderPrefix(modelId);

    const executeOptions: ExecuteOptions = {
      prompt: message,
      model: modelId,
      model: bareModel,
      originalModel: modelId,
      cwd: projectPath,
      systemPrompt: sdkOptions.systemPrompt,
      maxTurns: 1, // Single turn for ideation
@@ -648,7 +653,7 @@ export class IdeationService {
      existingWorkContext
    );

    // Resolve model alias to canonical identifier
    // Resolve model alias to canonical identifier (with prefix)
    const modelId = resolveModelString('sonnet');

    // Create SDK options
@@ -661,9 +666,13 @@ export class IdeationService {

    const provider = ProviderFactory.getProviderForModel(modelId);

    // Strip provider prefix - providers need bare model IDs
    const bareModel = stripProviderPrefix(modelId);

    const executeOptions: ExecuteOptions = {
      prompt: prompt.prompt,
      model: modelId,
      model: bareModel,
      originalModel: modelId,
      cwd: projectPath,
      systemPrompt: sdkOptions.systemPrompt,
      maxTurns: 1,
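Both hunks above make the same change: the resolved, prefixed model ID is kept as originalModel for bookkeeping, while the provider itself receives a bare ID. A rough sketch of that split, assuming prefixed IDs look like "provider/model" (the real stripProviderPrefix in @automaker/types may use a different separator or provider whitelist):

```ts
// Minimal sketch of the prefix-stripping idea; not the actual @automaker/types implementation.
function stripProviderPrefixSketch(modelId: string): string {
  const idx = modelId.indexOf('/');
  return idx === -1 ? modelId : modelId.slice(idx + 1);
}

// Hypothetical values for illustration only.
const modelId = 'anthropic/claude-sonnet-4-20250514'; // resolved, prefixed ID used for routing
const bareModel = stripProviderPrefixSketch(modelId); // 'claude-sonnet-4-20250514', passed to the provider
```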
@@ -153,14 +153,6 @@ export class SettingsService {
    const storedVersion = settings.version || 1;
    let needsSave = false;

    // Migration v1 -> v2: Force enableSandboxMode to false for existing users
    // Sandbox mode can cause issues on some systems, so we're disabling it by default
    if (storedVersion < 2) {
      logger.info('Migrating settings from v1 to v2: disabling sandbox mode');
      result.enableSandboxMode = false;
      needsSave = true;
    }

    // Migration v2 -> v3: Convert string phase models to PhaseModelEntry objects
    // Note: migratePhaseModels() handles the actual conversion for both v1 and v2 formats
    if (storedVersion < 3) {
@@ -170,6 +162,16 @@ export class SettingsService {
      needsSave = true;
    }

    // Migration v3 -> v4: Add onboarding/setup wizard state fields
    // Older settings files never stored setup state in settings.json (it lived in localStorage),
    // so default to "setup complete" for existing installs to avoid forcing re-onboarding.
    if (storedVersion < 4) {
      if (settings.setupComplete === undefined) result.setupComplete = true;
      if (settings.isFirstRun === undefined) result.isFirstRun = false;
      if (settings.skipClaudeSetup === undefined) result.skipClaudeSetup = false;
      needsSave = true;
    }

    // Update version if any migration occurred
    if (needsSave) {
      result.version = SETTINGS_VERSION;
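The hunk above follows a simple versioned-migration pattern: each `storedVersion < N` block upgrades one step and marks the settings dirty, and the version stamp is bumped once at the end. A condensed sketch of that pattern under the same assumptions (field names mirror the diff; SETTINGS_VERSION is assumed to be the current version constant):

```ts
// Sketch of the sequential-migration pattern used above, not the service's full logic.
function migrateSettings(settings: { version?: number } & Record<string, unknown>) {
  const result = { ...settings };
  const storedVersion = settings.version ?? 1;
  let needsSave = false;

  if (storedVersion < 4) {
    // v3 -> v4: default onboarding state for existing installs
    if (result.setupComplete === undefined) result.setupComplete = true;
    needsSave = true;
  }

  // Only stamp the new version if some migration actually ran.
  if (needsSave) result.version = 4; // assumed SETTINGS_VERSION
  return { result, needsSave };
}
```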
@@ -264,25 +266,79 @@
    const settingsPath = getGlobalSettingsPath(this.dataDir);

    const current = await this.getGlobalSettings();

    // Guard against destructive "empty array/object" overwrites.
    // During auth transitions, the UI can briefly have default/empty state and accidentally
    // sync it, wiping persisted settings (especially `projects`).
    const sanitizedUpdates: Partial<GlobalSettings> = { ...updates };
    let attemptedProjectWipe = false;

    const ignoreEmptyArrayOverwrite = <K extends keyof GlobalSettings>(key: K): void => {
      const nextVal = sanitizedUpdates[key] as unknown;
      const curVal = current[key] as unknown;
      if (
        Array.isArray(nextVal) &&
        nextVal.length === 0 &&
        Array.isArray(curVal) &&
        curVal.length > 0
      ) {
        delete sanitizedUpdates[key];
      }
    };

    const currentProjectsLen = Array.isArray(current.projects) ? current.projects.length : 0;
    if (
      Array.isArray(sanitizedUpdates.projects) &&
      sanitizedUpdates.projects.length === 0 &&
      currentProjectsLen > 0
    ) {
      attemptedProjectWipe = true;
      delete sanitizedUpdates.projects;
    }

    ignoreEmptyArrayOverwrite('trashedProjects');
    ignoreEmptyArrayOverwrite('projectHistory');
    ignoreEmptyArrayOverwrite('recentFolders');
    ignoreEmptyArrayOverwrite('aiProfiles');
    ignoreEmptyArrayOverwrite('mcpServers');
    ignoreEmptyArrayOverwrite('enabledCursorModels');

    // Empty object overwrite guard
    if (
      sanitizedUpdates.lastSelectedSessionByProject &&
      typeof sanitizedUpdates.lastSelectedSessionByProject === 'object' &&
      !Array.isArray(sanitizedUpdates.lastSelectedSessionByProject) &&
      Object.keys(sanitizedUpdates.lastSelectedSessionByProject).length === 0 &&
      current.lastSelectedSessionByProject &&
      Object.keys(current.lastSelectedSessionByProject).length > 0
    ) {
      delete sanitizedUpdates.lastSelectedSessionByProject;
    }

    // If a request attempted to wipe projects, also ignore theme changes in that same request.
    if (attemptedProjectWipe) {
      delete sanitizedUpdates.theme;
    }

    const updated: GlobalSettings = {
      ...current,
      ...updates,
      ...sanitizedUpdates,
      version: SETTINGS_VERSION,
    };

    // Deep merge keyboard shortcuts if provided
    if (updates.keyboardShortcuts) {
    if (sanitizedUpdates.keyboardShortcuts) {
      updated.keyboardShortcuts = {
        ...current.keyboardShortcuts,
        ...updates.keyboardShortcuts,
        ...sanitizedUpdates.keyboardShortcuts,
      };
    }

    // Deep merge phaseModels if provided
    if (updates.phaseModels) {
    if (sanitizedUpdates.phaseModels) {
      updated.phaseModels = {
        ...current.phaseModels,
        ...updates.phaseModels,
        ...sanitizedUpdates.phaseModels,
      };
    }
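To make the guard's effect concrete, a small illustration using the same rules as the diff: an empty projects array never replaces a non-empty one, and a request that tried to wipe projects also has its theme change ignored. The surrounding service wiring is assumed; only the sanitization step is shown.

```ts
// Illustrative behaviour of the overwrite guard above (names mirror the diff).
const current = { projects: [{ id: 'proj1' }], theme: 'solarized' };
const updates = { projects: [] as unknown[], theme: 'light' };

const sanitized: Record<string, unknown> = { ...updates };
if (
  Array.isArray(sanitized.projects) &&
  sanitized.projects.length === 0 &&
  current.projects.length > 0
) {
  // projects: [] would wipe persisted data, so drop it; because this request
  // attempted a wipe, its theme change is dropped as well.
  delete sanitized.projects;
  delete sanitized.theme;
}

const merged = { ...current, ...sanitized };
// merged => { projects: [{ id: 'proj1' }], theme: 'solarized' }
```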
@@ -523,8 +579,26 @@ export class SettingsService {
      }
    }

    // Parse setup wizard state (previously stored in localStorage)
    let setupState: Record<string, unknown> = {};
    if (localStorageData['automaker-setup']) {
      try {
        const parsed = JSON.parse(localStorageData['automaker-setup']);
        setupState = parsed.state || parsed;
      } catch (e) {
        errors.push(`Failed to parse automaker-setup: ${e}`);
      }
    }

    // Extract global settings
    const globalSettings: Partial<GlobalSettings> = {
      setupComplete:
        setupState.setupComplete !== undefined ? (setupState.setupComplete as boolean) : false,
      isFirstRun: setupState.isFirstRun !== undefined ? (setupState.isFirstRun as boolean) : true,
      skipClaudeSetup:
        setupState.skipClaudeSetup !== undefined
          ? (setupState.skipClaudeSetup as boolean)
          : false,
      theme: (appState.theme as GlobalSettings['theme']) || 'dark',
      sidebarOpen: appState.sidebarOpen !== undefined ? (appState.sidebarOpen as boolean) : true,
      chatHistoryOpen: (appState.chatHistoryOpen as boolean) || false,
@@ -537,7 +611,12 @@
        appState.enableDependencyBlocking !== undefined
          ? (appState.enableDependencyBlocking as boolean)
          : true,
      useWorktrees: (appState.useWorktrees as boolean) || false,
      skipVerificationInAutoMode:
        appState.skipVerificationInAutoMode !== undefined
          ? (appState.skipVerificationInAutoMode as boolean)
          : false,
      useWorktrees:
        appState.useWorktrees !== undefined ? (appState.useWorktrees as boolean) : true,
      showProfilesOnly: (appState.showProfilesOnly as boolean) || false,
      defaultPlanningMode:
        (appState.defaultPlanningMode as GlobalSettings['defaultPlanningMode']) || 'skip',
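The `parsed.state || parsed` line above exists because persisted browser state is often wrapped: a zustand-persist style blob, for example, stores `{ state, version }`, while older exports keep the fields at the top level. A tiny sketch with a hypothetical payload:

```ts
// Hypothetical localStorage payload; only the unwrap step mirrors the diff.
const raw = '{"state":{"setupComplete":true,"isFirstRun":false},"version":1}';
const parsed = JSON.parse(raw);
const setupState = parsed.state || parsed;
// setupState => { setupComplete: true, isFirstRun: false }
```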
373
apps/server/src/tests/cli-integration.test.ts
Normal file
@@ -0,0 +1,373 @@
|
||||
/**
|
||||
* CLI Integration Tests
|
||||
*
|
||||
* Comprehensive tests for CLI detection, authentication, and operations
|
||||
* across all providers (Claude, Codex, Cursor)
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import {
|
||||
detectCli,
|
||||
detectAllCLis,
|
||||
findCommand,
|
||||
getCliVersion,
|
||||
getInstallInstructions,
|
||||
validateCliInstallation,
|
||||
} from '../lib/cli-detection.js';
|
||||
import { classifyError, getUserFriendlyErrorMessage } from '../lib/error-handler.js';
|
||||
|
||||
describe('CLI Detection Framework', () => {
|
||||
describe('findCommand', () => {
|
||||
it('should find existing command', async () => {
|
||||
// Test with a command that should exist
|
||||
const result = await findCommand(['node']);
|
||||
expect(result).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should return null for non-existent command', async () => {
|
||||
const result = await findCommand(['nonexistent-command-12345']);
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it('should find first available command from alternatives', async () => {
|
||||
const result = await findCommand(['nonexistent-command-12345', 'node']);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result).toContain('node');
|
||||
});
|
||||
});
|
||||
|
||||
describe('getCliVersion', () => {
|
||||
it('should get version for existing command', async () => {
|
||||
const version = await getCliVersion('node', ['--version'], 5000);
|
||||
expect(version).toBeTruthy();
|
||||
expect(typeof version).toBe('string');
|
||||
});
|
||||
|
||||
it('should timeout for non-responsive command', async () => {
|
||||
await expect(getCliVersion('sleep', ['10'], 1000)).rejects.toThrow();
|
||||
}, 15000); // Give extra time for test timeout
|
||||
|
||||
it("should handle command that doesn't exist", async () => {
|
||||
await expect(
|
||||
getCliVersion('nonexistent-command-12345', ['--version'], 2000)
|
||||
).rejects.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getInstallInstructions', () => {
|
||||
it('should return instructions for supported platforms', () => {
|
||||
const claudeInstructions = getInstallInstructions('claude', 'darwin');
|
||||
expect(claudeInstructions).toContain('brew install');
|
||||
|
||||
const codexInstructions = getInstallInstructions('codex', 'linux');
|
||||
expect(codexInstructions).toContain('npm install');
|
||||
});
|
||||
|
||||
it('should handle unsupported platform', () => {
|
||||
const instructions = getInstallInstructions('claude', 'unknown-platform' as any);
|
||||
expect(instructions).toContain('No installation instructions available');
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateCliInstallation', () => {
|
||||
it('should validate properly installed CLI', () => {
|
||||
const cliInfo = {
|
||||
name: 'Test CLI',
|
||||
command: 'node',
|
||||
version: 'v18.0.0',
|
||||
path: '/usr/bin/node',
|
||||
installed: true,
|
||||
authenticated: true,
|
||||
authMethod: 'cli' as const,
|
||||
};
|
||||
|
||||
const result = validateCliInstallation(cliInfo);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.issues).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should detect issues with installation', () => {
|
||||
const cliInfo = {
|
||||
name: 'Test CLI',
|
||||
command: '',
|
||||
version: '',
|
||||
path: '',
|
||||
installed: false,
|
||||
authenticated: false,
|
||||
authMethod: 'none' as const,
|
||||
};
|
||||
|
||||
const result = validateCliInstallation(cliInfo);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.issues.length).toBeGreaterThan(0);
|
||||
expect(result.issues).toContain('CLI is not installed');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handling System', () => {
|
||||
describe('classifyError', () => {
|
||||
it('should classify authentication errors', () => {
|
||||
const authError = new Error('invalid_api_key: Your API key is invalid');
|
||||
const result = classifyError(authError, 'claude');
|
||||
|
||||
expect(result.type).toBe('authentication');
|
||||
expect(result.severity).toBe('high');
|
||||
expect(result.userMessage).toContain('Authentication failed');
|
||||
expect(result.retryable).toBe(false);
|
||||
expect(result.provider).toBe('claude');
|
||||
});
|
||||
|
||||
it('should classify billing errors', () => {
|
||||
const billingError = new Error('credit balance is too low');
|
||||
const result = classifyError(billingError);
|
||||
|
||||
expect(result.type).toBe('billing');
|
||||
expect(result.severity).toBe('high');
|
||||
expect(result.userMessage).toContain('insufficient credits');
|
||||
expect(result.retryable).toBe(false);
|
||||
});
|
||||
|
||||
it('should classify rate limit errors', () => {
|
||||
const rateLimitError = new Error('Rate limit reached. Try again later.');
|
||||
const result = classifyError(rateLimitError);
|
||||
|
||||
expect(result.type).toBe('rate_limit');
|
||||
expect(result.severity).toBe('medium');
|
||||
expect(result.userMessage).toContain('Rate limit reached');
|
||||
expect(result.retryable).toBe(true);
|
||||
});
|
||||
|
||||
it('should classify network errors', () => {
|
||||
const networkError = new Error('ECONNREFUSED: Connection refused');
|
||||
const result = classifyError(networkError);
|
||||
|
||||
expect(result.type).toBe('network');
|
||||
expect(result.severity).toBe('medium');
|
||||
expect(result.userMessage).toContain('Network connection issue');
|
||||
expect(result.retryable).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle unknown errors', () => {
|
||||
const unknownError = new Error('Something completely unexpected happened');
|
||||
const result = classifyError(unknownError);
|
||||
|
||||
expect(result.type).toBe('unknown');
|
||||
expect(result.severity).toBe('medium');
|
||||
expect(result.userMessage).toContain('unexpected error');
|
||||
expect(result.retryable).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getUserFriendlyErrorMessage', () => {
|
||||
it('should include provider name in message', () => {
|
||||
const error = new Error('invalid_api_key');
|
||||
const message = getUserFriendlyErrorMessage(error, 'claude');
|
||||
|
||||
expect(message).toContain('[CLAUDE]');
|
||||
});
|
||||
|
||||
it('should include suggested action when available', () => {
|
||||
const error = new Error('invalid_api_key');
|
||||
const message = getUserFriendlyErrorMessage(error);
|
||||
|
||||
expect(message).toContain('Verify your API key');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Provider-Specific Tests', () => {
|
||||
describe('Claude CLI Detection', () => {
|
||||
it('should detect Claude CLI if installed', async () => {
|
||||
const result = await detectCli('claude');
|
||||
|
||||
if (result.detected) {
|
||||
expect(result.cli.name).toBe('Claude CLI');
|
||||
expect(result.cli.installed).toBe(true);
|
||||
expect(result.cli.command).toBeTruthy();
|
||||
}
|
||||
// If not installed, that's also a valid test result
|
||||
});
|
||||
|
||||
it('should handle missing Claude CLI gracefully', async () => {
|
||||
// This test will pass regardless of whether Claude is installed
|
||||
const result = await detectCli('claude');
|
||||
expect(typeof result.detected).toBe('boolean');
|
||||
expect(Array.isArray(result.issues)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Codex CLI Detection', () => {
|
||||
it('should detect Codex CLI if installed', async () => {
|
||||
const result = await detectCli('codex');
|
||||
|
||||
if (result.detected) {
|
||||
expect(result.cli.name).toBe('Codex CLI');
|
||||
expect(result.cli.installed).toBe(true);
|
||||
expect(result.cli.command).toBeTruthy();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('Cursor CLI Detection', () => {
|
||||
it('should detect Cursor CLI if installed', async () => {
|
||||
const result = await detectCli('cursor');
|
||||
|
||||
if (result.detected) {
|
||||
expect(result.cli.name).toBe('Cursor CLI');
|
||||
expect(result.cli.installed).toBe(true);
|
||||
expect(result.cli.command).toBeTruthy();
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Integration Tests', () => {
|
||||
describe('detectAllCLis', () => {
|
||||
it('should detect all available CLIs', async () => {
|
||||
const results = await detectAllCLis();
|
||||
|
||||
expect(results).toHaveProperty('claude');
|
||||
expect(results).toHaveProperty('codex');
|
||||
expect(results).toHaveProperty('cursor');
|
||||
|
||||
// Each should have the expected structure
|
||||
Object.values(results).forEach((result) => {
|
||||
expect(result).toHaveProperty('cli');
|
||||
expect(result).toHaveProperty('detected');
|
||||
expect(result).toHaveProperty('issues');
|
||||
expect(result.cli).toHaveProperty('name');
|
||||
expect(result.cli).toHaveProperty('installed');
|
||||
expect(result.cli).toHaveProperty('authenticated');
|
||||
});
|
||||
}, 30000); // Longer timeout for CLI detection
|
||||
|
||||
it('should handle concurrent CLI detection', async () => {
|
||||
// Run detection multiple times concurrently
|
||||
const promises = [detectAllCLis(), detectAllCLis(), detectAllCLis()];
|
||||
|
||||
const results = await Promise.all(promises);
|
||||
|
||||
// All should return consistent results
|
||||
expect(results).toHaveLength(3);
|
||||
results.forEach((result) => {
|
||||
expect(result).toHaveProperty('claude');
|
||||
expect(result).toHaveProperty('codex');
|
||||
expect(result).toHaveProperty('cursor');
|
||||
});
|
||||
}, 45000);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Recovery Tests', () => {
|
||||
it('should handle partial CLI detection failures', async () => {
|
||||
// Mock a scenario where some CLIs fail to detect
|
||||
const results = await detectAllCLis();
|
||||
|
||||
// Should still return results for all providers
|
||||
expect(results).toHaveProperty('claude');
|
||||
expect(results).toHaveProperty('codex');
|
||||
expect(results).toHaveProperty('cursor');
|
||||
|
||||
// Should provide error information for failures
|
||||
Object.entries(results).forEach(([provider, result]) => {
|
||||
if (!result.detected && result.issues.length > 0) {
|
||||
expect(result.issues.length).toBeGreaterThan(0);
|
||||
expect(result.issues[0]).toBeTruthy();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle timeout during CLI detection', async () => {
|
||||
// Test with very short timeout
|
||||
const result = await detectCli('claude', { timeout: 1 });
|
||||
|
||||
// Should handle gracefully without throwing
|
||||
expect(typeof result.detected).toBe('boolean');
|
||||
expect(Array.isArray(result.issues)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Security Tests', () => {
|
||||
it('should not expose sensitive information in error messages', () => {
|
||||
const errorWithKey = new Error('invalid_api_key: sk-ant-abc123secret456');
|
||||
const message = getUserFriendlyErrorMessage(errorWithKey);
|
||||
|
||||
// Should not expose the actual API key
|
||||
expect(message).not.toContain('sk-ant-abc123secret456');
|
||||
expect(message).toContain('Authentication failed');
|
||||
});
|
||||
|
||||
it('should sanitize file paths in error messages', () => {
|
||||
const errorWithPath = new Error('Permission denied: /home/user/.ssh/id_rsa');
|
||||
const message = getUserFriendlyErrorMessage(errorWithPath);
|
||||
|
||||
// Should not expose sensitive file paths
|
||||
expect(message).not.toContain('/home/user/.ssh/id_rsa');
|
||||
});
|
||||
});
|
||||
|
||||
// Performance Tests
|
||||
describe('Performance Tests', () => {
|
||||
it('should detect CLIs within reasonable time', async () => {
|
||||
const startTime = Date.now();
|
||||
const results = await detectAllCLis();
|
||||
const endTime = Date.now();
|
||||
|
||||
const duration = endTime - startTime;
|
||||
expect(duration).toBeLessThan(10000); // Should complete in under 10 seconds
|
||||
expect(results).toHaveProperty('claude');
|
||||
expect(results).toHaveProperty('codex');
|
||||
expect(results).toHaveProperty('cursor');
|
||||
}, 15000);
|
||||
|
||||
it('should handle rapid repeated calls', async () => {
|
||||
// Make multiple rapid calls
|
||||
const promises = Array.from({ length: 10 }, () => detectAllCLis());
|
||||
const results = await Promise.all(promises);
|
||||
|
||||
// All should complete successfully
|
||||
expect(results).toHaveLength(10);
|
||||
results.forEach((result) => {
|
||||
expect(result).toHaveProperty('claude');
|
||||
expect(result).toHaveProperty('codex');
|
||||
expect(result).toHaveProperty('cursor');
|
||||
});
|
||||
}, 60000);
|
||||
});
|
||||
|
||||
// Edge Cases
|
||||
describe('Edge Cases', () => {
|
||||
it('should handle empty CLI names', async () => {
|
||||
await expect(detectCli('' as any)).rejects.toThrow();
|
||||
});
|
||||
|
||||
it('should handle null CLI names', async () => {
|
||||
await expect(detectCli(null as any)).rejects.toThrow();
|
||||
});
|
||||
|
||||
it('should handle undefined CLI names', async () => {
|
||||
await expect(detectCli(undefined as any)).rejects.toThrow();
|
||||
});
|
||||
|
||||
it('should handle malformed error objects', () => {
|
||||
const testCases = [
|
||||
null,
|
||||
undefined,
|
||||
'',
|
||||
123,
|
||||
[],
|
||||
{ nested: { error: { message: 'test' } } },
|
||||
{ error: 'simple string error' },
|
||||
];
|
||||
|
||||
testCases.forEach((error) => {
|
||||
expect(() => {
|
||||
const result = classifyError(error);
|
||||
expect(result).toHaveProperty('type');
|
||||
expect(result).toHaveProperty('severity');
|
||||
expect(result).toHaveProperty('userMessage');
|
||||
}).not.toThrow();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -277,7 +277,7 @@ describe('auth.ts', () => {
      const options = getSessionCookieOptions();

      expect(options.httpOnly).toBe(true);
      expect(options.sameSite).toBe('strict');
      expect(options.sameSite).toBe('lax');
      expect(options.path).toBe('/');
      expect(options.maxAge).toBeGreaterThan(0);
    });
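The assertion change above (sameSite 'strict' to 'lax') implies session cookies are now sent on top-level navigations, such as following a link back into the app. A plausible shape of getSessionCookieOptions() consistent with these assertions; the real implementation may set additional fields (such as secure) and uses its own maxAge:

```ts
// Sketch only: shape implied by the test assertions, not the actual auth module.
function getSessionCookieOptionsSketch() {
  return {
    httpOnly: true,
    sameSite: 'lax' as const, // 'lax' still attaches the cookie on top-level navigations
    path: '/',
    maxAge: 7 * 24 * 60 * 60, // assumed: some positive lifetime in seconds
  };
}
```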
@@ -1,161 +1,15 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import os from 'os';
|
||||
|
||||
describe('sdk-options.ts', () => {
|
||||
let originalEnv: NodeJS.ProcessEnv;
|
||||
let homedirSpy: ReturnType<typeof vi.spyOn>;
|
||||
|
||||
beforeEach(() => {
|
||||
originalEnv = { ...process.env };
|
||||
vi.resetModules();
|
||||
// Spy on os.homedir and set default return value
|
||||
homedirSpy = vi.spyOn(os, 'homedir').mockReturnValue('/Users/test');
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
homedirSpy.mockRestore();
|
||||
});
|
||||
|
||||
describe('isCloudStoragePath', () => {
|
||||
it('should detect Dropbox paths on macOS', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/Library/CloudStorage/Dropbox-Personal/project')).toBe(
|
||||
true
|
||||
);
|
||||
expect(isCloudStoragePath('/Users/test/Library/CloudStorage/Dropbox/project')).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect Google Drive paths on macOS', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(
|
||||
isCloudStoragePath('/Users/test/Library/CloudStorage/GoogleDrive-user@gmail.com/project')
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect OneDrive paths on macOS', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/Library/CloudStorage/OneDrive-Personal/project')).toBe(
|
||||
true
|
||||
);
|
||||
});
|
||||
|
||||
it('should detect iCloud Drive paths on macOS', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(
|
||||
isCloudStoragePath('/Users/test/Library/Mobile Documents/com~apple~CloudDocs/project')
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect home-anchored Dropbox paths', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/Dropbox')).toBe(true);
|
||||
expect(isCloudStoragePath('/Users/test/Dropbox/project')).toBe(true);
|
||||
expect(isCloudStoragePath('/Users/test/Dropbox/nested/deep/project')).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect home-anchored Google Drive paths', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/Google Drive')).toBe(true);
|
||||
expect(isCloudStoragePath('/Users/test/Google Drive/project')).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect home-anchored OneDrive paths', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/OneDrive')).toBe(true);
|
||||
expect(isCloudStoragePath('/Users/test/OneDrive/project')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for local paths', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/projects/myapp')).toBe(false);
|
||||
expect(isCloudStoragePath('/home/user/code/project')).toBe(false);
|
||||
expect(isCloudStoragePath('/var/www/app')).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false for relative paths not in cloud storage', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('./project')).toBe(false);
|
||||
expect(isCloudStoragePath('../other-project')).toBe(false);
|
||||
});
|
||||
|
||||
// Tests for false positive prevention - paths that contain cloud storage names but aren't cloud storage
|
||||
it('should NOT flag paths that merely contain "dropbox" in the name', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
// Projects with dropbox-like names
|
||||
expect(isCloudStoragePath('/home/user/my-project-about-dropbox')).toBe(false);
|
||||
expect(isCloudStoragePath('/Users/test/projects/dropbox-clone')).toBe(false);
|
||||
expect(isCloudStoragePath('/Users/test/projects/Dropbox-backup-tool')).toBe(false);
|
||||
// Dropbox folder that's NOT in the home directory
|
||||
expect(isCloudStoragePath('/var/shared/Dropbox/project')).toBe(false);
|
||||
});
|
||||
|
||||
it('should NOT flag paths that merely contain "Google Drive" in the name', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/projects/google-drive-api-client')).toBe(false);
|
||||
expect(isCloudStoragePath('/home/user/Google Drive API Tests')).toBe(false);
|
||||
});
|
||||
|
||||
it('should NOT flag paths that merely contain "OneDrive" in the name', async () => {
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
expect(isCloudStoragePath('/Users/test/projects/onedrive-sync-tool')).toBe(false);
|
||||
expect(isCloudStoragePath('/home/user/OneDrive-migration-scripts')).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle different home directories correctly', async () => {
|
||||
// Change the mocked home directory
|
||||
homedirSpy.mockReturnValue('/home/linuxuser');
|
||||
const { isCloudStoragePath } = await import('@/lib/sdk-options.js');
|
||||
|
||||
// Should detect Dropbox under the Linux home directory
|
||||
expect(isCloudStoragePath('/home/linuxuser/Dropbox/project')).toBe(true);
|
||||
// Should NOT detect Dropbox under the old home directory (since home changed)
|
||||
expect(isCloudStoragePath('/Users/test/Dropbox/project')).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('checkSandboxCompatibility', () => {
|
||||
it('should return enabled=false when user disables sandbox', async () => {
|
||||
const { checkSandboxCompatibility } = await import('@/lib/sdk-options.js');
|
||||
const result = checkSandboxCompatibility('/Users/test/project', false);
|
||||
expect(result.enabled).toBe(false);
|
||||
expect(result.disabledReason).toBe('user_setting');
|
||||
});
|
||||
|
||||
it('should return enabled=false for cloud storage paths even when sandbox enabled', async () => {
|
||||
const { checkSandboxCompatibility } = await import('@/lib/sdk-options.js');
|
||||
const result = checkSandboxCompatibility(
|
||||
'/Users/test/Library/CloudStorage/Dropbox-Personal/project',
|
||||
true
|
||||
);
|
||||
expect(result.enabled).toBe(false);
|
||||
expect(result.disabledReason).toBe('cloud_storage');
|
||||
expect(result.message).toContain('cloud storage');
|
||||
});
|
||||
|
||||
it('should return enabled=true for local paths when sandbox enabled', async () => {
|
||||
const { checkSandboxCompatibility } = await import('@/lib/sdk-options.js');
|
||||
const result = checkSandboxCompatibility('/Users/test/projects/myapp', true);
|
||||
expect(result.enabled).toBe(true);
|
||||
expect(result.disabledReason).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should return enabled=true when enableSandboxMode is undefined for local paths', async () => {
|
||||
const { checkSandboxCompatibility } = await import('@/lib/sdk-options.js');
|
||||
const result = checkSandboxCompatibility('/Users/test/project', undefined);
|
||||
expect(result.enabled).toBe(true);
|
||||
expect(result.disabledReason).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should return enabled=false for cloud storage paths when enableSandboxMode is undefined', async () => {
|
||||
const { checkSandboxCompatibility } = await import('@/lib/sdk-options.js');
|
||||
const result = checkSandboxCompatibility(
|
||||
'/Users/test/Library/CloudStorage/Dropbox-Personal/project',
|
||||
undefined
|
||||
);
|
||||
expect(result.enabled).toBe(false);
|
||||
expect(result.disabledReason).toBe('cloud_storage');
|
||||
});
|
||||
});
|
||||
|
||||
describe('TOOL_PRESETS', () => {
|
||||
@@ -325,19 +179,15 @@ describe('sdk-options.ts', () => {
|
||||
it('should create options with chat settings', async () => {
|
||||
const { createChatOptions, TOOL_PRESETS, MAX_TURNS } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createChatOptions({ cwd: '/test/path', enableSandboxMode: true });
|
||||
const options = createChatOptions({ cwd: '/test/path' });
|
||||
|
||||
expect(options.cwd).toBe('/test/path');
|
||||
expect(options.maxTurns).toBe(MAX_TURNS.standard);
|
||||
expect(options.allowedTools).toEqual([...TOOL_PRESETS.chat]);
|
||||
expect(options.sandbox).toEqual({
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should prefer explicit model over session model', async () => {
|
||||
const { createChatOptions, getModelForUseCase } = await import('@/lib/sdk-options.js');
|
||||
const { createChatOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createChatOptions({
|
||||
cwd: '/test/path',
|
||||
@@ -358,41 +208,6 @@ describe('sdk-options.ts', () => {
|
||||
|
||||
expect(options.model).toBe('claude-sonnet-4-20250514');
|
||||
});
|
||||
|
||||
it('should not set sandbox when enableSandboxMode is false', async () => {
|
||||
const { createChatOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createChatOptions({
|
||||
cwd: '/test/path',
|
||||
enableSandboxMode: false,
|
||||
});
|
||||
|
||||
expect(options.sandbox).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should enable sandbox by default when enableSandboxMode is not provided', async () => {
|
||||
const { createChatOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createChatOptions({
|
||||
cwd: '/test/path',
|
||||
});
|
||||
|
||||
expect(options.sandbox).toEqual({
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should auto-disable sandbox for cloud storage paths', async () => {
|
||||
const { createChatOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createChatOptions({
|
||||
cwd: '/Users/test/Library/CloudStorage/Dropbox-Personal/project',
|
||||
enableSandboxMode: true,
|
||||
});
|
||||
|
||||
expect(options.sandbox).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('createAutoModeOptions', () => {
|
||||
@@ -400,15 +215,11 @@ describe('sdk-options.ts', () => {
|
||||
const { createAutoModeOptions, TOOL_PRESETS, MAX_TURNS } =
|
||||
await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createAutoModeOptions({ cwd: '/test/path', enableSandboxMode: true });
|
||||
const options = createAutoModeOptions({ cwd: '/test/path' });
|
||||
|
||||
expect(options.cwd).toBe('/test/path');
|
||||
expect(options.maxTurns).toBe(MAX_TURNS.maximum);
|
||||
expect(options.allowedTools).toEqual([...TOOL_PRESETS.fullAccess]);
|
||||
expect(options.sandbox).toEqual({
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should include systemPrompt when provided', async () => {
|
||||
@@ -433,62 +244,6 @@ describe('sdk-options.ts', () => {
|
||||
|
||||
expect(options.abortController).toBe(abortController);
|
||||
});
|
||||
|
||||
it('should not set sandbox when enableSandboxMode is false', async () => {
|
||||
const { createAutoModeOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createAutoModeOptions({
|
||||
cwd: '/test/path',
|
||||
enableSandboxMode: false,
|
||||
});
|
||||
|
||||
expect(options.sandbox).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should enable sandbox by default when enableSandboxMode is not provided', async () => {
|
||||
const { createAutoModeOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createAutoModeOptions({
|
||||
cwd: '/test/path',
|
||||
});
|
||||
|
||||
expect(options.sandbox).toEqual({
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should auto-disable sandbox for cloud storage paths', async () => {
|
||||
const { createAutoModeOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createAutoModeOptions({
|
||||
cwd: '/Users/test/Library/CloudStorage/Dropbox-Personal/project',
|
||||
enableSandboxMode: true,
|
||||
});
|
||||
|
||||
expect(options.sandbox).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should auto-disable sandbox for cloud storage paths even when enableSandboxMode is not provided', async () => {
|
||||
const { createAutoModeOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createAutoModeOptions({
|
||||
cwd: '/Users/test/Library/CloudStorage/Dropbox-Personal/project',
|
||||
});
|
||||
|
||||
expect(options.sandbox).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should auto-disable sandbox for iCloud paths', async () => {
|
||||
const { createAutoModeOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createAutoModeOptions({
|
||||
cwd: '/Users/test/Library/Mobile Documents/com~apple~CloudDocs/project',
|
||||
enableSandboxMode: true,
|
||||
});
|
||||
|
||||
expect(options.sandbox).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('createCustomOptions', () => {
|
||||
@@ -499,13 +254,11 @@ describe('sdk-options.ts', () => {
|
||||
cwd: '/test/path',
|
||||
maxTurns: 10,
|
||||
allowedTools: ['Read', 'Write'],
|
||||
sandbox: { enabled: true },
|
||||
});
|
||||
|
||||
expect(options.cwd).toBe('/test/path');
|
||||
expect(options.maxTurns).toBe(10);
|
||||
expect(options.allowedTools).toEqual(['Read', 'Write']);
|
||||
expect(options.sandbox).toEqual({ enabled: true });
|
||||
});
|
||||
|
||||
it('should use defaults when optional params not provided', async () => {
|
||||
@@ -517,20 +270,6 @@ describe('sdk-options.ts', () => {
|
||||
expect(options.allowedTools).toEqual([...TOOL_PRESETS.readOnly]);
|
||||
});
|
||||
|
||||
it('should include sandbox when provided', async () => {
|
||||
const { createCustomOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
const options = createCustomOptions({
|
||||
cwd: '/test/path',
|
||||
sandbox: { enabled: true, autoAllowBashIfSandboxed: false },
|
||||
});
|
||||
|
||||
expect(options.sandbox).toEqual({
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: false,
|
||||
});
|
||||
});
|
||||
|
||||
it('should include systemPrompt when provided', async () => {
|
||||
const { createCustomOptions } = await import('@/lib/sdk-options.js');
|
||||
|
||||
|
||||
@@ -179,8 +179,7 @@ describe('validation-storage.ts', () => {
    });

    it('should return false for validation exactly at 24 hours', () => {
      const exactDate = new Date();
      exactDate.setHours(exactDate.getHours() - 24);
      const exactDate = new Date(Date.now() - 24 * 60 * 60 * 1000 + 100);

      const validation = createMockValidation({
        validatedAt: exactDate.toISOString(),
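The boundary test above now builds its timestamp arithmetically from Date.now() (with a 100 ms margin) instead of via setHours, which avoids drift across DST changes and slow test runs. A sketch of the age computation the test exercises; whether the 24-hour mark itself counts as stale is defined by the suite's assertion, not here:

```ts
// Assumed 24-hour TTL used by validation-storage; only the age arithmetic is shown.
const VALIDATION_TTL_MS = 24 * 60 * 60 * 1000;
const validatedAt = new Date(Date.now() - VALIDATION_TTL_MS + 100); // ~24h old, built without setHours
const ageMs = Date.now() - validatedAt.getTime();
const isStale = ageMs >= VALIDATION_TTL_MS;
```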
@@ -37,6 +37,7 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: 'Hello',
|
||||
model: 'claude-opus-4-5-20251101',
|
||||
cwd: '/test',
|
||||
});
|
||||
|
||||
@@ -79,7 +80,7 @@ describe('claude-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should use default allowed tools when not specified', async () => {
|
||||
it('should not include allowedTools when not specified (caller decides via sdk-options)', async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: 'text', text: 'test' };
|
||||
@@ -88,6 +89,7 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: 'Test',
|
||||
model: 'claude-opus-4-5-20251101',
|
||||
cwd: '/test',
|
||||
});
|
||||
|
||||
@@ -95,37 +97,8 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: 'Test',
|
||||
options: expect.objectContaining({
|
||||
allowedTools: ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'],
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
it('should pass sandbox configuration when provided', async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: 'text', text: 'test' };
|
||||
})()
|
||||
);
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: 'Test',
|
||||
cwd: '/test',
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: 'Test',
|
||||
options: expect.objectContaining({
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
options: expect.not.objectContaining({
|
||||
allowedTools: expect.anything(),
|
||||
}),
|
||||
});
|
||||
});
|
||||
@@ -141,6 +114,7 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: 'Test',
|
||||
model: 'claude-opus-4-5-20251101',
|
||||
cwd: '/test',
|
||||
abortController,
|
||||
});
|
||||
@@ -169,6 +143,7 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: 'Current message',
|
||||
model: 'claude-opus-4-5-20251101',
|
||||
cwd: '/test',
|
||||
conversationHistory,
|
||||
sdkSessionId: 'test-session-id',
|
||||
@@ -199,6 +174,7 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: arrayPrompt as any,
|
||||
model: 'claude-opus-4-5-20251101',
|
||||
cwd: '/test',
|
||||
});
|
||||
|
||||
@@ -218,6 +194,7 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: 'Test',
|
||||
model: 'claude-opus-4-5-20251101',
|
||||
cwd: '/test',
|
||||
});
|
||||
|
||||
@@ -243,6 +220,7 @@ describe('claude-provider.ts', () => {
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: 'Test',
|
||||
model: 'claude-opus-4-5-20251101',
|
||||
cwd: '/test',
|
||||
});
|
||||
|
||||
|
||||
293
apps/server/tests/unit/providers/codex-provider.test.ts
Normal file
@@ -0,0 +1,293 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterAll } from 'vitest';
|
||||
import os from 'os';
|
||||
import path from 'path';
|
||||
import { CodexProvider } from '../../../src/providers/codex-provider.js';
|
||||
import type { ProviderMessage } from '../../../src/providers/types.js';
|
||||
import { collectAsyncGenerator } from '../../utils/helpers.js';
|
||||
import {
|
||||
spawnJSONLProcess,
|
||||
findCodexCliPath,
|
||||
secureFs,
|
||||
getCodexConfigDir,
|
||||
getCodexAuthIndicators,
|
||||
} from '@automaker/platform';
|
||||
|
||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||
const originalOpenAIKey = process.env[OPENAI_API_KEY_ENV];
|
||||
|
||||
const codexRunMock = vi.fn();
|
||||
|
||||
vi.mock('@openai/codex-sdk', () => ({
|
||||
Codex: class {
|
||||
constructor(_opts: { apiKey: string }) {}
|
||||
startThread() {
|
||||
return {
|
||||
id: 'thread-123',
|
||||
run: codexRunMock,
|
||||
};
|
||||
}
|
||||
resumeThread() {
|
||||
return {
|
||||
id: 'thread-123',
|
||||
run: codexRunMock,
|
||||
};
|
||||
}
|
||||
},
|
||||
}));
|
||||
|
||||
const EXEC_SUBCOMMAND = 'exec';
|
||||
|
||||
vi.mock('@automaker/platform', () => ({
|
||||
spawnJSONLProcess: vi.fn(),
|
||||
spawnProcess: vi.fn(),
|
||||
findCodexCliPath: vi.fn(),
|
||||
getCodexAuthIndicators: vi.fn().mockResolvedValue({
|
||||
hasAuthFile: false,
|
||||
hasOAuthToken: false,
|
||||
hasApiKey: false,
|
||||
}),
|
||||
getCodexConfigDir: vi.fn().mockReturnValue('/home/test/.codex'),
|
||||
secureFs: {
|
||||
readFile: vi.fn(),
|
||||
mkdir: vi.fn(),
|
||||
writeFile: vi.fn(),
|
||||
},
|
||||
getDataDirectory: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock('@/services/settings-service.js', () => ({
|
||||
SettingsService: class {
|
||||
async getGlobalSettings() {
|
||||
return {
|
||||
codexAutoLoadAgents: false,
|
||||
codexSandboxMode: 'workspace-write',
|
||||
codexApprovalPolicy: 'on-request',
|
||||
};
|
||||
}
|
||||
},
|
||||
}));
|
||||
|
||||
describe('codex-provider.ts', () => {
|
||||
let provider: CodexProvider;
|
||||
|
||||
afterAll(() => {
|
||||
if (originalOpenAIKey !== undefined) {
|
||||
process.env[OPENAI_API_KEY_ENV] = originalOpenAIKey;
|
||||
} else {
|
||||
delete process.env[OPENAI_API_KEY_ENV];
|
||||
}
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.mocked(getCodexConfigDir).mockReturnValue('/home/test/.codex');
|
||||
vi.mocked(findCodexCliPath).mockResolvedValue('/usr/bin/codex');
|
||||
vi.mocked(getCodexAuthIndicators).mockResolvedValue({
|
||||
hasAuthFile: true,
|
||||
hasOAuthToken: true,
|
||||
hasApiKey: false,
|
||||
});
|
||||
delete process.env[OPENAI_API_KEY_ENV];
|
||||
provider = new CodexProvider();
|
||||
});
|
||||
|
||||
describe('executeQuery', () => {
|
||||
it('emits tool_use and tool_result with shared tool_use_id for command execution', async () => {
|
||||
const mockEvents = [
|
||||
{
|
||||
type: 'item.started',
|
||||
item: {
|
||||
type: 'command_execution',
|
||||
id: 'cmd-1',
|
||||
command: 'ls',
|
||||
},
|
||||
},
|
||||
{
|
||||
type: 'item.completed',
|
||||
item: {
|
||||
type: 'command_execution',
|
||||
id: 'cmd-1',
|
||||
output: 'file1\nfile2',
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue(
|
||||
(async function* () {
|
||||
for (const event of mockEvents) {
|
||||
yield event;
|
||||
}
|
||||
})()
|
||||
);
|
||||
const results = await collectAsyncGenerator<ProviderMessage>(
|
||||
provider.executeQuery({
|
||||
prompt: 'List files',
|
||||
model: 'gpt-5.2',
|
||||
cwd: '/tmp',
|
||||
})
|
||||
);
|
||||
|
||||
expect(results).toHaveLength(2);
|
||||
const toolUse = results[0];
|
||||
const toolResult = results[1];
|
||||
|
||||
expect(toolUse.type).toBe('assistant');
|
||||
expect(toolUse.message?.content[0].type).toBe('tool_use');
|
||||
const toolUseId = toolUse.message?.content[0].tool_use_id;
|
||||
expect(toolUseId).toBeDefined();
|
||||
|
||||
expect(toolResult.type).toBe('assistant');
|
||||
expect(toolResult.message?.content[0].type).toBe('tool_result');
|
||||
expect(toolResult.message?.content[0].tool_use_id).toBe(toolUseId);
|
||||
expect(toolResult.message?.content[0].content).toBe('file1\nfile2');
|
||||
});
|
||||
|
||||
it('adds output schema and max turn overrides when configured', async () => {
|
||||
// Note: With full-permissions always on, these flags are no longer used
|
||||
// This test now only verifies the basic CLI structure
|
||||
// Using gpt-5.1-codex-max which should route to Codex (not Cursor)
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
|
||||
|
||||
await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Test config',
|
||||
model: 'gpt-5.1-codex-max',
|
||||
cwd: '/tmp',
|
||||
allowedTools: ['Read', 'Write'],
|
||||
maxTurns: 5,
|
||||
})
|
||||
);
|
||||
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
expect(call.args).toContain('exec'); // Should have exec subcommand
|
||||
expect(call.args).toContain('--dangerously-bypass-approvals-and-sandbox'); // Should have YOLO flag
|
||||
expect(call.args).toContain('--model');
|
||||
expect(call.args).toContain('--json');
|
||||
});
|
||||
|
||||
it('overrides approval policy when MCP auto-approval is enabled', async () => {
|
||||
// Note: With full-permissions always on (--dangerously-bypass-approvals-and-sandbox),
|
||||
// approval policy is bypassed, not configured via --config
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
|
||||
|
||||
await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Test approvals',
|
||||
model: 'gpt-5.1-codex-max',
|
||||
cwd: '/tmp',
|
||||
mcpServers: { mock: { type: 'stdio', command: 'node' } },
|
||||
mcpAutoApproveTools: true,
|
||||
codexSettings: { approvalPolicy: 'untrusted' },
|
||||
})
|
||||
);
|
||||
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
const execIndex = call.args.indexOf(EXEC_SUBCOMMAND);
|
||||
expect(call.args).toContain('--dangerously-bypass-approvals-and-sandbox'); // YOLO flag bypasses approval
|
||||
expect(call.args).toContain('--model');
|
||||
expect(call.args).toContain('--json');
|
||||
});
|
||||
|
||||
it('injects user and project instructions when auto-load is enabled', async () => {
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
|
||||
|
||||
const userPath = path.join('/home/test/.codex', 'AGENTS.md');
|
||||
const projectPath = path.join('/tmp/project', '.codex', 'AGENTS.md');
|
||||
vi.mocked(secureFs.readFile).mockImplementation(async (filePath: string) => {
|
||||
if (filePath === userPath) {
|
||||
return 'User rules';
|
||||
}
|
||||
if (filePath === projectPath) {
|
||||
return 'Project rules';
|
||||
}
|
||||
throw new Error('missing');
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Hello',
|
||||
model: 'gpt-5.2',
|
||||
cwd: '/tmp/project',
|
||||
codexSettings: { autoLoadAgents: true },
|
||||
})
|
||||
);
|
||||
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
const promptText = call.stdinData;
|
||||
expect(promptText).toContain('User rules');
|
||||
expect(promptText).toContain('Project rules');
|
||||
});
|
||||
|
||||
it('disables sandbox mode when running in cloud storage paths', async () => {
|
||||
// Note: With full-permissions always on (--dangerously-bypass-approvals-and-sandbox),
|
||||
// sandbox mode is bypassed, not configured via --sandbox flag
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
|
||||
|
||||
const cloudPath = path.join(os.homedir(), 'Dropbox', 'project');
|
||||
await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Hello',
|
||||
model: 'gpt-5.1-codex-max',
|
||||
cwd: cloudPath,
|
||||
codexSettings: { sandboxMode: 'workspace-write' },
|
||||
})
|
||||
);
|
||||
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
// YOLO flag bypasses sandbox entirely
|
||||
expect(call.args).toContain('--dangerously-bypass-approvals-and-sandbox');
|
||||
expect(call.args).toContain('--model');
|
||||
expect(call.args).toContain('--json');
|
||||
});
|
||||
|
||||
it('uses the SDK when no tools are requested and an API key is present', async () => {
|
||||
process.env[OPENAI_API_KEY_ENV] = 'sk-test';
|
||||
codexRunMock.mockResolvedValue({ finalResponse: 'Hello from SDK' });
|
||||
|
||||
const results = await collectAsyncGenerator<ProviderMessage>(
|
||||
provider.executeQuery({
|
||||
prompt: 'Hello',
|
||||
model: 'gpt-5.2',
|
||||
cwd: '/tmp',
|
||||
allowedTools: [],
|
||||
})
|
||||
);
|
||||
|
||||
expect(results[0].message?.content[0].text).toBe('Hello from SDK');
|
||||
expect(results[1].result).toBe('Hello from SDK');
|
||||
});
|
||||
|
||||
it('uses the CLI when tools are requested even if an API key is present', async () => {
|
||||
process.env[OPENAI_API_KEY_ENV] = 'sk-test';
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
|
||||
|
||||
await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Read files',
|
||||
model: 'gpt-5.2',
|
||||
cwd: '/tmp',
|
||||
allowedTools: ['Read'],
|
||||
})
|
||||
);
|
||||
|
||||
expect(codexRunMock).not.toHaveBeenCalled();
|
||||
expect(spawnJSONLProcess).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('falls back to CLI when no tools are requested and no API key is available', async () => {
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
|
||||
|
||||
await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Hello',
|
||||
model: 'gpt-5.2',
|
||||
cwd: '/tmp',
|
||||
allowedTools: [],
|
||||
})
|
||||
);
|
||||
|
||||
expect(codexRunMock).not.toHaveBeenCalled();
|
||||
expect(spawnJSONLProcess).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
});
|
||||
1277
apps/server/tests/unit/providers/opencode-provider.test.ts
Normal file
File diff suppressed because it is too large
@@ -2,18 +2,42 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { ProviderFactory } from '@/providers/provider-factory.js';
|
||||
import { ClaudeProvider } from '@/providers/claude-provider.js';
|
||||
import { CursorProvider } from '@/providers/cursor-provider.js';
|
||||
import { CodexProvider } from '@/providers/codex-provider.js';
|
||||
import { OpencodeProvider } from '@/providers/opencode-provider.js';
|
||||
|
||||
describe('provider-factory.ts', () => {
|
||||
let consoleSpy: any;
|
||||
let detectClaudeSpy: any;
|
||||
let detectCursorSpy: any;
|
||||
let detectCodexSpy: any;
|
||||
let detectOpencodeSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
consoleSpy = {
|
||||
warn: vi.spyOn(console, 'warn').mockImplementation(() => {}),
|
||||
};
|
||||
|
||||
// Avoid hitting real CLI / filesystem checks during unit tests
|
||||
detectClaudeSpy = vi
|
||||
.spyOn(ClaudeProvider.prototype, 'detectInstallation')
|
||||
.mockResolvedValue({ installed: true });
|
||||
detectCursorSpy = vi
|
||||
.spyOn(CursorProvider.prototype, 'detectInstallation')
|
||||
.mockResolvedValue({ installed: true });
|
||||
detectCodexSpy = vi
|
||||
.spyOn(CodexProvider.prototype, 'detectInstallation')
|
||||
.mockResolvedValue({ installed: true });
|
||||
detectOpencodeSpy = vi
|
||||
.spyOn(OpencodeProvider.prototype, 'detectInstallation')
|
||||
.mockResolvedValue({ installed: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
consoleSpy.warn.mockRestore();
|
||||
detectClaudeSpy.mockRestore();
|
||||
detectCursorSpy.mockRestore();
|
||||
detectCodexSpy.mockRestore();
|
||||
detectOpencodeSpy.mockRestore();
|
||||
});
|
||||
|
||||
describe('getProviderForModel', () => {
|
||||
@@ -111,10 +135,11 @@ describe('provider-factory.ts', () => {
|
||||
});
|
||||
|
||||
describe('Cursor models via model ID lookup', () => {
|
||||
it('should return CursorProvider for gpt-5.2 (valid Cursor model)', () => {
|
||||
// gpt-5.2 is in CURSOR_MODEL_MAP
|
||||
it('should return CodexProvider for gpt-5.2 (Codex model, not Cursor)', () => {
|
||||
// gpt-5.2 is in both CURSOR_MODEL_MAP and CODEX_MODEL_CONFIG_MAP
|
||||
// It should route to Codex since Codex models take priority
|
||||
const provider = ProviderFactory.getProviderForModel('gpt-5.2');
|
||||
expect(provider).toBeInstanceOf(CursorProvider);
|
||||
expect(provider).toBeInstanceOf(CodexProvider);
|
||||
});
|
||||
|
||||
it('should return CursorProvider for grok (valid Cursor model)', () => {
|
||||
@@ -141,9 +166,9 @@ describe('provider-factory.ts', () => {
|
||||
expect(hasClaudeProvider).toBe(true);
|
||||
});
|
||||
|
||||
it('should return exactly 2 providers', () => {
|
||||
it('should return exactly 4 providers', () => {
|
||||
const providers = ProviderFactory.getAllProviders();
|
||||
expect(providers).toHaveLength(2);
|
||||
expect(providers).toHaveLength(4);
|
||||
});
|
||||
|
||||
it('should include CursorProvider', () => {
|
||||
@@ -179,7 +204,9 @@ describe('provider-factory.ts', () => {
|
||||
|
||||
expect(keys).toContain('claude');
|
||||
expect(keys).toContain('cursor');
|
||||
expect(keys).toHaveLength(2);
|
||||
expect(keys).toContain('codex');
|
||||
expect(keys).toContain('opencode');
|
||||
expect(keys).toHaveLength(4);
|
||||
});
|
||||
|
||||
it('should include cursor status', async () => {
|
||||
|
||||
@@ -144,6 +144,33 @@ describe('settings-service.ts', () => {
      expect(updated.keyboardShortcuts.agent).toBe(DEFAULT_GLOBAL_SETTINGS.keyboardShortcuts.agent);
    });

    it('should not overwrite non-empty projects with an empty array (data loss guard)', async () => {
      const initial: GlobalSettings = {
        ...DEFAULT_GLOBAL_SETTINGS,
        theme: 'solarized' as GlobalSettings['theme'],
        projects: [
          {
            id: 'proj1',
            name: 'Project 1',
            path: '/tmp/project-1',
            lastOpened: new Date().toISOString(),
          },
        ] as any,
      };
      const settingsPath = path.join(testDataDir, 'settings.json');
      await fs.writeFile(settingsPath, JSON.stringify(initial, null, 2));

      const updated = await settingsService.updateGlobalSettings({
        projects: [],
        theme: 'light',
      } as any);

      expect(updated.projects.length).toBe(1);
      expect((updated.projects as any)[0]?.id).toBe('proj1');
      // Theme should be preserved in the same request if it attempted to wipe projects
      expect(updated.theme).toBe('solarized');
    });

    it('should create data directory if it does not exist', async () => {
      const newDataDir = path.join(os.tmpdir(), `new-data-dir-${Date.now()}`);
      const newService = new SettingsService(newDataDir);
@@ -1,6 +1,6 @@
{
  "name": "@automaker/ui",
  "version": "0.7.3",
  "version": "0.9.0",
  "description": "An autonomous AI development studio that helps you build software faster using AI-powered agents",
  "homepage": "https://github.com/AutoMaker-Org/automaker",
  "repository": {
@@ -3,6 +3,7 @@ import { defineConfig, devices } from '@playwright/test';
|
||||
const port = process.env.TEST_PORT || 3007;
|
||||
const serverPort = process.env.TEST_SERVER_PORT || 3008;
|
||||
const reuseServer = process.env.TEST_REUSE_SERVER === 'true';
|
||||
const useExternalBackend = !!process.env.VITE_SERVER_URL;
|
||||
// Always use mock agent for tests (disables rate limiting, uses mock Claude responses)
|
||||
const mockAgent = true;
|
||||
|
||||
@@ -33,31 +34,38 @@ export default defineConfig({
|
||||
webServer: [
|
||||
// Backend server - runs with mock agent enabled in CI
|
||||
// Uses dev:test (no file watching) to avoid port conflicts from server restarts
|
||||
{
|
||||
command: `cd ../server && npm run dev:test`,
|
||||
url: `http://localhost:${serverPort}/api/health`,
|
||||
// Don't reuse existing server to ensure we use the test API key
|
||||
reuseExistingServer: false,
|
||||
timeout: 60000,
|
||||
env: {
|
||||
...process.env,
|
||||
PORT: String(serverPort),
|
||||
// Enable mock agent in CI to avoid real API calls
|
||||
AUTOMAKER_MOCK_AGENT: mockAgent ? 'true' : 'false',
|
||||
// Set a test API key for web mode authentication
|
||||
AUTOMAKER_API_KEY: process.env.AUTOMAKER_API_KEY || 'test-api-key-for-e2e-tests',
|
||||
// Hide the API key banner to reduce log noise
|
||||
AUTOMAKER_HIDE_API_KEY: 'true',
|
||||
// No ALLOWED_ROOT_DIRECTORY restriction - allow all paths for testing
|
||||
// Simulate containerized environment to skip sandbox confirmation dialogs
|
||||
IS_CONTAINERIZED: 'true',
|
||||
},
|
||||
},
|
||||
...(useExternalBackend
|
||||
? []
|
||||
: [
|
||||
{
|
||||
command: `cd ../server && npm run dev:test`,
|
||||
url: `http://localhost:${serverPort}/api/health`,
|
||||
// Don't reuse existing server to ensure we use the test API key
|
||||
reuseExistingServer: false,
|
||||
timeout: 60000,
|
||||
env: {
|
||||
...process.env,
|
||||
PORT: String(serverPort),
|
||||
// Enable mock agent in CI to avoid real API calls
|
||||
AUTOMAKER_MOCK_AGENT: mockAgent ? 'true' : 'false',
|
||||
// Set a test API key for web mode authentication
|
||||
AUTOMAKER_API_KEY:
|
||||
process.env.AUTOMAKER_API_KEY || 'test-api-key-for-e2e-tests',
|
||||
// Hide the API key banner to reduce log noise
|
||||
AUTOMAKER_HIDE_API_KEY: 'true',
|
||||
// Explicitly unset ALLOWED_ROOT_DIRECTORY to allow all paths for testing
|
||||
// (prevents inheriting /projects from Docker or other environments)
|
||||
ALLOWED_ROOT_DIRECTORY: '',
|
||||
// Simulate containerized environment to skip sandbox confirmation dialogs
|
||||
IS_CONTAINERIZED: 'true',
|
||||
},
|
||||
},
|
||||
]),
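// Editor's note: summary of the backend test-server environment configured above.
//   AUTOMAKER_MOCK_AGENT=true    -> mock Claude responses, no real API calls
//   AUTOMAKER_API_KEY=<test key> -> web-mode authentication for e2e runs
//   AUTOMAKER_HIDE_API_KEY=true  -> suppresses the API key banner in logs
//   ALLOWED_ROOT_DIRECTORY=''    -> no path restriction during tests
//   IS_CONTAINERIZED=true        -> skips the sandbox confirmation dialogs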
|
||||
// Frontend Vite dev server
|
||||
{
|
||||
command: `npm run dev`,
|
||||
url: `http://localhost:${port}`,
|
||||
reuseExistingServer: true,
|
||||
reuseExistingServer: false,
|
||||
timeout: 120000,
|
||||
env: {
|
||||
...process.env,
|
||||
|
||||
@@ -10,24 +10,42 @@ const execAsync = promisify(exec);
|
||||
|
||||
const SERVER_PORT = process.env.TEST_SERVER_PORT || 3008;
|
||||
const UI_PORT = process.env.TEST_PORT || 3007;
|
||||
const USE_EXTERNAL_SERVER = !!process.env.VITE_SERVER_URL;
|
||||
|
||||
async function killProcessOnPort(port) {
|
||||
try {
|
||||
const { stdout } = await execAsync(`lsof -ti:${port}`);
|
||||
const pids = stdout.trim().split('\n').filter(Boolean);
|
||||
const hasLsof = await execAsync('command -v lsof').then(
|
||||
() => true,
|
||||
() => false
|
||||
);
|
||||
|
||||
if (pids.length > 0) {
|
||||
console.log(`[KillTestServers] Found process(es) on port ${port}: ${pids.join(', ')}`);
|
||||
for (const pid of pids) {
|
||||
try {
|
||||
await execAsync(`kill -9 ${pid}`);
|
||||
console.log(`[KillTestServers] Killed process ${pid}`);
|
||||
} catch (error) {
|
||||
// Process might have already exited
|
||||
if (hasLsof) {
|
||||
const { stdout } = await execAsync(`lsof -ti:${port}`);
|
||||
const pids = stdout.trim().split('\n').filter(Boolean);
|
||||
|
||||
if (pids.length > 0) {
|
||||
console.log(`[KillTestServers] Found process(es) on port ${port}: ${pids.join(', ')}`);
|
||||
for (const pid of pids) {
|
||||
try {
|
||||
await execAsync(`kill -9 ${pid}`);
|
||||
console.log(`[KillTestServers] Killed process ${pid}`);
|
||||
} catch (error) {
|
||||
// Process might have already exited
|
||||
}
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 500));
|
||||
}
|
||||
// Wait a moment for the port to be released
|
||||
return;
|
||||
}
|
||||
|
||||
const hasFuser = await execAsync('command -v fuser').then(
|
||||
() => true,
|
||||
() => false
|
||||
);
|
||||
if (hasFuser) {
|
||||
await execAsync(`fuser -k -9 ${port}/tcp`).catch(() => undefined);
|
||||
await new Promise((resolve) => setTimeout(resolve, 500));
|
||||
return;
|
||||
}
|
||||
} catch (error) {
|
||||
// No process on port, which is fine
|
||||
@@ -36,7 +54,9 @@ async function killProcessOnPort(port) {
|
||||
|
||||
async function main() {
|
||||
console.log('[KillTestServers] Checking for existing test servers...');
|
||||
await killProcessOnPort(Number(SERVER_PORT));
|
||||
if (!USE_EXTERNAL_SERVER) {
|
||||
await killProcessOnPort(Number(SERVER_PORT));
|
||||
}
|
||||
await killProcessOnPort(Number(UI_PORT));
|
||||
console.log('[KillTestServers] Done');
|
||||
}
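// Editor's note: the port-cleanup fallback order introduced above, in brief:
//   1. If lsof is available: lsof -ti:<port>, then kill -9 each PID.
//   2. Else if fuser is available: fuser -k -9 <port>/tcp.
//   3. Otherwise: do nothing (no process found or no cleanup tool available).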
|
||||
|
||||
@@ -3,9 +3,11 @@
|
||||
/**
|
||||
* Setup script for E2E test fixtures
|
||||
* Creates the necessary test fixture directories and files before running Playwright tests
|
||||
* Also resets the server's settings.json to a known state for test isolation
|
||||
*/
|
||||
|
||||
import * as fs from 'fs';
|
||||
import * as os from 'os';
|
||||
import * as path from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
|
||||
@@ -16,6 +18,9 @@ const __dirname = path.dirname(__filename);
|
||||
const WORKSPACE_ROOT = path.resolve(__dirname, '../../..');
|
||||
const FIXTURE_PATH = path.join(WORKSPACE_ROOT, 'test/fixtures/projectA');
|
||||
const SPEC_FILE_PATH = path.join(FIXTURE_PATH, '.automaker/app_spec.txt');
|
||||
const SERVER_SETTINGS_PATH = path.join(WORKSPACE_ROOT, 'apps/server/data/settings.json');
|
||||
// Create a shared test workspace directory that will be used as default for project creation
|
||||
const TEST_WORKSPACE_DIR = path.join(os.tmpdir(), 'automaker-e2e-workspace');
|
||||
|
||||
const SPEC_CONTENT = `<app_spec>
|
||||
<name>Test Project A</name>
|
||||
@@ -27,10 +32,154 @@ const SPEC_CONTENT = `<app_spec>
|
||||
</app_spec>
|
||||
`;
|
||||
|
||||
// Clean settings.json for E2E tests - no current project so localStorage can control state
|
||||
const E2E_SETTINGS = {
|
||||
version: 4,
|
||||
setupComplete: true,
|
||||
isFirstRun: false,
|
||||
skipClaudeSetup: false,
|
||||
theme: 'dark',
|
||||
sidebarOpen: true,
|
||||
chatHistoryOpen: false,
|
||||
kanbanCardDetailLevel: 'standard',
|
||||
maxConcurrency: 3,
|
||||
defaultSkipTests: true,
|
||||
enableDependencyBlocking: true,
|
||||
skipVerificationInAutoMode: false,
|
||||
useWorktrees: true,
|
||||
showProfilesOnly: false,
|
||||
defaultPlanningMode: 'skip',
|
||||
defaultRequirePlanApproval: false,
|
||||
defaultAIProfileId: null,
|
||||
muteDoneSound: false,
|
||||
phaseModels: {
|
||||
enhancementModel: { model: 'sonnet' },
|
||||
fileDescriptionModel: { model: 'haiku' },
|
||||
imageDescriptionModel: { model: 'haiku' },
|
||||
validationModel: { model: 'sonnet' },
|
||||
specGenerationModel: { model: 'opus' },
|
||||
featureGenerationModel: { model: 'sonnet' },
|
||||
backlogPlanningModel: { model: 'sonnet' },
|
||||
projectAnalysisModel: { model: 'sonnet' },
|
||||
suggestionsModel: { model: 'sonnet' },
|
||||
},
|
||||
enhancementModel: 'sonnet',
|
||||
validationModel: 'opus',
|
||||
enabledCursorModels: ['auto', 'composer-1'],
|
||||
cursorDefaultModel: 'auto',
|
||||
keyboardShortcuts: {
|
||||
board: 'K',
|
||||
agent: 'A',
|
||||
spec: 'D',
|
||||
context: 'C',
|
||||
settings: 'S',
|
||||
profiles: 'M',
|
||||
terminal: 'T',
|
||||
toggleSidebar: '`',
|
||||
addFeature: 'N',
|
||||
addContextFile: 'N',
|
||||
startNext: 'G',
|
||||
newSession: 'N',
|
||||
openProject: 'O',
|
||||
projectPicker: 'P',
|
||||
cyclePrevProject: 'Q',
|
||||
cycleNextProject: 'E',
|
||||
addProfile: 'N',
|
||||
splitTerminalRight: 'Alt+D',
|
||||
splitTerminalDown: 'Alt+S',
|
||||
closeTerminal: 'Alt+W',
|
||||
tools: 'T',
|
||||
ideation: 'I',
|
||||
githubIssues: 'G',
|
||||
githubPrs: 'R',
|
||||
newTerminalTab: 'Alt+T',
|
||||
},
|
||||
aiProfiles: [
|
||||
{
|
||||
id: 'profile-heavy-task',
|
||||
name: 'Heavy Task',
|
||||
description:
|
||||
'Claude Opus with Ultrathink for complex architecture, migrations, or deep debugging.',
|
||||
model: 'opus',
|
||||
thinkingLevel: 'ultrathink',
|
||||
provider: 'claude',
|
||||
isBuiltIn: true,
|
||||
icon: 'Brain',
|
||||
},
|
||||
{
|
||||
id: 'profile-balanced',
|
||||
name: 'Balanced',
|
||||
description: 'Claude Sonnet with medium thinking for typical development tasks.',
|
||||
model: 'sonnet',
|
||||
thinkingLevel: 'medium',
|
||||
provider: 'claude',
|
||||
isBuiltIn: true,
|
||||
icon: 'Scale',
|
||||
},
|
||||
{
|
||||
id: 'profile-quick-edit',
|
||||
name: 'Quick Edit',
|
||||
description: 'Claude Haiku for fast, simple edits and minor fixes.',
|
||||
model: 'haiku',
|
||||
thinkingLevel: 'none',
|
||||
provider: 'claude',
|
||||
isBuiltIn: true,
|
||||
icon: 'Zap',
|
||||
},
|
||||
{
|
||||
id: 'profile-cursor-refactoring',
|
||||
name: 'Cursor Refactoring',
|
||||
description: 'Cursor Composer 1 for refactoring tasks.',
|
||||
provider: 'cursor',
|
||||
cursorModel: 'composer-1',
|
||||
isBuiltIn: true,
|
||||
icon: 'Sparkles',
|
||||
},
|
||||
],
|
||||
// Default test project using the fixture path - tests can override via route mocking if needed
|
||||
projects: [
|
||||
{
|
||||
id: 'e2e-default-project',
|
||||
name: 'E2E Test Project',
|
||||
path: FIXTURE_PATH,
|
||||
lastOpened: new Date().toISOString(),
|
||||
},
|
||||
],
|
||||
trashedProjects: [],
|
||||
currentProjectId: 'e2e-default-project',
|
||||
projectHistory: [],
|
||||
projectHistoryIndex: 0,
|
||||
lastProjectDir: TEST_WORKSPACE_DIR,
|
||||
recentFolders: [],
|
||||
worktreePanelCollapsed: false,
|
||||
lastSelectedSessionByProject: {},
|
||||
autoLoadClaudeMd: false,
|
||||
skipSandboxWarning: true,
|
||||
codexAutoLoadAgents: false,
|
||||
codexSandboxMode: 'workspace-write',
|
||||
codexApprovalPolicy: 'on-request',
|
||||
codexEnableWebSearch: false,
|
||||
codexEnableImages: true,
|
||||
codexAdditionalDirs: [],
|
||||
mcpServers: [],
|
||||
enableSandboxMode: false,
|
||||
mcpAutoApproveTools: true,
|
||||
mcpUnrestrictedTools: true,
|
||||
promptCustomization: {},
|
||||
localStorageMigrated: true,
|
||||
};
|
||||
|
||||
function setupFixtures() {
|
||||
console.log('Setting up E2E test fixtures...');
|
||||
console.log(`Workspace root: ${WORKSPACE_ROOT}`);
|
||||
console.log(`Fixture path: ${FIXTURE_PATH}`);
|
||||
console.log(`Test workspace dir: ${TEST_WORKSPACE_DIR}`);
|
||||
|
||||
// Create test workspace directory for project creation tests
|
||||
if (!fs.existsSync(TEST_WORKSPACE_DIR)) {
|
||||
fs.mkdirSync(TEST_WORKSPACE_DIR, { recursive: true });
|
||||
console.log(`Created test workspace directory: ${TEST_WORKSPACE_DIR}`);
|
||||
}
|
||||
|
||||
// Create fixture directory
|
||||
const specDir = path.dirname(SPEC_FILE_PATH);
|
||||
@@ -43,6 +192,15 @@ function setupFixtures() {
|
||||
fs.writeFileSync(SPEC_FILE_PATH, SPEC_CONTENT);
|
||||
console.log(`Created fixture file: ${SPEC_FILE_PATH}`);
|
||||
|
||||
// Reset server settings.json to a clean state for E2E tests
|
||||
const settingsDir = path.dirname(SERVER_SETTINGS_PATH);
|
||||
if (!fs.existsSync(settingsDir)) {
|
||||
fs.mkdirSync(settingsDir, { recursive: true });
|
||||
console.log(`Created directory: ${settingsDir}`);
|
||||
}
|
||||
fs.writeFileSync(SERVER_SETTINGS_PATH, JSON.stringify(E2E_SETTINGS, null, 2));
|
||||
console.log(`Reset server settings: ${SERVER_SETTINGS_PATH}`);
|
||||
|
||||
console.log('E2E test fixtures setup complete!');
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ import { RouterProvider } from '@tanstack/react-router';
|
||||
import { createLogger } from '@automaker/utils/logger';
|
||||
import { router } from './utils/router';
|
||||
import { SplashScreen } from './components/splash-screen';
|
||||
import { useSettingsMigration } from './hooks/use-settings-migration';
|
||||
import { useSettingsSync } from './hooks/use-settings-sync';
|
||||
import { useCursorStatusInit } from './hooks/use-cursor-status-init';
|
||||
import './styles/global.css';
|
||||
import './styles/theme-imports';
|
||||
@@ -32,10 +32,14 @@ export default function App() {
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Run settings migration on startup (localStorage -> file storage)
|
||||
const migrationState = useSettingsMigration();
|
||||
if (migrationState.migrated) {
|
||||
logger.info('Settings migrated to file storage');
|
||||
// Settings are now loaded in __root.tsx after successful session verification
|
||||
// This ensures a unified flow: verify session → load settings → redirect
|
||||
// We no longer block router rendering here - settings loading happens in __root.tsx
|
||||
|
||||
// Sync settings changes back to server (API-first persistence)
|
||||
const settingsSyncState = useSettingsSync();
|
||||
if (settingsSyncState.error) {
|
||||
logger.error('Settings sync error:', settingsSyncState.error);
|
||||
}
|
||||
|
||||
// Initialize Cursor CLI status at startup
|
||||
|
||||
apps/ui/src/components/codex-usage-popover.tsx (new file, 405 lines)
@@ -0,0 +1,405 @@
|
||||
import { useState, useEffect, useMemo, useCallback } from 'react';
|
||||
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover';
|
||||
import { Button } from '@/components/ui/button';
|
||||
import { RefreshCw, AlertTriangle, CheckCircle, XCircle, Clock, ExternalLink } from 'lucide-react';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { getElectronAPI } from '@/lib/electron';
|
||||
import { useAppStore } from '@/store/app-store';
|
||||
import { useSetupStore } from '@/store/setup-store';
|
||||
|
||||
// Error codes for distinguishing failure modes
|
||||
const ERROR_CODES = {
|
||||
API_BRIDGE_UNAVAILABLE: 'API_BRIDGE_UNAVAILABLE',
|
||||
AUTH_ERROR: 'AUTH_ERROR',
|
||||
NOT_AVAILABLE: 'NOT_AVAILABLE',
|
||||
UNKNOWN: 'UNKNOWN',
|
||||
} as const;
|
||||
|
||||
type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];
|
||||
|
||||
type UsageError = {
|
||||
code: ErrorCode;
|
||||
message: string;
|
||||
};
|
||||
|
||||
// Fixed refresh interval (45 seconds)
|
||||
const REFRESH_INTERVAL_SECONDS = 45;
|
||||
|
||||
// Helper to format reset time
|
||||
function formatResetTime(unixTimestamp: number): string {
|
||||
const date = new Date(unixTimestamp * 1000);
|
||||
const now = new Date();
|
||||
const diff = date.getTime() - now.getTime();
|
||||
|
||||
// If less than 1 hour, show minutes
|
||||
if (diff < 3600000) {
|
||||
const mins = Math.ceil(diff / 60000);
|
||||
return `Resets in ${mins}m`;
|
||||
}
|
||||
|
||||
// If less than 24 hours, show hours and minutes
|
||||
if (diff < 86400000) {
|
||||
const hours = Math.floor(diff / 3600000);
|
||||
const mins = Math.ceil((diff % 3600000) / 60000);
|
||||
return `Resets in ${hours}h ${mins > 0 ? `${mins}m` : ''}`;
|
||||
}
|
||||
|
||||
// Otherwise show date
|
||||
return `Resets ${date.toLocaleDateString()} at ${date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}`;
|
||||
}
|
||||
|
||||
// Helper to format window duration
|
||||
function getWindowLabel(durationMins: number): { title: string; subtitle: string } {
|
||||
if (durationMins < 60) {
|
||||
return { title: `${durationMins}min Window`, subtitle: 'Rate limit' };
|
||||
}
|
||||
if (durationMins < 1440) {
|
||||
const hours = Math.round(durationMins / 60);
|
||||
return { title: `${hours}h Window`, subtitle: 'Rate limit' };
|
||||
}
|
||||
const days = Math.round(durationMins / 1440);
|
||||
return { title: `${days}d Window`, subtitle: 'Rate limit' };
|
||||
}
|
||||
|
||||
export function CodexUsagePopover() {
|
||||
const { codexUsage, codexUsageLastUpdated, setCodexUsage } = useAppStore();
|
||||
const codexAuthStatus = useSetupStore((state) => state.codexAuthStatus);
|
||||
const [open, setOpen] = useState(false);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [error, setError] = useState<UsageError | null>(null);
|
||||
|
||||
// Check if Codex is authenticated
|
||||
const isCodexAuthenticated = codexAuthStatus?.authenticated;
|
||||
|
||||
// Check if data is stale (older than 2 minutes)
|
||||
const isStale = useMemo(() => {
|
||||
return !codexUsageLastUpdated || Date.now() - codexUsageLastUpdated > 2 * 60 * 1000;
|
||||
}, [codexUsageLastUpdated]);
|
||||
|
||||
const fetchUsage = useCallback(
|
||||
async (isAutoRefresh = false) => {
|
||||
if (!isAutoRefresh) setLoading(true);
|
||||
setError(null);
|
||||
try {
|
||||
const api = getElectronAPI();
|
||||
if (!api.codex) {
|
||||
setError({
|
||||
code: ERROR_CODES.API_BRIDGE_UNAVAILABLE,
|
||||
message: 'Codex API bridge not available',
|
||||
});
|
||||
return;
|
||||
}
|
||||
const data = await api.codex.getUsage();
|
||||
if ('error' in data) {
|
||||
// Check if it's the "not available" error
|
||||
if (
|
||||
data.message?.includes('not available') ||
|
||||
data.message?.includes('does not provide')
|
||||
) {
|
||||
setError({
|
||||
code: ERROR_CODES.NOT_AVAILABLE,
|
||||
message: data.message || data.error,
|
||||
});
|
||||
} else {
|
||||
setError({
|
||||
code: ERROR_CODES.AUTH_ERROR,
|
||||
message: data.message || data.error,
|
||||
});
|
||||
}
|
||||
return;
|
||||
}
|
||||
setCodexUsage(data);
|
||||
} catch (err) {
|
||||
setError({
|
||||
code: ERROR_CODES.UNKNOWN,
|
||||
message: err instanceof Error ? err.message : 'Failed to fetch usage',
|
||||
});
|
||||
} finally {
|
||||
if (!isAutoRefresh) setLoading(false);
|
||||
}
|
||||
},
|
||||
[setCodexUsage]
|
||||
);
|
||||
|
||||
// Auto-fetch on mount if data is stale (only if authenticated)
|
||||
useEffect(() => {
|
||||
if (isStale && isCodexAuthenticated) {
|
||||
fetchUsage(true);
|
||||
}
|
||||
}, [isStale, isCodexAuthenticated, fetchUsage]);
|
||||
|
||||
useEffect(() => {
|
||||
// Skip if not authenticated
|
||||
if (!isCodexAuthenticated) return;
|
||||
|
||||
// Initial fetch when opened
|
||||
if (open) {
|
||||
if (!codexUsage || isStale) {
|
||||
fetchUsage();
|
||||
}
|
||||
}
|
||||
|
||||
// Auto-refresh interval (only when open)
|
||||
let intervalId: NodeJS.Timeout | null = null;
|
||||
if (open) {
|
||||
intervalId = setInterval(() => {
|
||||
fetchUsage(true);
|
||||
}, REFRESH_INTERVAL_SECONDS * 1000);
|
||||
}
|
||||
|
||||
return () => {
|
||||
if (intervalId) clearInterval(intervalId);
|
||||
};
|
||||
}, [open, codexUsage, isStale, isCodexAuthenticated, fetchUsage]);
|
||||
|
||||
// Derived status color/icon helper
|
||||
const getStatusInfo = (percentage: number) => {
|
||||
if (percentage >= 75) return { color: 'text-red-500', icon: XCircle, bg: 'bg-red-500' };
|
||||
if (percentage >= 50)
|
||||
return { color: 'text-orange-500', icon: AlertTriangle, bg: 'bg-orange-500' };
|
||||
return { color: 'text-green-500', icon: CheckCircle, bg: 'bg-green-500' };
|
||||
};
|
||||
|
||||
// Helper component for the progress bar
|
||||
const ProgressBar = ({ percentage, colorClass }: { percentage: number; colorClass: string }) => (
|
||||
<div className="h-2 w-full bg-secondary/50 rounded-full overflow-hidden">
|
||||
<div
|
||||
className={cn('h-full transition-all duration-500', colorClass)}
|
||||
style={{ width: `${Math.min(percentage, 100)}%` }}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
|
||||
const UsageCard = ({
|
||||
title,
|
||||
subtitle,
|
||||
percentage,
|
||||
resetText,
|
||||
isPrimary = false,
|
||||
stale = false,
|
||||
}: {
|
||||
title: string;
|
||||
subtitle: string;
|
||||
percentage: number;
|
||||
resetText?: string;
|
||||
isPrimary?: boolean;
|
||||
stale?: boolean;
|
||||
}) => {
|
||||
const isValidPercentage =
|
||||
typeof percentage === 'number' && !isNaN(percentage) && isFinite(percentage);
|
||||
const safePercentage = isValidPercentage ? percentage : 0;
|
||||
|
||||
const status = getStatusInfo(safePercentage);
|
||||
const StatusIcon = status.icon;
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
'rounded-xl border bg-card/50 p-4 transition-opacity',
|
||||
isPrimary ? 'border-border/60 shadow-sm' : 'border-border/40',
|
||||
(stale || !isValidPercentage) && 'opacity-50'
|
||||
)}
|
||||
>
|
||||
<div className="flex items-start justify-between mb-3">
|
||||
<div>
|
||||
<h4 className={cn('font-semibold', isPrimary ? 'text-sm' : 'text-xs')}>{title}</h4>
|
||||
<p className="text-[10px] text-muted-foreground">{subtitle}</p>
|
||||
</div>
|
||||
{isValidPercentage ? (
|
||||
<div className="flex items-center gap-1.5">
|
||||
<StatusIcon className={cn('w-3.5 h-3.5', status.color)} />
|
||||
<span
|
||||
className={cn(
|
||||
'font-mono font-bold',
|
||||
status.color,
|
||||
isPrimary ? 'text-base' : 'text-sm'
|
||||
)}
|
||||
>
|
||||
{Math.round(safePercentage)}%
|
||||
</span>
|
||||
</div>
|
||||
) : (
|
||||
<span className="text-xs text-muted-foreground">N/A</span>
|
||||
)}
|
||||
</div>
|
||||
<ProgressBar
|
||||
percentage={safePercentage}
|
||||
colorClass={isValidPercentage ? status.bg : 'bg-muted-foreground/30'}
|
||||
/>
|
||||
{resetText && (
|
||||
<div className="mt-2 flex justify-end">
|
||||
<p className="text-xs text-muted-foreground flex items-center gap-1">
|
||||
<Clock className="w-3 h-3" />
|
||||
{resetText}
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
// Header Button
|
||||
const maxPercentage = codexUsage?.rateLimits
|
||||
? Math.max(
|
||||
codexUsage.rateLimits.primary?.usedPercent || 0,
|
||||
codexUsage.rateLimits.secondary?.usedPercent || 0
|
||||
)
|
||||
: 0;
|
||||
|
||||
const getProgressBarColor = (percentage: number) => {
|
||||
if (percentage >= 80) return 'bg-red-500';
|
||||
if (percentage >= 50) return 'bg-yellow-500';
|
||||
return 'bg-green-500';
|
||||
};
|
||||
|
||||
const trigger = (
|
||||
<Button variant="ghost" size="sm" className="h-9 gap-3 bg-secondary border border-border px-3">
|
||||
<span className="text-sm font-medium">Codex</span>
|
||||
{codexUsage && codexUsage.rateLimits && (
|
||||
<div
|
||||
className={cn(
|
||||
'h-1.5 w-16 bg-muted-foreground/20 rounded-full overflow-hidden transition-opacity',
|
||||
isStale && 'opacity-60'
|
||||
)}
|
||||
>
|
||||
<div
|
||||
className={cn('h-full transition-all duration-500', getProgressBarColor(maxPercentage))}
|
||||
style={{ width: `${Math.min(maxPercentage, 100)}%` }}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</Button>
|
||||
);
|
||||
|
||||
return (
|
||||
<Popover open={open} onOpenChange={setOpen}>
|
||||
<PopoverTrigger asChild>{trigger}</PopoverTrigger>
|
||||
<PopoverContent
|
||||
className="w-80 p-0 overflow-hidden bg-background/95 backdrop-blur-xl border-border shadow-2xl"
|
||||
align="end"
|
||||
sideOffset={8}
|
||||
>
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-border/50 bg-secondary/10">
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-sm font-semibold">Codex Usage</span>
|
||||
</div>
|
||||
{error && error.code !== ERROR_CODES.NOT_AVAILABLE && (
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className={cn('h-6 w-6', loading && 'opacity-80')}
|
||||
onClick={() => !loading && fetchUsage(false)}
|
||||
>
|
||||
<RefreshCw className="w-3.5 h-3.5" />
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="p-4 space-y-4">
|
||||
{error ? (
|
||||
<div className="flex flex-col items-center justify-center py-6 text-center space-y-3">
|
||||
<AlertTriangle className="w-8 h-8 text-yellow-500/80" />
|
||||
<div className="space-y-1 flex flex-col items-center">
|
||||
<p className="text-sm font-medium">
|
||||
{error.code === ERROR_CODES.NOT_AVAILABLE ? 'Usage not available' : error.message}
|
||||
</p>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{error.code === ERROR_CODES.API_BRIDGE_UNAVAILABLE ? (
|
||||
'Ensure the Electron bridge is running or restart the app'
|
||||
) : error.code === ERROR_CODES.NOT_AVAILABLE ? (
|
||||
<>
|
||||
Codex CLI doesn't provide usage statistics. Check{' '}
|
||||
<a
|
||||
href="https://platform.openai.com/usage"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
className="underline hover:text-foreground"
|
||||
>
|
||||
OpenAI dashboard
|
||||
</a>{' '}
|
||||
for usage details.
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
Make sure Codex CLI is installed and authenticated via{' '}
|
||||
<code className="font-mono bg-muted px-1 rounded">codex login</code>
|
||||
</>
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
) : !codexUsage ? (
|
||||
// Loading state
|
||||
<div className="flex flex-col items-center justify-center py-8 space-y-2">
|
||||
<RefreshCw className="w-6 h-6 animate-spin text-muted-foreground/50" />
|
||||
<p className="text-xs text-muted-foreground">Loading usage data...</p>
|
||||
</div>
|
||||
) : codexUsage.rateLimits ? (
|
||||
<>
|
||||
{/* Primary Window Card */}
|
||||
{codexUsage.rateLimits.primary && (
|
||||
<UsageCard
|
||||
title={getWindowLabel(codexUsage.rateLimits.primary.windowDurationMins).title}
|
||||
subtitle={
|
||||
getWindowLabel(codexUsage.rateLimits.primary.windowDurationMins).subtitle
|
||||
}
|
||||
percentage={codexUsage.rateLimits.primary.usedPercent}
|
||||
resetText={formatResetTime(codexUsage.rateLimits.primary.resetsAt)}
|
||||
isPrimary={true}
|
||||
stale={isStale}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Secondary Window Card */}
|
||||
{codexUsage.rateLimits.secondary && (
|
||||
<UsageCard
|
||||
title={getWindowLabel(codexUsage.rateLimits.secondary.windowDurationMins).title}
|
||||
subtitle={
|
||||
getWindowLabel(codexUsage.rateLimits.secondary.windowDurationMins).subtitle
|
||||
}
|
||||
percentage={codexUsage.rateLimits.secondary.usedPercent}
|
||||
resetText={formatResetTime(codexUsage.rateLimits.secondary.resetsAt)}
|
||||
stale={isStale}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Plan Type */}
|
||||
{codexUsage.rateLimits.planType && (
|
||||
<div className="rounded-xl border border-border/40 bg-secondary/20 p-3">
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Plan:{' '}
|
||||
<span className="text-foreground font-medium">
|
||||
{codexUsage.rateLimits.planType.charAt(0).toUpperCase() +
|
||||
codexUsage.rateLimits.planType.slice(1)}
|
||||
</span>
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
) : (
|
||||
<div className="flex flex-col items-center justify-center py-6 text-center">
|
||||
<AlertTriangle className="w-8 h-8 text-yellow-500/80" />
|
||||
<p className="text-sm font-medium mt-3">No usage data available</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Footer */}
|
||||
<div className="flex items-center justify-between px-4 py-2 bg-secondary/10 border-t border-border/50">
|
||||
<a
|
||||
href="https://platform.openai.com/usage"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
className="text-[10px] text-muted-foreground hover:text-foreground flex items-center gap-1 transition-colors"
|
||||
>
|
||||
OpenAI Dashboard <ExternalLink className="w-2.5 h-2.5" />
|
||||
</a>
|
||||
|
||||
<span className="text-[10px] text-muted-foreground">Auto-refreshes every 45s while open</span>
|
||||
</div>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
}
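// Editor's note: illustrative behaviour of the helpers above (values approximate,
// since formatResetTime depends on the current time):
//   getWindowLabel(45)    -> { title: '45min Window', subtitle: 'Rate limit' }
//   getWindowLabel(300)   -> { title: '5h Window',    subtitle: 'Rate limit' }
//   getWindowLabel(10080) -> { title: '7d Window',    subtitle: 'Rate limit' }
//   formatResetTime(Math.floor(Date.now() / 1000) + 30 * 60) -> roughly 'Resets in 30m'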
|
||||
@@ -11,10 +11,10 @@ import {
|
||||
import { Button } from '@/components/ui/button';
|
||||
import { PathInput } from '@/components/ui/path-input';
|
||||
import { Kbd, KbdGroup } from '@/components/ui/kbd';
|
||||
import { getJSON, setJSON } from '@/lib/storage';
|
||||
import { getDefaultWorkspaceDirectory, saveLastProjectDirectory } from '@/lib/workspace-config';
|
||||
import { useOSDetection } from '@/hooks';
|
||||
import { apiPost } from '@/lib/api-fetch';
|
||||
import { useAppStore } from '@/store/app-store';
|
||||
|
||||
interface DirectoryEntry {
|
||||
name: string;
|
||||
@@ -40,28 +40,8 @@ interface FileBrowserDialogProps {
|
||||
initialPath?: string;
|
||||
}
|
||||
|
||||
const RECENT_FOLDERS_KEY = 'file-browser-recent-folders';
|
||||
const MAX_RECENT_FOLDERS = 5;
|
||||
|
||||
function getRecentFolders(): string[] {
|
||||
return getJSON<string[]>(RECENT_FOLDERS_KEY) ?? [];
|
||||
}
|
||||
|
||||
function addRecentFolder(path: string): void {
|
||||
const recent = getRecentFolders();
|
||||
// Remove if already exists, then add to front
|
||||
const filtered = recent.filter((p) => p !== path);
|
||||
const updated = [path, ...filtered].slice(0, MAX_RECENT_FOLDERS);
|
||||
setJSON(RECENT_FOLDERS_KEY, updated);
|
||||
}
|
||||
|
||||
function removeRecentFolder(path: string): string[] {
|
||||
const recent = getRecentFolders();
|
||||
const updated = recent.filter((p) => p !== path);
|
||||
setJSON(RECENT_FOLDERS_KEY, updated);
|
||||
return updated;
|
||||
}
|
||||
|
||||
export function FileBrowserDialog({
|
||||
open,
|
||||
onOpenChange,
|
||||
@@ -78,20 +58,20 @@ export function FileBrowserDialog({
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [error, setError] = useState('');
|
||||
const [warning, setWarning] = useState('');
|
||||
const [recentFolders, setRecentFolders] = useState<string[]>([]);
|
||||
|
||||
// Load recent folders when dialog opens
|
||||
useEffect(() => {
|
||||
if (open) {
|
||||
setRecentFolders(getRecentFolders());
|
||||
}
|
||||
}, [open]);
|
||||
// Use recent folders from app store (synced via API)
|
||||
const recentFolders = useAppStore((s) => s.recentFolders);
|
||||
const setRecentFolders = useAppStore((s) => s.setRecentFolders);
|
||||
const addRecentFolder = useAppStore((s) => s.addRecentFolder);
|
||||
|
||||
const handleRemoveRecent = useCallback((e: React.MouseEvent, path: string) => {
|
||||
e.stopPropagation();
|
||||
const updated = removeRecentFolder(path);
|
||||
setRecentFolders(updated);
|
||||
}, []);
|
||||
const handleRemoveRecent = useCallback(
|
||||
(e: React.MouseEvent, path: string) => {
|
||||
e.stopPropagation();
|
||||
const updated = recentFolders.filter((p) => p !== path);
|
||||
setRecentFolders(updated);
|
||||
},
|
||||
[recentFolders, setRecentFolders]
|
||||
);
|
||||
|
||||
const browseDirectory = useCallback(async (dirPath?: string) => {
|
||||
setLoading(true);
|
||||
|
||||
@@ -5,34 +5,16 @@
|
||||
* Prompts them to either restart the app in a container or reload to try again.
|
||||
*/
|
||||
|
||||
import { useState } from 'react';
|
||||
import { createLogger } from '@automaker/utils/logger';
|
||||
import { ShieldX, RefreshCw, Container, Copy, Check } from 'lucide-react';
|
||||
|
||||
const logger = createLogger('SandboxRejectionScreen');
|
||||
import { ShieldX, RefreshCw } from 'lucide-react';
|
||||
import { Button } from '@/components/ui/button';
|
||||
|
||||
const DOCKER_COMMAND = 'npm run dev:docker';
|
||||
|
||||
export function SandboxRejectionScreen() {
|
||||
const [copied, setCopied] = useState(false);
|
||||
|
||||
const handleReload = () => {
|
||||
// Clear the rejection state and reload
|
||||
sessionStorage.removeItem('automaker-sandbox-denied');
|
||||
window.location.reload();
|
||||
};
|
||||
|
||||
const handleCopy = async () => {
|
||||
try {
|
||||
await navigator.clipboard.writeText(DOCKER_COMMAND);
|
||||
setCopied(true);
|
||||
setTimeout(() => setCopied(false), 2000);
|
||||
} catch (err) {
|
||||
logger.error('Failed to copy:', err);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="min-h-screen bg-background flex items-center justify-center p-4">
|
||||
<div className="max-w-md w-full text-center space-y-6">
|
||||
@@ -49,32 +31,10 @@ export function SandboxRejectionScreen() {
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="bg-muted/50 border border-border rounded-lg p-4 text-left space-y-3">
|
||||
<div className="flex items-start gap-3">
|
||||
<Container className="w-5 h-5 mt-0.5 text-primary flex-shrink-0" />
|
||||
<div className="flex-1 space-y-2">
|
||||
<p className="font-medium text-sm">Run in Docker (Recommended)</p>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
Run Automaker in a containerized sandbox environment:
|
||||
</p>
|
||||
<div className="flex items-center gap-2 bg-background border border-border rounded-lg p-2">
|
||||
<code className="flex-1 text-sm font-mono px-2">{DOCKER_COMMAND}</code>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
onClick={handleCopy}
|
||||
className="h-8 px-2 hover:bg-muted"
|
||||
>
|
||||
{copied ? (
|
||||
<Check className="w-4 h-4 text-green-500" />
|
||||
) : (
|
||||
<Copy className="w-4 h-4" />
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
For safer operation, consider running Automaker in Docker. See the README for
|
||||
instructions.
|
||||
</p>
|
||||
|
||||
<div className="pt-2">
|
||||
<Button
|
||||
|
||||
@@ -6,10 +6,7 @@
|
||||
*/
|
||||
|
||||
import { useState } from 'react';
|
||||
import { createLogger } from '@automaker/utils/logger';
|
||||
import { ShieldAlert, Copy, Check } from 'lucide-react';
|
||||
|
||||
const logger = createLogger('SandboxRiskDialog');
|
||||
import { ShieldAlert } from 'lucide-react';
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
@@ -28,10 +25,7 @@ interface SandboxRiskDialogProps {
|
||||
onDeny: () => void;
|
||||
}
|
||||
|
||||
const DOCKER_COMMAND = 'npm run dev:docker';
|
||||
|
||||
export function SandboxRiskDialog({ open, onConfirm, onDeny }: SandboxRiskDialogProps) {
|
||||
const [copied, setCopied] = useState(false);
|
||||
const [skipInFuture, setSkipInFuture] = useState(false);
|
||||
|
||||
const handleConfirm = () => {
|
||||
@@ -40,16 +34,6 @@ export function SandboxRiskDialog({ open, onConfirm, onDeny }: SandboxRiskDialog
|
||||
setSkipInFuture(false);
|
||||
};
|
||||
|
||||
const handleCopy = async () => {
|
||||
try {
|
||||
await navigator.clipboard.writeText(DOCKER_COMMAND);
|
||||
setCopied(true);
|
||||
setTimeout(() => setCopied(false), 2000);
|
||||
} catch (err) {
|
||||
logger.error('Failed to copy:', err);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<Dialog open={open} onOpenChange={() => {}}>
|
||||
<DialogContent
|
||||
@@ -81,26 +65,10 @@ export function SandboxRiskDialog({ open, onConfirm, onDeny }: SandboxRiskDialog
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div className="space-y-2">
|
||||
<p className="text-sm text-muted-foreground">
|
||||
For safer operation, consider running Automaker in Docker:
|
||||
</p>
|
||||
<div className="flex items-center gap-2 bg-muted/50 border border-border rounded-lg p-2">
|
||||
<code className="flex-1 text-sm font-mono px-2">{DOCKER_COMMAND}</code>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
onClick={handleCopy}
|
||||
className="h-8 px-2 hover:bg-muted"
|
||||
>
|
||||
{copied ? (
|
||||
<Check className="w-4 h-4 text-green-500" />
|
||||
) : (
|
||||
<Copy className="w-4 h-4" />
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
For safer operation, consider running Automaker in Docker. See the README for
|
||||
instructions.
|
||||
</p>
|
||||
</div>
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
|
||||
@@ -1,13 +1,30 @@
|
||||
import type { NavigateOptions } from '@tanstack/react-router';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { useOSDetection } from '@/hooks/use-os-detection';
|
||||
|
||||
interface AutomakerLogoProps {
|
||||
sidebarOpen: boolean;
|
||||
navigate: (opts: NavigateOptions) => void;
|
||||
}
|
||||
|
||||
function getOSAbbreviation(os: string): string {
|
||||
switch (os) {
|
||||
case 'mac':
|
||||
return 'M';
|
||||
case 'windows':
|
||||
return 'W';
|
||||
case 'linux':
|
||||
return 'L';
|
||||
default:
|
||||
return '?';
|
||||
}
|
||||
}
|
||||
|
||||
export function AutomakerLogo({ sidebarOpen, navigate }: AutomakerLogoProps) {
|
||||
const appVersion = typeof __APP_VERSION__ !== 'undefined' ? __APP_VERSION__ : '0.0.0';
|
||||
const { os } = useOSDetection();
|
||||
const appMode = import.meta.env.VITE_APP_MODE || '?';
|
||||
const versionSuffix = `${getOSAbbreviation(os)}${appMode}`;
|
||||
|
||||
return (
|
||||
<div
|
||||
@@ -18,57 +35,64 @@ export function AutomakerLogo({ sidebarOpen, navigate }: AutomakerLogoProps) {
|
||||
onClick={() => navigate({ to: '/' })}
|
||||
data-testid="logo-button"
|
||||
>
|
||||
{!sidebarOpen ? (
|
||||
<div className="relative flex flex-col items-center justify-center rounded-lg gap-0.5">
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
viewBox="0 0 256 256"
|
||||
role="img"
|
||||
aria-label="Automaker Logo"
|
||||
className="size-8 group-hover:rotate-12 transition-transform duration-300 ease-out"
|
||||
>
|
||||
<defs>
|
||||
<linearGradient
|
||||
id="bg-collapsed"
|
||||
x1="0"
|
||||
y1="0"
|
||||
x2="256"
|
||||
y2="256"
|
||||
gradientUnits="userSpaceOnUse"
|
||||
>
|
||||
<stop offset="0%" style={{ stopColor: 'var(--brand-400)' }} />
|
||||
<stop offset="100%" style={{ stopColor: 'var(--brand-600)' }} />
|
||||
</linearGradient>
|
||||
<filter id="iconShadow-collapsed" x="-20%" y="-20%" width="140%" height="140%">
|
||||
<feDropShadow
|
||||
dx="0"
|
||||
dy="4"
|
||||
stdDeviation="4"
|
||||
floodColor="#000000"
|
||||
floodOpacity="0.25"
|
||||
/>
|
||||
</filter>
|
||||
</defs>
|
||||
<rect x="16" y="16" width="224" height="224" rx="56" fill="url(#bg-collapsed)" />
|
||||
<g
|
||||
fill="none"
|
||||
stroke="#FFFFFF"
|
||||
strokeWidth="20"
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
filter="url(#iconShadow-collapsed)"
|
||||
{/* Collapsed logo - shown when sidebar is closed OR on small screens when sidebar is open */}
|
||||
<div
|
||||
className={cn(
|
||||
'relative flex flex-col items-center justify-center rounded-lg gap-0.5',
|
||||
sidebarOpen ? 'flex lg:hidden' : 'flex'
|
||||
)}
|
||||
>
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
viewBox="0 0 256 256"
|
||||
role="img"
|
||||
aria-label="Automaker Logo"
|
||||
className="size-8 group-hover:rotate-12 transition-transform duration-300 ease-out"
|
||||
>
|
||||
<defs>
|
||||
<linearGradient
|
||||
id="bg-collapsed"
|
||||
x1="0"
|
||||
y1="0"
|
||||
x2="256"
|
||||
y2="256"
|
||||
gradientUnits="userSpaceOnUse"
|
||||
>
|
||||
<path d="M92 92 L52 128 L92 164" />
|
||||
<path d="M144 72 L116 184" />
|
||||
<path d="M164 92 L204 128 L164 164" />
|
||||
</g>
|
||||
</svg>
|
||||
<span className="text-[0.625rem] text-muted-foreground leading-none font-medium">
|
||||
v{appVersion}
|
||||
</span>
|
||||
</div>
|
||||
) : (
|
||||
<div className={cn('flex flex-col', 'hidden lg:flex')}>
|
||||
<stop offset="0%" style={{ stopColor: 'var(--brand-400)' }} />
|
||||
<stop offset="100%" style={{ stopColor: 'var(--brand-600)' }} />
|
||||
</linearGradient>
|
||||
<filter id="iconShadow-collapsed" x="-20%" y="-20%" width="140%" height="140%">
|
||||
<feDropShadow
|
||||
dx="0"
|
||||
dy="4"
|
||||
stdDeviation="4"
|
||||
floodColor="#000000"
|
||||
floodOpacity="0.25"
|
||||
/>
|
||||
</filter>
|
||||
</defs>
|
||||
<rect x="16" y="16" width="224" height="224" rx="56" fill="url(#bg-collapsed)" />
|
||||
<g
|
||||
fill="none"
|
||||
stroke="#FFFFFF"
|
||||
strokeWidth="20"
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
filter="url(#iconShadow-collapsed)"
|
||||
>
|
||||
<path d="M92 92 L52 128 L92 164" />
|
||||
<path d="M144 72 L116 184" />
|
||||
<path d="M164 92 L204 128 L164 164" />
|
||||
</g>
|
||||
</svg>
|
||||
<span className="text-[0.625rem] text-muted-foreground leading-none font-medium">
|
||||
v{appVersion} {versionSuffix}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
{/* Expanded logo - only shown when sidebar is open on large screens */}
|
||||
{sidebarOpen && (
|
||||
<div className="hidden lg:flex flex-col">
|
||||
<div className="flex items-center gap-1">
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
@@ -118,7 +142,7 @@ export function AutomakerLogo({ sidebarOpen, navigate }: AutomakerLogoProps) {
|
||||
</span>
|
||||
</div>
|
||||
<span className="text-[0.625rem] text-muted-foreground leading-none font-medium ml-[38.8px]">
|
||||
v{appVersion}
|
||||
v{appVersion} {versionSuffix}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -21,8 +21,10 @@ export function SidebarHeader({ sidebarOpen, navigate }: SidebarHeaderProps) {
|
||||
'bg-gradient-to-b from-transparent to-background/5',
|
||||
'flex items-center',
|
||||
sidebarOpen ? 'px-3 lg:px-5 justify-start' : 'px-3 justify-center',
|
||||
// Add left padding on macOS to avoid overlapping with traffic light buttons
|
||||
isMac && 'pt-4 pl-20'
|
||||
// Add left padding on macOS to avoid overlapping with traffic light buttons (only when expanded)
|
||||
isMac && sidebarOpen && 'pt-4 pl-20',
|
||||
// Smaller top padding on macOS when collapsed
|
||||
isMac && !sidebarOpen && 'pt-4'
|
||||
)}
|
||||
>
|
||||
<AutomakerLogo sidebarOpen={sidebarOpen} navigate={navigate} />
|
||||
|
||||
@@ -52,7 +52,8 @@ export function SidebarNavigation({
|
||||
<button
|
||||
key={item.id}
|
||||
onClick={() => {
|
||||
navigate({ to: `/${item.id}` as const });
|
||||
// Cast to the router's path type; item.id is constrained to known routes
|
||||
navigate({ to: `/${item.id}` as unknown as '/' });
|
||||
}}
|
||||
className={cn(
|
||||
'group flex items-center w-full px-3 py-2.5 rounded-xl relative overflow-hidden titlebar-no-drag',
|
||||
|
||||
@@ -254,7 +254,8 @@ export function useNavigation({
|
||||
if (item.shortcut) {
|
||||
shortcutsList.push({
|
||||
key: item.shortcut,
|
||||
action: () => navigate({ to: `/${item.id}` as const }),
|
||||
// Cast to router path type; ids are constrained to known routes
|
||||
action: () => navigate({ to: `/${item.id}` as unknown as '/' }),
|
||||
description: `Navigate to ${item.label}`,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -132,6 +132,9 @@ export function useProjectCreation({
|
||||
const api = getElectronAPI();
|
||||
|
||||
// Clone template repository
|
||||
if (!api.templates) {
|
||||
throw new Error('Templates API is not available');
|
||||
}
|
||||
const cloneResult = await api.templates.clone(template.repoUrl, projectName, parentDir);
|
||||
if (!cloneResult.success) {
|
||||
throw new Error(cloneResult.error || 'Failed to clone template');
|
||||
@@ -204,6 +207,9 @@ export function useProjectCreation({
|
||||
const api = getElectronAPI();
|
||||
|
||||
// Clone custom repository
|
||||
if (!api.templates) {
|
||||
throw new Error('Templates API is not available');
|
||||
}
|
||||
const cloneResult = await api.templates.clone(repoUrl, projectName, parentDir);
|
||||
if (!cloneResult.success) {
|
||||
throw new Error(cloneResult.error || 'Failed to clone repository');
|
||||
|
||||
@@ -42,6 +42,9 @@ export function useSpecRegeneration({
|
||||
}
|
||||
|
||||
if (event.type === 'spec_regeneration_complete') {
|
||||
// Only show toast if we're in active creation flow (not regular regeneration)
|
||||
const isCreationFlow = creatingSpecProjectPath !== null;
|
||||
|
||||
setSpecCreatingForProject(null);
|
||||
setShowSetupDialog(false);
|
||||
setProjectOverview('');
|
||||
@@ -49,9 +52,12 @@ export function useSpecRegeneration({
|
||||
// Clear onboarding state if we came from onboarding
|
||||
setNewProjectName('');
|
||||
setNewProjectPath('');
|
||||
toast.success('App specification created', {
|
||||
description: 'Your project is now set up and ready to go!',
|
||||
});
|
||||
|
||||
if (isCreationFlow) {
|
||||
toast.success('App specification created', {
|
||||
description: 'Your project is now set up and ready to go!',
|
||||
});
|
||||
}
|
||||
} else if (event.type === 'spec_regeneration_error') {
|
||||
setSpecCreatingForProject(null);
|
||||
toast.error('Failed to create specification', {
|
||||
|
||||
apps/ui/src/components/ui/provider-icon.tsx (new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
import type { ComponentType, SVGProps } from 'react';
|
||||
import { Cpu } from 'lucide-react';
|
||||
import { cn } from '@/lib/utils';
|
||||
import type { AgentModel, ModelProvider } from '@automaker/types';
|
||||
import { getProviderFromModel } from '@/lib/utils';
|
||||
|
||||
const PROVIDER_ICON_KEYS = {
|
||||
anthropic: 'anthropic',
|
||||
openai: 'openai',
|
||||
cursor: 'cursor',
|
||||
gemini: 'gemini',
|
||||
grok: 'grok',
|
||||
} as const;
|
||||
|
||||
type ProviderIconKey = keyof typeof PROVIDER_ICON_KEYS;
|
||||
|
||||
interface ProviderIconDefinition {
|
||||
viewBox: string;
|
||||
path: string;
|
||||
}
|
||||
|
||||
const PROVIDER_ICON_DEFINITIONS: Record<ProviderIconKey, ProviderIconDefinition> = {
|
||||
anthropic: {
|
||||
viewBox: '0 0 248 248',
|
||||
// Official Claude logo from claude.ai favicon
|
||||
path: 'M52.4285 162.873L98.7844 136.879L99.5485 134.602L98.7844 133.334H96.4921L88.7237 132.862L62.2346 132.153L39.3113 131.207L17.0249 130.026L11.4214 128.844L6.2 121.873L6.7094 118.447L11.4214 115.257L18.171 115.847L33.0711 116.911L55.485 118.447L71.6586 119.392L95.728 121.873H99.5485L100.058 120.337L98.7844 119.392L97.7656 118.447L74.5877 102.732L49.4995 86.1905L36.3823 76.62L29.3779 71.7757L25.8121 67.2858L24.2839 57.3608L30.6515 50.2716L39.3113 50.8623L41.4763 51.4531L50.2636 58.1879L68.9842 72.7209L93.4357 90.6804L97.0015 93.6343L98.4374 92.6652L98.6571 91.9801L97.0015 89.2625L83.757 65.2772L69.621 40.8192L63.2534 30.6579L61.5978 24.632C60.9565 22.1032 60.579 20.0111 60.579 17.4246L67.8381 7.49965L71.9133 6.19995L81.7193 7.49965L85.7946 11.0443L91.9074 24.9865L101.714 46.8451L116.996 76.62L121.453 85.4816L123.873 93.6343L124.764 96.1155H126.292V94.6976L127.566 77.9197L129.858 57.3608L132.15 30.8942L132.915 23.4505L136.608 14.4708L143.994 9.62643L149.725 12.344L154.437 19.0788L153.8 23.4505L150.998 41.6463L145.522 70.1215L141.957 89.2625H143.994L146.414 86.7813L156.093 74.0206L172.266 53.698L179.398 45.6635L187.803 36.802L193.152 32.5484H203.34L210.726 43.6549L207.415 55.1159L196.972 68.3492L188.312 79.5739L175.896 96.2095L168.191 109.585L168.882 110.689L170.738 110.53L198.755 104.504L213.91 101.787L231.994 98.7149L240.144 102.496L241.036 106.395L237.852 114.311L218.495 119.037L195.826 123.645L162.07 131.592L161.696 131.893L162.137 132.547L177.36 133.925L183.855 134.279H199.774L229.447 136.524L237.215 141.605L241.8 147.867L241.036 152.711L229.065 158.737L213.019 154.956L175.45 145.977L162.587 142.787H160.805V143.85L171.502 154.366L191.242 172.089L215.82 195.011L217.094 200.682L213.91 205.172L210.599 204.699L188.949 188.394L180.544 181.069L161.696 165.118H160.422V166.772L164.752 173.152L187.803 207.771L188.949 218.405L187.294 221.832L181.308 223.959L174.813 222.777L161.187 203.754L147.305 182.486L136.098 163.345L134.745 164.2L128.075 235.42L125.019 239.082L117.887 241.8L111.902 237.31L108.718 229.984L111.902 215.452L115.722 196.547L118.779 181.541L121.58 162.873L123.291 156.636L123.14 156.219L121.773 156.449L107.699 175.752L86.304 204.699L69.3663 222.777L65.291 224.431L58.2867 220.768L58.9235 214.27L62.8713 208.48L86.304 178.705L100.44 160.155L109.551 149.507L109.462 147.967L108.959 147.924L46.6977 188.512L35.6182 189.93L30.7788 185.44L31.4156 178.115L33.7079 175.752L52.4285 162.873Z',
|
||||
},
|
||||
openai: {
|
||||
viewBox: '0 0 158.7128 157.296',
|
||||
path: 'M60.8734,57.2556v-14.9432c0-1.2586.4722-2.2029,1.5728-2.8314l30.0443-17.3023c4.0899-2.3593,8.9662-3.4599,13.9988-3.4599,18.8759,0,30.8307,14.6289,30.8307,30.2006,0,1.1007,0,2.3593-.158,3.6178l-31.1446-18.2467c-1.8872-1.1006-3.7754-1.1006-5.6629,0l-39.4812,22.9651ZM131.0276,115.4561v-35.7074c0-2.2028-.9446-3.7756-2.8318-4.8763l-39.481-22.9651,12.8982-7.3934c1.1007-.6285,2.0453-.6285,3.1458,0l30.0441,17.3024c8.6523,5.0341,14.4708,15.7296,14.4708,26.1107,0,11.9539-7.0769,22.965-18.2461,27.527v.0021ZM51.593,83.9964l-12.8982-7.5497c-1.1007-.6285-1.5728-1.5728-1.5728-2.8314v-34.6048c0-16.8303,12.8982-29.5722,30.3585-29.5722,6.607,0,12.7403,2.2029,17.9324,6.1349l-30.987,17.9324c-1.8871,1.1007-2.8314,2.6735-2.8314,4.8764v45.6159l-.0014-.0015ZM79.3562,100.0403l-18.4829-10.3811v-22.0209l18.4829-10.3811,18.4812,10.3811v22.0209l-18.4812,10.3811ZM91.2319,147.8591c-6.607,0-12.7403-2.2031-17.9324-6.1344l30.9866-17.9333c1.8872-1.1005,2.8318-2.6728,2.8318-4.8759v-45.616l13.0564,7.5498c1.1005.6285,1.5723,1.5728,1.5723,2.8314v34.6051c0,16.8297-13.0564,29.5723-30.5147,29.5723v.001ZM53.9522,112.7822l-30.0443-17.3024c-8.652-5.0343-14.471-15.7296-14.471-26.1107,0-12.1119,7.2356-22.9652,18.403-27.5272v35.8634c0,2.2028.9443,3.7756,2.8314,4.8763l39.3248,22.8068-12.8982,7.3938c-1.1007.6287-2.045.6287-3.1456,0ZM52.2229,138.5791c-17.7745,0-30.8306-13.3713-30.8306-29.8871,0-1.2585.1578-2.5169.3143-3.7754l30.987,17.9323c1.8871,1.1005,3.7757,1.1005,5.6628,0l39.4811-22.807v14.9435c0,1.2585-.4721,2.2021-1.5728,2.8308l-30.0443,17.3025c-4.0898,2.359-8.9662,3.4605-13.9989,3.4605h.0014ZM91.2319,157.296c19.0327,0,34.9188-13.5272,38.5383-31.4594,17.6164-4.562,28.9425-21.0779,28.9425-37.908,0-11.0112-4.719-21.7066-13.2133-29.4143.7867-3.3035,1.2595-6.607,1.2595-9.909,0-22.4929-18.2471-39.3247-39.3251-39.3247-4.2461,0-8.3363.6285-12.4262,2.045-7.0792-6.9213-16.8318-11.3254-27.5271-11.3254-19.0331,0-34.9191,13.5268-38.5384,31.4591C11.3255,36.0212,0,52.5373,0,69.3675c0,11.0112,4.7184,21.7065,13.2125,29.4142-.7865,3.3035-1.2586,6.6067-1.2586,9.9092,0,22.4923,18.2466,39.3241,39.3248,39.3241,4.2462,0,8.3362-.6277,12.426-2.0441,7.0776,6.921,16.8302,11.3251,27.5271,11.3251Z',
|
||||
},
|
||||
cursor: {
|
||||
viewBox: '0 0 512 512',
|
||||
// Official Cursor logo - hexagonal shape with triangular wedge
|
||||
path: 'M415.035 156.35l-151.503-87.4695c-4.865-2.8094-10.868-2.8094-15.733 0l-151.4969 87.4695c-4.0897 2.362-6.6146 6.729-6.6146 11.459v176.383c0 4.73 2.5249 9.097 6.6146 11.458l151.5039 87.47c4.865 2.809 10.868 2.809 15.733 0l151.504-87.47c4.089-2.361 6.614-6.728 6.614-11.458v-176.383c0-4.73-2.525-9.097-6.614-11.459zm-9.516 18.528l-146.255 253.32c-.988 1.707-3.599 1.01-3.599-.967v-165.872c0-3.314-1.771-6.379-4.644-8.044l-143.645-82.932c-1.707-.988-1.01-3.599.968-3.599h292.509c4.154 0 6.75 4.503 4.673 8.101h-.007z',
|
||||
},
|
||||
gemini: {
|
||||
viewBox: '0 0 192 192',
|
||||
// Official Google Gemini sparkle logo from gemini.google.com
|
||||
path: 'M164.93 86.68c-13.56-5.84-25.42-13.84-35.6-24.01-10.17-10.17-18.18-22.04-24.01-35.6-2.23-5.19-4.04-10.54-5.42-16.02C99.45 9.26 97.85 8 96 8s-3.45 1.26-3.9 3.05c-1.38 5.48-3.18 10.81-5.42 16.02-5.84 13.56-13.84 25.43-24.01 35.6-10.17 10.16-22.04 18.17-35.6 24.01-5.19 2.23-10.54 4.04-16.02 5.42C9.26 92.55 8 94.15 8 96s1.26 3.45 3.05 3.9c5.48 1.38 10.81 3.18 16.02 5.42 13.56 5.84 25.42 13.84 35.6 24.01 10.17 10.17 18.18 22.04 24.01 35.6 2.24 5.2 4.04 10.54 5.42 16.02A4.03 4.03 0 0 0 96 184c1.85 0 3.45-1.26 3.9-3.05 1.38-5.48 3.18-10.81 5.42-16.02 5.84-13.56 13.84-25.42 24.01-35.6 10.17-10.17 22.04-18.18 35.6-24.01 5.2-2.24 10.54-4.04 16.02-5.42A4.03 4.03 0 0 0 184 96c0-1.85-1.26-3.45-3.05-3.9-5.48-1.38-10.81-3.18-16.02-5.42',
|
||||
},
|
||||
grok: {
|
||||
viewBox: '0 0 512 509.641',
|
||||
// Official Grok/xAI logo - stylized symbol from grok.com
|
||||
path: 'M213.235 306.019l178.976-180.002v.169l51.695-51.763c-.924 1.32-1.86 2.605-2.785 3.89-39.281 54.164-58.46 80.649-43.07 146.922l-.09-.101c10.61 45.11-.744 95.137-37.398 131.836-46.216 46.306-120.167 56.611-181.063 14.928l42.462-19.675c38.863 15.278 81.392 8.57 111.947-22.03 30.566-30.6 37.432-75.159 22.065-112.252-2.92-7.025-11.67-8.795-17.792-4.263l-124.947 92.341zm-25.786 22.437l-.033.034L68.094 435.217c7.565-10.429 16.957-20.294 26.327-30.149 26.428-27.803 52.653-55.359 36.654-94.302-21.422-52.112-8.952-113.177 30.724-152.898 41.243-41.254 101.98-51.661 152.706-30.758 11.23 4.172 21.016 10.114 28.638 15.639l-42.359 19.584c-39.44-16.563-84.629-5.299-112.207 22.313-37.298 37.308-44.84 102.003-1.128 143.81z',
|
||||
},
|
||||
};
|
||||
|
||||
export interface ProviderIconProps extends Omit<SVGProps<SVGSVGElement>, 'viewBox'> {
|
||||
provider: ProviderIconKey;
|
||||
title?: string;
|
||||
}
|
||||
|
||||
export function ProviderIcon({ provider, title, className, ...props }: ProviderIconProps) {
|
||||
const definition = PROVIDER_ICON_DEFINITIONS[provider];
|
||||
const {
|
||||
role,
|
||||
'aria-label': ariaLabel,
|
||||
'aria-labelledby': ariaLabelledby,
|
||||
'aria-hidden': ariaHidden,
|
||||
...rest
|
||||
} = props;
|
||||
const hasAccessibleLabel = Boolean(title || ariaLabel || ariaLabelledby);
|
||||
|
||||
return (
|
||||
<svg
|
||||
viewBox={definition.viewBox}
|
||||
className={cn('inline-block', className)}
|
||||
role={role ?? (hasAccessibleLabel ? 'img' : 'presentation')}
|
||||
aria-hidden={ariaHidden ?? !hasAccessibleLabel}
|
||||
focusable="false"
|
||||
{...rest}
|
||||
>
|
||||
{title && <title>{title}</title>}
|
||||
<path d={definition.path} fill="currentColor" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
export function AnthropicIcon(props: Omit<ProviderIconProps, 'provider'>) {
|
||||
return <ProviderIcon provider={PROVIDER_ICON_KEYS.anthropic} {...props} />;
|
||||
}
|
||||
|
||||
export function OpenAIIcon(props: Omit<ProviderIconProps, 'provider'>) {
|
||||
return <ProviderIcon provider={PROVIDER_ICON_KEYS.openai} {...props} />;
|
||||
}
|
||||
|
||||
export function CursorIcon(props: Omit<ProviderIconProps, 'provider'>) {
|
||||
return <ProviderIcon provider={PROVIDER_ICON_KEYS.cursor} {...props} />;
|
||||
}
|
||||
|
||||
export function GeminiIcon(props: Omit<ProviderIconProps, 'provider'>) {
|
||||
return <ProviderIcon provider={PROVIDER_ICON_KEYS.gemini} {...props} />;
|
||||
}
|
||||
|
||||
export function GrokIcon(props: Omit<ProviderIconProps, 'provider'>) {
|
||||
return <ProviderIcon provider={PROVIDER_ICON_KEYS.grok} {...props} />;
|
||||
}
|
||||
|
||||
export function OpenCodeIcon({ className, ...props }: { className?: string }) {
|
||||
return <Cpu className={cn('inline-block', className)} {...props} />;
|
||||
}
|
||||
|
||||
export const PROVIDER_ICON_COMPONENTS: Record<
|
||||
ModelProvider,
|
||||
ComponentType<{ className?: string }>
|
||||
> = {
|
||||
claude: AnthropicIcon,
|
||||
cursor: CursorIcon, // Default for Cursor provider (will be overridden by getProviderIconForModel)
|
||||
codex: OpenAIIcon,
|
||||
opencode: OpenCodeIcon,
|
||||
};
|
||||
|
||||
/**
|
||||
* Get the underlying model icon based on the model string
|
||||
* For Cursor models, detects whether it's Claude, GPT, Gemini, Grok, or Cursor-specific
|
||||
*/
|
||||
function getUnderlyingModelIcon(model?: AgentModel | string): ProviderIconKey {
|
||||
if (!model) return 'anthropic';
|
||||
|
||||
const modelStr = typeof model === 'string' ? model.toLowerCase() : model;
|
||||
|
||||
// Check for Cursor-specific models with underlying providers
|
||||
if (modelStr.includes('sonnet') || modelStr.includes('opus') || modelStr.includes('claude')) {
|
||||
return 'anthropic';
|
||||
}
|
||||
if (modelStr.includes('gpt-') || modelStr.includes('codex')) {
|
||||
return 'openai';
|
||||
}
|
||||
if (modelStr.includes('gemini')) {
|
||||
return 'gemini';
|
||||
}
|
||||
if (modelStr.includes('grok')) {
|
||||
return 'grok';
|
||||
}
|
||||
if (modelStr.includes('cursor') || modelStr === 'auto' || modelStr === 'composer-1') {
|
||||
return 'cursor';
|
||||
}
|
||||
|
||||
// Default based on provider
|
||||
const provider = getProviderFromModel(model);
|
||||
if (provider === 'codex') return 'openai';
|
||||
if (provider === 'cursor') return 'cursor';
|
||||
return 'anthropic';
|
||||
}
|
||||
|
||||
export function getProviderIconForModel(
|
||||
model?: AgentModel | string
|
||||
): ComponentType<{ className?: string }> {
|
||||
const iconKey = getUnderlyingModelIcon(model);
|
||||
|
||||
const iconMap: Record<ProviderIconKey, ComponentType<{ className?: string }>> = {
|
||||
anthropic: AnthropicIcon,
|
||||
openai: OpenAIIcon,
|
||||
cursor: CursorIcon,
|
||||
gemini: GeminiIcon,
|
||||
grok: GrokIcon,
|
||||
};
|
||||
|
||||
return iconMap[iconKey] || AnthropicIcon;
|
||||
}
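// Editor's note: example model-to-icon resolution via getProviderIconForModel
// (model ids below are illustrative, not taken from the repo's model maps):
//   getProviderIconForModel('claude-sonnet-4') -> AnthropicIcon  (matches 'sonnet'/'claude')
//   getProviderIconForModel('gpt-5.2')         -> OpenAIIcon     (matches 'gpt-')
//   getProviderIconForModel('gemini-2.5-pro')  -> GeminiIcon     (matches 'gemini')
//   getProviderIconForModel('composer-1')      -> CursorIcon     (exact match)
//   getProviderIconForModel(undefined)         -> AnthropicIcon  (default)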
|
||||
@@ -52,10 +52,12 @@ export function TaskProgressPanel({
       }

       const result = await api.features.get(projectPath, featureId);
-      if (result.success && result.feature?.planSpec?.tasks) {
-        const planTasks = result.feature.planSpec.tasks;
-        const currentId = result.feature.planSpec.currentTaskId;
-        const completedCount = result.feature.planSpec.tasksCompleted || 0;
+      const feature: any = (result as any).feature;
+      if (result.success && feature?.planSpec?.tasks) {
+        const planSpec = feature.planSpec as any;
+        const planTasks = planSpec.tasks;
+        const currentId = planSpec.currentTaskId;
+        const completedCount = planSpec.tasksCompleted || 0;

         // Convert planSpec tasks to TaskInfo with proper status
         const initialTasks: TaskInfo[] = planTasks.map((t: any, index: number) => ({
apps/ui/src/components/usage-popover.tsx (new file, 612 lines)
@@ -0,0 +1,612 @@
import { useState, useEffect, useMemo, useCallback } from 'react';
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover';
import { Button } from '@/components/ui/button';
import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs';
import { RefreshCw, AlertTriangle, CheckCircle, XCircle, Clock, ExternalLink } from 'lucide-react';
import { cn } from '@/lib/utils';
import { getElectronAPI } from '@/lib/electron';
import { useAppStore } from '@/store/app-store';
import { useSetupStore } from '@/store/setup-store';
import { AnthropicIcon, OpenAIIcon } from '@/components/ui/provider-icon';

// Error codes for distinguishing failure modes
const ERROR_CODES = {
  API_BRIDGE_UNAVAILABLE: 'API_BRIDGE_UNAVAILABLE',
  AUTH_ERROR: 'AUTH_ERROR',
  NOT_AVAILABLE: 'NOT_AVAILABLE',
  UNKNOWN: 'UNKNOWN',
} as const;

type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];

type UsageError = {
  code: ErrorCode;
  message: string;
};

// Fixed refresh interval (45 seconds)
const REFRESH_INTERVAL_SECONDS = 45;

// Helper to format reset time for Codex
function formatCodexResetTime(unixTimestamp: number): string {
  const date = new Date(unixTimestamp * 1000);
  const now = new Date();
  const diff = date.getTime() - now.getTime();

  if (diff < 3600000) {
    const mins = Math.ceil(diff / 60000);
    return `Resets in ${mins}m`;
  }
  if (diff < 86400000) {
    const hours = Math.floor(diff / 3600000);
    const mins = Math.ceil((diff % 3600000) / 60000);
    return `Resets in ${hours}h ${mins > 0 ? `${mins}m` : ''}`;
  }
  return `Resets ${date.toLocaleDateString()} at ${date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}`;
}

// Helper to format window duration for Codex
function getCodexWindowLabel(durationMins: number): { title: string; subtitle: string } {
  if (durationMins < 60) {
    return { title: `${durationMins}min Window`, subtitle: 'Rate limit' };
  }
  if (durationMins < 1440) {
    const hours = Math.round(durationMins / 60);
    return { title: `${hours}h Window`, subtitle: 'Rate limit' };
  }
  const days = Math.round(durationMins / 1440);
  return { title: `${days}d Window`, subtitle: 'Rate limit' };
}
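// Illustrative note (not part of the diff): worked examples of the two helpers above.
//   getCodexWindowLabel(30)    -> { title: '30min Window', subtitle: 'Rate limit' }
//   getCodexWindowLabel(300)   -> { title: '5h Window',    subtitle: 'Rate limit' }
//   getCodexWindowLabel(10080) -> { title: '7d Window',    subtitle: 'Rate limit' }
//   formatCodexResetTime(t)    -> 'Resets in 45m' when t is 45 minutes away,
//                                 'Resets in 3h 10m' when under 24 hours away,
//                                 'Resets <date> at <time>' beyond that.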
export function UsagePopover() {
  const { claudeUsage, claudeUsageLastUpdated, setClaudeUsage } = useAppStore();
  const { codexUsage, codexUsageLastUpdated, setCodexUsage } = useAppStore();
  const claudeAuthStatus = useSetupStore((state) => state.claudeAuthStatus);
  const codexAuthStatus = useSetupStore((state) => state.codexAuthStatus);

  const [open, setOpen] = useState(false);
  const [activeTab, setActiveTab] = useState<'claude' | 'codex'>('claude');
  const [claudeLoading, setClaudeLoading] = useState(false);
  const [codexLoading, setCodexLoading] = useState(false);
  const [claudeError, setClaudeError] = useState<UsageError | null>(null);
  const [codexError, setCodexError] = useState<UsageError | null>(null);

  // Check authentication status
  const isClaudeCliVerified =
    claudeAuthStatus?.authenticated && claudeAuthStatus?.method === 'cli_authenticated';
  const isCodexAuthenticated = codexAuthStatus?.authenticated;

  // Determine which tab to show by default
  useEffect(() => {
    if (isClaudeCliVerified) {
      setActiveTab('claude');
    } else if (isCodexAuthenticated) {
      setActiveTab('codex');
    }
  }, [isClaudeCliVerified, isCodexAuthenticated]);

  // Check if data is stale (older than 2 minutes)
  const isClaudeStale = useMemo(() => {
    return !claudeUsageLastUpdated || Date.now() - claudeUsageLastUpdated > 2 * 60 * 1000;
  }, [claudeUsageLastUpdated]);

  const isCodexStale = useMemo(() => {
    return !codexUsageLastUpdated || Date.now() - codexUsageLastUpdated > 2 * 60 * 1000;
  }, [codexUsageLastUpdated]);
  const fetchClaudeUsage = useCallback(
    async (isAutoRefresh = false) => {
      if (!isAutoRefresh) setClaudeLoading(true);
      setClaudeError(null);
      try {
        const api = getElectronAPI();
        if (!api.claude) {
          setClaudeError({
            code: ERROR_CODES.API_BRIDGE_UNAVAILABLE,
            message: 'Claude API bridge not available',
          });
          return;
        }
        const data = await api.claude.getUsage();
        if ('error' in data) {
          setClaudeError({
            code: ERROR_CODES.AUTH_ERROR,
            message: data.message || data.error,
          });
          return;
        }
        setClaudeUsage(data);
      } catch (err) {
        setClaudeError({
          code: ERROR_CODES.UNKNOWN,
          message: err instanceof Error ? err.message : 'Failed to fetch usage',
        });
      } finally {
        if (!isAutoRefresh) setClaudeLoading(false);
      }
    },
    [setClaudeUsage]
  );

  const fetchCodexUsage = useCallback(
    async (isAutoRefresh = false) => {
      if (!isAutoRefresh) setCodexLoading(true);
      setCodexError(null);
      try {
        const api = getElectronAPI();
        if (!api.codex) {
          setCodexError({
            code: ERROR_CODES.API_BRIDGE_UNAVAILABLE,
            message: 'Codex API bridge not available',
          });
          return;
        }
        const data = await api.codex.getUsage();
        if ('error' in data) {
          if (
            data.message?.includes('not available') ||
            data.message?.includes('does not provide')
          ) {
            setCodexError({
              code: ERROR_CODES.NOT_AVAILABLE,
              message: data.message || data.error,
            });
          } else {
            setCodexError({
              code: ERROR_CODES.AUTH_ERROR,
              message: data.message || data.error,
            });
          }
          return;
        }
        setCodexUsage(data);
      } catch (err) {
        setCodexError({
          code: ERROR_CODES.UNKNOWN,
          message: err instanceof Error ? err.message : 'Failed to fetch usage',
        });
      } finally {
        if (!isAutoRefresh) setCodexLoading(false);
      }
    },
    [setCodexUsage]
  );
  // Auto-fetch on mount if data is stale
  useEffect(() => {
    if (isClaudeStale && isClaudeCliVerified) {
      fetchClaudeUsage(true);
    }
  }, [isClaudeStale, isClaudeCliVerified, fetchClaudeUsage]);

  useEffect(() => {
    if (isCodexStale && isCodexAuthenticated) {
      fetchCodexUsage(true);
    }
  }, [isCodexStale, isCodexAuthenticated, fetchCodexUsage]);

  // Auto-refresh when popover is open
  useEffect(() => {
    if (!open) return;

    // Fetch based on active tab
    if (activeTab === 'claude' && isClaudeCliVerified) {
      if (!claudeUsage || isClaudeStale) {
        fetchClaudeUsage();
      }
      const intervalId = setInterval(() => {
        fetchClaudeUsage(true);
      }, REFRESH_INTERVAL_SECONDS * 1000);
      return () => clearInterval(intervalId);
    }

    if (activeTab === 'codex' && isCodexAuthenticated) {
      if (!codexUsage || isCodexStale) {
        fetchCodexUsage();
      }
      const intervalId = setInterval(() => {
        fetchCodexUsage(true);
      }, REFRESH_INTERVAL_SECONDS * 1000);
      return () => clearInterval(intervalId);
    }
  }, [
    open,
    activeTab,
    claudeUsage,
    isClaudeStale,
    isClaudeCliVerified,
    codexUsage,
    isCodexStale,
    isCodexAuthenticated,
    fetchClaudeUsage,
    fetchCodexUsage,
  ]);

  // Derived status color/icon helper
  const getStatusInfo = (percentage: number) => {
    if (percentage >= 75) return { color: 'text-red-500', icon: XCircle, bg: 'bg-red-500' };
    if (percentage >= 50)
      return { color: 'text-orange-500', icon: AlertTriangle, bg: 'bg-orange-500' };
    return { color: 'text-green-500', icon: CheckCircle, bg: 'bg-green-500' };
  };

  // Helper component for the progress bar
  const ProgressBar = ({ percentage, colorClass }: { percentage: number; colorClass: string }) => (
    <div className="h-2 w-full bg-secondary/50 rounded-full overflow-hidden">
      <div
        className={cn('h-full transition-all duration-500', colorClass)}
        style={{ width: `${Math.min(percentage, 100)}%` }}
      />
    </div>
  );
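  // Illustrative note (not part of the diff): getStatusInfo drives the per-card icon/color
  // (green below 50%, orange from 50–74%, red at 75%+), while getProgressBarColor further
  // down uses slightly different cutoffs (yellow from 50%, red at 80%) for the compact bar
  // shown in the popover trigger button.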
  const UsageCard = ({
    title,
    subtitle,
    percentage,
    resetText,
    isPrimary = false,
    stale = false,
  }: {
    title: string;
    subtitle: string;
    percentage: number;
    resetText?: string;
    isPrimary?: boolean;
    stale?: boolean;
  }) => {
    const isValidPercentage =
      typeof percentage === 'number' && !isNaN(percentage) && isFinite(percentage);
    const safePercentage = isValidPercentage ? percentage : 0;

    const status = getStatusInfo(safePercentage);
    const StatusIcon = status.icon;

    return (
      <div
        className={cn(
          'rounded-xl border bg-card/50 p-4 transition-opacity',
          isPrimary ? 'border-border/60 shadow-sm' : 'border-border/40',
          (stale || !isValidPercentage) && 'opacity-50'
        )}
      >
        <div className="flex items-start justify-between mb-3">
          <div>
            <h4 className={cn('font-semibold', isPrimary ? 'text-sm' : 'text-xs')}>{title}</h4>
            <p className="text-[10px] text-muted-foreground">{subtitle}</p>
          </div>
          {isValidPercentage ? (
            <div className="flex items-center gap-1.5">
              <StatusIcon className={cn('w-3.5 h-3.5', status.color)} />
              <span
                className={cn(
                  'font-mono font-bold',
                  status.color,
                  isPrimary ? 'text-base' : 'text-sm'
                )}
              >
                {Math.round(safePercentage)}%
              </span>
            </div>
          ) : (
            <span className="text-xs text-muted-foreground">N/A</span>
          )}
        </div>
        <ProgressBar
          percentage={safePercentage}
          colorClass={isValidPercentage ? status.bg : 'bg-muted-foreground/30'}
        />
        {resetText && (
          <div className="mt-2 flex justify-end">
            <p className="text-xs text-muted-foreground flex items-center gap-1">
              <Clock className="w-3 h-3" />
              {resetText}
            </p>
          </div>
        )}
      </div>
    );
  };
  // Calculate max percentage for header button
  const claudeMaxPercentage = claudeUsage
    ? Math.max(claudeUsage.sessionPercentage || 0, claudeUsage.weeklyPercentage || 0)
    : 0;

  const codexMaxPercentage = codexUsage?.rateLimits
    ? Math.max(
        codexUsage.rateLimits.primary?.usedPercent || 0,
        codexUsage.rateLimits.secondary?.usedPercent || 0
      )
    : 0;

  const maxPercentage = Math.max(claudeMaxPercentage, codexMaxPercentage);
  const isStale = activeTab === 'claude' ? isClaudeStale : isCodexStale;

  const getProgressBarColor = (percentage: number) => {
    if (percentage >= 80) return 'bg-red-500';
    if (percentage >= 50) return 'bg-yellow-500';
    return 'bg-green-500';
  };

  const trigger = (
    <Button variant="ghost" size="sm" className="h-9 gap-3 bg-secondary border border-border px-3">
      <span className="text-sm font-medium">Usage</span>
      {(claudeUsage || codexUsage) && (
        <div
          className={cn(
            'h-1.5 w-16 bg-muted-foreground/20 rounded-full overflow-hidden transition-opacity',
            isStale && 'opacity-60'
          )}
        >
          <div
            className={cn('h-full transition-all duration-500', getProgressBarColor(maxPercentage))}
            style={{ width: `${Math.min(maxPercentage, 100)}%` }}
          />
        </div>
      )}
    </Button>
  );
  // Determine which tabs to show
  const showClaudeTab = isClaudeCliVerified;
  const showCodexTab = isCodexAuthenticated;

  return (
    <Popover open={open} onOpenChange={setOpen}>
      <PopoverTrigger asChild>{trigger}</PopoverTrigger>
      <PopoverContent
        className="w-80 p-0 overflow-hidden bg-background/95 backdrop-blur-xl border-border shadow-2xl"
        align="end"
        sideOffset={8}
      >
        <Tabs value={activeTab} onValueChange={(v) => setActiveTab(v as 'claude' | 'codex')}>
          {/* Tabs Header */}
          {showClaudeTab && showCodexTab && (
            <TabsList className="grid w-full grid-cols-2 rounded-none border-b border-border/50">
              <TabsTrigger value="claude" className="gap-2">
                <AnthropicIcon className="w-3.5 h-3.5" />
                Claude
              </TabsTrigger>
              <TabsTrigger value="codex" className="gap-2">
                <OpenAIIcon className="w-3.5 h-3.5" />
                Codex
              </TabsTrigger>
            </TabsList>
          )}

          {/* Claude Tab Content */}
          <TabsContent value="claude" className="m-0">
            {/* Header */}
            <div className="flex items-center justify-between px-4 py-3 border-b border-border/50 bg-secondary/10">
              <div className="flex items-center gap-2">
                <AnthropicIcon className="w-4 h-4" />
                <span className="text-sm font-semibold">Claude Usage</span>
              </div>
              {claudeError && (
                <Button
                  variant="ghost"
                  size="icon"
                  className={cn('h-6 w-6', claudeLoading && 'opacity-80')}
                  onClick={() => !claudeLoading && fetchClaudeUsage(false)}
                >
                  <RefreshCw className="w-3.5 h-3.5" />
                </Button>
              )}
            </div>

            {/* Content */}
            <div className="p-4 space-y-4">
              {claudeError ? (
                <div className="flex flex-col items-center justify-center py-6 text-center space-y-3">
                  <AlertTriangle className="w-8 h-8 text-yellow-500/80" />
                  <div className="space-y-1 flex flex-col items-center">
                    <p className="text-sm font-medium">{claudeError.message}</p>
                    <p className="text-xs text-muted-foreground">
                      {claudeError.code === ERROR_CODES.API_BRIDGE_UNAVAILABLE ? (
                        'Ensure the Electron bridge is running or restart the app'
                      ) : (
                        <>
                          Make sure Claude CLI is installed and authenticated via{' '}
                          <code className="font-mono bg-muted px-1 rounded">claude login</code>
                        </>
                      )}
                    </p>
                  </div>
                </div>
              ) : !claudeUsage ? (
                <div className="flex flex-col items-center justify-center py-8 space-y-2">
                  <RefreshCw className="w-6 h-6 animate-spin text-muted-foreground/50" />
                  <p className="text-xs text-muted-foreground">Loading usage data...</p>
                </div>
              ) : (
                <>
                  <UsageCard
                    title="Session Usage"
                    subtitle="5-hour rolling window"
                    percentage={claudeUsage.sessionPercentage}
                    resetText={claudeUsage.sessionResetText}
                    isPrimary={true}
                    stale={isClaudeStale}
                  />

                  <div className="grid grid-cols-2 gap-3">
                    <UsageCard
                      title="Weekly"
                      subtitle="All models"
                      percentage={claudeUsage.weeklyPercentage}
                      resetText={claudeUsage.weeklyResetText}
                      stale={isClaudeStale}
                    />
                    <UsageCard
                      title="Sonnet"
                      subtitle="Weekly"
                      percentage={claudeUsage.sonnetWeeklyPercentage}
                      resetText={claudeUsage.sonnetResetText}
                      stale={isClaudeStale}
                    />
                  </div>

                  {claudeUsage.costLimit && claudeUsage.costLimit > 0 && (
                    <UsageCard
                      title="Extra Usage"
                      subtitle={`${claudeUsage.costUsed ?? 0} / ${claudeUsage.costLimit} ${claudeUsage.costCurrency ?? ''}`}
                      percentage={
                        claudeUsage.costLimit > 0
                          ? ((claudeUsage.costUsed ?? 0) / claudeUsage.costLimit) * 100
                          : 0
                      }
                      stale={isClaudeStale}
                    />
                  )}
                </>
              )}
            </div>

            {/* Footer */}
            <div className="flex items-center justify-between px-4 py-2 bg-secondary/10 border-t border-border/50">
              <a
                href="https://status.claude.com"
                target="_blank"
                rel="noreferrer"
                className="text-[10px] text-muted-foreground hover:text-foreground flex items-center gap-1 transition-colors"
              >
                Claude Status <ExternalLink className="w-2.5 h-2.5" />
              </a>
              <span className="text-[10px] text-muted-foreground">Updates every minute</span>
            </div>
          </TabsContent>
          {/* Codex Tab Content */}
          <TabsContent value="codex" className="m-0">
            {/* Header */}
            <div className="flex items-center justify-between px-4 py-3 border-b border-border/50 bg-secondary/10">
              <div className="flex items-center gap-2">
                <OpenAIIcon className="w-4 h-4" />
                <span className="text-sm font-semibold">Codex Usage</span>
              </div>
              {codexError && codexError.code !== ERROR_CODES.NOT_AVAILABLE && (
                <Button
                  variant="ghost"
                  size="icon"
                  className={cn('h-6 w-6', codexLoading && 'opacity-80')}
                  onClick={() => !codexLoading && fetchCodexUsage(false)}
                >
                  <RefreshCw className="w-3.5 h-3.5" />
                </Button>
              )}
            </div>

            {/* Content */}
            <div className="p-4 space-y-4">
              {codexError ? (
                <div className="flex flex-col items-center justify-center py-6 text-center space-y-3">
                  <AlertTriangle className="w-8 h-8 text-yellow-500/80" />
                  <div className="space-y-1 flex flex-col items-center">
                    <p className="text-sm font-medium">
                      {codexError.code === ERROR_CODES.NOT_AVAILABLE
                        ? 'Usage not available'
                        : codexError.message}
                    </p>
                    <p className="text-xs text-muted-foreground">
                      {codexError.code === ERROR_CODES.API_BRIDGE_UNAVAILABLE ? (
                        'Ensure the Electron bridge is running or restart the app'
                      ) : codexError.code === ERROR_CODES.NOT_AVAILABLE ? (
                        <>
                          Codex CLI doesn't provide usage statistics. Check{' '}
                          <a
                            href="https://platform.openai.com/usage"
                            target="_blank"
                            rel="noreferrer"
                            className="underline hover:text-foreground"
                          >
                            OpenAI dashboard
                          </a>{' '}
                          for usage details.
                        </>
                      ) : (
                        <>
                          Make sure Codex CLI is installed and authenticated via{' '}
                          <code className="font-mono bg-muted px-1 rounded">codex login</code>
                        </>
                      )}
                    </p>
                  </div>
                </div>
              ) : !codexUsage ? (
                <div className="flex flex-col items-center justify-center py-8 space-y-2">
                  <RefreshCw className="w-6 h-6 animate-spin text-muted-foreground/50" />
                  <p className="text-xs text-muted-foreground">Loading usage data...</p>
                </div>
              ) : codexUsage.rateLimits ? (
                <>
                  {codexUsage.rateLimits.primary && (
                    <UsageCard
                      title={
                        getCodexWindowLabel(codexUsage.rateLimits.primary.windowDurationMins).title
                      }
                      subtitle={
                        getCodexWindowLabel(codexUsage.rateLimits.primary.windowDurationMins)
                          .subtitle
                      }
                      percentage={codexUsage.rateLimits.primary.usedPercent}
                      resetText={formatCodexResetTime(codexUsage.rateLimits.primary.resetsAt)}
                      isPrimary={true}
                      stale={isCodexStale}
                    />
                  )}

                  {codexUsage.rateLimits.secondary && (
                    <UsageCard
                      title={
                        getCodexWindowLabel(codexUsage.rateLimits.secondary.windowDurationMins)
                          .title
                      }
                      subtitle={
                        getCodexWindowLabel(codexUsage.rateLimits.secondary.windowDurationMins)
                          .subtitle
                      }
                      percentage={codexUsage.rateLimits.secondary.usedPercent}
                      resetText={formatCodexResetTime(codexUsage.rateLimits.secondary.resetsAt)}
                      stale={isCodexStale}
                    />
                  )}

                  {codexUsage.rateLimits.planType && (
                    <div className="rounded-xl border border-border/40 bg-secondary/20 p-3">
                      <p className="text-xs text-muted-foreground">
                        Plan:{' '}
                        <span className="text-foreground font-medium">
                          {codexUsage.rateLimits.planType.charAt(0).toUpperCase() +
                            codexUsage.rateLimits.planType.slice(1)}
                        </span>
                      </p>
                    </div>
                  )}
                </>
              ) : (
                <div className="flex flex-col items-center justify-center py-6 text-center">
                  <AlertTriangle className="w-8 h-8 text-yellow-500/80" />
                  <p className="text-sm font-medium mt-3">No usage data available</p>
                </div>
              )}
            </div>

            {/* Footer */}
            <div className="flex items-center justify-between px-4 py-2 bg-secondary/10 border-t border-border/50">
              <a
                href="https://platform.openai.com/usage"
                target="_blank"
                rel="noreferrer"
                className="text-[10px] text-muted-foreground hover:text-foreground flex items-center gap-1 transition-colors"
              >
                OpenAI Dashboard <ExternalLink className="w-2.5 h-2.5" />
              </a>
              <span className="text-[10px] text-muted-foreground">Updates every minute</span>
            </div>
          </TabsContent>
        </Tabs>
      </PopoverContent>
    </Popover>
  );
}
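A minimal mounting sketch for the popover above (illustrative only, not part of the diff; HeaderToolbar is a made-up host component, and the import path assumes the repo's usual '@/' alias for apps/ui/src):

// Illustrative sketch, not part of the diff. While the popover is open, usage is re-fetched
// every REFRESH_INTERVAL_SECONDS (45 s); cached data older than 2 minutes is treated as stale.
import { UsagePopover } from '@/components/usage-popover';

export function HeaderToolbar() {
  return (
    <div className="flex items-center gap-2">
      {/* ...other header controls... */}
      <UsagePopover />
    </div>
  );
}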
Some files were not shown because too many files have changed in this diff.