fixing file uploads on context page

commit e2718b37e3 (parent 17c69ea1ca)
Test User
2025-12-21 23:44:26 -05:00
22 changed files with 2808 additions and 1693 deletions

View File

@@ -6,53 +6,54 @@
* In web mode, this server runs on a remote host.
*/
import express from "express";
import cors from "cors";
import morgan from "morgan";
import { WebSocketServer, WebSocket } from "ws";
import { createServer } from "http";
import dotenv from "dotenv";
import express from 'express';
import cors from 'cors';
import morgan from 'morgan';
import { WebSocketServer, WebSocket } from 'ws';
import { createServer } from 'http';
import dotenv from 'dotenv';
import { createEventEmitter, type EventEmitter } from "./lib/events.js";
import { initAllowedPaths } from "@automaker/platform";
import { authMiddleware, getAuthStatus } from "./lib/auth.js";
import { createFsRoutes } from "./routes/fs/index.js";
import { createHealthRoutes } from "./routes/health/index.js";
import { createAgentRoutes } from "./routes/agent/index.js";
import { createSessionsRoutes } from "./routes/sessions/index.js";
import { createFeaturesRoutes } from "./routes/features/index.js";
import { createAutoModeRoutes } from "./routes/auto-mode/index.js";
import { createEnhancePromptRoutes } from "./routes/enhance-prompt/index.js";
import { createWorktreeRoutes } from "./routes/worktree/index.js";
import { createGitRoutes } from "./routes/git/index.js";
import { createSetupRoutes } from "./routes/setup/index.js";
import { createSuggestionsRoutes } from "./routes/suggestions/index.js";
import { createModelsRoutes } from "./routes/models/index.js";
import { createRunningAgentsRoutes } from "./routes/running-agents/index.js";
import { createWorkspaceRoutes } from "./routes/workspace/index.js";
import { createTemplatesRoutes } from "./routes/templates/index.js";
import { createEventEmitter, type EventEmitter } from './lib/events.js';
import { initAllowedPaths } from '@automaker/platform';
import { authMiddleware, getAuthStatus } from './lib/auth.js';
import { createFsRoutes } from './routes/fs/index.js';
import { createHealthRoutes } from './routes/health/index.js';
import { createAgentRoutes } from './routes/agent/index.js';
import { createSessionsRoutes } from './routes/sessions/index.js';
import { createFeaturesRoutes } from './routes/features/index.js';
import { createAutoModeRoutes } from './routes/auto-mode/index.js';
import { createEnhancePromptRoutes } from './routes/enhance-prompt/index.js';
import { createWorktreeRoutes } from './routes/worktree/index.js';
import { createGitRoutes } from './routes/git/index.js';
import { createSetupRoutes } from './routes/setup/index.js';
import { createSuggestionsRoutes } from './routes/suggestions/index.js';
import { createModelsRoutes } from './routes/models/index.js';
import { createRunningAgentsRoutes } from './routes/running-agents/index.js';
import { createWorkspaceRoutes } from './routes/workspace/index.js';
import { createTemplatesRoutes } from './routes/templates/index.js';
import {
createTerminalRoutes,
validateTerminalToken,
isTerminalEnabled,
isTerminalPasswordRequired,
} from "./routes/terminal/index.js";
import { createSettingsRoutes } from "./routes/settings/index.js";
import { AgentService } from "./services/agent-service.js";
import { FeatureLoader } from "./services/feature-loader.js";
import { AutoModeService } from "./services/auto-mode-service.js";
import { getTerminalService } from "./services/terminal-service.js";
import { SettingsService } from "./services/settings-service.js";
import { createSpecRegenerationRoutes } from "./routes/app-spec/index.js";
import { createClaudeRoutes } from "./routes/claude/index.js";
import { ClaudeUsageService } from "./services/claude-usage-service.js";
} from './routes/terminal/index.js';
import { createSettingsRoutes } from './routes/settings/index.js';
import { AgentService } from './services/agent-service.js';
import { FeatureLoader } from './services/feature-loader.js';
import { AutoModeService } from './services/auto-mode-service.js';
import { getTerminalService } from './services/terminal-service.js';
import { SettingsService } from './services/settings-service.js';
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
import { createClaudeRoutes } from './routes/claude/index.js';
import { ClaudeUsageService } from './services/claude-usage-service.js';
import { createContextRoutes } from './routes/context/index.js';
// Load environment variables
dotenv.config();
const PORT = parseInt(process.env.PORT || "3008", 10);
const DATA_DIR = process.env.DATA_DIR || "./data";
const ENABLE_REQUEST_LOGGING = process.env.ENABLE_REQUEST_LOGGING !== "false"; // Default to true
const PORT = parseInt(process.env.PORT || '3008', 10);
const DATA_DIR = process.env.DATA_DIR || './data';
const ENABLE_REQUEST_LOGGING = process.env.ENABLE_REQUEST_LOGGING !== 'false'; // Default to true
// Check for required environment variables
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
@@ -71,7 +72,7 @@ if (!hasAnthropicKey) {
╚═══════════════════════════════════════════════════════════════════════╝
`);
} else {
console.log("[Server] ✓ ANTHROPIC_API_KEY detected (API key auth)");
console.log('[Server] ✓ ANTHROPIC_API_KEY detected (API key auth)');
}
// Initialize security
@@ -83,7 +84,7 @@ const app = express();
// Middleware
// Custom colored logger showing only endpoint and status code (configurable via ENABLE_REQUEST_LOGGING env var)
if (ENABLE_REQUEST_LOGGING) {
morgan.token("status-colored", (req, res) => {
morgan.token('status-colored', (req, res) => {
const status = res.statusCode;
if (status >= 500) return `\x1b[31m${status}\x1b[0m`; // Red for server errors
if (status >= 400) return `\x1b[33m${status}\x1b[0m`; // Yellow for client errors
@@ -92,18 +93,18 @@ if (ENABLE_REQUEST_LOGGING) {
});
app.use(
morgan(":method :url :status-colored", {
skip: (req) => req.url === "/api/health", // Skip health check logs
morgan(':method :url :status-colored', {
skip: (req) => req.url === '/api/health', // Skip health check logs
})
);
}
app.use(
cors({
origin: process.env.CORS_ORIGIN || "*",
origin: process.env.CORS_ORIGIN || '*',
credentials: true,
})
);
app.use(express.json({ limit: "50mb" }));
app.use(express.json({ limit: '50mb' }));
// Create shared event emitter for streaming
const events: EventEmitter = createEventEmitter();
@@ -118,33 +119,34 @@ const claudeUsageService = new ClaudeUsageService();
// Initialize services
(async () => {
await agentService.initialize();
console.log("[Server] Agent service initialized");
console.log('[Server] Agent service initialized');
})();
// Mount API routes - health is unauthenticated for monitoring
app.use("/api/health", createHealthRoutes());
app.use('/api/health', createHealthRoutes());
// Apply authentication to all other routes
app.use("/api", authMiddleware);
app.use('/api', authMiddleware);
app.use("/api/fs", createFsRoutes(events));
app.use("/api/agent", createAgentRoutes(agentService, events));
app.use("/api/sessions", createSessionsRoutes(agentService));
app.use("/api/features", createFeaturesRoutes(featureLoader));
app.use("/api/auto-mode", createAutoModeRoutes(autoModeService));
app.use("/api/enhance-prompt", createEnhancePromptRoutes());
app.use("/api/worktree", createWorktreeRoutes());
app.use("/api/git", createGitRoutes());
app.use("/api/setup", createSetupRoutes());
app.use("/api/suggestions", createSuggestionsRoutes(events));
app.use("/api/models", createModelsRoutes());
app.use("/api/spec-regeneration", createSpecRegenerationRoutes(events));
app.use("/api/running-agents", createRunningAgentsRoutes(autoModeService));
app.use("/api/workspace", createWorkspaceRoutes());
app.use("/api/templates", createTemplatesRoutes());
app.use("/api/terminal", createTerminalRoutes());
app.use("/api/settings", createSettingsRoutes(settingsService));
app.use("/api/claude", createClaudeRoutes(claudeUsageService));
app.use('/api/fs', createFsRoutes(events));
app.use('/api/agent', createAgentRoutes(agentService, events));
app.use('/api/sessions', createSessionsRoutes(agentService));
app.use('/api/features', createFeaturesRoutes(featureLoader));
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
app.use('/api/enhance-prompt', createEnhancePromptRoutes());
app.use('/api/worktree', createWorktreeRoutes());
app.use('/api/git', createGitRoutes());
app.use('/api/setup', createSetupRoutes());
app.use('/api/suggestions', createSuggestionsRoutes(events));
app.use('/api/models', createModelsRoutes());
app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events));
app.use('/api/running-agents', createRunningAgentsRoutes(autoModeService));
app.use('/api/workspace', createWorkspaceRoutes());
app.use('/api/templates', createTemplatesRoutes());
app.use('/api/terminal', createTerminalRoutes());
app.use('/api/settings', createSettingsRoutes(settingsService));
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
app.use('/api/context', createContextRoutes());
// Create HTTP server
const server = createServer(app);
@@ -155,19 +157,16 @@ const terminalWss = new WebSocketServer({ noServer: true });
const terminalService = getTerminalService();
// Handle HTTP upgrade requests manually to route to correct WebSocket server
server.on("upgrade", (request, socket, head) => {
const { pathname } = new URL(
request.url || "",
`http://${request.headers.host}`
);
server.on('upgrade', (request, socket, head) => {
const { pathname } = new URL(request.url || '', `http://${request.headers.host}`);
if (pathname === "/api/events") {
if (pathname === '/api/events') {
wss.handleUpgrade(request, socket, head, (ws) => {
wss.emit("connection", ws, request);
wss.emit('connection', ws, request);
});
} else if (pathname === "/api/terminal/ws") {
} else if (pathname === '/api/terminal/ws') {
terminalWss.handleUpgrade(request, socket, head, (ws) => {
terminalWss.emit("connection", ws, request);
terminalWss.emit('connection', ws, request);
});
} else {
socket.destroy();
@@ -175,8 +174,8 @@ server.on("upgrade", (request, socket, head) => {
});
// Events WebSocket connection handler
wss.on("connection", (ws: WebSocket) => {
console.log("[WebSocket] Client connected");
wss.on('connection', (ws: WebSocket) => {
console.log('[WebSocket] Client connected');
// Subscribe to all events and forward to this client
const unsubscribe = events.subscribe((type, payload) => {
@@ -185,13 +184,13 @@ wss.on("connection", (ws: WebSocket) => {
}
});
ws.on("close", () => {
console.log("[WebSocket] Client disconnected");
ws.on('close', () => {
console.log('[WebSocket] Client disconnected');
unsubscribe();
});
ws.on("error", (error) => {
console.error("[WebSocket] Error:", error);
ws.on('error', (error) => {
console.error('[WebSocket] Error:', error);
unsubscribe();
});
});
@@ -212,184 +211,176 @@ terminalService.onExit((sessionId) => {
});
// Terminal WebSocket connection handler
terminalWss.on(
"connection",
(ws: WebSocket, req: import("http").IncomingMessage) => {
// Parse URL to get session ID and token
const url = new URL(req.url || "", `http://${req.headers.host}`);
const sessionId = url.searchParams.get("sessionId");
const token = url.searchParams.get("token");
terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage) => {
// Parse URL to get session ID and token
const url = new URL(req.url || '', `http://${req.headers.host}`);
const sessionId = url.searchParams.get('sessionId');
const token = url.searchParams.get('token');
console.log(`[Terminal WS] Connection attempt for session: ${sessionId}`);
console.log(`[Terminal WS] Connection attempt for session: ${sessionId}`);
// Check if terminal is enabled
if (!isTerminalEnabled()) {
console.log("[Terminal WS] Terminal is disabled");
ws.close(4003, "Terminal access is disabled");
return;
}
// Check if terminal is enabled
if (!isTerminalEnabled()) {
console.log('[Terminal WS] Terminal is disabled');
ws.close(4003, 'Terminal access is disabled');
return;
}
// Validate token if password is required
if (
isTerminalPasswordRequired() &&
!validateTerminalToken(token || undefined)
) {
console.log("[Terminal WS] Invalid or missing token");
ws.close(4001, "Authentication required");
return;
}
// Validate token if password is required
if (isTerminalPasswordRequired() && !validateTerminalToken(token || undefined)) {
console.log('[Terminal WS] Invalid or missing token');
ws.close(4001, 'Authentication required');
return;
}
if (!sessionId) {
console.log("[Terminal WS] No session ID provided");
ws.close(4002, "Session ID required");
return;
}
if (!sessionId) {
console.log('[Terminal WS] No session ID provided');
ws.close(4002, 'Session ID required');
return;
}
// Check if session exists
const session = terminalService.getSession(sessionId);
if (!session) {
console.log(`[Terminal WS] Session ${sessionId} not found`);
ws.close(4004, "Session not found");
return;
}
// Check if session exists
const session = terminalService.getSession(sessionId);
if (!session) {
console.log(`[Terminal WS] Session ${sessionId} not found`);
ws.close(4004, 'Session not found');
return;
}
console.log(`[Terminal WS] Client connected to session ${sessionId}`);
console.log(`[Terminal WS] Client connected to session ${sessionId}`);
// Track this connection
if (!terminalConnections.has(sessionId)) {
terminalConnections.set(sessionId, new Set());
}
terminalConnections.get(sessionId)!.add(ws);
// Track this connection
if (!terminalConnections.has(sessionId)) {
terminalConnections.set(sessionId, new Set());
}
terminalConnections.get(sessionId)!.add(ws);
// Send initial connection success FIRST
// Send initial connection success FIRST
ws.send(
JSON.stringify({
type: 'connected',
sessionId,
shell: session.shell,
cwd: session.cwd,
})
);
// Send scrollback buffer BEFORE subscribing to prevent race condition
// Also clear pending output buffer to prevent duplicates from throttled flush
const scrollback = terminalService.getScrollbackAndClearPending(sessionId);
if (scrollback && scrollback.length > 0) {
ws.send(
JSON.stringify({
type: "connected",
sessionId,
shell: session.shell,
cwd: session.cwd,
type: 'scrollback',
data: scrollback,
})
);
// Send scrollback buffer BEFORE subscribing to prevent race condition
// Also clear pending output buffer to prevent duplicates from throttled flush
const scrollback = terminalService.getScrollbackAndClearPending(sessionId);
if (scrollback && scrollback.length > 0) {
ws.send(
JSON.stringify({
type: "scrollback",
data: scrollback,
})
);
}
// NOW subscribe to terminal data (after scrollback is sent)
const unsubscribeData = terminalService.onData((sid, data) => {
if (sid === sessionId && ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: "data", data }));
}
});
// Subscribe to terminal exit
const unsubscribeExit = terminalService.onExit((sid, exitCode) => {
if (sid === sessionId && ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: "exit", exitCode }));
ws.close(1000, "Session ended");
}
});
// Handle incoming messages
ws.on("message", (message) => {
try {
const msg = JSON.parse(message.toString());
switch (msg.type) {
case "input":
// Write user input to terminal
terminalService.write(sessionId, msg.data);
break;
case "resize":
// Resize terminal with deduplication and rate limiting
if (msg.cols && msg.rows) {
const now = Date.now();
const lastTime = lastResizeTime.get(sessionId) || 0;
const lastDimensions = lastResizeDimensions.get(sessionId);
// Skip if resized too recently (prevents resize storm during splits)
if (now - lastTime < RESIZE_MIN_INTERVAL_MS) {
break;
}
// Check if dimensions are different from last resize
if (
!lastDimensions ||
lastDimensions.cols !== msg.cols ||
lastDimensions.rows !== msg.rows
) {
// Only suppress output on subsequent resizes, not the first one
// The first resize happens on terminal open and we don't want to drop the initial prompt
const isFirstResize = !lastDimensions;
terminalService.resize(sessionId, msg.cols, msg.rows, !isFirstResize);
lastResizeDimensions.set(sessionId, {
cols: msg.cols,
rows: msg.rows,
});
lastResizeTime.set(sessionId, now);
}
}
break;
case "ping":
// Respond to ping
ws.send(JSON.stringify({ type: "pong" }));
break;
default:
console.warn(`[Terminal WS] Unknown message type: ${msg.type}`);
}
} catch (error) {
console.error("[Terminal WS] Error processing message:", error);
}
});
ws.on("close", () => {
console.log(
`[Terminal WS] Client disconnected from session ${sessionId}`
);
unsubscribeData();
unsubscribeExit();
// Remove from connections tracking
const connections = terminalConnections.get(sessionId);
if (connections) {
connections.delete(ws);
if (connections.size === 0) {
terminalConnections.delete(sessionId);
// DON'T delete lastResizeDimensions/lastResizeTime here!
// The session still exists, and reconnecting clients need to know
// this isn't the "first resize" to prevent duplicate prompts.
// These get cleaned up when the session actually exits.
}
}
});
ws.on("error", (error) => {
console.error(`[Terminal WS] Error on session ${sessionId}:`, error);
unsubscribeData();
unsubscribeExit();
});
}
);
// NOW subscribe to terminal data (after scrollback is sent)
const unsubscribeData = terminalService.onData((sid, data) => {
if (sid === sessionId && ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'data', data }));
}
});
// Subscribe to terminal exit
const unsubscribeExit = terminalService.onExit((sid, exitCode) => {
if (sid === sessionId && ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'exit', exitCode }));
ws.close(1000, 'Session ended');
}
});
// Handle incoming messages
ws.on('message', (message) => {
try {
const msg = JSON.parse(message.toString());
switch (msg.type) {
case 'input':
// Write user input to terminal
terminalService.write(sessionId, msg.data);
break;
case 'resize':
// Resize terminal with deduplication and rate limiting
if (msg.cols && msg.rows) {
const now = Date.now();
const lastTime = lastResizeTime.get(sessionId) || 0;
const lastDimensions = lastResizeDimensions.get(sessionId);
// Skip if resized too recently (prevents resize storm during splits)
if (now - lastTime < RESIZE_MIN_INTERVAL_MS) {
break;
}
// Check if dimensions are different from last resize
if (
!lastDimensions ||
lastDimensions.cols !== msg.cols ||
lastDimensions.rows !== msg.rows
) {
// Only suppress output on subsequent resizes, not the first one
// The first resize happens on terminal open and we don't want to drop the initial prompt
const isFirstResize = !lastDimensions;
terminalService.resize(sessionId, msg.cols, msg.rows, !isFirstResize);
lastResizeDimensions.set(sessionId, {
cols: msg.cols,
rows: msg.rows,
});
lastResizeTime.set(sessionId, now);
}
}
break;
case 'ping':
// Respond to ping
ws.send(JSON.stringify({ type: 'pong' }));
break;
default:
console.warn(`[Terminal WS] Unknown message type: ${msg.type}`);
}
} catch (error) {
console.error('[Terminal WS] Error processing message:', error);
}
});
ws.on('close', () => {
console.log(`[Terminal WS] Client disconnected from session ${sessionId}`);
unsubscribeData();
unsubscribeExit();
// Remove from connections tracking
const connections = terminalConnections.get(sessionId);
if (connections) {
connections.delete(ws);
if (connections.size === 0) {
terminalConnections.delete(sessionId);
// DON'T delete lastResizeDimensions/lastResizeTime here!
// The session still exists, and reconnecting clients need to know
// this isn't the "first resize" to prevent duplicate prompts.
// These get cleaned up when the session actually exits.
}
}
});
ws.on('error', (error) => {
console.error(`[Terminal WS] Error on session ${sessionId}:`, error);
unsubscribeData();
unsubscribeExit();
});
});
// Start server with error handling for port conflicts
const startServer = (port: number) => {
server.listen(port, () => {
const terminalStatus = isTerminalEnabled()
? isTerminalPasswordRequired()
? "enabled (password protected)"
: "enabled"
: "disabled";
? 'enabled (password protected)'
: 'enabled'
: 'disabled';
const portStr = port.toString().padEnd(4);
console.log(`
╔═══════════════════════════════════════════════════════╗
@@ -404,8 +395,8 @@ const startServer = (port: number) => {
`);
});
server.on("error", (error: NodeJS.ErrnoException) => {
if (error.code === "EADDRINUSE") {
server.on('error', (error: NodeJS.ErrnoException) => {
if (error.code === 'EADDRINUSE') {
console.error(`
╔═══════════════════════════════════════════════════════╗
║ ❌ ERROR: Port ${port} is already in use ║
@@ -426,7 +417,7 @@ const startServer = (port: number) => {
`);
process.exit(1);
} else {
console.error("[Server] Error starting server:", error);
console.error('[Server] Error starting server:', error);
process.exit(1);
}
});
@@ -435,20 +426,20 @@ const startServer = (port: number) => {
startServer(PORT);
// Graceful shutdown
process.on("SIGTERM", () => {
console.log("SIGTERM received, shutting down...");
process.on('SIGTERM', () => {
console.log('SIGTERM received, shutting down...');
terminalService.cleanup();
server.close(() => {
console.log("Server closed");
console.log('Server closed');
process.exit(0);
});
});
process.on("SIGINT", () => {
console.log("SIGINT received, shutting down...");
process.on('SIGINT', () => {
console.log('SIGINT received, shutting down...');
terminalService.cleanup();
server.close(() => {
console.log("Server closed");
console.log('Server closed');
process.exit(0);
});
});
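
A minimal client sketch (not part of this commit) of the two WebSocket paths the upgrade handler above routes. The host/port, SESSION_ID, and TOKEN values are placeholders, and the exact JSON shape forwarded on /api/events is not visible in these hunks, so it is treated as opaque here; the terminal message types ('input', 'ping', 'data', 'scrollback') come from the connection handler above.

// Hedged sketch: connecting to the routed WebSocket endpoints.
import WebSocket from 'ws';

const base = 'ws://localhost:3008';

// Event stream: the server forwards subscribed events to every connected client.
const events = new WebSocket(`${base}/api/events`);
events.on('message', (raw) => console.log('[events]', raw.toString()));

// Terminal stream: requires an existing session id; the token is only
// checked when a terminal password is configured.
const term = new WebSocket(`${base}/api/terminal/ws?sessionId=SESSION_ID&token=TOKEN`);
term.on('open', () => {
  term.send(JSON.stringify({ type: 'input', data: 'ls\n' }));
  term.send(JSON.stringify({ type: 'ping' }));
});
term.on('message', (raw) => {
  const msg = JSON.parse(raw.toString());
  if (msg.type === 'data' || msg.type === 'scrollback') process.stdout.write(msg.data);
});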

View File

@@ -5,26 +5,24 @@
* with the provider architecture.
*/
import { query, type Options } from "@anthropic-ai/claude-agent-sdk";
import { BaseProvider } from "./base-provider.js";
import { query, type Options } from '@anthropic-ai/claude-agent-sdk';
import { BaseProvider } from './base-provider.js';
import type {
ExecuteOptions,
ProviderMessage,
InstallationStatus,
ModelDefinition,
} from "./types.js";
} from './types.js';
export class ClaudeProvider extends BaseProvider {
getName(): string {
return "claude";
return 'claude';
}
/**
* Execute a query using Claude Agent SDK
*/
async *executeQuery(
options: ExecuteOptions
): AsyncGenerator<ProviderMessage> {
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
const {
prompt,
model,
@@ -38,16 +36,7 @@ export class ClaudeProvider extends BaseProvider {
} = options;
// Build Claude SDK options
const defaultTools = [
"Read",
"Write",
"Edit",
"Glob",
"Grep",
"Bash",
"WebSearch",
"WebFetch",
];
const defaultTools = ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'];
const toolsToUse = allowedTools || defaultTools;
const sdkOptions: Options = {
@@ -56,7 +45,7 @@ export class ClaudeProvider extends BaseProvider {
maxTurns,
cwd,
allowedTools: toolsToUse,
permissionMode: "acceptEdits",
permissionMode: 'acceptEdits',
sandbox: {
enabled: true,
autoAllowBashIfSandboxed: true,
@@ -75,10 +64,10 @@ export class ClaudeProvider extends BaseProvider {
// Multi-part prompt (with images)
promptPayload = (async function* () {
const multiPartPrompt = {
type: "user" as const,
session_id: "",
type: 'user' as const,
session_id: '',
message: {
role: "user" as const,
role: 'user' as const,
content: prompt,
},
parent_tool_use_id: null,
@@ -99,10 +88,7 @@ export class ClaudeProvider extends BaseProvider {
yield msg as ProviderMessage;
}
} catch (error) {
console.error(
"[ClaudeProvider] executeQuery() error during execution:",
error
);
console.error('[ClaudeProvider] executeQuery() error during execution:', error);
throw error;
}
}
@@ -116,7 +102,7 @@ export class ClaudeProvider extends BaseProvider {
const status: InstallationStatus = {
installed: true,
method: "sdk",
method: 'sdk',
hasApiKey,
authenticated: hasApiKey,
};
@@ -130,53 +116,53 @@ export class ClaudeProvider extends BaseProvider {
getAvailableModels(): ModelDefinition[] {
const models = [
{
id: "claude-opus-4-5-20251101",
name: "Claude Opus 4.5",
modelString: "claude-opus-4-5-20251101",
provider: "anthropic",
description: "Most capable Claude model",
id: 'claude-opus-4-5-20251101',
name: 'Claude Opus 4.5',
modelString: 'claude-opus-4-5-20251101',
provider: 'anthropic',
description: 'Most capable Claude model',
contextWindow: 200000,
maxOutputTokens: 16000,
supportsVision: true,
supportsTools: true,
tier: "premium" as const,
tier: 'premium' as const,
default: true,
},
{
id: "claude-sonnet-4-20250514",
name: "Claude Sonnet 4",
modelString: "claude-sonnet-4-20250514",
provider: "anthropic",
description: "Balanced performance and cost",
id: 'claude-sonnet-4-20250514',
name: 'Claude Sonnet 4',
modelString: 'claude-sonnet-4-20250514',
provider: 'anthropic',
description: 'Balanced performance and cost',
contextWindow: 200000,
maxOutputTokens: 16000,
supportsVision: true,
supportsTools: true,
tier: "standard" as const,
tier: 'standard' as const,
},
{
id: "claude-3-5-sonnet-20241022",
name: "Claude 3.5 Sonnet",
modelString: "claude-3-5-sonnet-20241022",
provider: "anthropic",
description: "Fast and capable",
id: 'claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
modelString: 'claude-3-5-sonnet-20241022',
provider: 'anthropic',
description: 'Fast and capable',
contextWindow: 200000,
maxOutputTokens: 8000,
supportsVision: true,
supportsTools: true,
tier: "standard" as const,
tier: 'standard' as const,
},
{
id: "claude-3-5-haiku-20241022",
name: "Claude 3.5 Haiku",
modelString: "claude-3-5-haiku-20241022",
provider: "anthropic",
description: "Fastest Claude model",
id: 'claude-haiku-4-5-20251001',
name: 'Claude Haiku 4.5',
modelString: 'claude-haiku-4-5-20251001',
provider: 'anthropic',
description: 'Fastest Claude model',
contextWindow: 200000,
maxOutputTokens: 8000,
supportsVision: true,
supportsTools: true,
tier: "basic" as const,
tier: 'basic' as const,
},
] satisfies ModelDefinition[];
return models;
@@ -186,7 +172,7 @@ export class ClaudeProvider extends BaseProvider {
* Check if the provider supports a specific feature
*/
supportsFeature(feature: string): boolean {
const supportedFeatures = ["tools", "text", "vision", "thinking"];
const supportedFeatures = ['tools', 'text', 'vision', 'thinking'];
return supportedFeatures.includes(feature);
}
}
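
A small, hedged usage note for the updated catalog: callers that need a concrete model id (as the describe-file endpoint below does via CLAUDE_MODEL_MAP.haiku) can also resolve the default entry from getAvailableModels(). The provider instance here is assumed to exist already; construction is not shown in this diff.

// Sketch only: picking the default model id from the catalog above.
declare const claudeProvider: ClaudeProvider;

const models = claudeProvider.getAvailableModels();
const defaultModelId = models.find((m) => m.default)?.id ?? models[0].id;
// With the list above this resolves to 'claude-opus-4-5-20251101'.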

View File

@@ -0,0 +1,24 @@
/**
* Context routes - HTTP API for context file operations
*
* Provides endpoints for managing context files including
* AI-powered image description generation.
*/
import { Router } from 'express';
import { createDescribeImageHandler } from './routes/describe-image.js';
import { createDescribeFileHandler } from './routes/describe-file.js';
/**
* Create the context router
*
* @returns Express router with context endpoints
*/
export function createContextRoutes(): Router {
const router = Router();
router.post('/describe-image', createDescribeImageHandler());
router.post('/describe-file', createDescribeFileHandler());
return router;
}
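
A hedged usage sketch (not in the commit) of the two new endpoints, using the request and response shapes defined in the handlers that follow. The base URL is the server default from index.ts; any auth header required by authMiddleware is omitted here.

// Sketch: calling the new context endpoints from a client.
async function describeFile(filePath: string): Promise<string> {
  const res = await fetch('http://localhost:3008/api/context/describe-file', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ filePath }),
  });
  const body = (await res.json()) as { success: boolean; description?: string; error?: string };
  if (!body.success || !body.description) throw new Error(body.error ?? 'describe-file failed');
  return body.description;
}

// describe-image works the same way, with { imagePath } in the request body.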

View File

@@ -0,0 +1,140 @@
/**
* POST /context/describe-file endpoint - Generate description for a text file
*
* Uses Claude Haiku to analyze a text file and generate a concise description
* suitable for context file metadata.
*/
import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { CLAUDE_MODEL_MAP } from '@automaker/types';
const logger = createLogger('DescribeFile');
/**
* Request body for the describe-file endpoint
*/
interface DescribeFileRequestBody {
/** Path to the file */
filePath: string;
}
/**
* Success response from the describe-file endpoint
*/
interface DescribeFileSuccessResponse {
success: true;
description: string;
}
/**
* Error response from the describe-file endpoint
*/
interface DescribeFileErrorResponse {
success: false;
error: string;
}
/**
* Extract text content from Claude SDK response messages
*/
async function extractTextFromStream(
stream: AsyncIterable<{
type: string;
subtype?: string;
result?: string;
message?: {
content?: Array<{ type: string; text?: string }>;
};
}>
): Promise<string> {
let responseText = '';
for await (const msg of stream) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
} else if (msg.type === 'result' && msg.subtype === 'success') {
responseText = msg.result || responseText;
}
}
return responseText;
}
/**
* Create the describe-file request handler
*
* @returns Express request handler for file description
*/
export function createDescribeFileHandler(): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => {
try {
const { filePath } = req.body as DescribeFileRequestBody;
// Validate required fields
if (!filePath || typeof filePath !== 'string') {
const response: DescribeFileErrorResponse = {
success: false,
error: 'filePath is required and must be a string',
};
res.status(400).json(response);
return;
}
logger.info(`[DescribeFile] Starting description generation for: ${filePath}`);
// Build prompt that explicitly asks to read and describe the file
const prompt = `Read the file at "${filePath}" and describe what it contains.
After reading the file, provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
// Use Claude SDK query function - needs 3+ turns for: tool call, tool result, response
const stream = query({
prompt,
options: {
model: CLAUDE_MODEL_MAP.haiku,
maxTurns: 3,
allowedTools: ['Read'],
permissionMode: 'acceptEdits',
},
});
// Extract the description from the response
const description = await extractTextFromStream(stream);
if (!description || description.trim().length === 0) {
logger.warn('Received empty response from Claude');
const response: DescribeFileErrorResponse = {
success: false,
error: 'Failed to generate description - empty response',
};
res.status(500).json(response);
return;
}
logger.info(`Description generated, length: ${description.length} chars`);
const response: DescribeFileSuccessResponse = {
success: true,
description: description.trim(),
};
res.json(response);
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
logger.error('File description failed:', errorMessage);
const response: DescribeFileErrorResponse = {
success: false,
error: errorMessage,
};
res.status(500).json(response);
}
};
}
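
For a quick sanity check of the stream parsing, extractTextFromStream can be fed a handcrafted async iterable shaped like the SDK messages it handles. This assumes the helper is exported for testing; in this diff it is module-private.

// Sketch only: exercising extractTextFromStream with a fake SDK stream.
async function* fakeStream() {
  yield {
    type: 'assistant',
    message: { content: [{ type: 'text', text: 'Draft text from assistant blocks.' }] },
  };
  yield {
    type: 'result',
    subtype: 'success',
    result: 'Configuration file for database connections.',
  };
}

const text = await extractTextFromStream(fakeStream());
// -> 'Configuration file for database connections.' (the final result message wins)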

View File

@@ -0,0 +1,387 @@
/**
* POST /context/describe-image endpoint - Generate description for an image
*
* Uses Claude Haiku to analyze an image and generate a concise description
* suitable for context file metadata.
*
* IMPORTANT:
* The agent runner (chat/auto-mode) sends images as multi-part content blocks (base64 image blocks),
* not by asking Claude to use the Read tool to open files. This endpoint now mirrors that approach
* so it doesn't depend on Claude's filesystem tool access or working directory restrictions.
*/
import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger, readImageAsBase64 } from '@automaker/utils';
import { CLAUDE_MODEL_MAP } from '@automaker/types';
import { createCustomOptions } from '../../../lib/sdk-options.js';
import * as fs from 'fs';
import * as path from 'path';
const logger = createLogger('DescribeImage');
/**
* Find the actual file path, handling Unicode character variations.
* macOS screenshots use U+202F (NARROW NO-BREAK SPACE) before AM/PM,
* but this may be transmitted as a regular space through the API.
*/
function findActualFilePath(requestedPath: string): string | null {
// First, try the exact path
if (fs.existsSync(requestedPath)) {
return requestedPath;
}
// Try with Unicode normalization
const normalizedPath = requestedPath.normalize('NFC');
if (fs.existsSync(normalizedPath)) {
return normalizedPath;
}
// If not found, try to find the file in the directory by matching the basename
// This handles cases where the space character differs (U+0020 vs U+202F vs U+00A0)
const dir = path.dirname(requestedPath);
const baseName = path.basename(requestedPath);
if (!fs.existsSync(dir)) {
return null;
}
try {
const files = fs.readdirSync(dir);
// Normalize the requested basename for comparison
// Replace various space-like characters with regular space for comparison
const normalizeSpaces = (s: string): string => s.replace(/[\u00A0\u202F\u2009\u200A]/g, ' ');
const normalizedBaseName = normalizeSpaces(baseName);
for (const file of files) {
if (normalizeSpaces(file) === normalizedBaseName) {
logger.info(`Found matching file with different space encoding: ${file}`);
return path.join(dir, file);
}
}
} catch (err) {
logger.error(`Error reading directory ${dir}: ${err}`);
}
return null;
}
/**
* Request body for the describe-image endpoint
*/
interface DescribeImageRequestBody {
/** Path to the image file */
imagePath: string;
}
/**
* Success response from the describe-image endpoint
*/
interface DescribeImageSuccessResponse {
success: true;
description: string;
}
/**
* Error response from the describe-image endpoint
*/
interface DescribeImageErrorResponse {
success: false;
error: string;
requestId?: string;
}
/**
* Map SDK/CLI errors to a stable status + user-facing message.
*/
function mapDescribeImageError(rawMessage: string | undefined): {
statusCode: number;
userMessage: string;
} {
const baseResponse = {
statusCode: 500,
userMessage: 'Failed to generate an image description. Please try again.',
};
if (!rawMessage) return baseResponse;
if (rawMessage.includes('Claude Code process exited')) {
return {
statusCode: 503,
userMessage:
'Claude exited unexpectedly while describing the image. Try again. If it keeps happening, re-run `claude login` or update your API key in Setup so Claude can restart cleanly.',
};
}
if (
rawMessage.includes('Failed to spawn Claude Code process') ||
rawMessage.includes('Claude Code executable not found') ||
rawMessage.includes('Claude Code native binary not found')
) {
return {
statusCode: 503,
userMessage:
'Claude CLI could not be launched. Make sure the Claude CLI is installed and available in PATH, then try again.',
};
}
if (rawMessage.toLowerCase().includes('rate limit') || rawMessage.includes('429')) {
return {
statusCode: 429,
userMessage: 'Rate limited while describing the image. Please wait a moment and try again.',
};
}
if (rawMessage.toLowerCase().includes('payload too large') || rawMessage.includes('413')) {
return {
statusCode: 413,
userMessage:
'The image is too large to send for description. Please resize/compress it and try again.',
};
}
return baseResponse;
}
/**
* Extract text content from Claude SDK response messages and log high-signal stream events.
*/
async function extractTextFromStream(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
stream: AsyncIterable<any>,
requestId: string
): Promise<string> {
let responseText = '';
let messageCount = 0;
logger.info(`[${requestId}] [Stream] Begin reading SDK stream...`);
for await (const msg of stream) {
messageCount++;
const msgType = msg?.type;
const msgSubtype = msg?.subtype;
// Keep this concise but informative. Full error object is logged in catch blocks.
logger.info(
`[${requestId}] [Stream] #${messageCount} type=${String(msgType)} subtype=${String(msgSubtype ?? '')}`
);
if (msgType === 'assistant' && msg.message?.content) {
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
logger.info(`[${requestId}] [Stream] assistant blocks=${blocks.length}`);
for (const block of blocks) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
}
if (msgType === 'result' && msgSubtype === 'success') {
if (typeof msg.result === 'string' && msg.result.length > 0) {
responseText = msg.result;
}
}
}
logger.info(
`[${requestId}] [Stream] End of stream. messages=${messageCount} textLength=${responseText.length}`
);
return responseText;
}
/**
* Create the describe-image request handler
*
* Uses Claude SDK query with multi-part content blocks to include the image (base64),
* matching the agent runner behavior.
*
* @returns Express request handler for image description
*/
export function createDescribeImageHandler(): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => {
const requestId = `describe-image-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;
const startedAt = Date.now();
// Request envelope logs (high value when correlating failures)
logger.info(`[${requestId}] ===== POST /api/context/describe-image =====`);
logger.info(`[${requestId}] headers=${JSON.stringify(req.headers)}`);
logger.info(`[${requestId}] body=${JSON.stringify(req.body)}`);
try {
const { imagePath } = req.body as DescribeImageRequestBody;
// Validate required fields
if (!imagePath || typeof imagePath !== 'string') {
const response: DescribeImageErrorResponse = {
success: false,
error: 'imagePath is required and must be a string',
requestId,
};
res.status(400).json(response);
return;
}
logger.info(`[${requestId}] imagePath="${imagePath}" type=${typeof imagePath}`);
// Find the actual file path (handles Unicode space character variations)
const actualPath = findActualFilePath(imagePath);
if (!actualPath) {
logger.error(`[${requestId}] File not found: ${imagePath}`);
// Log hex representation of the path for debugging
const hexPath = Buffer.from(imagePath).toString('hex');
logger.error(`[${requestId}] imagePath hex: ${hexPath}`);
const response: DescribeImageErrorResponse = {
success: false,
error: `File not found: ${imagePath}`,
requestId,
};
res.status(404).json(response);
return;
}
if (actualPath !== imagePath) {
logger.info(`[${requestId}] Using actual path: ${actualPath}`);
}
// Log path + stats (this is often where issues start: missing file, perms, size)
let stat: fs.Stats | null = null;
try {
stat = fs.statSync(actualPath);
logger.info(
`[${requestId}] fileStats size=${stat.size} bytes mtime=${stat.mtime.toISOString()}`
);
} catch (statErr) {
logger.warn(
`[${requestId}] Unable to stat image file (continuing to read base64): ${String(statErr)}`
);
}
// Read image and convert to base64 (same as agent runner)
logger.info(`[${requestId}] Reading image into base64...`);
const imageReadStart = Date.now();
const imageData = await readImageAsBase64(actualPath);
const imageReadMs = Date.now() - imageReadStart;
const base64Length = imageData.base64.length;
const estimatedBytes = Math.ceil((base64Length * 3) / 4);
logger.info(`[${requestId}] imageReadMs=${imageReadMs}`);
logger.info(
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
);
// Build multi-part prompt with image block (no Read tool required)
const instructionText =
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
const promptContent = [
{ type: 'text' as const, text: instructionText },
{
type: 'image' as const,
source: {
type: 'base64' as const,
media_type: imageData.mimeType,
data: imageData.base64,
},
},
];
logger.info(`[${requestId}] Built multi-part prompt blocks=${promptContent.length}`);
const cwd = path.dirname(actualPath);
logger.info(`[${requestId}] Using cwd=${cwd}`);
// Use the same centralized option builder used across the server (validates cwd)
const sdkOptions = createCustomOptions({
cwd,
model: CLAUDE_MODEL_MAP.haiku,
maxTurns: 1,
allowedTools: [],
sandbox: { enabled: true, autoAllowBashIfSandboxed: true },
});
logger.info(
`[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
sdkOptions.allowedTools
)} sandbox=${JSON.stringify(sdkOptions.sandbox)}`
);
const promptGenerator = (async function* () {
yield {
type: 'user' as const,
session_id: '',
message: { role: 'user' as const, content: promptContent },
parent_tool_use_id: null,
};
})();
logger.info(`[${requestId}] Calling query()...`);
const queryStart = Date.now();
const stream = query({ prompt: promptGenerator, options: sdkOptions });
logger.info(`[${requestId}] query() returned stream in ${Date.now() - queryStart}ms`);
// Extract the description from the response
const extractStart = Date.now();
const description = await extractTextFromStream(stream, requestId);
logger.info(`[${requestId}] extractMs=${Date.now() - extractStart}`);
if (!description || description.trim().length === 0) {
logger.warn(`[${requestId}] Received empty response from Claude`);
const response: DescribeImageErrorResponse = {
success: false,
error: 'Failed to generate description - empty response',
requestId,
};
res.status(500).json(response);
return;
}
const totalMs = Date.now() - startedAt;
logger.info(`[${requestId}] Success descriptionLen=${description.length} totalMs=${totalMs}`);
const response: DescribeImageSuccessResponse = {
success: true,
description: description.trim(),
};
res.json(response);
} catch (error) {
const totalMs = Date.now() - startedAt;
const err = error as unknown;
const errMessage = err instanceof Error ? err.message : String(err);
const errName = err instanceof Error ? err.name : 'UnknownError';
const errStack = err instanceof Error ? err.stack : undefined;
logger.error(`[${requestId}] FAILED totalMs=${totalMs}`);
logger.error(`[${requestId}] errorName=${errName}`);
logger.error(`[${requestId}] errorMessage=${errMessage}`);
if (errStack) logger.error(`[${requestId}] errorStack=${errStack}`);
// Dump all enumerable + non-enumerable props (this is where stderr/stdout/exitCode often live)
try {
const props = err && typeof err === 'object' ? Object.getOwnPropertyNames(err) : [];
logger.error(`[${requestId}] errorProps=${JSON.stringify(props)}`);
if (err && typeof err === 'object') {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const anyErr = err as any;
const details = JSON.stringify(anyErr, props as unknown as string[]);
logger.error(`[${requestId}] errorDetails=${details}`);
}
} catch (stringifyErr) {
logger.error(`[${requestId}] Failed to serialize error object: ${String(stringifyErr)}`);
}
const { statusCode, userMessage } = mapDescribeImageError(errMessage);
const response: DescribeImageErrorResponse = {
success: false,
error: `${userMessage} (requestId: ${requestId})`,
requestId,
};
res.status(statusCode).json(response);
}
};
}
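
One detail worth illustrating: the basename fallback in findActualFilePath exists because macOS screenshot names contain U+202F (narrow no-break space) before AM/PM, so a path transmitted with a regular space never matches fs.existsSync directly. The filenames below are illustrative only.

// Illustration of the space-normalization comparison used above.
const onDisk = 'Screenshot 2025-12-21 at 9.41.07\u202FPM.png'; // narrow no-break space
const requested = 'Screenshot 2025-12-21 at 9.41.07 PM.png'; // regular space

const normalizeSpaces = (s: string): string => s.replace(/[\u00A0\u202F\u2009\u200A]/g, ' ');

console.log(onDisk === requested); // false - exact path lookup misses
console.log(normalizeSpaces(onDisk) === normalizeSpaces(requested)); // true - directory scan matches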

View File

@@ -1,41 +1,34 @@
import { useState, useRef, useCallback, useEffect } from "react";
import { ImageIcon, Upload, Loader2, Trash2 } from "lucide-react";
import { useState, useRef, useCallback, useEffect } from 'react';
import { ImageIcon, Upload, Loader2, Trash2 } from 'lucide-react';
import {
Sheet,
SheetContent,
SheetDescription,
SheetHeader,
SheetTitle,
} from "@/components/ui/sheet";
import { Button } from "@/components/ui/button";
import { Slider } from "@/components/ui/slider";
import { Label } from "@/components/ui/label";
import { Checkbox } from "@/components/ui/checkbox";
import { cn } from "@/lib/utils";
import { useAppStore, defaultBackgroundSettings } from "@/store/app-store";
import { getHttpApiClient } from "@/lib/http-api-client";
import { useBoardBackgroundSettings } from "@/hooks/use-board-background-settings";
import { toast } from "sonner";
const ACCEPTED_IMAGE_TYPES = [
"image/jpeg",
"image/jpg",
"image/png",
"image/gif",
"image/webp",
];
const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
} from '@/components/ui/sheet';
import { Button } from '@/components/ui/button';
import { Slider } from '@/components/ui/slider';
import { Label } from '@/components/ui/label';
import { Checkbox } from '@/components/ui/checkbox';
import { cn } from '@/lib/utils';
import { useAppStore, defaultBackgroundSettings } from '@/store/app-store';
import { getHttpApiClient } from '@/lib/http-api-client';
import { useBoardBackgroundSettings } from '@/hooks/use-board-background-settings';
import { toast } from 'sonner';
import {
fileToBase64,
validateImageFile,
ACCEPTED_IMAGE_TYPES,
DEFAULT_MAX_FILE_SIZE,
} from '@/lib/image-utils';
interface BoardBackgroundModalProps {
open: boolean;
onOpenChange: (open: boolean) => void;
}
export function BoardBackgroundModal({
open,
onOpenChange,
}: BoardBackgroundModalProps) {
export function BoardBackgroundModal({ open, onOpenChange }: BoardBackgroundModalProps) {
const { currentProject, boardBackgroundByProject } = useAppStore();
const {
setBoardBackground,
@@ -55,8 +48,7 @@ export function BoardBackgroundModal({
// Get current background settings (live from store)
const backgroundSettings =
(currentProject && boardBackgroundByProject[currentProject.path]) ||
defaultBackgroundSettings;
(currentProject && boardBackgroundByProject[currentProject.path]) || defaultBackgroundSettings;
const cardOpacity = backgroundSettings.cardOpacity;
const columnOpacity = backgroundSettings.columnOpacity;
@@ -70,12 +62,9 @@ export function BoardBackgroundModal({
// Update preview image when background settings change
useEffect(() => {
if (currentProject && backgroundSettings.imagePath) {
const serverUrl =
import.meta.env.VITE_SERVER_URL || "http://localhost:3008";
const serverUrl = import.meta.env.VITE_SERVER_URL || 'http://localhost:3008';
// Add cache-busting query parameter to force browser to reload image
const cacheBuster = imageVersion
? `&v=${imageVersion}`
: `&v=${Date.now()}`;
const cacheBuster = imageVersion ? `&v=${imageVersion}` : `&v=${Date.now()}`;
const imagePath = `${serverUrl}/api/fs/image?path=${encodeURIComponent(
backgroundSettings.imagePath
)}&projectPath=${encodeURIComponent(currentProject.path)}${cacheBuster}`;
@@ -85,40 +74,17 @@ export function BoardBackgroundModal({
}
}, [currentProject, backgroundSettings.imagePath, imageVersion]);
const fileToBase64 = (file: File): Promise<string> => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => {
if (typeof reader.result === "string") {
resolve(reader.result);
} else {
reject(new Error("Failed to read file as base64"));
}
};
reader.onerror = () => reject(new Error("Failed to read file"));
reader.readAsDataURL(file);
});
};
const processFile = useCallback(
async (file: File) => {
if (!currentProject) {
toast.error("No project selected");
toast.error('No project selected');
return;
}
// Validate file type
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
toast.error(
"Unsupported file type. Please use JPG, PNG, GIF, or WebP."
);
return;
}
// Validate file size
if (file.size > DEFAULT_MAX_FILE_SIZE) {
const maxSizeMB = DEFAULT_MAX_FILE_SIZE / (1024 * 1024);
toast.error(`File too large. Maximum size is ${maxSizeMB}MB.`);
// Validate file
const validation = validateImageFile(file, DEFAULT_MAX_FILE_SIZE);
if (!validation.isValid) {
toast.error(validation.error);
return;
}
@@ -141,14 +107,14 @@ export function BoardBackgroundModal({
if (result.success && result.path) {
// Update store and persist to server
await setBoardBackground(currentProject.path, result.path);
toast.success("Background image saved");
toast.success('Background image saved');
} else {
toast.error(result.error || "Failed to save background image");
toast.error(result.error || 'Failed to save background image');
setPreviewImage(null);
}
} catch (error) {
console.error("Failed to process image:", error);
toast.error("Failed to process image");
console.error('Failed to process image:', error);
toast.error('Failed to process image');
setPreviewImage(null);
} finally {
setIsProcessing(false);
@@ -191,7 +157,7 @@ export function BoardBackgroundModal({
}
// Reset the input so the same file can be selected again
if (fileInputRef.current) {
fileInputRef.current.value = "";
fileInputRef.current.value = '';
}
},
[processFile]
@@ -209,20 +175,18 @@ export function BoardBackgroundModal({
try {
setIsProcessing(true);
const httpClient = getHttpApiClient();
const result = await httpClient.deleteBoardBackground(
currentProject.path
);
const result = await httpClient.deleteBoardBackground(currentProject.path);
if (result.success) {
await clearBoardBackground(currentProject.path);
setPreviewImage(null);
toast.success("Background image cleared");
toast.success('Background image cleared');
} else {
toast.error(result.error || "Failed to clear background image");
toast.error(result.error || 'Failed to clear background image');
}
} catch (error) {
console.error("Failed to clear background:", error);
toast.error("Failed to clear background");
console.error('Failed to clear background:', error);
toast.error('Failed to clear background');
} finally {
setIsProcessing(false);
}
@@ -298,8 +262,7 @@ export function BoardBackgroundModal({
Board Background Settings
</SheetTitle>
<SheetDescription className="text-muted-foreground">
Set a custom background image for your kanban board and adjust
card/column opacity
Set a custom background image for your kanban board and adjust card/column opacity
</SheetDescription>
</SheetHeader>
@@ -312,7 +275,7 @@ export function BoardBackgroundModal({
<input
ref={fileInputRef}
type="file"
accept={ACCEPTED_IMAGE_TYPES.join(",")}
accept={ACCEPTED_IMAGE_TYPES.join(',')}
onChange={handleFileSelect}
className="hidden"
disabled={isProcessing}
@@ -324,14 +287,13 @@ export function BoardBackgroundModal({
onDragOver={handleDragOver}
onDragLeave={handleDragLeave}
className={cn(
"relative rounded-lg border-2 border-dashed transition-all duration-200",
'relative rounded-lg border-2 border-dashed transition-all duration-200',
{
"border-brand-500/60 bg-brand-500/5 dark:bg-brand-500/10":
'border-brand-500/60 bg-brand-500/5 dark:bg-brand-500/10':
isDragOver && !isProcessing,
"border-muted-foreground/25": !isDragOver && !isProcessing,
"border-muted-foreground/10 opacity-50 cursor-not-allowed":
isProcessing,
"hover:border-brand-500/40 hover:bg-brand-500/5 dark:hover:bg-brand-500/5":
'border-muted-foreground/25': !isDragOver && !isProcessing,
'border-muted-foreground/10 opacity-50 cursor-not-allowed': isProcessing,
'hover:border-brand-500/40 hover:bg-brand-500/5 dark:hover:bg-brand-500/5':
!isProcessing && !isDragOver,
}
)}
@@ -379,10 +341,10 @@ export function BoardBackgroundModal({
>
<div
className={cn(
"rounded-full p-3 mb-3",
'rounded-full p-3 mb-3',
isDragOver && !isProcessing
? "bg-brand-500/10 dark:bg-brand-500/20"
: "bg-muted"
? 'bg-brand-500/10 dark:bg-brand-500/20'
: 'bg-muted'
)}
>
{isProcessing ? (
@@ -393,12 +355,12 @@ export function BoardBackgroundModal({
</div>
<p className="text-sm text-muted-foreground">
{isDragOver && !isProcessing
? "Drop image here"
: "Click to upload or drag and drop"}
? 'Drop image here'
: 'Click to upload or drag and drop'}
</p>
<p className="text-xs text-muted-foreground mt-1">
JPG, PNG, GIF, or WebP (max{" "}
{Math.round(DEFAULT_MAX_FILE_SIZE / (1024 * 1024))}MB)
JPG, PNG, GIF, or WebP (max {Math.round(DEFAULT_MAX_FILE_SIZE / (1024 * 1024))}
MB)
</p>
</div>
)}
@@ -410,9 +372,7 @@ export function BoardBackgroundModal({
<div className="space-y-2">
<div className="flex items-center justify-between">
<Label>Card Opacity</Label>
<span className="text-sm text-muted-foreground">
{cardOpacity}%
</span>
<span className="text-sm text-muted-foreground">{cardOpacity}%</span>
</div>
<Slider
value={[cardOpacity]}
@@ -427,9 +387,7 @@ export function BoardBackgroundModal({
<div className="space-y-2">
<div className="flex items-center justify-between">
<Label>Column Opacity</Label>
<span className="text-sm text-muted-foreground">
{columnOpacity}%
</span>
<span className="text-sm text-muted-foreground">{columnOpacity}%</span>
</div>
<Slider
value={[columnOpacity]}
@@ -460,10 +418,7 @@ export function BoardBackgroundModal({
checked={cardGlassmorphism}
onCheckedChange={handleCardGlassmorphismToggle}
/>
<Label
htmlFor="card-glassmorphism-toggle"
className="cursor-pointer"
>
<Label htmlFor="card-glassmorphism-toggle" className="cursor-pointer">
Card Glassmorphism (blur effect)
</Label>
</div>
@@ -485,9 +440,7 @@ export function BoardBackgroundModal({
<div className="space-y-2">
<div className="flex items-center justify-between">
<Label>Card Border Opacity</Label>
<span className="text-sm text-muted-foreground">
{cardBorderOpacity}%
</span>
<span className="text-sm text-muted-foreground">{cardBorderOpacity}%</span>
</div>
<Slider
value={[cardBorderOpacity]}
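
The removed inline helpers (fileToBase64 and the type/size checks) now come from '@/lib/image-utils'. That module is not shown in this section, so the sketch below reconstructs plausible shapes from the deleted code and the new call sites; treat the signatures as assumptions.

// Hedged reconstruction of the shared helpers imported above.
export const ACCEPTED_IMAGE_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/webp'];
export const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB

export function validateImageFile(
  file: File,
  maxSize: number = DEFAULT_MAX_FILE_SIZE
): { isValid: boolean; error?: string } {
  if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
    return { isValid: false, error: 'Unsupported file type. Please use JPG, PNG, GIF, or WebP.' };
  }
  if (file.size > maxSize) {
    return { isValid: false, error: `File too large. Maximum size is ${maxSize / (1024 * 1024)}MB.` };
  }
  return { isValid: true };
}

export function fileToBase64(file: File): Promise<string> {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () =>
      typeof reader.result === 'string'
        ? resolve(reader.result)
        : reject(new Error('Failed to read file as base64'));
    reader.onerror = () => reject(new Error('Failed to read file'));
    reader.readAsDataURL(file);
  });
}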

View File

@@ -1,18 +1,38 @@
import React, { useState, useRef, useCallback } from 'react';
import { cn } from '@/lib/utils';
import { ImageIcon, X, Loader2 } from 'lucide-react';
import { ImageIcon, X, Loader2, FileText } from 'lucide-react';
import { Textarea } from '@/components/ui/textarea';
import { getElectronAPI } from '@/lib/electron';
import { useAppStore, type FeatureImagePath } from '@/store/app-store';
import { useAppStore, type FeatureImagePath, type FeatureTextFilePath } from '@/store/app-store';
import {
sanitizeFilename,
fileToBase64,
fileToText,
isTextFile,
isImageFile,
validateTextFile,
getTextFileMimeType,
generateFileId,
ACCEPTED_IMAGE_TYPES,
ACCEPTED_TEXT_EXTENSIONS,
DEFAULT_MAX_FILE_SIZE,
DEFAULT_MAX_TEXT_FILE_SIZE,
formatFileSize,
} from '@/lib/image-utils';
// Map to store preview data by image ID (persisted across component re-mounts)
export type ImagePreviewMap = Map<string, string>;
// Re-export for convenience
export type { FeatureImagePath, FeatureTextFilePath };
interface DescriptionImageDropZoneProps {
value: string;
onChange: (value: string) => void;
images: FeatureImagePath[];
onImagesChange: (images: FeatureImagePath[]) => void;
textFiles?: FeatureTextFilePath[];
onTextFilesChange?: (textFiles: FeatureTextFilePath[]) => void;
placeholder?: string;
className?: string;
disabled?: boolean;
@@ -25,14 +45,13 @@ interface DescriptionImageDropZoneProps {
error?: boolean; // Show error state with red border
}
const ACCEPTED_IMAGE_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/webp'];
const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
export function DescriptionImageDropZone({
value,
onChange,
images,
onImagesChange,
textFiles = [],
onTextFilesChange,
placeholder = 'Describe the feature...',
className,
disabled = false,
@@ -81,21 +100,6 @@ export function DescriptionImageDropZone({
[currentProject?.path]
);
const fileToBase64 = (file: File): Promise<string> => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => {
if (typeof reader.result === 'string') {
resolve(reader.result);
} else {
reject(new Error('Failed to read file as base64'));
}
};
reader.onerror = () => reject(new Error('Failed to read file'));
reader.readAsDataURL(file);
});
};
const saveImageToTemp = useCallback(
async (base64Data: string, filename: string, mimeType: string): Promise<string | null> => {
try {
@@ -129,54 +133,89 @@ export function DescriptionImageDropZone({
setIsProcessing(true);
const newImages: FeatureImagePath[] = [];
const newTextFiles: FeatureTextFilePath[] = [];
const newPreviews = new Map(previewImages);
const errors: string[] = [];
// Calculate total current files
const currentTotalFiles = images.length + textFiles.length;
for (const file of Array.from(files)) {
// Validate file type
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
errors.push(`${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`);
continue;
}
// Validate file size
if (file.size > maxFileSize) {
const maxSizeMB = maxFileSize / (1024 * 1024);
errors.push(`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`);
continue;
}
// Check if we've reached max files
if (newImages.length + images.length >= maxFiles) {
errors.push(`Maximum ${maxFiles} images allowed.`);
break;
}
try {
const base64 = await fileToBase64(file);
const tempPath = await saveImageToTemp(base64, file.name, file.type);
if (tempPath) {
const imageId = `img-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
const imagePathRef: FeatureImagePath = {
id: imageId,
path: tempPath,
filename: file.name,
mimeType: file.type,
};
newImages.push(imagePathRef);
// Store preview for display
newPreviews.set(imageId, base64);
} else {
errors.push(`${file.name}: Failed to save image.`);
// Check if it's a text file
if (isTextFile(file)) {
const validation = validateTextFile(file, DEFAULT_MAX_TEXT_FILE_SIZE);
if (!validation.isValid) {
errors.push(validation.error!);
continue;
}
} catch {
errors.push(`${file.name}: Failed to process image.`);
// Check if we've reached max files
const totalFiles = newImages.length + newTextFiles.length + currentTotalFiles;
if (totalFiles >= maxFiles) {
errors.push(`Maximum ${maxFiles} files allowed.`);
break;
}
try {
const content = await fileToText(file);
const sanitizedName = sanitizeFilename(file.name);
const textFilePath: FeatureTextFilePath = {
id: generateFileId(),
path: '', // Text files don't need to be saved to disk
filename: sanitizedName,
mimeType: getTextFileMimeType(file.name),
content,
};
newTextFiles.push(textFilePath);
} catch {
errors.push(`${file.name}: Failed to read text file.`);
}
}
// Check if it's an image file
else if (isImageFile(file)) {
// Validate file size
if (file.size > maxFileSize) {
const maxSizeMB = maxFileSize / (1024 * 1024);
errors.push(`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`);
continue;
}
// Check if we've reached max files
const totalFiles = newImages.length + newTextFiles.length + currentTotalFiles;
if (totalFiles >= maxFiles) {
errors.push(`Maximum ${maxFiles} files allowed.`);
break;
}
try {
const base64 = await fileToBase64(file);
const sanitizedName = sanitizeFilename(file.name);
const tempPath = await saveImageToTemp(base64, sanitizedName, file.type);
if (tempPath) {
const imageId = `img-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
const imagePathRef: FeatureImagePath = {
id: imageId,
path: tempPath,
filename: sanitizedName,
mimeType: file.type,
};
newImages.push(imagePathRef);
// Store preview for display
newPreviews.set(imageId, base64);
} else {
errors.push(`${file.name}: Failed to save image.`);
}
} catch {
errors.push(`${file.name}: Failed to process image.`);
}
} else {
errors.push(`${file.name}: Unsupported file type. Use images, .txt, or .md files.`);
}
}
if (errors.length > 0) {
console.warn('Image upload errors:', errors);
console.warn('File upload errors:', errors);
}
if (newImages.length > 0) {
@@ -184,15 +223,21 @@ export function DescriptionImageDropZone({
setPreviewImages(newPreviews);
}
if (newTextFiles.length > 0 && onTextFilesChange) {
onTextFilesChange([...textFiles, ...newTextFiles]);
}
setIsProcessing(false);
},
[
disabled,
isProcessing,
images,
textFiles,
maxFiles,
maxFileSize,
onImagesChange,
onTextFilesChange,
previewImages,
saveImageToTemp,
]
@@ -263,6 +308,15 @@ export function DescriptionImageDropZone({
[images, onImagesChange]
);
const removeTextFile = useCallback(
(fileId: string) => {
if (onTextFilesChange) {
onTextFilesChange(textFiles.filter((file) => file.id !== fileId));
}
},
[textFiles, onTextFilesChange]
);
// Handle paste events to detect and process images from clipboard
// Works across all OS (Windows, Linux, macOS)
const handlePaste = useCallback(
@@ -314,11 +368,11 @@ export function DescriptionImageDropZone({
ref={fileInputRef}
type="file"
multiple
accept={ACCEPTED_IMAGE_TYPES.join(',')}
accept={[...ACCEPTED_IMAGE_TYPES, ...ACCEPTED_TEXT_EXTENSIONS].join(',')}
onChange={handleFileSelect}
className="hidden"
disabled={disabled}
data-testid="description-image-input"
data-testid="description-file-input"
/>
{/* Drop zone wrapper */}
@@ -338,7 +392,7 @@ export function DescriptionImageDropZone({
>
<div className="flex flex-col items-center gap-2 text-blue-400">
<ImageIcon className="w-8 h-8" />
<span className="text-sm font-medium">Drop images here</span>
<span className="text-sm font-medium">Drop files here</span>
</div>
</div>
)}
@@ -359,7 +413,7 @@ export function DescriptionImageDropZone({
{/* Hint text */}
<p className="text-xs text-muted-foreground mt-1">
Paste, drag and drop images, or{' '}
Paste, drag and drop files, or{' '}
<button
type="button"
onClick={handleBrowseClick}
@@ -368,29 +422,33 @@ export function DescriptionImageDropZone({
>
browse
</button>{' '}
to attach context images
to attach context (images, .txt, .md)
</p>
{/* Processing indicator */}
{isProcessing && (
<div className="flex items-center gap-2 mt-2 text-sm text-muted-foreground">
<Loader2 className="w-4 h-4 animate-spin" />
<span>Saving images...</span>
<span>Processing files...</span>
</div>
)}
{/* Image previews */}
{images.length > 0 && (
<div className="mt-3 space-y-2" data-testid="description-image-previews">
{/* File previews (images and text files) */}
{(images.length > 0 || textFiles.length > 0) && (
<div className="mt-3 space-y-2" data-testid="description-file-previews">
<div className="flex items-center justify-between">
<p className="text-xs font-medium text-foreground">
{images.length} image{images.length > 1 ? 's' : ''} attached
{images.length + textFiles.length} file
{images.length + textFiles.length > 1 ? 's' : ''} attached
</p>
<button
type="button"
onClick={() => {
onImagesChange([]);
setPreviewImages(new Map());
if (onTextFilesChange) {
onTextFilesChange([]);
}
}}
className="text-xs text-muted-foreground hover:text-foreground"
disabled={disabled}
@@ -399,6 +457,7 @@ export function DescriptionImageDropZone({
</button>
</div>
<div className="flex flex-wrap gap-2">
{/* Image previews */}
{images.map((image) => (
<div
key={image.id}
@@ -445,6 +504,38 @@ export function DescriptionImageDropZone({
</div>
</div>
))}
{/* Text file previews */}
{textFiles.map((file) => (
<div
key={file.id}
className="relative group rounded-md border border-muted bg-muted/50 overflow-hidden"
data-testid={`description-text-file-preview-${file.id}`}
>
{/* Text file icon */}
<div className="w-16 h-16 flex items-center justify-center bg-zinc-800">
<FileText className="w-6 h-6 text-muted-foreground" />
</div>
{/* Remove button */}
{!disabled && (
<button
type="button"
onClick={(e) => {
e.stopPropagation();
removeTextFile(file.id);
}}
className="absolute top-0.5 right-0.5 p-0.5 rounded-full bg-destructive text-destructive-foreground opacity-0 group-hover:opacity-100 transition-opacity"
data-testid={`remove-description-text-file-${file.id}`}
>
<X className="h-3 w-3" />
</button>
)}
{/* Filename and size tooltip on hover */}
<div className="absolute bottom-0 left-0 right-0 bg-black/60 px-1 py-0.5 opacity-0 group-hover:opacity-100 transition-opacity">
<p className="text-[10px] text-white truncate">{file.filename}</p>
<p className="text-[9px] text-white/70">{formatFileSize(file.content.length)}</p>
</div>
</div>
))}
</div>
</div>
)}
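Note: the updated processFiles branches on file kind before doing any work: text files are read in-memory and kept as content strings, while images are converted to base64 and saved to a temp path. A minimal sketch of that dispatch, assuming the helpers exported from '@/lib/image-utils' shown later in this commit; the onText/onImage callbacks are hypothetical placeholders, not part of the component.

import {
  isTextFile,
  isImageFile,
  fileToText,
  fileToBase64,
  validateTextFile,
  validateImageFile,
} from '@/lib/image-utils';

// Classify a dropped file and hand it to the right handler.
// Returns an error string instead of throwing so callers can collect
// per-file errors the way processFiles does.
export async function classifyAndRead(
  file: File,
  onText: (content: string) => void, // hypothetical callback
  onImage: (base64: string) => void // hypothetical callback
): Promise<string | null> {
  if (isTextFile(file)) {
    const check = validateTextFile(file);
    if (!check.isValid) return check.error ?? `${file.name}: invalid text file.`;
    onText(await fileToText(file));
    return null;
  }
  if (isImageFile(file)) {
    const check = validateImageFile(file);
    if (!check.isValid) return check.error ?? `${file.name}: invalid image.`;
    onImage(await fileToBase64(file));
    return null;
  }
  return `${file.name}: Unsupported file type. Use images, .txt, or .md files.`;
}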

View File

@@ -1,6 +1,14 @@
import React, { useState, useRef, useCallback } from 'react';
import { cn } from '@/lib/utils';
import { ImageIcon, X, Upload } from 'lucide-react';
import {
fileToBase64,
generateImageId,
ACCEPTED_IMAGE_TYPES,
DEFAULT_MAX_FILE_SIZE,
DEFAULT_MAX_FILES,
validateImageFile,
} from '@/lib/image-utils';
export interface FeatureImage {
id: string;
@@ -19,13 +27,10 @@ interface FeatureImageUploadProps {
disabled?: boolean;
}
const ACCEPTED_IMAGE_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/webp'];
const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
export function FeatureImageUpload({
images,
onImagesChange,
maxFiles = 5,
maxFiles = DEFAULT_MAX_FILES,
maxFileSize = DEFAULT_MAX_FILE_SIZE,
className,
disabled = false,
@@ -34,21 +39,6 @@ export function FeatureImageUpload({
const [isProcessing, setIsProcessing] = useState(false);
const fileInputRef = useRef<HTMLInputElement>(null);
const fileToBase64 = (file: File): Promise<string> => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => {
if (typeof reader.result === 'string') {
resolve(reader.result);
} else {
reject(new Error('Failed to read file as base64'));
}
};
reader.onerror = () => reject(new Error('Failed to read file'));
reader.readAsDataURL(file);
});
};
const processFiles = useCallback(
async (files: FileList) => {
if (disabled || isProcessing) return;
@@ -58,16 +48,10 @@ export function FeatureImageUpload({
const errors: string[] = [];
for (const file of Array.from(files)) {
// Validate file type
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
errors.push(`${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`);
continue;
}
// Validate file size
if (file.size > maxFileSize) {
const maxSizeMB = maxFileSize / (1024 * 1024);
errors.push(`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`);
// Validate file
const validation = validateImageFile(file, maxFileSize);
if (!validation.isValid) {
errors.push(validation.error!);
continue;
}
@@ -80,7 +64,7 @@ export function FeatureImageUpload({
try {
const base64 = await fileToBase64(file);
const imageAttachment: FeatureImage = {
id: `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
id: generateImageId(),
data: base64,
mimeType: file.type,
filename: file.name,
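Note: the inline type/size checks removed above collapse into the shared validateImageFile helper. A small sketch of how the consolidated validation reads when applied to a batch of files (helper names as exported from '@/lib/image-utils'); the function itself is illustrative, not part of this component.

import { validateImageFile, DEFAULT_MAX_FILE_SIZE } from '@/lib/image-utils';

// Collect errors per file instead of aborting on the first bad one,
// mirroring how processFiles accumulates them for a single console.warn.
function collectImageErrors(files: File[], maxFileSize = DEFAULT_MAX_FILE_SIZE): string[] {
  const errors: string[] = [];
  for (const file of files) {
    const validation = validateImageFile(file, maxFileSize);
    if (!validation.isValid) errors.push(validation.error!);
  }
  return errors;
}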

View File

@@ -2,6 +2,15 @@ import React, { useState, useRef, useCallback } from 'react';
import { cn } from '@/lib/utils';
import { ImageIcon, X, Upload } from 'lucide-react';
import type { ImageAttachment } from '@/store/app-store';
import {
fileToBase64,
generateImageId,
formatFileSize,
validateImageFile,
ACCEPTED_IMAGE_TYPES,
DEFAULT_MAX_FILE_SIZE,
DEFAULT_MAX_FILES,
} from '@/lib/image-utils';
interface ImageDropZoneProps {
onImagesSelected: (images: ImageAttachment[]) => void;
@@ -13,12 +22,9 @@ interface ImageDropZoneProps {
images?: ImageAttachment[]; // Optional controlled images prop
}
const ACCEPTED_IMAGE_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/webp'];
const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
export function ImageDropZone({
onImagesSelected,
maxFiles = 5,
maxFiles = DEFAULT_MAX_FILES,
maxFileSize = DEFAULT_MAX_FILE_SIZE,
className,
children,
@@ -53,16 +59,10 @@ export function ImageDropZone({
const errors: string[] = [];
for (const file of Array.from(files)) {
// Validate file type
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
errors.push(`${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`);
continue;
}
// Validate file size
if (file.size > maxFileSize) {
const maxSizeMB = maxFileSize / (1024 * 1024);
errors.push(`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`);
// Validate file
const validation = validateImageFile(file, maxFileSize);
if (!validation.isValid) {
errors.push(validation.error!);
continue;
}
@@ -75,7 +75,7 @@ export function ImageDropZone({
try {
const base64 = await fileToBase64(file);
const imageAttachment: ImageAttachment = {
id: `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
id: generateImageId(),
data: base64,
mimeType: file.type,
filename: file.name,
@@ -89,7 +89,6 @@ export function ImageDropZone({
if (errors.length > 0) {
console.warn('Image upload errors:', errors);
// You could show these errors to the user via a toast or notification
}
if (newImages.length > 0) {
@@ -282,26 +281,3 @@ export function ImageDropZone({
</div>
);
}
function fileToBase64(file: File): Promise<string> {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => {
if (typeof reader.result === 'string') {
resolve(reader.result);
} else {
reject(new Error('Failed to read file as base64'));
}
};
reader.onerror = () => reject(new Error('Failed to read file'));
reader.readAsDataURL(file);
});
}
function formatFileSize(bytes: number): string {
if (bytes === 0) return '0 B';
const k = 1024;
const sizes = ['B', 'KB', 'MB', 'GB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
}
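Note: the local fileToBase64 and formatFileSize copies are dropped in favour of the shared module. A quick sanity check of the shared formatter, assuming the implementation added later in '@/lib/image-utils'.

import { formatFileSize } from '@/lib/image-utils';

// 1536 bytes -> "1.5 KB", 10 * 1024 * 1024 bytes -> "10 MB"
console.log(formatFileSize(1536)); // "1.5 KB"
console.log(formatFileSize(10 * 1024 * 1024)); // "10 MB"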

View File

@@ -16,12 +16,26 @@ import {
X,
ImageIcon,
ChevronDown,
FileText,
} from 'lucide-react';
import { cn } from '@/lib/utils';
import { useElectronAgent } from '@/hooks/use-electron-agent';
import { SessionManager } from '@/components/session-manager';
import { Markdown } from '@/components/ui/markdown';
import type { ImageAttachment } from '@/store/app-store';
import type { ImageAttachment, TextFileAttachment } from '@/store/app-store';
import {
fileToBase64,
generateImageId,
generateFileId,
validateImageFile,
validateTextFile,
isTextFile,
isImageFile,
fileToText,
getTextFileMimeType,
DEFAULT_MAX_FILE_SIZE,
DEFAULT_MAX_FILES,
} from '@/lib/image-utils';
import {
useKeyboardShortcuts,
useKeyboardShortcutsConfig,
@@ -40,6 +54,7 @@ export function AgentView() {
const shortcuts = useKeyboardShortcutsConfig();
const [input, setInput] = useState('');
const [selectedImages, setSelectedImages] = useState<ImageAttachment[]>([]);
const [selectedTextFiles, setSelectedTextFiles] = useState<TextFileAttachment[]>([]);
const [showImageDropZone, setShowImageDropZone] = useState(false);
const [currentTool, setCurrentTool] = useState<string | null>(null);
const [currentSessionId, setCurrentSessionId] = useState<string | null>(null);
@@ -116,17 +131,23 @@ export function AgentView() {
}, [currentProject?.path]);
const handleSend = useCallback(async () => {
if ((!input.trim() && selectedImages.length === 0) || isProcessing) return;
if (
(!input.trim() && selectedImages.length === 0 && selectedTextFiles.length === 0) ||
isProcessing
)
return;
const messageContent = input;
const messageImages = selectedImages;
const messageTextFiles = selectedTextFiles;
setInput('');
setSelectedImages([]);
setSelectedTextFiles([]);
setShowImageDropZone(false);
await sendMessage(messageContent, messageImages);
}, [input, selectedImages, isProcessing, sendMessage]);
await sendMessage(messageContent, messageImages, messageTextFiles);
}, [input, selectedImages, selectedTextFiles, isProcessing, sendMessage]);
const handleImagesSelected = useCallback((images: ImageAttachment[]) => {
setSelectedImages(images);
@@ -136,84 +157,99 @@ export function AgentView() {
setShowImageDropZone(!showImageDropZone);
}, [showImageDropZone]);
// Helper function to convert file to base64
const fileToBase64 = useCallback((file: File): Promise<string> => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => {
if (typeof reader.result === 'string') {
resolve(reader.result);
} else {
reject(new Error('Failed to read file as base64'));
}
};
reader.onerror = () => reject(new Error('Failed to read file'));
reader.readAsDataURL(file);
});
}, []);
// Process dropped files
// Process dropped files (images and text files)
const processDroppedFiles = useCallback(
async (files: FileList) => {
if (isProcessing) return;
const ACCEPTED_IMAGE_TYPES = [
'image/jpeg',
'image/jpg',
'image/png',
'image/gif',
'image/webp',
];
const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
const MAX_FILES = 5;
const newImages: ImageAttachment[] = [];
const newTextFiles: TextFileAttachment[] = [];
const errors: string[] = [];
for (const file of Array.from(files)) {
// Validate file type
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
errors.push(`${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`);
continue;
}
// Check if it's a text file
if (isTextFile(file)) {
const validation = validateTextFile(file);
if (!validation.isValid) {
errors.push(validation.error!);
continue;
}
// Validate file size
if (file.size > MAX_FILE_SIZE) {
const maxSizeMB = MAX_FILE_SIZE / (1024 * 1024);
errors.push(`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`);
continue;
}
// Check if we've reached max files
const totalFiles =
newImages.length +
selectedImages.length +
newTextFiles.length +
selectedTextFiles.length;
if (totalFiles >= DEFAULT_MAX_FILES) {
errors.push(`Maximum ${DEFAULT_MAX_FILES} files allowed.`);
break;
}
// Check if we've reached max files
if (newImages.length + selectedImages.length >= MAX_FILES) {
errors.push(`Maximum ${MAX_FILES} images allowed.`);
break;
try {
const content = await fileToText(file);
const textFileAttachment: TextFileAttachment = {
id: generateFileId(),
content,
mimeType: getTextFileMimeType(file.name),
filename: file.name,
size: file.size,
};
newTextFiles.push(textFileAttachment);
} catch {
errors.push(`${file.name}: Failed to read text file.`);
}
}
// Check if it's an image file
else if (isImageFile(file)) {
const validation = validateImageFile(file, DEFAULT_MAX_FILE_SIZE);
if (!validation.isValid) {
errors.push(validation.error!);
continue;
}
try {
const base64 = await fileToBase64(file);
const imageAttachment: ImageAttachment = {
id: `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
data: base64,
mimeType: file.type,
filename: file.name,
size: file.size,
};
newImages.push(imageAttachment);
} catch (error) {
errors.push(`${file.name}: Failed to process image.`);
// Check if we've reached max files
const totalFiles =
newImages.length +
selectedImages.length +
newTextFiles.length +
selectedTextFiles.length;
if (totalFiles >= DEFAULT_MAX_FILES) {
errors.push(`Maximum ${DEFAULT_MAX_FILES} files allowed.`);
break;
}
try {
const base64 = await fileToBase64(file);
const imageAttachment: ImageAttachment = {
id: generateImageId(),
data: base64,
mimeType: file.type,
filename: file.name,
size: file.size,
};
newImages.push(imageAttachment);
} catch {
errors.push(`${file.name}: Failed to process image.`);
}
} else {
errors.push(`${file.name}: Unsupported file type. Use images, .txt, or .md files.`);
}
}
if (errors.length > 0) {
console.warn('Image upload errors:', errors);
console.warn('File upload errors:', errors);
}
if (newImages.length > 0) {
setSelectedImages((prev) => [...prev, ...newImages]);
}
if (newTextFiles.length > 0) {
setSelectedTextFiles((prev) => [...prev, ...newTextFiles]);
}
},
[isProcessing, selectedImages, fileToBase64]
[isProcessing, selectedImages, selectedTextFiles]
);
// Remove individual image
@@ -221,6 +257,11 @@ export function AgentView() {
setSelectedImages((prev) => prev.filter((img) => img.id !== imageId));
}, []);
// Remove individual text file
const removeTextFile = useCallback((fileId: string) => {
setSelectedTextFiles((prev) => prev.filter((file) => file.id !== fileId));
}, []);
// Drag and drop handlers for the input area
const handleDragEnter = useCallback(
(e: React.DragEvent) => {
@@ -720,16 +761,19 @@ export function AgentView() {
/>
)}
{/* Selected Images Preview - only show when ImageDropZone is hidden to avoid duplicate display */}
{selectedImages.length > 0 && !showImageDropZone && (
{/* Selected Files Preview - only show when ImageDropZone is hidden to avoid duplicate display */}
{(selectedImages.length > 0 || selectedTextFiles.length > 0) && !showImageDropZone && (
<div className="mb-4 space-y-2">
<div className="flex items-center justify-between">
<p className="text-xs font-medium text-foreground">
{selectedImages.length} image
{selectedImages.length > 1 ? 's' : ''} attached
{selectedImages.length + selectedTextFiles.length} file
{selectedImages.length + selectedTextFiles.length > 1 ? 's' : ''} attached
</p>
<button
onClick={() => setSelectedImages([])}
onClick={() => {
setSelectedImages([]);
setSelectedTextFiles([]);
}}
className="text-xs text-muted-foreground hover:text-foreground transition-colors"
disabled={isProcessing}
>
@@ -737,6 +781,7 @@ export function AgentView() {
</button>
</div>
<div className="flex flex-wrap gap-2">
{/* Image attachments */}
{selectedImages.map((image) => (
<div
key={image.id}
@@ -773,6 +818,35 @@ export function AgentView() {
)}
</div>
))}
{/* Text file attachments */}
{selectedTextFiles.map((file) => (
<div
key={file.id}
className="group relative rounded-lg border border-border bg-muted/30 p-2 flex items-center gap-2 hover:border-primary/30 transition-colors"
>
{/* File icon */}
<div className="w-8 h-8 rounded-md bg-muted flex-shrink-0 flex items-center justify-center">
<FileText className="w-4 h-4 text-muted-foreground" />
</div>
{/* File info */}
<div className="min-w-0 flex-1">
<p className="text-xs font-medium text-foreground truncate max-w-24">
{file.filename}
</p>
<p className="text-[10px] text-muted-foreground">
{formatFileSize(file.size)}
</p>
</div>
{/* Remove button */}
<button
onClick={() => removeTextFile(file.id)}
className="opacity-0 group-hover:opacity-100 transition-opacity p-1 rounded-full hover:bg-destructive/10 text-muted-foreground hover:text-destructive"
disabled={isProcessing}
>
<X className="h-3 w-3" />
</button>
</div>
))}
</div>
</div>
)}
@@ -792,7 +866,7 @@ export function AgentView() {
<Input
ref={inputRef}
placeholder={
isDragOver ? 'Drop your images here...' : 'Describe what you want to build...'
isDragOver ? 'Drop your files here...' : 'Describe what you want to build...'
}
value={input}
onChange={(e) => setInput(e.target.value)}
@@ -803,14 +877,15 @@ export function AgentView() {
className={cn(
'h-11 bg-background border-border rounded-xl pl-4 pr-20 text-sm transition-all',
'focus:ring-2 focus:ring-primary/20 focus:border-primary/50',
selectedImages.length > 0 && 'border-primary/30',
(selectedImages.length > 0 || selectedTextFiles.length > 0) &&
'border-primary/30',
isDragOver && 'border-primary bg-primary/5'
)}
/>
{selectedImages.length > 0 && !isDragOver && (
{(selectedImages.length > 0 || selectedTextFiles.length > 0) && !isDragOver && (
<div className="absolute right-3 top-1/2 -translate-y-1/2 text-xs bg-primary text-primary-foreground px-2 py-0.5 rounded-full font-medium">
{selectedImages.length} image
{selectedImages.length > 1 ? 's' : ''}
{selectedImages.length + selectedTextFiles.length} file
{selectedImages.length + selectedTextFiles.length > 1 ? 's' : ''}
</div>
)}
{isDragOver && (
@@ -821,7 +896,7 @@ export function AgentView() {
)}
</div>
{/* Image Attachment Button */}
{/* File Attachment Button */}
<Button
variant="outline"
size="icon"
@@ -830,9 +905,10 @@ export function AgentView() {
className={cn(
'h-11 w-11 rounded-xl border-border',
showImageDropZone && 'bg-primary/10 text-primary border-primary/30',
selectedImages.length > 0 && 'border-primary/30 text-primary'
(selectedImages.length > 0 || selectedTextFiles.length > 0) &&
'border-primary/30 text-primary'
)}
title="Attach images"
title="Attach files (images, .txt, .md)"
>
<Paperclip className="w-4 h-4" />
</Button>
@@ -841,7 +917,11 @@ export function AgentView() {
<Button
onClick={handleSend}
disabled={
(!input.trim() && selectedImages.length === 0) || isProcessing || !isConnected
(!input.trim() &&
selectedImages.length === 0 &&
selectedTextFiles.length === 0) ||
isProcessing ||
!isConnected
}
className="h-11 px-4 rounded-xl"
data-testid="send-message"
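Note: the send button and handleSend now gate on both attachment lists as well as the text input. Extracted as a predicate it reads roughly as below; the names mirror the component state, but the helper itself is illustrative.

import type { ImageAttachment, TextFileAttachment } from '@/store/app-store';

// True when there is nothing to send or the agent is busy/disconnected.
export function isSendDisabled(
  input: string,
  images: ImageAttachment[],
  textFiles: TextFileAttachment[],
  isProcessing: boolean,
  isConnected: boolean
): boolean {
  const hasContent = input.trim().length > 0 || images.length > 0 || textFiles.length > 0;
  return !hasContent || isProcessing || !isConnected;
}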

View File

@@ -1,5 +1,4 @@
import { useState, useEffect } from "react";
import { useState, useEffect } from 'react';
import {
Dialog,
DialogContent,
@@ -7,18 +6,19 @@ import {
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import { Tabs, TabsList, TabsTrigger, TabsContent } from "@/components/ui/tabs";
import { Button } from "@/components/ui/button";
import { HotkeyButton } from "@/components/ui/hotkey-button";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { CategoryAutocomplete } from "@/components/ui/category-autocomplete";
} from '@/components/ui/dialog';
import { Tabs, TabsList, TabsTrigger, TabsContent } from '@/components/ui/tabs';
import { Button } from '@/components/ui/button';
import { HotkeyButton } from '@/components/ui/hotkey-button';
import { Input } from '@/components/ui/input';
import { Label } from '@/components/ui/label';
import { CategoryAutocomplete } from '@/components/ui/category-autocomplete';
import {
DescriptionImageDropZone,
FeatureImagePath as DescriptionImagePath,
FeatureTextFilePath as DescriptionTextFilePath,
ImagePreviewMap,
} from "@/components/ui/description-image-dropzone";
} from '@/components/ui/description-image-dropzone';
import {
MessageSquare,
Settings2,
@@ -26,10 +26,10 @@ import {
FlaskConical,
Sparkles,
ChevronDown,
} from "lucide-react";
import { toast } from "sonner";
import { getElectronAPI } from "@/lib/electron";
import { modelSupportsThinking } from "@/lib/utils";
} from 'lucide-react';
import { toast } from 'sonner';
import { getElectronAPI } from '@/lib/electron';
import { modelSupportsThinking } from '@/lib/utils';
import {
useAppStore,
AgentModel,
@@ -37,7 +37,7 @@ import {
FeatureImage,
AIProfile,
PlanningMode,
} from "@/store/app-store";
} from '@/store/app-store';
import {
ModelSelector,
ThinkingLevelSelector,
@@ -46,14 +46,14 @@ import {
PrioritySelector,
BranchSelector,
PlanningModeSelector,
} from "../shared";
} from '../shared';
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { useNavigate } from "@tanstack/react-router";
} from '@/components/ui/dropdown-menu';
import { useNavigate } from '@tanstack/react-router';
interface AddFeatureDialogProps {
open: boolean;
@@ -65,6 +65,7 @@ interface AddFeatureDialogProps {
steps: string[];
images: FeatureImage[];
imagePaths: DescriptionImagePath[];
textFilePaths: DescriptionTextFilePath[];
skipTests: boolean;
model: AgentModel;
thinkingLevel: ThinkingLevel;
@@ -92,7 +93,7 @@ export function AddFeatureDialog({
branchSuggestions,
branchCardCounts,
defaultSkipTests,
defaultBranch = "main",
defaultBranch = 'main',
currentBranch,
isMaximized,
showProfilesOnly,
@@ -101,27 +102,29 @@ export function AddFeatureDialog({
const navigate = useNavigate();
const [useCurrentBranch, setUseCurrentBranch] = useState(true);
const [newFeature, setNewFeature] = useState({
title: "",
category: "",
description: "",
steps: [""],
title: '',
category: '',
description: '',
steps: [''],
images: [] as FeatureImage[],
imagePaths: [] as DescriptionImagePath[],
textFilePaths: [] as DescriptionTextFilePath[],
skipTests: false,
model: "opus" as AgentModel,
thinkingLevel: "none" as ThinkingLevel,
branchName: "",
model: 'opus' as AgentModel,
thinkingLevel: 'none' as ThinkingLevel,
branchName: '',
priority: 2 as number, // Default to medium priority
});
const [newFeaturePreviewMap, setNewFeaturePreviewMap] =
useState<ImagePreviewMap>(() => new Map());
const [newFeaturePreviewMap, setNewFeaturePreviewMap] = useState<ImagePreviewMap>(
() => new Map()
);
const [showAdvancedOptions, setShowAdvancedOptions] = useState(false);
const [descriptionError, setDescriptionError] = useState(false);
const [isEnhancing, setIsEnhancing] = useState(false);
const [enhancementMode, setEnhancementMode] = useState<
"improve" | "technical" | "simplify" | "acceptance"
>("improve");
const [planningMode, setPlanningMode] = useState<PlanningMode>("skip");
'improve' | 'technical' | 'simplify' | 'acceptance'
>('improve');
const [planningMode, setPlanningMode] = useState<PlanningMode>('skip');
const [requirePlanApproval, setRequirePlanApproval] = useState(false);
// Get enhancement model, planning mode defaults, and worktrees setting from store
@@ -144,10 +147,10 @@ export function AddFeatureDialog({
setNewFeature((prev) => ({
...prev,
skipTests: defaultSkipTests,
branchName: defaultBranch || "",
branchName: defaultBranch || '',
// Use default profile's model/thinkingLevel if set, else fallback to defaults
model: defaultProfile?.model ?? "opus",
thinkingLevel: defaultProfile?.thinkingLevel ?? "none",
model: defaultProfile?.model ?? 'opus',
thinkingLevel: defaultProfile?.thinkingLevel ?? 'none',
}));
setUseCurrentBranch(true);
setPlanningMode(defaultPlanningMode);
@@ -171,22 +174,20 @@ export function AddFeatureDialog({
// Validate branch selection when "other branch" is selected
if (useWorktrees && !useCurrentBranch && !newFeature.branchName.trim()) {
toast.error("Please select a branch name");
toast.error('Please select a branch name');
return;
}
const category = newFeature.category || "Uncategorized";
const category = newFeature.category || 'Uncategorized';
const selectedModel = newFeature.model;
const normalizedThinking = modelSupportsThinking(selectedModel)
? newFeature.thinkingLevel
: "none";
: 'none';
// Use current branch if toggle is on
// If currentBranch is provided (non-primary worktree), use it
// Otherwise (primary worktree), use empty string which means "unassigned" (show only on primary)
const finalBranchName = useCurrentBranch
? currentBranch || ""
: newFeature.branchName || "";
const finalBranchName = useCurrentBranch ? currentBranch || '' : newFeature.branchName || '';
onAdd({
title: newFeature.title,
@@ -195,6 +196,7 @@ export function AddFeatureDialog({
steps: newFeature.steps.filter((s) => s.trim()),
images: newFeature.images,
imagePaths: newFeature.imagePaths,
textFilePaths: newFeature.textFilePaths,
skipTests: newFeature.skipTests,
model: selectedModel,
thinkingLevel: normalizedThinking,
@@ -206,17 +208,18 @@ export function AddFeatureDialog({
// Reset form
setNewFeature({
title: "",
category: "",
description: "",
steps: [""],
title: '',
category: '',
description: '',
steps: [''],
images: [],
imagePaths: [],
textFilePaths: [],
skipTests: defaultSkipTests,
model: "opus",
model: 'opus',
priority: 2,
thinkingLevel: "none",
branchName: "",
thinkingLevel: 'none',
branchName: '',
});
setUseCurrentBranch(true);
setPlanningMode(defaultPlanningMode);
@@ -251,13 +254,13 @@ export function AddFeatureDialog({
if (result?.success && result.enhancedText) {
const enhancedText = result.enhancedText;
setNewFeature((prev) => ({ ...prev, description: enhancedText }));
toast.success("Description enhanced!");
toast.success('Description enhanced!');
} else {
toast.error(result?.error || "Failed to enhance description");
toast.error(result?.error || 'Failed to enhance description');
}
} catch (error) {
console.error("Enhancement failed:", error);
toast.error("Failed to enhance description");
console.error('Enhancement failed:', error);
toast.error('Failed to enhance description');
} finally {
setIsEnhancing(false);
}
@@ -267,16 +270,11 @@ export function AddFeatureDialog({
setNewFeature({
...newFeature,
model,
thinkingLevel: modelSupportsThinking(model)
? newFeature.thinkingLevel
: "none",
thinkingLevel: modelSupportsThinking(model) ? newFeature.thinkingLevel : 'none',
});
};
const handleProfileSelect = (
model: AgentModel,
thinkingLevel: ThinkingLevel
) => {
const handleProfileSelect = (model: AgentModel, thinkingLevel: ThinkingLevel) => {
setNewFeature({
...newFeature,
model,
@@ -306,14 +304,9 @@ export function AddFeatureDialog({
>
<DialogHeader>
<DialogTitle>Add New Feature</DialogTitle>
<DialogDescription>
Create a new feature card for the Kanban board.
</DialogDescription>
<DialogDescription>Create a new feature card for the Kanban board.</DialogDescription>
</DialogHeader>
<Tabs
defaultValue="prompt"
className="py-4 flex-1 min-h-0 flex flex-col"
>
<Tabs defaultValue="prompt" className="py-4 flex-1 min-h-0 flex flex-col">
<TabsList className="w-full grid grid-cols-3 mb-4">
<TabsTrigger value="prompt" data-testid="tab-prompt">
<MessageSquare className="w-4 h-4 mr-2" />
@@ -330,10 +323,7 @@ export function AddFeatureDialog({
</TabsList>
{/* Prompt Tab */}
<TabsContent
value="prompt"
className="space-y-4 overflow-y-auto cursor-default"
>
<TabsContent value="prompt" className="space-y-4 overflow-y-auto cursor-default">
<div className="space-y-2">
<Label htmlFor="description">Description</Label>
<DescriptionImageDropZone
@@ -345,8 +335,10 @@ export function AddFeatureDialog({
}
}}
images={newFeature.imagePaths}
onImagesChange={(images) =>
setNewFeature({ ...newFeature, imagePaths: images })
onImagesChange={(images) => setNewFeature({ ...newFeature, imagePaths: images })}
textFiles={newFeature.textFilePaths}
onTextFilesChange={(textFiles) =>
setNewFeature({ ...newFeature, textFilePaths: textFiles })
}
placeholder="Describe the feature..."
previewMap={newFeaturePreviewMap}
@@ -360,47 +352,32 @@ export function AddFeatureDialog({
<Input
id="title"
value={newFeature.title}
onChange={(e) =>
setNewFeature({ ...newFeature, title: e.target.value })
}
onChange={(e) => setNewFeature({ ...newFeature, title: e.target.value })}
placeholder="Leave blank to auto-generate"
/>
</div>
<div className="flex w-fit items-center gap-3 select-none cursor-default">
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant="outline"
size="sm"
className="w-[200px] justify-between"
>
{enhancementMode === "improve" && "Improve Clarity"}
{enhancementMode === "technical" && "Add Technical Details"}
{enhancementMode === "simplify" && "Simplify"}
{enhancementMode === "acceptance" &&
"Add Acceptance Criteria"}
<Button variant="outline" size="sm" className="w-[200px] justify-between">
{enhancementMode === 'improve' && 'Improve Clarity'}
{enhancementMode === 'technical' && 'Add Technical Details'}
{enhancementMode === 'simplify' && 'Simplify'}
{enhancementMode === 'acceptance' && 'Add Acceptance Criteria'}
<ChevronDown className="w-4 h-4 ml-2" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="start">
<DropdownMenuItem
onClick={() => setEnhancementMode("improve")}
>
<DropdownMenuItem onClick={() => setEnhancementMode('improve')}>
Improve Clarity
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => setEnhancementMode("technical")}
>
<DropdownMenuItem onClick={() => setEnhancementMode('technical')}>
Add Technical Details
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => setEnhancementMode("simplify")}
>
<DropdownMenuItem onClick={() => setEnhancementMode('simplify')}>
Simplify
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => setEnhancementMode("acceptance")}
>
<DropdownMenuItem onClick={() => setEnhancementMode('acceptance')}>
Add Acceptance Criteria
</DropdownMenuItem>
</DropdownMenuContent>
@@ -422,9 +399,7 @@ export function AddFeatureDialog({
<Label htmlFor="category">Category (optional)</Label>
<CategoryAutocomplete
value={newFeature.category}
onChange={(value) =>
setNewFeature({ ...newFeature, category: value })
}
onChange={(value) => setNewFeature({ ...newFeature, category: value })}
suggestions={categorySuggestions}
placeholder="e.g., Core, UI, API"
data-testid="feature-category-input"
@@ -435,9 +410,7 @@ export function AddFeatureDialog({
useCurrentBranch={useCurrentBranch}
onUseCurrentBranchChange={setUseCurrentBranch}
branchName={newFeature.branchName}
onBranchNameChange={(value) =>
setNewFeature({ ...newFeature, branchName: value })
}
onBranchNameChange={(value) => setNewFeature({ ...newFeature, branchName: value })}
branchSuggestions={branchSuggestions}
branchCardCounts={branchCardCounts}
currentBranch={currentBranch}
@@ -448,25 +421,18 @@ export function AddFeatureDialog({
{/* Priority Selector */}
<PrioritySelector
selectedPriority={newFeature.priority}
onPrioritySelect={(priority) =>
setNewFeature({ ...newFeature, priority })
}
onPrioritySelect={(priority) => setNewFeature({ ...newFeature, priority })}
testIdPrefix="priority"
/>
</TabsContent>
{/* Model Tab */}
<TabsContent
value="model"
className="space-y-4 overflow-y-auto cursor-default"
>
<TabsContent value="model" className="space-y-4 overflow-y-auto cursor-default">
{/* Show Advanced Options Toggle */}
{showProfilesOnly && (
<div className="flex items-center justify-between p-3 bg-muted/30 rounded-lg border border-border">
<div className="space-y-1">
<p className="text-sm font-medium text-foreground">
Simple Mode Active
</p>
<p className="text-sm font-medium text-foreground">Simple Mode Active</p>
<p className="text-xs text-muted-foreground">
Only showing AI profiles. Advanced model tweaking is hidden.
</p>
@@ -478,7 +444,7 @@ export function AddFeatureDialog({
data-testid="show-advanced-options-toggle"
>
<Settings2 className="w-4 h-4 mr-2" />
{showAdvancedOptions ? "Hide" : "Show"} Advanced
{showAdvancedOptions ? 'Hide' : 'Show'} Advanced
</Button>
</div>
)}
@@ -492,23 +458,19 @@ export function AddFeatureDialog({
showManageLink
onManageLinkClick={() => {
onOpenChange(false);
navigate({ to: "/profiles" });
navigate({ to: '/profiles' });
}}
/>
{/* Separator */}
{aiProfiles.length > 0 &&
(!showProfilesOnly || showAdvancedOptions) && (
<div className="border-t border-border" />
)}
{aiProfiles.length > 0 && (!showProfilesOnly || showAdvancedOptions) && (
<div className="border-t border-border" />
)}
{/* Claude Models Section */}
{(!showProfilesOnly || showAdvancedOptions) && (
<>
<ModelSelector
selectedModel={newFeature.model}
onModelSelect={handleModelSelect}
/>
<ModelSelector selectedModel={newFeature.model} onModelSelect={handleModelSelect} />
{newModelAllowsThinking && (
<ThinkingLevelSelector
selectedLevel={newFeature.thinkingLevel}
@@ -522,10 +484,7 @@ export function AddFeatureDialog({
</TabsContent>
{/* Options Tab */}
<TabsContent
value="options"
className="space-y-4 overflow-y-auto cursor-default"
>
<TabsContent value="options" className="space-y-4 overflow-y-auto cursor-default">
{/* Planning Mode Section */}
<PlanningModeSelector
mode={planningMode}
@@ -542,9 +501,7 @@ export function AddFeatureDialog({
{/* Testing Section */}
<TestingTabContent
skipTests={newFeature.skipTests}
onSkipTestsChange={(skipTests) =>
setNewFeature({ ...newFeature, skipTests })
}
onSkipTestsChange={(skipTests) => setNewFeature({ ...newFeature, skipTests })}
steps={newFeature.steps}
onStepsChange={(steps) => setNewFeature({ ...newFeature, steps })}
/>
@@ -556,12 +513,10 @@ export function AddFeatureDialog({
</Button>
<HotkeyButton
onClick={handleAdd}
hotkey={{ key: "Enter", cmdCtrl: true }}
hotkey={{ key: 'Enter', cmdCtrl: true }}
hotkeyActive={open}
data-testid="confirm-add-feature"
disabled={
useWorktrees && !useCurrentBranch && !newFeature.branchName.trim()
}
disabled={useWorktrees && !useCurrentBranch && !newFeature.branchName.trim()}
>
Add Feature
</HotkeyButton>
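Note: the dialog's onAdd payload grows a textFilePaths array alongside imagePaths. A trimmed sketch of the shape handed to the parent; only the fields visible in this diff are shown and the values are illustrative, with text files carrying content inline and an empty path per the drop zone above.

// Illustrative payload passed to onAdd when the form is submitted.
const examplePayload = {
  title: 'Support .md context files',
  category: 'Uncategorized',
  description: 'Let users attach markdown notes to a feature.',
  steps: [] as string[],
  images: [], // FeatureImage[] (inline base64 attachments)
  imagePaths: [], // DescriptionImagePath[] (saved to temp paths)
  textFilePaths: [
    { id: 'file-123', path: '', filename: 'notes.md', mimeType: 'text/markdown', content: '# Notes' },
  ],
  skipTests: false,
  model: 'opus' as const,
  thinkingLevel: 'none' as const,
  branchName: '',
  priority: 2,
};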

View File

@@ -16,6 +16,7 @@ import { CategoryAutocomplete } from '@/components/ui/category-autocomplete';
import {
DescriptionImageDropZone,
FeatureImagePath as DescriptionImagePath,
FeatureTextFilePath as DescriptionTextFilePath,
ImagePreviewMap,
} from '@/components/ui/description-image-dropzone';
import {
@@ -68,6 +69,7 @@ interface EditFeatureDialogProps {
model: AgentModel;
thinkingLevel: ThinkingLevel;
imagePaths: DescriptionImagePath[];
textFilePaths: DescriptionTextFilePath[];
branchName: string; // Can be empty string to use current branch
priority: number;
planningMode: PlanningMode;
@@ -168,6 +170,7 @@ export function EditFeatureDialog({
model: selectedModel,
thinkingLevel: normalizedThinking,
imagePaths: editingFeature.imagePaths ?? [],
textFilePaths: editingFeature.textFilePaths ?? [],
branchName: finalBranchName,
priority: editingFeature.priority ?? 2,
planningMode,
@@ -294,6 +297,13 @@ export function EditFeatureDialog({
imagePaths: images,
})
}
textFiles={editingFeature.textFilePaths ?? []}
onTextFilesChange={(textFiles) =>
setEditingFeature({
...editingFeature,
textFilePaths: textFiles,
})
}
placeholder="Describe the feature..."
previewMap={editFeaturePreviewMap}
onPreviewMapChange={setEditFeaturePreviewMap}
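Note: features persisted before this change have no textFilePaths field, so the edit dialog defaults it with nullish coalescing. The same pattern applies anywhere an older record is loaded; a sketch with a hypothetical LegacyFeature type, not the store's actual Feature type.

// Hypothetical shape of a feature loaded from disk before textFilePaths existed.
interface LegacyFeature {
  imagePaths?: { id: string; path: string; filename: string; mimeType: string }[];
  textFilePaths?: { id: string; path: string; filename: string; mimeType: string; content: string }[];
}

function withDefaults(feature: LegacyFeature) {
  return {
    ...feature,
    imagePaths: feature.imagePaths ?? [],
    textFilePaths: feature.textFilePaths ?? [], // old records simply get an empty list
  };
}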

File diff suppressed because it is too large

View File

@@ -1,8 +1,9 @@
import { useState, useEffect, useCallback, useRef } from "react";
import type { Message, StreamEvent } from "@/types/electron";
import { useMessageQueue } from "./use-message-queue";
import type { ImageAttachment } from "@/store/app-store";
import { getElectronAPI } from "@/lib/electron";
import { useState, useEffect, useCallback, useRef } from 'react';
import type { Message, StreamEvent } from '@/types/electron';
import { useMessageQueue } from './use-message-queue';
import type { ImageAttachment, TextFileAttachment } from '@/store/app-store';
import { getElectronAPI } from '@/lib/electron';
import { sanitizeFilename } from '@/lib/image-utils';
interface UseElectronAgentOptions {
sessionId: string;
@@ -15,12 +16,22 @@ interface UseElectronAgentResult {
messages: Message[];
isProcessing: boolean;
isConnected: boolean;
sendMessage: (content: string, images?: ImageAttachment[]) => Promise<void>;
sendMessage: (
content: string,
images?: ImageAttachment[],
textFiles?: TextFileAttachment[]
) => Promise<void>;
stopExecution: () => Promise<void>;
clearHistory: () => Promise<void>;
error: string | null;
// Queue-related state
queuedMessages: { id: string; content: string; images?: ImageAttachment[]; timestamp: Date }[];
queuedMessages: {
id: string;
content: string;
images?: ImageAttachment[];
textFiles?: TextFileAttachment[];
timestamp: Date;
}[];
isQueueProcessing: boolean;
clearMessageQueue: () => void;
}
@@ -46,26 +57,38 @@ export function useElectronAgent({
// Send message directly to the agent (bypassing queue)
const sendMessageDirectly = useCallback(
async (content: string, images?: ImageAttachment[]) => {
async (content: string, images?: ImageAttachment[], textFiles?: TextFileAttachment[]) => {
const api = getElectronAPI();
if (!api?.agent) {
setError("API not available");
setError('API not available');
return;
}
if (isProcessing) {
throw new Error("Agent is already processing a message");
throw new Error('Agent is already processing a message');
}
setIsProcessing(true);
setError(null);
try {
console.log("[useElectronAgent] Sending message directly", {
console.log('[useElectronAgent] Sending message directly', {
hasImages: images && images.length > 0,
imageCount: images?.length || 0
imageCount: images?.length || 0,
hasTextFiles: textFiles && textFiles.length > 0,
textFileCount: textFiles?.length || 0,
});
// Build message content with text file context prepended
let messageContent = content;
if (textFiles && textFiles.length > 0) {
const contextParts = textFiles.map((file) => {
return `<file name="${file.filename}">\n${file.content}\n</file>`;
});
const contextBlock = `Here are some files for context:\n\n${contextParts.join('\n\n')}\n\n`;
messageContent = contextBlock + content;
}
// Save images to .automaker/images and get paths
let imagePaths: string[] | undefined;
if (images && images.length > 0 && api.saveImageToTemp) {
@@ -73,36 +96,36 @@ export function useElectronAgent({
for (const image of images) {
const result = await api.saveImageToTemp(
image.data,
image.filename,
sanitizeFilename(image.filename),
image.mimeType,
workingDirectory // Pass workingDirectory as projectPath
);
if (result.success && result.path) {
imagePaths.push(result.path);
console.log("[useElectronAgent] Saved image to .automaker/images:", result.path);
console.log('[useElectronAgent] Saved image to .automaker/images:', result.path);
} else {
console.error("[useElectronAgent] Failed to save image:", result.error);
console.error('[useElectronAgent] Failed to save image:', result.error);
}
}
}
const result = await api.agent!.send(
sessionId,
content,
messageContent,
workingDirectory,
imagePaths,
model
);
if (!result.success) {
setError(result.error || "Failed to send message");
setError(result.error || 'Failed to send message');
setIsProcessing(false);
}
// Note: We don't set isProcessing to false here because
// it will be set by the "complete" or "error" stream event
} catch (err) {
console.error("[useElectronAgent] Failed to send message:", err);
setError(err instanceof Error ? err.message : "Failed to send message");
console.error('[useElectronAgent] Failed to send message:', err);
setError(err instanceof Error ? err.message : 'Failed to send message');
setIsProcessing(false);
throw err;
}
@@ -111,23 +134,22 @@ export function useElectronAgent({
);
// Message queue for queuing messages when agent is busy
const {
queuedMessages,
isProcessingQueue,
addToQueue,
clearQueue,
processNext,
} = useMessageQueue({
onProcessNext: async (queuedMessage) => {
await sendMessageDirectly(queuedMessage.content, queuedMessage.images);
},
});
const { queuedMessages, isProcessingQueue, addToQueue, clearQueue, processNext } =
useMessageQueue({
onProcessNext: async (queuedMessage) => {
await sendMessageDirectly(
queuedMessage.content,
queuedMessage.images,
queuedMessage.textFiles
);
},
});
// Initialize connection and load history
useEffect(() => {
const api = getElectronAPI();
if (!api?.agent) {
setError("API not available.");
setError('API not available.');
return;
}
@@ -147,16 +169,13 @@ export function useElectronAgent({
setError(null);
try {
console.log("[useElectronAgent] Starting session:", sessionId);
const result = await api.agent!.start(
sessionId,
workingDirectory
);
console.log('[useElectronAgent] Starting session:', sessionId);
const result = await api.agent!.start(sessionId, workingDirectory);
if (!mounted) return;
if (result.success && result.messages) {
console.log("[useElectronAgent] Loaded", result.messages.length, "messages");
console.log('[useElectronAgent] Loaded', result.messages.length, 'messages');
setMessages(result.messages);
setIsConnected(true);
@@ -164,17 +183,17 @@ export function useElectronAgent({
const historyResult = await api.agent!.getHistory(sessionId);
if (mounted && historyResult.success) {
const isRunning = historyResult.isRunning || false;
console.log("[useElectronAgent] Session running state:", isRunning);
console.log('[useElectronAgent] Session running state:', isRunning);
setIsProcessing(isRunning);
}
} else {
setError(result.error || "Failed to start session");
setError(result.error || 'Failed to start session');
setIsProcessing(false);
}
} catch (err) {
if (!mounted) return;
console.error("[useElectronAgent] Failed to initialize:", err);
setError(err instanceof Error ? err.message : "Failed to initialize");
console.error('[useElectronAgent] Failed to initialize:', err);
setError(err instanceof Error ? err.message : 'Failed to initialize');
setIsProcessing(false);
}
};
@@ -189,7 +208,7 @@ export function useElectronAgent({
// Auto-process queue when agent finishes processing
useEffect(() => {
if (!isProcessing && !isProcessingQueue && queuedMessages.length > 0) {
console.log("[useElectronAgent] Auto-processing next queued message");
console.log('[useElectronAgent] Auto-processing next queued message');
processNext();
}
}, [isProcessing, isProcessingQueue, queuedMessages.length, processNext]);
@@ -200,32 +219,30 @@ export function useElectronAgent({
if (!api?.agent) return;
if (!sessionId) return; // Don't subscribe if no session
console.log("[useElectronAgent] Subscribing to stream events for session:", sessionId);
console.log('[useElectronAgent] Subscribing to stream events for session:', sessionId);
const handleStream = (event: StreamEvent) => {
// CRITICAL: Only process events for our specific session
if (event.sessionId !== sessionId) {
console.log("[useElectronAgent] Ignoring event for different session:", event.sessionId);
console.log('[useElectronAgent] Ignoring event for different session:', event.sessionId);
return;
}
console.log("[useElectronAgent] Stream event for", sessionId, ":", event.type);
console.log('[useElectronAgent] Stream event for', sessionId, ':', event.type);
switch (event.type) {
case "message":
case 'message':
// User message added
setMessages((prev) => [...prev, event.message]);
break;
case "stream":
case 'stream':
// Assistant message streaming
if (event.isComplete) {
// Final update
setMessages((prev) =>
prev.map((msg) =>
msg.id === event.messageId
? { ...msg, content: event.content }
: msg
msg.id === event.messageId ? { ...msg, content: event.content } : msg
)
);
currentMessageRef.current = null;
@@ -236,15 +253,13 @@ export function useElectronAgent({
if (existingIndex >= 0) {
// Update existing message
return prev.map((msg) =>
msg.id === event.messageId
? { ...msg, content: event.content }
: msg
msg.id === event.messageId ? { ...msg, content: event.content } : msg
);
} else {
// Create new message
const newMessage: Message = {
id: event.messageId,
role: "assistant",
role: 'assistant',
content: event.content,
timestamp: new Date().toISOString(),
};
@@ -255,30 +270,28 @@ export function useElectronAgent({
}
break;
case "tool_use":
case 'tool_use':
// Tool being used
console.log("[useElectronAgent] Tool use:", event.tool.name);
console.log('[useElectronAgent] Tool use:', event.tool.name);
onToolUse?.(event.tool.name, event.tool.input);
break;
case "complete":
case 'complete':
// Agent finished processing for THIS session
console.log("[useElectronAgent] Processing complete for session:", sessionId);
console.log('[useElectronAgent] Processing complete for session:', sessionId);
setIsProcessing(false);
if (event.messageId) {
setMessages((prev) =>
prev.map((msg) =>
msg.id === event.messageId
? { ...msg, content: event.content }
: msg
msg.id === event.messageId ? { ...msg, content: event.content } : msg
)
);
}
break;
case "error":
case 'error':
// Error occurred for THIS session
console.error("[useElectronAgent] Agent error for session:", sessionId, event.error);
console.error('[useElectronAgent] Agent error for session:', sessionId, event.error);
setIsProcessing(false);
setError(event.error);
if (event.message) {
@@ -293,7 +306,7 @@ export function useElectronAgent({
return () => {
if (unsubscribeRef.current) {
console.log("[useElectronAgent] Unsubscribing from stream events for session:", sessionId);
console.log('[useElectronAgent] Unsubscribing from stream events for session:', sessionId);
unsubscribeRef.current();
unsubscribeRef.current = null;
}
@@ -302,15 +315,15 @@ export function useElectronAgent({
// Send a message to the agent
const sendMessage = useCallback(
async (content: string, images?: ImageAttachment[]) => {
async (content: string, images?: ImageAttachment[], textFiles?: TextFileAttachment[]) => {
const api = getElectronAPI();
if (!api?.agent) {
setError("API not available");
setError('API not available');
return;
}
if (isProcessing) {
console.warn("[useElectronAgent] Already processing a message");
console.warn('[useElectronAgent] Already processing a message');
return;
}
@@ -318,11 +331,23 @@ export function useElectronAgent({
setError(null);
try {
console.log("[useElectronAgent] Sending message", {
console.log('[useElectronAgent] Sending message', {
hasImages: images && images.length > 0,
imageCount: images?.length || 0
imageCount: images?.length || 0,
hasTextFiles: textFiles && textFiles.length > 0,
textFileCount: textFiles?.length || 0,
});
// Build message content with text file context prepended
let messageContent = content;
if (textFiles && textFiles.length > 0) {
const contextParts = textFiles.map((file) => {
return `<file name="${file.filename}">\n${file.content}\n</file>`;
});
const contextBlock = `Here are some files for context:\n\n${contextParts.join('\n\n')}\n\n`;
messageContent = contextBlock + content;
}
// Save images to .automaker/images and get paths
let imagePaths: string[] | undefined;
if (images && images.length > 0 && api.saveImageToTemp) {
@@ -330,36 +355,36 @@ export function useElectronAgent({
for (const image of images) {
const result = await api.saveImageToTemp(
image.data,
image.filename,
sanitizeFilename(image.filename),
image.mimeType,
workingDirectory // Pass workingDirectory as projectPath
);
if (result.success && result.path) {
imagePaths.push(result.path);
console.log("[useElectronAgent] Saved image to .automaker/images:", result.path);
console.log('[useElectronAgent] Saved image to .automaker/images:', result.path);
} else {
console.error("[useElectronAgent] Failed to save image:", result.error);
console.error('[useElectronAgent] Failed to save image:', result.error);
}
}
}
const result = await api.agent!.send(
sessionId,
content,
messageContent,
workingDirectory,
imagePaths,
model
);
if (!result.success) {
setError(result.error || "Failed to send message");
setError(result.error || 'Failed to send message');
setIsProcessing(false);
}
// Note: We don't set isProcessing to false here because
// it will be set by the "complete" or "error" stream event
} catch (err) {
console.error("[useElectronAgent] Failed to send message:", err);
setError(err instanceof Error ? err.message : "Failed to send message");
console.error('[useElectronAgent] Failed to send message:', err);
setError(err instanceof Error ? err.message : 'Failed to send message');
setIsProcessing(false);
}
},
@@ -370,22 +395,22 @@ export function useElectronAgent({
const stopExecution = useCallback(async () => {
const api = getElectronAPI();
if (!api?.agent) {
setError("API not available");
setError('API not available');
return;
}
try {
console.log("[useElectronAgent] Stopping execution");
console.log('[useElectronAgent] Stopping execution');
const result = await api.agent!.stop(sessionId);
if (!result.success) {
setError(result.error || "Failed to stop execution");
setError(result.error || 'Failed to stop execution');
} else {
setIsProcessing(false);
}
} catch (err) {
console.error("[useElectronAgent] Failed to stop:", err);
setError(err instanceof Error ? err.message : "Failed to stop execution");
console.error('[useElectronAgent] Failed to stop:', err);
setError(err instanceof Error ? err.message : 'Failed to stop execution');
}
}, [sessionId]);
@@ -393,23 +418,23 @@ export function useElectronAgent({
const clearHistory = useCallback(async () => {
const api = getElectronAPI();
if (!api?.agent) {
setError("API not available");
setError('API not available');
return;
}
try {
console.log("[useElectronAgent] Clearing history");
console.log('[useElectronAgent] Clearing history');
const result = await api.agent!.clear(sessionId);
if (result.success) {
setMessages([]);
setError(null);
} else {
setError(result.error || "Failed to clear history");
setError(result.error || 'Failed to clear history');
}
} catch (err) {
console.error("[useElectronAgent] Failed to clear:", err);
setError(err instanceof Error ? err.message : "Failed to clear history");
console.error('[useElectronAgent] Failed to clear:', err);
setError(err instanceof Error ? err.message : 'Failed to clear history');
}
}, [sessionId]);
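Note: both send paths prepend attached text files to the prompt as tagged blocks before calling api.agent.send. Factored out, the construction looks like the sketch below; the string format matches what sendMessage and sendMessageDirectly build above.

import type { TextFileAttachment } from '@/store/app-store';

// Wrap each attached text file in a <file> tag and prepend the block
// to the user's message, matching the format built in sendMessage.
export function buildMessageContent(content: string, textFiles?: TextFileAttachment[]): string {
  if (!textFiles || textFiles.length === 0) return content;
  const contextParts = textFiles.map(
    (file) => `<file name="${file.filename}">\n${file.content}\n</file>`
  );
  return `Here are some files for context:\n\n${contextParts.join('\n\n')}\n\n` + content;
}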

View File

@@ -1,10 +1,11 @@
import { useState, useCallback } from 'react';
import type { ImageAttachment } from '@/store/app-store';
import type { ImageAttachment, TextFileAttachment } from '@/store/app-store';
export interface QueuedMessage {
id: string;
content: string;
images?: ImageAttachment[];
textFiles?: TextFileAttachment[];
timestamp: Date;
}
@@ -15,7 +16,11 @@ interface UseMessageQueueOptions {
interface UseMessageQueueResult {
queuedMessages: QueuedMessage[];
isProcessingQueue: boolean;
addToQueue: (content: string, images?: ImageAttachment[]) => void;
addToQueue: (
content: string,
images?: ImageAttachment[],
textFiles?: TextFileAttachment[]
) => void;
clearQueue: () => void;
removeFromQueue: (messageId: string) => void;
processNext: () => Promise<void>;
@@ -31,19 +36,23 @@ export function useMessageQueue({ onProcessNext }: UseMessageQueueOptions): UseM
const [queuedMessages, setQueuedMessages] = useState<QueuedMessage[]>([]);
const [isProcessingQueue, setIsProcessingQueue] = useState(false);
const addToQueue = useCallback((content: string, images?: ImageAttachment[]) => {
const queuedMessage: QueuedMessage = {
id: `queued-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
content: content.trim(),
images,
timestamp: new Date(),
};
const addToQueue = useCallback(
(content: string, images?: ImageAttachment[], textFiles?: TextFileAttachment[]) => {
const queuedMessage: QueuedMessage = {
id: `queued-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
content: content.trim(),
images,
textFiles,
timestamp: new Date(),
};
setQueuedMessages(prev => [...prev, queuedMessage]);
}, []);
setQueuedMessages((prev) => [...prev, queuedMessage]);
},
[]
);
const removeFromQueue = useCallback((messageId: string) => {
setQueuedMessages(prev => prev.filter(msg => msg.id !== messageId));
setQueuedMessages((prev) => prev.filter((msg) => msg.id !== messageId));
}, []);
const clearQueue = useCallback(() => {
@@ -61,7 +70,7 @@ export function useMessageQueue({ onProcessNext }: UseMessageQueueOptions): UseM
try {
await onProcessNext(nextMessage);
// Remove the processed message from queue
setQueuedMessages(prev => prev.slice(1));
setQueuedMessages((prev) => prev.slice(1));
} catch (error) {
console.error('Error processing queued message:', error);
// Keep the message in queue for retry or manual removal
@@ -78,4 +87,4 @@ export function useMessageQueue({ onProcessNext }: UseMessageQueueOptions): UseM
removeFromQueue,
processNext,
};
}
}
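Note: with textFiles threaded through QueuedMessage, a queued send carries the message text, images, and text files together. A usage sketch from a hook consumer, using the addToQueue signature defined above; the attachment values are illustrative.

import type { ImageAttachment, TextFileAttachment } from '@/store/app-store';

// Example of queueing a message with a markdown attachment while the agent is busy.
// `addToQueue` comes from useMessageQueue; the hook replays the message later
// through onProcessNext with the same images and text files.
function queueNotes(
  addToQueue: (content: string, images?: ImageAttachment[], textFiles?: TextFileAttachment[]) => void
) {
  const notes: TextFileAttachment = {
    id: 'file-abc',
    content: '# Notes\nShip the context page fix.',
    mimeType: 'text/markdown',
    filename: 'notes.md',
    size: 38,
  };
  addToQueue('Summarize the attached notes', undefined, [notes]);
}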

File diff suppressed because it is too large

View File

@@ -962,6 +962,25 @@ export class HttpApiClient implements ElectronAPI {
claude = {
getUsage: (): Promise<ClaudeUsageResponse> => this.get('/api/claude/usage'),
};
// Context API
context = {
describeImage: (
imagePath: string
): Promise<{
success: boolean;
description?: string;
error?: string;
}> => this.post('/api/context/describe-image', { imagePath }),
describeFile: (
filePath: string
): Promise<{
success: boolean;
description?: string;
error?: string;
}> => this.post('/api/context/describe-file', { filePath }),
};
}
// Singleton instance
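Note: in web mode these endpoints are reached through HttpApiClient. A hedged usage sketch of the new context methods; the import of HttpApiClient is omitted here and the path argument is a placeholder.

// Ask the server to describe an attached image so the description can be
// injected into the feature's context. Error handling mirrors the
// { success, error } convention used by the other client methods.
async function describeAttachment(api: HttpApiClient, imagePath: string): Promise<string | null> {
  const result = await api.context.describeImage(imagePath);
  if (!result.success) {
    console.warn('describe-image failed:', result.error);
    return null;
  }
  return result.description ?? null;
}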

View File

@@ -0,0 +1,236 @@
/**
* Shared utilities for image and file handling across the UI
*/
// Accepted image MIME types
export const ACCEPTED_IMAGE_TYPES = [
'image/jpeg',
'image/jpg',
'image/png',
'image/gif',
'image/webp',
];
// Accepted text file MIME types
export const ACCEPTED_TEXT_TYPES = ['text/plain', 'text/markdown', 'text/x-markdown'];
// File extensions for text files (used for validation when MIME type is unreliable)
export const ACCEPTED_TEXT_EXTENSIONS = ['.txt', '.md'];
// Default max file size (10MB)
export const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024;
// Default max text file size (1MB - text files should be smaller)
export const DEFAULT_MAX_TEXT_FILE_SIZE = 1 * 1024 * 1024;
// Default max number of files
export const DEFAULT_MAX_FILES = 5;
/**
* Sanitize a filename by replacing spaces and special characters with underscores.
* This is important for:
* - Mac screenshot filenames that contain Unicode narrow no-break spaces (U+202F)
* - Filenames with regular spaces
* - Filenames with special characters that may cause path issues
*
* @param filename - The original filename
* @returns A sanitized filename safe for file system operations
*/
export function sanitizeFilename(filename: string): string {
const lastDot = filename.lastIndexOf('.');
const name = lastDot > 0 ? filename.substring(0, lastDot) : filename;
const ext = lastDot > 0 ? filename.substring(lastDot) : '';
const sanitized = name
.replace(/[\s\u00A0\u202F\u2009\u200A]+/g, '_') // Various space characters (regular, non-breaking, narrow no-break, thin, hair)
.replace(/[^a-zA-Z0-9_-]/g, '_') // Anything except letters, digits, underscores, hyphens
.replace(/_+/g, '_') // Collapse multiple underscores
.replace(/^_|_$/g, ''); // Trim leading/trailing underscores
return `${sanitized || 'image'}${ext}`;
}
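Expected behavior of sanitizeFilename under the rules above (illustrative inputs, results derived from the replacement chain, not from tests in this commit):

sanitizeFilename('Screenshot 2025-12-21 at 9.41.07 PM.png'); // 'Screenshot_2025-12-21_at_9_41_07_PM.png'
sanitizeFilename('résumé draft.txt'); // 'r_sum_draft.txt'
sanitizeFilename('???.png'); // 'image.png' (empty result falls back to "image")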
/**
* Convert a File object to a base64 data URL string
*
* @param file - The file to convert
* @returns Promise resolving to a base64 data URL string
*/
export function fileToBase64(file: File): Promise<string> {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => {
if (typeof reader.result === 'string') {
resolve(reader.result);
} else {
reject(new Error('Failed to read file as base64'));
}
};
reader.onerror = () => reject(new Error('Failed to read file'));
reader.readAsDataURL(file);
});
}
/**
* Extract the base64 data from a data URL (removes the prefix)
*
* @param dataUrl - The full data URL (e.g., "data:image/png;base64,...")
* @returns The base64 data without the prefix
*/
export function extractBase64Data(dataUrl: string): string {
return dataUrl.split(',')[1] || dataUrl;
}
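The two helpers above compose naturally when a server expects the bare base64 payload; a small sketch (the function name is illustrative):

// Convert a dropped File to the raw base64 payload, without the data-URL prefix.
async function fileToRawBase64(file: File): Promise<string> {
  const dataUrl = await fileToBase64(file); // e.g. "data:image/png;base64,iVBORw0..."
  return extractBase64Data(dataUrl); // "iVBORw0..."
}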
/**
* Format file size in human-readable format
*
* @param bytes - File size in bytes
* @returns Formatted string (e.g., "1.5 MB")
*/
export function formatFileSize(bytes: number): string {
if (bytes === 0) return '0 B';
const k = 1024;
const sizes = ['B', 'KB', 'MB', 'GB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
}
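Sample outputs, derived from the formula and constants above (illustrative):

formatFileSize(0); // '0 B'
formatFileSize(532); // '532 B'
formatFileSize(1.5 * 1024 * 1024); // '1.5 MB'
formatFileSize(DEFAULT_MAX_FILE_SIZE); // '10 MB'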
/**
* Validate an image file for upload
*
* @param file - The file to validate
* @param maxFileSize - Maximum file size in bytes (default: 10MB)
* @returns Object with isValid boolean and optional error message
*/
export function validateImageFile(
file: File,
maxFileSize: number = DEFAULT_MAX_FILE_SIZE
): { isValid: boolean; error?: string } {
// Validate file type
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
return {
isValid: false,
error: `${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`,
};
}
// Validate file size
if (file.size > maxFileSize) {
const maxSizeMB = maxFileSize / (1024 * 1024);
return {
isValid: false,
error: `${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`,
};
}
return { isValid: true };
}
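A sketch of a drop handler that runs each file through validateImageFile before attaching it; attach and setError are placeholder callbacks, not part of this commit.

function handleImageDrop(
  files: File[],
  attach: (file: File) => void,
  setError: (message: string) => void
): void {
  for (const file of files.slice(0, DEFAULT_MAX_FILES)) {
    const { isValid, error } = validateImageFile(file);
    if (!isValid) {
      setError(error ?? 'Invalid file'); // surface the validation message
      continue;
    }
    attach(file);
  }
}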
/**
* Generate a unique image ID
*
* @returns A unique ID string for an image attachment
*/
export function generateImageId(): string {
return `img-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
}
/**
* Generate a unique file ID
*
* @returns A unique ID string for a file attachment
*/
export function generateFileId(): string {
return `file-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
}
/**
* Check if a file is a text file by extension or MIME type
*
* @param file - The file to check
* @returns True if the file is a text file
*/
export function isTextFile(file: File): boolean {
const extension = file.name.toLowerCase().slice(file.name.lastIndexOf('.'));
const isTextExtension = ACCEPTED_TEXT_EXTENSIONS.includes(extension);
const isTextMime = ACCEPTED_TEXT_TYPES.includes(file.type);
return isTextExtension || isTextMime;
}
/**
* Check if a file is an image file by MIME type
*
* @param file - The file to check
* @returns True if the file is an image file
*/
export function isImageFile(file: File): boolean {
return ACCEPTED_IMAGE_TYPES.includes(file.type);
}
/**
* Validate a text file for upload
*
* @param file - The file to validate
* @param maxFileSize - Maximum file size in bytes (default: 1MB)
* @returns Object with isValid boolean and optional error message
*/
export function validateTextFile(
file: File,
maxFileSize: number = DEFAULT_MAX_TEXT_FILE_SIZE
): { isValid: boolean; error?: string } {
const extension = file.name.toLowerCase().slice(file.name.lastIndexOf('.'));
// Validate file type by extension (MIME types for text files are often unreliable)
if (!ACCEPTED_TEXT_EXTENSIONS.includes(extension)) {
return {
isValid: false,
error: `${file.name}: Unsupported file type. Please use .txt or .md files.`,
};
}
// Validate file size
if (file.size > maxFileSize) {
const maxSizeMB = maxFileSize / (1024 * 1024);
return {
isValid: false,
error: `${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`,
};
}
return { isValid: true };
}
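Combining the predicates and validators above, a mixed drag-and-drop can be routed per file; this classifier is a sketch, not code from the commit.

// Classify a dropped file and run the matching validator.
function classifyAndValidate(file: File): { kind: 'image' | 'text' | 'unsupported'; error?: string } {
  if (isImageFile(file)) {
    return { kind: 'image', error: validateImageFile(file).error };
  }
  if (isTextFile(file)) {
    return { kind: 'text', error: validateTextFile(file).error };
  }
  return { kind: 'unsupported', error: `${file.name}: Unsupported file type.` };
}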
/**
* Read text content from a file
*
* @param file - The file to read
* @returns Promise resolving to the text content
*/
export function fileToText(file: File): Promise<string> {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => {
if (typeof reader.result === 'string') {
resolve(reader.result);
} else {
reject(new Error('Failed to read file as text'));
}
};
reader.onerror = () => reject(new Error('Failed to read file'));
reader.readAsText(file);
});
}
/**
* Get the MIME type for a text file based on extension
*
* @param filename - The filename to check
* @returns The MIME type for the file
*/
export function getTextFileMimeType(filename: string): string {
const extension = filename.toLowerCase().slice(filename.lastIndexOf('.'));
if (extension === '.md') {
return 'text/markdown';
}
return 'text/plain';
}
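Taken together, the text-file helpers in this module can assemble the attachment object consumed elsewhere in this commit; the function name below is illustrative.

// Build a TextFileAttachment-shaped object from a dropped .txt/.md file.
async function toTextFileAttachment(file: File) {
  const content = await fileToText(file);
  return {
    id: generateFileId(),
    content,
    mimeType: getTextFileMimeType(file.name),
    filename: sanitizeFilename(file.name),
    size: file.size,
  };
}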

View File

@@ -4,6 +4,7 @@ import type { Project, TrashedProject } from '@/lib/electron';
import type {
Feature as BaseFeature,
FeatureImagePath,
FeatureTextFilePath,
AgentModel,
PlanningMode,
ThinkingLevel,
@@ -194,12 +195,21 @@ export interface ImageAttachment {
size?: number; // file size in bytes - optional for messages from server
}
export interface TextFileAttachment {
id: string;
content: string; // text content of the file
mimeType: string; // e.g., "text/plain", "text/markdown"
filename: string;
size: number; // file size in bytes
}
export interface ChatMessage {
id: string;
role: 'user' | 'assistant';
content: string;
timestamp: Date;
images?: ImageAttachment[];
textFiles?: TextFileAttachment[];
}
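For context, a user message carrying a markdown attachment under the extended ChatMessage shape might look like this (all values illustrative):

const message: ChatMessage = {
  id: 'msg-1',
  role: 'user',
  content: 'Review the attached notes',
  timestamp: new Date(),
  textFiles: [
    {
      id: 'file-1',
      content: '# Notes\nFix uploads on the context page.',
      mimeType: 'text/markdown',
      filename: 'notes.md',
      size: 42,
    },
  ],
};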
export interface ChatSession {
@@ -213,7 +223,15 @@ export interface ChatSession {
}
// Re-export for backward compatibility
export type { FeatureImagePath, AgentModel, PlanningMode, ThinkingLevel, ModelProvider, AIProfile };
export type {
FeatureImagePath,
FeatureTextFilePath,
AgentModel,
PlanningMode,
ThinkingLevel,
ModelProvider,
AIProfile,
};
// UI-specific: base64-encoded images (not in shared types)
export interface FeatureImage {
@@ -228,11 +246,15 @@ export interface FeatureImage {
export type ClaudeModel = AgentModel;
// UI-specific Feature extension with UI-only fields and stricter types
export interface Feature extends Omit<BaseFeature, 'steps' | 'imagePaths' | 'status'> {
export interface Feature extends Omit<
BaseFeature,
'steps' | 'imagePaths' | 'textFilePaths' | 'status'
> {
steps: string[]; // Required in UI (not optional)
status: 'backlog' | 'in_progress' | 'waiting_approval' | 'verified' | 'completed';
images?: FeatureImage[]; // UI-specific base64 images
imagePaths?: FeatureImagePath[]; // Stricter type than base (no string | union)
textFilePaths?: FeatureTextFilePath[]; // Text file attachments for context
justFinishedAt?: string; // UI-specific: ISO timestamp when agent just finished
prUrl?: string; // UI-specific: Pull request URL
}
@@ -465,9 +487,7 @@ export type ClaudeUsage = {
};
// Response type for Claude usage API (can be success or error)
export type ClaudeUsageResponse =
| ClaudeUsage
| { error: string; message?: string };
export type ClaudeUsageResponse = ClaudeUsage | { error: string; message?: string };
/**
* Check if Claude usage is at its limit (any of: session >= 100%, weekly >= 100%, OR cost >= limit)
@@ -1129,9 +1149,7 @@ export const useAppStore = create<AppState & AppActions>()(
},
addFeature: (feature) => {
const id =
feature.id ||
`feature-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
const id = feature.id || `feature-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
const featureWithId = { ...feature, id } as unknown as Feature;
set({ features: [...get().features, featureWithId] });
return featureWithId;
@@ -2178,10 +2196,11 @@ export const useAppStore = create<AppState & AppActions>()(
// Claude Usage Tracking actions
setClaudeRefreshInterval: (interval: number) => set({ claudeRefreshInterval: interval }),
setClaudeUsageLastUpdated: (timestamp: number) => set({ claudeUsageLastUpdated: timestamp }),
setClaudeUsage: (usage: ClaudeUsage | null) => set({
claudeUsage: usage,
claudeUsageLastUpdated: usage ? Date.now() : null,
}),
setClaudeUsage: (usage: ClaudeUsage | null) =>
set({
claudeUsage: usage,
claudeUsageLastUpdated: usage ? Date.now() : null,
}),
// Reset
reset: () => set(initialState),

View File

@@ -12,6 +12,15 @@ export interface FeatureImagePath {
[key: string]: unknown;
}
export interface FeatureTextFilePath {
id: string;
path: string;
filename: string;
mimeType: string;
content: string; // Text content of the file
[key: string]: unknown;
}
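A sketch of a persisted attachment under the new FeatureTextFilePath interface; the path value is a placeholder, not a directory layout defined by this commit.

const attachment: FeatureTextFilePath = {
  id: 'file-1',
  path: '/workspace/my-project/attachments/notes.md', // placeholder location
  filename: 'notes.md',
  mimeType: 'text/markdown',
  content: '# Notes\nFix uploads on the context page.',
};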
export interface Feature {
id: string;
title?: string;
@@ -26,6 +35,7 @@ export interface Feature {
spec?: string;
model?: string;
imagePaths?: Array<string | FeatureImagePath | { path: string; [key: string]: unknown }>;
textFilePaths?: FeatureTextFilePath[];
// Branch info - worktree path is derived at runtime from branchName
branchName?: string; // Name of the feature branch (undefined = use current worktree)
skipTests?: boolean;
@@ -45,7 +55,7 @@ export interface Feature {
error?: string;
summary?: string;
startedAt?: string;
[key: string]: unknown; // Keep catch-all for extensibility
[key: string]: unknown; // Keep catch-all for extensibility
}
export type FeatureStatus = 'pending' | 'running' | 'completed' | 'failed' | 'verified';

View File

@@ -16,11 +16,7 @@ export type {
} from './provider.js';
// Feature types
export type {
Feature,
FeatureImagePath,
FeatureStatus,
} from './feature.js';
export type { Feature, FeatureImagePath, FeatureTextFilePath, FeatureStatus } from './feature.js';
// Session types
export type {
@@ -31,44 +27,23 @@ export type {
} from './session.js';
// Error types
export type {
ErrorType,
ErrorInfo,
} from './error.js';
export type { ErrorType, ErrorInfo } from './error.js';
// Image types
export type {
ImageData,
ImageContentBlock,
} from './image.js';
export type { ImageData, ImageContentBlock } from './image.js';
// Model types and constants
export {
CLAUDE_MODEL_MAP,
DEFAULT_MODELS,
type ModelAlias,
type AgentModel,
} from './model.js';
export { CLAUDE_MODEL_MAP, DEFAULT_MODELS, type ModelAlias, type AgentModel } from './model.js';
// Event types
export type {
EventType,
EventCallback,
} from './event.js';
export type { EventType, EventCallback } from './event.js';
// Spec types
export type {
SpecOutput,
} from './spec.js';
export {
specOutputSchema,
} from './spec.js';
export type { SpecOutput } from './spec.js';
export { specOutputSchema } from './spec.js';
// Enhancement types
export type {
EnhancementMode,
EnhancementExample,
} from './enhancement.js';
export type { EnhancementMode, EnhancementExample } from './enhancement.js';
// Settings types and constants
export type {
@@ -99,10 +74,7 @@ export {
} from './settings.js';
// Model display constants
export type {
ModelOption,
ThinkingLevelOption,
} from './model-display.js';
export type { ModelOption, ThinkingLevelOption } from './model-display.js';
export {
CLAUDE_MODELS,
THINKING_LEVELS,

View File

@@ -2,16 +2,16 @@
* Model alias mapping for Claude models
*/
export const CLAUDE_MODEL_MAP: Record<string, string> = {
haiku: "claude-haiku-4-5",
sonnet: "claude-sonnet-4-20250514",
opus: "claude-opus-4-5-20251101",
haiku: 'claude-haiku-4-5-20251001',
sonnet: 'claude-sonnet-4-20250514',
opus: 'claude-opus-4-5-20251101',
} as const;
/**
* Default models per provider
*/
export const DEFAULT_MODELS = {
claude: "claude-opus-4-5-20251101",
claude: 'claude-opus-4-5-20251101',
} as const;
export type ModelAlias = keyof typeof CLAUDE_MODEL_MAP;
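A small resolver sketch showing how the alias map and the default might be combined; the resolveModel helper is illustrative, not part of this commit.

// Resolve a short alias ('haiku' | 'sonnet' | 'opus') to a full model id, or pass a full id through.
function resolveModel(model?: string): string {
  if (!model) return DEFAULT_MODELS.claude;
  return CLAUDE_MODEL_MAP[model] ?? model;
}

resolveModel('haiku'); // 'claude-haiku-4-5-20251001'
resolveModel(); // 'claude-opus-4-5-20251101'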