From 7ab2aaaa2363ed99e5389c06b881c1da8693abba Mon Sep 17 00:00:00 2001 From: Kacper Date: Wed, 10 Dec 2025 13:41:52 +0100 Subject: [PATCH] feat(worktree): enhance worktree management and git diff functionality - Integrated git worktree isolation for feature execution, allowing agents to work in isolated branches. - Added GitDiffPanel component to visualize changes in both worktree and main project contexts. - Updated AutoModeService and IPC handlers to support worktree settings. - Implemented Git API for non-worktree operations, enabling file diff retrieval for the main project. - Enhanced UI components to reflect worktree settings and improve user experience. These changes provide a more robust and flexible environment for feature development and testing. --- .automaker/feature_list.json | 61 +- app/electron/auto-mode-service.js | 36 +- app/electron/main.js | 28 +- app/electron/preload.js | 15 +- app/example/AppSidebar.tsx | 417 --- app/example/page.tsx | 2590 ----------------- app/src/components/ui/git-diff-panel.tsx | 36 +- .../components/views/agent-output-modal.tsx | 3 + app/src/components/views/board-view.tsx | 4 +- app/src/components/views/settings-view.tsx | 1877 ++++++------ app/src/lib/electron.ts | 38 +- app/src/store/app-store.ts | 11 + app/src/types/electron.d.ts | 13 +- 13 files changed, 1190 insertions(+), 3939 deletions(-) delete mode 100644 app/example/AppSidebar.tsx delete mode 100644 app/example/page.tsx diff --git a/.automaker/feature_list.json b/.automaker/feature_list.json index 56c17ebf..fda8794b 100644 --- a/.automaker/feature_list.json +++ b/.automaker/feature_list.json @@ -72,9 +72,9 @@ "description": "When agent finish work the cards is moved either to waiting approval or into verified one But mostly its include some type of summary at the end i want you to modify our prompts and ui so when its in both states we can see the feature summary of what was done / modified instead of relying on going to code editor to see what got changed etc.", "steps": [], "status": "verified", - "skipTests": true, - "imagePaths": [], "startedAt": "2025-12-09T22:09:13.684Z", + "imagePaths": [], + "skipTests": true, "model": "opus", "thinkingLevel": "none" }, @@ -84,9 +84,9 @@ "description": "When running new feature in skip automated testing once its got finished its moved to waiting approval for us to manual test it / follow up prompt. Once we are satisfied we can click commit button so ai agent can commit it work this is only hapening in this scenerio because if we have unchecked the skip automated testing its do it automaticly and commit already. But the issue is when its going to commit we move it to in progress state where we can use stop button and if user use that button its moved to backlog column and. 
that kinda break what we are doing becase we have no longer even abbility to move it back to waiting approval or to run commit button / follow up again so if user use manual one and stop the commit i want it to be again moved back to waiting approval state / column", "steps": [], "status": "verified", - "skipTests": true, - "imagePaths": [], "startedAt": "2025-12-09T22:31:41.946Z", + "imagePaths": [], + "skipTests": true, "model": "opus", "thinkingLevel": "none" }, @@ -102,9 +102,9 @@ "agent execute task with correct model " ], "status": "verified", - "skipTests": false, - "imagePaths": [], "startedAt": "2025-12-09T23:07:37.223Z", + "imagePaths": [], + "skipTests": false, "summary": "Added model selection (Haiku/Sonnet/Opus) and thinking level (None/Low/Medium/High) controls to feature creation and edit dialogs. Modified: app-store.ts (added AgentModel and ThinkingLevel types), board-view.tsx (UI controls), feature-executor.js (dynamic model/thinking config), feature-loader.js (field persistence). Agent now executes with user-selected model and extended thinking settings.", "model": "opus", "thinkingLevel": "none" @@ -115,9 +115,9 @@ "description": "I want you to refactor the add new feature modal there are to many settings going on and its hard / annoyig to navigate lets split the settings in modal into tabs \nprompt icon - prompt and category\ngear icon - model and thinking ( here i would also like to split somehow the claude with thinking and codex that dont use it )\ntest icon - skip automated testing and verification steps\n", "steps": [], "status": "verified", - "skipTests": true, - "imagePaths": [], "startedAt": "2025-12-10T02:17:18.943Z", + "imagePaths": [], + "skipTests": true, "summary": "Made model selection buttons compact. Removed descriptions and badges from cards, now shows short model names (Haiku, Sonnet, Opus, Max, Codex, Mini) in horizontal row. Full description available on hover. Modified: board-view.tsx (renderModelOptions function).", "model": "opus", "thinkingLevel": "high" @@ -128,7 +128,7 @@ "description": "Make the add new feature modal widther ", "steps": [], "status": "verified", - "skipTests": true, + "startedAt": "2025-12-10T02:25:21.328Z", "imagePaths": [ { "id": "img-1765333063064-qygrbjul4", @@ -137,7 +137,7 @@ "mimeType": "image/png" } ], - "startedAt": "2025-12-10T02:25:21.328Z", + "skipTests": true, "summary": "Increased dialog max-width from max-w-md/max-w-lg to max-w-2xl. Modified: app/src/components/ui/dialog.tsx. 
This makes the add new feature modal and all other dialogs wider (from 448-512px to 672px) for better content display.", "model": "haiku", "thinkingLevel": "none" @@ -148,9 +148,9 @@ "description": "For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task", "steps": [], "status": "verified", - "skipTests": true, - "imagePaths": [], "startedAt": "2025-12-10T02:40:43.706Z", + "imagePaths": [], + "skipTests": true, "summary": "Kanban cards now render the agent info model badge using feature.model so the displayed model matches the one selected for the task.", "model": "gpt-5.1-codex", "thinkingLevel": "none" @@ -161,7 +161,7 @@ "description": "describe the attached image do not change code", "steps": [], "status": "verified", - "skipTests": true, + "startedAt": "2025-12-10T02:02:54.785Z", "imagePaths": [ { "id": "img-1765331797511-v4ssc1hha", @@ -170,7 +170,7 @@ "mimeType": "image/png" } ], - "startedAt": "2025-12-10T02:02:54.785Z", + "skipTests": true, "model": "opus", "thinkingLevel": "none" }, @@ -180,7 +180,7 @@ "description": "Add claude and codex to the left sidebar of settings so its will scroll to thoes sections as well", "steps": [], "status": "verified", - "skipTests": true, + "startedAt": "2025-12-10T09:32:31.638Z", "imagePaths": [ { "id": "img-1765358823366-6vchdhwsj", @@ -189,7 +189,7 @@ "mimeType": "image/png" } ], - "startedAt": "2025-12-10T09:32:31.638Z", + "skipTests": true, "model": "sonnet", "thinkingLevel": "none" }, @@ -199,9 +199,9 @@ "description": "When u write new feature for ai agent and attacht context images and change tab to choose diff model and go back to prompt tab the image preview break and im not sure if it even saved properly in state to be later attached check it out for me", "steps": [], "status": "verified", - "skipTests": true, - "imagePaths": [], "startedAt": "2025-12-10T09:59:02.988Z", + "imagePaths": [], + "skipTests": true, "summary": "Fixed image preview breaking when switching tabs in Add Feature modal. Added previewMap/onPreviewMapChange props to DescriptionImageDropZone component to lift preview state up to parent. Modified: description-image-dropzone.tsx (added parent-controlled state support), board-view.tsx (added newFeaturePreviewMap and followUpPreviewMap state, wired up to DescriptionImageDropZone). Image paths were already stored correctly in state - only the preview thumbnails (base64) were lost on tab switch due to component unmounting.", "model": "opus", "thinkingLevel": "high" @@ -212,7 +212,7 @@ "description": "Take a look at waiting aproval column in kanban board and fix the card that render in it u can see in attached images that they text is overlaping check other columns how we have them", "steps": [], "status": "verified", - "skipTests": true, + "startedAt": "2025-12-10T10:46:42.494Z", "imagePaths": [ { "id": "img-1765363296205-e4cwlj2j8", @@ -233,7 +233,7 @@ "mimeType": "image/png" } ], - "startedAt": "2025-12-10T10:46:42.494Z", + "skipTests": true, "model": "sonnet", "thinkingLevel": "low" }, @@ -243,9 +243,9 @@ "description": "I want to have some abbility when executing a task on project to have some type of rewing / checkpoint system so if the changes made by agent in the project dont satisfy me / break something i can click in the ui to revert them. 
The best way for it would be to implement github worktress so when spin up new task claude take a look at it generate new branch that fit task issue and make it as gihub worktree then we would create a a new folder in project .automaker/worktree with branch name and clone of repo so agent can freely work one something like that ", "steps": [], "status": "verified", - "skipTests": true, - "imagePaths": [], "startedAt": "2025-12-10T11:11:06.115Z", + "imagePaths": [], + "skipTests": true, "summary": "Implemented Git Worktree Checkpoint/Revert System. Created: worktree-manager.js service. Modified: auto-mode-service.js (worktree integration, revert/merge methods), feature-loader.js (worktree tracking), main.js (IPC handlers), preload.js (API exposure), app-store.ts (Feature type), electron.d.ts (types), electron.ts (mock API), kanban-card.tsx (branch badge, revert/merge buttons), board-view.tsx (handlers). Features: isolated git branches per feature, branch badge on cards, revert changes button, merge to main button, file diff APIs.", "model": "opus", "thinkingLevel": "ultrathink" @@ -256,24 +256,11 @@ "description": "When a agent is workig on task or when its in waiting approval column its would be nice to have some type of git diff panel and see what files got changed as well as reusing our custom themes we have in settings for the editor view of it take a look at codebase and create implementation for it", "steps": [], "status": "verified", - "skipTests": true, - "imagePaths": [], "startedAt": "2025-12-10T11:16:54.069Z", + "imagePaths": [], + "skipTests": true, "summary": "Added git diff panel for in-progress and waiting approval features. Created GitDiffPanel component with themed syntax highlighting. Modified: git-diff-panel.tsx (new), agent-output-modal.tsx, worktree-manager.js, auto-mode-service.js, main.js, preload.js, electron.d.ts. The panel shows changed files with +/- stats and expandable unified diff view using CSS theme variables.", "model": "opus", "thinkingLevel": "ultrathink" - }, - { - "id": "feature-1765366278888-fobz39cc4", - "category": "Core", - "description": "Implement profile view and in the sidebar the profile view would allow user to defined different ai provider profiels like heavy-task would be claude opus model with ultrathink or debugging would be codex max. This will give user flexibillity in our model tab to quickly use own defined profiles preset of models.", - "steps": [], - "status": "waiting_approval", - "skipTests": true, - "imagePaths": [], - "startedAt": "2025-12-10T11:31:20.842Z", - "summary": "Implemented AI Profiles feature for managing model configuration presets. Created: profiles-view.tsx. Modified: app-store.ts (added AIProfile type, state, and CRUD actions), sidebar.tsx (added profiles nav item), page.tsx (added profiles view routing), board-view.tsx (added Quick Select Profile section in Add/Edit Feature dialogs). 
Features: 5 built-in profiles (Heavy Task, Balanced, Quick Edit, Codex Power, Codex Fast), custom profile CRUD, drag-and-drop reordering, quick profile selection in feature dialogs.", - "model": "opus", - "thinkingLevel": "high" } ] \ No newline at end of file diff --git a/app/electron/auto-mode-service.js b/app/electron/auto-mode-service.js index 105ad27f..1ebe370e 100644 --- a/app/electron/auto-mode-service.js +++ b/app/electron/auto-mode-service.js @@ -46,8 +46,18 @@ class AutoModeService { /** * Setup worktree for a feature * Creates an isolated git worktree where the agent can work + * @param {Object} feature - The feature object + * @param {string} projectPath - Path to the project + * @param {Function} sendToRenderer - Function to send events to the renderer + * @param {boolean} useWorktreesEnabled - Whether worktrees are enabled in settings (default: false) */ - async setupWorktreeForFeature(feature, projectPath, sendToRenderer) { + async setupWorktreeForFeature(feature, projectPath, sendToRenderer, useWorktreesEnabled = false) { + // If worktrees are disabled in settings, skip entirely + if (!useWorktreesEnabled) { + console.log(`[AutoMode] Worktrees disabled in settings, working directly on main project`); + return { useWorktree: false, workPath: projectPath }; + } + // Check if worktrees are enabled (project must be a git repo) const isGit = await worktreeManager.isGitRepo(projectPath); if (!isGit) { @@ -164,14 +174,18 @@ class AutoModeService { /** * Run a specific feature by ID + * @param {string} projectPath - Path to the project + * @param {string} featureId - ID of the feature to run + * @param {Function} sendToRenderer - Function to send events to renderer + * @param {boolean} useWorktrees - Whether to use git worktree isolation (default: false) */ - async runFeature({ projectPath, featureId, sendToRenderer }) { + async runFeature({ projectPath, featureId, sendToRenderer, useWorktrees = false }) { // Check if this specific feature is already running if (this.runningFeatures.has(featureId)) { throw new Error(`Feature ${featureId} is already running`); } - console.log(`[AutoMode] Running specific feature: ${featureId}`); + console.log(`[AutoMode] Running specific feature: ${featureId} (worktrees: ${useWorktrees})`); // Register this feature as running const execution = this.createExecutionContext(featureId); @@ -190,8 +204,8 @@ class AutoModeService { console.log(`[AutoMode] Running feature: ${feature.description}`); - // Setup worktree for isolated work - const worktreeSetup = await this.setupWorktreeForFeature(feature, projectPath, sendToRenderer); + // Setup worktree for isolated work (if enabled) + const worktreeSetup = await this.setupWorktreeForFeature(feature, projectPath, sendToRenderer, useWorktrees); execution.worktreePath = worktreeSetup.workPath; execution.branchName = worktreeSetup.branchName; @@ -621,8 +635,12 @@ class AutoModeService { /** * Start a feature asynchronously (similar to drag operation) + * @param {Object} feature - The feature to start + * @param {string} projectPath - Path to the project + * @param {Function} sendToRenderer - Function to send events to renderer + * @param {boolean} useWorktrees - Whether to use git worktree isolation (default: false) */ - async startFeatureAsync(feature, projectPath, sendToRenderer) { + async startFeatureAsync(feature, projectPath, sendToRenderer, useWorktrees = false) { const featureId = feature.id; // Skip if already running @@ -633,7 +651,7 @@ class AutoModeService { try { console.log( - `[AutoMode] Starting 
feature: ${feature.description.slice(0, 50)}...` + `[AutoMode] Starting feature: ${feature.description.slice(0, 50)}... (worktrees: ${useWorktrees})` ); // Register this feature as running @@ -642,8 +660,8 @@ class AutoModeService { execution.sendToRenderer = sendToRenderer; this.runningFeatures.set(featureId, execution); - // Setup worktree for isolated work - const worktreeSetup = await this.setupWorktreeForFeature(feature, projectPath, sendToRenderer); + // Setup worktree for isolated work (if enabled) + const worktreeSetup = await this.setupWorktreeForFeature(feature, projectPath, sendToRenderer, useWorktrees); execution.worktreePath = worktreeSetup.workPath; execution.branchName = worktreeSetup.branchName; diff --git a/app/electron/main.js b/app/electron/main.js index 974f4bf7..bc5b319e 100644 --- a/app/electron/main.js +++ b/app/electron/main.js @@ -7,6 +7,7 @@ const { app, BrowserWindow, ipcMain, dialog, shell } = require("electron"); const fs = require("fs/promises"); const agentService = require("./agent-service"); const autoModeService = require("./auto-mode-service"); +const worktreeManager = require("./services/worktree-manager"); let mainWindow = null; @@ -468,7 +469,7 @@ ipcMain.handle("auto-mode:status", () => { */ ipcMain.handle( "auto-mode:run-feature", - async (_, { projectPath, featureId }) => { + async (_, { projectPath, featureId, useWorktrees = false }) => { try { const sendToRenderer = (data) => { if (mainWindow && !mainWindow.isDestroyed()) { @@ -480,6 +481,7 @@ ipcMain.handle( projectPath, featureId, sendToRenderer, + useWorktrees, }); } catch (error) { console.error("[IPC] auto-mode:run-feature error:", error); @@ -934,3 +936,27 @@ ipcMain.handle("worktree:get-file-diff", async (_, { projectPath, featureId, fil return { success: false, error: error.message }; } }); + +/** + * Get file diffs for the main project (non-worktree) + */ +ipcMain.handle("git:get-diffs", async (_, { projectPath }) => { + try { + return await worktreeManager.getFileDiffs(projectPath); + } catch (error) { + console.error("[IPC] git:get-diffs error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * Get diff for a specific file in the main project (non-worktree) + */ +ipcMain.handle("git:get-file-diff", async (_, { projectPath, filePath }) => { + try { + return await worktreeManager.getFileDiff(projectPath, filePath); + } catch (error) { + console.error("[IPC] git:get-file-diff error:", error); + return { success: false, error: error.message }; + } +}); diff --git a/app/electron/preload.js b/app/electron/preload.js index d51aba4a..2b06796e 100644 --- a/app/electron/preload.js +++ b/app/electron/preload.js @@ -97,8 +97,8 @@ contextBridge.exposeInMainWorld("electronAPI", { status: () => ipcRenderer.invoke("auto-mode:status"), // Run a specific feature - runFeature: (projectPath, featureId) => - ipcRenderer.invoke("auto-mode:run-feature", { projectPath, featureId }), + runFeature: (projectPath, featureId, useWorktrees) => + ipcRenderer.invoke("auto-mode:run-feature", { projectPath, featureId, useWorktrees }), // Verify a specific feature by running its tests verifyFeature: (projectPath, featureId) => @@ -189,6 +189,17 @@ contextBridge.exposeInMainWorld("electronAPI", { getFileDiff: (projectPath, featureId, filePath) => ipcRenderer.invoke("worktree:get-file-diff", { projectPath, featureId, filePath }), }, + + // Git Operations APIs (for non-worktree operations) + git: { + // Get file diffs for the main project + getDiffs: (projectPath) => + 
ipcRenderer.invoke("git:get-diffs", { projectPath }), + + // Get diff for a specific file in the main project + getFileDiff: (projectPath, filePath) => + ipcRenderer.invoke("git:get-file-diff", { projectPath, filePath }), + }, }); // Also expose a flag to detect if we're in Electron diff --git a/app/example/AppSidebar.tsx b/app/example/AppSidebar.tsx deleted file mode 100644 index 1bf707da..00000000 --- a/app/example/AppSidebar.tsx +++ /dev/null @@ -1,417 +0,0 @@ -"use client"; - -import { useState, useEffect, useRef } from "react"; -import Link from "next/link"; -import { usePathname } from "next/navigation"; -import { - Sparkles, - Wand2, - LayoutGrid, - Layers, - FolderOpen, - FileText, - List, - Cpu, - Search, - Share2, - Trash2, - BarChart3, - Settings, - PanelLeftClose, - PanelLeft, - Home, - LogOut, - User, - CreditCard, -} from "lucide-react"; - -interface AppSidebarProps { - user: any; - creditsBalance: number | null; -} - -interface NavItem { - href: string; - icon: any; - label: string; -} - -interface NavSection { - label?: string; - items: NavItem[]; -} - -export function AppSidebar({ user, creditsBalance }: AppSidebarProps) { - const pathname = usePathname(); - const [sidebarCollapsed, setSidebarCollapsed] = useState(false); - const [userMenuOpen, setUserMenuOpen] = useState(false); - const userMenuRef = useRef(null); - - // Close dropdown when clicking outside - useEffect(() => { - function handleClickOutside(event: MouseEvent) { - if ( - userMenuRef.current && - !userMenuRef.current.contains(event.target as Node) - ) { - setUserMenuOpen(false); - } - } - - if (userMenuOpen) { - document.addEventListener("mousedown", handleClickOutside); - return () => { - document.removeEventListener("mousedown", handleClickOutside); - }; - } - }, [userMenuOpen]); - - const navSections: NavSection[] = [ - { - items: [ - { href: "/generate", icon: Home, label: "Overview" }, - { href: "/generate/canvas", icon: Wand2, label: "Canvas" }, - ], - }, - { - label: "Content", - items: [ - { href: "/generate/gallery", icon: LayoutGrid, label: "Gallery" }, - { href: "/generate/collections", icon: Layers, label: "Collections" }, - { href: "/generate/projects", icon: FolderOpen, label: "Projects" }, - { href: "/generate/prompts", icon: FileText, label: "Prompts" }, - ], - }, - { - label: "Tools", - items: [ - { href: "/generate/batch", icon: List, label: "Batch" }, - { href: "/generate/models", icon: Cpu, label: "Models" }, - ], - }, - { - label: "Manage", - items: [ - { href: "/generate/shared", icon: Share2, label: "Shared" }, - { href: "/generate/trash", icon: Trash2, label: "Trash" }, - ], - }, - ]; - - const isActiveRoute = (href: string) => { - if (href === "/generate") { - return pathname === "/generate"; - } - return pathname?.startsWith(href); - }; - - return ( - - ); -} diff --git a/app/example/page.tsx b/app/example/page.tsx deleted file mode 100644 index fa379fd9..00000000 --- a/app/example/page.tsx +++ /dev/null @@ -1,2590 +0,0 @@ -"use client"; - -import { useState, useEffect, Suspense, useRef } from "react"; -import { useRouter, useSearchParams } from "next/navigation"; -import Link from "next/link"; -import { toast } from "sonner"; -import { Button } from "@/components/ui/button"; -import { Input } from "@/components/ui/input"; -import { Label } from "@/components/ui/label"; -import { Textarea } from "@/components/ui/textarea"; -import { Select } from "@/components/ui/select"; -import { Slider } from "@/components/ui/slider"; -import { FileInput } from "@/components/ui/file-input"; 
-import { - Dialog, - DialogContent, - DialogHeader, - DialogBody, - DialogFooter, -} from "@/components/ui/dialog"; -import { replaceVariables } from "@/lib/prompt-template"; -import { - Loader2, - Sparkles, - Wand2, - LayoutGrid, - Layers, - History, - Settings, - Bell, - HelpCircle, - Clock, - List, - Maximize2, - Copy, - Download, - SlidersHorizontal, - RotateCcw, - X, - Wand, - Dices, - ChevronRight, - ChevronDown, - ChevronLeft, - Plus, - ImagePlus, - Save, - Heart, - FolderOpen, - FileText, - PanelLeftClose, - PanelLeft, -} from "lucide-react"; -import Image from "next/image"; -import { ParameterTooltip } from "@/components/ui/tooltip"; -import { EmptyState } from "@/components/ui/empty-state"; -import { ImageLightbox } from "@/components/ImageLightbox"; -import { MediaRenderer } from "@/components/MediaRenderer"; -import { useSession } from "@/hooks/use-auth"; -import { useSettings } from "@/hooks/use-settings"; -import { usePresets, useCreatePreset } from "@/hooks/use-presets"; -import { usePrompts, useCreatePrompt } from "@/hooks/use-prompts"; -import { useImages, useImage, useToggleFavorite, useCreateVariation } from "@/hooks/use-images"; -import { useSubmitJob, useJobStatus } from "@/hooks/use-jobs"; -import { useUpload } from "@/hooks/use-upload"; -import { useQueryClient } from "@tanstack/react-query"; - -// Parameter tooltips content -const PARAMETER_TOOLTIPS = { - aspectRatio: - "The width-to-height ratio of the generated image. Square (1:1) works well for icons, while widescreen (16:9) is great for landscapes.", - imageCount: - "The number of images to generate in one batch. More images give you more options to choose from.", - guidance: - "Controls how closely the AI follows your prompt. Higher values (10-20) follow the prompt more strictly, while lower values (1-5) give more creative freedom.", - steps: - "The number of refinement iterations. More steps (50-150) produce higher quality but take longer. 20-30 steps is usually sufficient.", - seed: "A number that determines the random starting point. Using the same seed with the same prompt produces identical results, useful for variations.", - model: - "The AI model to use for generation. Different models have different strengths, speeds, and styles.", - negativePrompt: - "Things you don't want to appear in the image. 
For example: 'blurry, low quality, distorted'.", - styleModifiers: - "Quick-add keywords that enhance your prompt with common quality and style improvements.", - cameraModifiers: - "Add camera types, lenses, focal lengths, and apertures to achieve specific photographic looks and effects.", - depthAngleModifiers: - "Control camera angles, shot distances, and perspectives to create compelling compositions and viewpoints.", -}; - -interface GeneratedImage { - id: string; - fileUrl: string; - width: number; - height: number; - prompt: string; - modelId: string; - format?: string | null; - isFavorite?: boolean; - rating?: number | null; - parameters?: any; - createdAt?: string; - negativePrompt?: string; -} - -interface GenerationJob { - id: string; - status: "pending" | "processing" | "completed" | "failed"; - prompt: string; - modelId: string; - parameters: any; - errorMessage?: string; - createdAt: Date; - startedAt?: Date; - completedAt?: Date; - images?: GeneratedImage[]; -} - -const MODELS = [ - { - id: "flux-pro", - name: "Flux Pro", - description: "Highest quality", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "flux-dev", - name: "Flux Dev", - description: "Balanced speed/quality", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "flux-schnell", - name: "Flux Schnell", - description: "Fast generation", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "sdxl", - name: "Stable Diffusion XL", - description: "Versatile", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "wan-25", - name: "WAN 2.5", - description: "Image to Video", - supportsTextToImage: false, - supportsImageToImage: false, - supportsImageToVideo: true, - }, -]; - -// Helper function to get available models for a generation mode -const getAvailableModels = (mode: "text-to-image" | "image-to-image" | "image-to-video") => { - return MODELS.filter((model) => { - switch (mode) { - case "text-to-image": - return model.supportsTextToImage; - case "image-to-image": - return model.supportsImageToImage; - case "image-to-video": - return model.supportsImageToVideo; - default: - return false; - } - }); -}; - -const ASPECT_RATIOS = [ - { id: "square", label: "1:1", w: 5, h: 5 }, - { id: "portrait_4_3", label: "3:4", w: 3, h: 4 }, - { id: "landscape_4_3", label: "4:3", w: 4, h: 3 }, - { id: "landscape_16_9", label: "16:9", w: 7, h: 4 }, -]; - -const STYLE_MODIFIERS = [ - "4K", - "8K", - "Detailed", - "Cinematic", - "Octane Render", - "Ray Tracing", - "Ultra realistic", - "High quality", - "Award winning", - "Professional", -]; - -const CAMERA_MODIFIERS = [ - "DSLR", - "Mirrorless camera", - "Medium format", - "Large format", - "Film camera", - "14mm lens", - "24mm lens", - "35mm lens", - "50mm lens", - "85mm lens", - "135mm lens", - "200mm lens", - "Wide angle lens", - "Telephoto lens", - "Macro lens", - "Fisheye lens", - "Prime lens", - "Zoom lens", - "f/1.2", - "f/1.4", - "f/1.8", - "f/2.8", - "f/4", - "f/5.6", - "Shallow depth of field", - "Deep depth of field", - "Bokeh", - "Tilt-shift", - "Anamorphic", -]; - -const DEPTH_ANGLE_MODIFIERS = [ - "Extreme close-up", - "Close-up shot", - "Medium close-up", - "Medium shot", - "Medium long shot", - "Long shot", - "Extreme long shot", - "Full body shot", - "Cowboy shot", - "Eye level angle", - "High angle", - "Low angle", - "Bird's eye view", - "Worm's eye view", - 
"Dutch angle", - "Overhead shot", - "Aerial view", - "Ground level", - "Over-the-shoulder", - "Point of view shot", - "First-person view", - "Third-person view", - "Side profile", - "Three-quarter view", - "Front view", - "Back view", - "Isometric view", - "Forced perspective", - "Macro photography", - "Micro lens shot", - "Tracking shot", - "Establishing shot", - "Two-shot", -]; - -function GeneratePageContent() { - const router = useRouter(); - const searchParams = useSearchParams(); - const queryClient = useQueryClient(); - - // TanStack Query hooks - const { data: session, isPending: sessionLoading } = useSession(); - const { data: settingsData } = useSettings(); - const { data: presetsData } = usePresets(!!session); - const { data: promptsData } = usePrompts(!!session); - const { data: historyData } = useImages({ limit: 20 }); - const createPresetMutation = useCreatePreset(); - const createPromptMutation = useCreatePrompt(); - const toggleFavoriteMutation = useToggleFavorite(); - const createVariationMutation = useCreateVariation(); - const submitJobMutation = useSubmitJob(); - const uploadMutation = useUpload(); - - const [prompt, setPrompt] = useState(""); - const [negativePrompt, setNegativePrompt] = useState(""); - const [model, setModel] = useState("flux-pro"); - const [aspectRatio, setAspectRatio] = useState("landscape_16_9"); - const [numImages, setNumImages] = useState(1); - const [steps, setSteps] = useState(28); - const [guidance, setGuidance] = useState(3.5); - const [seed, setSeed] = useState(""); - const [loading, setLoading] = useState(false); - const [error, setError] = useState(""); - const [generatedImages, setGeneratedImages] = useState([]); - const [generationTime, setGenerationTime] = useState(null); - - // Job-based generation state - const [jobs, setJobs] = useState([]); - const processedJobsRef = useRef>(new Set()); - const autoStartTriggeredRef = useRef(false); - - // Job status polling - const pendingJobIds = jobs - .filter((j) => j.status === "pending" || j.status === "processing") - .map((j) => j.id); - const { data: jobStatusData } = useJobStatus(pendingJobIds, { - enabled: pendingJobIds.length > 0, - }); - - // UI States - const [showNegativePrompt, setShowNegativePrompt] = useState(false); - const [mobileSidebarOpen, setMobileSidebarOpen] = useState(false); - const [showAdvanced, setShowAdvanced] = useState(false); - - // Image-to-image mode - const [generationMode, setGenerationMode] = useState< - "text-to-image" | "image-to-image" | "image-to-video" - >("text-to-image"); - const [sourceImage, setSourceImage] = useState(null); // URL or data URL - const [sourceImageFile, setSourceImageFile] = useState(null); - const [strength, setStrength] = useState(0.75); // 0-1, how much to transform - const [isDragging, setIsDragging] = useState(false); - - // Video generation state (for WAN 2.5) - const [resolution, setResolution] = useState<"480p" | "720p" | "1080p">("1080p"); - const [duration, setDuration] = useState<5 | 10>(5); - - // Preset state - const [showSavePresetModal, setShowSavePresetModal] = useState(false); - const [savePresetData, setSavePresetData] = useState({ - name: "", - description: "", - }); - - // Save Prompt state - const [showSaveModal, setShowSaveModal] = useState(false); - const [savePromptData, setSavePromptData] = useState({ - title: "", - category: "", - }); - - // Template/Prompt loading state - const [showLoadPromptModal, setShowLoadPromptModal] = useState(false); - const [showTemplateVariablesModal, setShowTemplateVariablesModal] = 
- useState(false); - const [selectedTemplate, setSelectedTemplate] = useState(null); - const [templateVariableValues, setTemplateVariableValues] = useState< - Record - >({}); - - // Style modifiers state - const [activeStyles, setActiveStyles] = useState([]); - - // Camera modifiers state - const [activeCameras, setActiveCameras] = useState([]); - - // Depth/Angle modifiers state - const [activeDepthAngles, setActiveDepthAngles] = useState([]); - - // View mode state - const [viewMode, setViewMode] = useState<"grid" | "list">("grid"); - - // Lightbox state - const [lightboxOpen, setLightboxOpen] = useState(false); - const [selectedImageIndex, setSelectedImageIndex] = useState(0); - - // Derived state from queries - const presets = presetsData?.presets || []; - const savedPrompts = promptsData?.prompts || []; - const historyImages = historyData?.images || []; - - const handleAddStyleModifier = (style: string) => { - // Add the style to active styles if not already there - if (!activeStyles.includes(style)) { - setActiveStyles([...activeStyles, style]); - // Add the style to the prompt if it's not already there - const styleText = style.toLowerCase(); - if (!prompt.toLowerCase().includes(styleText)) { - setPrompt((prev) => (prev ? `${prev}, ${style}` : style)); - } - } - }; - - const handleRemoveStyleModifier = (style: string) => { - setActiveStyles(activeStyles.filter((s) => s !== style)); - // Remove the style from the prompt - const styleRegex = new RegExp( - `(,?\\s*${style}\\s*,?|${style}\\s*,|,\\s*${style})`, - "gi" - ); - const updatedPrompt = prompt - .replace(styleRegex, ",") - .replace(/,\s*,/g, ",") - .replace(/^\s*,\s*/, "") - .replace(/\s*,\s*$/, "") - .trim(); - setPrompt(updatedPrompt); - }; - - const handleAddCameraModifier = (camera: string) => { - if (!activeCameras.includes(camera)) { - setActiveCameras([...activeCameras, camera]); - const cameraText = camera.toLowerCase(); - if (!prompt.toLowerCase().includes(cameraText)) { - setPrompt((prev) => (prev ? `${prev}, ${camera}` : camera)); - } - } - }; - - const handleRemoveCameraModifier = (camera: string) => { - setActiveCameras(activeCameras.filter((c) => c !== camera)); - const cameraRegex = new RegExp( - `(,?\\s*${camera}\\s*,?|${camera}\\s*,|,\\s*${camera})`, - "gi" - ); - const updatedPrompt = prompt - .replace(cameraRegex, ",") - .replace(/,\s*,/g, ",") - .replace(/^\s*,\s*/, "") - .replace(/\s*,\s*$/, "") - .trim(); - setPrompt(updatedPrompt); - }; - - const handleAddDepthAngleModifier = (modifier: string) => { - if (!activeDepthAngles.includes(modifier)) { - setActiveDepthAngles([...activeDepthAngles, modifier]); - const modifierText = modifier.toLowerCase(); - if (!prompt.toLowerCase().includes(modifierText)) { - setPrompt((prev) => (prev ? 
`${prev}, ${modifier}` : modifier)); - } - } - }; - - const handleRemoveDepthAngleModifier = (modifier: string) => { - setActiveDepthAngles(activeDepthAngles.filter((m) => m !== modifier)); - const modifierRegex = new RegExp( - `(,?\\s*${modifier}\\s*,?|${modifier}\\s*,|,\\s*${modifier})`, - "gi" - ); - const updatedPrompt = prompt - .replace(modifierRegex, ",") - .replace(/,\s*,/g, ",") - .replace(/^\s*,\s*/, "") - .replace(/\s*,\s*$/, "") - .trim(); - setPrompt(updatedPrompt); - }; - - const handleSourceImageUpload = async ( - e: React.ChangeEvent - ) => { - const file = e.target.files?.[0]; - if (!file) return; - - // Preview the image - const reader = new FileReader(); - reader.onload = (event) => { - setSourceImage(event.target?.result as string); - setSourceImageFile(file); - }; - reader.readAsDataURL(file); - }; - - const handleDragEnter = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - setIsDragging(true); - }; - - const handleDragLeave = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - setIsDragging(false); - }; - - const handleDragOver = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - }; - - const handleImageDragStart = (e: React.DragEvent, imageUrl: string) => { - e.dataTransfer.setData("image/url", imageUrl); - e.dataTransfer.effectAllowed = "copy"; - }; - - const handleDrop = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - setIsDragging(false); - - // Check if dragging from gallery first - const imageUrl = e.dataTransfer.getData("image/url"); - if (imageUrl) { - setSourceImage(imageUrl); - setSourceImageFile(null); // Clear file if using existing image - return; - } - - // Otherwise handle file drop - const files = e.dataTransfer.files; - if (files && files.length > 0) { - const file = files[0]; - - // Check if it's an image - if (file.type.startsWith("image/")) { - const reader = new FileReader(); - reader.onload = (event) => { - setSourceImage(event.target?.result as string); - setSourceImageFile(file); - }; - reader.readAsDataURL(file); - } else { - toast.error("Invalid file type", { - description: "Please upload an image file", - }); - } - } - }; - - const handleSelectExistingImage = (imageUrl: string) => { - setSourceImage(imageUrl); - setSourceImageFile(null); // Clear file if using existing image - }; - - const handleDownload = async (image: GeneratedImage) => { - try { - const response = await fetch(image.fileUrl); - const blob = await response.blob(); - const url = window.URL.createObjectURL(blob); - const link = document.createElement("a"); - link.href = url; - link.download = `${image.id}.png`; - document.body.appendChild(link); - link.click(); - document.body.removeChild(link); - window.URL.revokeObjectURL(url); - toast.success("Image downloaded!", { - description: "Image saved to your downloads folder", - }); - } catch (error) { - console.error("Failed to download image:", error); - setError("Failed to download image. Please try again."); - toast.error("Download failed", { - description: "Failed to download image. Please try again.", - }); - } - }; - - const handleToggleFavorite = async (imageId: string) => { - toggleFavoriteMutation.mutate(imageId, { - onSuccess: () => { - setGeneratedImages((prev) => - prev.map((img) => - img.id === imageId ? 
{ ...img, isFavorite: true } : img - ) - ); - toast.success("Added to favorites!"); - }, - onError: (error) => { - console.error("Failed to toggle favorite:", error); - toast.error("Failed to add to favorites"); - }, - }); - }; - - const handleGenerateVariation = async (image: GeneratedImage) => { - setError(""); - - createVariationMutation.mutate( - { imageId: image.id }, - { - onSuccess: (data) => { - setGeneratedImages((prev) => [data.image, ...prev]); - setGenerationTime(data.generationTime || null); - toast.success("Variation created!", { - description: "New variation added to your gallery", - }); - }, - onError: (err) => { - console.error("Failed to generate variation:", err); - const errorMessage = - err instanceof Error - ? err.message - : "Failed to generate variation. Please try again."; - setError(errorMessage); - toast.error("Variation failed", { - description: errorMessage, - }); - }, - } - ); - }; - - // Load prompt from URL parameters and handle variation/remix/upscale - useEffect(() => { - const urlPrompt = searchParams.get("prompt"); - const urlNegativePrompt = searchParams.get("negativePrompt"); - const urlModel = searchParams.get("model"); - const variationFrom = searchParams.get("variationFrom"); - const remixFrom = searchParams.get("remixFrom"); - const upscaleFrom = searchParams.get("upscaleFrom"); - - if (urlPrompt) setPrompt(urlPrompt); - if (urlNegativePrompt) setNegativePrompt(urlNegativePrompt); - if (urlModel) { - // Validate that it's a valid model ID - const validModels = MODELS.map((m) => m.id); - if (validModels.includes(urlModel)) { - setModel(urlModel); - } - } - - // Handle variation from existing image - if (variationFrom) { - loadImageForVariation(variationFrom); - } - - // Handle remix from existing image - if (remixFrom) { - loadImageForRemix(remixFrom); - } - - // Handle upscale from existing image - if (upscaleFrom) { - loadImageForUpscale(upscaleFrom); - } - }, [searchParams]); - - // Auto-start generation when variation is loaded - useEffect(() => { - const autoStart = searchParams.get("autoStart"); - - if (autoStart === "true" && prompt && !autoStartTriggeredRef.current) { - autoStartTriggeredRef.current = true; - // Small delay to ensure all state is set - setTimeout(() => { - handleGenerate(); - // Clean up URL to remove query parameters after starting the job - router.replace("/generate/canvas", { scroll: false }); - }, 100); - } - }, [prompt, searchParams, router]); - - const loadImageForVariation = async (imageId: string) => { - try { - const response = await fetch(`/api/images/${imageId}`); - if (response.ok) { - const data = await response.json(); - const image = data.image; - - // Populate form with parent image settings - setPrompt(image.prompt || ""); - setNegativePrompt(image.negativePrompt || ""); - setModel(image.modelId || "flux-pro"); - if (image.parameters) { - if (image.parameters.aspectRatio) - setAspectRatio(image.parameters.aspectRatio); - if (image.parameters.steps) setSteps(image.parameters.steps); - if (image.parameters.guidance) setGuidance(image.parameters.guidance); - // Generate a new seed for variation (slightly different from parent) - if (image.parameters.seed) { - const parentSeed = parseInt(image.parameters.seed) || 0; - const newSeed = parentSeed + Math.floor(Math.random() * 1000) + 1; - setSeed(newSeed.toString()); - } - } - toast.success("Variation settings loaded!", { - description: "Seed has been adjusted. 
Click Generate to create variation.", - }); - } - } catch (error) { - console.error("Failed to load image for variation:", error); - setError("Failed to load parent image"); - } - }; - - const loadImageForRemix = async (imageId: string) => { - try { - const response = await fetch(`/api/images/${imageId}`); - if (response.ok) { - const data = await response.json(); - const image = data.image; - - // Populate form with parent image settings but clear prompt for remix - setPrompt(""); // User will enter new prompt - setNegativePrompt(image.negativePrompt || ""); - setModel(image.modelId || "flux-pro"); - if (image.parameters) { - if (image.parameters.aspectRatio) - setAspectRatio(image.parameters.aspectRatio); - if (image.parameters.steps) setSteps(image.parameters.steps); - if (image.parameters.guidance) setGuidance(image.parameters.guidance); - if (image.parameters.seed) setSeed(image.parameters.seed); - } - - // Set to image-to-image mode and use the original image as the source - setGenerationMode("image-to-image"); - setSourceImage(image.fileUrl); - setSourceImageFile(null); - - toast.success("Remix settings loaded!", { - description: "Enter a new prompt and click Generate to remix with the original image.", - }); - } - } catch (error) { - console.error("Failed to load image for remix:", error); - setError("Failed to load parent image"); - } - }; - - const loadImageForUpscale = async (imageId: string) => { - try { - const response = await fetch(`/api/images/${imageId}`); - if (response.ok) { - const data = await response.json(); - toast.info("Upscaling feature coming soon!", { - description: "For now, you can download and use external upscaling tools.", - }); - } - } catch (error) { - console.error("Failed to load image for upscale:", error); - setError("Failed to load parent image"); - } - }; - - // Process job status updates from the hook - useEffect(() => { - if (!jobStatusData?.jobs) { - return; - } - - const updatedJobs = jobStatusData.jobs; - - setJobs((prevJobs) => - prevJobs.map((job) => { - const update = updatedJobs.find((u: any) => u.id === job.id); - if (update) { - // If job just completed, show toast notification - if ( - job.status !== "completed" && - update.status === "completed" && - !processedJobsRef.current.has(job.id) - ) { - // Mark this job as processed - processedJobsRef.current.add(job.id); - - toast.success( - `Generation completed for "${update.prompt.substring( - 0, - 30 - )}..."`, - { - description: `Generated ${ - update.images?.length || 0 - } image${update.images?.length !== 1 ? 
"s" : ""}`, - } - ); - - // Add completed images to generatedImages - if (update.images && update.images.length > 0) { - setGeneratedImages((prev) => [...update.images, ...prev]); - } - - // Refresh history and billing - queryClient.invalidateQueries({ queryKey: ["images"] }); - queryClient.invalidateQueries({ queryKey: ["billing"] }); - } else if ( - job.status !== "failed" && - update.status === "failed" && - !processedJobsRef.current.has(job.id) - ) { - // Mark this job as processed - processedJobsRef.current.add(job.id); - - toast.error( - `Generation failed for "${update.prompt.substring( - 0, - 30 - )}..."`, - { - description: - update.errorMessage || "Unknown error occurred", - } - ); - } - - return { - ...job, - ...update, - }; - } - return job; - }) - ); - }, [jobStatusData]); - - - const handleSavePreset = async (e: React.FormEvent) => { - e.preventDefault(); - - createPresetMutation.mutate( - { - name: savePresetData.name, - description: savePresetData.description || null, - modelId: model, - parameters: { - model, - width: 0, - height: 0, - steps, - guidanceScale: guidance, - aspectRatio, - numImages, - seed: seed || null, - }, - }, - { - onSuccess: () => { - setShowSavePresetModal(false); - setSavePresetData({ name: "", description: "" }); - setError(""); - toast.success("Preset saved!", { - description: `"${savePresetData.name}" is ready to use`, - }); - }, - onError: (err) => { - const errorMessage = - err instanceof Error ? err.message : "Failed to save preset"; - setError(errorMessage); - toast.error("Failed to save preset", { - description: errorMessage, - }); - }, - } - ); - }; - - const handleLoadPreset = (presetId: string) => { - const preset = presets.find((p) => p.id === presetId); - if (preset) { - setModel(preset.modelId); - const params = preset.parameters || {}; - setAspectRatio(params.aspectRatio || "landscape_16_9"); - setNumImages(params.numImages || 1); - setSteps(params.steps || 28); - setGuidance(params.guidance || 3.5); - setSeed(params.seed || ""); - } - }; - - const handleSavePrompt = async (e: React.FormEvent) => { - e.preventDefault(); - - createPromptMutation.mutate( - { - text: prompt, - name: savePromptData.title, - category: savePromptData.category || undefined, - tags: [], - }, - { - onSuccess: () => { - setShowSaveModal(false); - setSavePromptData({ title: "", category: "" }); - setError(""); - toast.success("Prompt saved!", { - description: `"${savePromptData.title}" has been saved to your library`, - }); - }, - onError: (err) => { - const errorMessage = - err instanceof Error ? 
err.message : "Failed to save prompt"; - setError(errorMessage); - toast.error("Failed to save prompt", { - description: errorMessage, - }); - }, - } - ); - }; - - const handleLoadPrompt = (promptItem: any) => { - // Check if this is a template with variables - if (promptItem.isTemplate && promptItem.templateVariables?.length > 0) { - // Open modal to fill in variables - setSelectedTemplate(promptItem); - // Initialize empty values for all variables - const initialValues: Record = {}; - promptItem.templateVariables.forEach((varName: string) => { - initialValues[varName] = ""; - }); - setTemplateVariableValues(initialValues); - setShowLoadPromptModal(false); - setShowTemplateVariablesModal(true); - } else { - // Regular prompt, just load it - setPrompt(promptItem.promptText); - setNegativePrompt(promptItem.negativePrompt || ""); - setShowLoadPromptModal(false); - } - }; - - const handleApplyTemplate = () => { - if (!selectedTemplate) return; - - // Replace variables in the template - const filledPrompt = replaceVariables( - selectedTemplate.promptText, - templateVariableValues - ); - setPrompt(filledPrompt); - - if (selectedTemplate.negativePrompt) { - const filledNegativePrompt = replaceVariables( - selectedTemplate.negativePrompt, - templateVariableValues - ); - setNegativePrompt(filledNegativePrompt); - } - - // Close modal and reset - setShowTemplateVariablesModal(false); - setSelectedTemplate(null); - setTemplateVariableValues({}); - }; - - const handleHistoryItemClick = (image: GeneratedImage) => { - router.push(`/generate/images/${image.id}?returnTo=/generate/canvas`); - }; - - const handleGenerate = async () => { - if (!session?.user) { - setError("Please sign in to generate images"); - return; - } - if (!prompt) return; - - // Check for source image in img2img mode - if (generationMode === "image-to-image" && !sourceImage) { - setError("Please select or upload a source image"); - return; - } - - // Check for source image in image-to-video mode - if (generationMode === "image-to-video" && !sourceImage) { - setError("Please select or upload a source image for video generation"); - return; - } - - setError(""); - - // Create optimistic job ID - const optimisticJobId = `optimistic-${Date.now()}`; - - // Create optimistic job immediately for instant UI feedback - const optimisticJob: GenerationJob = { - id: optimisticJobId, - status: "pending", - prompt, - modelId: model, - parameters: { - prompt, - negativePrompt: negativePrompt || undefined, - model, - aspectRatio, - numImages, - steps: steps || undefined, - guidance: guidance || undefined, - seed: seed ? parseInt(seed) : undefined, - generationMode, - }, - createdAt: new Date(), - }; - - // Add optimistic job to the queue immediately - setJobs((prev) => [optimisticJob, ...prev]); - - // Show toast immediately - toast.info(`Generation started for "${prompt.substring(0, 30)}..."`, { - description: `Job queued. You can continue working while it processes.`, - }); - - try { - let imageUrl = sourceImage; - - // If using uploaded file, first upload it - if ((generationMode === "image-to-image" || generationMode === "image-to-video") && sourceImageFile) { - const uploadData = await uploadMutation.mutateAsync(sourceImageFile); - imageUrl = uploadData.url; - } - - const requestBody: any = { - prompt, - negativePrompt: negativePrompt || undefined, - model, - aspectRatio, - numImages, - steps: steps || undefined, - guidance: guidance || undefined, - seed: seed ? 
parseInt(seed) : undefined, - generationMode, - }; - - // Add img2img specific parameters - if (generationMode === "image-to-image") { - requestBody.imageUrl = imageUrl; - requestBody.strength = strength; - } - - // Add video specific parameters for image-to-video mode - if (generationMode === "image-to-video") { - requestBody.imageUrl = imageUrl; - requestBody.resolution = resolution; - requestBody.duration = duration; - } - - // Submit job using mutation - const data = await submitJobMutation.mutateAsync(requestBody); - - // Replace optimistic job with real job data - setJobs((prev) => - prev.map((job) => - job.id === optimisticJobId - ? { - ...job, - id: data.jobId, - status: data.status, - parameters: requestBody, - } - : job - ) - ); - } catch (err: any) { - // Remove optimistic job on error - setJobs((prev) => prev.filter((job) => job.id !== optimisticJobId)); - - const errorMessage = err.message || "Failed to start generation"; - setError(errorMessage); - toast.error("Generation failed to start", { - description: errorMessage, - }); - } - }; - - return ( - <> - - -
- {/* MAIN CONTENT (Canvas/Gallery) */} -
- {/* Page Header */} -
-
-

- Canvas -

-
- -
- {/* Mobile Toggle for Right Sidebar */} - -
-
- - {/* Scrollable Area */} - -
- - {/* 3. RIGHT SIDEBAR (Controls) */} - {/* Mobile Overlay */} - {mobileSidebarOpen && ( -
setMobileSidebarOpen(false)} - >
- )} - -