Mirror of https://github.com/AutoMaker-Org/automaker.git, synced 2026-01-30 06:12:03 +00:00
feat: add comprehensive integration tests for auto-mode-service
- Created git-test-repo helper for managing test git repositories
- Added 13 integration tests covering:
  - Worktree operations (create, error handling, non-worktree mode)
  - Feature execution (status updates, model selection, duplicate prevention)
  - Auto loop (start/stop, pending features, max concurrency, events)
  - Error handling (provider errors, continue after failures)
- Integration tests use real git operations with temporary repos
- All 416 tests passing with 72.65% overall coverage
- Service coverage improved: agent-service 58%, auto-mode-service 44%, feature-loader 66%

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
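For reference, the new tests exercise the helper roughly as in the condensed sketch below; the test name and the "demo-feature" id are placeholders for illustration, not part of the diff.

import { describe, it, expect, beforeEach, afterEach } from "vitest";
import {
  createTestGitRepo,
  createTestFeature,
  type TestRepo,
} from "../helpers/git-test-repo.js";

describe("example (integration)", () => {
  let repo: TestRepo;

  beforeEach(async () => {
    // Throwaway repo under os.tmpdir() with an initial commit on main
    repo = await createTestGitRepo();
  });

  afterEach(async () => {
    // Removes any leftover worktrees, then deletes the temporary repo
    await repo.cleanup();
  });

  it("writes a feature file under .automaker/features", async () => {
    // "demo-feature" is a placeholder id used only in this sketch
    await createTestFeature(repo.path, "demo-feature", {
      id: "demo-feature",
      category: "test",
      description: "Example feature",
      status: "pending",
    });
    expect(repo.path).toBeTruthy();
  });
});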
apps/server/.gitignore (vendored, 2 additions)
@@ -1,2 +1,4 @@
 .env
 data
+node_modules
+coverage
apps/server/package.json
@@ -9,7 +9,13 @@
     "dev": "tsx watch src/index.ts",
     "build": "tsc",
     "start": "node dist/index.js",
-    "lint": "eslint src/"
+    "lint": "eslint src/",
+    "test": "vitest",
+    "test:ui": "vitest --ui",
+    "test:run": "vitest run",
+    "test:cov": "vitest run --coverage",
+    "test:watch": "vitest watch",
+    "test:unit": "vitest run tests/unit"
   },
   "dependencies": {
     "@anthropic-ai/claude-agent-sdk": "^0.1.61",
@@ -23,7 +29,10 @@
     "@types/express": "^5.0.1",
     "@types/node": "^20",
     "@types/ws": "^8.18.1",
+    "@vitest/coverage-v8": "^4.0.15",
+    "@vitest/ui": "^4.0.15",
     "tsx": "^4.19.4",
-    "typescript": "^5"
+    "typescript": "^5",
+    "vitest": "^4.0.15"
   }
 }
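Note: the test scripts above and the "@/..." imports used throughout the new tests assume a Vitest config that is not part of this diff. A minimal sketch of what such a config could look like follows; the file name, alias target, and option values are assumptions, not taken from the repository.

// vitest.config.ts (hypothetical sketch, not included in this commit)
import { defineConfig } from "vitest/config";
import { fileURLToPath } from "node:url";

export default defineConfig({
  resolve: {
    // Maps "@/..." imports in the tests to the server source directory
    alias: { "@": fileURLToPath(new URL("./src", import.meta.url)) },
  },
  test: {
    environment: "node",
    // Matches the apps/server/tests/setup.ts file added in this commit
    setupFiles: ["tests/setup.ts"],
    // Matches the @vitest/coverage-v8 devDependency added above
    coverage: { provider: "v8" },
  },
});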
apps/server/pnpm-lock.yaml (generated, new file, 2267 lines): file diff suppressed because it is too large
apps/server/tests/fixtures/configs.ts (vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
/**
 * Configuration fixtures for testing Codex config manager
 */

export const tomlConfigFixture = `
experimental_use_rmcp_client = true

[mcp_servers.automaker-tools]
command = "node"
args = ["/path/to/server.js"]
startup_timeout_sec = 10
tool_timeout_sec = 60
enabled_tools = ["UpdateFeatureStatus"]

[mcp_servers.automaker-tools.env]
AUTOMAKER_PROJECT_PATH = "/path/to/project"
`;

export const codexAuthJsonFixture = {
  token: {
    access_token: "test-access-token",
    refresh_token: "test-refresh-token",
    id_token: "test-id-token",
  },
};
apps/server/tests/fixtures/images.ts (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
/**
 * Image fixtures for testing image handling
 */

// 1x1 transparent PNG base64 data
export const pngBase64Fixture =
  "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==";

export const imageDataFixture = {
  base64: pngBase64Fixture,
  mimeType: "image/png",
  filename: "test.png",
  originalPath: "/path/to/test.png",
};
apps/server/tests/fixtures/messages.ts (vendored, new file, 62 lines)
@@ -0,0 +1,62 @@
/**
 * Message fixtures for testing providers and lib utilities
 */

import type {
  ConversationMessage,
  ProviderMessage,
  ContentBlock,
} from "../../src/providers/types.js";

export const conversationHistoryFixture: ConversationMessage[] = [
  {
    role: "user",
    content: "Hello, can you help me?",
  },
  {
    role: "assistant",
    content: "Of course! How can I assist you today?",
  },
  {
    role: "user",
    content: [
      { type: "text", text: "What is in this image?" },
      {
        type: "image",
        source: { type: "base64", media_type: "image/png", data: "base64data" },
      },
    ],
  },
];

export const claudeProviderMessageFixture: ProviderMessage = {
  type: "assistant",
  message: {
    role: "assistant",
    content: [{ type: "text", text: "This is a test response" }],
  },
};

export const codexThinkingMessageFixture = {
  type: "item.completed",
  item: {
    type: "reasoning",
    text: "I need to analyze the problem first...",
  },
};

export const codexCommandExecutionFixture = {
  type: "item.completed",
  item: {
    type: "command_execution",
    command: "ls -la",
    aggregated_output: "total 12\ndrwxr-xr-x 3 user user 4096 Dec 13",
  },
};

export const codexErrorFixture = {
  type: "error",
  data: {
    message: "Authentication failed",
  },
};
apps/server/tests/integration/helpers/git-test-repo.ts (new file, 144 lines)
@@ -0,0 +1,144 @@
/**
 * Helper for creating test git repositories for integration tests
 */
import { exec } from "child_process";
import { promisify } from "util";
import * as fs from "fs/promises";
import * as path from "path";
import * as os from "os";

const execAsync = promisify(exec);

export interface TestRepo {
  path: string;
  cleanup: () => Promise<void>;
}

/**
 * Create a temporary git repository for testing
 */
export async function createTestGitRepo(): Promise<TestRepo> {
  const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "automaker-test-"));

  // Initialize git repo
  await execAsync("git init", { cwd: tmpDir });
  await execAsync('git config user.email "test@example.com"', { cwd: tmpDir });
  await execAsync('git config user.name "Test User"', { cwd: tmpDir });

  // Create initial commit
  await fs.writeFile(path.join(tmpDir, "README.md"), "# Test Project\n");
  await execAsync("git add .", { cwd: tmpDir });
  await execAsync('git commit -m "Initial commit"', { cwd: tmpDir });

  // Create main branch explicitly
  await execAsync("git branch -M main", { cwd: tmpDir });

  return {
    path: tmpDir,
    cleanup: async () => {
      try {
        // Remove all worktrees first
        const { stdout } = await execAsync("git worktree list --porcelain", {
          cwd: tmpDir,
        }).catch(() => ({ stdout: "" }));

        const worktrees = stdout
          .split("\n\n")
          .slice(1) // Skip main worktree
          .map((block) => {
            const pathLine = block.split("\n").find((line) => line.startsWith("worktree "));
            return pathLine ? pathLine.replace("worktree ", "") : null;
          })
          .filter(Boolean);

        for (const worktreePath of worktrees) {
          try {
            await execAsync(`git worktree remove "${worktreePath}" --force`, {
              cwd: tmpDir,
            });
          } catch (err) {
            // Ignore errors
          }
        }

        // Remove the repository
        await fs.rm(tmpDir, { recursive: true, force: true });
      } catch (error) {
        console.error("Failed to cleanup test repo:", error);
      }
    },
  };
}

/**
 * Create a feature file in the test repo
 */
export async function createTestFeature(
  repoPath: string,
  featureId: string,
  featureData: any
): Promise<void> {
  const featuresDir = path.join(repoPath, ".automaker", "features");
  const featureDir = path.join(featuresDir, featureId);

  await fs.mkdir(featureDir, { recursive: true });
  await fs.writeFile(
    path.join(featureDir, "feature.json"),
    JSON.stringify(featureData, null, 2)
  );
}

/**
 * Get list of git branches
 */
export async function listBranches(repoPath: string): Promise<string[]> {
  const { stdout } = await execAsync("git branch --list", { cwd: repoPath });
  return stdout
    .split("\n")
    .map((line) => line.trim().replace(/^[*+]\s*/, ""))
    .filter(Boolean);
}

/**
 * Get list of git worktrees
 */
export async function listWorktrees(repoPath: string): Promise<string[]> {
  try {
    const { stdout } = await execAsync("git worktree list --porcelain", {
      cwd: repoPath,
    });

    return stdout
      .split("\n\n")
      .slice(1) // Skip main worktree
      .map((block) => {
        const pathLine = block.split("\n").find((line) => line.startsWith("worktree "));
        return pathLine ? pathLine.replace("worktree ", "") : null;
      })
      .filter(Boolean) as string[];
  } catch {
    return [];
  }
}

/**
 * Check if a branch exists
 */
export async function branchExists(
  repoPath: string,
  branchName: string
): Promise<boolean> {
  const branches = await listBranches(repoPath);
  return branches.includes(branchName);
}

/**
 * Check if a worktree exists
 */
export async function worktreeExists(
  repoPath: string,
  worktreePath: string
): Promise<boolean> {
  const worktrees = await listWorktrees(repoPath);
  return worktrees.some((wt) => wt === worktreePath);
}
@@ -0,0 +1,537 @@
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { AutoModeService } from "@/services/auto-mode-service.js";
import { ProviderFactory } from "@/providers/provider-factory.js";
import { FeatureLoader } from "@/services/feature-loader.js";
import {
  createTestGitRepo,
  createTestFeature,
  listBranches,
  listWorktrees,
  branchExists,
  worktreeExists,
  type TestRepo,
} from "../helpers/git-test-repo.js";
import * as fs from "fs/promises";
import * as path from "path";

vi.mock("@/providers/provider-factory.js");

describe("auto-mode-service.ts (integration)", () => {
  let service: AutoModeService;
  let testRepo: TestRepo;
  let featureLoader: FeatureLoader;
  const mockEvents = {
    subscribe: vi.fn(),
    emit: vi.fn(),
  };

  beforeEach(async () => {
    vi.clearAllMocks();
    service = new AutoModeService(mockEvents as any);
    featureLoader = new FeatureLoader();
    testRepo = await createTestGitRepo();
  });

  afterEach(async () => {
    // Stop any running auto loops
    await service.stopAutoLoop();

    // Cleanup test repo
    if (testRepo) {
      await testRepo.cleanup();
    }
  });

  describe("worktree operations", () => {
    it("should create git worktree for feature", async () => {
      // Create a test feature
      await createTestFeature(testRepo.path, "test-feature-1", {
        id: "test-feature-1",
        category: "test",
        description: "Test feature",
        status: "pending",
      });

      // Mock provider to complete quickly
      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          yield {
            type: "assistant",
            message: {
              role: "assistant",
              content: [{ type: "text", text: "Feature implemented" }],
            },
          };
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Execute feature with worktrees enabled
      await service.executeFeature(
        testRepo.path,
        "test-feature-1",
        true, // useWorktrees
        false // isAutoMode
      );

      // Verify branch was created
      const branches = await listBranches(testRepo.path);
      expect(branches).toContain("feature/test-feature-1");

      // Note: Worktrees are not automatically cleaned up by the service
      // This is expected behavior - manual cleanup is required
    }, 30000);

    it("should handle error gracefully", async () => {
      await createTestFeature(testRepo.path, "test-feature-error", {
        id: "test-feature-error",
        category: "test",
        description: "Test feature that errors",
        status: "pending",
      });

      // Mock provider that throws error
      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          throw new Error("Provider error");
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Execute feature (should handle error)
      await service.executeFeature(
        testRepo.path,
        "test-feature-error",
        true,
        false
      );

      // Verify feature status was updated to backlog (error status)
      const feature = await featureLoader.get(testRepo.path, "test-feature-error");
      expect(feature?.status).toBe("backlog");
    }, 30000);

    it("should work without worktrees", async () => {
      await createTestFeature(testRepo.path, "test-no-worktree", {
        id: "test-no-worktree",
        category: "test",
        description: "Test without worktree",
        status: "pending",
      });

      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Execute without worktrees
      await service.executeFeature(
        testRepo.path,
        "test-no-worktree",
        false, // useWorktrees = false
        false
      );

      // Feature should be updated successfully
      const feature = await featureLoader.get(testRepo.path, "test-no-worktree");
      expect(feature?.status).toBe("waiting_approval");
    }, 30000);
  });

  describe("feature execution", () => {
    it("should execute feature and update status", async () => {
      await createTestFeature(testRepo.path, "feature-exec-1", {
        id: "feature-exec-1",
        category: "ui",
        description: "Execute this feature",
        status: "pending",
      });

      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          yield {
            type: "assistant",
            message: {
              role: "assistant",
              content: [{ type: "text", text: "Implemented the feature" }],
            },
          };
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      await service.executeFeature(
        testRepo.path,
        "feature-exec-1",
        false, // Don't use worktrees so agent output is saved to main project
        false
      );

      // Check feature status was updated
      const feature = await featureLoader.get(testRepo.path, "feature-exec-1");
      expect(feature?.status).toBe("waiting_approval");

      // Check agent output was saved
      const agentOutput = await featureLoader.getAgentOutput(
        testRepo.path,
        "feature-exec-1"
      );
      expect(agentOutput).toBeTruthy();
      expect(agentOutput).toContain("Implemented the feature");
    }, 30000);

    it("should handle feature not found", async () => {
      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Try to execute non-existent feature
      await service.executeFeature(
        testRepo.path,
        "nonexistent-feature",
        true,
        false
      );

      // Should emit error event
      expect(mockEvents.emit).toHaveBeenCalledWith(
        expect.any(String),
        expect.objectContaining({
          featureId: "nonexistent-feature",
          error: expect.stringContaining("not found"),
        })
      );
    }, 30000);

    it("should prevent duplicate feature execution", async () => {
      await createTestFeature(testRepo.path, "feature-dup", {
        id: "feature-dup",
        category: "test",
        description: "Duplicate test",
        status: "pending",
      });

      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          // Simulate slow execution
          await new Promise((resolve) => setTimeout(resolve, 500));
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Start first execution
      const promise1 = service.executeFeature(
        testRepo.path,
        "feature-dup",
        false,
        false
      );

      // Try to start second execution (should throw)
      await expect(
        service.executeFeature(testRepo.path, "feature-dup", false, false)
      ).rejects.toThrow("already running");

      await promise1;
    }, 30000);

    it("should use feature-specific model", async () => {
      await createTestFeature(testRepo.path, "feature-model", {
        id: "feature-model",
        category: "test",
        description: "Model test",
        status: "pending",
        model: "gpt-5.2",
      });

      const mockProvider = {
        getName: () => "codex",
        executeQuery: async function* () {
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      await service.executeFeature(
        testRepo.path,
        "feature-model",
        false,
        false
      );

      // Should have used gpt-5.2
      expect(ProviderFactory.getProviderForModel).toHaveBeenCalledWith("gpt-5.2");
    }, 30000);
  });

  describe("auto loop", () => {
    it("should start and stop auto loop", async () => {
      const startPromise = service.startAutoLoop(testRepo.path, 2);

      // Give it time to start
      await new Promise((resolve) => setTimeout(resolve, 100));

      // Stop the loop
      const runningCount = await service.stopAutoLoop();

      expect(runningCount).toBe(0);
      await startPromise.catch(() => {}); // Cleanup
    }, 10000);

    it("should process pending features in auto loop", async () => {
      // Create multiple pending features
      await createTestFeature(testRepo.path, "auto-1", {
        id: "auto-1",
        category: "test",
        description: "Auto feature 1",
        status: "pending",
      });

      await createTestFeature(testRepo.path, "auto-2", {
        id: "auto-2",
        category: "test",
        description: "Auto feature 2",
        status: "pending",
      });

      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Start auto loop
      const startPromise = service.startAutoLoop(testRepo.path, 2);

      // Wait for features to be processed
      await new Promise((resolve) => setTimeout(resolve, 3000));

      // Stop the loop
      await service.stopAutoLoop();
      await startPromise.catch(() => {});

      // Check that features were updated
      const feature1 = await featureLoader.get(testRepo.path, "auto-1");
      const feature2 = await featureLoader.get(testRepo.path, "auto-2");

      // At least one should have been processed
      const processedCount = [feature1, feature2].filter(
        (f) => f?.status === "waiting_approval" || f?.status === "in_progress"
      ).length;

      expect(processedCount).toBeGreaterThan(0);
    }, 15000);

    it("should respect max concurrency", async () => {
      // Create 5 features
      for (let i = 1; i <= 5; i++) {
        await createTestFeature(testRepo.path, `concurrent-${i}`, {
          id: `concurrent-${i}`,
          category: "test",
          description: `Concurrent feature ${i}`,
          status: "pending",
        });
      }

      let concurrentCount = 0;
      let maxConcurrent = 0;

      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          concurrentCount++;
          maxConcurrent = Math.max(maxConcurrent, concurrentCount);

          // Simulate work
          await new Promise((resolve) => setTimeout(resolve, 500));

          concurrentCount--;

          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Start with max concurrency of 2
      const startPromise = service.startAutoLoop(testRepo.path, 2);

      // Wait for some features to be processed
      await new Promise((resolve) => setTimeout(resolve, 3000));

      await service.stopAutoLoop();
      await startPromise.catch(() => {});

      // Max concurrent should not exceed 2
      expect(maxConcurrent).toBeLessThanOrEqual(2);
    }, 15000);

    it("should emit auto mode events", async () => {
      const startPromise = service.startAutoLoop(testRepo.path, 1);

      // Wait for start event
      await new Promise((resolve) => setTimeout(resolve, 100));

      // Check start event was emitted
      const startEvent = mockEvents.emit.mock.calls.find((call) =>
        call[1]?.message?.includes("Auto mode started")
      );
      expect(startEvent).toBeTruthy();

      await service.stopAutoLoop();
      await startPromise.catch(() => {});

      // Check stop event was emitted (auto_mode_complete event)
      const stopEvent = mockEvents.emit.mock.calls.find(
        (call) =>
          call[1]?.type === "auto_mode_complete" || call[1]?.message?.includes("stopped")
      );
      expect(stopEvent).toBeTruthy();
    }, 10000);
  });

  describe("error handling", () => {
    it("should handle provider errors gracefully", async () => {
      await createTestFeature(testRepo.path, "error-feature", {
        id: "error-feature",
        category: "test",
        description: "Error test",
        status: "pending",
      });

      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          throw new Error("Provider execution failed");
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      // Should not throw
      await service.executeFeature(
        testRepo.path,
        "error-feature",
        true,
        false
      );

      // Feature should be marked as backlog (error status)
      const feature = await featureLoader.get(testRepo.path, "error-feature");
      expect(feature?.status).toBe("backlog");
    }, 30000);

    it("should continue auto loop after feature error", async () => {
      await createTestFeature(testRepo.path, "fail-1", {
        id: "fail-1",
        category: "test",
        description: "Will fail",
        status: "pending",
      });

      await createTestFeature(testRepo.path, "success-1", {
        id: "success-1",
        category: "test",
        description: "Will succeed",
        status: "pending",
      });

      let callCount = 0;
      const mockProvider = {
        getName: () => "claude",
        executeQuery: async function* () {
          callCount++;
          if (callCount === 1) {
            throw new Error("First feature fails");
          }
          yield {
            type: "result",
            subtype: "success",
          };
        },
      };

      vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
        mockProvider as any
      );

      const startPromise = service.startAutoLoop(testRepo.path, 1);

      // Wait for both features to be attempted
      await new Promise((resolve) => setTimeout(resolve, 5000));

      await service.stopAutoLoop();
      await startPromise.catch(() => {});

      // Both features should have been attempted
      expect(callCount).toBeGreaterThanOrEqual(1);
    }, 15000);
  });
});
apps/server/tests/setup.ts (new file, 16 lines)
@@ -0,0 +1,16 @@
/**
 * Vitest global setup file
 * Runs before each test file
 */

import { vi, beforeEach } from "vitest";

// Set test environment variables
process.env.NODE_ENV = "test";
process.env.DATA_DIR = "/tmp/test-data";
process.env.ALLOWED_PROJECT_DIRS = "/tmp/test-projects";

// Reset all mocks before each test
beforeEach(() => {
  vi.clearAllMocks();
});
apps/server/tests/unit/lib/auth.test.ts (new file, 116 lines)
@@ -0,0 +1,116 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { createMockExpressContext } from "../../utils/mocks.js";

/**
 * Note: auth.ts reads AUTOMAKER_API_KEY at module load time.
 * We need to reset modules and reimport for each test to get fresh state.
 */
describe("auth.ts", () => {
  beforeEach(() => {
    vi.resetModules();
  });

  describe("authMiddleware - no API key", () => {
    it("should call next() when no API key is set", async () => {
      delete process.env.AUTOMAKER_API_KEY;

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();

      authMiddleware(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(res.status).not.toHaveBeenCalled();
    });
  });

  describe("authMiddleware - with API key", () => {
    it("should reject request without API key header", async () => {
      process.env.AUTOMAKER_API_KEY = "test-secret-key";

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();

      authMiddleware(req, res, next);

      expect(res.status).toHaveBeenCalledWith(401);
      expect(res.json).toHaveBeenCalledWith({
        success: false,
        error: "Authentication required. Provide X-API-Key header.",
      });
      expect(next).not.toHaveBeenCalled();
    });

    it("should reject request with invalid API key", async () => {
      process.env.AUTOMAKER_API_KEY = "test-secret-key";

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();
      req.headers["x-api-key"] = "wrong-key";

      authMiddleware(req, res, next);

      expect(res.status).toHaveBeenCalledWith(403);
      expect(res.json).toHaveBeenCalledWith({
        success: false,
        error: "Invalid API key.",
      });
      expect(next).not.toHaveBeenCalled();
    });

    it("should call next() with valid API key", async () => {
      process.env.AUTOMAKER_API_KEY = "test-secret-key";

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();
      req.headers["x-api-key"] = "test-secret-key";

      authMiddleware(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(res.status).not.toHaveBeenCalled();
    });
  });

  describe("isAuthEnabled", () => {
    it("should return false when no API key is set", async () => {
      delete process.env.AUTOMAKER_API_KEY;

      const { isAuthEnabled } = await import("@/lib/auth.js");
      expect(isAuthEnabled()).toBe(false);
    });

    it("should return true when API key is set", async () => {
      process.env.AUTOMAKER_API_KEY = "test-key";

      const { isAuthEnabled } = await import("@/lib/auth.js");
      expect(isAuthEnabled()).toBe(true);
    });
  });

  describe("getAuthStatus", () => {
    it("should return disabled status when no API key", async () => {
      delete process.env.AUTOMAKER_API_KEY;

      const { getAuthStatus } = await import("@/lib/auth.js");
      const status = getAuthStatus();

      expect(status).toEqual({
        enabled: false,
        method: "none",
      });
    });

    it("should return enabled status when API key is set", async () => {
      process.env.AUTOMAKER_API_KEY = "test-key";

      const { getAuthStatus } = await import("@/lib/auth.js");
      const status = getAuthStatus();

      expect(status).toEqual({
        enabled: true,
        method: "api_key",
      });
    });
  });
});
apps/server/tests/unit/lib/conversation-utils.test.ts (new file, 226 lines)
@@ -0,0 +1,226 @@
import { describe, it, expect } from "vitest";
import {
  extractTextFromContent,
  normalizeContentBlocks,
  formatHistoryAsText,
  convertHistoryToMessages,
} from "@/lib/conversation-utils.js";
import { conversationHistoryFixture } from "../../fixtures/messages.js";

describe("conversation-utils.ts", () => {
  describe("extractTextFromContent", () => {
    it("should return string content as-is", () => {
      const result = extractTextFromContent("Hello world");
      expect(result).toBe("Hello world");
    });

    it("should extract text from single text block", () => {
      const content = [{ type: "text", text: "Hello" }];
      const result = extractTextFromContent(content);
      expect(result).toBe("Hello");
    });

    it("should extract and join multiple text blocks with newlines", () => {
      const content = [
        { type: "text", text: "First block" },
        { type: "text", text: "Second block" },
        { type: "text", text: "Third block" },
      ];
      const result = extractTextFromContent(content);
      expect(result).toBe("First block\nSecond block\nThird block");
    });

    it("should ignore non-text blocks", () => {
      const content = [
        { type: "text", text: "Text content" },
        { type: "image", source: { type: "base64", data: "abc" } },
        { type: "text", text: "More text" },
        { type: "tool_use", name: "bash", input: {} },
      ];
      const result = extractTextFromContent(content);
      expect(result).toBe("Text content\nMore text");
    });

    it("should handle blocks without text property", () => {
      const content = [
        { type: "text", text: "Valid" },
        { type: "text" } as any,
        { type: "text", text: "Also valid" },
      ];
      const result = extractTextFromContent(content);
      expect(result).toBe("Valid\n\nAlso valid");
    });

    it("should handle empty array", () => {
      const result = extractTextFromContent([]);
      expect(result).toBe("");
    });

    it("should handle array with only non-text blocks", () => {
      const content = [
        { type: "image", source: {} },
        { type: "tool_use", name: "test" },
      ];
      const result = extractTextFromContent(content);
      expect(result).toBe("");
    });
  });

  describe("normalizeContentBlocks", () => {
    it("should convert string to content block array", () => {
      const result = normalizeContentBlocks("Hello");
      expect(result).toEqual([{ type: "text", text: "Hello" }]);
    });

    it("should return array content as-is", () => {
      const content = [
        { type: "text", text: "Hello" },
        { type: "image", source: {} },
      ];
      const result = normalizeContentBlocks(content);
      expect(result).toBe(content);
      expect(result).toHaveLength(2);
    });

    it("should handle empty string", () => {
      const result = normalizeContentBlocks("");
      expect(result).toEqual([{ type: "text", text: "" }]);
    });
  });

  describe("formatHistoryAsText", () => {
    it("should return empty string for empty history", () => {
      const result = formatHistoryAsText([]);
      expect(result).toBe("");
    });

    it("should format single user message", () => {
      const history = [{ role: "user" as const, content: "Hello" }];
      const result = formatHistoryAsText(history);

      expect(result).toContain("Previous conversation:");
      expect(result).toContain("User: Hello");
      expect(result).toContain("---");
    });

    it("should format single assistant message", () => {
      const history = [{ role: "assistant" as const, content: "Hi there" }];
      const result = formatHistoryAsText(history);

      expect(result).toContain("Assistant: Hi there");
    });

    it("should format multiple messages with correct roles", () => {
      const history = conversationHistoryFixture.slice(0, 2);
      const result = formatHistoryAsText(history);

      expect(result).toContain("User: Hello, can you help me?");
      expect(result).toContain("Assistant: Of course! How can I assist you today?");
      expect(result).toContain("---");
    });

    it("should handle messages with array content (multipart)", () => {
      const history = [conversationHistoryFixture[2]]; // Has text + image
      const result = formatHistoryAsText(history);

      expect(result).toContain("What is in this image?");
      expect(result).not.toContain("base64"); // Should not include image data
    });

    it("should format all messages from fixture", () => {
      const result = formatHistoryAsText(conversationHistoryFixture);

      expect(result).toContain("Previous conversation:");
      expect(result).toContain("User: Hello, can you help me?");
      expect(result).toContain("Assistant: Of course!");
      expect(result).toContain("User: What is in this image?");
      expect(result).toContain("---");
    });

    it("should separate messages with double newlines", () => {
      const history = [
        { role: "user" as const, content: "First" },
        { role: "assistant" as const, content: "Second" },
      ];
      const result = formatHistoryAsText(history);

      expect(result).toMatch(/User: First\n\nAssistant: Second/);
    });
  });

  describe("convertHistoryToMessages", () => {
    it("should convert empty history", () => {
      const result = convertHistoryToMessages([]);
      expect(result).toEqual([]);
    });

    it("should convert single message to SDK format", () => {
      const history = [{ role: "user" as const, content: "Hello" }];
      const result = convertHistoryToMessages(history);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        type: "user",
        session_id: "",
        message: {
          role: "user",
          content: [{ type: "text", text: "Hello" }],
        },
        parent_tool_use_id: null,
      });
    });

    it("should normalize string content to array", () => {
      const history = [{ role: "assistant" as const, content: "Response" }];
      const result = convertHistoryToMessages(history);

      expect(result[0].message.content).toEqual([
        { type: "text", text: "Response" },
      ]);
    });

    it("should preserve array content", () => {
      const history = [
        {
          role: "user" as const,
          content: [
            { type: "text", text: "Hello" },
            { type: "image", source: {} },
          ],
        },
      ];
      const result = convertHistoryToMessages(history);

      expect(result[0].message.content).toHaveLength(2);
      expect(result[0].message.content[0]).toEqual({ type: "text", text: "Hello" });
    });

    it("should convert multiple messages", () => {
      const history = conversationHistoryFixture.slice(0, 2);
      const result = convertHistoryToMessages(history);

      expect(result).toHaveLength(2);
      expect(result[0].type).toBe("user");
      expect(result[1].type).toBe("assistant");
    });

    it("should set correct fields for SDK format", () => {
      const history = [{ role: "user" as const, content: "Test" }];
      const result = convertHistoryToMessages(history);

      expect(result[0].session_id).toBe("");
      expect(result[0].parent_tool_use_id).toBeNull();
      expect(result[0].type).toBe("user");
      expect(result[0].message.role).toBe("user");
    });

    it("should handle all messages from fixture", () => {
      const result = convertHistoryToMessages(conversationHistoryFixture);

      expect(result).toHaveLength(3);
      expect(result[0].message.content).toBeInstanceOf(Array);
      expect(result[1].message.content).toBeInstanceOf(Array);
      expect(result[2].message.content).toBeInstanceOf(Array);
    });
  });
});
apps/server/tests/unit/lib/error-handler.test.ts (new file, 146 lines)
@@ -0,0 +1,146 @@
import { describe, it, expect } from "vitest";
import {
  isAbortError,
  isAuthenticationError,
  classifyError,
  getUserFriendlyErrorMessage,
  type ErrorType,
} from "@/lib/error-handler.js";

describe("error-handler.ts", () => {
  describe("isAbortError", () => {
    it("should detect AbortError by error name", () => {
      const error = new Error("Operation cancelled");
      error.name = "AbortError";
      expect(isAbortError(error)).toBe(true);
    });

    it("should detect abort error by message content", () => {
      const error = new Error("Request was aborted");
      expect(isAbortError(error)).toBe(true);
    });

    it("should return false for non-abort errors", () => {
      const error = new Error("Something else went wrong");
      expect(isAbortError(error)).toBe(false);
    });

    it("should return false for non-Error objects", () => {
      expect(isAbortError("not an error")).toBe(false);
      expect(isAbortError(null)).toBe(false);
      expect(isAbortError(undefined)).toBe(false);
    });
  });

  describe("isAuthenticationError", () => {
    it("should detect 'Authentication failed' message", () => {
      expect(isAuthenticationError("Authentication failed")).toBe(true);
    });

    it("should detect 'Invalid API key' message", () => {
      expect(isAuthenticationError("Invalid API key provided")).toBe(true);
    });

    it("should detect 'authentication_failed' message", () => {
      expect(isAuthenticationError("authentication_failed")).toBe(true);
    });

    it("should detect 'Fix external API key' message", () => {
      expect(isAuthenticationError("Fix external API key configuration")).toBe(true);
    });

    it("should return false for non-authentication errors", () => {
      expect(isAuthenticationError("Network connection error")).toBe(false);
      expect(isAuthenticationError("File not found")).toBe(false);
    });

    it("should be case sensitive", () => {
      expect(isAuthenticationError("authentication Failed")).toBe(false);
    });
  });

  describe("classifyError", () => {
    it("should classify authentication errors", () => {
      const error = new Error("Authentication failed");
      const result = classifyError(error);

      expect(result.type).toBe("authentication");
      expect(result.isAuth).toBe(true);
      expect(result.isAbort).toBe(false);
      expect(result.message).toBe("Authentication failed");
      expect(result.originalError).toBe(error);
    });

    it("should classify abort errors", () => {
      const error = new Error("Operation aborted");
      error.name = "AbortError";
      const result = classifyError(error);

      expect(result.type).toBe("abort");
      expect(result.isAbort).toBe(true);
      expect(result.isAuth).toBe(false);
      expect(result.message).toBe("Operation aborted");
    });

    it("should prioritize auth over abort if both match", () => {
      const error = new Error("Authentication failed and aborted");
      const result = classifyError(error);

      expect(result.type).toBe("authentication");
      expect(result.isAuth).toBe(true);
      expect(result.isAbort).toBe(true); // Still detected as abort too
    });

    it("should classify generic Error as execution error", () => {
      const error = new Error("Something went wrong");
      const result = classifyError(error);

      expect(result.type).toBe("execution");
      expect(result.isAuth).toBe(false);
      expect(result.isAbort).toBe(false);
    });

    it("should classify non-Error objects as unknown", () => {
      const error = "string error";
      const result = classifyError(error);

      expect(result.type).toBe("unknown");
      expect(result.message).toBe("string error");
    });

    it("should handle null and undefined", () => {
      const nullResult = classifyError(null);
      expect(nullResult.type).toBe("unknown");
      expect(nullResult.message).toBe("Unknown error");

      const undefinedResult = classifyError(undefined);
      expect(undefinedResult.type).toBe("unknown");
      expect(undefinedResult.message).toBe("Unknown error");
    });
  });

  describe("getUserFriendlyErrorMessage", () => {
    it("should return friendly message for abort errors", () => {
      const error = new Error("abort");
      const result = getUserFriendlyErrorMessage(error);
      expect(result).toBe("Operation was cancelled");
    });

    it("should return friendly message for authentication errors", () => {
      const error = new Error("Authentication failed");
      const result = getUserFriendlyErrorMessage(error);
      expect(result).toBe("Authentication failed. Please check your API key.");
    });

    it("should return original message for other errors", () => {
      const error = new Error("File not found");
      const result = getUserFriendlyErrorMessage(error);
      expect(result).toBe("File not found");
    });

    it("should handle non-Error objects", () => {
      const result = getUserFriendlyErrorMessage("Custom error");
      expect(result).toBe("Custom error");
    });
  });
});
apps/server/tests/unit/lib/events.test.ts (new file, 130 lines)
@@ -0,0 +1,130 @@
import { describe, it, expect, vi } from "vitest";
import { createEventEmitter, type EventType } from "@/lib/events.js";

describe("events.ts", () => {
  describe("createEventEmitter", () => {
    it("should emit events to single subscriber", () => {
      const emitter = createEventEmitter();
      const callback = vi.fn();

      emitter.subscribe(callback);
      emitter.emit("agent:stream", { message: "test" });

      expect(callback).toHaveBeenCalledOnce();
      expect(callback).toHaveBeenCalledWith("agent:stream", { message: "test" });
    });

    it("should emit events to multiple subscribers", () => {
      const emitter = createEventEmitter();
      const callback1 = vi.fn();
      const callback2 = vi.fn();
      const callback3 = vi.fn();

      emitter.subscribe(callback1);
      emitter.subscribe(callback2);
      emitter.subscribe(callback3);
      emitter.emit("feature:started", { id: "123" });

      expect(callback1).toHaveBeenCalledOnce();
      expect(callback2).toHaveBeenCalledOnce();
      expect(callback3).toHaveBeenCalledOnce();
      expect(callback1).toHaveBeenCalledWith("feature:started", { id: "123" });
    });

    it("should support unsubscribe functionality", () => {
      const emitter = createEventEmitter();
      const callback = vi.fn();

      const unsubscribe = emitter.subscribe(callback);
      emitter.emit("agent:stream", { test: 1 });

      expect(callback).toHaveBeenCalledOnce();

      unsubscribe();
      emitter.emit("agent:stream", { test: 2 });

      expect(callback).toHaveBeenCalledOnce(); // Still called only once
    });

    it("should handle errors in subscribers without crashing", () => {
      const emitter = createEventEmitter();
      const errorCallback = vi.fn(() => {
        throw new Error("Subscriber error");
      });
      const normalCallback = vi.fn();
      const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});

      emitter.subscribe(errorCallback);
      emitter.subscribe(normalCallback);

      expect(() => {
        emitter.emit("feature:error", { error: "test" });
      }).not.toThrow();

      expect(errorCallback).toHaveBeenCalledOnce();
      expect(normalCallback).toHaveBeenCalledOnce();
      expect(consoleSpy).toHaveBeenCalled();

      consoleSpy.mockRestore();
    });

    it("should emit different event types", () => {
      const emitter = createEventEmitter();
      const callback = vi.fn();

      emitter.subscribe(callback);

      const eventTypes: EventType[] = [
        "agent:stream",
        "auto-mode:started",
        "feature:completed",
        "project:analysis-progress",
      ];

      eventTypes.forEach((type) => {
        emitter.emit(type, { type });
      });

      expect(callback).toHaveBeenCalledTimes(4);
    });

    it("should handle emitting without subscribers", () => {
      const emitter = createEventEmitter();

      expect(() => {
        emitter.emit("agent:stream", { test: true });
      }).not.toThrow();
    });

    it("should allow multiple subscriptions and unsubscriptions", () => {
      const emitter = createEventEmitter();
      const callback1 = vi.fn();
      const callback2 = vi.fn();
      const callback3 = vi.fn();

      const unsub1 = emitter.subscribe(callback1);
      const unsub2 = emitter.subscribe(callback2);
      const unsub3 = emitter.subscribe(callback3);

      emitter.emit("feature:started", { test: 1 });
      expect(callback1).toHaveBeenCalledOnce();
      expect(callback2).toHaveBeenCalledOnce();
      expect(callback3).toHaveBeenCalledOnce();

      unsub2();

      emitter.emit("feature:started", { test: 2 });
      expect(callback1).toHaveBeenCalledTimes(2);
      expect(callback2).toHaveBeenCalledOnce(); // Still just once
      expect(callback3).toHaveBeenCalledTimes(2);

      unsub1();
      unsub3();

      emitter.emit("feature:started", { test: 3 });
      expect(callback1).toHaveBeenCalledTimes(2);
      expect(callback2).toHaveBeenCalledOnce();
      expect(callback3).toHaveBeenCalledTimes(2);
    });
  });
});
apps/server/tests/unit/lib/image-handler.test.ts (new file, 231 lines)
@@ -0,0 +1,231 @@
import { describe, it, expect, vi, beforeEach } from "vitest";
import {
  getMimeTypeForImage,
  readImageAsBase64,
  convertImagesToContentBlocks,
  formatImagePathsForPrompt,
} from "@/lib/image-handler.js";
import { pngBase64Fixture } from "../../fixtures/images.js";
import * as fs from "fs/promises";

vi.mock("fs/promises");

describe("image-handler.ts", () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe("getMimeTypeForImage", () => {
    it("should return correct MIME type for .jpg", () => {
      expect(getMimeTypeForImage("test.jpg")).toBe("image/jpeg");
      expect(getMimeTypeForImage("/path/to/test.jpg")).toBe("image/jpeg");
    });

    it("should return correct MIME type for .jpeg", () => {
      expect(getMimeTypeForImage("test.jpeg")).toBe("image/jpeg");
    });

    it("should return correct MIME type for .png", () => {
      expect(getMimeTypeForImage("test.png")).toBe("image/png");
    });

    it("should return correct MIME type for .gif", () => {
      expect(getMimeTypeForImage("test.gif")).toBe("image/gif");
    });

    it("should return correct MIME type for .webp", () => {
      expect(getMimeTypeForImage("test.webp")).toBe("image/webp");
    });

    it("should be case-insensitive", () => {
      expect(getMimeTypeForImage("test.PNG")).toBe("image/png");
      expect(getMimeTypeForImage("test.JPG")).toBe("image/jpeg");
      expect(getMimeTypeForImage("test.GIF")).toBe("image/gif");
      expect(getMimeTypeForImage("test.WEBP")).toBe("image/webp");
    });

    it("should default to image/png for unknown extensions", () => {
      expect(getMimeTypeForImage("test.unknown")).toBe("image/png");
      expect(getMimeTypeForImage("test.txt")).toBe("image/png");
      expect(getMimeTypeForImage("test")).toBe("image/png");
    });

    it("should handle paths with multiple dots", () => {
      expect(getMimeTypeForImage("my.image.file.jpg")).toBe("image/jpeg");
    });
  });

  describe("readImageAsBase64", () => {
    it("should read image and return base64 data", async () => {
      const mockBuffer = Buffer.from(pngBase64Fixture, "base64");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      const result = await readImageAsBase64("/path/to/test.png");

      expect(result).toMatchObject({
        base64: pngBase64Fixture,
        mimeType: "image/png",
        filename: "test.png",
        originalPath: "/path/to/test.png",
      });
      expect(fs.readFile).toHaveBeenCalledWith("/path/to/test.png");
    });

    it("should handle different image formats", async () => {
      const mockBuffer = Buffer.from("jpeg-data");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      const result = await readImageAsBase64("/path/to/photo.jpg");

      expect(result.mimeType).toBe("image/jpeg");
      expect(result.filename).toBe("photo.jpg");
      expect(result.base64).toBe(mockBuffer.toString("base64"));
    });

    it("should extract filename from path", async () => {
      const mockBuffer = Buffer.from("data");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      const result = await readImageAsBase64("/deep/nested/path/image.webp");

      expect(result.filename).toBe("image.webp");
    });

    it("should throw error if file cannot be read", async () => {
      vi.mocked(fs.readFile).mockRejectedValue(new Error("File not found"));

      await expect(readImageAsBase64("/nonexistent.png")).rejects.toThrow(
        "File not found"
      );
    });
  });

  describe("convertImagesToContentBlocks", () => {
    it("should convert single image to content block", async () => {
      const mockBuffer = Buffer.from(pngBase64Fixture, "base64");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      const result = await convertImagesToContentBlocks(["/path/test.png"]);

      expect(result).toHaveLength(1);
      expect(result[0]).toMatchObject({
        type: "image",
        source: {
          type: "base64",
          media_type: "image/png",
          data: pngBase64Fixture,
        },
      });
    });

    it("should convert multiple images to content blocks", async () => {
      const mockBuffer = Buffer.from("test-data");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      const result = await convertImagesToContentBlocks([
        "/a.png",
        "/b.jpg",
        "/c.webp",
      ]);

      expect(result).toHaveLength(3);
      expect(result[0].source.media_type).toBe("image/png");
      expect(result[1].source.media_type).toBe("image/jpeg");
      expect(result[2].source.media_type).toBe("image/webp");
    });

    it("should resolve relative paths with workDir", async () => {
      const mockBuffer = Buffer.from("data");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      await convertImagesToContentBlocks(["relative.png"], "/work/dir");

      // Use path-agnostic check since Windows uses backslashes
      const calls = vi.mocked(fs.readFile).mock.calls;
      expect(calls[0][0]).toMatch(/relative\.png$/);
      expect(calls[0][0]).toContain("work");
      expect(calls[0][0]).toContain("dir");
    });

    it("should handle absolute paths without workDir", async () => {
      const mockBuffer = Buffer.from("data");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      await convertImagesToContentBlocks(["/absolute/path.png"]);

      expect(fs.readFile).toHaveBeenCalledWith("/absolute/path.png");
    });

    it("should continue processing on individual image errors", async () => {
      vi.mocked(fs.readFile)
        .mockResolvedValueOnce(Buffer.from("ok1"))
        .mockRejectedValueOnce(new Error("Failed"))
        .mockResolvedValueOnce(Buffer.from("ok2"));

      const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});

      const result = await convertImagesToContentBlocks([
        "/a.png",
        "/b.png",
        "/c.png",
      ]);

      expect(result).toHaveLength(2); // Only successful images
      expect(consoleSpy).toHaveBeenCalled();

      consoleSpy.mockRestore();
    });

    it("should return empty array for empty input", async () => {
      const result = await convertImagesToContentBlocks([]);
      expect(result).toEqual([]);
    });

    it("should handle undefined workDir", async () => {
      const mockBuffer = Buffer.from("data");
      vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);

      const result = await convertImagesToContentBlocks(["/test.png"], undefined);

      expect(result).toHaveLength(1);
      expect(fs.readFile).toHaveBeenCalledWith("/test.png");
    });
  });

  describe("formatImagePathsForPrompt", () => {
    it("should format single image path as bulleted list", () => {
      const result = formatImagePathsForPrompt(["/path/image.png"]);

      expect(result).toContain("\n\nAttached images:");
      expect(result).toContain("- /path/image.png");
    });

    it("should format multiple image paths as bulleted list", () => {
      const result = formatImagePathsForPrompt([
        "/path/a.png",
        "/path/b.jpg",
        "/path/c.webp",
      ]);

      expect(result).toContain("Attached images:");
      expect(result).toContain("- /path/a.png");
      expect(result).toContain("- /path/b.jpg");
      expect(result).toContain("- /path/c.webp");
    });

    it("should return empty string for empty array", () => {
      const result = formatImagePathsForPrompt([]);
      expect(result).toBe("");
    });

    it("should start with double newline", () => {
      const result = formatImagePathsForPrompt(["/test.png"]);
      expect(result.startsWith("\n\n")).toBe(true);
    });

    it("should handle paths with special characters", () => {
      const result = formatImagePathsForPrompt(["/path/with spaces/image.png"]);
      expect(result).toContain("- /path/with spaces/image.png");
    });
  });
});
156
apps/server/tests/unit/lib/model-resolver.test.ts
Normal file
156
apps/server/tests/unit/lib/model-resolver.test.ts
Normal file
@@ -0,0 +1,156 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import {
|
||||
resolveModelString,
|
||||
getEffectiveModel,
|
||||
CLAUDE_MODEL_MAP,
|
||||
DEFAULT_MODELS,
|
||||
} from "@/lib/model-resolver.js";
|
||||
|
||||
describe("model-resolver.ts", () => {
|
||||
let consoleSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
consoleSpy = {
|
||||
log: vi.spyOn(console, "log").mockImplementation(() => {}),
|
||||
warn: vi.spyOn(console, "warn").mockImplementation(() => {}),
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
consoleSpy.log.mockRestore();
|
||||
consoleSpy.warn.mockRestore();
|
||||
});
|
||||
|
||||
describe("resolveModelString", () => {
|
||||
it("should resolve 'haiku' alias to full model string", () => {
|
||||
const result = resolveModelString("haiku");
|
||||
expect(result).toBe("claude-haiku-4-5");
|
||||
});
|
||||
|
||||
it("should resolve 'sonnet' alias to full model string", () => {
|
||||
const result = resolveModelString("sonnet");
|
||||
expect(result).toBe("claude-sonnet-4-20250514");
|
||||
});
|
||||
|
||||
it("should resolve 'opus' alias to full model string", () => {
|
||||
const result = resolveModelString("opus");
|
||||
expect(result).toBe("claude-opus-4-5-20251101");
|
||||
expect(consoleSpy.log).toHaveBeenCalledWith(
|
||||
expect.stringContaining('Resolved model alias: "opus"')
|
||||
);
|
||||
});
|
||||
|
||||
it("should pass through OpenAI gpt-* models", () => {
|
||||
const models = ["gpt-5.2", "gpt-5.1-codex", "gpt-4"];
|
||||
models.forEach((model) => {
|
||||
const result = resolveModelString(model);
|
||||
expect(result).toBe(model);
|
||||
});
|
||||
expect(consoleSpy.log).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Using OpenAI/Codex model")
|
||||
);
|
||||
});
|
||||
|
||||
it("should treat o-series models as unknown (Codex CLI doesn't support them)", () => {
|
||||
const models = ["o1", "o1-mini", "o3"];
|
||||
models.forEach((model) => {
|
||||
const result = resolveModelString(model);
|
||||
// Should fall back to default since these aren't supported
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
});
|
||||
|
||||
it("should pass through full Claude model strings", () => {
|
||||
const models = [
|
||||
"claude-opus-4-5-20251101",
|
||||
"claude-sonnet-4-20250514",
|
||||
"claude-haiku-4-5",
|
||||
];
|
||||
models.forEach((model) => {
|
||||
const result = resolveModelString(model);
|
||||
expect(result).toBe(model);
|
||||
});
|
||||
expect(consoleSpy.log).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Using full Claude model string")
|
||||
);
|
||||
});
|
||||
|
||||
it("should return default model when modelKey is undefined", () => {
|
||||
const result = resolveModelString(undefined);
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
|
||||
it("should return custom default model when provided", () => {
|
||||
const customDefault = "custom-model";
|
||||
const result = resolveModelString(undefined, customDefault);
|
||||
expect(result).toBe(customDefault);
|
||||
});
|
||||
|
||||
it("should return default for unknown model key", () => {
|
||||
const result = resolveModelString("unknown-model");
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
expect(consoleSpy.warn).toHaveBeenCalledWith(
|
||||
expect.stringContaining('Unknown model key "unknown-model"')
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle empty string", () => {
|
||||
const result = resolveModelString("");
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getEffectiveModel", () => {
|
||||
it("should prioritize explicit model over session and default", () => {
|
||||
const result = getEffectiveModel("opus", "haiku", "gpt-5.2");
|
||||
expect(result).toBe("claude-opus-4-5-20251101");
|
||||
});
|
||||
|
||||
it("should use session model when explicit is not provided", () => {
|
||||
const result = getEffectiveModel(undefined, "sonnet", "gpt-5.2");
|
||||
expect(result).toBe("claude-sonnet-4-20250514");
|
||||
});
|
||||
|
||||
it("should use default when neither explicit nor session is provided", () => {
|
||||
const customDefault = "claude-haiku-4-5";
|
||||
const result = getEffectiveModel(undefined, undefined, customDefault);
|
||||
expect(result).toBe(customDefault);
|
||||
});
|
||||
|
||||
it("should use Claude default when no arguments provided", () => {
|
||||
const result = getEffectiveModel();
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
|
||||
it("should handle explicit empty strings as undefined", () => {
|
||||
const result = getEffectiveModel("", "haiku");
|
||||
expect(result).toBe("claude-haiku-4-5");
|
||||
});
|
||||
});
|
||||
|
||||
describe("CLAUDE_MODEL_MAP", () => {
|
||||
it("should have haiku, sonnet, opus mappings", () => {
|
||||
expect(CLAUDE_MODEL_MAP).toHaveProperty("haiku");
|
||||
expect(CLAUDE_MODEL_MAP).toHaveProperty("sonnet");
|
||||
expect(CLAUDE_MODEL_MAP).toHaveProperty("opus");
|
||||
});
|
||||
|
||||
it("should have valid Claude model strings", () => {
|
||||
expect(CLAUDE_MODEL_MAP.haiku).toContain("haiku");
|
||||
expect(CLAUDE_MODEL_MAP.sonnet).toContain("sonnet");
|
||||
expect(CLAUDE_MODEL_MAP.opus).toContain("opus");
|
||||
});
|
||||
});
|
||||
|
||||
describe("DEFAULT_MODELS", () => {
|
||||
it("should have claude and openai defaults", () => {
|
||||
expect(DEFAULT_MODELS).toHaveProperty("claude");
|
||||
expect(DEFAULT_MODELS).toHaveProperty("openai");
|
||||
});
|
||||
|
||||
it("should have valid default models", () => {
|
||||
expect(DEFAULT_MODELS.claude).toContain("claude");
|
||||
expect(DEFAULT_MODELS.openai).toContain("gpt");
|
||||
});
|
||||
});
|
||||
});
|
||||
197
apps/server/tests/unit/lib/prompt-builder.test.ts
Normal file
197
apps/server/tests/unit/lib/prompt-builder.test.ts
Normal file
@@ -0,0 +1,197 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { buildPromptWithImages } from "@/lib/prompt-builder.js";
|
||||
import * as imageHandler from "@/lib/image-handler.js";
|
||||
|
||||
vi.mock("@/lib/image-handler.js");
|
||||
|
||||
describe("prompt-builder.ts", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe("buildPromptWithImages", () => {
|
||||
it("should return plain text when no images provided", async () => {
|
||||
const result = await buildPromptWithImages("Hello world");
|
||||
|
||||
expect(result).toEqual({
|
||||
content: "Hello world",
|
||||
hasImages: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("should return plain text when imagePaths is empty array", async () => {
|
||||
const result = await buildPromptWithImages("Hello world", []);
|
||||
|
||||
expect(result).toEqual({
|
||||
content: "Hello world",
|
||||
hasImages: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("should build content blocks with single image", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "base64data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Describe this image", [
|
||||
"/test.png",
|
||||
]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
expect(Array.isArray(result.content)).toBe(true);
|
||||
const content = result.content as Array<any>;
|
||||
expect(content).toHaveLength(2);
|
||||
expect(content[0]).toEqual({ type: "text", text: "Describe this image" });
|
||||
expect(content[1].type).toBe("image");
|
||||
});
|
||||
|
||||
it("should build content blocks with multiple images", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data1" },
|
||||
},
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/jpeg", data: "data2" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Analyze these", [
|
||||
"/a.png",
|
||||
"/b.jpg",
|
||||
]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
const content = result.content as Array<any>;
|
||||
expect(content).toHaveLength(3); // 1 text + 2 images
|
||||
expect(content[0].type).toBe("text");
|
||||
expect(content[1].type).toBe("image");
|
||||
expect(content[2].type).toBe("image");
|
||||
});
|
||||
|
||||
it("should include image paths in text when requested", async () => {
|
||||
vi.mocked(imageHandler.formatImagePathsForPrompt).mockReturnValue(
|
||||
"\n\nAttached images:\n- /test.png"
|
||||
);
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages(
|
||||
"Base prompt",
|
||||
["/test.png"],
|
||||
undefined,
|
||||
true
|
||||
);
|
||||
|
||||
expect(imageHandler.formatImagePathsForPrompt).toHaveBeenCalledWith([
|
||||
"/test.png",
|
||||
]);
|
||||
const content = result.content as Array<any>;
|
||||
expect(content[0].text).toContain("Base prompt");
|
||||
expect(content[0].text).toContain("Attached images:");
|
||||
});
|
||||
|
||||
it("should not include image paths by default", async () => {
|
||||
vi.mocked(imageHandler.formatImagePathsForPrompt).mockReturnValue(
|
||||
"\n\nAttached images:\n- /test.png"
|
||||
);
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Base prompt", ["/test.png"]);
|
||||
|
||||
expect(imageHandler.formatImagePathsForPrompt).not.toHaveBeenCalled();
|
||||
const content = result.content as Array<any>;
|
||||
expect(content[0].text).toBe("Base prompt");
|
||||
});
|
||||
|
||||
it("should pass workDir to convertImagesToContentBlocks", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
await buildPromptWithImages("Test", ["/test.png"], "/work/dir");
|
||||
|
||||
expect(imageHandler.convertImagesToContentBlocks).toHaveBeenCalledWith(
|
||||
["/test.png"],
|
||||
"/work/dir"
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle empty text content", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("", ["/test.png"]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
// When text is empty/whitespace, should only have image blocks
|
||||
const content = result.content as Array<any>;
|
||||
expect(content.every((block) => block.type === "image")).toBe(true);
|
||||
});
|
||||
|
||||
it("should trim text content before checking if empty", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages(" ", ["/test.png"]);
|
||||
|
||||
const content = result.content as Array<any>;
|
||||
// Whitespace-only text should be excluded
|
||||
expect(content.every((block) => block.type === "image")).toBe(true);
|
||||
});
|
||||
|
||||
it("should return text when only one block and it's text", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([]);
|
||||
|
||||
const result = await buildPromptWithImages("Just text", ["/missing.png"]);
|
||||
|
||||
// If no images are successfully loaded, should return just the text
|
||||
expect(result.content).toBe("Just text");
|
||||
expect(result.hasImages).toBe(true); // Still true because images were requested
|
||||
});
|
||||
|
||||
it("should handle workDir with relative paths", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
await buildPromptWithImages(
|
||||
"Test",
|
||||
["relative.png"],
|
||||
"/absolute/work/dir"
|
||||
);
|
||||
|
||||
expect(imageHandler.convertImagesToContentBlocks).toHaveBeenCalledWith(
|
||||
["relative.png"],
|
||||
"/absolute/work/dir"
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
297
apps/server/tests/unit/lib/security.test.ts
Normal file
297
apps/server/tests/unit/lib/security.test.ts
Normal file
@@ -0,0 +1,297 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import path from "path";
|
||||
|
||||
/**
|
||||
* Note: security.ts maintains module-level state (allowed paths Set).
|
||||
* We need to reset modules and reimport for each test to get fresh state.
|
||||
*/
|
||||
describe("security.ts", () => {
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
describe("initAllowedPaths", () => {
|
||||
it("should parse comma-separated directories from environment", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,/path2,/path3";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/path1"));
|
||||
expect(allowed).toContain(path.resolve("/path2"));
|
||||
expect(allowed).toContain(path.resolve("/path3"));
|
||||
});
|
||||
|
||||
it("should trim whitespace from paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = " /path1 , /path2 , /path3 ";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/path1"));
|
||||
expect(allowed).toContain(path.resolve("/path2"));
|
||||
});
|
||||
|
||||
it("should always include DATA_DIR if set", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "/data/dir";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/data/dir"));
|
||||
});
|
||||
|
||||
it("should handle empty ALLOWED_PROJECT_DIRS", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "/data";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toHaveLength(1);
|
||||
expect(allowed[0]).toBe(path.resolve("/data"));
|
||||
});
|
||||
|
||||
it("should skip empty entries in comma list", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,,/path2, ,/path3";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toHaveLength(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe("addAllowedPath", () => {
|
||||
it("should add path to allowed list", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, addAllowedPath, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
initAllowedPaths();
|
||||
|
||||
addAllowedPath("/new/path");
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/new/path"));
|
||||
});
|
||||
|
||||
it("should resolve relative paths before adding", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, addAllowedPath, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
initAllowedPaths();
|
||||
|
||||
addAllowedPath("./relative/path");
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
const cwd = process.cwd();
|
||||
expect(allowed).toContain(path.resolve(cwd, "./relative/path"));
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPathAllowed", () => {
|
||||
it("should allow paths under allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/allowed/project/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/allowed/project/subdir/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/allowed/project/deep/nested/file.txt")).toBe(true);
|
||||
});
|
||||
|
||||
it("should allow the exact allowed directory", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/allowed/project")).toBe(true);
|
||||
});
|
||||
|
||||
it("should reject paths outside allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/not/allowed/file.txt")).toBe(false);
|
||||
expect(isPathAllowed("/tmp/file.txt")).toBe(false);
|
||||
expect(isPathAllowed("/etc/passwd")).toBe(false);
|
||||
});
|
||||
|
||||
it("should block path traversal attempts", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
// These should resolve outside the allowed directory
|
||||
expect(isPathAllowed("/allowed/project/../../../etc/passwd")).toBe(false);
|
||||
expect(isPathAllowed("/allowed/project/../../other/file.txt")).toBe(false);
|
||||
});
|
||||
|
||||
it("should resolve relative paths correctly", async () => {
|
||||
const cwd = process.cwd();
|
||||
process.env.ALLOWED_PROJECT_DIRS = cwd;
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("./file.txt")).toBe(true);
|
||||
expect(isPathAllowed("./subdir/file.txt")).toBe(true);
|
||||
});
|
||||
|
||||
it("should reject paths that are parents of allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project/subdir";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/allowed/project")).toBe(false);
|
||||
expect(isPathAllowed("/allowed")).toBe(false);
|
||||
});
|
||||
|
||||
it("should handle multiple allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,/path2,/path3";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/path1/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/path2/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/path3/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/path4/file.txt")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("validatePath", () => {
|
||||
it("should return resolved path for allowed paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = validatePath("/allowed/file.txt");
|
||||
expect(result).toBe(path.resolve("/allowed/file.txt"));
|
||||
});
|
||||
|
||||
it("should throw error for disallowed paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(() => validatePath("/disallowed/file.txt")).toThrow("Access denied");
|
||||
expect(() => validatePath("/disallowed/file.txt")).toThrow(
|
||||
"not in an allowed directory"
|
||||
);
|
||||
});
|
||||
|
||||
it("should include the file path in error message", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(() => validatePath("/bad/path.txt")).toThrow("/bad/path.txt");
|
||||
});
|
||||
|
||||
it("should resolve paths before validation", async () => {
|
||||
const cwd = process.cwd();
|
||||
process.env.ALLOWED_PROJECT_DIRS = cwd;
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = validatePath("./file.txt");
|
||||
expect(result).toBe(path.resolve(cwd, "./file.txt"));
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAllowedPaths", () => {
|
||||
it("should return array of allowed paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,/path2";
|
||||
process.env.DATA_DIR = "/data";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = getAllowedPaths();
|
||||
expect(Array.isArray(result)).toBe(true);
|
||||
expect(result.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should return resolved paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/test";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = getAllowedPaths();
|
||||
expect(result[0]).toBe(path.resolve("/test"));
|
||||
});
|
||||
});
|
||||
});
|
||||
482
apps/server/tests/unit/lib/subprocess-manager.test.ts
Normal file
482
apps/server/tests/unit/lib/subprocess-manager.test.ts
Normal file
@@ -0,0 +1,482 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import {
|
||||
spawnJSONLProcess,
|
||||
spawnProcess,
|
||||
type SubprocessOptions,
|
||||
} from "@/lib/subprocess-manager.js";
|
||||
import * as cp from "child_process";
|
||||
import { EventEmitter } from "events";
|
||||
import { Readable } from "stream";
|
||||
import { collectAsyncGenerator } from "../../utils/helpers.js";
|
||||
|
||||
vi.mock("child_process");
|
||||
|
||||
describe("subprocess-manager.ts", () => {
|
||||
let consoleSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
consoleSpy = {
|
||||
log: vi.spyOn(console, "log").mockImplementation(() => {}),
|
||||
error: vi.spyOn(console, "error").mockImplementation(() => {}),
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
consoleSpy.log.mockRestore();
|
||||
consoleSpy.error.mockRestore();
|
||||
});
|
||||
|
||||
/**
|
||||
* Helper to create a mock ChildProcess with stdout/stderr streams
|
||||
*/
|
||||
function createMockProcess(config: {
|
||||
stdoutLines?: string[];
|
||||
stderrLines?: string[];
|
||||
exitCode?: number;
|
||||
error?: Error;
|
||||
delayMs?: number;
|
||||
}) {
|
||||
const mockProcess = new EventEmitter() as any;
|
||||
|
||||
// Create readable streams for stdout and stderr
|
||||
const stdout = new Readable({ read() {} });
|
||||
const stderr = new Readable({ read() {} });
|
||||
|
||||
mockProcess.stdout = stdout;
|
||||
mockProcess.stderr = stderr;
|
||||
mockProcess.kill = vi.fn();
|
||||
|
||||
// Use process.nextTick to ensure readline interface is set up first
|
||||
process.nextTick(() => {
|
||||
// Emit stderr lines immediately
|
||||
if (config.stderrLines) {
|
||||
for (const line of config.stderrLines) {
|
||||
stderr.emit("data", Buffer.from(line));
|
||||
}
|
||||
}
|
||||
|
||||
// Emit stdout lines with small delays to ensure readline processes them
|
||||
const emitLines = async () => {
|
||||
if (config.stdoutLines) {
|
||||
for (const line of config.stdoutLines) {
|
||||
stdout.push(line + "\n");
|
||||
// Small delay to allow readline to process
|
||||
await new Promise((resolve) => setImmediate(resolve));
|
||||
}
|
||||
}
|
||||
|
||||
// Small delay before ending stream
|
||||
await new Promise((resolve) => setImmediate(resolve));
|
||||
stdout.push(null); // End stdout
|
||||
|
||||
// Small delay before exit
|
||||
await new Promise((resolve) =>
|
||||
setTimeout(resolve, config.delayMs ?? 10)
|
||||
);
|
||||
|
||||
// Emit exit or error
|
||||
if (config.error) {
|
||||
mockProcess.emit("error", config.error);
|
||||
} else {
|
||||
mockProcess.emit("exit", config.exitCode ?? 0);
|
||||
}
|
||||
};
|
||||
|
||||
emitLines();
|
||||
});
|
||||
|
||||
return mockProcess;
|
||||
}
|
||||
|
||||
describe("spawnJSONLProcess", () => {
|
||||
const baseOptions: SubprocessOptions = {
|
||||
command: "test-command",
|
||||
args: ["arg1", "arg2"],
|
||||
cwd: "/test/dir",
|
||||
};
|
||||
|
||||
it("should yield parsed JSONL objects line by line", async () => {
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: [
|
||||
'{"type":"start","id":1}',
|
||||
'{"type":"progress","value":50}',
|
||||
'{"type":"complete","result":"success"}',
|
||||
],
|
||||
exitCode: 0,
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
expect(results).toHaveLength(3);
|
||||
expect(results[0]).toEqual({ type: "start", id: 1 });
|
||||
expect(results[1]).toEqual({ type: "progress", value: 50 });
|
||||
expect(results[2]).toEqual({ type: "complete", result: "success" });
|
||||
});
|
||||
|
||||
it("should skip empty lines", async () => {
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: [
|
||||
'{"type":"first"}',
|
||||
"",
|
||||
" ",
|
||||
'{"type":"second"}',
|
||||
],
|
||||
exitCode: 0,
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
expect(results).toHaveLength(2);
|
||||
expect(results[0]).toEqual({ type: "first" });
|
||||
expect(results[1]).toEqual({ type: "second" });
|
||||
});
|
||||
|
||||
it("should yield error for malformed JSON and continue processing", async () => {
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: [
|
||||
'{"type":"valid"}',
|
||||
'{invalid json}',
|
||||
'{"type":"also_valid"}',
|
||||
],
|
||||
exitCode: 0,
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
expect(results).toHaveLength(3);
|
||||
expect(results[0]).toEqual({ type: "valid" });
|
||||
expect(results[1]).toMatchObject({
|
||||
type: "error",
|
||||
error: expect.stringContaining("Failed to parse output"),
|
||||
});
|
||||
expect(results[2]).toEqual({ type: "also_valid" });
|
||||
});
|
||||
|
||||
it("should collect stderr output", async () => {
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: ['{"type":"test"}'],
|
||||
stderrLines: ["Warning: something happened", "Error: critical issue"],
|
||||
exitCode: 0,
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(consoleSpy.error).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Warning: something happened")
|
||||
);
|
||||
expect(consoleSpy.error).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Error: critical issue")
|
||||
);
|
||||
});
|
||||
|
||||
it("should yield error on non-zero exit code", async () => {
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: ['{"type":"started"}'],
|
||||
stderrLines: ["Process failed with error"],
|
||||
exitCode: 1,
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
expect(results).toHaveLength(2);
|
||||
expect(results[0]).toEqual({ type: "started" });
|
||||
expect(results[1]).toMatchObject({
|
||||
type: "error",
|
||||
error: expect.stringContaining("Process failed with error"),
|
||||
});
|
||||
});
|
||||
|
||||
it("should yield error with exit code when stderr is empty", async () => {
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: ['{"type":"test"}'],
|
||||
exitCode: 127,
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
expect(results).toHaveLength(2);
|
||||
expect(results[1]).toMatchObject({
|
||||
type: "error",
|
||||
error: "Process exited with code 127",
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle process spawn errors", async () => {
|
||||
const mockProcess = createMockProcess({
|
||||
error: new Error("Command not found"),
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
// When process.on('error') fires, exitCode is null
|
||||
// The generator should handle this gracefully
|
||||
expect(results).toEqual([]);
|
||||
});
|
||||
|
||||
it("should kill process on AbortController signal", async () => {
|
||||
const abortController = new AbortController();
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: ['{"type":"start"}'],
|
||||
exitCode: 0,
|
||||
delayMs: 100, // Delay to allow abort
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess({
|
||||
...baseOptions,
|
||||
abortController,
|
||||
});
|
||||
|
||||
// Start consuming the generator
|
||||
const promise = collectAsyncGenerator(generator);
|
||||
|
||||
// Abort after a short delay
|
||||
setTimeout(() => abortController.abort(), 20);
|
||||
|
||||
await promise;
|
||||
|
||||
expect(mockProcess.kill).toHaveBeenCalledWith("SIGTERM");
|
||||
expect(consoleSpy.log).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Abort signal received")
|
||||
);
|
||||
});
|
||||
|
||||
// Note: Timeout behavior tests are omitted from unit tests as they involve
|
||||
// complex timing interactions that are difficult to mock reliably.
|
||||
// These scenarios are better covered by integration tests with real subprocesses.
|
||||
|
||||
it("should spawn process with correct arguments", async () => {
|
||||
const mockProcess = createMockProcess({ exitCode: 0 });
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const options: SubprocessOptions = {
|
||||
command: "my-command",
|
||||
args: ["--flag", "value"],
|
||||
cwd: "/work/dir",
|
||||
env: { CUSTOM_VAR: "test" },
|
||||
};
|
||||
|
||||
const generator = spawnJSONLProcess(options);
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(cp.spawn).toHaveBeenCalledWith("my-command", ["--flag", "value"], {
|
||||
cwd: "/work/dir",
|
||||
env: expect.objectContaining({ CUSTOM_VAR: "test" }),
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
});
|
||||
});
|
||||
|
||||
it("should merge env with process.env", async () => {
|
||||
const mockProcess = createMockProcess({ exitCode: 0 });
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const options: SubprocessOptions = {
|
||||
command: "test",
|
||||
args: [],
|
||||
cwd: "/test",
|
||||
env: { CUSTOM: "value" },
|
||||
};
|
||||
|
||||
const generator = spawnJSONLProcess(options);
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(cp.spawn).toHaveBeenCalledWith(
|
||||
"test",
|
||||
[],
|
||||
expect.objectContaining({
|
||||
env: expect.objectContaining({
|
||||
CUSTOM: "value",
|
||||
// Should also include existing process.env
|
||||
NODE_ENV: process.env.NODE_ENV,
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle complex JSON objects", async () => {
|
||||
const complexObject = {
|
||||
type: "complex",
|
||||
nested: { deep: { value: [1, 2, 3] } },
|
||||
array: [{ id: 1 }, { id: 2 }],
|
||||
string: "with \"quotes\" and \\backslashes",
|
||||
};
|
||||
|
||||
const mockProcess = createMockProcess({
|
||||
stdoutLines: [JSON.stringify(complexObject)],
|
||||
exitCode: 0,
|
||||
});
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
const generator = spawnJSONLProcess(baseOptions);
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0]).toEqual(complexObject);
|
||||
});
|
||||
});
|
||||
|
||||
describe("spawnProcess", () => {
|
||||
const baseOptions: SubprocessOptions = {
|
||||
command: "test-command",
|
||||
args: ["arg1"],
|
||||
cwd: "/test",
|
||||
};
|
||||
|
||||
it("should collect stdout and stderr", async () => {
|
||||
const mockProcess = new EventEmitter() as any;
|
||||
const stdout = new Readable({ read() {} });
|
||||
const stderr = new Readable({ read() {} });
|
||||
|
||||
mockProcess.stdout = stdout;
|
||||
mockProcess.stderr = stderr;
|
||||
mockProcess.kill = vi.fn();
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
setTimeout(() => {
|
||||
stdout.push("line 1\n");
|
||||
stdout.push("line 2\n");
|
||||
stdout.push(null);
|
||||
|
||||
stderr.push("error 1\n");
|
||||
stderr.push("error 2\n");
|
||||
stderr.push(null);
|
||||
|
||||
mockProcess.emit("exit", 0);
|
||||
}, 10);
|
||||
|
||||
const result = await spawnProcess(baseOptions);
|
||||
|
||||
expect(result.stdout).toBe("line 1\nline 2\n");
|
||||
expect(result.stderr).toBe("error 1\nerror 2\n");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return correct exit code", async () => {
|
||||
const mockProcess = new EventEmitter() as any;
|
||||
mockProcess.stdout = new Readable({ read() {} });
|
||||
mockProcess.stderr = new Readable({ read() {} });
|
||||
mockProcess.kill = vi.fn();
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.push(null);
|
||||
mockProcess.stderr.push(null);
|
||||
mockProcess.emit("exit", 42);
|
||||
}, 10);
|
||||
|
||||
const result = await spawnProcess(baseOptions);
|
||||
|
||||
expect(result.exitCode).toBe(42);
|
||||
});
|
||||
|
||||
it("should handle process errors", async () => {
|
||||
const mockProcess = new EventEmitter() as any;
|
||||
mockProcess.stdout = new Readable({ read() {} });
|
||||
mockProcess.stderr = new Readable({ read() {} });
|
||||
mockProcess.kill = vi.fn();
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
setTimeout(() => {
|
||||
mockProcess.emit("error", new Error("Spawn failed"));
|
||||
}, 10);
|
||||
|
||||
await expect(spawnProcess(baseOptions)).rejects.toThrow("Spawn failed");
|
||||
});
|
||||
|
||||
it("should handle AbortController signal", async () => {
|
||||
const abortController = new AbortController();
|
||||
const mockProcess = new EventEmitter() as any;
|
||||
mockProcess.stdout = new Readable({ read() {} });
|
||||
mockProcess.stderr = new Readable({ read() {} });
|
||||
mockProcess.kill = vi.fn();
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
setTimeout(() => abortController.abort(), 20);
|
||||
|
||||
await expect(
|
||||
spawnProcess({ ...baseOptions, abortController })
|
||||
).rejects.toThrow("Process aborted");
|
||||
|
||||
expect(mockProcess.kill).toHaveBeenCalledWith("SIGTERM");
|
||||
});
|
||||
|
||||
it("should spawn with correct options", async () => {
|
||||
const mockProcess = new EventEmitter() as any;
|
||||
mockProcess.stdout = new Readable({ read() {} });
|
||||
mockProcess.stderr = new Readable({ read() {} });
|
||||
mockProcess.kill = vi.fn();
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.push(null);
|
||||
mockProcess.stderr.push(null);
|
||||
mockProcess.emit("exit", 0);
|
||||
}, 10);
|
||||
|
||||
const options: SubprocessOptions = {
|
||||
command: "my-cmd",
|
||||
args: ["--verbose"],
|
||||
cwd: "/my/dir",
|
||||
env: { MY_VAR: "value" },
|
||||
};
|
||||
|
||||
await spawnProcess(options);
|
||||
|
||||
expect(cp.spawn).toHaveBeenCalledWith("my-cmd", ["--verbose"], {
|
||||
cwd: "/my/dir",
|
||||
env: expect.objectContaining({ MY_VAR: "value" }),
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle empty stdout and stderr", async () => {
|
||||
const mockProcess = new EventEmitter() as any;
|
||||
mockProcess.stdout = new Readable({ read() {} });
|
||||
mockProcess.stderr = new Readable({ read() {} });
|
||||
mockProcess.kill = vi.fn();
|
||||
|
||||
vi.mocked(cp.spawn).mockReturnValue(mockProcess);
|
||||
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.push(null);
|
||||
mockProcess.stderr.push(null);
|
||||
mockProcess.emit("exit", 0);
|
||||
}, 10);
|
||||
|
||||
const result = await spawnProcess(baseOptions);
|
||||
|
||||
expect(result.stdout).toBe("");
|
||||
expect(result.stderr).toBe("");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
242
apps/server/tests/unit/providers/base-provider.test.ts
Normal file
242
apps/server/tests/unit/providers/base-provider.test.ts
Normal file
@@ -0,0 +1,242 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import { BaseProvider } from "@/providers/base-provider.js";
|
||||
import type {
|
||||
ProviderConfig,
|
||||
ExecuteOptions,
|
||||
ProviderMessage,
|
||||
InstallationStatus,
|
||||
ModelDefinition,
|
||||
} from "@/providers/types.js";
|
||||
|
||||
// Concrete implementation for testing the abstract class
|
||||
class TestProvider extends BaseProvider {
|
||||
getName(): string {
|
||||
return "test-provider";
|
||||
}
|
||||
|
||||
async *executeQuery(
|
||||
_options: ExecuteOptions
|
||||
): AsyncGenerator<ProviderMessage> {
|
||||
yield { type: "text", text: "test response" };
|
||||
}
|
||||
|
||||
async detectInstallation(): Promise<InstallationStatus> {
|
||||
return { installed: true };
|
||||
}
|
||||
|
||||
getAvailableModels(): ModelDefinition[] {
|
||||
return [
|
||||
{ id: "test-model-1", name: "Test Model 1", description: "A test model" },
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
describe("base-provider.ts", () => {
|
||||
describe("constructor", () => {
|
||||
it("should initialize with empty config when none provided", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(provider.getConfig()).toEqual({});
|
||||
});
|
||||
|
||||
it("should initialize with provided config", () => {
|
||||
const config: ProviderConfig = {
|
||||
apiKey: "test-key",
|
||||
baseUrl: "https://test.com",
|
||||
};
|
||||
const provider = new TestProvider(config);
|
||||
expect(provider.getConfig()).toEqual(config);
|
||||
});
|
||||
|
||||
it("should call getName() during initialization", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(provider.getName()).toBe("test-provider");
|
||||
});
|
||||
});
|
||||
|
||||
describe("validateConfig", () => {
|
||||
it("should return valid when config exists", () => {
|
||||
const provider = new TestProvider({ apiKey: "test" });
|
||||
const result = provider.validateConfig();
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
expect(result.warnings).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("should return invalid when config is undefined", () => {
|
||||
// Create provider without config
|
||||
const provider = new TestProvider();
|
||||
// Manually set config to undefined to test edge case
|
||||
(provider as any).config = undefined;
|
||||
|
||||
const result = provider.validateConfig();
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toContain("Provider config is missing");
|
||||
});
|
||||
|
||||
it("should return valid for empty config object", () => {
|
||||
const provider = new TestProvider({});
|
||||
const result = provider.validateConfig();
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("should include warnings array in result", () => {
|
||||
const provider = new TestProvider();
|
||||
const result = provider.validateConfig();
|
||||
|
||||
expect(result).toHaveProperty("warnings");
|
||||
expect(Array.isArray(result.warnings)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("supportsFeature", () => {
|
||||
it("should support 'tools' feature", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(provider.supportsFeature("tools")).toBe(true);
|
||||
});
|
||||
|
||||
it("should support 'text' feature", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(provider.supportsFeature("text")).toBe(true);
|
||||
});
|
||||
|
||||
it("should not support unknown features", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(provider.supportsFeature("vision")).toBe(false);
|
||||
expect(provider.supportsFeature("mcp")).toBe(false);
|
||||
expect(provider.supportsFeature("unknown")).toBe(false);
|
||||
});
|
||||
|
||||
it("should be case-sensitive", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(provider.supportsFeature("TOOLS")).toBe(false);
|
||||
expect(provider.supportsFeature("Text")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getConfig", () => {
|
||||
it("should return current config", () => {
|
||||
const config: ProviderConfig = {
|
||||
apiKey: "test-key",
|
||||
model: "test-model",
|
||||
};
|
||||
const provider = new TestProvider(config);
|
||||
|
||||
expect(provider.getConfig()).toEqual(config);
|
||||
});
|
||||
|
||||
it("should return same reference", () => {
|
||||
const config: ProviderConfig = { apiKey: "test" };
|
||||
const provider = new TestProvider(config);
|
||||
|
||||
const retrieved1 = provider.getConfig();
|
||||
const retrieved2 = provider.getConfig();
|
||||
|
||||
expect(retrieved1).toBe(retrieved2);
|
||||
});
|
||||
});
|
||||
|
||||
describe("setConfig", () => {
|
||||
it("should merge partial config with existing config", () => {
|
||||
const provider = new TestProvider({ apiKey: "original-key" });
|
||||
|
||||
provider.setConfig({ model: "new-model" });
|
||||
|
||||
expect(provider.getConfig()).toEqual({
|
||||
apiKey: "original-key",
|
||||
model: "new-model",
|
||||
});
|
||||
});
|
||||
|
||||
it("should override existing fields", () => {
|
||||
const provider = new TestProvider({ apiKey: "old-key", model: "old-model" });
|
||||
|
||||
provider.setConfig({ apiKey: "new-key" });
|
||||
|
||||
expect(provider.getConfig()).toEqual({
|
||||
apiKey: "new-key",
|
||||
model: "old-model",
|
||||
});
|
||||
});
|
||||
|
||||
it("should accept empty object", () => {
|
||||
const provider = new TestProvider({ apiKey: "test" });
|
||||
const originalConfig = provider.getConfig();
|
||||
|
||||
provider.setConfig({});
|
||||
|
||||
expect(provider.getConfig()).toEqual(originalConfig);
|
||||
});
|
||||
|
||||
it("should handle multiple updates", () => {
|
||||
const provider = new TestProvider();
|
||||
|
||||
provider.setConfig({ apiKey: "key1" });
|
||||
provider.setConfig({ model: "model1" });
|
||||
provider.setConfig({ baseUrl: "https://test.com" });
|
||||
|
||||
expect(provider.getConfig()).toEqual({
|
||||
apiKey: "key1",
|
||||
model: "model1",
|
||||
baseUrl: "https://test.com",
|
||||
});
|
||||
});
|
||||
|
||||
it("should preserve other fields when updating one field", () => {
|
||||
const provider = new TestProvider({
|
||||
apiKey: "key",
|
||||
model: "model",
|
||||
baseUrl: "https://test.com",
|
||||
});
|
||||
|
||||
provider.setConfig({ model: "new-model" });
|
||||
|
||||
expect(provider.getConfig()).toEqual({
|
||||
apiKey: "key",
|
||||
model: "new-model",
|
||||
baseUrl: "https://test.com",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("abstract methods", () => {
|
||||
it("should require getName implementation", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(typeof provider.getName).toBe("function");
|
||||
expect(provider.getName()).toBe("test-provider");
|
||||
});
|
||||
|
||||
it("should require executeQuery implementation", async () => {
|
||||
const provider = new TestProvider();
|
||||
expect(typeof provider.executeQuery).toBe("function");
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "test",
|
||||
projectDirectory: "/test",
|
||||
});
|
||||
const result = await generator.next();
|
||||
|
||||
expect(result.value).toEqual({ type: "text", text: "test response" });
|
||||
});
|
||||
|
||||
it("should require detectInstallation implementation", async () => {
|
||||
const provider = new TestProvider();
|
||||
expect(typeof provider.detectInstallation).toBe("function");
|
||||
|
||||
const status = await provider.detectInstallation();
|
||||
expect(status).toHaveProperty("installed");
|
||||
});
|
||||
|
||||
it("should require getAvailableModels implementation", () => {
|
||||
const provider = new TestProvider();
|
||||
expect(typeof provider.getAvailableModels).toBe("function");
|
||||
|
||||
const models = provider.getAvailableModels();
|
||||
expect(Array.isArray(models)).toBe(true);
|
||||
expect(models.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
398
apps/server/tests/unit/providers/claude-provider.test.ts
Normal file
398
apps/server/tests/unit/providers/claude-provider.test.ts
Normal file
@@ -0,0 +1,398 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { ClaudeProvider } from "@/providers/claude-provider.js";
|
||||
import * as sdk from "@anthropic-ai/claude-agent-sdk";
|
||||
import { collectAsyncGenerator } from "../../utils/helpers.js";
|
||||
|
||||
vi.mock("@anthropic-ai/claude-agent-sdk");
|
||||
|
||||
describe("claude-provider.ts", () => {
|
||||
let provider: ClaudeProvider;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
provider = new ClaudeProvider();
|
||||
delete process.env.ANTHROPIC_API_KEY;
|
||||
delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
|
||||
});
|
||||
|
||||
describe("getName", () => {
|
||||
it("should return 'claude' as provider name", () => {
|
||||
expect(provider.getName()).toBe("claude");
|
||||
});
|
||||
});
|
||||
|
||||
describe("executeQuery", () => {
|
||||
it("should execute simple text query", async () => {
|
||||
const mockMessages = [
|
||||
{ type: "text", text: "Response 1" },
|
||||
{ type: "text", text: "Response 2" },
|
||||
];
|
||||
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
for (const msg of mockMessages) {
|
||||
yield msg;
|
||||
}
|
||||
})()
|
||||
);
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "Hello",
|
||||
cwd: "/test",
|
||||
});
|
||||
|
||||
const results = await collectAsyncGenerator(generator);
|
||||
|
||||
expect(results).toHaveLength(2);
|
||||
expect(results[0]).toEqual({ type: "text", text: "Response 1" });
|
||||
expect(results[1]).toEqual({ type: "text", text: "Response 2" });
|
||||
});
|
||||
|
||||
it("should pass correct options to SDK", async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: "text", text: "test" };
|
||||
})()
|
||||
);
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "Test prompt",
|
||||
model: "claude-opus-4-5-20251101",
|
||||
cwd: "/test/dir",
|
||||
systemPrompt: "You are helpful",
|
||||
maxTurns: 10,
|
||||
allowedTools: ["Read", "Write"],
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: "Test prompt",
|
||||
options: expect.objectContaining({
|
||||
model: "claude-opus-4-5-20251101",
|
||||
systemPrompt: "You are helpful",
|
||||
maxTurns: 10,
|
||||
cwd: "/test/dir",
|
||||
allowedTools: ["Read", "Write"],
|
||||
permissionMode: "acceptEdits",
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
it("should use default allowed tools when not specified", async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: "text", text: "test" };
|
||||
})()
|
||||
);
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "Test",
|
||||
cwd: "/test",
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: "Test",
|
||||
options: expect.objectContaining({
|
||||
allowedTools: [
|
||||
"Read",
|
||||
"Write",
|
||||
"Edit",
|
||||
"Glob",
|
||||
"Grep",
|
||||
"Bash",
|
||||
"WebSearch",
|
||||
"WebFetch",
|
||||
],
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
it("should enable sandbox by default", async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: "text", text: "test" };
|
||||
})()
|
||||
);
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "Test",
|
||||
cwd: "/test",
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: "Test",
|
||||
options: expect.objectContaining({
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
it("should pass abortController if provided", async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: "text", text: "test" };
|
||||
})()
|
||||
);
|
||||
|
||||
const abortController = new AbortController();
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "Test",
|
||||
cwd: "/test",
|
||||
abortController,
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: "Test",
|
||||
options: expect.objectContaining({
|
||||
abortController,
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle conversation history", async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: "text", text: "test" };
|
||||
})()
|
||||
);
|
||||
|
||||
const conversationHistory = [
|
||||
{ role: "user" as const, content: "Previous message" },
|
||||
{ role: "assistant" as const, content: "Previous response" },
|
||||
];
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "Current message",
|
||||
cwd: "/test",
|
||||
conversationHistory,
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
// Should pass an async generator as prompt
|
||||
const callArgs = vi.mocked(sdk.query).mock.calls[0][0];
|
||||
expect(typeof callArgs.prompt).not.toBe("string");
|
||||
});
|
||||
|
||||
it("should handle array prompt (with images)", async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: "text", text: "test" };
|
||||
})()
|
||||
);
|
||||
|
||||
const arrayPrompt = [
|
||||
{ type: "text", text: "Describe this" },
|
||||
{ type: "image", source: { type: "base64", data: "..." } },
|
||||
];
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: arrayPrompt as any,
|
||||
cwd: "/test",
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
// Should pass an async generator as prompt for array inputs
|
||||
const callArgs = vi.mocked(sdk.query).mock.calls[0][0];
|
||||
expect(typeof callArgs.prompt).not.toBe("string");
|
||||
});
|
||||
|
||||
it("should use maxTurns default of 20", async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: "text", text: "test" };
|
||||
})()
|
||||
);
|
||||
|
||||
const generator = provider.executeQuery({
|
||||
prompt: "Test",
|
||||
cwd: "/test",
|
||||
});
|
||||
|
||||
await collectAsyncGenerator(generator);
|
||||
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: "Test",
|
||||
options: expect.objectContaining({
|
||||
maxTurns: 20,
|
||||
}),
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("detectInstallation", () => {
|
||||
it("should return installed with SDK method", async () => {
|
||||
const result = await provider.detectInstallation();
|
||||
|
||||
expect(result.installed).toBe(true);
|
||||
expect(result.method).toBe("sdk");
|
||||
});
|
||||
|
||||
it("should detect ANTHROPIC_API_KEY", async () => {
|
||||
process.env.ANTHROPIC_API_KEY = "test-key";
|
||||
|
||||
const result = await provider.detectInstallation();
|
||||
|
||||
expect(result.hasApiKey).toBe(true);
|
||||
expect(result.authenticated).toBe(true);
|
||||
});
|
||||
|
||||
it("should detect CLAUDE_CODE_OAUTH_TOKEN", async () => {
|
||||
process.env.CLAUDE_CODE_OAUTH_TOKEN = "oauth-token";
|
||||
|
||||
const result = await provider.detectInstallation();
|
||||
|
||||
expect(result.hasApiKey).toBe(true);
|
||||
expect(result.authenticated).toBe(true);
|
||||
});
|
||||
|
||||
it("should return hasApiKey false when no keys present", async () => {
|
||||
const result = await provider.detectInstallation();
|
||||
|
||||
expect(result.hasApiKey).toBe(false);
|
||||
expect(result.authenticated).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAvailableModels", () => {
|
||||
it("should return 4 Claude models", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
expect(models).toHaveLength(4);
|
||||
});
|
||||
|
||||
it("should include Claude Opus 4.5", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
const opus = models.find((m) => m.id === "claude-opus-4-5-20251101");
|
||||
expect(opus).toBeDefined();
|
||||
expect(opus?.name).toBe("Claude Opus 4.5");
|
||||
expect(opus?.provider).toBe("anthropic");
|
||||
});
|
||||
|
||||
it("should include Claude Sonnet 4", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
const sonnet = models.find((m) => m.id === "claude-sonnet-4-20250514");
|
||||
expect(sonnet).toBeDefined();
|
||||
expect(sonnet?.name).toBe("Claude Sonnet 4");
|
||||
});
|
||||
|
||||
it("should include Claude 3.5 Sonnet", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
const sonnet35 = models.find(
|
||||
(m) => m.id === "claude-3-5-sonnet-20241022"
|
||||
);
|
||||
expect(sonnet35).toBeDefined();
|
||||
});
|
||||
|
||||
it("should include Claude 3.5 Haiku", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
const haiku = models.find((m) => m.id === "claude-3-5-haiku-20241022");
|
||||
expect(haiku).toBeDefined();
|
||||
});
|
||||
|
||||
it("should mark Opus as default", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
const opus = models.find((m) => m.id === "claude-opus-4-5-20251101");
|
||||
expect(opus?.default).toBe(true);
|
||||
});
|
||||
|
||||
it("should all support vision and tools", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
models.forEach((model) => {
|
||||
expect(model.supportsVision).toBe(true);
|
||||
expect(model.supportsTools).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("should have correct context windows", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
models.forEach((model) => {
|
||||
expect(model.contextWindow).toBe(200000);
|
||||
});
|
||||
});
|
||||
|
||||
it("should have modelString field matching id", () => {
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
models.forEach((model) => {
|
||||
expect(model.modelString).toBe(model.id);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("supportsFeature", () => {
|
||||
it("should support 'tools' feature", () => {
|
||||
expect(provider.supportsFeature("tools")).toBe(true);
|
||||
});
|
||||
|
||||
it("should support 'text' feature", () => {
|
||||
expect(provider.supportsFeature("text")).toBe(true);
|
||||
});
|
||||
|
||||
it("should support 'vision' feature", () => {
|
||||
expect(provider.supportsFeature("vision")).toBe(true);
|
||||
});
|
||||
|
||||
it("should support 'thinking' feature", () => {
|
||||
expect(provider.supportsFeature("thinking")).toBe(true);
|
||||
});
|
||||
|
||||
it("should not support 'mcp' feature", () => {
|
||||
expect(provider.supportsFeature("mcp")).toBe(false);
|
||||
});
|
||||
|
||||
it("should not support 'cli' feature", () => {
|
||||
expect(provider.supportsFeature("cli")).toBe(false);
|
||||
});
|
||||
|
||||
it("should not support unknown features", () => {
|
||||
expect(provider.supportsFeature("unknown")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("validateConfig", () => {
|
||||
it("should validate config from base class", () => {
|
||||
const result = provider.validateConfig();
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("config management", () => {
|
||||
it("should get and set config", () => {
|
||||
provider.setConfig({ apiKey: "test-key" });
|
||||
|
||||
const config = provider.getConfig();
|
||||
expect(config.apiKey).toBe("test-key");
|
||||
});
|
||||
|
||||
it("should merge config updates", () => {
|
||||
provider.setConfig({ apiKey: "key1" });
|
||||
provider.setConfig({ model: "model1" });
|
||||
|
||||
const config = provider.getConfig();
|
||||
expect(config.apiKey).toBe("key1");
|
||||
expect(config.model).toBe("model1");
|
||||
});
|
||||
});
|
||||
});
|
||||
362
apps/server/tests/unit/providers/codex-cli-detector.test.ts
Normal file
@@ -0,0 +1,362 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { CodexCliDetector } from "@/providers/codex-cli-detector.js";
|
||||
import * as cp from "child_process";
|
||||
import * as fs from "fs";
|
||||
import * as os from "os";
|
||||
import * as path from "path";
|
||||
|
||||
vi.mock("child_process");
|
||||
vi.mock("fs");
|
||||
|
||||
describe("codex-cli-detector.ts", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
delete process.env.OPENAI_API_KEY;
|
||||
});
|
||||
|
||||
describe("getConfigDir", () => {
|
||||
it("should return .codex directory in user home", () => {
|
||||
const homeDir = os.homedir();
|
||||
const configDir = CodexCliDetector.getConfigDir();
|
||||
expect(configDir).toBe(path.join(homeDir, ".codex"));
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAuthPath", () => {
|
||||
it("should return auth.json path in config directory", () => {
|
||||
const authPath = CodexCliDetector.getAuthPath();
|
||||
expect(authPath).toContain(".codex");
|
||||
expect(authPath).toContain("auth.json");
|
||||
});
|
||||
});
|
||||
|
||||
describe("checkAuth", () => {
|
||||
const mockAuthPath = "/home/user/.codex/auth.json";
|
||||
|
||||
beforeEach(() => {
|
||||
vi.spyOn(CodexCliDetector, "getAuthPath").mockReturnValue(mockAuthPath);
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("should detect token object authentication", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue(
|
||||
JSON.stringify({
|
||||
token: {
|
||||
access_token: "test_access",
|
||||
refresh_token: "test_refresh",
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(true);
|
||||
expect(result.method).toBe("cli_tokens");
|
||||
expect(result.hasAuthFile).toBe(true);
|
||||
});
|
||||
|
||||
it("should detect token with Id_token field", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue(
|
||||
JSON.stringify({
|
||||
token: {
|
||||
Id_token: "test_id_token",
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(true);
|
||||
expect(result.method).toBe("cli_tokens");
|
||||
});
|
||||
|
||||
it("should detect root-level tokens", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue(
|
||||
JSON.stringify({
|
||||
access_token: "test_access",
|
||||
refresh_token: "test_refresh",
|
||||
})
|
||||
);
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(true);
|
||||
expect(result.method).toBe("cli_tokens");
|
||||
});
|
||||
|
||||
it("should detect API key in auth file", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue(
|
||||
JSON.stringify({
|
||||
api_key: "test-api-key",
|
||||
})
|
||||
);
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(true);
|
||||
expect(result.method).toBe("auth_file");
|
||||
});
|
||||
|
||||
it("should detect openai_api_key field", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue(
|
||||
JSON.stringify({
|
||||
openai_api_key: "test-key",
|
||||
})
|
||||
);
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(true);
|
||||
expect(result.method).toBe("auth_file");
|
||||
});
|
||||
|
||||
it("should detect environment variable authentication", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
process.env.OPENAI_API_KEY = "env-api-key";
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(true);
|
||||
expect(result.method).toBe("env");
|
||||
expect(result.hasEnvKey).toBe(true);
|
||||
expect(result.hasAuthFile).toBe(false);
|
||||
});
|
||||
|
||||
it("should return not authenticated when no auth found", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(false);
|
||||
expect(result.method).toBe("none");
|
||||
expect(result.hasAuthFile).toBe(false);
|
||||
expect(result.hasEnvKey).toBe(false);
|
||||
});
|
||||
|
||||
it("should handle malformed auth file", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue("invalid json");
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result.authenticated).toBe(false);
|
||||
expect(result.method).toBe("none");
|
||||
});
|
||||
|
||||
it("should return auth result with required fields", () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
|
||||
const result = CodexCliDetector.checkAuth();
|
||||
|
||||
expect(result).toHaveProperty("authenticated");
|
||||
expect(result).toHaveProperty("method");
|
||||
expect(typeof result.authenticated).toBe("boolean");
|
||||
expect(typeof result.method).toBe("string");
|
||||
});
|
||||
});
|
||||
|
||||
describe("detectCodexInstallation", () => {
|
||||
// Note: Full detection logic involves OS-specific commands (which/where, npm, brew)
|
||||
// and is better tested in integration tests. Here we test the basic structure.
|
||||
|
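// A hedged sketch of that integration-style coverage: it assumes a real `codex`
// binary on PATH with child_process/fs unmocked, so it stays skipped in this unit suite.
it.skip("detects a locally installed Codex CLI (integration sketch)", () => {
const result = CodexCliDetector.detectCodexInstallation();
expect(result.installed).toBe(true);
expect(result.path).toBeTruthy();
});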
||||
it("should return hasApiKey when OPENAI_API_KEY is set and CLI not found", () => {
|
||||
vi.mocked(cp.execSync).mockImplementation(() => {
|
||||
throw new Error("command not found");
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
process.env.OPENAI_API_KEY = "test-key";
|
||||
|
||||
const result = CodexCliDetector.detectCodexInstallation();
|
||||
|
||||
expect(result.installed).toBe(false);
|
||||
expect(result.hasApiKey).toBe(true);
|
||||
});
|
||||
|
||||
it("should return not installed when nothing found", () => {
|
||||
vi.mocked(cp.execSync).mockImplementation(() => {
|
||||
throw new Error("command failed");
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
delete process.env.OPENAI_API_KEY;
|
||||
|
||||
const result = CodexCliDetector.detectCodexInstallation();
|
||||
|
||||
expect(result.installed).toBe(false);
|
||||
expect(result.hasApiKey).toBeUndefined();
|
||||
});
|
||||
|
||||
it("should return installation status object with installed boolean", () => {
|
||||
vi.mocked(cp.execSync).mockImplementation(() => {
|
||||
throw new Error();
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
|
||||
const result = CodexCliDetector.detectCodexInstallation();
|
||||
|
||||
expect(result).toHaveProperty("installed");
|
||||
expect(typeof result.installed).toBe("boolean");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getCodexVersion", () => {
|
||||
// Note: Testing execSync calls is difficult in unit tests and better suited for integration tests
|
||||
// The method structure and error handling can be verified indirectly through other tests
|
||||
|
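// A hedged sketch of how the version lookup could be pinned down with stubbed fs/execSync;
// the "codex 0.5.0" output format is an assumption about the CLI, so it stays skipped here.
it.skip("parses the version from the CLI's --version output (sketch)", () => {
vi.mocked(fs.existsSync).mockReturnValue(true);
vi.mocked(cp.execSync).mockReturnValue(Buffer.from("codex 0.5.0\n") as any);
const version = CodexCliDetector.getCodexVersion("/usr/bin/codex");
expect(version).toBe("0.5.0");
});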
||||
it("should return null when given invalid path", () => {
|
||||
const version = CodexCliDetector.getCodexVersion("/nonexistent/path");
|
||||
expect(version).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("getInstallationInfo", () => {
|
||||
it("should return installed status when CLI is detected", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: true,
|
||||
path: "/usr/bin/codex",
|
||||
version: "0.5.0",
|
||||
method: "cli",
|
||||
});
|
||||
|
||||
const info = CodexCliDetector.getInstallationInfo();
|
||||
|
||||
expect(info.status).toBe("installed");
|
||||
expect(info.method).toBe("cli");
|
||||
expect(info.version).toBe("0.5.0");
|
||||
expect(info.path).toBe("/usr/bin/codex");
|
||||
expect(info.recommendation).toContain("ready for GPT-5.1/5.2");
|
||||
});
|
||||
|
||||
it("should return api_key_only when API key is set but CLI not installed", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
hasApiKey: true,
|
||||
});
|
||||
|
||||
const info = CodexCliDetector.getInstallationInfo();
|
||||
|
||||
expect(info.status).toBe("api_key_only");
|
||||
expect(info.method).toBe("api-key-only");
|
||||
expect(info.recommendation).toContain("OPENAI_API_KEY detected");
|
||||
expect(info.recommendation).toContain("Install Codex CLI");
|
||||
expect(info.installCommands).toBeDefined();
|
||||
});
|
||||
|
||||
it("should return not_installed when nothing detected", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
|
||||
const info = CodexCliDetector.getInstallationInfo();
|
||||
|
||||
expect(info.status).toBe("not_installed");
|
||||
expect(info.recommendation).toContain("Install OpenAI Codex CLI");
|
||||
expect(info.installCommands).toBeDefined();
|
||||
});
|
||||
|
||||
it("should include install commands for all platforms", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: false,
|
||||
});
|
||||
|
||||
const info = CodexCliDetector.getInstallationInfo();
|
||||
|
||||
expect(info.installCommands).toHaveProperty("npm");
|
||||
expect(info.installCommands).toHaveProperty("macos");
|
||||
expect(info.installCommands).toHaveProperty("linux");
|
||||
expect(info.installCommands).toHaveProperty("windows");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getInstallCommands", () => {
|
||||
it("should return installation commands for all platforms", () => {
|
||||
const commands = CodexCliDetector.getInstallCommands();
|
||||
|
||||
expect(commands.npm).toContain("npm install");
|
||||
expect(commands.npm).toContain("@openai/codex");
|
||||
expect(commands.macos).toContain("brew install");
|
||||
expect(commands.linux).toContain("npm install");
|
||||
expect(commands.windows).toContain("npm install");
|
||||
});
|
||||
});
|
||||
|
||||
describe("isModelSupported", () => {
|
||||
it("should return true for supported models", () => {
|
||||
expect(CodexCliDetector.isModelSupported("gpt-5.1-codex-max")).toBe(true);
|
||||
expect(CodexCliDetector.isModelSupported("gpt-5.1-codex")).toBe(true);
|
||||
expect(CodexCliDetector.isModelSupported("gpt-5.1-codex-mini")).toBe(true);
|
||||
expect(CodexCliDetector.isModelSupported("gpt-5.1")).toBe(true);
|
||||
expect(CodexCliDetector.isModelSupported("gpt-5.2")).toBe(true);
|
||||
});
|
||||
|
||||
it("should return false for unsupported models", () => {
|
||||
expect(CodexCliDetector.isModelSupported("gpt-4")).toBe(false);
|
||||
expect(CodexCliDetector.isModelSupported("claude-opus")).toBe(false);
|
||||
expect(CodexCliDetector.isModelSupported("unknown-model")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getDefaultModel", () => {
|
||||
it("should return gpt-5.2 as default", () => {
|
||||
const defaultModel = CodexCliDetector.getDefaultModel();
|
||||
expect(defaultModel).toBe("gpt-5.2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getFullStatus", () => {
|
||||
it("should include installation, auth, and info", () => {
|
||||
vi.spyOn(CodexCliDetector, "detectCodexInstallation").mockReturnValue({
|
||||
installed: true,
|
||||
path: "/usr/bin/codex",
|
||||
});
|
||||
vi.spyOn(CodexCliDetector, "checkAuth").mockReturnValue({
|
||||
authenticated: true,
|
||||
method: "cli_verified",
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: false,
|
||||
});
|
||||
|
||||
const status = CodexCliDetector.getFullStatus();
|
||||
|
||||
expect(status).toHaveProperty("status");
|
||||
expect(status).toHaveProperty("auth");
|
||||
expect(status).toHaveProperty("installation");
|
||||
expect(status.auth.authenticated).toBe(true);
|
||||
expect(status.installation.installed).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
430
apps/server/tests/unit/providers/codex-config-manager.test.ts
Normal file
@@ -0,0 +1,430 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { CodexConfigManager } from "@/providers/codex-config-manager.js";
|
||||
import * as fs from "fs/promises";
|
||||
import * as os from "os";
|
||||
import * as path from "path";
|
||||
import { tomlConfigFixture } from "../../fixtures/configs.js";
|
||||
|
||||
vi.mock("fs/promises");
|
||||
|
||||
describe("codex-config-manager.ts", () => {
|
||||
let manager: CodexConfigManager;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
manager = new CodexConfigManager();
|
||||
});
|
||||
|
||||
describe("constructor", () => {
|
||||
it("should initialize with user config path", () => {
|
||||
const expectedPath = path.join(os.homedir(), ".codex", "config.toml");
|
||||
expect(manager["userConfigPath"]).toBe(expectedPath);
|
||||
});
|
||||
|
||||
it("should initialize with null project config path", () => {
|
||||
expect(manager["projectConfigPath"]).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("setProjectPath", () => {
|
||||
it("should set project config path", () => {
|
||||
manager.setProjectPath("/my/project");
|
||||
const configPath = manager["projectConfigPath"];
|
||||
expect(configPath).toContain("my");
|
||||
expect(configPath).toContain("project");
|
||||
expect(configPath).toContain(".codex");
|
||||
expect(configPath).toContain("config.toml");
|
||||
});
|
||||
|
||||
it("should handle paths with special characters", () => {
|
||||
manager.setProjectPath("/path with spaces/project");
|
||||
expect(manager["projectConfigPath"]).toContain("path with spaces");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getConfigPath", () => {
|
||||
it("should return user config path when no project path set", async () => {
|
||||
const result = await manager.getConfigPath();
|
||||
expect(result).toBe(manager["userConfigPath"]);
|
||||
});
|
||||
|
||||
it("should return project config path when it exists", async () => {
|
||||
manager.setProjectPath("/my/project");
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
|
||||
const result = await manager.getConfigPath();
|
||||
expect(result).toContain("my");
|
||||
expect(result).toContain("project");
|
||||
expect(result).toContain(".codex");
|
||||
expect(result).toContain("config.toml");
|
||||
});
|
||||
|
||||
it("should fall back to user config when project config doesn't exist", async () => {
|
||||
manager.setProjectPath("/my/project");
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT"));
|
||||
|
||||
const result = await manager.getConfigPath();
|
||||
expect(result).toBe(manager["userConfigPath"]);
|
||||
});
|
||||
|
||||
it("should create user config directory if it doesn't exist", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
|
||||
await manager.getConfigPath();
|
||||
|
||||
const expectedDir = path.dirname(manager["userConfigPath"]);
|
||||
expect(fs.mkdir).toHaveBeenCalledWith(expectedDir, { recursive: true });
|
||||
});
|
||||
});
|
||||
|
||||
describe("parseToml", () => {
|
||||
it("should parse simple key-value pairs", () => {
|
||||
const toml = `
|
||||
key1 = "value1"
|
||||
key2 = "value2"
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.key1).toBe("value1");
|
||||
expect(result.key2).toBe("value2");
|
||||
});
|
||||
|
||||
it("should parse boolean values", () => {
|
||||
const toml = `
|
||||
enabled = true
|
||||
disabled = false
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.enabled).toBe(true);
|
||||
expect(result.disabled).toBe(false);
|
||||
});
|
||||
|
||||
it("should parse integer values", () => {
|
||||
const toml = `
|
||||
count = 42
|
||||
negative = -10
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.count).toBe(42);
|
||||
expect(result.negative).toBe(-10);
|
||||
});
|
||||
|
||||
it("should parse float values", () => {
|
||||
const toml = `
|
||||
pi = 3.14
|
||||
negative = -2.5
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.pi).toBe(3.14);
|
||||
expect(result.negative).toBe(-2.5);
|
||||
});
|
||||
|
||||
it("should skip comments", () => {
|
||||
const toml = `
|
||||
# This is a comment
|
||||
key = "value"
|
||||
# Another comment
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.key).toBe("value");
|
||||
expect(Object.keys(result)).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("should skip empty lines", () => {
|
||||
const toml = `
|
||||
key1 = "value1"
|
||||
|
||||
key2 = "value2"
|
||||
|
||||
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.key1).toBe("value1");
|
||||
expect(result.key2).toBe("value2");
|
||||
});
|
||||
|
||||
it("should parse sections", () => {
|
||||
const toml = `
|
||||
[section1]
|
||||
key1 = "value1"
|
||||
key2 = "value2"
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.section1).toBeDefined();
|
||||
expect(result.section1.key1).toBe("value1");
|
||||
expect(result.section1.key2).toBe("value2");
|
||||
});
|
||||
|
||||
it("should parse nested sections", () => {
|
||||
const toml = `
|
||||
[section.subsection]
|
||||
key = "value"
|
||||
`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.section).toBeDefined();
|
||||
expect(result.section.subsection).toBeDefined();
|
||||
expect(result.section.subsection.key).toBe("value");
|
||||
});
|
||||
|
||||
it("should parse MCP server configuration", () => {
|
||||
const result = manager.parseToml(tomlConfigFixture);
|
||||
|
||||
expect(result.experimental_use_rmcp_client).toBe(true);
|
||||
expect(result.mcp_servers).toBeDefined();
|
||||
expect(result.mcp_servers["automaker-tools"]).toBeDefined();
|
||||
expect(result.mcp_servers["automaker-tools"].command).toBe("node");
|
||||
});
|
||||
|
||||
it("should handle quoted strings with spaces", () => {
|
||||
const toml = `key = "value with spaces"`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.key).toBe("value with spaces");
|
||||
});
|
||||
|
||||
it("should handle single-quoted strings", () => {
|
||||
const toml = `key = 'single quoted'`;
|
||||
const result = manager.parseToml(toml);
|
||||
|
||||
expect(result.key).toBe("single quoted");
|
||||
});
|
||||
|
||||
it("should return empty object for empty input", () => {
|
||||
const result = manager.parseToml("");
|
||||
expect(result).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe("readConfig", () => {
|
||||
it("should read and parse existing config", async () => {
|
||||
vi.mocked(fs.readFile).mockResolvedValue(tomlConfigFixture);
|
||||
|
||||
const result = await manager.readConfig("/path/to/config.toml");
|
||||
|
||||
expect(result.experimental_use_rmcp_client).toBe(true);
|
||||
expect(result.mcp_servers).toBeDefined();
|
||||
});
|
||||
|
||||
it("should return empty object when file doesn't exist", async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
const result = await manager.readConfig("/nonexistent.toml");
|
||||
|
||||
expect(result).toEqual({});
|
||||
});
|
||||
|
||||
it("should throw other errors", async () => {
|
||||
vi.mocked(fs.readFile).mockRejectedValue(new Error("Permission denied"));
|
||||
|
||||
await expect(manager.readConfig("/path.toml")).rejects.toThrow(
|
||||
"Permission denied"
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("escapeTomlString", () => {
|
||||
it("should escape backslashes", () => {
|
||||
const result = manager.escapeTomlString("path\\to\\file");
|
||||
expect(result).toBe("path\\\\to\\\\file");
|
||||
});
|
||||
|
||||
it("should escape double quotes", () => {
|
||||
const result = manager.escapeTomlString('say "hello"');
|
||||
expect(result).toBe('say \\"hello\\"');
|
||||
});
|
||||
|
||||
it("should escape newlines", () => {
|
||||
const result = manager.escapeTomlString("line1\nline2");
|
||||
expect(result).toBe("line1\\nline2");
|
||||
});
|
||||
|
||||
it("should escape carriage returns", () => {
|
||||
const result = manager.escapeTomlString("line1\rline2");
|
||||
expect(result).toBe("line1\\rline2");
|
||||
});
|
||||
|
||||
it("should escape tabs", () => {
|
||||
const result = manager.escapeTomlString("col1\tcol2");
|
||||
expect(result).toBe("col1\\tcol2");
|
||||
});
|
||||
});
|
||||
|
||||
describe("formatValue", () => {
|
||||
it("should format strings with quotes", () => {
|
||||
const result = manager.formatValue("test");
|
||||
expect(result).toBe('"test"');
|
||||
});
|
||||
|
||||
it("should format booleans as strings", () => {
|
||||
expect(manager.formatValue(true)).toBe("true");
|
||||
expect(manager.formatValue(false)).toBe("false");
|
||||
});
|
||||
|
||||
it("should format numbers as strings", () => {
|
||||
expect(manager.formatValue(42)).toBe("42");
|
||||
expect(manager.formatValue(3.14)).toBe("3.14");
|
||||
});
|
||||
|
||||
it("should escape special characters in strings", () => {
|
||||
const result = manager.formatValue('path\\with"quotes');
|
||||
expect(result).toBe('"path\\\\with\\"quotes"');
|
||||
});
|
||||
});
|
||||
|
||||
describe("writeConfig", () => {
|
||||
it("should write TOML config to file", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
const config = {
|
||||
experimental_use_rmcp_client: true,
|
||||
mcp_servers: {
|
||||
"test-server": {
|
||||
command: "node",
|
||||
args: ["server.js"],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
await manager.writeConfig("/path/config.toml", config);
|
||||
|
||||
expect(fs.writeFile).toHaveBeenCalledWith(
|
||||
"/path/config.toml",
|
||||
expect.stringContaining("experimental_use_rmcp_client = true"),
|
||||
"utf-8"
|
||||
);
|
||||
expect(fs.writeFile).toHaveBeenCalledWith(
|
||||
"/path/config.toml",
|
||||
expect.stringContaining("[mcp_servers.test-server]"),
|
||||
"utf-8"
|
||||
);
|
||||
});
|
||||
|
||||
it("should create config directory if it doesn't exist", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
await manager.writeConfig("/path/to/config.toml", {});
|
||||
|
||||
expect(fs.mkdir).toHaveBeenCalledWith("/path/to", { recursive: true });
|
||||
});
|
||||
|
||||
it("should include env section for MCP servers", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
const config = {
|
||||
mcp_servers: {
|
||||
"test-server": {
|
||||
command: "node",
|
||||
env: {
|
||||
MY_VAR: "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
await manager.writeConfig("/path/config.toml", config);
|
||||
|
||||
const writtenContent = vi.mocked(fs.writeFile).mock.calls[0][1] as string;
|
||||
expect(writtenContent).toContain("[mcp_servers.test-server.env]");
|
||||
expect(writtenContent).toContain('MY_VAR = "value"');
|
||||
});
|
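// Pieced together from the assertions above, the emitted TOML looks roughly like this
// (exact ordering and array formatting are assumptions, not asserted by these tests):
//
//   experimental_use_rmcp_client = true
//
//   [mcp_servers.test-server]
//   command = "node"
//   args = ["server.js"]
//
//   [mcp_servers.test-server.env]
//   MY_VAR = "value"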
||||
});
|
||||
|
||||
describe("configureMcpServer", () => {
|
||||
it("should configure automaker-tools MCP server", async () => {
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT"));
|
||||
vi.mocked(fs.readFile).mockRejectedValue(Object.assign(new Error("ENOENT"), { code: "ENOENT" }));
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
const result = await manager.configureMcpServer(
|
||||
"/my/project",
|
||||
"/path/to/mcp-server.js"
|
||||
);
|
||||
|
||||
expect(result).toContain("config.toml");
|
||||
|
||||
const writtenContent = vi.mocked(fs.writeFile).mock.calls[0][1] as string;
|
||||
expect(writtenContent).toContain("[mcp_servers.automaker-tools]");
|
||||
expect(writtenContent).toContain('command = "node"');
|
||||
expect(writtenContent).toContain("/path/to/mcp-server.js");
|
||||
expect(writtenContent).toContain("AUTOMAKER_PROJECT_PATH");
|
||||
});
|
||||
|
||||
it("should preserve existing MCP servers", async () => {
|
||||
const existingConfig = `
|
||||
[mcp_servers.other-server]
|
||||
command = "other"
|
||||
`;
|
||||
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT"));
|
||||
vi.mocked(fs.readFile).mockResolvedValue(existingConfig);
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
await manager.configureMcpServer("/project", "/server.js");
|
||||
|
||||
const writtenContent = vi.mocked(fs.writeFile).mock.calls[0][1] as string;
|
||||
expect(writtenContent).toContain("[mcp_servers.other-server]");
|
||||
expect(writtenContent).toContain("[mcp_servers.automaker-tools]");
|
||||
});
|
||||
});
|
||||
|
||||
describe("removeMcpServer", () => {
|
||||
it("should remove automaker-tools MCP server", async () => {
|
||||
const configWithServer = `
|
||||
[mcp_servers.automaker-tools]
|
||||
command = "node"
|
||||
|
||||
[mcp_servers.other-server]
|
||||
command = "other"
|
||||
`;
|
||||
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT"));
|
||||
vi.mocked(fs.readFile).mockResolvedValue(configWithServer);
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
await manager.removeMcpServer("/project");
|
||||
|
||||
const writtenContent = vi.mocked(fs.writeFile).mock.calls[0][1] as string;
|
||||
expect(writtenContent).not.toContain("automaker-tools");
|
||||
expect(writtenContent).toContain("other-server");
|
||||
});
|
||||
|
||||
it("should remove mcp_servers section if empty", async () => {
|
||||
const configWithOnlyAutomaker = `
|
||||
[mcp_servers.automaker-tools]
|
||||
command = "node"
|
||||
`;
|
||||
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT"));
|
||||
vi.mocked(fs.readFile).mockResolvedValue(configWithOnlyAutomaker);
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
await manager.removeMcpServer("/project");
|
||||
|
||||
const writtenContent = vi.mocked(fs.writeFile).mock.calls[0][1] as string;
|
||||
expect(writtenContent).not.toContain("mcp_servers");
|
||||
});
|
||||
|
||||
it("should handle errors gracefully", async () => {
|
||||
vi.mocked(fs.readFile).mockRejectedValue(new Error("Read error"));
|
||||
|
||||
// Should not throw
|
||||
await expect(manager.removeMcpServer("/project")).resolves.toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
1145
apps/server/tests/unit/providers/codex-provider.test.ts
Normal file
File diff suppressed because it is too large
293
apps/server/tests/unit/providers/provider-factory.test.ts
Normal file
@@ -0,0 +1,293 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { ProviderFactory } from "@/providers/provider-factory.js";
|
||||
import { ClaudeProvider } from "@/providers/claude-provider.js";
|
||||
import { CodexProvider } from "@/providers/codex-provider.js";
|
||||
|
||||
describe("provider-factory.ts", () => {
|
||||
let consoleSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
consoleSpy = {
|
||||
warn: vi.spyOn(console, "warn").mockImplementation(() => {}),
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
consoleSpy.warn.mockRestore();
|
||||
});
|
||||
|
||||
describe("getProviderForModel", () => {
|
||||
describe("OpenAI/Codex models (gpt-*)", () => {
|
||||
it("should return CodexProvider for gpt-5.2", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("gpt-5.2");
|
||||
expect(provider).toBeInstanceOf(CodexProvider);
|
||||
});
|
||||
|
||||
it("should return CodexProvider for gpt-5.1-codex", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("gpt-5.1-codex");
|
||||
expect(provider).toBeInstanceOf(CodexProvider);
|
||||
});
|
||||
|
||||
it("should return CodexProvider for gpt-4", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("gpt-4");
|
||||
expect(provider).toBeInstanceOf(CodexProvider);
|
||||
});
|
||||
|
||||
it("should be case-insensitive for gpt models", () => {
|
||||
const provider1 = ProviderFactory.getProviderForModel("GPT-5.2");
|
||||
const provider2 = ProviderFactory.getProviderForModel("Gpt-5.1");
|
||||
expect(provider1).toBeInstanceOf(CodexProvider);
|
||||
expect(provider2).toBeInstanceOf(CodexProvider);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Unsupported o-series models", () => {
|
||||
it("should default to ClaudeProvider for o1 (not supported by Codex CLI)", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("o1");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
expect(consoleSpy.warn).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("should default to ClaudeProvider for o3", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("o3");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should default to ClaudeProvider for o1-mini", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("o1-mini");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Claude models (claude-* prefix)", () => {
|
||||
it("should return ClaudeProvider for claude-opus-4-5-20251101", () => {
|
||||
const provider = ProviderFactory.getProviderForModel(
|
||||
"claude-opus-4-5-20251101"
|
||||
);
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should return ClaudeProvider for claude-sonnet-4-20250514", () => {
|
||||
const provider = ProviderFactory.getProviderForModel(
|
||||
"claude-sonnet-4-20250514"
|
||||
);
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should return ClaudeProvider for claude-haiku-4-5", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("claude-haiku-4-5");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should be case-insensitive for claude models", () => {
|
||||
const provider = ProviderFactory.getProviderForModel(
|
||||
"CLAUDE-OPUS-4-5-20251101"
|
||||
);
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Claude aliases", () => {
|
||||
it("should return ClaudeProvider for 'haiku'", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("haiku");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should return ClaudeProvider for 'sonnet'", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("sonnet");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should return ClaudeProvider for 'opus'", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("opus");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should be case-insensitive for aliases", () => {
|
||||
const provider1 = ProviderFactory.getProviderForModel("HAIKU");
|
||||
const provider2 = ProviderFactory.getProviderForModel("Sonnet");
|
||||
const provider3 = ProviderFactory.getProviderForModel("Opus");
|
||||
|
||||
expect(provider1).toBeInstanceOf(ClaudeProvider);
|
||||
expect(provider2).toBeInstanceOf(ClaudeProvider);
|
||||
expect(provider3).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Unknown models", () => {
|
||||
it("should default to ClaudeProvider for unknown model", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("unknown-model-123");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should warn when defaulting to Claude", () => {
|
||||
ProviderFactory.getProviderForModel("random-model");
|
||||
expect(consoleSpy.warn).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Unknown model prefix")
|
||||
);
|
||||
expect(consoleSpy.warn).toHaveBeenCalledWith(
|
||||
expect.stringContaining("random-model")
|
||||
);
|
||||
expect(consoleSpy.warn).toHaveBeenCalledWith(
|
||||
expect.stringContaining("defaulting to Claude")
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle empty string", () => {
|
||||
const provider = ProviderFactory.getProviderForModel("");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
expect(consoleSpy.warn).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAllProviders", () => {
|
||||
it("should return array of all providers", () => {
|
||||
const providers = ProviderFactory.getAllProviders();
|
||||
expect(Array.isArray(providers)).toBe(true);
|
||||
});
|
||||
|
||||
it("should include ClaudeProvider", () => {
|
||||
const providers = ProviderFactory.getAllProviders();
|
||||
const hasClaudeProvider = providers.some(
|
||||
(p) => p instanceof ClaudeProvider
|
||||
);
|
||||
expect(hasClaudeProvider).toBe(true);
|
||||
});
|
||||
|
||||
it("should include CodexProvider", () => {
|
||||
const providers = ProviderFactory.getAllProviders();
|
||||
const hasCodexProvider = providers.some((p) => p instanceof CodexProvider);
|
||||
expect(hasCodexProvider).toBe(true);
|
||||
});
|
||||
|
||||
it("should return exactly 2 providers", () => {
|
||||
const providers = ProviderFactory.getAllProviders();
|
||||
expect(providers).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("should create new instances each time", () => {
|
||||
const providers1 = ProviderFactory.getAllProviders();
|
||||
const providers2 = ProviderFactory.getAllProviders();
|
||||
|
||||
expect(providers1[0]).not.toBe(providers2[0]);
|
||||
expect(providers1[1]).not.toBe(providers2[1]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("checkAllProviders", () => {
|
||||
it("should return installation status for all providers", async () => {
|
||||
const statuses = await ProviderFactory.checkAllProviders();
|
||||
|
||||
expect(statuses).toHaveProperty("claude");
|
||||
expect(statuses).toHaveProperty("codex");
|
||||
});
|
||||
|
||||
it("should call detectInstallation on each provider", async () => {
|
||||
const statuses = await ProviderFactory.checkAllProviders();
|
||||
|
||||
expect(statuses.claude).toHaveProperty("installed");
|
||||
expect(statuses.codex).toHaveProperty("installed");
|
||||
});
|
||||
|
||||
it("should return correct provider names as keys", async () => {
|
||||
const statuses = await ProviderFactory.checkAllProviders();
|
||||
const keys = Object.keys(statuses);
|
||||
|
||||
expect(keys).toContain("claude");
|
||||
expect(keys).toContain("codex");
|
||||
expect(keys).toHaveLength(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getProviderByName", () => {
|
||||
it("should return ClaudeProvider for 'claude'", () => {
|
||||
const provider = ProviderFactory.getProviderByName("claude");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should return ClaudeProvider for 'anthropic'", () => {
|
||||
const provider = ProviderFactory.getProviderByName("anthropic");
|
||||
expect(provider).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should return CodexProvider for 'codex'", () => {
|
||||
const provider = ProviderFactory.getProviderByName("codex");
|
||||
expect(provider).toBeInstanceOf(CodexProvider);
|
||||
});
|
||||
|
||||
it("should return CodexProvider for 'openai'", () => {
|
||||
const provider = ProviderFactory.getProviderByName("openai");
|
||||
expect(provider).toBeInstanceOf(CodexProvider);
|
||||
});
|
||||
|
||||
it("should be case-insensitive", () => {
|
||||
const provider1 = ProviderFactory.getProviderByName("CLAUDE");
|
||||
const provider2 = ProviderFactory.getProviderByName("Codex");
|
||||
const provider3 = ProviderFactory.getProviderByName("ANTHROPIC");
|
||||
|
||||
expect(provider1).toBeInstanceOf(ClaudeProvider);
|
||||
expect(provider2).toBeInstanceOf(CodexProvider);
|
||||
expect(provider3).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
|
||||
it("should return null for unknown provider", () => {
|
||||
const provider = ProviderFactory.getProviderByName("unknown");
|
||||
expect(provider).toBeNull();
|
||||
});
|
||||
|
||||
it("should return null for empty string", () => {
|
||||
const provider = ProviderFactory.getProviderByName("");
|
||||
expect(provider).toBeNull();
|
||||
});
|
||||
|
||||
it("should create new instance each time", () => {
|
||||
const provider1 = ProviderFactory.getProviderByName("claude");
|
||||
const provider2 = ProviderFactory.getProviderByName("claude");
|
||||
|
||||
expect(provider1).not.toBe(provider2);
|
||||
expect(provider1).toBeInstanceOf(ClaudeProvider);
|
||||
expect(provider2).toBeInstanceOf(ClaudeProvider);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAllAvailableModels", () => {
|
||||
it("should return array of models", () => {
|
||||
const models = ProviderFactory.getAllAvailableModels();
|
||||
expect(Array.isArray(models)).toBe(true);
|
||||
});
|
||||
|
||||
it("should include models from all providers", () => {
|
||||
const models = ProviderFactory.getAllAvailableModels();
|
||||
expect(models.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should return models with required fields", () => {
|
||||
const models = ProviderFactory.getAllAvailableModels();
|
||||
|
||||
models.forEach((model) => {
|
||||
expect(model).toHaveProperty("id");
|
||||
expect(model).toHaveProperty("name");
|
||||
expect(typeof model.id).toBe("string");
|
||||
expect(typeof model.name).toBe("string");
|
||||
});
|
||||
});
|
||||
|
||||
it("should aggregate models from both Claude and Codex", () => {
|
||||
const models = ProviderFactory.getAllAvailableModels();
|
||||
|
||||
// Claude models should include claude-* in their IDs
|
||||
const hasClaudeModels = models.some((m) =>
|
||||
m.id.toLowerCase().includes("claude")
|
||||
);
|
||||
|
||||
// Codex models should include gpt-* in their IDs
|
||||
const hasCodexModels = models.some((m) =>
|
||||
m.id.toLowerCase().includes("gpt")
|
||||
);
|
||||
|
||||
expect(hasClaudeModels).toBe(true);
|
||||
expect(hasCodexModels).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
361
apps/server/tests/unit/services/agent-service.test.ts
Normal file
@@ -0,0 +1,361 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { AgentService } from "@/services/agent-service.js";
|
||||
import { ProviderFactory } from "@/providers/provider-factory.js";
|
||||
import * as fs from "fs/promises";
|
||||
import * as imageHandler from "@/lib/image-handler.js";
|
||||
import * as promptBuilder from "@/lib/prompt-builder.js";
|
||||
import { collectAsyncGenerator } from "../../utils/helpers.js";
|
||||
|
||||
vi.mock("fs/promises");
|
||||
vi.mock("@/providers/provider-factory.js");
|
||||
vi.mock("@/lib/image-handler.js");
|
||||
vi.mock("@/lib/prompt-builder.js");
|
||||
|
||||
describe("agent-service.ts", () => {
|
||||
let service: AgentService;
|
||||
const mockEvents = {
|
||||
subscribe: vi.fn(),
|
||||
emit: vi.fn(),
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
service = new AgentService("/test/data", mockEvents as any);
|
||||
});
|
||||
|
||||
describe("initialize", () => {
|
||||
it("should create state directory", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
|
||||
await service.initialize();
|
||||
|
||||
expect(fs.mkdir).toHaveBeenCalledWith(
|
||||
expect.stringContaining("agent-sessions"),
|
||||
{ recursive: true }
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("startConversation", () => {
|
||||
it("should create new session with empty messages", async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
const result = await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
workingDirectory: "/test/dir",
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.messages).toEqual([]);
|
||||
expect(result.sessionId).toBe("session-1");
|
||||
});
|
||||
|
||||
it("should load existing session", async () => {
|
||||
const existingMessages = [
|
||||
{
|
||||
id: "msg-1",
|
||||
role: "user",
|
||||
content: "Hello",
|
||||
timestamp: "2024-01-01T00:00:00Z",
|
||||
},
|
||||
];
|
||||
|
||||
vi.mocked(fs.readFile).mockResolvedValue(
|
||||
JSON.stringify(existingMessages)
|
||||
);
|
||||
|
||||
const result = await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
workingDirectory: "/test/dir",
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.messages).toEqual(existingMessages);
|
||||
});
|
||||
|
||||
it("should use process.cwd() if no working directory provided", async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
const result = await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
it("should reuse existing session if already started", async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
// Start session first time
|
||||
await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
});
|
||||
|
||||
// Start again with same ID
|
||||
const result = await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
// Should only read file once
|
||||
expect(fs.readFile).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("sendMessage", () => {
|
||||
beforeEach(async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
|
||||
await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
workingDirectory: "/test/dir",
|
||||
});
|
||||
});
|
||||
|
||||
it("should throw if session not found", async () => {
|
||||
await expect(
|
||||
service.sendMessage({
|
||||
sessionId: "nonexistent",
|
||||
message: "Hello",
|
||||
})
|
||||
).rejects.toThrow("Session nonexistent not found");
|
||||
});
|
||||
|
||||
|
||||
it("should process message and stream responses", async () => {
|
||||
const mockProvider = {
|
||||
getName: () => "claude",
|
||||
executeQuery: async function* () {
|
||||
yield {
|
||||
type: "assistant",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "Response" }],
|
||||
},
|
||||
};
|
||||
yield {
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
|
||||
mockProvider as any
|
||||
);
|
||||
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: "Hello",
|
||||
hasImages: false,
|
||||
});
|
||||
|
||||
const result = await service.sendMessage({
|
||||
sessionId: "session-1",
|
||||
message: "Hello",
|
||||
workingDirectory: "/custom/dir",
|
||||
});
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
expect(mockEvents.emit).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("should handle images in message", async () => {
|
||||
const mockProvider = {
|
||||
getName: () => "claude",
|
||||
executeQuery: async function* () {
|
||||
yield {
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
|
||||
mockProvider as any
|
||||
);
|
||||
|
||||
vi.mocked(imageHandler.readImageAsBase64).mockResolvedValue({
|
||||
base64: "base64data",
|
||||
mimeType: "image/png",
|
||||
filename: "test.png",
|
||||
originalPath: "/path/test.png",
|
||||
});
|
||||
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: "Check image",
|
||||
hasImages: true,
|
||||
});
|
||||
|
||||
await service.sendMessage({
|
||||
sessionId: "session-1",
|
||||
message: "Check this",
|
||||
imagePaths: ["/path/test.png"],
|
||||
});
|
||||
|
||||
expect(imageHandler.readImageAsBase64).toHaveBeenCalledWith(
|
||||
"/path/test.png"
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle failed image loading gracefully", async () => {
|
||||
const mockProvider = {
|
||||
getName: () => "claude",
|
||||
executeQuery: async function* () {
|
||||
yield {
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
|
||||
mockProvider as any
|
||||
);
|
||||
|
||||
vi.mocked(imageHandler.readImageAsBase64).mockRejectedValue(
|
||||
new Error("Image not found")
|
||||
);
|
||||
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: "Check image",
|
||||
hasImages: false,
|
||||
});
|
||||
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
await service.sendMessage({
|
||||
sessionId: "session-1",
|
||||
message: "Check this",
|
||||
imagePaths: ["/path/test.png"],
|
||||
});
|
||||
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("should use custom model if provided", async () => {
|
||||
const mockProvider = {
|
||||
getName: () => "codex",
|
||||
executeQuery: async function* () {
|
||||
yield {
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
|
||||
mockProvider as any
|
||||
);
|
||||
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: "Hello",
|
||||
hasImages: false,
|
||||
});
|
||||
|
||||
await service.sendMessage({
|
||||
sessionId: "session-1",
|
||||
message: "Hello",
|
||||
model: "gpt-5.2",
|
||||
});
|
||||
|
||||
expect(ProviderFactory.getProviderForModel).toHaveBeenCalledWith("gpt-5.2");
|
||||
});
|
||||
|
||||
it("should save session messages", async () => {
|
||||
const mockProvider = {
|
||||
getName: () => "claude",
|
||||
executeQuery: async function* () {
|
||||
yield {
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(
|
||||
mockProvider as any
|
||||
);
|
||||
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: "Hello",
|
||||
hasImages: false,
|
||||
});
|
||||
|
||||
await service.sendMessage({
|
||||
sessionId: "session-1",
|
||||
message: "Hello",
|
||||
});
|
||||
|
||||
expect(fs.writeFile).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("stopExecution", () => {
|
||||
it("should stop execution for a session", async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
});
|
||||
|
||||
// Should return success
|
||||
const result = await service.stopExecution("session-1");
|
||||
expect(result.success).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("getHistory", () => {
|
||||
it("should return message history", async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
});
|
||||
|
||||
const history = service.getHistory("session-1");
|
||||
|
||||
expect(history).toBeDefined();
|
||||
expect(history?.messages).toEqual([]);
|
||||
});
|
||||
|
||||
it("should handle non-existent session", () => {
|
||||
const history = service.getHistory("nonexistent");
|
||||
expect(history).toBeDefined(); // Returns error object
|
||||
});
|
||||
});
|
||||
|
||||
describe("clearSession", () => {
|
||||
it("should clear session messages", async () => {
|
||||
const error: any = new Error("ENOENT");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
|
||||
await service.startConversation({
|
||||
sessionId: "session-1",
|
||||
});
|
||||
|
||||
await service.clearSession("session-1");
|
||||
|
||||
const history = service.getHistory("session-1");
|
||||
expect(history?.messages).toEqual([]);
|
||||
expect(fs.writeFile).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
});
|
||||
71
apps/server/tests/unit/services/auto-mode-service.test.ts
Normal file
@@ -0,0 +1,71 @@
import { describe, it, expect, vi, beforeEach } from "vitest";
import { AutoModeService } from "@/services/auto-mode-service.js";

describe("auto-mode-service.ts", () => {
  let service: AutoModeService;
  const mockEvents = {
    subscribe: vi.fn(),
    emit: vi.fn(),
  };

  beforeEach(() => {
    vi.clearAllMocks();
    service = new AutoModeService(mockEvents as any);
  });

  describe("constructor", () => {
    it("should initialize with event emitter", () => {
      expect(service).toBeDefined();
    });
  });

  describe("startAutoLoop", () => {
    it("should throw if auto mode is already running", async () => {
      // Start first loop
      const promise1 = service.startAutoLoop("/test/project", 3);

      // Try to start second loop
      await expect(
        service.startAutoLoop("/test/project", 3)
      ).rejects.toThrow("already running");

      // Cleanup
      await service.stopAutoLoop();
      await promise1.catch(() => {});
    });

    it("should emit auto mode start event", async () => {
      const promise = service.startAutoLoop("/test/project", 3);

      // Give it time to emit the event
      await new Promise((resolve) => setTimeout(resolve, 10));

      expect(mockEvents.emit).toHaveBeenCalledWith(
        expect.any(String),
        expect.objectContaining({
          message: expect.stringContaining("Auto mode started"),
        })
      );

      // Cleanup
      await service.stopAutoLoop();
      await promise.catch(() => {});
    });
  });

  describe("stopAutoLoop", () => {
    it("should stop the auto loop", async () => {
      const promise = service.startAutoLoop("/test/project", 3);

      const runningCount = await service.stopAutoLoop();

      expect(runningCount).toBe(0);
      await promise.catch(() => {});
    });

    it("should return 0 when not running", async () => {
      const runningCount = await service.stopAutoLoop();
      expect(runningCount).toBe(0);
    });
  });
});
446
apps/server/tests/unit/services/feature-loader.test.ts
Normal file
@@ -0,0 +1,446 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { FeatureLoader } from "@/services/feature-loader.js";
|
||||
import * as fs from "fs/promises";
|
||||
import path from "path";
|
||||
|
||||
vi.mock("fs/promises");
|
||||
|
||||
describe("feature-loader.ts", () => {
|
||||
let loader: FeatureLoader;
|
||||
const testProjectPath = "/test/project";
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
loader = new FeatureLoader();
|
||||
});
|
||||
|
||||
describe("getFeaturesDir", () => {
|
||||
it("should return features directory path", () => {
|
||||
const result = loader.getFeaturesDir(testProjectPath);
|
||||
expect(result).toContain("test");
|
||||
expect(result).toContain("project");
|
||||
expect(result).toContain(".automaker");
|
||||
expect(result).toContain("features");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getFeatureImagesDir", () => {
|
||||
it("should return feature images directory path", () => {
|
||||
const result = loader.getFeatureImagesDir(testProjectPath, "feature-123");
|
||||
expect(result).toContain("features");
|
||||
expect(result).toContain("feature-123");
|
||||
expect(result).toContain("images");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getFeatureDir", () => {
|
||||
it("should return feature directory path", () => {
|
||||
const result = loader.getFeatureDir(testProjectPath, "feature-123");
|
||||
expect(result).toContain("features");
|
||||
expect(result).toContain("feature-123");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getFeatureJsonPath", () => {
|
||||
it("should return feature.json path", () => {
|
||||
const result = loader.getFeatureJsonPath(testProjectPath, "feature-123");
|
||||
expect(result).toContain("features");
|
||||
expect(result).toContain("feature-123");
|
||||
expect(result).toContain("feature.json");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAgentOutputPath", () => {
|
||||
it("should return agent-output.md path", () => {
|
||||
const result = loader.getAgentOutputPath(testProjectPath, "feature-123");
|
||||
expect(result).toContain("features");
|
||||
expect(result).toContain("feature-123");
|
||||
expect(result).toContain("agent-output.md");
|
||||
});
|
||||
});
|
||||
|
||||
describe("generateFeatureId", () => {
|
||||
it("should generate unique feature ID with timestamp", () => {
|
||||
const id1 = loader.generateFeatureId();
|
||||
const id2 = loader.generateFeatureId();
|
||||
|
||||
expect(id1).toMatch(/^feature-\d+-[a-z0-9]+$/);
|
||||
expect(id2).toMatch(/^feature-\d+-[a-z0-9]+$/);
|
||||
expect(id1).not.toBe(id2);
|
||||
});
|
||||
|
||||
it("should start with 'feature-'", () => {
|
||||
const id = loader.generateFeatureId();
|
||||
expect(id).toMatch(/^feature-/);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAll", () => {
|
||||
it("should return empty array when features directory doesn't exist", async () => {
|
||||
vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT"));
|
||||
|
||||
const result = await loader.getAll(testProjectPath);
|
||||
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
it("should load all features from feature directories", async () => {
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.readdir).mockResolvedValue([
|
||||
{ name: "feature-1", isDirectory: () => true } as any,
|
||||
{ name: "feature-2", isDirectory: () => true } as any,
|
||||
{ name: "file.txt", isDirectory: () => false } as any,
|
||||
]);
|
||||
|
||||
vi.mocked(fs.readFile)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
id: "feature-1",
|
||||
category: "ui",
|
||||
description: "Feature 1",
|
||||
})
|
||||
)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
id: "feature-2",
|
||||
category: "backend",
|
||||
description: "Feature 2",
|
||||
})
|
||||
);
|
||||
|
||||
const result = await loader.getAll(testProjectPath);
|
||||
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0].id).toBe("feature-1");
|
||||
expect(result[1].id).toBe("feature-2");
|
||||
});
|
||||
|
||||
it("should skip features without id field", async () => {
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.readdir).mockResolvedValue([
|
||||
{ name: "feature-1", isDirectory: () => true } as any,
|
||||
{ name: "feature-2", isDirectory: () => true } as any,
|
||||
]);
|
||||
|
||||
const consoleSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
|
||||
|
||||
vi.mocked(fs.readFile)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
category: "ui",
|
||||
description: "Missing ID",
|
||||
})
|
||||
)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
id: "feature-2",
|
||||
category: "backend",
|
||||
description: "Feature 2",
|
||||
})
|
||||
);
|
||||
|
||||
const result = await loader.getAll(testProjectPath);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe("feature-2");
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
expect.stringContaining("missing required 'id' field")
|
||||
);
|
||||
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("should skip features with missing feature.json", async () => {
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.readdir).mockResolvedValue([
|
||||
{ name: "feature-1", isDirectory: () => true } as any,
|
||||
{ name: "feature-2", isDirectory: () => true } as any,
|
||||
]);
|
||||
|
||||
const error: any = new Error("File not found");
|
||||
error.code = "ENOENT";
|
||||
|
||||
vi.mocked(fs.readFile)
|
||||
.mockRejectedValueOnce(error)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
id: "feature-2",
|
||||
category: "backend",
|
||||
description: "Feature 2",
|
||||
})
|
||||
);
|
||||
|
||||
const result = await loader.getAll(testProjectPath);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0].id).toBe("feature-2");
|
||||
});
|
||||
|
||||
it("should handle malformed JSON gracefully", async () => {
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.readdir).mockResolvedValue([
|
||||
{ name: "feature-1", isDirectory: () => true } as any,
|
||||
]);
|
||||
|
||||
const consoleSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
|
||||
|
||||
vi.mocked(fs.readFile).mockResolvedValue("invalid json{");
|
||||
|
||||
const result = await loader.getAll(testProjectPath);
|
||||
|
||||
expect(result).toEqual([]);
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("should sort features by creation order (timestamp)", async () => {
|
||||
vi.mocked(fs.access).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.readdir).mockResolvedValue([
|
||||
{ name: "feature-3", isDirectory: () => true } as any,
|
||||
{ name: "feature-1", isDirectory: () => true } as any,
|
||||
{ name: "feature-2", isDirectory: () => true } as any,
|
||||
]);
|
||||
|
||||
vi.mocked(fs.readFile)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
id: "feature-3000-xyz",
|
||||
category: "ui",
|
||||
})
|
||||
)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
id: "feature-1000-abc",
|
||||
category: "ui",
|
||||
})
|
||||
)
|
||||
.mockResolvedValueOnce(
|
||||
JSON.stringify({
|
||||
id: "feature-2000-def",
|
||||
category: "ui",
|
||||
})
|
||||
);
|
||||
|
||||
const result = await loader.getAll(testProjectPath);
|
||||
|
||||
expect(result).toHaveLength(3);
|
||||
expect(result[0].id).toBe("feature-1000-abc");
|
||||
expect(result[1].id).toBe("feature-2000-def");
|
||||
expect(result[2].id).toBe("feature-3000-xyz");
|
||||
});
|
||||
});
|
||||
|
||||
describe("get", () => {
|
||||
it("should return feature by ID", async () => {
|
||||
const featureData = {
|
||||
id: "feature-123",
|
||||
category: "ui",
|
||||
description: "Test feature",
|
||||
};
|
||||
|
||||
vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(featureData));
|
||||
|
||||
const result = await loader.get(testProjectPath, "feature-123");
|
||||
|
||||
expect(result).toEqual(featureData);
|
||||
});
|
||||
|
||||
it("should return null when feature doesn't exist", async () => {
|
||||
const error: any = new Error("File not found");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
const result = await loader.get(testProjectPath, "feature-123");
|
||||
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it("should throw on other errors", async () => {
|
||||
vi.mocked(fs.readFile).mockRejectedValue(new Error("Permission denied"));
|
||||
|
||||
await expect(
|
||||
loader.get(testProjectPath, "feature-123")
|
||||
).rejects.toThrow("Permission denied");
|
||||
});
|
||||
});
|
||||
|
||||
describe("create", () => {
|
||||
it("should create new feature", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
const featureData = {
|
||||
category: "ui",
|
||||
description: "New feature",
|
||||
};
|
||||
|
||||
const result = await loader.create(testProjectPath, featureData);
|
||||
|
||||
expect(result).toMatchObject({
|
||||
category: "ui",
|
||||
description: "New feature",
|
||||
id: expect.stringMatching(/^feature-/),
|
||||
});
|
||||
expect(fs.writeFile).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("should use provided ID if given", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
const result = await loader.create(testProjectPath, {
|
||||
id: "custom-id",
|
||||
category: "ui",
|
||||
description: "Test",
|
||||
});
|
||||
|
||||
expect(result.id).toBe("custom-id");
|
||||
});
|
||||
|
||||
it("should set default category if not provided", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
const result = await loader.create(testProjectPath, {
|
||||
description: "Test",
|
||||
});
|
||||
|
||||
expect(result.category).toBe("Uncategorized");
|
||||
});
|
||||
});
|
||||
|
||||
describe("update", () => {
|
||||
it("should update existing feature", async () => {
|
||||
vi.mocked(fs.readFile).mockResolvedValue(
|
||||
JSON.stringify({
|
||||
id: "feature-123",
|
||||
category: "ui",
|
||||
description: "Old description",
|
||||
})
|
||||
);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
const result = await loader.update(testProjectPath, "feature-123", {
|
||||
description: "New description",
|
||||
});
|
||||
|
||||
expect(result.description).toBe("New description");
|
||||
expect(result.category).toBe("ui");
|
||||
expect(fs.writeFile).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("should throw if feature doesn't exist", async () => {
|
||||
const error: any = new Error("File not found");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
await expect(
|
||||
loader.update(testProjectPath, "feature-123", {})
|
||||
).rejects.toThrow("not found");
|
||||
});
|
||||
});
|
||||
|
||||
describe("delete", () => {
|
||||
it("should delete feature directory", async () => {
|
||||
vi.mocked(fs.rm).mockResolvedValue(undefined);
|
||||
|
||||
const result = await loader.delete(testProjectPath, "feature-123");
|
||||
|
||||
expect(result).toBe(true);
|
||||
expect(fs.rm).toHaveBeenCalledWith(
|
||||
expect.stringContaining("feature-123"),
|
||||
{ recursive: true, force: true }
|
||||
);
|
||||
});
|
||||
|
||||
it("should return false on error", async () => {
|
||||
vi.mocked(fs.rm).mockRejectedValue(new Error("Permission denied"));
|
||||
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
const result = await loader.delete(testProjectPath, "feature-123");
|
||||
|
||||
expect(result).toBe(false);
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAgentOutput", () => {
|
||||
it("should return agent output content", async () => {
|
||||
vi.mocked(fs.readFile).mockResolvedValue("Agent output content");
|
||||
|
||||
const result = await loader.getAgentOutput(testProjectPath, "feature-123");
|
||||
|
||||
expect(result).toBe("Agent output content");
|
||||
});
|
||||
|
||||
it("should return null when file doesn't exist", async () => {
|
||||
const error: any = new Error("File not found");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error);
|
||||
|
||||
const result = await loader.getAgentOutput(testProjectPath, "feature-123");
|
||||
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it("should throw on other errors", async () => {
|
||||
vi.mocked(fs.readFile).mockRejectedValue(new Error("Permission denied"));
|
||||
|
||||
await expect(
|
||||
loader.getAgentOutput(testProjectPath, "feature-123")
|
||||
).rejects.toThrow("Permission denied");
|
||||
});
|
||||
});
|
||||
|
||||
describe("saveAgentOutput", () => {
|
||||
it("should save agent output to file", async () => {
|
||||
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
|
||||
|
||||
await loader.saveAgentOutput(
|
||||
testProjectPath,
|
||||
"feature-123",
|
||||
"Output content"
|
||||
);
|
||||
|
||||
expect(fs.writeFile).toHaveBeenCalledWith(
|
||||
expect.stringContaining("agent-output.md"),
|
||||
"Output content",
|
||||
"utf-8"
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("deleteAgentOutput", () => {
|
||||
it("should delete agent output file", async () => {
|
||||
vi.mocked(fs.unlink).mockResolvedValue(undefined);
|
||||
|
||||
await loader.deleteAgentOutput(testProjectPath, "feature-123");
|
||||
|
||||
expect(fs.unlink).toHaveBeenCalledWith(
|
||||
expect.stringContaining("agent-output.md")
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle missing file gracefully", async () => {
|
||||
const error: any = new Error("File not found");
|
||||
error.code = "ENOENT";
|
||||
vi.mocked(fs.unlink).mockRejectedValue(error);
|
||||
|
||||
// Should not throw
|
||||
await expect(
|
||||
loader.deleteAgentOutput(testProjectPath, "feature-123")
|
||||
).resolves.toBeUndefined();
|
||||
});
|
||||
|
||||
it("should throw on other errors", async () => {
|
||||
vi.mocked(fs.unlink).mockRejectedValue(new Error("Permission denied"));
|
||||
|
||||
await expect(
|
||||
loader.deleteAgentOutput(testProjectPath, "feature-123")
|
||||
).rejects.toThrow("Permission denied");
|
||||
});
|
||||
});
|
||||
});
|
||||
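The tests above exercise FeatureLoader entirely against a mocked fs/promises. For orientation, here is a minimal usage sketch of the same API against a real project directory — the method names are taken from the tests, while the project path and top-level-await framing are purely illustrative:

import { FeatureLoader } from "@/services/feature-loader.js";

const loader = new FeatureLoader();
const projectPath = "/path/to/project"; // illustrative path, not from this commit

// Create a feature, refine it, then list everything under .automaker/features
const created = await loader.create(projectPath, {
  category: "ui",
  description: "Example feature",
});
await loader.update(projectPath, created.id, { description: "Refined description" });
const all = await loader.getAll(projectPath); // sorted by the timestamp embedded in each id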
38
apps/server/tests/utils/helpers.ts
Normal file
@@ -0,0 +1,38 @@
/**
 * Test helper functions
 */

/**
 * Collect all values from an async generator
 */
export async function collectAsyncGenerator<T>(gen: AsyncGenerator<T>): Promise<T[]> {
  const results: T[] = [];
  for await (const item of gen) {
    results.push(item);
  }
  return results;
}

/**
 * Wait for a condition to be true
 */
export async function waitFor(
  condition: () => boolean,
  timeout = 1000,
  interval = 10
): Promise<void> {
  const start = Date.now();
  while (!condition()) {
    if (Date.now() - start > timeout) {
      throw new Error("Timeout waiting for condition");
    }
    await new Promise((resolve) => setTimeout(resolve, interval));
  }
}

/**
 * Create a temporary directory for tests
 */
export function createTempDir(): string {
  return `/tmp/test-${Date.now()}-${Math.random().toString(36).slice(2)}`;
}
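A short usage sketch of these helpers in a test; the generator and timings below are illustrative and not part of this commit, and the relative import path is an assumption:

import { it, expect } from "vitest";
import { collectAsyncGenerator, waitFor, createTempDir } from "../utils/helpers.js";

it("collects generator output and waits for async state", async () => {
  async function* numbers() {
    yield 1;
    yield 2;
  }

  // Drain the generator into an array
  expect(await collectAsyncGenerator(numbers())).toEqual([1, 2]);

  // Poll a condition instead of sleeping a fixed amount of time
  let done = false;
  setTimeout(() => { done = true; }, 20);
  await waitFor(() => done, 500);

  // createTempDir only builds a unique path string; the caller creates the directory
  expect(createTempDir()).toMatch(/^\/tmp\/test-/);
});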
107
apps/server/tests/utils/mocks.ts
Normal file
@@ -0,0 +1,107 @@
/**
 * Mock utilities for testing
 * Provides reusable mocks for common dependencies
 */

import { vi } from "vitest";
import type { ChildProcess } from "child_process";
import { EventEmitter } from "events";
import type { Readable } from "stream";

/**
 * Mock child_process.spawn for subprocess tests
 */
export function createMockChildProcess(options: {
  stdout?: string[];
  stderr?: string[];
  exitCode?: number | null;
  shouldError?: boolean;
}): ChildProcess {
  const { stdout = [], stderr = [], exitCode = 0, shouldError = false } = options;

  const mockProcess = new EventEmitter() as any;

  // Create mock stdout stream
  mockProcess.stdout = new EventEmitter() as Readable;
  mockProcess.stderr = new EventEmitter() as Readable;

  mockProcess.kill = vi.fn();

  // Simulate async output
  process.nextTick(() => {
    // Emit stdout lines
    for (const line of stdout) {
      mockProcess.stdout.emit("data", Buffer.from(line + "\n"));
    }

    // Emit stderr lines
    for (const line of stderr) {
      mockProcess.stderr.emit("data", Buffer.from(line + "\n"));
    }

    // Emit exit or error
    if (shouldError) {
      mockProcess.emit("error", new Error("Process error"));
    } else {
      mockProcess.emit("exit", exitCode);
    }
  });

  return mockProcess as ChildProcess;
}

/**
 * Mock fs/promises for file system tests
 */
export function createMockFs() {
  return {
    readFile: vi.fn(),
    writeFile: vi.fn(),
    mkdir: vi.fn(),
    access: vi.fn(),
    stat: vi.fn(),
  };
}

/**
 * Mock Express request/response/next for middleware tests
 */
export function createMockExpressContext() {
  const req = {
    headers: {},
    body: {},
    params: {},
    query: {},
  } as any;

  const res = {
    status: vi.fn().mockReturnThis(),
    json: vi.fn().mockReturnThis(),
    send: vi.fn().mockReturnThis(),
  } as any;

  const next = vi.fn();

  return { req, res, next };
}

/**
 * Mock AbortController for async operation tests
 */
export function createMockAbortController() {
  const controller = new AbortController();
  const originalAbort = controller.abort.bind(controller);
  controller.abort = vi.fn(originalAbort);
  return controller;
}

/**
 * Mock Claude SDK query function
 */
export function createMockClaudeQuery(messages: any[] = []) {
  return vi.fn(async function* ({ prompt, options }: any) {
    for (const msg of messages) {
      yield msg;
    }
  });
}
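A hedged sketch of how two of these mocks might be wired into a test; the scenario is illustrative (no module under test from this commit is referenced), and the relative import path is an assumption:

import { it, expect } from "vitest";
import { createMockChildProcess, createMockExpressContext } from "../utils/mocks.js";

it("simulates a subprocess and an Express handler", async () => {
  // Subprocess that prints one line and exits cleanly
  const child = createMockChildProcess({ stdout: ["hello"], exitCode: 0 });
  const lines: string[] = [];
  child.stdout!.on("data", (chunk) => lines.push(chunk.toString().trim()));
  await new Promise((resolve) => child.on("exit", resolve));
  expect(lines).toEqual(["hello"]);

  // Express middleware context with chainable res.status().json()
  const { req, res, next } = createMockExpressContext();
  res.status(404).json({ error: "not found" });
  expect(res.status).toHaveBeenCalledWith(404);
  expect(next).not.toHaveBeenCalled();
});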
10
apps/server/tsconfig.test.json
Normal file
@@ -0,0 +1,10 @@
{
  "extends": "./tsconfig.json",
  "compilerOptions": {
    "types": ["vitest/globals", "node"],
    "moduleResolution": "Bundler",
    "module": "ESNext"
  },
  "include": ["src/**/*", "tests/**/*"],
  "exclude": ["node_modules", "dist"]
}
37
apps/server/vitest.config.ts
Normal file
@@ -0,0 +1,37 @@
import { defineConfig } from "vitest/config";
import path from "path";

export default defineConfig({
  test: {
    reporters: ["verbose"],
    globals: true,
    environment: "node",
    setupFiles: ["./tests/setup.ts"],
    coverage: {
      provider: "v8",
      reporter: ["text", "json", "html", "lcov"],
      include: ["src/**/*.ts"],
      exclude: [
        "src/**/*.d.ts",
        "src/index.ts",
        "src/routes/**", // Routes are better tested with integration tests
      ],
      thresholds: {
        lines: 80,
        functions: 80,
        branches: 75,
        statements: 80,
      },
    },
    include: ["tests/**/*.test.ts", "tests/**/*.spec.ts"],
    exclude: ["**/node_modules/**", "**/dist/**"],
    mockReset: true,
    restoreMocks: true,
    clearMocks: true,
  },
  resolve: {
    alias: {
      "@": path.resolve(__dirname, "./src"),
    },
  },
});
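The config's setupFiles entry points at ./tests/setup.ts, which is not part of this hunk. A purely hypothetical sketch of what such a setup file commonly contains — not the repository's actual file — assuming only the Vitest APIs already used elsewhere in this commit:

// tests/setup.ts — hypothetical example only; the real file is not shown in this diff
import { afterEach, vi } from "vitest";

// Keep environment-dependent behaviour deterministic across tests
process.env.NODE_ENV = "test";

afterEach(() => {
  // mockReset/restoreMocks/clearMocks are already enabled in vitest.config.ts;
  // an explicit cleanup hook like this is a common belt-and-braces addition
  vi.restoreAllMocks();
});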
@@ -23,6 +23,7 @@
    "lint": "npm run lint --workspace=apps/app",
    "test": "npm run test --workspace=apps/app",
    "test:headed": "npm run test:headed --workspace=apps/app",
    "test:server": "npm run test --workspace=apps/server",
    "dev:marketing": "npm run dev --workspace=apps/marketing"
  }
}