Mirror of https://github.com/AutoMaker-Org/automaker.git (synced 2026-02-01 20:23:36 +00:00)
feat: add comprehensive integration tests for auto-mode-service
- Created git-test-repo helper for managing test git repositories
- Added 13 integration tests covering:
  - Worktree operations (create, error handling, non-worktree mode)
  - Feature execution (status updates, model selection, duplicate prevention)
  - Auto loop (start/stop, pending features, max concurrency, events)
  - Error handling (provider errors, continue after failures)
- Integration tests use real git operations with temporary repos
- All 416 tests passing with 72.65% overall coverage
- Service coverage improved: agent-service 58%, auto-mode-service 44%, feature-loader 66%

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
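The git-test-repo helper itself is not included in this diff. As a rough sketch of the pattern the integration tests rely on (real git operations against a throwaway repository), something like the following could work; the createTestRepo name and its return shape are assumptions for illustration, not the actual helper API.

import { mkdtemp, rm, writeFile } from "fs/promises";
import { tmpdir } from "os";
import { join } from "path";
import { execFileSync } from "child_process";

// Hypothetical helper: creates a disposable git repo with one initial commit.
export async function createTestRepo(): Promise<{ path: string; cleanup: () => Promise<void> }> {
  const repoPath = await mkdtemp(join(tmpdir(), "automaker-test-repo-"));
  const git = (...args: string[]) => execFileSync("git", args, { cwd: repoPath });

  git("init");
  git("config", "user.email", "test@example.com");
  git("config", "user.name", "Test User");
  await writeFile(join(repoPath, "README.md"), "# test repo\n");
  git("add", ".");
  git("commit", "-m", "initial commit");

  return {
    path: repoPath,
    // Remove the temporary repository after the test finishes.
    cleanup: () => rm(repoPath, { recursive: true, force: true }),
  };
}

A test could then point the service under test at path and call cleanup() in afterEach.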
apps/server/tests/unit/lib/auth.test.ts (new file, 116 lines)
@@ -0,0 +1,116 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { createMockExpressContext } from "../../utils/mocks.js";

/**
 * Note: auth.ts reads AUTOMAKER_API_KEY at module load time.
 * We need to reset modules and reimport for each test to get fresh state.
 */
describe("auth.ts", () => {
  beforeEach(() => {
    vi.resetModules();
  });

  describe("authMiddleware - no API key", () => {
    it("should call next() when no API key is set", async () => {
      delete process.env.AUTOMAKER_API_KEY;

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();

      authMiddleware(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(res.status).not.toHaveBeenCalled();
    });
  });

  describe("authMiddleware - with API key", () => {
    it("should reject request without API key header", async () => {
      process.env.AUTOMAKER_API_KEY = "test-secret-key";

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();

      authMiddleware(req, res, next);

      expect(res.status).toHaveBeenCalledWith(401);
      expect(res.json).toHaveBeenCalledWith({
        success: false,
        error: "Authentication required. Provide X-API-Key header.",
      });
      expect(next).not.toHaveBeenCalled();
    });

    it("should reject request with invalid API key", async () => {
      process.env.AUTOMAKER_API_KEY = "test-secret-key";

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();
      req.headers["x-api-key"] = "wrong-key";

      authMiddleware(req, res, next);

      expect(res.status).toHaveBeenCalledWith(403);
      expect(res.json).toHaveBeenCalledWith({
        success: false,
        error: "Invalid API key.",
      });
      expect(next).not.toHaveBeenCalled();
    });

    it("should call next() with valid API key", async () => {
      process.env.AUTOMAKER_API_KEY = "test-secret-key";

      const { authMiddleware } = await import("@/lib/auth.js");
      const { req, res, next } = createMockExpressContext();
      req.headers["x-api-key"] = "test-secret-key";

      authMiddleware(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(res.status).not.toHaveBeenCalled();
    });
  });

  describe("isAuthEnabled", () => {
    it("should return false when no API key is set", async () => {
      delete process.env.AUTOMAKER_API_KEY;

      const { isAuthEnabled } = await import("@/lib/auth.js");
      expect(isAuthEnabled()).toBe(false);
    });

    it("should return true when API key is set", async () => {
      process.env.AUTOMAKER_API_KEY = "test-key";

      const { isAuthEnabled } = await import("@/lib/auth.js");
      expect(isAuthEnabled()).toBe(true);
    });
  });

  describe("getAuthStatus", () => {
    it("should return disabled status when no API key", async () => {
      delete process.env.AUTOMAKER_API_KEY;

      const { getAuthStatus } = await import("@/lib/auth.js");
      const status = getAuthStatus();

      expect(status).toEqual({
        enabled: false,
        method: "none",
      });
    });

    it("should return enabled status when API key is set", async () => {
      process.env.AUTOMAKER_API_KEY = "test-key";

      const { getAuthStatus } = await import("@/lib/auth.js");
      const status = getAuthStatus();

      expect(status).toEqual({
        enabled: true,
        method: "api_key",
      });
    });
  });
});
apps/server/tests/unit/lib/conversation-utils.test.ts (new file, 226 lines)
@@ -0,0 +1,226 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import {
|
||||
extractTextFromContent,
|
||||
normalizeContentBlocks,
|
||||
formatHistoryAsText,
|
||||
convertHistoryToMessages,
|
||||
} from "@/lib/conversation-utils.js";
|
||||
import { conversationHistoryFixture } from "../../fixtures/messages.js";
|
||||
|
||||
describe("conversation-utils.ts", () => {
|
||||
describe("extractTextFromContent", () => {
|
||||
it("should return string content as-is", () => {
|
||||
const result = extractTextFromContent("Hello world");
|
||||
expect(result).toBe("Hello world");
|
||||
});
|
||||
|
||||
it("should extract text from single text block", () => {
|
||||
const content = [{ type: "text", text: "Hello" }];
|
||||
const result = extractTextFromContent(content);
|
||||
expect(result).toBe("Hello");
|
||||
});
|
||||
|
||||
it("should extract and join multiple text blocks with newlines", () => {
|
||||
const content = [
|
||||
{ type: "text", text: "First block" },
|
||||
{ type: "text", text: "Second block" },
|
||||
{ type: "text", text: "Third block" },
|
||||
];
|
||||
const result = extractTextFromContent(content);
|
||||
expect(result).toBe("First block\nSecond block\nThird block");
|
||||
});
|
||||
|
||||
it("should ignore non-text blocks", () => {
|
||||
const content = [
|
||||
{ type: "text", text: "Text content" },
|
||||
{ type: "image", source: { type: "base64", data: "abc" } },
|
||||
{ type: "text", text: "More text" },
|
||||
{ type: "tool_use", name: "bash", input: {} },
|
||||
];
|
||||
const result = extractTextFromContent(content);
|
||||
expect(result).toBe("Text content\nMore text");
|
||||
});
|
||||
|
||||
it("should handle blocks without text property", () => {
|
||||
const content = [
|
||||
{ type: "text", text: "Valid" },
|
||||
{ type: "text" } as any,
|
||||
{ type: "text", text: "Also valid" },
|
||||
];
|
||||
const result = extractTextFromContent(content);
|
||||
expect(result).toBe("Valid\n\nAlso valid");
|
||||
});
|
||||
|
||||
it("should handle empty array", () => {
|
||||
const result = extractTextFromContent([]);
|
||||
expect(result).toBe("");
|
||||
});
|
||||
|
||||
it("should handle array with only non-text blocks", () => {
|
||||
const content = [
|
||||
{ type: "image", source: {} },
|
||||
{ type: "tool_use", name: "test" },
|
||||
];
|
||||
const result = extractTextFromContent(content);
|
||||
expect(result).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("normalizeContentBlocks", () => {
|
||||
it("should convert string to content block array", () => {
|
||||
const result = normalizeContentBlocks("Hello");
|
||||
expect(result).toEqual([{ type: "text", text: "Hello" }]);
|
||||
});
|
||||
|
||||
it("should return array content as-is", () => {
|
||||
const content = [
|
||||
{ type: "text", text: "Hello" },
|
||||
{ type: "image", source: {} },
|
||||
];
|
||||
const result = normalizeContentBlocks(content);
|
||||
expect(result).toBe(content);
|
||||
expect(result).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("should handle empty string", () => {
|
||||
const result = normalizeContentBlocks("");
|
||||
expect(result).toEqual([{ type: "text", text: "" }]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("formatHistoryAsText", () => {
|
||||
it("should return empty string for empty history", () => {
|
||||
const result = formatHistoryAsText([]);
|
||||
expect(result).toBe("");
|
||||
});
|
||||
|
||||
it("should format single user message", () => {
|
||||
const history = [{ role: "user" as const, content: "Hello" }];
|
||||
const result = formatHistoryAsText(history);
|
||||
|
||||
expect(result).toContain("Previous conversation:");
|
||||
expect(result).toContain("User: Hello");
|
||||
expect(result).toContain("---");
|
||||
});
|
||||
|
||||
it("should format single assistant message", () => {
|
||||
const history = [{ role: "assistant" as const, content: "Hi there" }];
|
||||
const result = formatHistoryAsText(history);
|
||||
|
||||
expect(result).toContain("Assistant: Hi there");
|
||||
});
|
||||
|
||||
it("should format multiple messages with correct roles", () => {
|
||||
const history = conversationHistoryFixture.slice(0, 2);
|
||||
const result = formatHistoryAsText(history);
|
||||
|
||||
expect(result).toContain("User: Hello, can you help me?");
|
||||
expect(result).toContain("Assistant: Of course! How can I assist you today?");
|
||||
expect(result).toContain("---");
|
||||
});
|
||||
|
||||
it("should handle messages with array content (multipart)", () => {
|
||||
const history = [conversationHistoryFixture[2]]; // Has text + image
|
||||
const result = formatHistoryAsText(history);
|
||||
|
||||
expect(result).toContain("What is in this image?");
|
||||
expect(result).not.toContain("base64"); // Should not include image data
|
||||
});
|
||||
|
||||
it("should format all messages from fixture", () => {
|
||||
const result = formatHistoryAsText(conversationHistoryFixture);
|
||||
|
||||
expect(result).toContain("Previous conversation:");
|
||||
expect(result).toContain("User: Hello, can you help me?");
|
||||
expect(result).toContain("Assistant: Of course!");
|
||||
expect(result).toContain("User: What is in this image?");
|
||||
expect(result).toContain("---");
|
||||
});
|
||||
|
||||
it("should separate messages with double newlines", () => {
|
||||
const history = [
|
||||
{ role: "user" as const, content: "First" },
|
||||
{ role: "assistant" as const, content: "Second" },
|
||||
];
|
||||
const result = formatHistoryAsText(history);
|
||||
|
||||
expect(result).toMatch(/User: First\n\nAssistant: Second/);
|
||||
});
|
||||
});
|
||||
|
||||
describe("convertHistoryToMessages", () => {
|
||||
it("should convert empty history", () => {
|
||||
const result = convertHistoryToMessages([]);
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
it("should convert single message to SDK format", () => {
|
||||
const history = [{ role: "user" as const, content: "Hello" }];
|
||||
const result = convertHistoryToMessages(history);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]).toMatchObject({
|
||||
type: "user",
|
||||
session_id: "",
|
||||
message: {
|
||||
role: "user",
|
||||
content: [{ type: "text", text: "Hello" }],
|
||||
},
|
||||
parent_tool_use_id: null,
|
||||
});
|
||||
});
|
||||
|
||||
it("should normalize string content to array", () => {
|
||||
const history = [{ role: "assistant" as const, content: "Response" }];
|
||||
const result = convertHistoryToMessages(history);
|
||||
|
||||
expect(result[0].message.content).toEqual([
|
||||
{ type: "text", text: "Response" },
|
||||
]);
|
||||
});
|
||||
|
||||
it("should preserve array content", () => {
|
||||
const history = [
|
||||
{
|
||||
role: "user" as const,
|
||||
content: [
|
||||
{ type: "text", text: "Hello" },
|
||||
{ type: "image", source: {} },
|
||||
],
|
||||
},
|
||||
];
|
||||
const result = convertHistoryToMessages(history);
|
||||
|
||||
expect(result[0].message.content).toHaveLength(2);
|
||||
expect(result[0].message.content[0]).toEqual({ type: "text", text: "Hello" });
|
||||
});
|
||||
|
||||
it("should convert multiple messages", () => {
|
||||
const history = conversationHistoryFixture.slice(0, 2);
|
||||
const result = convertHistoryToMessages(history);
|
||||
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0].type).toBe("user");
|
||||
expect(result[1].type).toBe("assistant");
|
||||
});
|
||||
|
||||
it("should set correct fields for SDK format", () => {
|
||||
const history = [{ role: "user" as const, content: "Test" }];
|
||||
const result = convertHistoryToMessages(history);
|
||||
|
||||
expect(result[0].session_id).toBe("");
|
||||
expect(result[0].parent_tool_use_id).toBeNull();
|
||||
expect(result[0].type).toBe("user");
|
||||
expect(result[0].message.role).toBe("user");
|
||||
});
|
||||
|
||||
it("should handle all messages from fixture", () => {
|
||||
const result = convertHistoryToMessages(conversationHistoryFixture);
|
||||
|
||||
expect(result).toHaveLength(3);
|
||||
expect(result[0].message.content).toBeInstanceOf(Array);
|
||||
expect(result[1].message.content).toBeInstanceOf(Array);
|
||||
expect(result[2].message.content).toBeInstanceOf(Array);
|
||||
});
|
||||
});
|
||||
});
|
||||
apps/server/tests/unit/lib/error-handler.test.ts (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
import { describe, it, expect } from "vitest";
|
||||
import {
|
||||
isAbortError,
|
||||
isAuthenticationError,
|
||||
classifyError,
|
||||
getUserFriendlyErrorMessage,
|
||||
type ErrorType,
|
||||
} from "@/lib/error-handler.js";
|
||||
|
||||
describe("error-handler.ts", () => {
|
||||
describe("isAbortError", () => {
|
||||
it("should detect AbortError by error name", () => {
|
||||
const error = new Error("Operation cancelled");
|
||||
error.name = "AbortError";
|
||||
expect(isAbortError(error)).toBe(true);
|
||||
});
|
||||
|
||||
it("should detect abort error by message content", () => {
|
||||
const error = new Error("Request was aborted");
|
||||
expect(isAbortError(error)).toBe(true);
|
||||
});
|
||||
|
||||
it("should return false for non-abort errors", () => {
|
||||
const error = new Error("Something else went wrong");
|
||||
expect(isAbortError(error)).toBe(false);
|
||||
});
|
||||
|
||||
it("should return false for non-Error objects", () => {
|
||||
expect(isAbortError("not an error")).toBe(false);
|
||||
expect(isAbortError(null)).toBe(false);
|
||||
expect(isAbortError(undefined)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isAuthenticationError", () => {
|
||||
it("should detect 'Authentication failed' message", () => {
|
||||
expect(isAuthenticationError("Authentication failed")).toBe(true);
|
||||
});
|
||||
|
||||
it("should detect 'Invalid API key' message", () => {
|
||||
expect(isAuthenticationError("Invalid API key provided")).toBe(true);
|
||||
});
|
||||
|
||||
it("should detect 'authentication_failed' message", () => {
|
||||
expect(isAuthenticationError("authentication_failed")).toBe(true);
|
||||
});
|
||||
|
||||
it("should detect 'Fix external API key' message", () => {
|
||||
expect(isAuthenticationError("Fix external API key configuration")).toBe(true);
|
||||
});
|
||||
|
||||
it("should return false for non-authentication errors", () => {
|
||||
expect(isAuthenticationError("Network connection error")).toBe(false);
|
||||
expect(isAuthenticationError("File not found")).toBe(false);
|
||||
});
|
||||
|
||||
it("should be case sensitive", () => {
|
||||
expect(isAuthenticationError("authentication Failed")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("classifyError", () => {
|
||||
it("should classify authentication errors", () => {
|
||||
const error = new Error("Authentication failed");
|
||||
const result = classifyError(error);
|
||||
|
||||
expect(result.type).toBe("authentication");
|
||||
expect(result.isAuth).toBe(true);
|
||||
expect(result.isAbort).toBe(false);
|
||||
expect(result.message).toBe("Authentication failed");
|
||||
expect(result.originalError).toBe(error);
|
||||
});
|
||||
|
||||
it("should classify abort errors", () => {
|
||||
const error = new Error("Operation aborted");
|
||||
error.name = "AbortError";
|
||||
const result = classifyError(error);
|
||||
|
||||
expect(result.type).toBe("abort");
|
||||
expect(result.isAbort).toBe(true);
|
||||
expect(result.isAuth).toBe(false);
|
||||
expect(result.message).toBe("Operation aborted");
|
||||
});
|
||||
|
||||
it("should prioritize auth over abort if both match", () => {
|
||||
const error = new Error("Authentication failed and aborted");
|
||||
const result = classifyError(error);
|
||||
|
||||
expect(result.type).toBe("authentication");
|
||||
expect(result.isAuth).toBe(true);
|
||||
expect(result.isAbort).toBe(true); // Still detected as abort too
|
||||
});
|
||||
|
||||
it("should classify generic Error as execution error", () => {
|
||||
const error = new Error("Something went wrong");
|
||||
const result = classifyError(error);
|
||||
|
||||
expect(result.type).toBe("execution");
|
||||
expect(result.isAuth).toBe(false);
|
||||
expect(result.isAbort).toBe(false);
|
||||
});
|
||||
|
||||
it("should classify non-Error objects as unknown", () => {
|
||||
const error = "string error";
|
||||
const result = classifyError(error);
|
||||
|
||||
expect(result.type).toBe("unknown");
|
||||
expect(result.message).toBe("string error");
|
||||
});
|
||||
|
||||
it("should handle null and undefined", () => {
|
||||
const nullResult = classifyError(null);
|
||||
expect(nullResult.type).toBe("unknown");
|
||||
expect(nullResult.message).toBe("Unknown error");
|
||||
|
||||
const undefinedResult = classifyError(undefined);
|
||||
expect(undefinedResult.type).toBe("unknown");
|
||||
expect(undefinedResult.message).toBe("Unknown error");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getUserFriendlyErrorMessage", () => {
|
||||
it("should return friendly message for abort errors", () => {
|
||||
const error = new Error("abort");
|
||||
const result = getUserFriendlyErrorMessage(error);
|
||||
expect(result).toBe("Operation was cancelled");
|
||||
});
|
||||
|
||||
it("should return friendly message for authentication errors", () => {
|
||||
const error = new Error("Authentication failed");
|
||||
const result = getUserFriendlyErrorMessage(error);
|
||||
expect(result).toBe("Authentication failed. Please check your API key.");
|
||||
});
|
||||
|
||||
it("should return original message for other errors", () => {
|
||||
const error = new Error("File not found");
|
||||
const result = getUserFriendlyErrorMessage(error);
|
||||
expect(result).toBe("File not found");
|
||||
});
|
||||
|
||||
it("should handle non-Error objects", () => {
|
||||
const result = getUserFriendlyErrorMessage("Custom error");
|
||||
expect(result).toBe("Custom error");
|
||||
});
|
||||
});
|
||||
});
|
||||
apps/server/tests/unit/lib/events.test.ts (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
import { describe, it, expect, vi } from "vitest";
|
||||
import { createEventEmitter, type EventType } from "@/lib/events.js";
|
||||
|
||||
describe("events.ts", () => {
|
||||
describe("createEventEmitter", () => {
|
||||
it("should emit events to single subscriber", () => {
|
||||
const emitter = createEventEmitter();
|
||||
const callback = vi.fn();
|
||||
|
||||
emitter.subscribe(callback);
|
||||
emitter.emit("agent:stream", { message: "test" });
|
||||
|
||||
expect(callback).toHaveBeenCalledOnce();
|
||||
expect(callback).toHaveBeenCalledWith("agent:stream", { message: "test" });
|
||||
});
|
||||
|
||||
it("should emit events to multiple subscribers", () => {
|
||||
const emitter = createEventEmitter();
|
||||
const callback1 = vi.fn();
|
||||
const callback2 = vi.fn();
|
||||
const callback3 = vi.fn();
|
||||
|
||||
emitter.subscribe(callback1);
|
||||
emitter.subscribe(callback2);
|
||||
emitter.subscribe(callback3);
|
||||
emitter.emit("feature:started", { id: "123" });
|
||||
|
||||
expect(callback1).toHaveBeenCalledOnce();
|
||||
expect(callback2).toHaveBeenCalledOnce();
|
||||
expect(callback3).toHaveBeenCalledOnce();
|
||||
expect(callback1).toHaveBeenCalledWith("feature:started", { id: "123" });
|
||||
});
|
||||
|
||||
it("should support unsubscribe functionality", () => {
|
||||
const emitter = createEventEmitter();
|
||||
const callback = vi.fn();
|
||||
|
||||
const unsubscribe = emitter.subscribe(callback);
|
||||
emitter.emit("agent:stream", { test: 1 });
|
||||
|
||||
expect(callback).toHaveBeenCalledOnce();
|
||||
|
||||
unsubscribe();
|
||||
emitter.emit("agent:stream", { test: 2 });
|
||||
|
||||
expect(callback).toHaveBeenCalledOnce(); // Still called only once
|
||||
});
|
||||
|
||||
it("should handle errors in subscribers without crashing", () => {
|
||||
const emitter = createEventEmitter();
|
||||
const errorCallback = vi.fn(() => {
|
||||
throw new Error("Subscriber error");
|
||||
});
|
||||
const normalCallback = vi.fn();
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
emitter.subscribe(errorCallback);
|
||||
emitter.subscribe(normalCallback);
|
||||
|
||||
expect(() => {
|
||||
emitter.emit("feature:error", { error: "test" });
|
||||
}).not.toThrow();
|
||||
|
||||
expect(errorCallback).toHaveBeenCalledOnce();
|
||||
expect(normalCallback).toHaveBeenCalledOnce();
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("should emit different event types", () => {
|
||||
const emitter = createEventEmitter();
|
||||
const callback = vi.fn();
|
||||
|
||||
emitter.subscribe(callback);
|
||||
|
||||
const eventTypes: EventType[] = [
|
||||
"agent:stream",
|
||||
"auto-mode:started",
|
||||
"feature:completed",
|
||||
"project:analysis-progress",
|
||||
];
|
||||
|
||||
eventTypes.forEach((type) => {
|
||||
emitter.emit(type, { type });
|
||||
});
|
||||
|
||||
expect(callback).toHaveBeenCalledTimes(4);
|
||||
});
|
||||
|
||||
it("should handle emitting without subscribers", () => {
|
||||
const emitter = createEventEmitter();
|
||||
|
||||
expect(() => {
|
||||
emitter.emit("agent:stream", { test: true });
|
||||
}).not.toThrow();
|
||||
});
|
||||
|
||||
it("should allow multiple subscriptions and unsubscriptions", () => {
|
||||
const emitter = createEventEmitter();
|
||||
const callback1 = vi.fn();
|
||||
const callback2 = vi.fn();
|
||||
const callback3 = vi.fn();
|
||||
|
||||
const unsub1 = emitter.subscribe(callback1);
|
||||
const unsub2 = emitter.subscribe(callback2);
|
||||
const unsub3 = emitter.subscribe(callback3);
|
||||
|
||||
emitter.emit("feature:started", { test: 1 });
|
||||
expect(callback1).toHaveBeenCalledOnce();
|
||||
expect(callback2).toHaveBeenCalledOnce();
|
||||
expect(callback3).toHaveBeenCalledOnce();
|
||||
|
||||
unsub2();
|
||||
|
||||
emitter.emit("feature:started", { test: 2 });
|
||||
expect(callback1).toHaveBeenCalledTimes(2);
|
||||
expect(callback2).toHaveBeenCalledOnce(); // Still just once
|
||||
expect(callback3).toHaveBeenCalledTimes(2);
|
||||
|
||||
unsub1();
|
||||
unsub3();
|
||||
|
||||
emitter.emit("feature:started", { test: 3 });
|
||||
expect(callback1).toHaveBeenCalledTimes(2);
|
||||
expect(callback2).toHaveBeenCalledOnce();
|
||||
expect(callback3).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
});
|
||||
apps/server/tests/unit/lib/image-handler.test.ts (new file, 231 lines)
@@ -0,0 +1,231 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import {
|
||||
getMimeTypeForImage,
|
||||
readImageAsBase64,
|
||||
convertImagesToContentBlocks,
|
||||
formatImagePathsForPrompt,
|
||||
} from "@/lib/image-handler.js";
|
||||
import { pngBase64Fixture } from "../../fixtures/images.js";
|
||||
import * as fs from "fs/promises";
|
||||
|
||||
vi.mock("fs/promises");
|
||||
|
||||
describe("image-handler.ts", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe("getMimeTypeForImage", () => {
|
||||
it("should return correct MIME type for .jpg", () => {
|
||||
expect(getMimeTypeForImage("test.jpg")).toBe("image/jpeg");
|
||||
expect(getMimeTypeForImage("/path/to/test.jpg")).toBe("image/jpeg");
|
||||
});
|
||||
|
||||
it("should return correct MIME type for .jpeg", () => {
|
||||
expect(getMimeTypeForImage("test.jpeg")).toBe("image/jpeg");
|
||||
});
|
||||
|
||||
it("should return correct MIME type for .png", () => {
|
||||
expect(getMimeTypeForImage("test.png")).toBe("image/png");
|
||||
});
|
||||
|
||||
it("should return correct MIME type for .gif", () => {
|
||||
expect(getMimeTypeForImage("test.gif")).toBe("image/gif");
|
||||
});
|
||||
|
||||
it("should return correct MIME type for .webp", () => {
|
||||
expect(getMimeTypeForImage("test.webp")).toBe("image/webp");
|
||||
});
|
||||
|
||||
it("should be case-insensitive", () => {
|
||||
expect(getMimeTypeForImage("test.PNG")).toBe("image/png");
|
||||
expect(getMimeTypeForImage("test.JPG")).toBe("image/jpeg");
|
||||
expect(getMimeTypeForImage("test.GIF")).toBe("image/gif");
|
||||
expect(getMimeTypeForImage("test.WEBP")).toBe("image/webp");
|
||||
});
|
||||
|
||||
it("should default to image/png for unknown extensions", () => {
|
||||
expect(getMimeTypeForImage("test.unknown")).toBe("image/png");
|
||||
expect(getMimeTypeForImage("test.txt")).toBe("image/png");
|
||||
expect(getMimeTypeForImage("test")).toBe("image/png");
|
||||
});
|
||||
|
||||
it("should handle paths with multiple dots", () => {
|
||||
expect(getMimeTypeForImage("my.image.file.jpg")).toBe("image/jpeg");
|
||||
});
|
||||
});
|
||||
|
||||
describe("readImageAsBase64", () => {
|
||||
it("should read image and return base64 data", async () => {
|
||||
const mockBuffer = Buffer.from(pngBase64Fixture, "base64");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
const result = await readImageAsBase64("/path/to/test.png");
|
||||
|
||||
expect(result).toMatchObject({
|
||||
base64: pngBase64Fixture,
|
||||
mimeType: "image/png",
|
||||
filename: "test.png",
|
||||
originalPath: "/path/to/test.png",
|
||||
});
|
||||
expect(fs.readFile).toHaveBeenCalledWith("/path/to/test.png");
|
||||
});
|
||||
|
||||
it("should handle different image formats", async () => {
|
||||
const mockBuffer = Buffer.from("jpeg-data");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
const result = await readImageAsBase64("/path/to/photo.jpg");
|
||||
|
||||
expect(result.mimeType).toBe("image/jpeg");
|
||||
expect(result.filename).toBe("photo.jpg");
|
||||
expect(result.base64).toBe(mockBuffer.toString("base64"));
|
||||
});
|
||||
|
||||
it("should extract filename from path", async () => {
|
||||
const mockBuffer = Buffer.from("data");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
const result = await readImageAsBase64("/deep/nested/path/image.webp");
|
||||
|
||||
expect(result.filename).toBe("image.webp");
|
||||
});
|
||||
|
||||
it("should throw error if file cannot be read", async () => {
|
||||
vi.mocked(fs.readFile).mockRejectedValue(new Error("File not found"));
|
||||
|
||||
await expect(readImageAsBase64("/nonexistent.png")).rejects.toThrow(
|
||||
"File not found"
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("convertImagesToContentBlocks", () => {
|
||||
it("should convert single image to content block", async () => {
|
||||
const mockBuffer = Buffer.from(pngBase64Fixture, "base64");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
const result = await convertImagesToContentBlocks(["/path/test.png"]);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]).toMatchObject({
|
||||
type: "image",
|
||||
source: {
|
||||
type: "base64",
|
||||
media_type: "image/png",
|
||||
data: pngBase64Fixture,
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("should convert multiple images to content blocks", async () => {
|
||||
const mockBuffer = Buffer.from("test-data");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
const result = await convertImagesToContentBlocks([
|
||||
"/a.png",
|
||||
"/b.jpg",
|
||||
"/c.webp",
|
||||
]);
|
||||
|
||||
expect(result).toHaveLength(3);
|
||||
expect(result[0].source.media_type).toBe("image/png");
|
||||
expect(result[1].source.media_type).toBe("image/jpeg");
|
||||
expect(result[2].source.media_type).toBe("image/webp");
|
||||
});
|
||||
|
||||
it("should resolve relative paths with workDir", async () => {
|
||||
const mockBuffer = Buffer.from("data");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
await convertImagesToContentBlocks(["relative.png"], "/work/dir");
|
||||
|
||||
// Use path-agnostic check since Windows uses backslashes
|
||||
const calls = vi.mocked(fs.readFile).mock.calls;
|
||||
expect(calls[0][0]).toMatch(/relative\.png$/);
|
||||
expect(calls[0][0]).toContain("work");
|
||||
expect(calls[0][0]).toContain("dir");
|
||||
});
|
||||
|
||||
it("should handle absolute paths without workDir", async () => {
|
||||
const mockBuffer = Buffer.from("data");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
await convertImagesToContentBlocks(["/absolute/path.png"]);
|
||||
|
||||
expect(fs.readFile).toHaveBeenCalledWith("/absolute/path.png");
|
||||
});
|
||||
|
||||
it("should continue processing on individual image errors", async () => {
|
||||
vi.mocked(fs.readFile)
|
||||
.mockResolvedValueOnce(Buffer.from("ok1"))
|
||||
.mockRejectedValueOnce(new Error("Failed"))
|
||||
.mockResolvedValueOnce(Buffer.from("ok2"));
|
||||
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
const result = await convertImagesToContentBlocks([
|
||||
"/a.png",
|
||||
"/b.png",
|
||||
"/c.png",
|
||||
]);
|
||||
|
||||
expect(result).toHaveLength(2); // Only successful images
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("should return empty array for empty input", async () => {
|
||||
const result = await convertImagesToContentBlocks([]);
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
it("should handle undefined workDir", async () => {
|
||||
const mockBuffer = Buffer.from("data");
|
||||
vi.mocked(fs.readFile).mockResolvedValue(mockBuffer);
|
||||
|
||||
const result = await convertImagesToContentBlocks(["/test.png"], undefined);
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(fs.readFile).toHaveBeenCalledWith("/test.png");
|
||||
});
|
||||
});
|
||||
|
||||
describe("formatImagePathsForPrompt", () => {
|
||||
it("should format single image path as bulleted list", () => {
|
||||
const result = formatImagePathsForPrompt(["/path/image.png"]);
|
||||
|
||||
expect(result).toContain("\n\nAttached images:");
|
||||
expect(result).toContain("- /path/image.png");
|
||||
});
|
||||
|
||||
it("should format multiple image paths as bulleted list", () => {
|
||||
const result = formatImagePathsForPrompt([
|
||||
"/path/a.png",
|
||||
"/path/b.jpg",
|
||||
"/path/c.webp",
|
||||
]);
|
||||
|
||||
expect(result).toContain("Attached images:");
|
||||
expect(result).toContain("- /path/a.png");
|
||||
expect(result).toContain("- /path/b.jpg");
|
||||
expect(result).toContain("- /path/c.webp");
|
||||
});
|
||||
|
||||
it("should return empty string for empty array", () => {
|
||||
const result = formatImagePathsForPrompt([]);
|
||||
expect(result).toBe("");
|
||||
});
|
||||
|
||||
it("should start with double newline", () => {
|
||||
const result = formatImagePathsForPrompt(["/test.png"]);
|
||||
expect(result.startsWith("\n\n")).toBe(true);
|
||||
});
|
||||
|
||||
it("should handle paths with special characters", () => {
|
||||
const result = formatImagePathsForPrompt(["/path/with spaces/image.png"]);
|
||||
expect(result).toContain("- /path/with spaces/image.png");
|
||||
});
|
||||
});
|
||||
});
|
||||
apps/server/tests/unit/lib/model-resolver.test.ts (new file, 156 lines)
@@ -0,0 +1,156 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import {
|
||||
resolveModelString,
|
||||
getEffectiveModel,
|
||||
CLAUDE_MODEL_MAP,
|
||||
DEFAULT_MODELS,
|
||||
} from "@/lib/model-resolver.js";
|
||||
|
||||
describe("model-resolver.ts", () => {
|
||||
let consoleSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
consoleSpy = {
|
||||
log: vi.spyOn(console, "log").mockImplementation(() => {}),
|
||||
warn: vi.spyOn(console, "warn").mockImplementation(() => {}),
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
consoleSpy.log.mockRestore();
|
||||
consoleSpy.warn.mockRestore();
|
||||
});
|
||||
|
||||
describe("resolveModelString", () => {
|
||||
it("should resolve 'haiku' alias to full model string", () => {
|
||||
const result = resolveModelString("haiku");
|
||||
expect(result).toBe("claude-haiku-4-5");
|
||||
});
|
||||
|
||||
it("should resolve 'sonnet' alias to full model string", () => {
|
||||
const result = resolveModelString("sonnet");
|
||||
expect(result).toBe("claude-sonnet-4-20250514");
|
||||
});
|
||||
|
||||
it("should resolve 'opus' alias to full model string", () => {
|
||||
const result = resolveModelString("opus");
|
||||
expect(result).toBe("claude-opus-4-5-20251101");
|
||||
expect(consoleSpy.log).toHaveBeenCalledWith(
|
||||
expect.stringContaining('Resolved model alias: "opus"')
|
||||
);
|
||||
});
|
||||
|
||||
it("should pass through OpenAI gpt-* models", () => {
|
||||
const models = ["gpt-5.2", "gpt-5.1-codex", "gpt-4"];
|
||||
models.forEach((model) => {
|
||||
const result = resolveModelString(model);
|
||||
expect(result).toBe(model);
|
||||
});
|
||||
expect(consoleSpy.log).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Using OpenAI/Codex model")
|
||||
);
|
||||
});
|
||||
|
||||
it("should treat o-series models as unknown (Codex CLI doesn't support them)", () => {
|
||||
const models = ["o1", "o1-mini", "o3"];
|
||||
models.forEach((model) => {
|
||||
const result = resolveModelString(model);
|
||||
// Should fall back to default since these aren't supported
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
});
|
||||
|
||||
it("should pass through full Claude model strings", () => {
|
||||
const models = [
|
||||
"claude-opus-4-5-20251101",
|
||||
"claude-sonnet-4-20250514",
|
||||
"claude-haiku-4-5",
|
||||
];
|
||||
models.forEach((model) => {
|
||||
const result = resolveModelString(model);
|
||||
expect(result).toBe(model);
|
||||
});
|
||||
expect(consoleSpy.log).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Using full Claude model string")
|
||||
);
|
||||
});
|
||||
|
||||
it("should return default model when modelKey is undefined", () => {
|
||||
const result = resolveModelString(undefined);
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
|
||||
it("should return custom default model when provided", () => {
|
||||
const customDefault = "custom-model";
|
||||
const result = resolveModelString(undefined, customDefault);
|
||||
expect(result).toBe(customDefault);
|
||||
});
|
||||
|
||||
it("should return default for unknown model key", () => {
|
||||
const result = resolveModelString("unknown-model");
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
expect(consoleSpy.warn).toHaveBeenCalledWith(
|
||||
expect.stringContaining('Unknown model key "unknown-model"')
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle empty string", () => {
|
||||
const result = resolveModelString("");
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getEffectiveModel", () => {
|
||||
it("should prioritize explicit model over session and default", () => {
|
||||
const result = getEffectiveModel("opus", "haiku", "gpt-5.2");
|
||||
expect(result).toBe("claude-opus-4-5-20251101");
|
||||
});
|
||||
|
||||
it("should use session model when explicit is not provided", () => {
|
||||
const result = getEffectiveModel(undefined, "sonnet", "gpt-5.2");
|
||||
expect(result).toBe("claude-sonnet-4-20250514");
|
||||
});
|
||||
|
||||
it("should use default when neither explicit nor session is provided", () => {
|
||||
const customDefault = "claude-haiku-4-5";
|
||||
const result = getEffectiveModel(undefined, undefined, customDefault);
|
||||
expect(result).toBe(customDefault);
|
||||
});
|
||||
|
||||
it("should use Claude default when no arguments provided", () => {
|
||||
const result = getEffectiveModel();
|
||||
expect(result).toBe(DEFAULT_MODELS.claude);
|
||||
});
|
||||
|
||||
it("should handle explicit empty strings as undefined", () => {
|
||||
const result = getEffectiveModel("", "haiku");
|
||||
expect(result).toBe("claude-haiku-4-5");
|
||||
});
|
||||
});
|
||||
|
||||
describe("CLAUDE_MODEL_MAP", () => {
|
||||
it("should have haiku, sonnet, opus mappings", () => {
|
||||
expect(CLAUDE_MODEL_MAP).toHaveProperty("haiku");
|
||||
expect(CLAUDE_MODEL_MAP).toHaveProperty("sonnet");
|
||||
expect(CLAUDE_MODEL_MAP).toHaveProperty("opus");
|
||||
});
|
||||
|
||||
it("should have valid Claude model strings", () => {
|
||||
expect(CLAUDE_MODEL_MAP.haiku).toContain("haiku");
|
||||
expect(CLAUDE_MODEL_MAP.sonnet).toContain("sonnet");
|
||||
expect(CLAUDE_MODEL_MAP.opus).toContain("opus");
|
||||
});
|
||||
});
|
||||
|
||||
describe("DEFAULT_MODELS", () => {
|
||||
it("should have claude and openai defaults", () => {
|
||||
expect(DEFAULT_MODELS).toHaveProperty("claude");
|
||||
expect(DEFAULT_MODELS).toHaveProperty("openai");
|
||||
});
|
||||
|
||||
it("should have valid default models", () => {
|
||||
expect(DEFAULT_MODELS.claude).toContain("claude");
|
||||
expect(DEFAULT_MODELS.openai).toContain("gpt");
|
||||
});
|
||||
});
|
||||
});
|
||||
apps/server/tests/unit/lib/prompt-builder.test.ts (new file, 197 lines)
@@ -0,0 +1,197 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||
import { buildPromptWithImages } from "@/lib/prompt-builder.js";
|
||||
import * as imageHandler from "@/lib/image-handler.js";
|
||||
|
||||
vi.mock("@/lib/image-handler.js");
|
||||
|
||||
describe("prompt-builder.ts", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe("buildPromptWithImages", () => {
|
||||
it("should return plain text when no images provided", async () => {
|
||||
const result = await buildPromptWithImages("Hello world");
|
||||
|
||||
expect(result).toEqual({
|
||||
content: "Hello world",
|
||||
hasImages: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("should return plain text when imagePaths is empty array", async () => {
|
||||
const result = await buildPromptWithImages("Hello world", []);
|
||||
|
||||
expect(result).toEqual({
|
||||
content: "Hello world",
|
||||
hasImages: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("should build content blocks with single image", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "base64data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Describe this image", [
|
||||
"/test.png",
|
||||
]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
expect(Array.isArray(result.content)).toBe(true);
|
||||
const content = result.content as Array<any>;
|
||||
expect(content).toHaveLength(2);
|
||||
expect(content[0]).toEqual({ type: "text", text: "Describe this image" });
|
||||
expect(content[1].type).toBe("image");
|
||||
});
|
||||
|
||||
it("should build content blocks with multiple images", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data1" },
|
||||
},
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/jpeg", data: "data2" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Analyze these", [
|
||||
"/a.png",
|
||||
"/b.jpg",
|
||||
]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
const content = result.content as Array<any>;
|
||||
expect(content).toHaveLength(3); // 1 text + 2 images
|
||||
expect(content[0].type).toBe("text");
|
||||
expect(content[1].type).toBe("image");
|
||||
expect(content[2].type).toBe("image");
|
||||
});
|
||||
|
||||
it("should include image paths in text when requested", async () => {
|
||||
vi.mocked(imageHandler.formatImagePathsForPrompt).mockReturnValue(
|
||||
"\n\nAttached images:\n- /test.png"
|
||||
);
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages(
|
||||
"Base prompt",
|
||||
["/test.png"],
|
||||
undefined,
|
||||
true
|
||||
);
|
||||
|
||||
expect(imageHandler.formatImagePathsForPrompt).toHaveBeenCalledWith([
|
||||
"/test.png",
|
||||
]);
|
||||
const content = result.content as Array<any>;
|
||||
expect(content[0].text).toContain("Base prompt");
|
||||
expect(content[0].text).toContain("Attached images:");
|
||||
});
|
||||
|
||||
it("should not include image paths by default", async () => {
|
||||
vi.mocked(imageHandler.formatImagePathsForPrompt).mockReturnValue(
|
||||
"\n\nAttached images:\n- /test.png"
|
||||
);
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("Base prompt", ["/test.png"]);
|
||||
|
||||
expect(imageHandler.formatImagePathsForPrompt).not.toHaveBeenCalled();
|
||||
const content = result.content as Array<any>;
|
||||
expect(content[0].text).toBe("Base prompt");
|
||||
});
|
||||
|
||||
it("should pass workDir to convertImagesToContentBlocks", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
await buildPromptWithImages("Test", ["/test.png"], "/work/dir");
|
||||
|
||||
expect(imageHandler.convertImagesToContentBlocks).toHaveBeenCalledWith(
|
||||
["/test.png"],
|
||||
"/work/dir"
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle empty text content", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages("", ["/test.png"]);
|
||||
|
||||
expect(result.hasImages).toBe(true);
|
||||
// When text is empty/whitespace, should only have image blocks
|
||||
const content = result.content as Array<any>;
|
||||
expect(content.every((block) => block.type === "image")).toBe(true);
|
||||
});
|
||||
|
||||
it("should trim text content before checking if empty", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
const result = await buildPromptWithImages(" ", ["/test.png"]);
|
||||
|
||||
const content = result.content as Array<any>;
|
||||
// Whitespace-only text should be excluded
|
||||
expect(content.every((block) => block.type === "image")).toBe(true);
|
||||
});
|
||||
|
||||
it("should return text when only one block and it's text", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([]);
|
||||
|
||||
const result = await buildPromptWithImages("Just text", ["/missing.png"]);
|
||||
|
||||
// If no images are successfully loaded, should return just the text
|
||||
expect(result.content).toBe("Just text");
|
||||
expect(result.hasImages).toBe(true); // Still true because images were requested
|
||||
});
|
||||
|
||||
it("should handle workDir with relative paths", async () => {
|
||||
vi.mocked(imageHandler.convertImagesToContentBlocks).mockResolvedValue([
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "data" },
|
||||
},
|
||||
]);
|
||||
|
||||
await buildPromptWithImages(
|
||||
"Test",
|
||||
["relative.png"],
|
||||
"/absolute/work/dir"
|
||||
);
|
||||
|
||||
expect(imageHandler.convertImagesToContentBlocks).toHaveBeenCalledWith(
|
||||
["relative.png"],
|
||||
"/absolute/work/dir"
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
apps/server/tests/unit/lib/security.test.ts (new file, 297 lines)
@@ -0,0 +1,297 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
import path from "path";
|
||||
|
||||
/**
|
||||
* Note: security.ts maintains module-level state (allowed paths Set).
|
||||
* We need to reset modules and reimport for each test to get fresh state.
|
||||
*/
|
||||
describe("security.ts", () => {
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
describe("initAllowedPaths", () => {
|
||||
it("should parse comma-separated directories from environment", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,/path2,/path3";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/path1"));
|
||||
expect(allowed).toContain(path.resolve("/path2"));
|
||||
expect(allowed).toContain(path.resolve("/path3"));
|
||||
});
|
||||
|
||||
it("should trim whitespace from paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = " /path1 , /path2 , /path3 ";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/path1"));
|
||||
expect(allowed).toContain(path.resolve("/path2"));
|
||||
});
|
||||
|
||||
it("should always include DATA_DIR if set", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "/data/dir";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/data/dir"));
|
||||
});
|
||||
|
||||
it("should handle empty ALLOWED_PROJECT_DIRS", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "/data";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toHaveLength(1);
|
||||
expect(allowed[0]).toBe(path.resolve("/data"));
|
||||
});
|
||||
|
||||
it("should skip empty entries in comma list", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,,/path2, ,/path3";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toHaveLength(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe("addAllowedPath", () => {
|
||||
it("should add path to allowed list", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, addAllowedPath, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
initAllowedPaths();
|
||||
|
||||
addAllowedPath("/new/path");
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
expect(allowed).toContain(path.resolve("/new/path"));
|
||||
});
|
||||
|
||||
it("should resolve relative paths before adding", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, addAllowedPath, getAllowedPaths } =
|
||||
await import("@/lib/security.js");
|
||||
initAllowedPaths();
|
||||
|
||||
addAllowedPath("./relative/path");
|
||||
|
||||
const allowed = getAllowedPaths();
|
||||
const cwd = process.cwd();
|
||||
expect(allowed).toContain(path.resolve(cwd, "./relative/path"));
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPathAllowed", () => {
|
||||
it("should allow paths under allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/allowed/project/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/allowed/project/subdir/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/allowed/project/deep/nested/file.txt")).toBe(true);
|
||||
});
|
||||
|
||||
it("should allow the exact allowed directory", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/allowed/project")).toBe(true);
|
||||
});
|
||||
|
||||
it("should reject paths outside allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/not/allowed/file.txt")).toBe(false);
|
||||
expect(isPathAllowed("/tmp/file.txt")).toBe(false);
|
||||
expect(isPathAllowed("/etc/passwd")).toBe(false);
|
||||
});
|
||||
|
||||
it("should block path traversal attempts", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
// These should resolve outside the allowed directory
|
||||
expect(isPathAllowed("/allowed/project/../../../etc/passwd")).toBe(false);
|
||||
expect(isPathAllowed("/allowed/project/../../other/file.txt")).toBe(false);
|
||||
});
|
||||
|
||||
it("should resolve relative paths correctly", async () => {
|
||||
const cwd = process.cwd();
|
||||
process.env.ALLOWED_PROJECT_DIRS = cwd;
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("./file.txt")).toBe(true);
|
||||
expect(isPathAllowed("./subdir/file.txt")).toBe(true);
|
||||
});
|
||||
|
||||
it("should reject paths that are parents of allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed/project/subdir";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/allowed/project")).toBe(false);
|
||||
expect(isPathAllowed("/allowed")).toBe(false);
|
||||
});
|
||||
|
||||
it("should handle multiple allowed directories", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,/path2,/path3";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, isPathAllowed } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(isPathAllowed("/path1/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/path2/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/path3/file.txt")).toBe(true);
|
||||
expect(isPathAllowed("/path4/file.txt")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("validatePath", () => {
|
||||
it("should return resolved path for allowed paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = validatePath("/allowed/file.txt");
|
||||
expect(result).toBe(path.resolve("/allowed/file.txt"));
|
||||
});
|
||||
|
||||
it("should throw error for disallowed paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(() => validatePath("/disallowed/file.txt")).toThrow("Access denied");
|
||||
expect(() => validatePath("/disallowed/file.txt")).toThrow(
|
||||
"not in an allowed directory"
|
||||
);
|
||||
});
|
||||
|
||||
it("should include the file path in error message", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/allowed";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
expect(() => validatePath("/bad/path.txt")).toThrow("/bad/path.txt");
|
||||
});
|
||||
|
||||
it("should resolve paths before validation", async () => {
|
||||
const cwd = process.cwd();
|
||||
process.env.ALLOWED_PROJECT_DIRS = cwd;
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, validatePath } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = validatePath("./file.txt");
|
||||
expect(result).toBe(path.resolve(cwd, "./file.txt"));
|
||||
});
|
||||
});
|
||||
|
||||
describe("getAllowedPaths", () => {
|
||||
it("should return array of allowed paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/path1,/path2";
|
||||
process.env.DATA_DIR = "/data";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = getAllowedPaths();
|
||||
expect(Array.isArray(result)).toBe(true);
|
||||
expect(result.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should return resolved paths", async () => {
|
||||
process.env.ALLOWED_PROJECT_DIRS = "/test";
|
||||
process.env.DATA_DIR = "";
|
||||
|
||||
const { initAllowedPaths, getAllowedPaths } = await import(
|
||||
"@/lib/security.js"
|
||||
);
|
||||
initAllowedPaths();
|
||||
|
||||
const result = getAllowedPaths();
|
||||
expect(result[0]).toBe(path.resolve("/test"));
|
||||
});
|
||||
});
|
||||
});
|
||||
apps/server/tests/unit/lib/subprocess-manager.test.ts (new file, 482 lines)
@@ -0,0 +1,482 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import {
|
||||
spawnJSONLProcess,
|
||||
spawnProcess,
|
||||
type SubprocessOptions,
|
||||
} from "@/lib/subprocess-manager.js";
|
||||
import * as cp from "child_process";
|
||||
import { EventEmitter } from "events";
|
||||
import { Readable } from "stream";
|
||||
import { collectAsyncGenerator } from "../../utils/helpers.js";
|
||||
|
||||
vi.mock("child_process");

describe("subprocess-manager.ts", () => {
  let consoleSpy: any;

  beforeEach(() => {
    vi.clearAllMocks();
    consoleSpy = {
      log: vi.spyOn(console, "log").mockImplementation(() => {}),
      error: vi.spyOn(console, "error").mockImplementation(() => {}),
    };
  });

  afterEach(() => {
    consoleSpy.log.mockRestore();
    consoleSpy.error.mockRestore();
  });

  /**
   * Helper to create a mock ChildProcess with stdout/stderr streams
   */
  function createMockProcess(config: {
    stdoutLines?: string[];
    stderrLines?: string[];
    exitCode?: number;
    error?: Error;
    delayMs?: number;
  }) {
    const mockProcess = new EventEmitter() as any;

    // Create readable streams for stdout and stderr
    const stdout = new Readable({ read() {} });
    const stderr = new Readable({ read() {} });

    mockProcess.stdout = stdout;
    mockProcess.stderr = stderr;
    mockProcess.kill = vi.fn();

    // Use process.nextTick to ensure readline interface is set up first
    process.nextTick(() => {
      // Emit stderr lines immediately
      if (config.stderrLines) {
        for (const line of config.stderrLines) {
          stderr.emit("data", Buffer.from(line));
        }
      }

      // Emit stdout lines with small delays to ensure readline processes them
      const emitLines = async () => {
        if (config.stdoutLines) {
          for (const line of config.stdoutLines) {
            stdout.push(line + "\n");
            // Small delay to allow readline to process
            await new Promise((resolve) => setImmediate(resolve));
          }
        }

        // Small delay before ending stream
        await new Promise((resolve) => setImmediate(resolve));
        stdout.push(null); // End stdout

        // Small delay before exit
        await new Promise((resolve) =>
          setTimeout(resolve, config.delayMs ?? 10)
        );

        // Emit exit or error
        if (config.error) {
          mockProcess.emit("error", config.error);
        } else {
          mockProcess.emit("exit", config.exitCode ?? 0);
        }
      };

      emitLines();
    });

    return mockProcess;
  }

  describe("spawnJSONLProcess", () => {
    const baseOptions: SubprocessOptions = {
      command: "test-command",
      args: ["arg1", "arg2"],
      cwd: "/test/dir",
    };

    it("should yield parsed JSONL objects line by line", async () => {
      const mockProcess = createMockProcess({
        stdoutLines: [
          '{"type":"start","id":1}',
          '{"type":"progress","value":50}',
          '{"type":"complete","result":"success"}',
        ],
        exitCode: 0,
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      const results = await collectAsyncGenerator(generator);

      expect(results).toHaveLength(3);
      expect(results[0]).toEqual({ type: "start", id: 1 });
      expect(results[1]).toEqual({ type: "progress", value: 50 });
      expect(results[2]).toEqual({ type: "complete", result: "success" });
    });

    it("should skip empty lines", async () => {
      const mockProcess = createMockProcess({
        stdoutLines: [
          '{"type":"first"}',
          "",
          "   ",
          '{"type":"second"}',
        ],
        exitCode: 0,
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      const results = await collectAsyncGenerator(generator);

      expect(results).toHaveLength(2);
      expect(results[0]).toEqual({ type: "first" });
      expect(results[1]).toEqual({ type: "second" });
    });

    it("should yield error for malformed JSON and continue processing", async () => {
      const mockProcess = createMockProcess({
        stdoutLines: [
          '{"type":"valid"}',
          '{invalid json}',
          '{"type":"also_valid"}',
        ],
        exitCode: 0,
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      const results = await collectAsyncGenerator(generator);

      expect(results).toHaveLength(3);
      expect(results[0]).toEqual({ type: "valid" });
      expect(results[1]).toMatchObject({
        type: "error",
        error: expect.stringContaining("Failed to parse output"),
      });
      expect(results[2]).toEqual({ type: "also_valid" });
    });

    it("should collect stderr output", async () => {
      const mockProcess = createMockProcess({
        stdoutLines: ['{"type":"test"}'],
        stderrLines: ["Warning: something happened", "Error: critical issue"],
        exitCode: 0,
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      await collectAsyncGenerator(generator);

      expect(consoleSpy.error).toHaveBeenCalledWith(
        expect.stringContaining("Warning: something happened")
      );
      expect(consoleSpy.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: critical issue")
      );
    });

    it("should yield error on non-zero exit code", async () => {
      const mockProcess = createMockProcess({
        stdoutLines: ['{"type":"started"}'],
        stderrLines: ["Process failed with error"],
        exitCode: 1,
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      const results = await collectAsyncGenerator(generator);

      expect(results).toHaveLength(2);
      expect(results[0]).toEqual({ type: "started" });
      expect(results[1]).toMatchObject({
        type: "error",
        error: expect.stringContaining("Process failed with error"),
      });
    });

    it("should yield error with exit code when stderr is empty", async () => {
      const mockProcess = createMockProcess({
        stdoutLines: ['{"type":"test"}'],
        exitCode: 127,
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      const results = await collectAsyncGenerator(generator);

      expect(results).toHaveLength(2);
      expect(results[1]).toMatchObject({
        type: "error",
        error: "Process exited with code 127",
      });
    });

    it("should handle process spawn errors", async () => {
      const mockProcess = createMockProcess({
        error: new Error("Command not found"),
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      const results = await collectAsyncGenerator(generator);

      // When process.on('error') fires, exitCode is null
      // The generator should handle this gracefully
      expect(results).toEqual([]);
    });

    it("should kill process on AbortController signal", async () => {
      const abortController = new AbortController();
      const mockProcess = createMockProcess({
        stdoutLines: ['{"type":"start"}'],
        exitCode: 0,
        delayMs: 100, // Delay to allow abort
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess({
        ...baseOptions,
        abortController,
      });

      // Start consuming the generator
      const promise = collectAsyncGenerator(generator);

      // Abort after a short delay
      setTimeout(() => abortController.abort(), 20);

      await promise;

      expect(mockProcess.kill).toHaveBeenCalledWith("SIGTERM");
      expect(consoleSpy.log).toHaveBeenCalledWith(
        expect.stringContaining("Abort signal received")
      );
    });

    // Note: Timeout behavior tests are omitted from unit tests as they involve
    // complex timing interactions that are difficult to mock reliably.
    // These scenarios are better covered by integration tests with real subprocesses.
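
    // For reference, a rough sketch of what such a test could look like with fake
    // timers, assuming a hypothetical `timeoutMs` option on SubprocessOptions (the
    // real option name and semantics are not verified here, so it stays commented out):
    //
    //   it("should kill the process when the timeout elapses", async () => {
    //     vi.useFakeTimers();
    //     const mockProcess = createMockProcess({ delayMs: 60_000 });
    //     vi.mocked(cp.spawn).mockReturnValue(mockProcess);
    //
    //     const promise = collectAsyncGenerator(
    //       spawnJSONLProcess({ ...baseOptions, timeoutMs: 1_000 })
    //     );
    //     await vi.advanceTimersByTimeAsync(1_000);
    //     await promise;
    //
    //     expect(mockProcess.kill).toHaveBeenCalledWith("SIGTERM");
    //     vi.useRealTimers();
    //   });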

    it("should spawn process with correct arguments", async () => {
      const mockProcess = createMockProcess({ exitCode: 0 });
      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const options: SubprocessOptions = {
        command: "my-command",
        args: ["--flag", "value"],
        cwd: "/work/dir",
        env: { CUSTOM_VAR: "test" },
      };

      const generator = spawnJSONLProcess(options);
      await collectAsyncGenerator(generator);

      expect(cp.spawn).toHaveBeenCalledWith("my-command", ["--flag", "value"], {
        cwd: "/work/dir",
        env: expect.objectContaining({ CUSTOM_VAR: "test" }),
        stdio: ["ignore", "pipe", "pipe"],
      });
    });

    it("should merge env with process.env", async () => {
      const mockProcess = createMockProcess({ exitCode: 0 });
      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const options: SubprocessOptions = {
        command: "test",
        args: [],
        cwd: "/test",
        env: { CUSTOM: "value" },
      };

      const generator = spawnJSONLProcess(options);
      await collectAsyncGenerator(generator);

      expect(cp.spawn).toHaveBeenCalledWith(
        "test",
        [],
        expect.objectContaining({
          env: expect.objectContaining({
            CUSTOM: "value",
            // Should also include existing process.env
            NODE_ENV: process.env.NODE_ENV,
          }),
        })
      );
    });

    it("should handle complex JSON objects", async () => {
      const complexObject = {
        type: "complex",
        nested: { deep: { value: [1, 2, 3] } },
        array: [{ id: 1 }, { id: 2 }],
        string: "with \"quotes\" and \\backslashes",
      };

      const mockProcess = createMockProcess({
        stdoutLines: [JSON.stringify(complexObject)],
        exitCode: 0,
      });

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      const generator = spawnJSONLProcess(baseOptions);
      const results = await collectAsyncGenerator(generator);

      expect(results).toHaveLength(1);
      expect(results[0]).toEqual(complexObject);
    });
  });

  describe("spawnProcess", () => {
    const baseOptions: SubprocessOptions = {
      command: "test-command",
      args: ["arg1"],
      cwd: "/test",
    };

    it("should collect stdout and stderr", async () => {
      const mockProcess = new EventEmitter() as any;
      const stdout = new Readable({ read() {} });
      const stderr = new Readable({ read() {} });

      mockProcess.stdout = stdout;
      mockProcess.stderr = stderr;
      mockProcess.kill = vi.fn();

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      setTimeout(() => {
        stdout.push("line 1\n");
        stdout.push("line 2\n");
        stdout.push(null);

        stderr.push("error 1\n");
        stderr.push("error 2\n");
        stderr.push(null);

        mockProcess.emit("exit", 0);
      }, 10);

      const result = await spawnProcess(baseOptions);

      expect(result.stdout).toBe("line 1\nline 2\n");
      expect(result.stderr).toBe("error 1\nerror 2\n");
      expect(result.exitCode).toBe(0);
    });

    it("should return correct exit code", async () => {
      const mockProcess = new EventEmitter() as any;
      mockProcess.stdout = new Readable({ read() {} });
      mockProcess.stderr = new Readable({ read() {} });
      mockProcess.kill = vi.fn();

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      setTimeout(() => {
        mockProcess.stdout.push(null);
        mockProcess.stderr.push(null);
        mockProcess.emit("exit", 42);
      }, 10);

      const result = await spawnProcess(baseOptions);

      expect(result.exitCode).toBe(42);
    });

    it("should handle process errors", async () => {
      const mockProcess = new EventEmitter() as any;
      mockProcess.stdout = new Readable({ read() {} });
      mockProcess.stderr = new Readable({ read() {} });
      mockProcess.kill = vi.fn();

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      setTimeout(() => {
        mockProcess.emit("error", new Error("Spawn failed"));
      }, 10);

      await expect(spawnProcess(baseOptions)).rejects.toThrow("Spawn failed");
    });

    it("should handle AbortController signal", async () => {
      const abortController = new AbortController();
      const mockProcess = new EventEmitter() as any;
      mockProcess.stdout = new Readable({ read() {} });
      mockProcess.stderr = new Readable({ read() {} });
      mockProcess.kill = vi.fn();

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      setTimeout(() => abortController.abort(), 20);

      await expect(
        spawnProcess({ ...baseOptions, abortController })
      ).rejects.toThrow("Process aborted");

      expect(mockProcess.kill).toHaveBeenCalledWith("SIGTERM");
    });

    it("should spawn with correct options", async () => {
      const mockProcess = new EventEmitter() as any;
      mockProcess.stdout = new Readable({ read() {} });
      mockProcess.stderr = new Readable({ read() {} });
      mockProcess.kill = vi.fn();

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      setTimeout(() => {
        mockProcess.stdout.push(null);
        mockProcess.stderr.push(null);
        mockProcess.emit("exit", 0);
      }, 10);

      const options: SubprocessOptions = {
        command: "my-cmd",
        args: ["--verbose"],
        cwd: "/my/dir",
        env: { MY_VAR: "value" },
      };

      await spawnProcess(options);

      expect(cp.spawn).toHaveBeenCalledWith("my-cmd", ["--verbose"], {
        cwd: "/my/dir",
        env: expect.objectContaining({ MY_VAR: "value" }),
        stdio: ["ignore", "pipe", "pipe"],
      });
    });

    it("should handle empty stdout and stderr", async () => {
      const mockProcess = new EventEmitter() as any;
      mockProcess.stdout = new Readable({ read() {} });
      mockProcess.stderr = new Readable({ read() {} });
      mockProcess.kill = vi.fn();

      vi.mocked(cp.spawn).mockReturnValue(mockProcess);

      setTimeout(() => {
        mockProcess.stdout.push(null);
        mockProcess.stderr.push(null);
        mockProcess.emit("exit", 0);
      }, 10);

      const result = await spawnProcess(baseOptions);

      expect(result.stdout).toBe("");
      expect(result.stderr).toBe("");
      expect(result.exitCode).toBe(0);
    });
  });
});