feat: CLI & MCP progress tracking for parse-prd command (#1048)
* initial cutover * update log to debug * update tracker to pass units * update test to match new base tracker format * add streamTextService mocks * remove unused imports * Ensure the CLI waits for async main() completion * refactor to reduce code duplication * update comment * reuse function * ensure targetTag is defined in streaming mode * avoid throwing inside process.exit spy * check for null * remove reference to generate * fix formatting * fix textStream assignment * ensure no division by 0 * fix jest chalk mocks * refactor for maintainability * Improve bar chart calculation logic for consistent visual representation * use custom streaming error types; fix mocks * Update streamText extraction in parse-prd.js to match actual service response * remove check - doesn't belong here * update mocks * remove streaming test that wasn't really doing anything * add comment * make parsing logic more DRY * fix formatting * Fix textStream extraction to match actual service response * fix mock * Add a cleanup method to ensure proper resource disposal and prevent memory leaks * debounce progress updates to reduce UI flicker during rapid updates * Implement timeout protection for streaming operations (60-second timeout) with automatic fallback to non-streaming mode. * clear timeout properly * Add a maximum buffer size limit (1MB) to prevent unbounded memory growth with very large streaming responses. * fix formatting * remove duplicate mock * better docs * fix formatting * sanitize the dynamic property name * Fix incorrect remaining progress calculation * Use onError callback instead of console.warn * Remove unused chalk import * Add missing custom validator in fallback parsing configuration * add custom validator parameter in fallback parsing * chore: fix package-lock.json * chore: large code refactor * chore: increase timeout from 1 minute to 3 minutes * fix: refactor and fix streaming * Merge remote-tracking branch 'origin/next' into joedanz/parse-prd-progress * fix: cleanup and fix unit tests * chore: fix unit tests * chore: fix format * chore: run format * chore: fix weird CI unit test error * chore: fix format --------- Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
This commit is contained in:
5
.changeset/floppy-starts-find.md
Normal file
5
.changeset/floppy-starts-find.md
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
"task-master-ai": minor
|
||||||
|
---
|
||||||
|
|
||||||
|
Add CLI & MCP progress tracking for parse-prd command.
|
||||||
8
.taskmaster/docs/test-prd.txt
Normal file
8
.taskmaster/docs/test-prd.txt
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
Simple Todo App PRD
|
||||||
|
|
||||||
|
Create a basic todo list application with the following features:
|
||||||
|
1. Add new todos
|
||||||
|
2. Mark todos as complete
|
||||||
|
3. Delete todos
|
||||||
|
|
||||||
|
That's it. Keep it simple.
|
||||||
@@ -32,7 +32,7 @@ import { TASKMASTER_TASKS_FILE } from '../../../../src/constants/paths.js';
|
|||||||
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
||||||
*/
|
*/
|
||||||
export async function parsePRDDirect(args, log, context = {}) {
|
export async function parsePRDDirect(args, log, context = {}) {
|
||||||
const { session } = context;
|
const { session, reportProgress } = context;
|
||||||
// Extract projectRoot from args
|
// Extract projectRoot from args
|
||||||
const {
|
const {
|
||||||
input: inputArg,
|
input: inputArg,
|
||||||
@@ -164,6 +164,7 @@ export async function parsePRDDirect(args, log, context = {}) {
|
|||||||
force,
|
force,
|
||||||
append,
|
append,
|
||||||
research,
|
research,
|
||||||
|
reportProgress,
|
||||||
commandName: 'parse-prd',
|
commandName: 'parse-prd',
|
||||||
outputType: 'mcp'
|
outputType: 'mcp'
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -7,7 +7,8 @@ import { z } from 'zod';
|
|||||||
import {
|
import {
|
||||||
handleApiResult,
|
handleApiResult,
|
||||||
withNormalizedProjectRoot,
|
withNormalizedProjectRoot,
|
||||||
createErrorResponse
|
createErrorResponse,
|
||||||
|
checkProgressCapability
|
||||||
} from './utils.js';
|
} from './utils.js';
|
||||||
import { parsePRDDirect } from '../core/task-master-core.js';
|
import { parsePRDDirect } from '../core/task-master-core.js';
|
||||||
import {
|
import {
|
||||||
@@ -64,19 +65,24 @@ export function registerParsePRDTool(server) {
|
|||||||
.optional()
|
.optional()
|
||||||
.describe('Append generated tasks to existing file.')
|
.describe('Append generated tasks to existing file.')
|
||||||
}),
|
}),
|
||||||
execute: withNormalizedProjectRoot(async (args, { log, session }) => {
|
execute: withNormalizedProjectRoot(
|
||||||
|
async (args, { log, session, reportProgress }) => {
|
||||||
try {
|
try {
|
||||||
const resolvedTag = resolveTag({
|
const resolvedTag = resolveTag({
|
||||||
projectRoot: args.projectRoot,
|
projectRoot: args.projectRoot,
|
||||||
tag: args.tag
|
tag: args.tag
|
||||||
});
|
});
|
||||||
|
const progressCapability = checkProgressCapability(
|
||||||
|
reportProgress,
|
||||||
|
log
|
||||||
|
);
|
||||||
const result = await parsePRDDirect(
|
const result = await parsePRDDirect(
|
||||||
{
|
{
|
||||||
...args,
|
...args,
|
||||||
tag: resolvedTag
|
tag: resolvedTag
|
||||||
},
|
},
|
||||||
log,
|
log,
|
||||||
{ session }
|
{ session, reportProgress: progressCapability }
|
||||||
);
|
);
|
||||||
return handleApiResult(
|
return handleApiResult(
|
||||||
result,
|
result,
|
||||||
@@ -89,6 +95,7 @@ export function registerParsePRDTool(server) {
|
|||||||
log.error(`Error in parse_prd: ${error.message}`);
|
log.error(`Error in parse_prd: ${error.message}`);
|
||||||
return createErrorResponse(`Failed to parse PRD: ${error.message}`);
|
return createErrorResponse(`Failed to parse PRD: ${error.message}`);
|
||||||
}
|
}
|
||||||
})
|
}
|
||||||
|
)
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -778,6 +778,77 @@ function withNormalizedProjectRoot(executeFn) {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks progress reporting capability and returns the validated function or undefined.
|
||||||
|
*
|
||||||
|
* STANDARD PATTERN for AI-powered, long-running operations (parse-prd, expand-task, expand-all, analyze):
|
||||||
|
*
|
||||||
|
* This helper should be used as the first step in any MCP tool that performs long-running
|
||||||
|
* AI operations. It validates the availability of progress reporting and provides consistent
|
||||||
|
* logging about the capability status.
|
||||||
|
*
|
||||||
|
* Operations that should use this pattern:
|
||||||
|
* - parse-prd: Parsing PRD documents with AI
|
||||||
|
* - expand-task: Expanding tasks into subtasks
|
||||||
|
* - expand-all: Expanding all tasks in batch
|
||||||
|
* - analyze-complexity: Analyzing task complexity
|
||||||
|
* - update-task: Updating tasks with AI assistance
|
||||||
|
* - add-task: Creating new tasks with AI
|
||||||
|
* - Any operation that makes AI service calls
|
||||||
|
*
|
||||||
|
* @example Basic usage in a tool's execute function:
|
||||||
|
* ```javascript
|
||||||
|
* import { checkProgressCapability } from './utils.js';
|
||||||
|
*
|
||||||
|
* async execute(args, context) {
|
||||||
|
* const { log, reportProgress, session } = context;
|
||||||
|
*
|
||||||
|
* // Always validate progress capability first
|
||||||
|
* const progressCapability = checkProgressCapability(reportProgress, log);
|
||||||
|
*
|
||||||
|
* // Pass to direct function - it handles undefined gracefully
|
||||||
|
* const result = await expandTask(taskId, numSubtasks, {
|
||||||
|
* session,
|
||||||
|
* reportProgress: progressCapability,
|
||||||
|
* mcpLog: log
|
||||||
|
* });
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* @example With progress reporting available:
|
||||||
|
* ```javascript
|
||||||
|
* // When reportProgress is available, users see real-time updates:
|
||||||
|
* // "Starting PRD analysis (Input: 5432 tokens)..."
|
||||||
|
* // "Task 1/10 - Implement user authentication"
|
||||||
|
* // "Task 2/10 - Create database schema"
|
||||||
|
* // "Task Generation Completed | Tokens: 5432/1234"
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* @example Without progress reporting (graceful degradation):
|
||||||
|
* ```javascript
|
||||||
|
* // When reportProgress is not available:
|
||||||
|
* // - Operation runs normally without progress updates
|
||||||
|
* // - Debug log: "reportProgress not available - operation will run without progress updates"
|
||||||
|
* // - User gets final result after completion
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* @param {Function|undefined} reportProgress - The reportProgress function from MCP context.
|
||||||
|
* Expected signature: async (progress: {progress: number, total: number, message: string}) => void
|
||||||
|
* @param {Object} log - Logger instance with debug, info, warn, error methods
|
||||||
|
* @returns {Function|undefined} The validated reportProgress function or undefined if not available
|
||||||
|
*/
|
||||||
|
function checkProgressCapability(reportProgress, log) {
|
||||||
|
// Validate that reportProgress is available for long-running operations
|
||||||
|
if (typeof reportProgress !== 'function') {
|
||||||
|
log.debug(
|
||||||
|
'reportProgress not available - operation will run without progress updates'
|
||||||
|
);
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
return reportProgress;
|
||||||
|
}
|
||||||
|
|
||||||
// Ensure all functions are exported
|
// Ensure all functions are exported
|
||||||
export {
|
export {
|
||||||
getProjectRoot,
|
getProjectRoot,
|
||||||
@@ -792,5 +863,6 @@ export {
|
|||||||
createLogWrapper,
|
createLogWrapper,
|
||||||
normalizeProjectRoot,
|
normalizeProjectRoot,
|
||||||
getRawProjectRootFromSession,
|
getRawProjectRootFromSession,
|
||||||
withNormalizedProjectRoot
|
withNormalizedProjectRoot,
|
||||||
|
checkProgressCapability
|
||||||
};
|
};
|
||||||
|
|||||||
562
package-lock.json
generated
562
package-lock.json
generated
@@ -27,12 +27,14 @@
|
|||||||
"@aws-sdk/credential-providers": "^3.817.0",
|
"@aws-sdk/credential-providers": "^3.817.0",
|
||||||
"@inquirer/search": "^3.0.15",
|
"@inquirer/search": "^3.0.15",
|
||||||
"@openrouter/ai-sdk-provider": "^0.4.5",
|
"@openrouter/ai-sdk-provider": "^0.4.5",
|
||||||
|
"@streamparser/json": "^0.0.22",
|
||||||
"ai": "^4.3.10",
|
"ai": "^4.3.10",
|
||||||
"ajv": "^8.17.1",
|
"ajv": "^8.17.1",
|
||||||
"ajv-formats": "^3.0.1",
|
"ajv-formats": "^3.0.1",
|
||||||
"boxen": "^8.0.1",
|
"boxen": "^8.0.1",
|
||||||
"chalk": "^5.4.1",
|
"chalk": "^5.4.1",
|
||||||
"cli-highlight": "^2.1.11",
|
"cli-highlight": "^2.1.11",
|
||||||
|
"cli-progress": "^3.12.0",
|
||||||
"cli-table3": "^0.6.5",
|
"cli-table3": "^0.6.5",
|
||||||
"commander": "^11.1.0",
|
"commander": "^11.1.0",
|
||||||
"cors": "^2.8.5",
|
"cors": "^2.8.5",
|
||||||
@@ -4153,9 +4155,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@google/gemini-cli-core/node_modules/dotenv": {
|
"node_modules/@google/gemini-cli-core/node_modules/dotenv": {
|
||||||
"version": "17.2.0",
|
"version": "17.2.1",
|
||||||
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.2.0.tgz",
|
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.2.1.tgz",
|
||||||
"integrity": "sha512-Q4sgBT60gzd0BB0lSyYD3xM4YxrXA9y4uBDof1JNYGzOXrQdQ6yX+7XIAqoFOGQFOTK1D3Hts5OllpxMDZFONQ==",
|
"integrity": "sha512-kQhDYKZecqnM0fCnzI5eIv5L4cAe/iRI+HqMbO/hbRdTAeXDG+M9FjipUxNfbARuEg4iHIbhnhs78BCHNbSxEQ==",
|
||||||
"license": "BSD-2-Clause",
|
"license": "BSD-2-Clause",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"engines": {
|
"engines": {
|
||||||
@@ -4229,9 +4231,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@google/genai": {
|
"node_modules/@google/genai": {
|
||||||
"version": "1.10.0",
|
"version": "1.11.0",
|
||||||
"resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.10.0.tgz",
|
"resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.11.0.tgz",
|
||||||
"integrity": "sha512-PR4tLuiIFMrpAiiCko2Z16ydikFsPF1c5TBfI64hlZcv3xBEApSCceLuDYu1pNMq2SkNh4r66J4AG+ZexBnMLw==",
|
"integrity": "sha512-4XFAHCvU91ewdWOU3RUdSeXpDuZRJHNYLqT9LKw7WqPjRQcEJvVU+VOU49ocruaSp8VuLKMecl0iadlQK+Zgfw==",
|
||||||
"license": "Apache-2.0",
|
"license": "Apache-2.0",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
@@ -9622,6 +9624,12 @@
|
|||||||
"node": "^12.20 || >=14.13"
|
"node": "^12.20 || >=14.13"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@streamparser/json": {
|
||||||
|
"version": "0.0.22",
|
||||||
|
"resolved": "https://registry.npmjs.org/@streamparser/json/-/json-0.0.22.tgz",
|
||||||
|
"integrity": "sha512-b6gTSBjJ8G8SuO3Gbbj+zXbVx8NSs1EbpbMKpzGLWMdkR+98McH9bEjSz3+0mPJf68c5nxa3CrJHp5EQNXM6zQ==",
|
||||||
|
"license": "MIT"
|
||||||
|
},
|
||||||
"node_modules/@szmarczak/http-timer": {
|
"node_modules/@szmarczak/http-timer": {
|
||||||
"version": "5.0.1",
|
"version": "5.0.1",
|
||||||
"resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz",
|
"resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz",
|
||||||
@@ -9680,23 +9688,6 @@
|
|||||||
"@tailwindcss/oxide-win32-x64-msvc": "4.1.11"
|
"@tailwindcss/oxide-win32-x64-msvc": "4.1.11"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@tailwindcss/oxide-android-arm64": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-3IfFuATVRUMZZprEIx9OGDjG3Ou3jG4xQzNTvjDoKmU9JdmoCohQJ83MYd0GPnQIu89YoJqvMM0G3uqLRFtetg==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"android"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-darwin-arm64": {
|
"node_modules/@tailwindcss/oxide-darwin-arm64": {
|
||||||
"version": "4.1.11",
|
"version": "4.1.11",
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.11.tgz",
|
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.11.tgz",
|
||||||
@@ -9714,189 +9705,6 @@
|
|||||||
"node": ">= 10"
|
"node": ">= 10"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@tailwindcss/oxide-darwin-x64": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-EgnK8kRchgmgzG6jE10UQNaH9Mwi2n+yw1jWmof9Vyg2lpKNX2ioe7CJdf9M5f8V9uaQxInenZkOxnTVL3fhAw==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"darwin"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-freebsd-x64": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-xdqKtbpHs7pQhIKmqVpxStnY1skuNh4CtbcyOHeX1YBE0hArj2romsFGb6yUmzkq/6M24nkxDqU8GYrKrz+UcA==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"freebsd"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-ryHQK2eyDYYMwB5wZL46uoxz2zzDZsFBwfjssgB7pzytAeCCa6glsiJGjhTEddq/4OsIjsLNMAiMlHNYnkEEeg==",
|
|
||||||
"cpu": [
|
|
||||||
"arm"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-linux-arm64-gnu": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-mYwqheq4BXF83j/w75ewkPJmPZIqqP1nhoghS9D57CLjsh3Nfq0m4ftTotRYtGnZd3eCztgbSPJ9QhfC91gDZQ==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-linux-arm64-musl": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-m/NVRFNGlEHJrNVk3O6I9ggVuNjXHIPoD6bqay/pubtYC9QIdAMpS+cswZQPBLvVvEF6GtSNONbDkZrjWZXYNQ==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-linux-x64-gnu": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-YW6sblI7xukSD2TdbbaeQVDysIm/UPJtObHJHKxDEcW2exAtY47j52f8jZXkqE1krdnkhCMGqP3dbniu1Te2Fg==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-linux-x64-musl": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-e3C/RRhGunWYNC3aSF7exsQkdXzQ/M+aYuZHKnw4U7KQwTJotnWsGOIVih0s2qQzmEzOFIJ3+xt7iq67K/p56Q==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-wasm32-wasi": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-Xo1+/GU0JEN/C/dvcammKHzeM6NqKovG+6921MR6oadee5XPBaKOumrJCXvopJ/Qb5TH7LX/UAywbqrP4lax0g==",
|
|
||||||
"bundleDependencies": [
|
|
||||||
"@napi-rs/wasm-runtime",
|
|
||||||
"@emnapi/core",
|
|
||||||
"@emnapi/runtime",
|
|
||||||
"@tybys/wasm-util",
|
|
||||||
"@emnapi/wasi-threads",
|
|
||||||
"tslib"
|
|
||||||
],
|
|
||||||
"cpu": [
|
|
||||||
"wasm32"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"dependencies": {
|
|
||||||
"@emnapi/core": "^1.4.3",
|
|
||||||
"@emnapi/runtime": "^1.4.3",
|
|
||||||
"@emnapi/wasi-threads": "^1.0.2",
|
|
||||||
"@napi-rs/wasm-runtime": "^0.2.11",
|
|
||||||
"@tybys/wasm-util": "^0.9.0",
|
|
||||||
"tslib": "^2.8.0"
|
|
||||||
},
|
|
||||||
"engines": {
|
|
||||||
"node": ">=14.0.0"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-UgKYx5PwEKrac3GPNPf6HVMNhUIGuUh4wlDFR2jYYdkX6pL/rn73zTq/4pzUm8fOjAn5L8zDeHp9iXmUGOXZ+w==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"win32"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/oxide-win32-x64-msvc": {
|
|
||||||
"version": "4.1.11",
|
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.11.tgz",
|
|
||||||
"integrity": "sha512-YfHoggn1j0LK7wR82TOucWc5LDCguHnoS879idHekmmiR7g9HUtMw9MI0NHatS28u/Xlkfi9w5RJWgz2Dl+5Qg==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"win32"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 10"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@tailwindcss/postcss": {
|
"node_modules/@tailwindcss/postcss": {
|
||||||
"version": "4.1.11",
|
"version": "4.1.11",
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.11.tgz",
|
"resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.11.tgz",
|
||||||
@@ -10638,34 +10446,6 @@
|
|||||||
"@vscode/vsce-sign-win32-x64": "2.0.5"
|
"@vscode/vsce-sign-win32-x64": "2.0.5"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@vscode/vsce-sign-alpine-arm64": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-alpine-arm64/-/vsce-sign-alpine-arm64-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-XVmnF40APwRPXSLYA28Ye+qWxB25KhSVpF2eZVtVOs6g7fkpOxsVnpRU1Bz2xG4ySI79IRuapDJoAQFkoOgfdQ==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"alpine"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce-sign-alpine-x64": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-alpine-x64/-/vsce-sign-alpine-x64-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-JuxY3xcquRsOezKq6PEHwCgd1rh1GnhyH6urVEWUzWn1c1PC4EOoyffMD+zLZtFuZF5qR1I0+cqDRNKyPvpK7Q==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"alpine"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce-sign-darwin-arm64": {
|
"node_modules/@vscode/vsce-sign-darwin-arm64": {
|
||||||
"version": "2.0.5",
|
"version": "2.0.5",
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-darwin-arm64/-/vsce-sign-darwin-arm64-2.0.5.tgz",
|
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-darwin-arm64/-/vsce-sign-darwin-arm64-2.0.5.tgz",
|
||||||
@@ -10680,90 +10460,6 @@
|
|||||||
"darwin"
|
"darwin"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@vscode/vsce-sign-darwin-x64": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-darwin-x64/-/vsce-sign-darwin-x64-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-ma9JDC7FJ16SuPXlLKkvOD2qLsmW/cKfqK4zzM2iJE1PbckF3BlR08lYqHV89gmuoTpYB55+z8Y5Fz4wEJBVDA==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"darwin"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce-sign-linux-arm": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-linux-arm/-/vsce-sign-linux-arm-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-cdCwtLGmvC1QVrkIsyzv01+o9eR+wodMJUZ9Ak3owhcGxPRB53/WvrDHAFYA6i8Oy232nuen1YqWeEohqBuSzA==",
|
|
||||||
"cpu": [
|
|
||||||
"arm"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce-sign-linux-arm64": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-linux-arm64/-/vsce-sign-linux-arm64-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-Hr1o0veBymg9SmkCqYnfaiUnes5YK6k/lKFA5MhNmiEN5fNqxyPUCdRZMFs3Ajtx2OFW4q3KuYVRwGA7jdLo7Q==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce-sign-linux-x64": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-linux-x64/-/vsce-sign-linux-x64-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-XLT0gfGMcxk6CMRLDkgqEPTyG8Oa0OFe1tPv2RVbphSOjFWJwZgK3TYWx39i/7gqpDHlax0AP6cgMygNJrA6zg==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce-sign-win32-arm64": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-win32-arm64/-/vsce-sign-win32-arm64-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-hco8eaoTcvtmuPhavyCZhrk5QIcLiyAUhEso87ApAWDllG7djIrWiOCtqn48k4pHz+L8oCQlE0nwNHfcYcxOPw==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"win32"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce-sign-win32-x64": {
|
|
||||||
"version": "2.0.5",
|
|
||||||
"resolved": "https://registry.npmjs.org/@vscode/vsce-sign-win32-x64/-/vsce-sign-win32-x64-2.0.5.tgz",
|
|
||||||
"integrity": "sha512-1ixKFGM2FwM+6kQS2ojfY3aAelICxjiCzeg4nTHpkeU1Tfs4RC+lVLrgq5NwcBC7ZLr6UfY3Ct3D6suPeOf7BQ==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "SEE LICENSE IN LICENSE.txt",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"win32"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"node_modules/@vscode/vsce/node_modules/ansi-styles": {
|
"node_modules/@vscode/vsce/node_modules/ansi-styles": {
|
||||||
"version": "3.2.1",
|
"version": "3.2.1",
|
||||||
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
|
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
|
||||||
@@ -12883,6 +12579,47 @@
|
|||||||
"url": "https://github.com/chalk/chalk?sponsor=1"
|
"url": "https://github.com/chalk/chalk?sponsor=1"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/cli-progress": {
|
||||||
|
"version": "3.12.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/cli-progress/-/cli-progress-3.12.0.tgz",
|
||||||
|
"integrity": "sha512-tRkV3HJ1ASwm19THiiLIXLO7Im7wlTuKnvkYaTkyoAPefqjNg7W7DHKUlGRxy9vxDvbyCYQkQozvptuMkGCg8A==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"string-width": "^4.2.3"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/cli-progress/node_modules/emoji-regex": {
|
||||||
|
"version": "8.0.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
|
||||||
|
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
|
||||||
|
"license": "MIT"
|
||||||
|
},
|
||||||
|
"node_modules/cli-progress/node_modules/is-fullwidth-code-point": {
|
||||||
|
"version": "3.0.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
|
||||||
|
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
|
||||||
|
"license": "MIT",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=8"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/cli-progress/node_modules/string-width": {
|
||||||
|
"version": "4.2.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
|
||||||
|
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"emoji-regex": "^8.0.0",
|
||||||
|
"is-fullwidth-code-point": "^3.0.0",
|
||||||
|
"strip-ansi": "^6.0.1"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=8"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/cli-spinners": {
|
"node_modules/cli-spinners": {
|
||||||
"version": "2.9.2",
|
"version": "2.9.2",
|
||||||
"resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz",
|
"resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz",
|
||||||
@@ -19956,195 +19693,6 @@
|
|||||||
"url": "https://opencollective.com/parcel"
|
"url": "https://opencollective.com/parcel"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/lightningcss-darwin-x64": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"darwin"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-freebsd-x64": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"freebsd"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-linux-arm-gnueabihf": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q==",
|
|
||||||
"cpu": [
|
|
||||||
"arm"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-linux-arm64-gnu": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-linux-arm64-musl": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-linux-x64-gnu": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-linux-x64-musl": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-win32-arm64-msvc": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"win32"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lightningcss-win32-x64-msvc": {
|
|
||||||
"version": "1.30.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.1.tgz",
|
|
||||||
"integrity": "sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"dev": true,
|
|
||||||
"license": "MPL-2.0",
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"win32"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 12.0.0"
|
|
||||||
},
|
|
||||||
"funding": {
|
|
||||||
"type": "opencollective",
|
|
||||||
"url": "https://opencollective.com/parcel"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/lilconfig": {
|
"node_modules/lilconfig": {
|
||||||
"version": "2.1.0",
|
"version": "2.1.0",
|
||||||
"resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
|
"resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
|
||||||
|
|||||||
@@ -54,12 +54,14 @@
|
|||||||
"@aws-sdk/credential-providers": "^3.817.0",
|
"@aws-sdk/credential-providers": "^3.817.0",
|
||||||
"@inquirer/search": "^3.0.15",
|
"@inquirer/search": "^3.0.15",
|
||||||
"@openrouter/ai-sdk-provider": "^0.4.5",
|
"@openrouter/ai-sdk-provider": "^0.4.5",
|
||||||
|
"@streamparser/json": "^0.0.22",
|
||||||
"ai": "^4.3.10",
|
"ai": "^4.3.10",
|
||||||
"ajv": "^8.17.1",
|
"ajv": "^8.17.1",
|
||||||
"ajv-formats": "^3.0.1",
|
"ajv-formats": "^3.0.1",
|
||||||
"boxen": "^8.0.1",
|
"boxen": "^8.0.1",
|
||||||
"chalk": "^5.4.1",
|
"chalk": "^5.4.1",
|
||||||
"cli-highlight": "^2.1.11",
|
"cli-highlight": "^2.1.11",
|
||||||
|
"cli-progress": "^3.12.0",
|
||||||
"cli-table3": "^0.6.5",
|
"cli-table3": "^0.6.5",
|
||||||
"commander": "^11.1.0",
|
"commander": "^11.1.0",
|
||||||
"cors": "^2.8.5",
|
"cors": "^2.8.5",
|
||||||
|
|||||||
@@ -91,45 +91,74 @@ function _getProvider(providerName) {
|
|||||||
|
|
||||||
// Helper function to get cost for a specific model
|
// Helper function to get cost for a specific model
|
||||||
function _getCostForModel(providerName, modelId) {
|
function _getCostForModel(providerName, modelId) {
|
||||||
|
const DEFAULT_COST = { inputCost: 0, outputCost: 0, currency: 'USD' };
|
||||||
|
|
||||||
if (!MODEL_MAP || !MODEL_MAP[providerName]) {
|
if (!MODEL_MAP || !MODEL_MAP[providerName]) {
|
||||||
log(
|
log(
|
||||||
'warn',
|
'warn',
|
||||||
`Provider "${providerName}" not found in MODEL_MAP. Cannot determine cost for model ${modelId}.`
|
`Provider "${providerName}" not found in MODEL_MAP. Cannot determine cost for model ${modelId}.`
|
||||||
);
|
);
|
||||||
return { inputCost: 0, outputCost: 0, currency: 'USD' }; // Default to zero cost
|
return DEFAULT_COST;
|
||||||
}
|
}
|
||||||
|
|
||||||
const modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);
|
const modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);
|
||||||
|
|
||||||
if (!modelData || !modelData.cost_per_1m_tokens) {
|
if (!modelData?.cost_per_1m_tokens) {
|
||||||
log(
|
log(
|
||||||
'debug',
|
'debug',
|
||||||
`Cost data not found for model "${modelId}" under provider "${providerName}". Assuming zero cost.`
|
`Cost data not found for model "${modelId}" under provider "${providerName}". Assuming zero cost.`
|
||||||
);
|
);
|
||||||
return { inputCost: 0, outputCost: 0, currency: 'USD' }; // Default to zero cost
|
return DEFAULT_COST;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure currency is part of the returned object, defaulting if not present
|
const costs = modelData.cost_per_1m_tokens;
|
||||||
const currency = modelData.cost_per_1m_tokens.currency || 'USD';
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
inputCost: modelData.cost_per_1m_tokens.input || 0,
|
inputCost: costs.input || 0,
|
||||||
outputCost: modelData.cost_per_1m_tokens.output || 0,
|
outputCost: costs.output || 0,
|
||||||
currency: currency
|
currency: costs.currency || 'USD'
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculate cost from token counts and cost per million
|
||||||
|
* @param {number} inputTokens - Number of input tokens
|
||||||
|
* @param {number} outputTokens - Number of output tokens
|
||||||
|
* @param {number} inputCost - Cost per million input tokens
|
||||||
|
* @param {number} outputCost - Cost per million output tokens
|
||||||
|
* @returns {number} Total calculated cost
|
||||||
|
*/
|
||||||
|
function _calculateCost(inputTokens, outputTokens, inputCost, outputCost) {
|
||||||
|
const calculatedCost =
|
||||||
|
((inputTokens || 0) / 1_000_000) * inputCost +
|
||||||
|
((outputTokens || 0) / 1_000_000) * outputCost;
|
||||||
|
return parseFloat(calculatedCost.toFixed(6));
|
||||||
|
}
|
||||||
|
|
||||||
// Helper function to get tag information for responses
|
// Helper function to get tag information for responses
|
||||||
function _getTagInfo(projectRoot) {
|
function _getTagInfo(projectRoot) {
|
||||||
|
const DEFAULT_TAG_INFO = { currentTag: 'master', availableTags: ['master'] };
|
||||||
|
|
||||||
try {
|
try {
|
||||||
if (!projectRoot) {
|
if (!projectRoot) {
|
||||||
return { currentTag: 'master', availableTags: ['master'] };
|
return DEFAULT_TAG_INFO;
|
||||||
}
|
}
|
||||||
|
|
||||||
const currentTag = getCurrentTag(projectRoot);
|
const currentTag = getCurrentTag(projectRoot) || 'master';
|
||||||
|
const availableTags = _readAvailableTags(projectRoot);
|
||||||
|
|
||||||
|
return { currentTag, availableTags };
|
||||||
|
} catch (error) {
|
||||||
|
if (getDebugFlag()) {
|
||||||
|
log('debug', `Error getting tag information: ${error.message}`);
|
||||||
|
}
|
||||||
|
return DEFAULT_TAG_INFO;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract method for reading available tags
|
||||||
|
function _readAvailableTags(projectRoot) {
|
||||||
|
const DEFAULT_TAGS = ['master'];
|
||||||
|
|
||||||
// Read available tags from tasks.json
|
|
||||||
let availableTags = ['master']; // Default fallback
|
|
||||||
try {
|
try {
|
||||||
const path = require('path');
|
const path = require('path');
|
||||||
const fs = require('fs');
|
const fs = require('fs');
|
||||||
@@ -140,42 +169,37 @@ function _getTagInfo(projectRoot) {
|
|||||||
'tasks.json'
|
'tasks.json'
|
||||||
);
|
);
|
||||||
|
|
||||||
if (fs.existsSync(tasksPath)) {
|
if (!fs.existsSync(tasksPath)) {
|
||||||
|
return DEFAULT_TAGS;
|
||||||
|
}
|
||||||
|
|
||||||
const tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
|
const tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
|
||||||
if (tasksData && typeof tasksData === 'object') {
|
if (!tasksData || typeof tasksData !== 'object') {
|
||||||
|
return DEFAULT_TAGS;
|
||||||
|
}
|
||||||
|
|
||||||
// Check if it's tagged format (has tag-like keys with tasks arrays)
|
// Check if it's tagged format (has tag-like keys with tasks arrays)
|
||||||
const potentialTags = Object.keys(tasksData).filter(
|
const potentialTags = Object.keys(tasksData).filter((key) =>
|
||||||
(key) =>
|
_isValidTaggedTask(tasksData[key])
|
||||||
tasksData[key] &&
|
|
||||||
typeof tasksData[key] === 'object' &&
|
|
||||||
Array.isArray(tasksData[key].tasks)
|
|
||||||
);
|
);
|
||||||
|
|
||||||
if (potentialTags.length > 0) {
|
return potentialTags.length > 0 ? potentialTags : DEFAULT_TAGS;
|
||||||
availableTags = potentialTags;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (readError) {
|
} catch (readError) {
|
||||||
// Silently fall back to default if we can't read tasks file
|
|
||||||
if (getDebugFlag()) {
|
if (getDebugFlag()) {
|
||||||
log(
|
log(
|
||||||
'debug',
|
'debug',
|
||||||
`Could not read tasks file for available tags: ${readError.message}`
|
`Could not read tasks file for available tags: ${readError.message}`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
return DEFAULT_TAGS;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return {
|
// Helper to validate tagged task structure
|
||||||
currentTag: currentTag || 'master',
|
function _isValidTaggedTask(taskData) {
|
||||||
availableTags: availableTags
|
return (
|
||||||
};
|
taskData && typeof taskData === 'object' && Array.isArray(taskData.tasks)
|
||||||
} catch (error) {
|
);
|
||||||
if (getDebugFlag()) {
|
|
||||||
log('debug', `Error getting tag information: ${error.message}`);
|
|
||||||
}
|
|
||||||
return { currentTag: 'master', availableTags: ['master'] };
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Configuration for Retries ---
|
// --- Configuration for Retries ---
|
||||||
@@ -244,6 +268,65 @@ function _extractErrorMessage(error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get role configuration (provider and model) based on role type
|
||||||
|
* @param {string} role - The role ('main', 'research', 'fallback')
|
||||||
|
* @param {string} projectRoot - Project root path
|
||||||
|
* @returns {Object|null} Configuration object with provider and modelId
|
||||||
|
*/
|
||||||
|
function _getRoleConfiguration(role, projectRoot) {
|
||||||
|
const roleConfigs = {
|
||||||
|
main: {
|
||||||
|
provider: getMainProvider(projectRoot),
|
||||||
|
modelId: getMainModelId(projectRoot)
|
||||||
|
},
|
||||||
|
research: {
|
||||||
|
provider: getResearchProvider(projectRoot),
|
||||||
|
modelId: getResearchModelId(projectRoot)
|
||||||
|
},
|
||||||
|
fallback: {
|
||||||
|
provider: getFallbackProvider(projectRoot),
|
||||||
|
modelId: getFallbackModelId(projectRoot)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return roleConfigs[role] || null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Vertex AI specific configuration
|
||||||
|
* @param {string} projectRoot - Project root path
|
||||||
|
* @param {Object} session - Session object
|
||||||
|
* @returns {Object} Vertex AI configuration parameters
|
||||||
|
*/
|
||||||
|
function _getVertexConfiguration(projectRoot, session) {
|
||||||
|
const projectId =
|
||||||
|
getVertexProjectId(projectRoot) ||
|
||||||
|
resolveEnvVariable('VERTEX_PROJECT_ID', session, projectRoot);
|
||||||
|
|
||||||
|
const location =
|
||||||
|
getVertexLocation(projectRoot) ||
|
||||||
|
resolveEnvVariable('VERTEX_LOCATION', session, projectRoot) ||
|
||||||
|
'us-central1';
|
||||||
|
|
||||||
|
const credentialsPath = resolveEnvVariable(
|
||||||
|
'GOOGLE_APPLICATION_CREDENTIALS',
|
||||||
|
session,
|
||||||
|
projectRoot
|
||||||
|
);
|
||||||
|
|
||||||
|
log(
|
||||||
|
'debug',
|
||||||
|
`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`
|
||||||
|
);
|
||||||
|
|
||||||
|
return {
|
||||||
|
projectId,
|
||||||
|
location,
|
||||||
|
...(credentialsPath && { credentials: { credentialsFromEnv: true } })
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Internal helper to resolve the API key for a given provider.
|
* Internal helper to resolve the API key for a given provider.
|
||||||
* @param {string} providerName - The name of the provider (lowercase).
|
* @param {string} providerName - The name of the provider (lowercase).
|
||||||
@@ -424,18 +507,13 @@ async function _unifiedServiceRunner(serviceType, params) {
|
|||||||
let telemetryData = null;
|
let telemetryData = null;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
log('info', `New AI service call with role: ${currentRole}`);
|
log('debug', `New AI service call with role: ${currentRole}`);
|
||||||
|
|
||||||
if (currentRole === 'main') {
|
const roleConfig = _getRoleConfiguration(
|
||||||
providerName = getMainProvider(effectiveProjectRoot);
|
currentRole,
|
||||||
modelId = getMainModelId(effectiveProjectRoot);
|
effectiveProjectRoot
|
||||||
} else if (currentRole === 'research') {
|
);
|
||||||
providerName = getResearchProvider(effectiveProjectRoot);
|
if (!roleConfig) {
|
||||||
modelId = getResearchModelId(effectiveProjectRoot);
|
|
||||||
} else if (currentRole === 'fallback') {
|
|
||||||
providerName = getFallbackProvider(effectiveProjectRoot);
|
|
||||||
modelId = getFallbackModelId(effectiveProjectRoot);
|
|
||||||
} else {
|
|
||||||
log(
|
log(
|
||||||
'error',
|
'error',
|
||||||
`Unknown role encountered in _unifiedServiceRunner: ${currentRole}`
|
`Unknown role encountered in _unifiedServiceRunner: ${currentRole}`
|
||||||
@@ -444,6 +522,8 @@ async function _unifiedServiceRunner(serviceType, params) {
|
|||||||
lastError || new Error(`Unknown AI role specified: ${currentRole}`);
|
lastError || new Error(`Unknown AI role specified: ${currentRole}`);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
providerName = roleConfig.provider;
|
||||||
|
modelId = roleConfig.modelId;
|
||||||
|
|
||||||
if (!providerName || !modelId) {
|
if (!providerName || !modelId) {
|
||||||
log(
|
log(
|
||||||
@@ -517,41 +597,9 @@ async function _unifiedServiceRunner(serviceType, params) {
|
|||||||
|
|
||||||
// Handle Vertex AI specific configuration
|
// Handle Vertex AI specific configuration
|
||||||
if (providerName?.toLowerCase() === 'vertex') {
|
if (providerName?.toLowerCase() === 'vertex') {
|
||||||
// Get Vertex project ID and location
|
providerSpecificParams = _getVertexConfiguration(
|
||||||
const projectId =
|
effectiveProjectRoot,
|
||||||
getVertexProjectId(effectiveProjectRoot) ||
|
session
|
||||||
resolveEnvVariable(
|
|
||||||
'VERTEX_PROJECT_ID',
|
|
||||||
session,
|
|
||||||
effectiveProjectRoot
|
|
||||||
);
|
|
||||||
|
|
||||||
const location =
|
|
||||||
getVertexLocation(effectiveProjectRoot) ||
|
|
||||||
resolveEnvVariable(
|
|
||||||
'VERTEX_LOCATION',
|
|
||||||
session,
|
|
||||||
effectiveProjectRoot
|
|
||||||
) ||
|
|
||||||
'us-central1';
|
|
||||||
|
|
||||||
// Get credentials path if available
|
|
||||||
const credentialsPath = resolveEnvVariable(
|
|
||||||
'GOOGLE_APPLICATION_CREDENTIALS',
|
|
||||||
session,
|
|
||||||
effectiveProjectRoot
|
|
||||||
);
|
|
||||||
|
|
||||||
// Add Vertex-specific parameters
|
|
||||||
providerSpecificParams = {
|
|
||||||
projectId,
|
|
||||||
location,
|
|
||||||
...(credentialsPath && { credentials: { credentialsFromEnv: true } })
|
|
||||||
};
|
|
||||||
|
|
||||||
log(
|
|
||||||
'debug',
|
|
||||||
`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -594,7 +642,8 @@ async function _unifiedServiceRunner(serviceType, params) {
|
|||||||
temperature: roleParams.temperature,
|
temperature: roleParams.temperature,
|
||||||
messages,
|
messages,
|
||||||
...(baseURL && { baseURL }),
|
...(baseURL && { baseURL }),
|
||||||
...(serviceType === 'generateObject' && { schema, objectName }),
|
...((serviceType === 'generateObject' ||
|
||||||
|
serviceType === 'streamObject') && { schema, objectName }),
|
||||||
...providerSpecificParams,
|
...providerSpecificParams,
|
||||||
...restApiParams
|
...restApiParams
|
||||||
};
|
};
|
||||||
@@ -635,7 +684,10 @@ async function _unifiedServiceRunner(serviceType, params) {
|
|||||||
finalMainResult = providerResponse.text;
|
finalMainResult = providerResponse.text;
|
||||||
} else if (serviceType === 'generateObject') {
|
} else if (serviceType === 'generateObject') {
|
||||||
finalMainResult = providerResponse.object;
|
finalMainResult = providerResponse.object;
|
||||||
} else if (serviceType === 'streamText') {
|
} else if (
|
||||||
|
serviceType === 'streamText' ||
|
||||||
|
serviceType === 'streamObject'
|
||||||
|
) {
|
||||||
finalMainResult = providerResponse;
|
finalMainResult = providerResponse;
|
||||||
} else {
|
} else {
|
||||||
log(
|
log(
|
||||||
@@ -651,7 +703,9 @@ async function _unifiedServiceRunner(serviceType, params) {
|
|||||||
return {
|
return {
|
||||||
mainResult: finalMainResult,
|
mainResult: finalMainResult,
|
||||||
telemetryData: telemetryData,
|
telemetryData: telemetryData,
|
||||||
tagInfo: tagInfo
|
tagInfo: tagInfo,
|
||||||
|
providerName: providerName,
|
||||||
|
modelId: modelId
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
const cleanMessage = _extractErrorMessage(error);
|
const cleanMessage = _extractErrorMessage(error);
|
||||||
@@ -732,6 +786,31 @@ async function streamTextService(params) {
|
|||||||
return _unifiedServiceRunner('streamText', combinedParams);
|
return _unifiedServiceRunner('streamText', combinedParams);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Unified service function for streaming structured objects.
|
||||||
|
* Uses Vercel AI SDK's streamObject for proper JSON streaming.
|
||||||
|
*
|
||||||
|
* @param {object} params - Parameters for the service call.
|
||||||
|
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
|
||||||
|
* @param {object} [params.session=null] - Optional MCP session object.
|
||||||
|
* @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.
|
||||||
|
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.
|
||||||
|
* @param {string} params.prompt - The prompt for the AI.
|
||||||
|
* @param {string} [params.systemPrompt] - Optional system prompt.
|
||||||
|
* @param {string} params.commandName - Name of the command invoking the service.
|
||||||
|
* @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.
|
||||||
|
* @returns {Promise<object>} Result object containing the stream and usage data.
|
||||||
|
*/
|
||||||
|
async function streamObjectService(params) {
|
||||||
|
const defaults = { outputType: 'cli' };
|
||||||
|
const combinedParams = { ...defaults, ...params };
|
||||||
|
// Stream object requires a schema
|
||||||
|
if (!combinedParams.schema) {
|
||||||
|
throw new Error('streamObjectService requires a schema parameter');
|
||||||
|
}
|
||||||
|
return _unifiedServiceRunner('streamObject', combinedParams);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Unified service function for generating structured objects.
|
* Unified service function for generating structured objects.
|
||||||
* Handles client retrieval, retries, and fallback sequence.
|
* Handles client retrieval, retries, and fallback sequence.
|
||||||
@@ -792,9 +871,12 @@ async function logAiUsage({
|
|||||||
modelId
|
modelId
|
||||||
);
|
);
|
||||||
|
|
||||||
const totalCost =
|
const totalCost = _calculateCost(
|
||||||
((inputTokens || 0) / 1_000_000) * inputCost +
|
inputTokens,
|
||||||
((outputTokens || 0) / 1_000_000) * outputCost;
|
outputTokens,
|
||||||
|
inputCost,
|
||||||
|
outputCost
|
||||||
|
);
|
||||||
|
|
||||||
const telemetryData = {
|
const telemetryData = {
|
||||||
timestamp,
|
timestamp,
|
||||||
@@ -805,7 +887,7 @@ async function logAiUsage({
|
|||||||
inputTokens: inputTokens || 0,
|
inputTokens: inputTokens || 0,
|
||||||
outputTokens: outputTokens || 0,
|
outputTokens: outputTokens || 0,
|
||||||
totalTokens,
|
totalTokens,
|
||||||
totalCost: parseFloat(totalCost.toFixed(6)),
|
totalCost,
|
||||||
currency // Add currency to the telemetry data
|
currency // Add currency to the telemetry data
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -828,6 +910,7 @@ async function logAiUsage({
|
|||||||
export {
|
export {
|
||||||
generateTextService,
|
generateTextService,
|
||||||
streamTextService,
|
streamTextService,
|
||||||
|
streamObjectService,
|
||||||
generateObjectService,
|
generateObjectService,
|
||||||
logAiUsage
|
logAiUsage
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -912,8 +912,6 @@ function registerCommands(programInstance) {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
let spinner;
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
if (!(await confirmOverwriteIfNeeded())) return;
|
if (!(await confirmOverwriteIfNeeded())) return;
|
||||||
|
|
||||||
@@ -930,7 +928,6 @@ function registerCommands(programInstance) {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
spinner = ora('Parsing PRD and generating tasks...\n').start();
|
|
||||||
// Handle case where getTasksPath() returns null
|
// Handle case where getTasksPath() returns null
|
||||||
const outputPath =
|
const outputPath =
|
||||||
taskMaster.getTasksPath() ||
|
taskMaster.getTasksPath() ||
|
||||||
@@ -942,13 +939,8 @@ function registerCommands(programInstance) {
|
|||||||
projectRoot: taskMaster.getProjectRoot(),
|
projectRoot: taskMaster.getProjectRoot(),
|
||||||
tag: tag
|
tag: tag
|
||||||
});
|
});
|
||||||
spinner.succeed('Tasks generated successfully!');
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
if (spinner) {
|
|
||||||
spinner.fail(`Error parsing PRD: ${error.message}`);
|
|
||||||
} else {
|
|
||||||
console.error(chalk.red(`Error parsing PRD: ${error.message}`));
|
console.error(chalk.red(`Error parsing PRD: ${error.message}`));
|
||||||
}
|
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ export class PromptManager {
|
|||||||
const schema = JSON.parse(schemaContent);
|
const schema = JSON.parse(schemaContent);
|
||||||
|
|
||||||
this.validatePrompt = this.ajv.compile(schema);
|
this.validatePrompt = this.ajv.compile(schema);
|
||||||
log('info', '✓ JSON schema validation enabled');
|
log('debug', '✓ JSON schema validation enabled');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log('warn', `⚠ Schema validation disabled: ${error.message}`);
|
log('warn', `⚠ Schema validation disabled: ${error.message}`);
|
||||||
this.validatePrompt = () => true; // Fallback to no validation
|
this.validatePrompt = () => true; // Fallback to no validation
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import { findTaskById } from './utils.js';
|
import { findTaskById } from './utils.js';
|
||||||
import parsePRD from './task-manager/parse-prd.js';
|
import parsePRD from './task-manager/parse-prd/index.js';
|
||||||
import updateTasks from './task-manager/update-tasks.js';
|
import updateTasks from './task-manager/update-tasks.js';
|
||||||
import updateTaskById from './task-manager/update-task-by-id.js';
|
import updateTaskById from './task-manager/update-task-by-id.js';
|
||||||
import generateTaskFiles from './task-manager/generate-task-files.js';
|
import generateTaskFiles from './task-manager/generate-task-files.js';
|
||||||
|
|||||||
@@ -1,395 +0,0 @@
|
|||||||
import fs from 'fs';
|
|
||||||
import path from 'path';
|
|
||||||
import chalk from 'chalk';
|
|
||||||
import boxen from 'boxen';
|
|
||||||
import { z } from 'zod';
|
|
||||||
|
|
||||||
import {
|
|
||||||
log,
|
|
||||||
writeJSON,
|
|
||||||
enableSilentMode,
|
|
||||||
disableSilentMode,
|
|
||||||
isSilentMode,
|
|
||||||
readJSON,
|
|
||||||
findTaskById,
|
|
||||||
ensureTagMetadata,
|
|
||||||
getCurrentTag
|
|
||||||
} from '../utils.js';
|
|
||||||
|
|
||||||
import { generateObjectService } from '../ai-services-unified.js';
|
|
||||||
import {
|
|
||||||
getDebugFlag,
|
|
||||||
getMainProvider,
|
|
||||||
getResearchProvider,
|
|
||||||
getDefaultPriority
|
|
||||||
} from '../config-manager.js';
|
|
||||||
import { getPromptManager } from '../prompt-manager.js';
|
|
||||||
import { displayAiUsageSummary } from '../ui.js';
|
|
||||||
import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';
|
|
||||||
|
|
||||||
// Define the Zod schema for a SINGLE task object
|
|
||||||
const prdSingleTaskSchema = z.object({
|
|
||||||
id: z.number(),
|
|
||||||
title: z.string().min(1),
|
|
||||||
description: z.string().min(1),
|
|
||||||
details: z.string(),
|
|
||||||
testStrategy: z.string(),
|
|
||||||
priority: z.enum(['high', 'medium', 'low']),
|
|
||||||
dependencies: z.array(z.number()),
|
|
||||||
status: z.string()
|
|
||||||
});
|
|
||||||
|
|
||||||
// Define the Zod schema for the ENTIRE expected AI response object
|
|
||||||
const prdResponseSchema = z.object({
|
|
||||||
tasks: z.array(prdSingleTaskSchema),
|
|
||||||
metadata: z.object({
|
|
||||||
projectName: z.string(),
|
|
||||||
totalTasks: z.number(),
|
|
||||||
sourceFile: z.string(),
|
|
||||||
generatedAt: z.string()
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Parse a PRD file and generate tasks
|
|
||||||
* @param {string} prdPath - Path to the PRD file
|
|
||||||
* @param {string} tasksPath - Path to the tasks.json file
|
|
||||||
* @param {number} numTasks - Number of tasks to generate
|
|
||||||
* @param {Object} options - Additional options
|
|
||||||
* @param {boolean} [options.force=false] - Whether to overwrite existing tasks.json.
|
|
||||||
* @param {boolean} [options.append=false] - Append to existing tasks file.
|
|
||||||
* @param {boolean} [options.research=false] - Use research model for enhanced PRD analysis.
|
|
||||||
* @param {Object} [options.reportProgress] - Function to report progress (optional, likely unused).
|
|
||||||
* @param {Object} [options.mcpLog] - MCP logger object (optional).
|
|
||||||
* @param {Object} [options.session] - Session object from MCP server (optional).
|
|
||||||
* @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).
|
|
||||||
* @param {string} [options.tag] - Target tag for task generation.
|
|
||||||
* @param {string} [outputFormat='text'] - Output format ('text' or 'json').
|
|
||||||
*/
|
|
||||||
async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
|
|
||||||
const {
|
|
||||||
reportProgress,
|
|
||||||
mcpLog,
|
|
||||||
session,
|
|
||||||
projectRoot,
|
|
||||||
force = false,
|
|
||||||
append = false,
|
|
||||||
research = false,
|
|
||||||
tag
|
|
||||||
} = options;
|
|
||||||
const isMCP = !!mcpLog;
|
|
||||||
const outputFormat = isMCP ? 'json' : 'text';
|
|
||||||
|
|
||||||
// Use the provided tag, or the current active tag, or default to 'master'
|
|
||||||
const targetTag = tag;
|
|
||||||
|
|
||||||
const logFn = mcpLog
|
|
||||||
? mcpLog
|
|
||||||
: {
|
|
||||||
// Wrapper for CLI
|
|
||||||
info: (...args) => log('info', ...args),
|
|
||||||
warn: (...args) => log('warn', ...args),
|
|
||||||
error: (...args) => log('error', ...args),
|
|
||||||
debug: (...args) => log('debug', ...args),
|
|
||||||
success: (...args) => log('success', ...args)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Create custom reporter using logFn
|
|
||||||
const report = (message, level = 'info') => {
|
|
||||||
// Check logFn directly
|
|
||||||
if (logFn && typeof logFn[level] === 'function') {
|
|
||||||
logFn[level](message);
|
|
||||||
} else if (!isSilentMode() && outputFormat === 'text') {
|
|
||||||
// Fallback to original log only if necessary and in CLI text mode
|
|
||||||
log(level, message);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
report(
|
|
||||||
`Parsing PRD file: ${prdPath}, Force: ${force}, Append: ${append}, Research: ${research}`
|
|
||||||
);
|
|
||||||
|
|
||||||
let existingTasks = [];
|
|
||||||
let nextId = 1;
|
|
||||||
let aiServiceResponse = null;
|
|
||||||
|
|
||||||
try {
|
|
||||||
// Check if there are existing tasks in the target tag
|
|
||||||
let hasExistingTasksInTag = false;
|
|
||||||
if (fs.existsSync(tasksPath)) {
|
|
||||||
try {
|
|
||||||
// Read the entire file to check if the tag exists
|
|
||||||
const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
|
|
||||||
const allData = JSON.parse(existingFileContent);
|
|
||||||
|
|
||||||
// Check if the target tag exists and has tasks
|
|
||||||
if (
|
|
||||||
allData[targetTag] &&
|
|
||||||
Array.isArray(allData[targetTag].tasks) &&
|
|
||||||
allData[targetTag].tasks.length > 0
|
|
||||||
) {
|
|
||||||
hasExistingTasksInTag = true;
|
|
||||||
existingTasks = allData[targetTag].tasks;
|
|
||||||
nextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1;
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
// If we can't read the file or parse it, assume no existing tasks in this tag
|
|
||||||
hasExistingTasksInTag = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle file existence and overwrite/append logic based on target tag
|
|
||||||
if (hasExistingTasksInTag) {
|
|
||||||
if (append) {
|
|
||||||
report(
|
|
||||||
`Append mode enabled. Found ${existingTasks.length} existing tasks in tag '${targetTag}'. Next ID will be ${nextId}.`,
|
|
||||||
'info'
|
|
||||||
);
|
|
||||||
} else if (!force) {
|
|
||||||
// Not appending and not forcing overwrite, and there are existing tasks in the target tag
|
|
||||||
const overwriteError = new Error(
|
|
||||||
`Tag '${targetTag}' already contains ${existingTasks.length} tasks. Use --force to overwrite or --append to add to existing tasks.`
|
|
||||||
);
|
|
||||||
report(overwriteError.message, 'error');
|
|
||||||
if (outputFormat === 'text') {
|
|
||||||
console.error(chalk.red(overwriteError.message));
|
|
||||||
}
|
|
||||||
throw overwriteError;
|
|
||||||
} else {
|
|
||||||
// Force overwrite is true
|
|
||||||
report(
|
|
||||||
`Force flag enabled. Overwriting existing tasks in tag '${targetTag}'.`,
|
|
||||||
'info'
|
|
||||||
);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// No existing tasks in target tag, proceed without confirmation
|
|
||||||
report(
|
|
||||||
`Tag '${targetTag}' is empty or doesn't exist. Creating/updating tag with new tasks.`,
|
|
||||||
'info'
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
report(`Reading PRD content from ${prdPath}`, 'info');
|
|
||||||
const prdContent = fs.readFileSync(prdPath, 'utf8');
|
|
||||||
if (!prdContent) {
|
|
||||||
throw new Error(`Input file ${prdPath} is empty or could not be read.`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load prompts using PromptManager
|
|
||||||
const promptManager = getPromptManager();
|
|
||||||
|
|
||||||
// Get defaultTaskPriority from config
|
|
||||||
const defaultTaskPriority = getDefaultPriority(projectRoot) || 'medium';
|
|
||||||
|
|
||||||
// Check if Claude Code is being used as the provider
|
|
||||||
const currentProvider = research
|
|
||||||
? getResearchProvider(projectRoot)
|
|
||||||
: getMainProvider(projectRoot);
|
|
||||||
const isClaudeCode = currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE;
|
|
||||||
|
|
||||||
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
|
|
||||||
'parse-prd',
|
|
||||||
{
|
|
||||||
research,
|
|
||||||
numTasks,
|
|
||||||
nextId,
|
|
||||||
prdContent,
|
|
||||||
prdPath,
|
|
||||||
defaultTaskPriority,
|
|
||||||
isClaudeCode,
|
|
||||||
projectRoot: projectRoot || ''
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
// Call the unified AI service
|
|
||||||
report(
|
|
||||||
`Calling AI service to generate tasks from PRD${research ? ' with research-backed analysis' : ''}...`,
|
|
||||||
'info'
|
|
||||||
);
|
|
||||||
|
|
||||||
// Call generateObjectService with the CORRECT schema and additional telemetry params
|
|
||||||
aiServiceResponse = await generateObjectService({
|
|
||||||
role: research ? 'research' : 'main', // Use research role if flag is set
|
|
||||||
session: session,
|
|
||||||
projectRoot: projectRoot,
|
|
||||||
schema: prdResponseSchema,
|
|
||||||
objectName: 'tasks_data',
|
|
||||||
systemPrompt: systemPrompt,
|
|
||||||
prompt: userPrompt,
|
|
||||||
commandName: 'parse-prd',
|
|
||||||
outputType: isMCP ? 'mcp' : 'cli'
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create the directory if it doesn't exist
|
|
||||||
const tasksDir = path.dirname(tasksPath);
|
|
||||||
if (!fs.existsSync(tasksDir)) {
|
|
||||||
fs.mkdirSync(tasksDir, { recursive: true });
|
|
||||||
}
|
|
||||||
logFn.success(
|
|
||||||
`Successfully parsed PRD via AI service${research ? ' with research-backed analysis' : ''}.`
|
|
||||||
);
|
|
||||||
|
|
||||||
// Validate and Process Tasks
|
|
||||||
// const generatedData = aiServiceResponse?.mainResult?.object;
|
|
||||||
|
|
||||||
// Robustly get the actual AI-generated object
|
|
||||||
let generatedData = null;
|
|
||||||
if (aiServiceResponse?.mainResult) {
|
|
||||||
if (
|
|
||||||
typeof aiServiceResponse.mainResult === 'object' &&
|
|
||||||
aiServiceResponse.mainResult !== null &&
|
|
||||||
'tasks' in aiServiceResponse.mainResult
|
|
||||||
) {
|
|
||||||
// If mainResult itself is the object with a 'tasks' property
|
|
||||||
generatedData = aiServiceResponse.mainResult;
|
|
||||||
} else if (
|
|
||||||
typeof aiServiceResponse.mainResult.object === 'object' &&
|
|
||||||
aiServiceResponse.mainResult.object !== null &&
|
|
||||||
'tasks' in aiServiceResponse.mainResult.object
|
|
||||||
) {
|
|
||||||
// If mainResult.object is the object with a 'tasks' property
|
|
||||||
generatedData = aiServiceResponse.mainResult.object;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!generatedData || !Array.isArray(generatedData.tasks)) {
|
|
||||||
logFn.error(
|
|
||||||
`Internal Error: generateObjectService returned unexpected data structure: ${JSON.stringify(generatedData)}`
|
|
||||||
);
|
|
||||||
throw new Error(
|
|
||||||
'AI service returned unexpected data structure after validation.'
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let currentId = nextId;
|
|
||||||
const taskMap = new Map();
|
|
||||||
const processedNewTasks = generatedData.tasks.map((task) => {
|
|
||||||
const newId = currentId++;
|
|
||||||
taskMap.set(task.id, newId);
|
|
||||||
return {
|
|
||||||
...task,
|
|
||||||
id: newId,
|
|
||||||
status: task.status || 'pending',
|
|
||||||
priority: task.priority || 'medium',
|
|
||||||
dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
|
|
||||||
subtasks: [],
|
|
||||||
// Ensure all required fields have values (even if empty strings)
|
|
||||||
title: task.title || '',
|
|
||||||
description: task.description || '',
|
|
||||||
details: task.details || '',
|
|
||||||
testStrategy: task.testStrategy || ''
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
// Remap dependencies for the NEWLY processed tasks
|
|
||||||
processedNewTasks.forEach((task) => {
|
|
||||||
task.dependencies = task.dependencies
|
|
||||||
.map((depId) => taskMap.get(depId)) // Map old AI ID to new sequential ID
|
|
||||||
.filter(
|
|
||||||
(newDepId) =>
|
|
||||||
newDepId != null && // Must exist
|
|
||||||
newDepId < task.id && // Must be a lower ID (could be existing or newly generated)
|
|
||||||
(findTaskById(existingTasks, newDepId) || // Check if it exists in old tasks OR
|
|
||||||
processedNewTasks.some((t) => t.id === newDepId)) // check if it exists in new tasks
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
const finalTasks = append
|
|
||||||
? [...existingTasks, ...processedNewTasks]
|
|
||||||
: processedNewTasks;
|
|
||||||
|
|
||||||
// Read the existing file to preserve other tags
|
|
||||||
let outputData = {};
|
|
||||||
if (fs.existsSync(tasksPath)) {
|
|
||||||
try {
|
|
||||||
const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
|
|
||||||
outputData = JSON.parse(existingFileContent);
|
|
||||||
} catch (error) {
|
|
||||||
// If we can't read the existing file, start with empty object
|
|
||||||
outputData = {};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update only the target tag, preserving other tags
|
|
||||||
outputData[targetTag] = {
|
|
||||||
tasks: finalTasks,
|
|
||||||
metadata: {
|
|
||||||
created:
|
|
||||||
outputData[targetTag]?.metadata?.created || new Date().toISOString(),
|
|
||||||
updated: new Date().toISOString(),
|
|
||||||
description: `Tasks for ${targetTag} context`
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Ensure the target tag has proper metadata
|
|
||||||
ensureTagMetadata(outputData[targetTag], {
|
|
||||||
description: `Tasks for ${targetTag} context`
|
|
||||||
});
|
|
||||||
|
|
||||||
// Write the complete data structure back to the file
|
|
||||||
fs.writeFileSync(tasksPath, JSON.stringify(outputData, null, 2));
|
|
||||||
report(
|
|
||||||
`Successfully ${append ? 'appended' : 'generated'} ${processedNewTasks.length} tasks in ${tasksPath}${research ? ' with research-backed analysis' : ''}`,
|
|
||||||
'success'
|
|
||||||
);
|
|
||||||
|
|
||||||
// Generate markdown task files after writing tasks.json
|
|
||||||
// await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });
|
|
||||||
|
|
||||||
// Handle CLI output (e.g., success message)
|
|
||||||
if (outputFormat === 'text') {
|
|
||||||
console.log(
|
|
||||||
boxen(
|
|
||||||
chalk.green(
|
|
||||||
`Successfully generated ${processedNewTasks.length} new tasks${research ? ' with research-backed analysis' : ''}. Total tasks in ${tasksPath}: ${finalTasks.length}`
|
|
||||||
),
|
|
||||||
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
console.log(
|
|
||||||
boxen(
|
|
||||||
chalk.white.bold('Next Steps:') +
|
|
||||||
'\n\n' +
|
|
||||||
`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\n` +
|
|
||||||
`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`,
|
|
||||||
{
|
|
||||||
padding: 1,
|
|
||||||
borderColor: 'cyan',
|
|
||||||
borderStyle: 'round',
|
|
||||||
margin: { top: 1 }
|
|
||||||
}
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
if (aiServiceResponse && aiServiceResponse.telemetryData) {
|
|
||||||
displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return telemetry data
|
|
||||||
return {
|
|
||||||
success: true,
|
|
||||||
tasksPath,
|
|
||||||
telemetryData: aiServiceResponse?.telemetryData,
|
|
||||||
tagInfo: aiServiceResponse?.tagInfo
|
|
||||||
};
|
|
||||||
} catch (error) {
|
|
||||||
report(`Error parsing PRD: ${error.message}`, 'error');
|
|
||||||
|
|
||||||
// Only show error UI for text output (CLI)
|
|
||||||
if (outputFormat === 'text') {
|
|
||||||
console.error(chalk.red(`Error: ${error.message}`));
|
|
||||||
|
|
||||||
if (getDebugFlag(projectRoot)) {
|
|
||||||
// Use projectRoot for debug flag check
|
|
||||||
console.error(error);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
throw error; // Always re-throw for proper error handling
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export default parsePRD;
|
|
||||||
3
scripts/modules/task-manager/parse-prd/index.js
Normal file
3
scripts/modules/task-manager/parse-prd/index.js
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
// Main entry point for parse-prd module
|
||||||
|
export { default } from './parse-prd.js';
|
||||||
|
export { default as parsePRD } from './parse-prd.js';
|
||||||
105
scripts/modules/task-manager/parse-prd/parse-prd-config.js
Normal file
105
scripts/modules/task-manager/parse-prd/parse-prd-config.js
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
/**
|
||||||
|
* Configuration classes and schemas for PRD parsing
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { z } from 'zod';
|
||||||
|
import { TASK_PRIORITY_OPTIONS } from '../../../../src/constants/task-priority.js';
|
||||||
|
import { getCurrentTag, isSilentMode, log } from '../../utils.js';
|
||||||
|
import { Duration } from '../../../../src/utils/timeout-manager.js';
|
||||||
|
import { CUSTOM_PROVIDERS } from '../../../../src/constants/providers.js';
|
||||||
|
import { getMainProvider, getResearchProvider } from '../../config-manager.js';
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// SCHEMAS
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
// Define the Zod schema for a SINGLE task object
|
||||||
|
export const prdSingleTaskSchema = z.object({
|
||||||
|
id: z.number(),
|
||||||
|
title: z.string().min(1),
|
||||||
|
description: z.string().min(1),
|
||||||
|
details: z.string(),
|
||||||
|
testStrategy: z.string(),
|
||||||
|
priority: z.enum(TASK_PRIORITY_OPTIONS),
|
||||||
|
dependencies: z.array(z.number()),
|
||||||
|
status: z.string()
|
||||||
|
});
|
||||||
|
|
||||||
|
// Define the Zod schema for the ENTIRE expected AI response object
|
||||||
|
export const prdResponseSchema = z.object({
|
||||||
|
tasks: z.array(prdSingleTaskSchema),
|
||||||
|
metadata: z.object({
|
||||||
|
projectName: z.string(),
|
||||||
|
totalTasks: z.number(),
|
||||||
|
sourceFile: z.string(),
|
||||||
|
generatedAt: z.string()
|
||||||
|
})
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// CONFIGURATION CLASSES
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Configuration object for PRD parsing
|
||||||
|
*/
|
||||||
|
export class PrdParseConfig {
|
||||||
|
constructor(prdPath, tasksPath, numTasks, options = {}) {
|
||||||
|
this.prdPath = prdPath;
|
||||||
|
this.tasksPath = tasksPath;
|
||||||
|
this.numTasks = numTasks;
|
||||||
|
this.force = options.force || false;
|
||||||
|
this.append = options.append || false;
|
||||||
|
this.research = options.research || false;
|
||||||
|
this.reportProgress = options.reportProgress;
|
||||||
|
this.mcpLog = options.mcpLog;
|
||||||
|
this.session = options.session;
|
||||||
|
this.projectRoot = options.projectRoot;
|
||||||
|
this.tag = options.tag;
|
||||||
|
this.streamingTimeout =
|
||||||
|
options.streamingTimeout || Duration.seconds(180).milliseconds;
|
||||||
|
|
||||||
|
// Derived values
|
||||||
|
this.targetTag = this.tag || getCurrentTag(this.projectRoot) || 'master';
|
||||||
|
this.isMCP = !!this.mcpLog;
|
||||||
|
this.outputFormat = this.isMCP && !this.reportProgress ? 'json' : 'text';
|
||||||
|
this.useStreaming =
|
||||||
|
typeof this.reportProgress === 'function' || this.outputFormat === 'text';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if Claude Code is being used
|
||||||
|
*/
|
||||||
|
isClaudeCode() {
|
||||||
|
const currentProvider = this.research
|
||||||
|
? getResearchProvider(this.projectRoot)
|
||||||
|
: getMainProvider(this.projectRoot);
|
||||||
|
return currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logging configuration and utilities
|
||||||
|
*/
|
||||||
|
export class LoggingConfig {
|
||||||
|
constructor(mcpLog, reportProgress) {
|
||||||
|
this.isMCP = !!mcpLog;
|
||||||
|
this.outputFormat = this.isMCP && !reportProgress ? 'json' : 'text';
|
||||||
|
|
||||||
|
this.logFn = mcpLog || {
|
||||||
|
info: (...args) => log('info', ...args),
|
||||||
|
warn: (...args) => log('warn', ...args),
|
||||||
|
error: (...args) => log('error', ...args),
|
||||||
|
debug: (...args) => log('debug', ...args),
|
||||||
|
success: (...args) => log('success', ...args)
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
report(message, level = 'info') {
|
||||||
|
if (this.logFn && typeof this.logFn[level] === 'function') {
|
||||||
|
this.logFn[level](message);
|
||||||
|
} else if (!isSilentMode() && this.outputFormat === 'text') {
|
||||||
|
log(level, message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
384
scripts/modules/task-manager/parse-prd/parse-prd-helpers.js
Normal file
384
scripts/modules/task-manager/parse-prd/parse-prd-helpers.js
Normal file
@@ -0,0 +1,384 @@
|
|||||||
|
/**
|
||||||
|
* Helper functions for PRD parsing
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import boxen from 'boxen';
|
||||||
|
import chalk from 'chalk';
|
||||||
|
import { ensureTagMetadata, findTaskById } from '../../utils.js';
|
||||||
|
import { getPriorityIndicators } from '../../../../src/ui/indicators.js';
|
||||||
|
import { displayParsePrdSummary } from '../../../../src/ui/parse-prd.js';
|
||||||
|
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
|
||||||
|
import { displayAiUsageSummary } from '../../ui.js';
|
||||||
|
import { getPromptManager } from '../../prompt-manager.js';
|
||||||
|
import { getDefaultPriority } from '../../config-manager.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Estimate token count from text
|
||||||
|
* @param {string} text - Text to estimate tokens for
|
||||||
|
* @returns {number} Estimated token count
|
||||||
|
*/
|
||||||
|
export function estimateTokens(text) {
|
||||||
|
// Common approximation: ~4 characters per token for English
|
||||||
|
return Math.ceil(text.length / 4);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Read and validate PRD content
|
||||||
|
* @param {string} prdPath - Path to PRD file
|
||||||
|
* @returns {string} PRD content
|
||||||
|
* @throws {Error} If file is empty or cannot be read
|
||||||
|
*/
|
||||||
|
export function readPrdContent(prdPath) {
|
||||||
|
const prdContent = fs.readFileSync(prdPath, 'utf8');
|
||||||
|
if (!prdContent) {
|
||||||
|
throw new Error(`Input file ${prdPath} is empty or could not be read.`);
|
||||||
|
}
|
||||||
|
return prdContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load existing tasks from file
|
||||||
|
* @param {string} tasksPath - Path to tasks file
|
||||||
|
* @param {string} targetTag - Target tag to load from
|
||||||
|
* @returns {{tasks: Array, nextId: number}} Existing tasks and next ID
|
||||||
|
*/
|
||||||
|
export function loadExistingTasks(tasksPath, targetTag) {
|
||||||
|
let existingTasks = [];
|
||||||
|
let nextId = 1;
|
||||||
|
|
||||||
|
if (!fs.existsSync(tasksPath)) {
|
||||||
|
return { existingTasks, nextId };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
|
||||||
|
const allData = JSON.parse(existingFileContent);
|
||||||
|
|
||||||
|
if (allData[targetTag]?.tasks && Array.isArray(allData[targetTag].tasks)) {
|
||||||
|
existingTasks = allData[targetTag].tasks;
|
||||||
|
if (existingTasks.length > 0) {
|
||||||
|
nextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// If we can't read the file or parse it, assume no existing tasks
|
||||||
|
return { existingTasks: [], nextId: 1 };
|
||||||
|
}
|
||||||
|
|
||||||
|
return { existingTasks, nextId };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate overwrite/append operations
|
||||||
|
* @param {Object} params
|
||||||
|
* @returns {void}
|
||||||
|
* @throws {Error} If validation fails
|
||||||
|
*/
|
||||||
|
export function validateFileOperations({
|
||||||
|
existingTasks,
|
||||||
|
targetTag,
|
||||||
|
append,
|
||||||
|
force,
|
||||||
|
isMCP,
|
||||||
|
logger
|
||||||
|
}) {
|
||||||
|
const hasExistingTasks = existingTasks.length > 0;
|
||||||
|
|
||||||
|
if (!hasExistingTasks) {
|
||||||
|
logger.report(
|
||||||
|
`Tag '${targetTag}' is empty or doesn't exist. Creating/updating tag with new tasks.`,
|
||||||
|
'info'
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (append) {
|
||||||
|
logger.report(
|
||||||
|
`Append mode enabled. Found ${existingTasks.length} existing tasks in tag '${targetTag}'.`,
|
||||||
|
'info'
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!force) {
|
||||||
|
const errorMessage = `Tag '${targetTag}' already contains ${existingTasks.length} tasks. Use --force to overwrite or --append to add to existing tasks.`;
|
||||||
|
logger.report(errorMessage, 'error');
|
||||||
|
|
||||||
|
if (isMCP) {
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
} else {
|
||||||
|
console.error(chalk.red(errorMessage));
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.report(
|
||||||
|
`Force flag enabled. Overwriting existing tasks in tag '${targetTag}'.`,
|
||||||
|
'debug'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process and transform tasks with ID remapping
|
||||||
|
* @param {Array} rawTasks - Raw tasks from AI
|
||||||
|
* @param {number} startId - Starting ID for new tasks
|
||||||
|
* @param {Array} existingTasks - Existing tasks for dependency validation
|
||||||
|
* @param {string} defaultPriority - Default priority for tasks
|
||||||
|
* @returns {Array} Processed tasks with remapped IDs
|
||||||
|
*/
|
||||||
|
export function processTasks(
|
||||||
|
rawTasks,
|
||||||
|
startId,
|
||||||
|
existingTasks,
|
||||||
|
defaultPriority
|
||||||
|
) {
|
||||||
|
let currentId = startId;
|
||||||
|
const taskMap = new Map();
|
||||||
|
|
||||||
|
// First pass: assign new IDs and create mapping
|
||||||
|
const processedTasks = rawTasks.map((task) => {
|
||||||
|
const newId = currentId++;
|
||||||
|
taskMap.set(task.id, newId);
|
||||||
|
|
||||||
|
return {
|
||||||
|
...task,
|
||||||
|
id: newId,
|
||||||
|
status: task.status || 'pending',
|
||||||
|
priority: task.priority || defaultPriority,
|
||||||
|
dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
|
||||||
|
subtasks: task.subtasks || [],
|
||||||
|
// Ensure all required fields have values
|
||||||
|
title: task.title || '',
|
||||||
|
description: task.description || '',
|
||||||
|
details: task.details || '',
|
||||||
|
testStrategy: task.testStrategy || ''
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
// Second pass: remap dependencies
|
||||||
|
processedTasks.forEach((task) => {
|
||||||
|
task.dependencies = task.dependencies
|
||||||
|
.map((depId) => taskMap.get(depId))
|
||||||
|
.filter(
|
||||||
|
(newDepId) =>
|
||||||
|
newDepId != null &&
|
||||||
|
newDepId < task.id &&
|
||||||
|
(findTaskById(existingTasks, newDepId) ||
|
||||||
|
processedTasks.some((t) => t.id === newDepId))
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
return processedTasks;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Save tasks to file with tag support
|
||||||
|
* @param {string} tasksPath - Path to save tasks
|
||||||
|
* @param {Array} tasks - Tasks to save
|
||||||
|
* @param {string} targetTag - Target tag
|
||||||
|
* @param {Object} logger - Logger instance
|
||||||
|
*/
|
||||||
|
export function saveTasksToFile(tasksPath, tasks, targetTag, logger) {
|
||||||
|
// Create directory if it doesn't exist
|
||||||
|
const tasksDir = path.dirname(tasksPath);
|
||||||
|
if (!fs.existsSync(tasksDir)) {
|
||||||
|
fs.mkdirSync(tasksDir, { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read existing file to preserve other tags
|
||||||
|
let outputData = {};
|
||||||
|
if (fs.existsSync(tasksPath)) {
|
||||||
|
try {
|
||||||
|
const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
|
||||||
|
outputData = JSON.parse(existingFileContent);
|
||||||
|
} catch (error) {
|
||||||
|
outputData = {};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update only the target tag
|
||||||
|
outputData[targetTag] = {
|
||||||
|
tasks: tasks,
|
||||||
|
metadata: {
|
||||||
|
created:
|
||||||
|
outputData[targetTag]?.metadata?.created || new Date().toISOString(),
|
||||||
|
updated: new Date().toISOString(),
|
||||||
|
description: `Tasks for ${targetTag} context`
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Ensure proper metadata
|
||||||
|
ensureTagMetadata(outputData[targetTag], {
|
||||||
|
description: `Tasks for ${targetTag} context`
|
||||||
|
});
|
||||||
|
|
||||||
|
// Write back to file
|
||||||
|
fs.writeFileSync(tasksPath, JSON.stringify(outputData, null, 2));
|
||||||
|
|
||||||
|
logger.report(
|
||||||
|
`Successfully saved ${tasks.length} tasks to ${tasksPath}`,
|
||||||
|
'debug'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build prompts for AI service
|
||||||
|
* @param {Object} config - Configuration object
|
||||||
|
* @param {string} prdContent - PRD content
|
||||||
|
* @param {number} nextId - Next task ID
|
||||||
|
* @returns {Promise<{systemPrompt: string, userPrompt: string}>}
|
||||||
|
*/
|
||||||
|
export async function buildPrompts(config, prdContent, nextId) {
|
||||||
|
const promptManager = getPromptManager();
|
||||||
|
const defaultTaskPriority =
|
||||||
|
getDefaultPriority(config.projectRoot) || 'medium';
|
||||||
|
|
||||||
|
return promptManager.loadPrompt('parse-prd', {
|
||||||
|
research: config.research,
|
||||||
|
numTasks: config.numTasks,
|
||||||
|
nextId,
|
||||||
|
prdContent,
|
||||||
|
prdPath: config.prdPath,
|
||||||
|
defaultTaskPriority,
|
||||||
|
isClaudeCode: config.isClaudeCode(),
|
||||||
|
projectRoot: config.projectRoot || ''
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle progress reporting for both CLI and MCP
|
||||||
|
* @param {Object} params
|
||||||
|
*/
|
||||||
|
export async function reportTaskProgress({
|
||||||
|
task,
|
||||||
|
currentCount,
|
||||||
|
totalTasks,
|
||||||
|
estimatedTokens,
|
||||||
|
progressTracker,
|
||||||
|
reportProgress,
|
||||||
|
priorityMap,
|
||||||
|
defaultPriority,
|
||||||
|
estimatedInputTokens
|
||||||
|
}) {
|
||||||
|
const priority = task.priority || defaultPriority;
|
||||||
|
const priorityIndicator = priorityMap[priority] || priorityMap.medium;
|
||||||
|
|
||||||
|
// CLI progress tracker
|
||||||
|
if (progressTracker) {
|
||||||
|
progressTracker.addTaskLine(currentCount, task.title, priority);
|
||||||
|
if (estimatedTokens) {
|
||||||
|
progressTracker.updateTokens(estimatedInputTokens, estimatedTokens);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MCP progress reporting
|
||||||
|
if (reportProgress) {
|
||||||
|
try {
|
||||||
|
const outputTokens = estimatedTokens
|
||||||
|
? Math.floor(estimatedTokens / totalTasks)
|
||||||
|
: 0;
|
||||||
|
|
||||||
|
await reportProgress({
|
||||||
|
progress: currentCount,
|
||||||
|
total: totalTasks,
|
||||||
|
message: `${priorityIndicator} Task ${currentCount}/${totalTasks} - ${task.title} | ~Output: ${outputTokens} tokens`
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
// Ignore progress reporting errors
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display completion summary for CLI
|
||||||
|
* @param {Object} params
|
||||||
|
*/
|
||||||
|
export async function displayCliSummary({
|
||||||
|
processedTasks,
|
||||||
|
nextId,
|
||||||
|
summary,
|
||||||
|
prdPath,
|
||||||
|
tasksPath,
|
||||||
|
usedFallback,
|
||||||
|
aiServiceResponse
|
||||||
|
}) {
|
||||||
|
// Generate task file names
|
||||||
|
const taskFilesGenerated = (() => {
|
||||||
|
if (!Array.isArray(processedTasks) || processedTasks.length === 0) {
|
||||||
|
return `task_${String(nextId).padStart(3, '0')}.txt`;
|
||||||
|
}
|
||||||
|
const firstNewTaskId = processedTasks[0].id;
|
||||||
|
const lastNewTaskId = processedTasks[processedTasks.length - 1].id;
|
||||||
|
if (processedTasks.length === 1) {
|
||||||
|
return `task_${String(firstNewTaskId).padStart(3, '0')}.txt`;
|
||||||
|
}
|
||||||
|
return `task_${String(firstNewTaskId).padStart(3, '0')}.txt -> task_${String(lastNewTaskId).padStart(3, '0')}.txt`;
|
||||||
|
})();
|
||||||
|
|
||||||
|
displayParsePrdSummary({
|
||||||
|
totalTasks: processedTasks.length,
|
||||||
|
taskPriorities: summary.taskPriorities,
|
||||||
|
prdFilePath: prdPath,
|
||||||
|
outputPath: tasksPath,
|
||||||
|
elapsedTime: summary.elapsedTime,
|
||||||
|
usedFallback,
|
||||||
|
taskFilesGenerated,
|
||||||
|
actionVerb: summary.actionVerb
|
||||||
|
});
|
||||||
|
|
||||||
|
// Display telemetry
|
||||||
|
if (aiServiceResponse?.telemetryData) {
|
||||||
|
// For streaming, wait briefly to allow usage data to be captured
|
||||||
|
if (aiServiceResponse.mainResult?.usage) {
|
||||||
|
// Give the usage promise a short time to resolve
|
||||||
|
await TimeoutManager.withSoftTimeout(
|
||||||
|
aiServiceResponse.mainResult.usage,
|
||||||
|
1000,
|
||||||
|
undefined
|
||||||
|
);
|
||||||
|
}
|
||||||
|
displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display non-streaming CLI output
|
||||||
|
* @param {Object} params
|
||||||
|
*/
|
||||||
|
export function displayNonStreamingCliOutput({
|
||||||
|
processedTasks,
|
||||||
|
research,
|
||||||
|
finalTasks,
|
||||||
|
tasksPath,
|
||||||
|
aiServiceResponse
|
||||||
|
}) {
|
||||||
|
console.log(
|
||||||
|
boxen(
|
||||||
|
chalk.green(
|
||||||
|
`Successfully generated ${processedTasks.length} new tasks${research ? ' with research-backed analysis' : ''}. Total tasks in ${tasksPath}: ${finalTasks.length}`
|
||||||
|
),
|
||||||
|
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
boxen(
|
||||||
|
chalk.white.bold('Next Steps:') +
|
||||||
|
'\n\n' +
|
||||||
|
`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\n` +
|
||||||
|
`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`,
|
||||||
|
{
|
||||||
|
padding: 1,
|
||||||
|
borderColor: 'cyan',
|
||||||
|
borderStyle: 'round',
|
||||||
|
margin: { top: 1 }
|
||||||
|
}
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
if (aiServiceResponse?.telemetryData) {
|
||||||
|
displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,85 @@
|
|||||||
|
/**
|
||||||
|
* Non-streaming handler for PRD parsing
|
||||||
|
*/
|
||||||
|
|
||||||
|
import ora from 'ora';
|
||||||
|
import { generateObjectService } from '../../ai-services-unified.js';
|
||||||
|
import { LoggingConfig, prdResponseSchema } from './parse-prd-config.js';
|
||||||
|
import { estimateTokens } from './parse-prd-helpers.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle non-streaming AI service call
|
||||||
|
* @param {Object} config - Configuration object
|
||||||
|
* @param {Object} prompts - System and user prompts
|
||||||
|
* @returns {Promise<Object>} Generated tasks and telemetry
|
||||||
|
*/
|
||||||
|
export async function handleNonStreamingService(config, prompts) {
|
||||||
|
const logger = new LoggingConfig(config.mcpLog, config.reportProgress);
|
||||||
|
const { systemPrompt, userPrompt } = prompts;
|
||||||
|
const estimatedInputTokens = estimateTokens(systemPrompt + userPrompt);
|
||||||
|
|
||||||
|
// Initialize spinner for CLI
|
||||||
|
let spinner = null;
|
||||||
|
if (config.outputFormat === 'text' && !config.isMCP) {
|
||||||
|
spinner = ora('Parsing PRD and generating tasks...\n').start();
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Call AI service
|
||||||
|
logger.report(
|
||||||
|
`Calling AI service to generate tasks from PRD${config.research ? ' with research-backed analysis' : ''}...`,
|
||||||
|
'info'
|
||||||
|
);
|
||||||
|
|
||||||
|
const aiServiceResponse = await generateObjectService({
|
||||||
|
role: config.research ? 'research' : 'main',
|
||||||
|
session: config.session,
|
||||||
|
projectRoot: config.projectRoot,
|
||||||
|
schema: prdResponseSchema,
|
||||||
|
objectName: 'tasks_data',
|
||||||
|
systemPrompt,
|
||||||
|
prompt: userPrompt,
|
||||||
|
commandName: 'parse-prd',
|
||||||
|
outputType: config.isMCP ? 'mcp' : 'cli'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Extract generated data
|
||||||
|
let generatedData = null;
|
||||||
|
if (aiServiceResponse?.mainResult) {
|
||||||
|
if (
|
||||||
|
typeof aiServiceResponse.mainResult === 'object' &&
|
||||||
|
aiServiceResponse.mainResult !== null &&
|
||||||
|
'tasks' in aiServiceResponse.mainResult
|
||||||
|
) {
|
||||||
|
generatedData = aiServiceResponse.mainResult;
|
||||||
|
} else if (
|
||||||
|
typeof aiServiceResponse.mainResult.object === 'object' &&
|
||||||
|
aiServiceResponse.mainResult.object !== null &&
|
||||||
|
'tasks' in aiServiceResponse.mainResult.object
|
||||||
|
) {
|
||||||
|
generatedData = aiServiceResponse.mainResult.object;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!generatedData || !Array.isArray(generatedData.tasks)) {
|
||||||
|
throw new Error(
|
||||||
|
'AI service returned unexpected data structure after validation.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (spinner) {
|
||||||
|
spinner.succeed('Tasks generated successfully!');
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
parsedTasks: generatedData.tasks,
|
||||||
|
aiServiceResponse,
|
||||||
|
estimatedInputTokens
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
if (spinner) {
|
||||||
|
spinner.fail(`Error parsing PRD: ${error.message}`);
|
||||||
|
}
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
653
scripts/modules/task-manager/parse-prd/parse-prd-streaming.js
Normal file
653
scripts/modules/task-manager/parse-prd/parse-prd-streaming.js
Normal file
@@ -0,0 +1,653 @@
|
|||||||
|
/**
|
||||||
|
* Streaming handler for PRD parsing
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createParsePrdTracker } from '../../../../src/progress/parse-prd-tracker.js';
|
||||||
|
import { displayParsePrdStart } from '../../../../src/ui/parse-prd.js';
|
||||||
|
import { getPriorityIndicators } from '../../../../src/ui/indicators.js';
|
||||||
|
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
|
||||||
|
import {
|
||||||
|
streamObjectService,
|
||||||
|
generateObjectService
|
||||||
|
} from '../../ai-services-unified.js';
|
||||||
|
import {
|
||||||
|
getMainModelId,
|
||||||
|
getParametersForRole,
|
||||||
|
getResearchModelId,
|
||||||
|
getDefaultPriority
|
||||||
|
} from '../../config-manager.js';
|
||||||
|
import { LoggingConfig, prdResponseSchema } from './parse-prd-config.js';
|
||||||
|
import { estimateTokens, reportTaskProgress } from './parse-prd-helpers.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract a readable stream from various stream result formats
|
||||||
|
* @param {any} streamResult - The stream result object from AI service
|
||||||
|
* @returns {AsyncIterable|ReadableStream} The extracted stream
|
||||||
|
* @throws {StreamingError} If no valid stream can be extracted
|
||||||
|
*/
|
||||||
|
function extractStreamFromResult(streamResult) {
|
||||||
|
if (!streamResult) {
|
||||||
|
throw new StreamingError(
|
||||||
|
'Stream result is null or undefined',
|
||||||
|
STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try extraction strategies in priority order
|
||||||
|
const stream = tryExtractStream(streamResult);
|
||||||
|
|
||||||
|
if (!stream) {
|
||||||
|
throw new StreamingError(
|
||||||
|
'Stream object is not async iterable or readable',
|
||||||
|
STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return stream;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Try to extract stream using various strategies
|
||||||
|
*/
|
||||||
|
function tryExtractStream(streamResult) {
|
||||||
|
const streamExtractors = [
|
||||||
|
{ key: 'partialObjectStream', extractor: (obj) => obj.partialObjectStream },
|
||||||
|
{ key: 'textStream', extractor: (obj) => extractCallable(obj.textStream) },
|
||||||
|
{ key: 'stream', extractor: (obj) => extractCallable(obj.stream) },
|
||||||
|
{ key: 'baseStream', extractor: (obj) => obj.baseStream }
|
||||||
|
];
|
||||||
|
|
||||||
|
for (const { key, extractor } of streamExtractors) {
|
||||||
|
const stream = extractor(streamResult);
|
||||||
|
if (stream && isStreamable(stream)) {
|
||||||
|
return stream;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if already streamable
|
||||||
|
return isStreamable(streamResult) ? streamResult : null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract a property that might be a function or direct value
|
||||||
|
*/
|
||||||
|
function extractCallable(property) {
|
||||||
|
if (!property) return null;
|
||||||
|
return typeof property === 'function' ? property() : property;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if object is streamable (async iterable or readable stream)
|
||||||
|
*/
|
||||||
|
function isStreamable(obj) {
|
||||||
|
return (
|
||||||
|
obj &&
|
||||||
|
(typeof obj[Symbol.asyncIterator] === 'function' ||
|
||||||
|
(obj.getReader && typeof obj.getReader === 'function'))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle streaming AI service call and parsing
|
||||||
|
* @param {Object} config - Configuration object
|
||||||
|
* @param {Object} prompts - System and user prompts
|
||||||
|
* @param {number} numTasks - Number of tasks to generate
|
||||||
|
* @returns {Promise<Object>} Parsed tasks and telemetry
|
||||||
|
*/
|
||||||
|
export async function handleStreamingService(config, prompts, numTasks) {
|
||||||
|
const context = createStreamingContext(config, prompts, numTasks);
|
||||||
|
|
||||||
|
await initializeProgress(config, numTasks, context.estimatedInputTokens);
|
||||||
|
|
||||||
|
const aiServiceResponse = await callAIServiceWithTimeout(
|
||||||
|
config,
|
||||||
|
prompts,
|
||||||
|
config.streamingTimeout
|
||||||
|
);
|
||||||
|
|
||||||
|
const { progressTracker, priorityMap } = await setupProgressTracking(
|
||||||
|
config,
|
||||||
|
numTasks
|
||||||
|
);
|
||||||
|
|
||||||
|
const streamingResult = await processStreamResponse(
|
||||||
|
aiServiceResponse.mainResult,
|
||||||
|
config,
|
||||||
|
prompts,
|
||||||
|
numTasks,
|
||||||
|
progressTracker,
|
||||||
|
priorityMap,
|
||||||
|
context.defaultPriority,
|
||||||
|
context.estimatedInputTokens,
|
||||||
|
context.logger
|
||||||
|
);
|
||||||
|
|
||||||
|
validateStreamingResult(streamingResult);
|
||||||
|
|
||||||
|
// If we have usage data from streaming, log telemetry now
|
||||||
|
if (streamingResult.usage && config.projectRoot) {
|
||||||
|
const { logAiUsage } = await import('../../ai-services-unified.js');
|
||||||
|
const { getUserId } = await import('../../config-manager.js');
|
||||||
|
const userId = getUserId(config.projectRoot);
|
||||||
|
|
||||||
|
if (userId && aiServiceResponse.providerName && aiServiceResponse.modelId) {
|
||||||
|
try {
|
||||||
|
const telemetryData = await logAiUsage({
|
||||||
|
userId,
|
||||||
|
commandName: 'parse-prd',
|
||||||
|
providerName: aiServiceResponse.providerName,
|
||||||
|
modelId: aiServiceResponse.modelId,
|
||||||
|
inputTokens: streamingResult.usage.promptTokens || 0,
|
||||||
|
outputTokens: streamingResult.usage.completionTokens || 0,
|
||||||
|
outputType: config.isMCP ? 'mcp' : 'cli'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Add telemetry to the response
|
||||||
|
if (telemetryData) {
|
||||||
|
aiServiceResponse.telemetryData = telemetryData;
|
||||||
|
}
|
||||||
|
} catch (telemetryError) {
|
||||||
|
context.logger.report(
|
||||||
|
`Failed to log telemetry: ${telemetryError.message}`,
|
||||||
|
'debug'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return prepareFinalResult(
|
||||||
|
streamingResult,
|
||||||
|
aiServiceResponse,
|
||||||
|
context.estimatedInputTokens,
|
||||||
|
progressTracker
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create streaming context with common values
|
||||||
|
*/
|
||||||
|
function createStreamingContext(config, prompts, numTasks) {
|
||||||
|
const { systemPrompt, userPrompt } = prompts;
|
||||||
|
return {
|
||||||
|
logger: new LoggingConfig(config.mcpLog, config.reportProgress),
|
||||||
|
estimatedInputTokens: estimateTokens(systemPrompt + userPrompt),
|
||||||
|
defaultPriority: getDefaultPriority(config.projectRoot) || 'medium'
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate streaming result has tasks
|
||||||
|
*/
|
||||||
|
function validateStreamingResult(streamingResult) {
|
||||||
|
if (streamingResult.parsedTasks.length === 0) {
|
||||||
|
throw new Error('No tasks were generated from the PRD');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize progress reporting
|
||||||
|
*/
|
||||||
|
async function initializeProgress(config, numTasks, estimatedInputTokens) {
|
||||||
|
if (config.reportProgress) {
|
||||||
|
await config.reportProgress({
|
||||||
|
progress: 0,
|
||||||
|
total: numTasks,
|
||||||
|
message: `Starting PRD analysis (Input: ${estimatedInputTokens} tokens)${config.research ? ' with research' : ''}...`
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Call AI service with timeout
|
||||||
|
*/
|
||||||
|
async function callAIServiceWithTimeout(config, prompts, timeout) {
|
||||||
|
const { systemPrompt, userPrompt } = prompts;
|
||||||
|
|
||||||
|
return await TimeoutManager.withTimeout(
|
||||||
|
streamObjectService({
|
||||||
|
role: config.research ? 'research' : 'main',
|
||||||
|
session: config.session,
|
||||||
|
projectRoot: config.projectRoot,
|
||||||
|
schema: prdResponseSchema,
|
||||||
|
systemPrompt,
|
||||||
|
prompt: userPrompt,
|
||||||
|
commandName: 'parse-prd',
|
||||||
|
outputType: config.isMCP ? 'mcp' : 'cli'
|
||||||
|
}),
|
||||||
|
timeout,
|
||||||
|
'Streaming operation'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Setup progress tracking for CLI output
|
||||||
|
*/
|
||||||
|
async function setupProgressTracking(config, numTasks) {
|
||||||
|
const priorityMap = getPriorityIndicators(config.isMCP);
|
||||||
|
let progressTracker = null;
|
||||||
|
|
||||||
|
if (config.outputFormat === 'text' && !config.isMCP) {
|
||||||
|
progressTracker = createParsePrdTracker({
|
||||||
|
numUnits: numTasks,
|
||||||
|
unitName: 'task',
|
||||||
|
append: config.append
|
||||||
|
});
|
||||||
|
|
||||||
|
const modelId = config.research ? getResearchModelId() : getMainModelId();
|
||||||
|
const parameters = getParametersForRole(
|
||||||
|
config.research ? 'research' : 'main'
|
||||||
|
);
|
||||||
|
|
||||||
|
displayParsePrdStart({
|
||||||
|
prdFilePath: config.prdPath,
|
||||||
|
outputPath: config.tasksPath,
|
||||||
|
numTasks,
|
||||||
|
append: config.append,
|
||||||
|
research: config.research,
|
||||||
|
force: config.force,
|
||||||
|
existingTasks: [],
|
||||||
|
nextId: 1,
|
||||||
|
model: modelId || 'Default',
|
||||||
|
temperature: parameters?.temperature || 0.7
|
||||||
|
});
|
||||||
|
|
||||||
|
progressTracker.start();
|
||||||
|
}
|
||||||
|
|
||||||
|
return { progressTracker, priorityMap };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process stream response based on stream type
|
||||||
|
*/
|
||||||
|
async function processStreamResponse(
|
||||||
|
streamResult,
|
||||||
|
config,
|
||||||
|
prompts,
|
||||||
|
numTasks,
|
||||||
|
progressTracker,
|
||||||
|
priorityMap,
|
||||||
|
defaultPriority,
|
||||||
|
estimatedInputTokens,
|
||||||
|
logger
|
||||||
|
) {
|
||||||
|
const { systemPrompt, userPrompt } = prompts;
|
||||||
|
const context = {
|
||||||
|
config: {
|
||||||
|
...config,
|
||||||
|
schema: prdResponseSchema // Add the schema for generateObject fallback
|
||||||
|
},
|
||||||
|
numTasks,
|
||||||
|
progressTracker,
|
||||||
|
priorityMap,
|
||||||
|
defaultPriority,
|
||||||
|
estimatedInputTokens,
|
||||||
|
prompt: userPrompt,
|
||||||
|
systemPrompt: systemPrompt
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
const streamingState = {
|
||||||
|
lastPartialObject: null,
|
||||||
|
taskCount: 0,
|
||||||
|
estimatedOutputTokens: 0,
|
||||||
|
usage: null
|
||||||
|
};
|
||||||
|
|
||||||
|
await processPartialStream(
|
||||||
|
streamResult.partialObjectStream,
|
||||||
|
streamingState,
|
||||||
|
context
|
||||||
|
);
|
||||||
|
|
||||||
|
// Wait for usage data if available
|
||||||
|
if (streamResult.usage) {
|
||||||
|
try {
|
||||||
|
streamingState.usage = await streamResult.usage;
|
||||||
|
} catch (usageError) {
|
||||||
|
logger.report(
|
||||||
|
`Failed to get usage data: ${usageError.message}`,
|
||||||
|
'debug'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return finalizeStreamingResults(streamingState, context);
|
||||||
|
} catch (error) {
|
||||||
|
logger.report(
|
||||||
|
`StreamObject processing failed: ${error.message}. Falling back to generateObject.`,
|
||||||
|
'debug'
|
||||||
|
);
|
||||||
|
return await processWithGenerateObject(context, logger);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process the partial object stream
|
||||||
|
*/
|
||||||
|
async function processPartialStream(partialStream, state, context) {
|
||||||
|
for await (const partialObject of partialStream) {
|
||||||
|
state.lastPartialObject = partialObject;
|
||||||
|
|
||||||
|
if (partialObject) {
|
||||||
|
state.estimatedOutputTokens = estimateTokens(
|
||||||
|
JSON.stringify(partialObject)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
await processStreamingTasks(partialObject, state, context);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process tasks from a streaming partial object
|
||||||
|
*/
|
||||||
|
async function processStreamingTasks(partialObject, state, context) {
|
||||||
|
if (!partialObject?.tasks || !Array.isArray(partialObject.tasks)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const newTaskCount = partialObject.tasks.length;
|
||||||
|
|
||||||
|
if (newTaskCount > state.taskCount) {
|
||||||
|
await processNewTasks(
|
||||||
|
partialObject.tasks,
|
||||||
|
state.taskCount,
|
||||||
|
newTaskCount,
|
||||||
|
state.estimatedOutputTokens,
|
||||||
|
context
|
||||||
|
);
|
||||||
|
state.taskCount = newTaskCount;
|
||||||
|
} else if (context.progressTracker && state.estimatedOutputTokens > 0) {
|
||||||
|
context.progressTracker.updateTokens(
|
||||||
|
context.estimatedInputTokens,
|
||||||
|
state.estimatedOutputTokens,
|
||||||
|
true
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process newly appeared tasks in the stream
|
||||||
|
*/
|
||||||
|
async function processNewTasks(
|
||||||
|
tasks,
|
||||||
|
startIndex,
|
||||||
|
endIndex,
|
||||||
|
estimatedOutputTokens,
|
||||||
|
context
|
||||||
|
) {
|
||||||
|
for (let i = startIndex; i < endIndex; i++) {
|
||||||
|
const task = tasks[i] || {};
|
||||||
|
|
||||||
|
if (task.title) {
|
||||||
|
await reportTaskProgress({
|
||||||
|
task,
|
||||||
|
currentCount: i + 1,
|
||||||
|
totalTasks: context.numTasks,
|
||||||
|
estimatedTokens: estimatedOutputTokens,
|
||||||
|
progressTracker: context.progressTracker,
|
||||||
|
reportProgress: context.config.reportProgress,
|
||||||
|
priorityMap: context.priorityMap,
|
||||||
|
defaultPriority: context.defaultPriority,
|
||||||
|
estimatedInputTokens: context.estimatedInputTokens
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
await reportPlaceholderTask(i + 1, estimatedOutputTokens, context);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Report a placeholder task while it's being generated
|
||||||
|
*/
|
||||||
|
async function reportPlaceholderTask(
|
||||||
|
taskNumber,
|
||||||
|
estimatedOutputTokens,
|
||||||
|
context
|
||||||
|
) {
|
||||||
|
const {
|
||||||
|
progressTracker,
|
||||||
|
config,
|
||||||
|
numTasks,
|
||||||
|
defaultPriority,
|
||||||
|
estimatedInputTokens
|
||||||
|
} = context;
|
||||||
|
|
||||||
|
if (progressTracker) {
|
||||||
|
progressTracker.addTaskLine(
|
||||||
|
taskNumber,
|
||||||
|
`Generating task ${taskNumber}...`,
|
||||||
|
defaultPriority
|
||||||
|
);
|
||||||
|
progressTracker.updateTokens(
|
||||||
|
estimatedInputTokens,
|
||||||
|
estimatedOutputTokens,
|
||||||
|
true
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (config.reportProgress && !progressTracker) {
|
||||||
|
await config.reportProgress({
|
||||||
|
progress: taskNumber,
|
||||||
|
total: numTasks,
|
||||||
|
message: `Generating task ${taskNumber}/${numTasks}...`
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Finalize streaming results and update progress display
|
||||||
|
*/
|
||||||
|
async function finalizeStreamingResults(state, context) {
|
||||||
|
const { lastPartialObject, estimatedOutputTokens, taskCount, usage } = state;
|
||||||
|
|
||||||
|
if (!lastPartialObject?.tasks || !Array.isArray(lastPartialObject.tasks)) {
|
||||||
|
throw new Error('No tasks generated from streamObject');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use actual token counts if available, otherwise use estimates
|
||||||
|
const finalOutputTokens = usage?.completionTokens || estimatedOutputTokens;
|
||||||
|
const finalInputTokens = usage?.promptTokens || context.estimatedInputTokens;
|
||||||
|
|
||||||
|
if (context.progressTracker) {
|
||||||
|
await updateFinalProgress(
|
||||||
|
lastPartialObject.tasks,
|
||||||
|
taskCount,
|
||||||
|
usage ? finalOutputTokens : estimatedOutputTokens,
|
||||||
|
context,
|
||||||
|
usage ? finalInputTokens : null
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
parsedTasks: lastPartialObject.tasks,
|
||||||
|
estimatedOutputTokens: finalOutputTokens,
|
||||||
|
actualInputTokens: finalInputTokens,
|
||||||
|
usage,
|
||||||
|
usedFallback: false
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update progress tracker with final task content
|
||||||
|
*/
|
||||||
|
async function updateFinalProgress(
|
||||||
|
tasks,
|
||||||
|
taskCount,
|
||||||
|
outputTokens,
|
||||||
|
context,
|
||||||
|
actualInputTokens = null
|
||||||
|
) {
|
||||||
|
const { progressTracker, defaultPriority, estimatedInputTokens } = context;
|
||||||
|
|
||||||
|
if (taskCount > 0) {
|
||||||
|
updateTaskLines(tasks, progressTracker, defaultPriority);
|
||||||
|
} else {
|
||||||
|
await reportAllTasks(tasks, outputTokens, context);
|
||||||
|
}
|
||||||
|
|
||||||
|
progressTracker.updateTokens(
|
||||||
|
actualInputTokens || estimatedInputTokens,
|
||||||
|
outputTokens,
|
||||||
|
false
|
||||||
|
);
|
||||||
|
progressTracker.stop();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update task lines in progress tracker with final content
|
||||||
|
*/
|
||||||
|
function updateTaskLines(tasks, progressTracker, defaultPriority) {
|
||||||
|
for (let i = 0; i < tasks.length; i++) {
|
||||||
|
const task = tasks[i];
|
||||||
|
if (task?.title) {
|
||||||
|
progressTracker.addTaskLine(
|
||||||
|
i + 1,
|
||||||
|
task.title,
|
||||||
|
task.priority || defaultPriority
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Report all tasks that were not streamed incrementally
|
||||||
|
*/
|
||||||
|
async function reportAllTasks(tasks, estimatedOutputTokens, context) {
|
||||||
|
for (let i = 0; i < tasks.length; i++) {
|
||||||
|
const task = tasks[i];
|
||||||
|
if (task?.title) {
|
||||||
|
await reportTaskProgress({
|
||||||
|
task,
|
||||||
|
currentCount: i + 1,
|
||||||
|
totalTasks: context.numTasks,
|
||||||
|
estimatedTokens: estimatedOutputTokens,
|
||||||
|
progressTracker: context.progressTracker,
|
||||||
|
reportProgress: context.config.reportProgress,
|
||||||
|
priorityMap: context.priorityMap,
|
||||||
|
defaultPriority: context.defaultPriority,
|
||||||
|
estimatedInputTokens: context.estimatedInputTokens
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process with generateObject as fallback when streaming fails
|
||||||
|
*/
|
||||||
|
async function processWithGenerateObject(context, logger) {
|
||||||
|
logger.report('Using generateObject fallback for PRD parsing', 'info');
|
||||||
|
|
||||||
|
// Show placeholder tasks while generating
|
||||||
|
if (context.progressTracker) {
|
||||||
|
for (let i = 0; i < context.numTasks; i++) {
|
||||||
|
context.progressTracker.addTaskLine(
|
||||||
|
i + 1,
|
||||||
|
`Generating task ${i + 1}...`,
|
||||||
|
context.defaultPriority
|
||||||
|
);
|
||||||
|
context.progressTracker.updateTokens(
|
||||||
|
context.estimatedInputTokens,
|
||||||
|
0,
|
||||||
|
true
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use generateObjectService instead of streaming
|
||||||
|
const result = await generateObjectService({
|
||||||
|
role: context.config.research ? 'research' : 'main',
|
||||||
|
commandName: 'parse-prd',
|
||||||
|
prompt: context.prompt,
|
||||||
|
systemPrompt: context.systemPrompt,
|
||||||
|
schema: context.config.schema,
|
||||||
|
outputFormat: context.config.outputFormat || 'text',
|
||||||
|
projectRoot: context.config.projectRoot,
|
||||||
|
session: context.config.session
|
||||||
|
});
|
||||||
|
|
||||||
|
// Extract tasks from the result (handle both direct tasks and mainResult.tasks)
|
||||||
|
const tasks = result?.mainResult || result;
|
||||||
|
|
||||||
|
// Process the generated tasks
|
||||||
|
if (tasks && Array.isArray(tasks.tasks)) {
|
||||||
|
// Update progress tracker with final tasks
|
||||||
|
if (context.progressTracker) {
|
||||||
|
for (let i = 0; i < tasks.tasks.length; i++) {
|
||||||
|
const task = tasks.tasks[i];
|
||||||
|
if (task && task.title) {
|
||||||
|
context.progressTracker.addTaskLine(
|
||||||
|
i + 1,
|
||||||
|
task.title,
|
||||||
|
task.priority || context.defaultPriority
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final token update - use actual telemetry if available
|
||||||
|
const outputTokens =
|
||||||
|
result.telemetryData?.outputTokens ||
|
||||||
|
estimateTokens(JSON.stringify(tasks));
|
||||||
|
const inputTokens =
|
||||||
|
result.telemetryData?.inputTokens || context.estimatedInputTokens;
|
||||||
|
|
||||||
|
context.progressTracker.updateTokens(inputTokens, outputTokens, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
parsedTasks: tasks.tasks,
|
||||||
|
estimatedOutputTokens:
|
||||||
|
result.telemetryData?.outputTokens ||
|
||||||
|
estimateTokens(JSON.stringify(tasks)),
|
||||||
|
actualInputTokens: result.telemetryData?.inputTokens,
|
||||||
|
telemetryData: result.telemetryData,
|
||||||
|
usedFallback: true
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
throw new Error('Failed to generate tasks using generateObject fallback');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Prepare final result with cleanup
|
||||||
|
*/
|
||||||
|
function prepareFinalResult(
|
||||||
|
streamingResult,
|
||||||
|
aiServiceResponse,
|
||||||
|
estimatedInputTokens,
|
||||||
|
progressTracker
|
||||||
|
) {
|
||||||
|
let summary = null;
|
||||||
|
if (progressTracker) {
|
||||||
|
summary = progressTracker.getSummary();
|
||||||
|
progressTracker.cleanup();
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have actual usage data from streaming, update the AI service response
|
||||||
|
if (streamingResult.usage && aiServiceResponse) {
|
||||||
|
// Map the Vercel AI SDK usage format to our telemetry format
|
||||||
|
const usage = streamingResult.usage;
|
||||||
|
if (!aiServiceResponse.usage) {
|
||||||
|
aiServiceResponse.usage = {
|
||||||
|
promptTokens: usage.promptTokens || 0,
|
||||||
|
completionTokens: usage.completionTokens || 0,
|
||||||
|
totalTokens: usage.totalTokens || 0
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// The telemetry should have been logged in the unified service runner
|
||||||
|
// but if not, the usage is now available for telemetry calculation
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
parsedTasks: streamingResult.parsedTasks,
|
||||||
|
aiServiceResponse,
|
||||||
|
estimatedInputTokens:
|
||||||
|
streamingResult.actualInputTokens || estimatedInputTokens,
|
||||||
|
estimatedOutputTokens: streamingResult.estimatedOutputTokens,
|
||||||
|
usedFallback: streamingResult.usedFallback,
|
||||||
|
progressTracker,
|
||||||
|
summary
|
||||||
|
};
|
||||||
|
}
|
||||||
272
scripts/modules/task-manager/parse-prd/parse-prd.js
Normal file
272
scripts/modules/task-manager/parse-prd/parse-prd.js
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
import chalk from 'chalk';
|
||||||
|
import {
|
||||||
|
StreamingError,
|
||||||
|
STREAMING_ERROR_CODES
|
||||||
|
} from '../../../../src/utils/stream-parser.js';
|
||||||
|
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
|
||||||
|
import { getDebugFlag, getDefaultPriority } from '../../config-manager.js';
|
||||||
|
|
||||||
|
// Import configuration classes
|
||||||
|
import { PrdParseConfig, LoggingConfig } from './parse-prd-config.js';
|
||||||
|
|
||||||
|
// Import helper functions
|
||||||
|
import {
|
||||||
|
readPrdContent,
|
||||||
|
loadExistingTasks,
|
||||||
|
validateFileOperations,
|
||||||
|
processTasks,
|
||||||
|
saveTasksToFile,
|
||||||
|
buildPrompts,
|
||||||
|
displayCliSummary,
|
||||||
|
displayNonStreamingCliOutput
|
||||||
|
} from './parse-prd-helpers.js';
|
||||||
|
|
||||||
|
// Import handlers
|
||||||
|
import { handleStreamingService } from './parse-prd-streaming.js';
|
||||||
|
import { handleNonStreamingService } from './parse-prd-non-streaming.js';
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// MAIN PARSING FUNCTIONS (Simplified after refactoring)
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Shared parsing logic for both streaming and non-streaming
|
||||||
|
* @param {PrdParseConfig} config - Configuration object
|
||||||
|
* @param {Function} serviceHandler - Handler function for AI service
|
||||||
|
* @param {boolean} isStreaming - Whether this is streaming mode
|
||||||
|
* @returns {Promise<Object>} Result object with success status and telemetry
|
||||||
|
*/
|
||||||
|
async function parsePRDCore(config, serviceHandler, isStreaming) {
|
||||||
|
const logger = new LoggingConfig(config.mcpLog, config.reportProgress);
|
||||||
|
|
||||||
|
logger.report(
|
||||||
|
`Parsing PRD file: ${config.prdPath}, Force: ${config.force}, Append: ${config.append}, Research: ${config.research}`,
|
||||||
|
'debug'
|
||||||
|
);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Load existing tasks
|
||||||
|
const { existingTasks, nextId } = loadExistingTasks(
|
||||||
|
config.tasksPath,
|
||||||
|
config.targetTag
|
||||||
|
);
|
||||||
|
|
||||||
|
// Validate operations
|
||||||
|
validateFileOperations({
|
||||||
|
existingTasks,
|
||||||
|
targetTag: config.targetTag,
|
||||||
|
append: config.append,
|
||||||
|
force: config.force,
|
||||||
|
isMCP: config.isMCP,
|
||||||
|
logger
|
||||||
|
});
|
||||||
|
|
||||||
|
// Read PRD content and build prompts
|
||||||
|
const prdContent = readPrdContent(config.prdPath);
|
||||||
|
const prompts = await buildPrompts(config, prdContent, nextId);
|
||||||
|
|
||||||
|
// Call the appropriate service handler
|
||||||
|
const serviceResult = await serviceHandler(
|
||||||
|
config,
|
||||||
|
prompts,
|
||||||
|
config.numTasks
|
||||||
|
);
|
||||||
|
|
||||||
|
// Process tasks
|
||||||
|
const defaultPriority = getDefaultPriority(config.projectRoot) || 'medium';
|
||||||
|
const processedNewTasks = processTasks(
|
||||||
|
serviceResult.parsedTasks,
|
||||||
|
nextId,
|
||||||
|
existingTasks,
|
||||||
|
defaultPriority
|
||||||
|
);
|
||||||
|
|
||||||
|
// Combine with existing if appending
|
||||||
|
const finalTasks = config.append
|
||||||
|
? [...existingTasks, ...processedNewTasks]
|
||||||
|
: processedNewTasks;
|
||||||
|
|
||||||
|
// Save to file
|
||||||
|
saveTasksToFile(config.tasksPath, finalTasks, config.targetTag, logger);
|
||||||
|
|
||||||
|
// Handle completion reporting
|
||||||
|
await handleCompletionReporting(
|
||||||
|
config,
|
||||||
|
serviceResult,
|
||||||
|
processedNewTasks,
|
||||||
|
finalTasks,
|
||||||
|
nextId,
|
||||||
|
isStreaming
|
||||||
|
);
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
tasksPath: config.tasksPath,
|
||||||
|
telemetryData: serviceResult.aiServiceResponse?.telemetryData,
|
||||||
|
tagInfo: serviceResult.aiServiceResponse?.tagInfo
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
logger.report(`Error parsing PRD: ${error.message}`, 'error');
|
||||||
|
|
||||||
|
if (!config.isMCP) {
|
||||||
|
console.error(chalk.red(`Error: ${error.message}`));
|
||||||
|
if (getDebugFlag(config.projectRoot)) {
|
||||||
|
console.error(error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle completion reporting for both CLI and MCP
|
||||||
|
* @param {PrdParseConfig} config - Configuration object
|
||||||
|
* @param {Object} serviceResult - Result from service handler
|
||||||
|
* @param {Array} processedNewTasks - New tasks that were processed
|
||||||
|
* @param {Array} finalTasks - All tasks after processing
|
||||||
|
* @param {number} nextId - Next available task ID
|
||||||
|
* @param {boolean} isStreaming - Whether this was streaming mode
|
||||||
|
*/
|
||||||
|
async function handleCompletionReporting(
|
||||||
|
config,
|
||||||
|
serviceResult,
|
||||||
|
processedNewTasks,
|
||||||
|
finalTasks,
|
||||||
|
nextId,
|
||||||
|
isStreaming
|
||||||
|
) {
|
||||||
|
const { aiServiceResponse, estimatedInputTokens, estimatedOutputTokens } =
|
||||||
|
serviceResult;
|
||||||
|
|
||||||
|
// MCP progress reporting
|
||||||
|
if (config.reportProgress) {
|
||||||
|
const hasValidTelemetry =
|
||||||
|
aiServiceResponse?.telemetryData &&
|
||||||
|
(aiServiceResponse.telemetryData.inputTokens > 0 ||
|
||||||
|
aiServiceResponse.telemetryData.outputTokens > 0);
|
||||||
|
|
||||||
|
let completionMessage;
|
||||||
|
if (hasValidTelemetry) {
|
||||||
|
const cost = aiServiceResponse.telemetryData.totalCost || 0;
|
||||||
|
const currency = aiServiceResponse.telemetryData.currency || 'USD';
|
||||||
|
completionMessage = `✅ Task Generation Completed | Tokens (I/O): ${aiServiceResponse.telemetryData.inputTokens}/${aiServiceResponse.telemetryData.outputTokens} | Cost: ${currency === 'USD' ? '$' : currency}${cost.toFixed(4)}`;
|
||||||
|
} else {
|
||||||
|
const outputTokens = isStreaming ? estimatedOutputTokens : 'unknown';
|
||||||
|
completionMessage = `✅ Task Generation Completed | ~Tokens (I/O): ${estimatedInputTokens}/${outputTokens} | Cost: ~$0.00`;
|
||||||
|
}
|
||||||
|
|
||||||
|
await config.reportProgress({
|
||||||
|
progress: config.numTasks,
|
||||||
|
total: config.numTasks,
|
||||||
|
message: completionMessage
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// CLI output
|
||||||
|
if (config.outputFormat === 'text' && !config.isMCP) {
|
||||||
|
if (isStreaming && serviceResult.summary) {
|
||||||
|
await displayCliSummary({
|
||||||
|
processedTasks: processedNewTasks,
|
||||||
|
nextId,
|
||||||
|
summary: serviceResult.summary,
|
||||||
|
prdPath: config.prdPath,
|
||||||
|
tasksPath: config.tasksPath,
|
||||||
|
usedFallback: serviceResult.usedFallback,
|
||||||
|
aiServiceResponse
|
||||||
|
});
|
||||||
|
} else if (!isStreaming) {
|
||||||
|
displayNonStreamingCliOutput({
|
||||||
|
processedTasks: processedNewTasks,
|
||||||
|
research: config.research,
|
||||||
|
finalTasks,
|
||||||
|
tasksPath: config.tasksPath,
|
||||||
|
aiServiceResponse
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse PRD with streaming progress reporting
|
||||||
|
*/
|
||||||
|
async function parsePRDWithStreaming(
|
||||||
|
prdPath,
|
||||||
|
tasksPath,
|
||||||
|
numTasks,
|
||||||
|
options = {}
|
||||||
|
) {
|
||||||
|
const config = new PrdParseConfig(prdPath, tasksPath, numTasks, options);
|
||||||
|
return parsePRDCore(config, handleStreamingService, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse PRD without streaming (fallback)
|
||||||
|
*/
|
||||||
|
async function parsePRDWithoutStreaming(
|
||||||
|
prdPath,
|
||||||
|
tasksPath,
|
||||||
|
numTasks,
|
||||||
|
options = {}
|
||||||
|
) {
|
||||||
|
const config = new PrdParseConfig(prdPath, tasksPath, numTasks, options);
|
||||||
|
return parsePRDCore(config, handleNonStreamingService, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Main entry point - decides between streaming and non-streaming
|
||||||
|
*/
|
||||||
|
async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
|
||||||
|
const config = new PrdParseConfig(prdPath, tasksPath, numTasks, options);
|
||||||
|
|
||||||
|
if (config.useStreaming) {
|
||||||
|
try {
|
||||||
|
return await parsePRDWithStreaming(prdPath, tasksPath, numTasks, options);
|
||||||
|
} catch (streamingError) {
|
||||||
|
// Check if this is a streaming-specific error (including timeout)
|
||||||
|
const isStreamingError =
|
||||||
|
streamingError instanceof StreamingError ||
|
||||||
|
streamingError.code === STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE ||
|
||||||
|
streamingError.code ===
|
||||||
|
STREAMING_ERROR_CODES.STREAM_PROCESSING_FAILED ||
|
||||||
|
streamingError.code === STREAMING_ERROR_CODES.STREAM_NOT_ITERABLE ||
|
||||||
|
TimeoutManager.isTimeoutError(streamingError);
|
||||||
|
|
||||||
|
if (isStreamingError) {
|
||||||
|
const logger = new LoggingConfig(config.mcpLog, config.reportProgress);
|
||||||
|
|
||||||
|
// Show fallback message
|
||||||
|
if (config.outputFormat === 'text' && !config.isMCP) {
|
||||||
|
console.log(
|
||||||
|
chalk.yellow(
|
||||||
|
`⚠️ Streaming operation ${streamingError.message.includes('timed out') ? 'timed out' : 'failed'}. Falling back to non-streaming mode...`
|
||||||
|
)
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
logger.report(
|
||||||
|
`Streaming failed (${streamingError.message}), falling back to non-streaming mode...`,
|
||||||
|
'warn'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback to non-streaming
|
||||||
|
return await parsePRDWithoutStreaming(
|
||||||
|
prdPath,
|
||||||
|
tasksPath,
|
||||||
|
numTasks,
|
||||||
|
options
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
throw streamingError;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return await parsePRDWithoutStreaming(
|
||||||
|
prdPath,
|
||||||
|
tasksPath,
|
||||||
|
numTasks,
|
||||||
|
options
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export default parsePRD;
|
||||||
@@ -2,6 +2,7 @@ import {
|
|||||||
generateObject,
|
generateObject,
|
||||||
generateText,
|
generateText,
|
||||||
streamText,
|
streamText,
|
||||||
|
streamObject,
|
||||||
zodSchema,
|
zodSchema,
|
||||||
JSONParseError,
|
JSONParseError,
|
||||||
NoObjectGeneratedError
|
NoObjectGeneratedError
|
||||||
@@ -224,6 +225,46 @@ export class BaseAIProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Streams a structured object using the provider's model
|
||||||
|
*/
|
||||||
|
async streamObject(params) {
|
||||||
|
try {
|
||||||
|
this.validateParams(params);
|
||||||
|
this.validateMessages(params.messages);
|
||||||
|
|
||||||
|
if (!params.schema) {
|
||||||
|
throw new Error('Schema is required for object streaming');
|
||||||
|
}
|
||||||
|
|
||||||
|
log(
|
||||||
|
'debug',
|
||||||
|
`Streaming ${this.name} object with model: ${params.modelId}`
|
||||||
|
);
|
||||||
|
|
||||||
|
const client = await this.getClient(params);
|
||||||
|
const result = await streamObject({
|
||||||
|
model: client(params.modelId),
|
||||||
|
messages: params.messages,
|
||||||
|
schema: zodSchema(params.schema),
|
||||||
|
mode: params.mode || 'auto',
|
||||||
|
maxTokens: params.maxTokens,
|
||||||
|
temperature: params.temperature
|
||||||
|
});
|
||||||
|
|
||||||
|
log(
|
||||||
|
'debug',
|
||||||
|
`${this.name} streamObject initiated successfully for model: ${params.modelId}`
|
||||||
|
);
|
||||||
|
|
||||||
|
// Return the stream result directly
|
||||||
|
// The stream result contains partialObjectStream and other properties
|
||||||
|
return result;
|
||||||
|
} catch (error) {
|
||||||
|
this.handleError('object streaming', error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Generates a structured object using the provider's model
|
* Generates a structured object using the provider's model
|
||||||
*/
|
*/
|
||||||
|
|||||||
298
src/progress/base-progress-tracker.js
Normal file
298
src/progress/base-progress-tracker.js
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
import { newMultiBar } from './cli-progress-factory.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base class for progress trackers, handling common logic for time, tokens, estimation, and multibar management.
|
||||||
|
*/
|
||||||
|
export class BaseProgressTracker {
|
||||||
|
constructor(options = {}) {
|
||||||
|
this.numUnits = options.numUnits || 1;
|
||||||
|
this.unitName = options.unitName || 'unit'; // e.g., 'task', 'subtask'
|
||||||
|
this.startTime = null;
|
||||||
|
this.completedUnits = 0;
|
||||||
|
this.tokensIn = 0;
|
||||||
|
this.tokensOut = 0;
|
||||||
|
this.isEstimate = true; // For token display
|
||||||
|
|
||||||
|
// Time estimation properties
|
||||||
|
this.bestAvgTimePerUnit = null;
|
||||||
|
this.lastEstimateTime = null;
|
||||||
|
this.lastEstimateSeconds = 0;
|
||||||
|
|
||||||
|
// UI components
|
||||||
|
this.multibar = null;
|
||||||
|
this.timeTokensBar = null;
|
||||||
|
this.progressBar = null;
|
||||||
|
this._timerInterval = null;
|
||||||
|
|
||||||
|
// State flags
|
||||||
|
this.isStarted = false;
|
||||||
|
this.isFinished = false;
|
||||||
|
|
||||||
|
// Allow subclasses to define custom properties
|
||||||
|
this._initializeCustomProperties(options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protected method for subclasses to initialize custom properties.
|
||||||
|
* @protected
|
||||||
|
*/
|
||||||
|
_initializeCustomProperties(options) {
|
||||||
|
// Subclasses can override this
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the pluralized form of the unit name for safe property keys.
|
||||||
|
* @returns {string} Pluralized unit name
|
||||||
|
*/
|
||||||
|
get unitNamePlural() {
|
||||||
|
return `${this.unitName}s`;
|
||||||
|
}
|
||||||
|
|
||||||
|
start() {
|
||||||
|
if (this.isStarted || this.isFinished) return;
|
||||||
|
|
||||||
|
this.isStarted = true;
|
||||||
|
this.startTime = Date.now();
|
||||||
|
|
||||||
|
this.multibar = newMultiBar();
|
||||||
|
|
||||||
|
// Create time/tokens bar using subclass-provided format
|
||||||
|
this.timeTokensBar = this.multibar.create(
|
||||||
|
1,
|
||||||
|
0,
|
||||||
|
{},
|
||||||
|
{
|
||||||
|
format: this._getTimeTokensBarFormat(),
|
||||||
|
barsize: 1,
|
||||||
|
hideCursor: true,
|
||||||
|
clearOnComplete: false
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
// Create main progress bar using subclass-provided format
|
||||||
|
this.progressBar = this.multibar.create(
|
||||||
|
this.numUnits,
|
||||||
|
0,
|
||||||
|
{},
|
||||||
|
{
|
||||||
|
format: this._getProgressBarFormat(),
|
||||||
|
barCompleteChar: '\u2588',
|
||||||
|
barIncompleteChar: '\u2591'
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
this._updateTimeTokensBar();
|
||||||
|
this.progressBar.update(0, { [this.unitNamePlural]: `0/${this.numUnits}` });
|
||||||
|
|
||||||
|
// Start timer
|
||||||
|
this._timerInterval = setInterval(() => this._updateTimeTokensBar(), 1000);
|
||||||
|
|
||||||
|
// Allow subclasses to add custom bars or setup
|
||||||
|
this._setupCustomUI();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protected method for subclasses to add custom UI elements after start.
|
||||||
|
* @protected
|
||||||
|
*/
|
||||||
|
_setupCustomUI() {
|
||||||
|
// Subclasses can override this
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protected method to get the format for the time/tokens bar.
|
||||||
|
* @protected
|
||||||
|
* @returns {string} Format string for the time/tokens bar.
|
||||||
|
*/
|
||||||
|
_getTimeTokensBarFormat() {
|
||||||
|
return `{clock} {elapsed} | Tokens (I/O): {in}/{out} | Est: {remaining}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protected method to get the format for the main progress bar.
|
||||||
|
* @protected
|
||||||
|
* @returns {string} Format string for the progress bar.
|
||||||
|
*/
|
||||||
|
_getProgressBarFormat() {
|
||||||
|
return `${this.unitName.charAt(0).toUpperCase() + this.unitName.slice(1)}s {${this.unitNamePlural}} |{bar}| {percentage}%`;
|
||||||
|
}
|
||||||
|
|
||||||
|
updateTokens(tokensIn, tokensOut, isEstimate = false) {
|
||||||
|
this.tokensIn = tokensIn || 0;
|
||||||
|
this.tokensOut = tokensOut || 0;
|
||||||
|
this.isEstimate = isEstimate;
|
||||||
|
this._updateTimeTokensBar();
|
||||||
|
}
|
||||||
|
|
||||||
|
_updateTimeTokensBar() {
|
||||||
|
if (!this.timeTokensBar || this.isFinished) return;
|
||||||
|
|
||||||
|
const elapsed = this._formatElapsedTime();
|
||||||
|
const remaining = this._estimateRemainingTime();
|
||||||
|
const tokensLabel = this.isEstimate ? '~ Tokens (I/O)' : 'Tokens (I/O)';
|
||||||
|
|
||||||
|
this.timeTokensBar.update(1, {
|
||||||
|
clock: '⏱️',
|
||||||
|
elapsed,
|
||||||
|
in: this.tokensIn,
|
||||||
|
out: this.tokensOut,
|
||||||
|
remaining,
|
||||||
|
tokensLabel,
|
||||||
|
// Subclasses can add more payload here via override
|
||||||
|
...this._getCustomTimeTokensPayload()
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protected method for subclasses to provide custom payload for time/tokens bar.
|
||||||
|
* @protected
|
||||||
|
* @returns {Object} Custom payload object.
|
||||||
|
*/
|
||||||
|
_getCustomTimeTokensPayload() {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
_formatElapsedTime() {
|
||||||
|
if (!this.startTime) return '0m 00s';
|
||||||
|
const seconds = Math.floor((Date.now() - this.startTime) / 1000);
|
||||||
|
const minutes = Math.floor(seconds / 60);
|
||||||
|
const remainingSeconds = seconds % 60;
|
||||||
|
return `${minutes}m ${remainingSeconds.toString().padStart(2, '0')}s`;
|
||||||
|
}
|
||||||
|
|
||||||
|
_estimateRemainingTime() {
|
||||||
|
const progress = this._getProgressFraction();
|
||||||
|
if (progress >= 1) return '~0s';
|
||||||
|
|
||||||
|
const now = Date.now();
|
||||||
|
const elapsed = (now - this.startTime) / 1000;
|
||||||
|
|
||||||
|
if (progress === 0) return '~calculating...';
|
||||||
|
|
||||||
|
const avgTimePerUnit = elapsed / progress;
|
||||||
|
|
||||||
|
if (
|
||||||
|
this.bestAvgTimePerUnit === null ||
|
||||||
|
avgTimePerUnit < this.bestAvgTimePerUnit
|
||||||
|
) {
|
||||||
|
this.bestAvgTimePerUnit = avgTimePerUnit;
|
||||||
|
}
|
||||||
|
|
||||||
|
const remainingUnits = this.numUnits * (1 - progress);
|
||||||
|
let estimatedSeconds = Math.ceil(remainingUnits * this.bestAvgTimePerUnit);
|
||||||
|
|
||||||
|
// Stabilization logic
|
||||||
|
if (this.lastEstimateTime) {
|
||||||
|
const elapsedSinceEstimate = Math.floor(
|
||||||
|
(now - this.lastEstimateTime) / 1000
|
||||||
|
);
|
||||||
|
const countdownSeconds = Math.max(
|
||||||
|
0,
|
||||||
|
this.lastEstimateSeconds - elapsedSinceEstimate
|
||||||
|
);
|
||||||
|
if (countdownSeconds === 0) return '~0s';
|
||||||
|
estimatedSeconds = Math.min(estimatedSeconds, countdownSeconds);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.lastEstimateTime = now;
|
||||||
|
this.lastEstimateSeconds = estimatedSeconds;
|
||||||
|
|
||||||
|
return `~${this._formatDuration(estimatedSeconds)}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protected method for subclasses to calculate current progress fraction (0-1).
|
||||||
|
* Defaults to simple completedUnits / numUnits.
|
||||||
|
* @protected
|
||||||
|
* @returns {number} Progress fraction (can be fractional for subtasks).
|
||||||
|
*/
|
||||||
|
_getProgressFraction() {
|
||||||
|
return this.completedUnits / this.numUnits;
|
||||||
|
}
|
||||||
|
|
||||||
|
_formatDuration(seconds) {
|
||||||
|
if (seconds < 60) return `${seconds}s`;
|
||||||
|
const minutes = Math.floor(seconds / 60);
|
||||||
|
const remainingSeconds = seconds % 60;
|
||||||
|
if (minutes < 60) {
|
||||||
|
return remainingSeconds > 0
|
||||||
|
? `${minutes}m ${remainingSeconds}s`
|
||||||
|
: `${minutes}m`;
|
||||||
|
}
|
||||||
|
const hours = Math.floor(minutes / 60);
|
||||||
|
const remainingMinutes = minutes % 60;
|
||||||
|
return `${hours}h ${remainingMinutes}m`;
|
||||||
|
}
|
||||||
|
|
||||||
|
getElapsedTime() {
|
||||||
|
return this.startTime ? Date.now() - this.startTime : 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
stop() {
|
||||||
|
if (this.isFinished) return;
|
||||||
|
|
||||||
|
this.isFinished = true;
|
||||||
|
|
||||||
|
if (this._timerInterval) {
|
||||||
|
clearInterval(this._timerInterval);
|
||||||
|
this._timerInterval = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.multibar) {
|
||||||
|
this._updateTimeTokensBar();
|
||||||
|
this.multibar.stop();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure cleanup is called to prevent memory leaks
|
||||||
|
this.cleanup();
|
||||||
|
}
|
||||||
|
|
||||||
|
getSummary() {
|
||||||
|
return {
|
||||||
|
completedUnits: this.completedUnits,
|
||||||
|
elapsedTime: this.getElapsedTime()
|
||||||
|
// Subclasses should extend this
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cleanup method to ensure proper resource disposal and prevent memory leaks.
|
||||||
|
* Should be called when the progress tracker is no longer needed.
|
||||||
|
*/
|
||||||
|
cleanup() {
|
||||||
|
// Stop any active timers
|
||||||
|
if (this._timerInterval) {
|
||||||
|
clearInterval(this._timerInterval);
|
||||||
|
this._timerInterval = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop and clear multibar
|
||||||
|
if (this.multibar) {
|
||||||
|
try {
|
||||||
|
this.multibar.stop();
|
||||||
|
} catch (error) {
|
||||||
|
// Ignore errors during cleanup
|
||||||
|
}
|
||||||
|
this.multibar = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear progress bar references
|
||||||
|
this.timeTokensBar = null;
|
||||||
|
this.progressBar = null;
|
||||||
|
|
||||||
|
// Reset state
|
||||||
|
this.isStarted = false;
|
||||||
|
this.isFinished = true;
|
||||||
|
|
||||||
|
// Allow subclasses to perform custom cleanup
|
||||||
|
this._performCustomCleanup();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protected method for subclasses to perform custom cleanup.
|
||||||
|
* @protected
|
||||||
|
*/
|
||||||
|
_performCustomCleanup() {
|
||||||
|
// Subclasses can override this
|
||||||
|
}
|
||||||
|
}
|
||||||
115
src/progress/cli-progress-factory.js
Normal file
115
src/progress/cli-progress-factory.js
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
import cliProgress from 'cli-progress';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default configuration for progress bars
|
||||||
|
* Extracted to avoid duplication and provide single source of truth
|
||||||
|
*/
|
||||||
|
const DEFAULT_CONFIG = {
|
||||||
|
clearOnComplete: false,
|
||||||
|
stopOnComplete: true,
|
||||||
|
hideCursor: true,
|
||||||
|
barsize: 40 // Standard terminal width for progress bar
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Available presets for progress bar styling
|
||||||
|
* Makes it easy to see what options are available
|
||||||
|
*/
|
||||||
|
const PRESETS = {
|
||||||
|
shades_classic: cliProgress.Presets.shades_classic,
|
||||||
|
shades_grey: cliProgress.Presets.shades_grey,
|
||||||
|
rect: cliProgress.Presets.rect,
|
||||||
|
legacy: cliProgress.Presets.legacy
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Factory class for creating CLI progress bars
|
||||||
|
* Provides a consistent interface for creating both single and multi-bar instances
|
||||||
|
*/
|
||||||
|
export class ProgressBarFactory {
|
||||||
|
constructor(defaultOptions = {}, defaultPreset = PRESETS.shades_classic) {
|
||||||
|
this.defaultOptions = { ...DEFAULT_CONFIG, ...defaultOptions };
|
||||||
|
this.defaultPreset = defaultPreset;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new single progress bar
|
||||||
|
* @param {Object} opts - Custom options to override defaults
|
||||||
|
* @param {Object} preset - Progress bar preset for styling
|
||||||
|
* @returns {cliProgress.SingleBar} Configured single progress bar instance
|
||||||
|
*/
|
||||||
|
createSingleBar(opts = {}, preset = null) {
|
||||||
|
const config = this._mergeConfig(opts);
|
||||||
|
const barPreset = preset || this.defaultPreset;
|
||||||
|
|
||||||
|
return new cliProgress.SingleBar(config, barPreset);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new multi-bar container
|
||||||
|
* @param {Object} opts - Custom options to override defaults
|
||||||
|
* @param {Object} preset - Progress bar preset for styling
|
||||||
|
* @returns {cliProgress.MultiBar} Configured multi-bar instance
|
||||||
|
*/
|
||||||
|
createMultiBar(opts = {}, preset = null) {
|
||||||
|
const config = this._mergeConfig(opts);
|
||||||
|
const barPreset = preset || this.defaultPreset;
|
||||||
|
|
||||||
|
return new cliProgress.MultiBar(config, barPreset);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Merges custom options with defaults
|
||||||
|
* @private
|
||||||
|
* @param {Object} customOpts - Custom options to merge
|
||||||
|
* @returns {Object} Merged configuration
|
||||||
|
*/
|
||||||
|
_mergeConfig(customOpts) {
|
||||||
|
return { ...this.defaultOptions, ...customOpts };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Updates the default configuration
|
||||||
|
* @param {Object} options - New default options
|
||||||
|
*/
|
||||||
|
setDefaultOptions(options) {
|
||||||
|
this.defaultOptions = { ...this.defaultOptions, ...options };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Updates the default preset
|
||||||
|
* @param {Object} preset - New default preset
|
||||||
|
*/
|
||||||
|
setDefaultPreset(preset) {
|
||||||
|
this.defaultPreset = preset;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a default factory instance for backward compatibility
|
||||||
|
const defaultFactory = new ProgressBarFactory();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Legacy function for creating a single progress bar
|
||||||
|
* @deprecated Use ProgressBarFactory.createSingleBar() instead
|
||||||
|
* @param {Object} opts - Progress bar options
|
||||||
|
* @returns {cliProgress.SingleBar} Single progress bar instance
|
||||||
|
*/
|
||||||
|
export function newSingle(opts = {}) {
|
||||||
|
return defaultFactory.createSingleBar(opts);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Legacy function for creating a multi-bar
|
||||||
|
* @deprecated Use ProgressBarFactory.createMultiBar() instead
|
||||||
|
* @param {Object} opts - Progress bar options
|
||||||
|
* @returns {cliProgress.MultiBar} Multi-bar instance
|
||||||
|
*/
|
||||||
|
export function newMultiBar(opts = {}) {
|
||||||
|
return defaultFactory.createMultiBar(opts);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Export presets for easy access
|
||||||
|
export { PRESETS };
|
||||||
|
|
||||||
|
// Export the factory class as default
|
||||||
|
export default ProgressBarFactory;
|
||||||
221
src/progress/parse-prd-tracker.js
Normal file
221
src/progress/parse-prd-tracker.js
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
import chalk from 'chalk';
|
||||||
|
import { newMultiBar } from './cli-progress-factory.js';
|
||||||
|
import { BaseProgressTracker } from './base-progress-tracker.js';
|
||||||
|
import {
|
||||||
|
createProgressHeader,
|
||||||
|
createProgressRow,
|
||||||
|
createBorder
|
||||||
|
} from './tracker-ui.js';
|
||||||
|
import {
|
||||||
|
getCliPriorityIndicators,
|
||||||
|
getPriorityIndicator,
|
||||||
|
getStatusBarPriorityIndicators,
|
||||||
|
getPriorityColors
|
||||||
|
} from '../ui/indicators.js';
|
||||||
|
|
||||||
|
// Get centralized priority indicators
|
||||||
|
const PRIORITY_INDICATORS = getCliPriorityIndicators();
|
||||||
|
const PRIORITY_DOTS = getStatusBarPriorityIndicators();
|
||||||
|
const PRIORITY_COLORS = getPriorityColors();
|
||||||
|
|
||||||
|
// Constants
|
||||||
|
const CONSTANTS = {
|
||||||
|
DEBOUNCE_DELAY: 100,
|
||||||
|
MAX_TITLE_LENGTH: 57,
|
||||||
|
TRUNCATED_LENGTH: 54,
|
||||||
|
TASK_ID_PAD_START: 3,
|
||||||
|
TASK_ID_PAD_END: 4,
|
||||||
|
PRIORITY_PAD_END: 3,
|
||||||
|
VALID_PRIORITIES: ['high', 'medium', 'low'],
|
||||||
|
DEFAULT_PRIORITY: 'medium'
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper class to manage update debouncing
|
||||||
|
*/
|
||||||
|
class UpdateDebouncer {
|
||||||
|
constructor(delay = CONSTANTS.DEBOUNCE_DELAY) {
|
||||||
|
this.delay = delay;
|
||||||
|
this.pendingTimeout = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
debounce(callback) {
|
||||||
|
this.clear();
|
||||||
|
this.pendingTimeout = setTimeout(() => {
|
||||||
|
callback();
|
||||||
|
this.pendingTimeout = null;
|
||||||
|
}, this.delay);
|
||||||
|
}
|
||||||
|
|
||||||
|
clear() {
|
||||||
|
if (this.pendingTimeout) {
|
||||||
|
clearTimeout(this.pendingTimeout);
|
||||||
|
this.pendingTimeout = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
hasPending() {
|
||||||
|
return this.pendingTimeout !== null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper class to manage priority counts
|
||||||
|
*/
|
||||||
|
class PriorityManager {
|
||||||
|
constructor() {
|
||||||
|
this.priorities = { high: 0, medium: 0, low: 0 };
|
||||||
|
}
|
||||||
|
|
||||||
|
increment(priority) {
|
||||||
|
const normalized = this.normalize(priority);
|
||||||
|
this.priorities[normalized]++;
|
||||||
|
return normalized;
|
||||||
|
}
|
||||||
|
|
||||||
|
normalize(priority) {
|
||||||
|
const lowercased = priority
|
||||||
|
? priority.toLowerCase()
|
||||||
|
: CONSTANTS.DEFAULT_PRIORITY;
|
||||||
|
return CONSTANTS.VALID_PRIORITIES.includes(lowercased)
|
||||||
|
? lowercased
|
||||||
|
: CONSTANTS.DEFAULT_PRIORITY;
|
||||||
|
}
|
||||||
|
|
||||||
|
getCounts() {
|
||||||
|
return { ...this.priorities };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper class for formatting task display elements
|
||||||
|
*/
|
||||||
|
class TaskFormatter {
|
||||||
|
static formatTitle(title, taskNumber) {
|
||||||
|
if (!title) return `Task ${taskNumber}`;
|
||||||
|
return title.length > CONSTANTS.MAX_TITLE_LENGTH
|
||||||
|
? title.substring(0, CONSTANTS.TRUNCATED_LENGTH) + '...'
|
||||||
|
: title;
|
||||||
|
}
|
||||||
|
|
||||||
|
static formatPriority(priority) {
|
||||||
|
return getPriorityIndicator(priority, false).padEnd(
|
||||||
|
CONSTANTS.PRIORITY_PAD_END,
|
||||||
|
' '
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
static formatTaskId(taskNumber) {
|
||||||
|
return taskNumber
|
||||||
|
.toString()
|
||||||
|
.padStart(CONSTANTS.TASK_ID_PAD_START, ' ')
|
||||||
|
.padEnd(CONSTANTS.TASK_ID_PAD_END, ' ');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tracks progress for PRD parsing operations with multibar display
|
||||||
|
*/
|
||||||
|
class ParsePrdTracker extends BaseProgressTracker {
|
||||||
|
_initializeCustomProperties(options) {
|
||||||
|
this.append = options.append;
|
||||||
|
this.priorityManager = new PriorityManager();
|
||||||
|
this.debouncer = new UpdateDebouncer();
|
||||||
|
this.headerShown = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
_getTimeTokensBarFormat() {
|
||||||
|
return `{clock} {elapsed} | ${PRIORITY_DOTS.high} {high} ${PRIORITY_DOTS.medium} {medium} ${PRIORITY_DOTS.low} {low} | Tokens (I/O): {in}/{out} | Est: {remaining}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
_getProgressBarFormat() {
|
||||||
|
return 'Tasks {tasks} |{bar}| {percentage}%';
|
||||||
|
}
|
||||||
|
|
||||||
|
_getCustomTimeTokensPayload() {
|
||||||
|
return this.priorityManager.getCounts();
|
||||||
|
}
|
||||||
|
|
||||||
|
addTaskLine(taskNumber, title, priority = 'medium') {
|
||||||
|
if (!this.multibar || this.isFinished) return;
|
||||||
|
|
||||||
|
this._ensureHeaderShown();
|
||||||
|
const normalizedPriority = this._updateTaskCounters(taskNumber, priority);
|
||||||
|
|
||||||
|
// Immediately update the time/tokens bar to show the new priority count
|
||||||
|
this._updateTimeTokensBar();
|
||||||
|
|
||||||
|
this.debouncer.debounce(() => {
|
||||||
|
this._updateProgressDisplay(taskNumber, title, normalizedPriority);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
_ensureHeaderShown() {
|
||||||
|
if (!this.headerShown) {
|
||||||
|
this.headerShown = true;
|
||||||
|
createProgressHeader(
|
||||||
|
this.multibar,
|
||||||
|
' TASK | PRI | TITLE',
|
||||||
|
'------+-----+----------------------------------------------------------------'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_updateTaskCounters(taskNumber, priority) {
|
||||||
|
const normalizedPriority = this.priorityManager.increment(priority);
|
||||||
|
this.completedUnits = taskNumber;
|
||||||
|
return normalizedPriority;
|
||||||
|
}
|
||||||
|
|
||||||
|
_updateProgressDisplay(taskNumber, title, normalizedPriority) {
|
||||||
|
this.progressBar.update(this.completedUnits, {
|
||||||
|
tasks: `${this.completedUnits}/${this.numUnits}`
|
||||||
|
});
|
||||||
|
|
||||||
|
const displayTitle = TaskFormatter.formatTitle(title, taskNumber);
|
||||||
|
const priorityDisplay = TaskFormatter.formatPriority(normalizedPriority);
|
||||||
|
const taskIdCentered = TaskFormatter.formatTaskId(taskNumber);
|
||||||
|
|
||||||
|
createProgressRow(
|
||||||
|
this.multibar,
|
||||||
|
` ${taskIdCentered} | ${priorityDisplay} | {title}`,
|
||||||
|
{ title: displayTitle }
|
||||||
|
);
|
||||||
|
|
||||||
|
createBorder(
|
||||||
|
this.multibar,
|
||||||
|
'------+-----+----------------------------------------------------------------'
|
||||||
|
);
|
||||||
|
|
||||||
|
this._updateTimeTokensBar();
|
||||||
|
}
|
||||||
|
|
||||||
|
finish() {
|
||||||
|
// Flush any pending updates before finishing
|
||||||
|
if (this.debouncer.hasPending()) {
|
||||||
|
this.debouncer.clear();
|
||||||
|
this._updateTimeTokensBar();
|
||||||
|
}
|
||||||
|
this.cleanup();
|
||||||
|
super.finish();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Override cleanup to handle pending updates
|
||||||
|
*/
|
||||||
|
_performCustomCleanup() {
|
||||||
|
this.debouncer.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
getSummary() {
|
||||||
|
return {
|
||||||
|
...super.getSummary(),
|
||||||
|
taskPriorities: this.priorityManager.getCounts(),
|
||||||
|
actionVerb: this.append ? 'appended' : 'generated'
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createParsePrdTracker(options = {}) {
|
||||||
|
return new ParsePrdTracker(options);
|
||||||
|
}
|
||||||
152
src/progress/progress-tracker-builder.js
Normal file
152
src/progress/progress-tracker-builder.js
Normal file
@@ -0,0 +1,152 @@
|
|||||||
|
/**
|
||||||
|
* Configuration for progress tracker features
|
||||||
|
*/
|
||||||
|
class TrackerConfig {
|
||||||
|
constructor() {
|
||||||
|
this.features = new Set();
|
||||||
|
this.spinnerFrames = null;
|
||||||
|
this.unitName = 'unit';
|
||||||
|
this.totalUnits = 100;
|
||||||
|
}
|
||||||
|
|
||||||
|
addFeature(feature) {
|
||||||
|
this.features.add(feature);
|
||||||
|
}
|
||||||
|
|
||||||
|
hasFeature(feature) {
|
||||||
|
return this.features.has(feature);
|
||||||
|
}
|
||||||
|
|
||||||
|
getOptions() {
|
||||||
|
return {
|
||||||
|
numUnits: this.totalUnits,
|
||||||
|
unitName: this.unitName,
|
||||||
|
spinnerFrames: this.spinnerFrames,
|
||||||
|
features: Array.from(this.features)
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builder for creating configured progress trackers
|
||||||
|
*/
|
||||||
|
export class ProgressTrackerBuilder {
|
||||||
|
constructor() {
|
||||||
|
this.config = new TrackerConfig();
|
||||||
|
}
|
||||||
|
|
||||||
|
withPercent() {
|
||||||
|
this.config.addFeature('percent');
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
withTokens() {
|
||||||
|
this.config.addFeature('tokens');
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
withTasks() {
|
||||||
|
this.config.addFeature('tasks');
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
withSpinner(messages) {
|
||||||
|
if (!messages || !Array.isArray(messages)) {
|
||||||
|
throw new Error('Spinner messages must be an array');
|
||||||
|
}
|
||||||
|
this.config.spinnerFrames = messages;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
withUnits(total, unitName = 'unit') {
|
||||||
|
this.config.totalUnits = total;
|
||||||
|
this.config.unitName = unitName;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
build() {
|
||||||
|
return new ProgressTracker(this.config);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base progress tracker with configurable features
|
||||||
|
*/
|
||||||
|
class ProgressTracker {
|
||||||
|
constructor(config) {
|
||||||
|
this.config = config;
|
||||||
|
this.isActive = false;
|
||||||
|
this.current = 0;
|
||||||
|
this.spinnerIndex = 0;
|
||||||
|
this.startTime = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
start() {
|
||||||
|
this.isActive = true;
|
||||||
|
this.startTime = Date.now();
|
||||||
|
this.current = 0;
|
||||||
|
|
||||||
|
if (this.config.spinnerFrames) {
|
||||||
|
this._startSpinner();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
update(data = {}) {
|
||||||
|
if (!this.isActive) return;
|
||||||
|
|
||||||
|
if (data.current !== undefined) {
|
||||||
|
this.current = data.current;
|
||||||
|
}
|
||||||
|
|
||||||
|
const progress = this._buildProgressData(data);
|
||||||
|
return progress;
|
||||||
|
}
|
||||||
|
|
||||||
|
finish() {
|
||||||
|
this.isActive = false;
|
||||||
|
|
||||||
|
if (this.spinnerInterval) {
|
||||||
|
clearInterval(this.spinnerInterval);
|
||||||
|
this.spinnerInterval = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return this._buildSummary();
|
||||||
|
}
|
||||||
|
|
||||||
|
_startSpinner() {
|
||||||
|
this.spinnerInterval = setInterval(() => {
|
||||||
|
this.spinnerIndex =
|
||||||
|
(this.spinnerIndex + 1) % this.config.spinnerFrames.length;
|
||||||
|
}, 100);
|
||||||
|
}
|
||||||
|
|
||||||
|
_buildProgressData(data) {
|
||||||
|
const progress = { ...data };
|
||||||
|
|
||||||
|
if (this.config.hasFeature('percent')) {
|
||||||
|
progress.percentage = Math.round(
|
||||||
|
(this.current / this.config.totalUnits) * 100
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.config.hasFeature('tasks')) {
|
||||||
|
progress.tasks = `${this.current}/${this.config.totalUnits}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.config.spinnerFrames) {
|
||||||
|
progress.spinner = this.config.spinnerFrames[this.spinnerIndex];
|
||||||
|
}
|
||||||
|
|
||||||
|
return progress;
|
||||||
|
}
|
||||||
|
|
||||||
|
_buildSummary() {
|
||||||
|
const elapsed = Date.now() - this.startTime;
|
||||||
|
return {
|
||||||
|
total: this.config.totalUnits,
|
||||||
|
completed: this.current,
|
||||||
|
elapsedMs: elapsed,
|
||||||
|
features: Array.from(this.config.features)
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
159
src/progress/tracker-ui.js
Normal file
159
src/progress/tracker-ui.js
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
import chalk from 'chalk';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Factory for creating progress bar elements
|
||||||
|
*/
|
||||||
|
class ProgressBarFactory {
|
||||||
|
constructor(multibar) {
|
||||||
|
if (!multibar) {
|
||||||
|
throw new Error('Multibar instance is required');
|
||||||
|
}
|
||||||
|
this.multibar = multibar;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a progress bar with the given format
|
||||||
|
*/
|
||||||
|
createBar(format, payload = {}) {
|
||||||
|
if (typeof format !== 'string') {
|
||||||
|
throw new Error('Format must be a string');
|
||||||
|
}
|
||||||
|
|
||||||
|
const bar = this.multibar.create(
|
||||||
|
1, // total
|
||||||
|
1, // current
|
||||||
|
{},
|
||||||
|
{
|
||||||
|
format,
|
||||||
|
barsize: 1,
|
||||||
|
hideCursor: true,
|
||||||
|
clearOnComplete: false
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
bar.update(1, payload);
|
||||||
|
return bar;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a header with borders
|
||||||
|
*/
|
||||||
|
createHeader(headerFormat, borderFormat) {
|
||||||
|
this.createBar(borderFormat); // Top border
|
||||||
|
this.createBar(headerFormat); // Header
|
||||||
|
this.createBar(borderFormat); // Bottom border
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a data row
|
||||||
|
*/
|
||||||
|
createRow(rowFormat, payload) {
|
||||||
|
if (!payload || typeof payload !== 'object') {
|
||||||
|
throw new Error('Payload must be an object');
|
||||||
|
}
|
||||||
|
return this.createBar(rowFormat, payload);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a border element
|
||||||
|
*/
|
||||||
|
createBorder(borderFormat) {
|
||||||
|
return this.createBar(borderFormat);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a bordered header for progress tables.
|
||||||
|
* @param {Object} multibar - The multibar instance.
|
||||||
|
* @param {string} headerFormat - Format string for the header row.
|
||||||
|
* @param {string} borderFormat - Format string for the top and bottom borders.
|
||||||
|
* @returns {void}
|
||||||
|
*/
|
||||||
|
export function createProgressHeader(multibar, headerFormat, borderFormat) {
|
||||||
|
const factory = new ProgressBarFactory(multibar);
|
||||||
|
factory.createHeader(headerFormat, borderFormat);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a formatted data row for progress tables.
|
||||||
|
* @param {Object} multibar - The multibar instance.
|
||||||
|
* @param {string} rowFormat - Format string for the row.
|
||||||
|
* @param {Object} payload - Data payload for the row format.
|
||||||
|
* @returns {void}
|
||||||
|
*/
|
||||||
|
export function createProgressRow(multibar, rowFormat, payload) {
|
||||||
|
const factory = new ProgressBarFactory(multibar);
|
||||||
|
factory.createRow(rowFormat, payload);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a border row for progress tables.
|
||||||
|
* @param {Object} multibar - The multibar instance.
|
||||||
|
* @param {string} borderFormat - Format string for the border.
|
||||||
|
* @returns {void}
|
||||||
|
*/
|
||||||
|
export function createBorder(multibar, borderFormat) {
|
||||||
|
const factory = new ProgressBarFactory(multibar);
|
||||||
|
factory.createBorder(borderFormat);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builder for creating progress tables with consistent formatting
|
||||||
|
*/
|
||||||
|
export class ProgressTableBuilder {
|
||||||
|
constructor(multibar) {
|
||||||
|
this.factory = new ProgressBarFactory(multibar);
|
||||||
|
this.borderStyle = '─';
|
||||||
|
this.columnSeparator = '|';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Shows a formatted table header
|
||||||
|
*/
|
||||||
|
showHeader(columns = null) {
|
||||||
|
// Default columns for task display
|
||||||
|
const defaultColumns = [
|
||||||
|
{ text: 'TASK', width: 6 },
|
||||||
|
{ text: 'PRI', width: 5 },
|
||||||
|
{ text: 'TITLE', width: 64 }
|
||||||
|
];
|
||||||
|
|
||||||
|
const cols = columns || defaultColumns;
|
||||||
|
const headerText = ' ' + cols.map((c) => c.text).join(' | ') + ' ';
|
||||||
|
const borderLine = this.createBorderLine(cols.map((c) => c.width));
|
||||||
|
|
||||||
|
this.factory.createHeader(headerText, borderLine);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a border line based on column widths
|
||||||
|
*/
|
||||||
|
createBorderLine(columnWidths) {
|
||||||
|
return columnWidths
|
||||||
|
.map((width) => this.borderStyle.repeat(width))
|
||||||
|
.join('─┼─');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a task row to the table
|
||||||
|
*/
|
||||||
|
addTaskRow(taskId, priority, title) {
|
||||||
|
const format = ` ${taskId} | ${priority} | {title}`;
|
||||||
|
this.factory.createRow(format, { title });
|
||||||
|
|
||||||
|
// Add separator after each row
|
||||||
|
const borderLine = '------+-----+' + '─'.repeat(64);
|
||||||
|
this.factory.createBorder(borderLine);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a summary row
|
||||||
|
*/
|
||||||
|
addSummaryRow(label, value) {
|
||||||
|
const format = ` ${label}: {value}`;
|
||||||
|
this.factory.createRow(format, { value });
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
}
|
||||||
273
src/ui/indicators.js
Normal file
273
src/ui/indicators.js
Normal file
@@ -0,0 +1,273 @@
|
|||||||
|
/**
|
||||||
|
* indicators.js
|
||||||
|
* UI functions for displaying priority and complexity indicators in different contexts
|
||||||
|
*/
|
||||||
|
|
||||||
|
import chalk from 'chalk';
|
||||||
|
import { TASK_PRIORITY_OPTIONS } from '../constants/task-priority.js';
|
||||||
|
|
||||||
|
// Extract priority values for cleaner object keys
|
||||||
|
const [HIGH, MEDIUM, LOW] = TASK_PRIORITY_OPTIONS;
|
||||||
|
|
||||||
|
// Cache for generated indicators
|
||||||
|
const INDICATOR_CACHE = new Map();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base configuration for indicator systems
|
||||||
|
*/
|
||||||
|
class IndicatorConfig {
|
||||||
|
constructor(name, levels, colors, thresholds = null) {
|
||||||
|
this.name = name;
|
||||||
|
this.levels = levels;
|
||||||
|
this.colors = colors;
|
||||||
|
this.thresholds = thresholds;
|
||||||
|
}
|
||||||
|
|
||||||
|
getColor(level) {
|
||||||
|
return this.colors[level] || chalk.gray;
|
||||||
|
}
|
||||||
|
|
||||||
|
getLevelFromScore(score) {
|
||||||
|
if (!this.thresholds) {
|
||||||
|
throw new Error(`${this.name} does not support score-based levels`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (score >= 7) return this.levels[0]; // high
|
||||||
|
if (score <= 3) return this.levels[2]; // low
|
||||||
|
return this.levels[1]; // medium
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Visual style definitions
|
||||||
|
*/
|
||||||
|
const VISUAL_STYLES = {
|
||||||
|
cli: {
|
||||||
|
filled: '●', // ●
|
||||||
|
empty: '○' // ○
|
||||||
|
},
|
||||||
|
statusBar: {
|
||||||
|
high: '⋮', // ⋮
|
||||||
|
medium: ':', // :
|
||||||
|
low: '.' // .
|
||||||
|
},
|
||||||
|
mcp: {
|
||||||
|
high: '🔴', // 🔴
|
||||||
|
medium: '🟠', // 🟠
|
||||||
|
low: '🟢' // 🟢
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Priority configuration
|
||||||
|
*/
|
||||||
|
const PRIORITY_CONFIG = new IndicatorConfig('priority', [HIGH, MEDIUM, LOW], {
|
||||||
|
[HIGH]: chalk.hex('#CC0000'),
|
||||||
|
[MEDIUM]: chalk.hex('#FF8800'),
|
||||||
|
[LOW]: chalk.yellow
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generates CLI indicator with intensity
|
||||||
|
*/
|
||||||
|
function generateCliIndicator(intensity, color) {
|
||||||
|
const filled = VISUAL_STYLES.cli.filled;
|
||||||
|
const empty = VISUAL_STYLES.cli.empty;
|
||||||
|
|
||||||
|
let indicator = '';
|
||||||
|
for (let i = 0; i < 3; i++) {
|
||||||
|
if (i < intensity) {
|
||||||
|
indicator += color(filled);
|
||||||
|
} else {
|
||||||
|
indicator += chalk.white(empty);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return indicator;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get intensity level from priority/complexity level
|
||||||
|
*/
|
||||||
|
function getIntensityFromLevel(level, levels) {
|
||||||
|
const index = levels.indexOf(level);
|
||||||
|
return 3 - index; // high=3, medium=2, low=1
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generic cached indicator getter
|
||||||
|
* @param {string} cacheKey - Cache key for the indicators
|
||||||
|
* @param {Function} generator - Function to generate the indicators
|
||||||
|
* @returns {Object} Cached or newly generated indicators
|
||||||
|
*/
|
||||||
|
function getCachedIndicators(cacheKey, generator) {
|
||||||
|
if (INDICATOR_CACHE.has(cacheKey)) {
|
||||||
|
return INDICATOR_CACHE.get(cacheKey);
|
||||||
|
}
|
||||||
|
|
||||||
|
const indicators = generator();
|
||||||
|
INDICATOR_CACHE.set(cacheKey, indicators);
|
||||||
|
return indicators;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get priority indicators for MCP context (single emojis)
|
||||||
|
* @returns {Object} Priority to emoji mapping
|
||||||
|
*/
|
||||||
|
export function getMcpPriorityIndicators() {
|
||||||
|
return getCachedIndicators('mcp-priority-all', () => ({
|
||||||
|
[HIGH]: VISUAL_STYLES.mcp.high,
|
||||||
|
[MEDIUM]: VISUAL_STYLES.mcp.medium,
|
||||||
|
[LOW]: VISUAL_STYLES.mcp.low
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get priority indicators for CLI context (colored dots with visual hierarchy)
|
||||||
|
* @returns {Object} Priority to colored dot string mapping
|
||||||
|
*/
|
||||||
|
export function getCliPriorityIndicators() {
|
||||||
|
return getCachedIndicators('cli-priority-all', () => {
|
||||||
|
const indicators = {};
|
||||||
|
PRIORITY_CONFIG.levels.forEach((level) => {
|
||||||
|
const intensity = getIntensityFromLevel(level, PRIORITY_CONFIG.levels);
|
||||||
|
const color = PRIORITY_CONFIG.getColor(level);
|
||||||
|
indicators[level] = generateCliIndicator(intensity, color);
|
||||||
|
});
|
||||||
|
return indicators;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get priority indicators for status bars (simplified single character versions)
|
||||||
|
* @returns {Object} Priority to single character indicator mapping
|
||||||
|
*/
|
||||||
|
export function getStatusBarPriorityIndicators() {
|
||||||
|
return getCachedIndicators('statusbar-priority-all', () => {
|
||||||
|
const indicators = {};
|
||||||
|
PRIORITY_CONFIG.levels.forEach((level, index) => {
|
||||||
|
const style =
|
||||||
|
index === 0
|
||||||
|
? VISUAL_STYLES.statusBar.high
|
||||||
|
: index === 1
|
||||||
|
? VISUAL_STYLES.statusBar.medium
|
||||||
|
: VISUAL_STYLES.statusBar.low;
|
||||||
|
const color = PRIORITY_CONFIG.getColor(level);
|
||||||
|
indicators[level] = color(style);
|
||||||
|
});
|
||||||
|
return indicators;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get priority colors for consistent styling
|
||||||
|
* @returns {Object} Priority to chalk color function mapping
|
||||||
|
*/
|
||||||
|
export function getPriorityColors() {
|
||||||
|
return {
|
||||||
|
[HIGH]: PRIORITY_CONFIG.colors[HIGH],
|
||||||
|
[MEDIUM]: PRIORITY_CONFIG.colors[MEDIUM],
|
||||||
|
[LOW]: PRIORITY_CONFIG.colors[LOW]
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get priority indicators based on context
|
||||||
|
* @param {boolean} isMcp - Whether this is for MCP context (true) or CLI context (false)
|
||||||
|
* @returns {Object} Priority to indicator mapping
|
||||||
|
*/
|
||||||
|
export function getPriorityIndicators(isMcp = false) {
|
||||||
|
return isMcp ? getMcpPriorityIndicators() : getCliPriorityIndicators();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a specific priority indicator
|
||||||
|
* @param {string} priority - The priority level ('high', 'medium', 'low')
|
||||||
|
* @param {boolean} isMcp - Whether this is for MCP context
|
||||||
|
* @returns {string} The indicator string for the priority
|
||||||
|
*/
|
||||||
|
export function getPriorityIndicator(priority, isMcp = false) {
|
||||||
|
const indicators = getPriorityIndicators(isMcp);
|
||||||
|
return indicators[priority] || indicators[MEDIUM];
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Complexity Indicators
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Complexity configuration
|
||||||
|
*/
|
||||||
|
const COMPLEXITY_CONFIG = new IndicatorConfig(
|
||||||
|
'complexity',
|
||||||
|
['high', 'medium', 'low'],
|
||||||
|
{
|
||||||
|
high: chalk.hex('#CC0000'),
|
||||||
|
medium: chalk.hex('#FF8800'),
|
||||||
|
low: chalk.green
|
||||||
|
},
|
||||||
|
{
|
||||||
|
high: (score) => score >= 7,
|
||||||
|
medium: (score) => score >= 4 && score <= 6,
|
||||||
|
low: (score) => score <= 3
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get complexity indicators for CLI context (colored dots with visual hierarchy)
|
||||||
|
* Complexity scores: 1-3 (low), 4-6 (medium), 7-10 (high)
|
||||||
|
* @returns {Object} Complexity level to colored dot string mapping
|
||||||
|
*/
|
||||||
|
export function getCliComplexityIndicators() {
|
||||||
|
return getCachedIndicators('cli-complexity-all', () => {
|
||||||
|
const indicators = {};
|
||||||
|
COMPLEXITY_CONFIG.levels.forEach((level) => {
|
||||||
|
const intensity = getIntensityFromLevel(level, COMPLEXITY_CONFIG.levels);
|
||||||
|
const color = COMPLEXITY_CONFIG.getColor(level);
|
||||||
|
indicators[level] = generateCliIndicator(intensity, color);
|
||||||
|
});
|
||||||
|
return indicators;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get complexity indicators for status bars (simplified single character versions)
|
||||||
|
* @returns {Object} Complexity level to single character indicator mapping
|
||||||
|
*/
|
||||||
|
export function getStatusBarComplexityIndicators() {
|
||||||
|
return getCachedIndicators('statusbar-complexity-all', () => {
|
||||||
|
const indicators = {};
|
||||||
|
COMPLEXITY_CONFIG.levels.forEach((level, index) => {
|
||||||
|
const style =
|
||||||
|
index === 0
|
||||||
|
? VISUAL_STYLES.statusBar.high
|
||||||
|
: index === 1
|
||||||
|
? VISUAL_STYLES.statusBar.medium
|
||||||
|
: VISUAL_STYLES.statusBar.low;
|
||||||
|
const color = COMPLEXITY_CONFIG.getColor(level);
|
||||||
|
indicators[level] = color(style);
|
||||||
|
});
|
||||||
|
return indicators;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get complexity colors for consistent styling
|
||||||
|
* @returns {Object} Complexity level to chalk color function mapping
|
||||||
|
*/
|
||||||
|
export function getComplexityColors() {
|
||||||
|
return { ...COMPLEXITY_CONFIG.colors };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a specific complexity indicator based on score
|
||||||
|
* @param {number} score - The complexity score (1-10)
|
||||||
|
* @param {boolean} statusBar - Whether to return status bar version (single char)
|
||||||
|
* @returns {string} The indicator string for the complexity level
|
||||||
|
*/
|
||||||
|
export function getComplexityIndicator(score, statusBar = false) {
|
||||||
|
const level = COMPLEXITY_CONFIG.getLevelFromScore(score);
|
||||||
|
const indicators = statusBar
|
||||||
|
? getStatusBarComplexityIndicators()
|
||||||
|
: getCliComplexityIndicators();
|
||||||
|
return indicators[level];
|
||||||
|
}
|
||||||
477
src/ui/parse-prd.js
Normal file
477
src/ui/parse-prd.js
Normal file
@@ -0,0 +1,477 @@
|
|||||||
|
/**
|
||||||
|
* parse-prd.js
|
||||||
|
* UI functions specifically for PRD parsing operations
|
||||||
|
*/
|
||||||
|
|
||||||
|
import chalk from 'chalk';
|
||||||
|
import boxen from 'boxen';
|
||||||
|
import Table from 'cli-table3';
|
||||||
|
import { formatElapsedTime } from '../utils/format.js';
|
||||||
|
|
||||||
|
// Constants
|
||||||
|
const CONSTANTS = {
|
||||||
|
BAR_WIDTH: 40,
|
||||||
|
TABLE_COL_WIDTHS: [28, 50],
|
||||||
|
DEFAULT_MODEL: 'Default',
|
||||||
|
DEFAULT_TEMPERATURE: 0.7
|
||||||
|
};
|
||||||
|
|
||||||
|
const PRIORITIES = {
|
||||||
|
HIGH: 'high',
|
||||||
|
MEDIUM: 'medium',
|
||||||
|
LOW: 'low'
|
||||||
|
};
|
||||||
|
|
||||||
|
const PRIORITY_COLORS = {
|
||||||
|
[PRIORITIES.HIGH]: '#CC0000',
|
||||||
|
[PRIORITIES.MEDIUM]: '#FF8800',
|
||||||
|
[PRIORITIES.LOW]: '#FFCC00'
|
||||||
|
};
|
||||||
|
|
||||||
|
// Reusable box styles
|
||||||
|
const BOX_STYLES = {
|
||||||
|
main: {
|
||||||
|
padding: { top: 1, bottom: 1, left: 2, right: 2 },
|
||||||
|
margin: { top: 0, bottom: 0 },
|
||||||
|
borderColor: 'blue',
|
||||||
|
borderStyle: 'round'
|
||||||
|
},
|
||||||
|
summary: {
|
||||||
|
padding: { top: 1, right: 1, bottom: 1, left: 1 },
|
||||||
|
borderColor: 'blue',
|
||||||
|
borderStyle: 'round',
|
||||||
|
margin: { top: 1, right: 1, bottom: 1, left: 0 }
|
||||||
|
},
|
||||||
|
warning: {
|
||||||
|
padding: 1,
|
||||||
|
borderColor: 'yellow',
|
||||||
|
borderStyle: 'round',
|
||||||
|
margin: { top: 1, bottom: 1 }
|
||||||
|
},
|
||||||
|
nextSteps: {
|
||||||
|
padding: 1,
|
||||||
|
borderColor: 'cyan',
|
||||||
|
borderStyle: 'round',
|
||||||
|
margin: { top: 1, right: 0, bottom: 1, left: 0 }
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper function for building main message content
|
||||||
|
* @param {Object} params - Message parameters
|
||||||
|
* @param {string} params.prdFilePath - Path to the PRD file
|
||||||
|
* @param {string} params.outputPath - Path where tasks will be saved
|
||||||
|
* @param {number} params.numTasks - Number of tasks to generate
|
||||||
|
* @param {string} params.model - AI model name
|
||||||
|
* @param {number} params.temperature - AI temperature setting
|
||||||
|
* @param {boolean} params.append - Whether appending to existing tasks
|
||||||
|
* @param {boolean} params.research - Whether research mode is enabled
|
||||||
|
* @returns {string} The formatted message content
|
||||||
|
*/
|
||||||
|
function buildMainMessage({
|
||||||
|
prdFilePath,
|
||||||
|
outputPath,
|
||||||
|
numTasks,
|
||||||
|
model,
|
||||||
|
temperature,
|
||||||
|
append,
|
||||||
|
research
|
||||||
|
}) {
|
||||||
|
const actionVerb = append ? 'Appending' : 'Generating';
|
||||||
|
|
||||||
|
let modelLine = `Model: ${model} | Temperature: ${temperature}`;
|
||||||
|
if (research) {
|
||||||
|
modelLine += ` | ${chalk.cyan.bold('🔬 Research Mode')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
chalk.bold(`🤖 Parsing PRD and ${actionVerb} Tasks`) +
|
||||||
|
'\n' +
|
||||||
|
chalk.dim(modelLine) +
|
||||||
|
'\n\n' +
|
||||||
|
chalk.blue(`Input: ${prdFilePath}`) +
|
||||||
|
'\n' +
|
||||||
|
chalk.blue(`Output: ${outputPath}`) +
|
||||||
|
'\n' +
|
||||||
|
chalk.blue(`Tasks to ${append ? 'Append' : 'Generate'}: ${numTasks}`)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper function for displaying the main message box
|
||||||
|
* @param {string} message - The message content to display in the box
|
||||||
|
*/
|
||||||
|
function displayMainMessageBox(message) {
|
||||||
|
console.log(boxen(message, BOX_STYLES.main));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper function for displaying append mode notice
|
||||||
|
* @param {number} existingTasksCount - Number of existing tasks
|
||||||
|
* @param {number} nextId - Next ID to be used
|
||||||
|
*/
|
||||||
|
function displayAppendModeNotice(existingTasksCount, nextId) {
|
||||||
|
console.log(
|
||||||
|
chalk.yellow.bold('📝 Append mode') +
|
||||||
|
` - Adding to ${existingTasksCount} existing tasks (next ID: ${nextId})`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper function for force mode messages
|
||||||
|
* @param {boolean} append - Whether in append mode
|
||||||
|
* @returns {string} The formatted force mode message
|
||||||
|
*/
|
||||||
|
function createForceMessage(append) {
|
||||||
|
const baseMessage = chalk.red.bold('⚠️ Force flag enabled');
|
||||||
|
return append
|
||||||
|
? `${baseMessage} - Will overwrite if conflicts occur`
|
||||||
|
: `${baseMessage} - Overwriting existing tasks`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display the start of PRD parsing with a boxen announcement
|
||||||
|
* @param {Object} options - Options for PRD parsing start
|
||||||
|
* @param {string} options.prdFilePath - Path to the PRD file being parsed
|
||||||
|
* @param {string} options.outputPath - Path where the tasks will be saved
|
||||||
|
* @param {number} options.numTasks - Number of tasks to generate
|
||||||
|
* @param {string} [options.model] - AI model name
|
||||||
|
* @param {number} [options.temperature] - AI temperature setting
|
||||||
|
* @param {boolean} [options.append=false] - Whether to append to existing tasks
|
||||||
|
* @param {boolean} [options.research=false] - Whether research mode is enabled
|
||||||
|
* @param {boolean} [options.force=false] - Whether force mode is enabled
|
||||||
|
* @param {Array} [options.existingTasks=[]] - Existing tasks array
|
||||||
|
* @param {number} [options.nextId=1] - Next ID to be used
|
||||||
|
*/
|
||||||
|
function displayParsePrdStart({
|
||||||
|
prdFilePath,
|
||||||
|
outputPath,
|
||||||
|
numTasks,
|
||||||
|
model = CONSTANTS.DEFAULT_MODEL,
|
||||||
|
temperature = CONSTANTS.DEFAULT_TEMPERATURE,
|
||||||
|
append = false,
|
||||||
|
research = false,
|
||||||
|
force = false,
|
||||||
|
existingTasks = [],
|
||||||
|
nextId = 1
|
||||||
|
}) {
|
||||||
|
// Input validation
|
||||||
|
if (
|
||||||
|
!prdFilePath ||
|
||||||
|
typeof prdFilePath !== 'string' ||
|
||||||
|
prdFilePath.trim() === ''
|
||||||
|
) {
|
||||||
|
throw new Error('prdFilePath is required and must be a non-empty string');
|
||||||
|
}
|
||||||
|
if (
|
||||||
|
!outputPath ||
|
||||||
|
typeof outputPath !== 'string' ||
|
||||||
|
outputPath.trim() === ''
|
||||||
|
) {
|
||||||
|
throw new Error('outputPath is required and must be a non-empty string');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build and display the main message box
|
||||||
|
const message = buildMainMessage({
|
||||||
|
prdFilePath,
|
||||||
|
outputPath,
|
||||||
|
numTasks,
|
||||||
|
model,
|
||||||
|
temperature,
|
||||||
|
append,
|
||||||
|
research
|
||||||
|
});
|
||||||
|
displayMainMessageBox(message);
|
||||||
|
|
||||||
|
// Display append/force notices beneath the boxen if either flag is set
|
||||||
|
if (append || force) {
|
||||||
|
// Add append mode details if enabled
|
||||||
|
if (append) {
|
||||||
|
displayAppendModeNotice(existingTasks.length, nextId);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add force mode details if enabled
|
||||||
|
if (force) {
|
||||||
|
console.log(createForceMessage(append));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a blank line after notices for spacing
|
||||||
|
console.log();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculate priority statistics
|
||||||
|
* @param {Object} taskPriorities - Priority counts object
|
||||||
|
* @param {number} totalTasks - Total number of tasks
|
||||||
|
* @returns {Object} Priority statistics with counts and percentages
|
||||||
|
*/
|
||||||
|
function calculatePriorityStats(taskPriorities, totalTasks) {
|
||||||
|
const stats = {};
|
||||||
|
|
||||||
|
Object.values(PRIORITIES).forEach((priority) => {
|
||||||
|
const count = taskPriorities[priority] || 0;
|
||||||
|
stats[priority] = {
|
||||||
|
count,
|
||||||
|
percentage: totalTasks > 0 ? Math.round((count / totalTasks) * 100) : 0
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
return stats;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculate bar character distribution for priorities
|
||||||
|
* @param {Object} priorityStats - Priority statistics
|
||||||
|
* @param {number} totalTasks - Total number of tasks
|
||||||
|
* @returns {Object} Character counts for each priority
|
||||||
|
*/
|
||||||
|
function calculateBarDistribution(priorityStats, totalTasks) {
|
||||||
|
const barWidth = CONSTANTS.BAR_WIDTH;
|
||||||
|
const distribution = {};
|
||||||
|
|
||||||
|
if (totalTasks === 0) {
|
||||||
|
Object.values(PRIORITIES).forEach((priority) => {
|
||||||
|
distribution[priority] = 0;
|
||||||
|
});
|
||||||
|
return distribution;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate raw proportions
|
||||||
|
const rawChars = {};
|
||||||
|
Object.values(PRIORITIES).forEach((priority) => {
|
||||||
|
rawChars[priority] =
|
||||||
|
(priorityStats[priority].count / totalTasks) * barWidth;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Initial distribution - floor values
|
||||||
|
Object.values(PRIORITIES).forEach((priority) => {
|
||||||
|
distribution[priority] = Math.floor(rawChars[priority]);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Ensure non-zero priorities get at least 1 character
|
||||||
|
Object.values(PRIORITIES).forEach((priority) => {
|
||||||
|
if (priorityStats[priority].count > 0 && distribution[priority] === 0) {
|
||||||
|
distribution[priority] = 1;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Distribute remaining characters based on decimal parts
|
||||||
|
const currentTotal = Object.values(distribution).reduce(
|
||||||
|
(sum, val) => sum + val,
|
||||||
|
0
|
||||||
|
);
|
||||||
|
const remainingChars = barWidth - currentTotal;
|
||||||
|
|
||||||
|
if (remainingChars > 0) {
|
||||||
|
const decimals = Object.values(PRIORITIES)
|
||||||
|
.map((priority) => ({
|
||||||
|
priority,
|
||||||
|
decimal: rawChars[priority] - Math.floor(rawChars[priority])
|
||||||
|
}))
|
||||||
|
.sort((a, b) => b.decimal - a.decimal);
|
||||||
|
|
||||||
|
for (let i = 0; i < remainingChars && i < decimals.length; i++) {
|
||||||
|
distribution[decimals[i].priority]++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return distribution;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create priority distribution bar visual
|
||||||
|
* @param {Object} barDistribution - Character distribution for priorities
|
||||||
|
* @returns {string} Visual bar string
|
||||||
|
*/
|
||||||
|
function createPriorityBar(barDistribution) {
|
||||||
|
let bar = '';
|
||||||
|
|
||||||
|
bar += chalk.hex(PRIORITY_COLORS[PRIORITIES.HIGH])(
|
||||||
|
'█'.repeat(barDistribution[PRIORITIES.HIGH])
|
||||||
|
);
|
||||||
|
bar += chalk.hex(PRIORITY_COLORS[PRIORITIES.MEDIUM])(
|
||||||
|
'█'.repeat(barDistribution[PRIORITIES.MEDIUM])
|
||||||
|
);
|
||||||
|
bar += chalk.yellow('█'.repeat(barDistribution[PRIORITIES.LOW]));
|
||||||
|
|
||||||
|
const totalChars = Object.values(barDistribution).reduce(
|
||||||
|
(sum, val) => sum + val,
|
||||||
|
0
|
||||||
|
);
|
||||||
|
if (totalChars < CONSTANTS.BAR_WIDTH) {
|
||||||
|
bar += chalk.gray('░'.repeat(CONSTANTS.BAR_WIDTH - totalChars));
|
||||||
|
}
|
||||||
|
|
||||||
|
return bar;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build priority distribution row for table
|
||||||
|
* @param {Object} priorityStats - Priority statistics
|
||||||
|
* @returns {Array} Table row for priority distribution
|
||||||
|
*/
|
||||||
|
function buildPriorityRow(priorityStats) {
|
||||||
|
const parts = [];
|
||||||
|
|
||||||
|
Object.entries(PRIORITIES).forEach(([key, priority]) => {
|
||||||
|
const stats = priorityStats[priority];
|
||||||
|
const color =
|
||||||
|
priority === PRIORITIES.HIGH
|
||||||
|
? chalk.hex(PRIORITY_COLORS[PRIORITIES.HIGH])
|
||||||
|
: priority === PRIORITIES.MEDIUM
|
||||||
|
? chalk.hex(PRIORITY_COLORS[PRIORITIES.MEDIUM])
|
||||||
|
: chalk.yellow;
|
||||||
|
|
||||||
|
const label = key.charAt(0) + key.slice(1).toLowerCase();
|
||||||
|
parts.push(
|
||||||
|
`${color.bold(stats.count)} ${color(label)} (${stats.percentage}%)`
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
return [chalk.cyan('Priority distribution:'), parts.join(' · ')];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display a summary of the PRD parsing results
|
||||||
|
* @param {Object} summary - Summary of the parsing results
|
||||||
|
* @param {number} summary.totalTasks - Total number of tasks generated
|
||||||
|
* @param {string} summary.prdFilePath - Path to the PRD file
|
||||||
|
* @param {string} summary.outputPath - Path where the tasks were saved
|
||||||
|
* @param {number} summary.elapsedTime - Total elapsed time in seconds
|
||||||
|
* @param {Object} summary.taskPriorities - Breakdown of tasks by category/priority
|
||||||
|
* @param {boolean} summary.usedFallback - Whether fallback parsing was used
|
||||||
|
* @param {string} summary.actionVerb - Whether tasks were 'generated' or 'appended'
|
||||||
|
*/
|
||||||
|
function displayParsePrdSummary(summary) {
|
||||||
|
const {
|
||||||
|
totalTasks,
|
||||||
|
taskPriorities = {},
|
||||||
|
prdFilePath,
|
||||||
|
outputPath,
|
||||||
|
elapsedTime,
|
||||||
|
usedFallback = false,
|
||||||
|
actionVerb = 'generated'
|
||||||
|
} = summary;
|
||||||
|
|
||||||
|
// Format the elapsed time
|
||||||
|
const timeDisplay = formatElapsedTime(elapsedTime);
|
||||||
|
|
||||||
|
// Create a table for better alignment
|
||||||
|
const table = new Table({
|
||||||
|
chars: {
|
||||||
|
top: '',
|
||||||
|
'top-mid': '',
|
||||||
|
'top-left': '',
|
||||||
|
'top-right': '',
|
||||||
|
bottom: '',
|
||||||
|
'bottom-mid': '',
|
||||||
|
'bottom-left': '',
|
||||||
|
'bottom-right': '',
|
||||||
|
left: '',
|
||||||
|
'left-mid': '',
|
||||||
|
mid: '',
|
||||||
|
'mid-mid': '',
|
||||||
|
right: '',
|
||||||
|
'right-mid': '',
|
||||||
|
middle: ' '
|
||||||
|
},
|
||||||
|
style: { border: [], 'padding-left': 2 },
|
||||||
|
colWidths: CONSTANTS.TABLE_COL_WIDTHS
|
||||||
|
});
|
||||||
|
|
||||||
|
// Basic info
|
||||||
|
// Use the action verb to properly display if tasks were generated or appended
|
||||||
|
table.push(
|
||||||
|
[chalk.cyan(`Total tasks ${actionVerb}:`), chalk.bold(totalTasks)],
|
||||||
|
[chalk.cyan('Processing time:'), chalk.bold(timeDisplay)]
|
||||||
|
);
|
||||||
|
|
||||||
|
// Priority distribution if available
|
||||||
|
if (taskPriorities && Object.keys(taskPriorities).length > 0) {
|
||||||
|
const priorityStats = calculatePriorityStats(taskPriorities, totalTasks);
|
||||||
|
const priorityRow = buildPriorityRow(priorityStats);
|
||||||
|
table.push(priorityRow);
|
||||||
|
|
||||||
|
// Visual bar representation
|
||||||
|
const barDistribution = calculateBarDistribution(priorityStats, totalTasks);
|
||||||
|
const distributionBar = createPriorityBar(barDistribution);
|
||||||
|
table.push([chalk.cyan('Distribution:'), distributionBar]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add file paths
|
||||||
|
table.push(
|
||||||
|
[chalk.cyan('PRD source:'), chalk.italic(prdFilePath)],
|
||||||
|
[chalk.cyan('Tasks file:'), chalk.italic(outputPath)]
|
||||||
|
);
|
||||||
|
|
||||||
|
// Add fallback parsing indicator if applicable
|
||||||
|
if (usedFallback) {
|
||||||
|
table.push([
|
||||||
|
chalk.yellow('Fallback parsing:'),
|
||||||
|
chalk.yellow('✓ Used fallback parsing')
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final string output with title and footer
|
||||||
|
const output = [
|
||||||
|
chalk.bold.underline(
|
||||||
|
`PRD Parsing Complete - Tasks ${actionVerb.charAt(0).toUpperCase() + actionVerb.slice(1)}`
|
||||||
|
),
|
||||||
|
'',
|
||||||
|
table.toString()
|
||||||
|
].join('\n');
|
||||||
|
|
||||||
|
// Display the summary box
|
||||||
|
console.log(boxen(output, BOX_STYLES.summary));
|
||||||
|
|
||||||
|
// Show fallback parsing warning if needed
|
||||||
|
if (usedFallback) {
|
||||||
|
displayFallbackWarning();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Show next steps
|
||||||
|
displayNextSteps();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display fallback parsing warning
|
||||||
|
*/
|
||||||
|
function displayFallbackWarning() {
|
||||||
|
const warningContent =
|
||||||
|
chalk.yellow.bold('⚠️ Fallback Parsing Used') +
|
||||||
|
'\n\n' +
|
||||||
|
chalk.white(
|
||||||
|
'The system used fallback parsing to complete task generation.'
|
||||||
|
) +
|
||||||
|
'\n' +
|
||||||
|
chalk.white(
|
||||||
|
'This typically happens when streaming JSON parsing is incomplete.'
|
||||||
|
) +
|
||||||
|
'\n' +
|
||||||
|
chalk.white('Your tasks were successfully generated, but consider:') +
|
||||||
|
'\n' +
|
||||||
|
chalk.white('• Reviewing task completeness') +
|
||||||
|
'\n' +
|
||||||
|
chalk.white('• Checking for any missing details') +
|
||||||
|
'\n\n' +
|
||||||
|
chalk.white("This is normal and usually doesn't indicate any issues.");
|
||||||
|
|
||||||
|
console.log(boxen(warningContent, BOX_STYLES.warning));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display next steps after parsing
|
||||||
|
*/
|
||||||
|
function displayNextSteps() {
|
||||||
|
const stepsContent =
|
||||||
|
chalk.white.bold('Next Steps:') +
|
||||||
|
'\n\n' +
|
||||||
|
`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\n` +
|
||||||
|
`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks\n` +
|
||||||
|
`${chalk.cyan('3.')} Run ${chalk.yellow('task-master analyze-complexity')} to analyze task complexity`;
|
||||||
|
|
||||||
|
console.log(boxen(stepsContent, BOX_STYLES.nextSteps));
|
||||||
|
}
|
||||||
|
|
||||||
|
export { displayParsePrdStart, displayParsePrdSummary, formatElapsedTime };
|
||||||
12
src/utils/format.js
Normal file
12
src/utils/format.js
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
// src/utils/format.js
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Formats elapsed time as 0m 00s.
|
||||||
|
* @param {number} seconds - Elapsed time in seconds
|
||||||
|
* @returns {string} Formatted time string
|
||||||
|
*/
|
||||||
|
export function formatElapsedTime(seconds) {
|
||||||
|
const minutes = Math.floor(seconds / 60);
|
||||||
|
const remainingSeconds = Math.floor(seconds % 60);
|
||||||
|
return `${minutes}m ${remainingSeconds.toString().padStart(2, '0')}s`;
|
||||||
|
}
|
||||||
490
src/utils/stream-parser.js
Normal file
490
src/utils/stream-parser.js
Normal file
@@ -0,0 +1,490 @@
|
|||||||
|
import { JSONParser } from '@streamparser/json';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Custom error class for streaming-related failures
|
||||||
|
* Provides error codes for robust error handling without string matching
|
||||||
|
*/
|
||||||
|
export class StreamingError extends Error {
|
||||||
|
constructor(message, code) {
|
||||||
|
super(message);
|
||||||
|
this.name = 'StreamingError';
|
||||||
|
this.code = code;
|
||||||
|
|
||||||
|
// Maintain proper stack trace (V8 engines)
|
||||||
|
if (Error.captureStackTrace) {
|
||||||
|
Error.captureStackTrace(this, StreamingError);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Standard streaming error codes
|
||||||
|
*/
|
||||||
|
export const STREAMING_ERROR_CODES = {
|
||||||
|
NOT_ASYNC_ITERABLE: 'STREAMING_NOT_SUPPORTED',
|
||||||
|
STREAM_PROCESSING_FAILED: 'STREAM_PROCESSING_FAILED',
|
||||||
|
STREAM_NOT_ITERABLE: 'STREAM_NOT_ITERABLE',
|
||||||
|
BUFFER_SIZE_EXCEEDED: 'BUFFER_SIZE_EXCEEDED'
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default maximum buffer size (1MB)
|
||||||
|
*/
|
||||||
|
export const DEFAULT_MAX_BUFFER_SIZE = 1024 * 1024; // 1MB in bytes
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Configuration options for the streaming JSON parser
|
||||||
|
*/
|
||||||
|
class StreamParserConfig {
|
||||||
|
constructor(config = {}) {
|
||||||
|
this.jsonPaths = config.jsonPaths;
|
||||||
|
this.onProgress = config.onProgress;
|
||||||
|
this.onError = config.onError;
|
||||||
|
this.estimateTokens =
|
||||||
|
config.estimateTokens || ((text) => Math.ceil(text.length / 4));
|
||||||
|
this.expectedTotal = config.expectedTotal || 0;
|
||||||
|
this.fallbackItemExtractor = config.fallbackItemExtractor;
|
||||||
|
this.itemValidator =
|
||||||
|
config.itemValidator || StreamParserConfig.defaultItemValidator;
|
||||||
|
this.maxBufferSize = config.maxBufferSize || DEFAULT_MAX_BUFFER_SIZE;
|
||||||
|
|
||||||
|
this.validate();
|
||||||
|
}
|
||||||
|
|
||||||
|
validate() {
|
||||||
|
if (!this.jsonPaths || !Array.isArray(this.jsonPaths)) {
|
||||||
|
throw new Error('jsonPaths is required and must be an array');
|
||||||
|
}
|
||||||
|
if (this.jsonPaths.length === 0) {
|
||||||
|
throw new Error('jsonPaths array cannot be empty');
|
||||||
|
}
|
||||||
|
if (this.maxBufferSize <= 0) {
|
||||||
|
throw new Error('maxBufferSize must be positive');
|
||||||
|
}
|
||||||
|
if (this.expectedTotal < 0) {
|
||||||
|
throw new Error('expectedTotal cannot be negative');
|
||||||
|
}
|
||||||
|
if (this.estimateTokens && typeof this.estimateTokens !== 'function') {
|
||||||
|
throw new Error('estimateTokens must be a function');
|
||||||
|
}
|
||||||
|
if (this.onProgress && typeof this.onProgress !== 'function') {
|
||||||
|
throw new Error('onProgress must be a function');
|
||||||
|
}
|
||||||
|
if (this.onError && typeof this.onError !== 'function') {
|
||||||
|
throw new Error('onError must be a function');
|
||||||
|
}
|
||||||
|
if (
|
||||||
|
this.fallbackItemExtractor &&
|
||||||
|
typeof this.fallbackItemExtractor !== 'function'
|
||||||
|
) {
|
||||||
|
throw new Error('fallbackItemExtractor must be a function');
|
||||||
|
}
|
||||||
|
if (this.itemValidator && typeof this.itemValidator !== 'function') {
|
||||||
|
throw new Error('itemValidator must be a function');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static defaultItemValidator(item) {
|
||||||
|
return (
|
||||||
|
item && item.title && typeof item.title === 'string' && item.title.trim()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages progress tracking and metadata
|
||||||
|
*/
|
||||||
|
class ProgressTracker {
|
||||||
|
constructor(config) {
|
||||||
|
this.onProgress = config.onProgress;
|
||||||
|
this.onError = config.onError;
|
||||||
|
this.estimateTokens = config.estimateTokens;
|
||||||
|
this.expectedTotal = config.expectedTotal;
|
||||||
|
this.parsedItems = [];
|
||||||
|
this.accumulatedText = '';
|
||||||
|
}
|
||||||
|
|
||||||
|
addItem(item) {
|
||||||
|
this.parsedItems.push(item);
|
||||||
|
this.reportProgress(item);
|
||||||
|
}
|
||||||
|
|
||||||
|
addText(chunk) {
|
||||||
|
this.accumulatedText += chunk;
|
||||||
|
}
|
||||||
|
|
||||||
|
getMetadata() {
|
||||||
|
return {
|
||||||
|
currentCount: this.parsedItems.length,
|
||||||
|
expectedTotal: this.expectedTotal,
|
||||||
|
accumulatedText: this.accumulatedText,
|
||||||
|
estimatedTokens: this.estimateTokens(this.accumulatedText)
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
reportProgress(item) {
|
||||||
|
if (!this.onProgress) return;
|
||||||
|
|
||||||
|
try {
|
||||||
|
this.onProgress(item, this.getMetadata());
|
||||||
|
} catch (progressError) {
|
||||||
|
this.handleProgressError(progressError);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
handleProgressError(error) {
|
||||||
|
if (this.onError) {
|
||||||
|
this.onError(new Error(`Progress callback failed: ${error.message}`));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handles stream processing with different stream types
|
||||||
|
*/
|
||||||
|
class StreamProcessor {
|
||||||
|
constructor(onChunk) {
|
||||||
|
this.onChunk = onChunk;
|
||||||
|
}
|
||||||
|
|
||||||
|
async process(textStream) {
|
||||||
|
const streamHandler = this.detectStreamType(textStream);
|
||||||
|
await streamHandler(textStream);
|
||||||
|
}
|
||||||
|
|
||||||
|
detectStreamType(textStream) {
|
||||||
|
// Check for textStream property
|
||||||
|
if (this.hasAsyncIterator(textStream?.textStream)) {
|
||||||
|
return (stream) => this.processTextStream(stream.textStream);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for fullStream property
|
||||||
|
if (this.hasAsyncIterator(textStream?.fullStream)) {
|
||||||
|
return (stream) => this.processFullStream(stream.fullStream);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if stream itself is iterable
|
||||||
|
if (this.hasAsyncIterator(textStream)) {
|
||||||
|
return (stream) => this.processDirectStream(stream);
|
||||||
|
}
|
||||||
|
|
||||||
|
throw new StreamingError(
|
||||||
|
'Stream object is not iterable - no textStream, fullStream, or direct async iterator found',
|
||||||
|
STREAMING_ERROR_CODES.STREAM_NOT_ITERABLE
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
hasAsyncIterator(obj) {
|
||||||
|
return obj && typeof obj[Symbol.asyncIterator] === 'function';
|
||||||
|
}
|
||||||
|
|
||||||
|
async processTextStream(stream) {
|
||||||
|
for await (const chunk of stream) {
|
||||||
|
this.onChunk(chunk);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async processFullStream(stream) {
|
||||||
|
for await (const chunk of stream) {
|
||||||
|
if (chunk.type === 'text-delta' && chunk.textDelta) {
|
||||||
|
this.onChunk(chunk.textDelta);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async processDirectStream(stream) {
|
||||||
|
for await (const chunk of stream) {
|
||||||
|
this.onChunk(chunk);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages JSON parsing with the streaming parser
|
||||||
|
*/
|
||||||
|
class JSONStreamParser {
|
||||||
|
constructor(config, progressTracker) {
|
||||||
|
this.config = config;
|
||||||
|
this.progressTracker = progressTracker;
|
||||||
|
this.parser = new JSONParser({ paths: config.jsonPaths });
|
||||||
|
this.setupHandlers();
|
||||||
|
}
|
||||||
|
|
||||||
|
setupHandlers() {
|
||||||
|
this.parser.onValue = (value, key, parent, stack) => {
|
||||||
|
this.handleParsedValue(value);
|
||||||
|
};
|
||||||
|
|
||||||
|
this.parser.onError = (error) => {
|
||||||
|
this.handleParseError(error);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
handleParsedValue(value) {
|
||||||
|
// Extract the actual item object from the parser's nested structure
|
||||||
|
const item = value.value || value;
|
||||||
|
|
||||||
|
if (this.config.itemValidator(item)) {
|
||||||
|
this.progressTracker.addItem(item);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
handleParseError(error) {
|
||||||
|
if (this.config.onError) {
|
||||||
|
this.config.onError(new Error(`JSON parsing error: ${error.message}`));
|
||||||
|
}
|
||||||
|
// Don't throw here - we'll handle this in the fallback logic
|
||||||
|
}
|
||||||
|
|
||||||
|
write(chunk) {
|
||||||
|
this.parser.write(chunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
end() {
|
||||||
|
this.parser.end();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handles fallback parsing when streaming fails
|
||||||
|
*/
|
||||||
|
class FallbackParser {
|
||||||
|
constructor(config, progressTracker) {
|
||||||
|
this.config = config;
|
||||||
|
this.progressTracker = progressTracker;
|
||||||
|
}
|
||||||
|
|
||||||
|
async attemptParsing() {
|
||||||
|
if (!this.shouldAttemptFallback()) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
return await this.parseFallbackItems();
|
||||||
|
} catch (parseError) {
|
||||||
|
this.handleFallbackError(parseError);
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
shouldAttemptFallback() {
|
||||||
|
return (
|
||||||
|
this.config.expectedTotal > 0 &&
|
||||||
|
this.progressTracker.parsedItems.length < this.config.expectedTotal &&
|
||||||
|
this.progressTracker.accumulatedText &&
|
||||||
|
this.config.fallbackItemExtractor
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
async parseFallbackItems() {
|
||||||
|
const jsonText = this._cleanJsonText(this.progressTracker.accumulatedText);
|
||||||
|
const fullResponse = JSON.parse(jsonText);
|
||||||
|
const fallbackItems = this.config.fallbackItemExtractor(fullResponse);
|
||||||
|
|
||||||
|
if (!Array.isArray(fallbackItems)) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
return this._processNewItems(fallbackItems);
|
||||||
|
}
|
||||||
|
|
||||||
|
_cleanJsonText(text) {
|
||||||
|
// Remove markdown code block wrappers and trim whitespace
|
||||||
|
return text
|
||||||
|
.replace(/^```(?:json)?\s*\n?/i, '')
|
||||||
|
.replace(/\n?```\s*$/i, '')
|
||||||
|
.trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
_processNewItems(fallbackItems) {
|
||||||
|
// Only add items we haven't already parsed
|
||||||
|
const itemsToAdd = fallbackItems.slice(
|
||||||
|
this.progressTracker.parsedItems.length
|
||||||
|
);
|
||||||
|
const newItems = [];
|
||||||
|
|
||||||
|
for (const item of itemsToAdd) {
|
||||||
|
if (this.config.itemValidator(item)) {
|
||||||
|
newItems.push(item);
|
||||||
|
this.progressTracker.addItem(item);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return newItems;
|
||||||
|
}
|
||||||
|
|
||||||
|
handleFallbackError(error) {
|
||||||
|
if (this.progressTracker.parsedItems.length === 0) {
|
||||||
|
throw new Error(`Failed to parse AI response as JSON: ${error.message}`);
|
||||||
|
}
|
||||||
|
// If we have some items from streaming, continue with those
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Buffer size validator
|
||||||
|
*/
|
||||||
|
class BufferSizeValidator {
|
||||||
|
constructor(maxSize) {
|
||||||
|
this.maxSize = maxSize;
|
||||||
|
this.currentSize = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
validateChunk(existingText, newChunk) {
|
||||||
|
const newSize = Buffer.byteLength(existingText + newChunk, 'utf8');
|
||||||
|
|
||||||
|
if (newSize > this.maxSize) {
|
||||||
|
throw new StreamingError(
|
||||||
|
`Buffer size exceeded: ${newSize} bytes > ${this.maxSize} bytes maximum`,
|
||||||
|
STREAMING_ERROR_CODES.BUFFER_SIZE_EXCEEDED
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.currentSize = newSize;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Main orchestrator for stream parsing
|
||||||
|
*/
|
||||||
|
class StreamParserOrchestrator {
|
||||||
|
constructor(config) {
|
||||||
|
this.config = new StreamParserConfig(config);
|
||||||
|
this.progressTracker = new ProgressTracker(this.config);
|
||||||
|
this.bufferValidator = new BufferSizeValidator(this.config.maxBufferSize);
|
||||||
|
this.jsonParser = new JSONStreamParser(this.config, this.progressTracker);
|
||||||
|
this.fallbackParser = new FallbackParser(this.config, this.progressTracker);
|
||||||
|
}
|
||||||
|
|
||||||
|
async parse(textStream) {
|
||||||
|
if (!textStream) {
|
||||||
|
throw new Error('No text stream provided');
|
||||||
|
}
|
||||||
|
|
||||||
|
await this.processStream(textStream);
|
||||||
|
await this.waitForParsingCompletion();
|
||||||
|
|
||||||
|
const usedFallback = await this.attemptFallbackIfNeeded();
|
||||||
|
|
||||||
|
return this.buildResult(usedFallback);
|
||||||
|
}
|
||||||
|
|
||||||
|
async processStream(textStream) {
|
||||||
|
const processor = new StreamProcessor((chunk) => {
|
||||||
|
this.bufferValidator.validateChunk(
|
||||||
|
this.progressTracker.accumulatedText,
|
||||||
|
chunk
|
||||||
|
);
|
||||||
|
this.progressTracker.addText(chunk);
|
||||||
|
this.jsonParser.write(chunk);
|
||||||
|
});
|
||||||
|
|
||||||
|
try {
|
||||||
|
await processor.process(textStream);
|
||||||
|
} catch (streamError) {
|
||||||
|
this.handleStreamError(streamError);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.jsonParser.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
handleStreamError(error) {
|
||||||
|
// Re-throw StreamingError as-is, wrap other errors
|
||||||
|
if (error instanceof StreamingError) {
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
throw new StreamingError(
|
||||||
|
`Failed to process AI text stream: ${error.message}`,
|
||||||
|
STREAMING_ERROR_CODES.STREAM_PROCESSING_FAILED
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
async waitForParsingCompletion() {
|
||||||
|
// Wait for final parsing to complete (JSON parser may still be processing)
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||||
|
}
|
||||||
|
|
||||||
|
async attemptFallbackIfNeeded() {
|
||||||
|
const fallbackItems = await this.fallbackParser.attemptParsing();
|
||||||
|
return fallbackItems.length > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
buildResult(usedFallback) {
|
||||||
|
const metadata = this.progressTracker.getMetadata();
|
||||||
|
|
||||||
|
return {
|
||||||
|
items: this.progressTracker.parsedItems,
|
||||||
|
accumulatedText: metadata.accumulatedText,
|
||||||
|
estimatedTokens: metadata.estimatedTokens,
|
||||||
|
usedFallback
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse a streaming JSON response with progress tracking
|
||||||
|
*
|
||||||
|
* Example with custom buffer size:
|
||||||
|
* ```js
|
||||||
|
* const result = await parseStream(stream, {
|
||||||
|
* jsonPaths: ['$.tasks.*'],
|
||||||
|
* maxBufferSize: 2 * 1024 * 1024 // 2MB
|
||||||
|
* });
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* @param {Object} textStream - The AI service text stream object
|
||||||
|
* @param {Object} config - Configuration options
|
||||||
|
* @returns {Promise<Object>} Parsed result with metadata
|
||||||
|
*/
|
||||||
|
export async function parseStream(textStream, config = {}) {
|
||||||
|
const orchestrator = new StreamParserOrchestrator(config);
|
||||||
|
return orchestrator.parse(textStream);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process different types of text streams
|
||||||
|
* @param {Object} textStream - The stream object from AI service
|
||||||
|
* @param {Function} onChunk - Callback for each text chunk
|
||||||
|
*/
|
||||||
|
export async function processTextStream(textStream, onChunk) {
|
||||||
|
const processor = new StreamProcessor(onChunk);
|
||||||
|
await processor.process(textStream);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Attempt fallback JSON parsing when streaming parsing is incomplete
|
||||||
|
* @param {string} accumulatedText - Complete accumulated text
|
||||||
|
* @param {Array} existingItems - Items already parsed from streaming
|
||||||
|
* @param {number} expectedTotal - Expected total number of items
|
||||||
|
* @param {Object} config - Configuration for progress reporting
|
||||||
|
* @returns {Promise<Array>} Additional items found via fallback parsing
|
||||||
|
*/
|
||||||
|
export async function attemptFallbackParsing(
|
||||||
|
accumulatedText,
|
||||||
|
existingItems,
|
||||||
|
expectedTotal,
|
||||||
|
config
|
||||||
|
) {
|
||||||
|
// Create a temporary progress tracker for backward compatibility
|
||||||
|
const progressTracker = new ProgressTracker({
|
||||||
|
onProgress: config.onProgress,
|
||||||
|
onError: config.onError,
|
||||||
|
estimateTokens: config.estimateTokens,
|
||||||
|
expectedTotal
|
||||||
|
});
|
||||||
|
|
||||||
|
progressTracker.parsedItems = existingItems;
|
||||||
|
progressTracker.accumulatedText = accumulatedText;
|
||||||
|
|
||||||
|
const fallbackParser = new FallbackParser(
|
||||||
|
{
|
||||||
|
...config,
|
||||||
|
expectedTotal,
|
||||||
|
itemValidator:
|
||||||
|
config.itemValidator || StreamParserConfig.defaultItemValidator
|
||||||
|
},
|
||||||
|
progressTracker
|
||||||
|
);
|
||||||
|
|
||||||
|
return fallbackParser.attemptParsing();
|
||||||
|
}
|
||||||
189
src/utils/timeout-manager.js
Normal file
189
src/utils/timeout-manager.js
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
import { StreamingError, STREAMING_ERROR_CODES } from './stream-parser.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Utility class for managing timeouts in async operations
|
||||||
|
* Reduces code duplication for timeout handling patterns
|
||||||
|
*/
|
||||||
|
export class TimeoutManager {
|
||||||
|
/**
|
||||||
|
* Wraps a promise with a timeout that will reject if not resolved in time
|
||||||
|
*
|
||||||
|
* @param {Promise} promise - The promise to wrap with timeout
|
||||||
|
* @param {number} timeoutMs - Timeout duration in milliseconds
|
||||||
|
* @param {string} operationName - Name of the operation for error messages
|
||||||
|
* @returns {Promise} The result of the promise or throws timeout error
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const result = await TimeoutManager.withTimeout(
|
||||||
|
* fetchData(),
|
||||||
|
* 5000,
|
||||||
|
* 'Data fetch operation'
|
||||||
|
* );
|
||||||
|
*/
|
||||||
|
static async withTimeout(promise, timeoutMs, operationName = 'Operation') {
|
||||||
|
let timeoutHandle;
|
||||||
|
|
||||||
|
const timeoutPromise = new Promise((_, reject) => {
|
||||||
|
timeoutHandle = setTimeout(() => {
|
||||||
|
reject(
|
||||||
|
new StreamingError(
|
||||||
|
`${operationName} timed out after ${timeoutMs / 1000} seconds`,
|
||||||
|
STREAMING_ERROR_CODES.STREAM_PROCESSING_FAILED
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}, timeoutMs);
|
||||||
|
});
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Race between the actual promise and the timeout
|
||||||
|
const result = await Promise.race([promise, timeoutPromise]);
|
||||||
|
// Clear timeout if promise resolved first
|
||||||
|
clearTimeout(timeoutHandle);
|
||||||
|
return result;
|
||||||
|
} catch (error) {
|
||||||
|
// Always clear timeout on error
|
||||||
|
clearTimeout(timeoutHandle);
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Wraps a promise with a timeout, but returns undefined instead of throwing on timeout
|
||||||
|
* Useful for optional operations that shouldn't fail the main flow
|
||||||
|
*
|
||||||
|
* @param {Promise} promise - The promise to wrap with timeout
|
||||||
|
* @param {number} timeoutMs - Timeout duration in milliseconds
|
||||||
|
* @param {*} defaultValue - Value to return on timeout (default: undefined)
|
||||||
|
* @returns {Promise} The result of the promise or defaultValue on timeout
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const usage = await TimeoutManager.withSoftTimeout(
|
||||||
|
* getUsageStats(),
|
||||||
|
* 1000,
|
||||||
|
* { tokens: 0 }
|
||||||
|
* );
|
||||||
|
*/
|
||||||
|
static async withSoftTimeout(promise, timeoutMs, defaultValue = undefined) {
|
||||||
|
let timeoutHandle;
|
||||||
|
|
||||||
|
const timeoutPromise = new Promise((resolve) => {
|
||||||
|
timeoutHandle = setTimeout(() => {
|
||||||
|
resolve(defaultValue);
|
||||||
|
}, timeoutMs);
|
||||||
|
});
|
||||||
|
|
||||||
|
try {
|
||||||
|
const result = await Promise.race([promise, timeoutPromise]);
|
||||||
|
clearTimeout(timeoutHandle);
|
||||||
|
return result;
|
||||||
|
} catch (error) {
|
||||||
|
// On error, clear timeout and return default value
|
||||||
|
clearTimeout(timeoutHandle);
|
||||||
|
return defaultValue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a reusable timeout controller for multiple operations
|
||||||
|
* Useful when you need to apply the same timeout to multiple promises
|
||||||
|
*
|
||||||
|
* @param {number} timeoutMs - Timeout duration in milliseconds
|
||||||
|
* @param {string} operationName - Base name for operations
|
||||||
|
* @returns {Object} Controller with wrap method
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* const controller = TimeoutManager.createController(60000, 'AI Service');
|
||||||
|
* const result1 = await controller.wrap(service.call1(), 'call 1');
|
||||||
|
* const result2 = await controller.wrap(service.call2(), 'call 2');
|
||||||
|
*/
|
||||||
|
static createController(timeoutMs, operationName = 'Operation') {
|
||||||
|
return {
|
||||||
|
timeoutMs,
|
||||||
|
operationName,
|
||||||
|
|
||||||
|
async wrap(promise, specificName = null) {
|
||||||
|
const fullName = specificName
|
||||||
|
? `${operationName} - ${specificName}`
|
||||||
|
: operationName;
|
||||||
|
return TimeoutManager.withTimeout(promise, timeoutMs, fullName);
|
||||||
|
},
|
||||||
|
|
||||||
|
async wrapSoft(promise, defaultValue = undefined) {
|
||||||
|
return TimeoutManager.withSoftTimeout(promise, timeoutMs, defaultValue);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if an error is a timeout error from this manager
|
||||||
|
*
|
||||||
|
* @param {Error} error - The error to check
|
||||||
|
* @returns {boolean} True if this is a timeout error
|
||||||
|
*/
|
||||||
|
static isTimeoutError(error) {
|
||||||
|
return (
|
||||||
|
error instanceof StreamingError &&
|
||||||
|
error.code === STREAMING_ERROR_CODES.STREAM_PROCESSING_FAILED &&
|
||||||
|
error.message.includes('timed out')
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Duration helper class for more readable timeout specifications
|
||||||
|
*/
|
||||||
|
export class Duration {
|
||||||
|
constructor(value, unit = 'ms') {
|
||||||
|
this.milliseconds = this._toMilliseconds(value, unit);
|
||||||
|
}
|
||||||
|
|
||||||
|
static milliseconds(value) {
|
||||||
|
return new Duration(value, 'ms');
|
||||||
|
}
|
||||||
|
|
||||||
|
static seconds(value) {
|
||||||
|
return new Duration(value, 's');
|
||||||
|
}
|
||||||
|
|
||||||
|
static minutes(value) {
|
||||||
|
return new Duration(value, 'm');
|
||||||
|
}
|
||||||
|
|
||||||
|
static hours(value) {
|
||||||
|
return new Duration(value, 'h');
|
||||||
|
}
|
||||||
|
|
||||||
|
get seconds() {
|
||||||
|
return this.milliseconds / 1000;
|
||||||
|
}
|
||||||
|
|
||||||
|
get minutes() {
|
||||||
|
return this.milliseconds / 60000;
|
||||||
|
}
|
||||||
|
|
||||||
|
get hours() {
|
||||||
|
return this.milliseconds / 3600000;
|
||||||
|
}
|
||||||
|
|
||||||
|
toString() {
|
||||||
|
if (this.milliseconds < 1000) {
|
||||||
|
return `${this.milliseconds}ms`;
|
||||||
|
} else if (this.milliseconds < 60000) {
|
||||||
|
return `${this.seconds}s`;
|
||||||
|
} else if (this.milliseconds < 3600000) {
|
||||||
|
return `${Math.floor(this.minutes)}m ${Math.floor(this.seconds % 60)}s`;
|
||||||
|
} else {
|
||||||
|
return `${Math.floor(this.hours)}h ${Math.floor(this.minutes % 60)}m`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_toMilliseconds(value, unit) {
|
||||||
|
const conversions = {
|
||||||
|
ms: 1,
|
||||||
|
s: 1000,
|
||||||
|
m: 60000,
|
||||||
|
h: 3600000
|
||||||
|
};
|
||||||
|
return value * (conversions[unit] || 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
97
tests/manual/progress/TESTING_GUIDE.md
Normal file
97
tests/manual/progress/TESTING_GUIDE.md
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
# Task Master Progress Testing Guide
|
||||||
|
|
||||||
|
Quick reference for testing streaming/non-streaming functionality with token tracking.
|
||||||
|
|
||||||
|
## 🎯 Test Modes
|
||||||
|
|
||||||
|
1. **MCP Streaming** - Has `reportProgress` + `mcpLog`, shows emoji indicators (🔴🟠🟢)
|
||||||
|
2. **CLI Streaming** - No `reportProgress`, shows terminal progress bars
|
||||||
|
3. **Non-Streaming** - No progress reporting, single response
|
||||||
|
|
||||||
|
## 🚀 Quick Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test Scripts (accept: mcp-streaming, cli-streaming, non-streaming, both, all)
|
||||||
|
node test-parse-prd.js [mode]
|
||||||
|
node test-analyze-complexity.js [mode]
|
||||||
|
node test-expand.js [mode] [num_subtasks]
|
||||||
|
node test-expand-all.js [mode] [num_subtasks]
|
||||||
|
node parse-prd-analysis.js [accuracy|complexity|all]
|
||||||
|
|
||||||
|
# CLI Commands
|
||||||
|
node scripts/dev.js parse-prd test.txt # Local dev (streaming)
|
||||||
|
node scripts/dev.js analyze-complexity --research
|
||||||
|
node scripts/dev.js expand --id=1 --force
|
||||||
|
node scripts/dev.js expand --all --force
|
||||||
|
|
||||||
|
task-master [command] # Global CLI (non-streaming)
|
||||||
|
```
|
||||||
|
|
||||||
|
## ✅ Success Indicators
|
||||||
|
|
||||||
|
### Indicators
|
||||||
|
- **Priority**: 🔴🔴🔴 (high), 🟠🟠⚪ (medium), 🟢⚪⚪ (low)
|
||||||
|
- **Complexity**: ●●● (7-10), ●●○ (4-6), ●○○ (1-3)
|
||||||
|
|
||||||
|
### Token Format
|
||||||
|
`Tokens (I/O): 2,150/1,847 ($0.0423)` (~4 chars per token)
|
||||||
|
|
||||||
|
### Progress Bars
|
||||||
|
```
|
||||||
|
Single: Generating subtasks... |████████░░| 80% (4/5)
|
||||||
|
Dual: Expanding 3 tasks | Task 2/3 |████████░░| 66%
|
||||||
|
Generating 5 subtasks... |██████░░░░| 60%
|
||||||
|
```
|
||||||
|
|
||||||
|
### Fractional Progress
|
||||||
|
`(completedTasks + currentSubtask/totalSubtasks) / totalTasks`
|
||||||
|
Example: 33% → 46% → 60% → 66% → 80% → 93% → 100%
|
||||||
|
|
||||||
|
## 🐛 Quick Fixes
|
||||||
|
|
||||||
|
| Issue | Fix |
|
||||||
|
|-------|-----|
|
||||||
|
| No streaming | Check `reportProgress` is passed |
|
||||||
|
| NaN% progress | Filter duplicate `subtask_progress` events |
|
||||||
|
| Missing tokens | Check `.env` has API keys |
|
||||||
|
| Broken bars | Terminal width > 80 |
|
||||||
|
| projectRoot.split | Use `projectRoot` not `session` |
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Debug
|
||||||
|
TASKMASTER_DEBUG=true node test-expand.js
|
||||||
|
npm run lint
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📊 Benchmarks
|
||||||
|
- Single task: 10-20s (5 subtasks)
|
||||||
|
- Expand all: 30-45s (3 tasks)
|
||||||
|
- Streaming: ~10-20% faster
|
||||||
|
- Updates: Every 2-5s
|
||||||
|
|
||||||
|
## 🔄 Test Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Quick check
|
||||||
|
node test-parse-prd.js both && npm test
|
||||||
|
|
||||||
|
# Full suite (before release)
|
||||||
|
for test in parse-prd analyze-complexity expand expand-all; do
|
||||||
|
node test-$test.js all
|
||||||
|
done
|
||||||
|
node parse-prd-analysis.js all
|
||||||
|
npm test
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🎯 MCP Tool Example
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
{
|
||||||
|
"tool": "parse_prd",
|
||||||
|
"args": {
|
||||||
|
"input": "prd.txt",
|
||||||
|
"numTasks": "8",
|
||||||
|
"force": true,
|
||||||
|
"projectRoot": "/path/to/project"
|
||||||
|
}
|
||||||
|
}
|
||||||
334
tests/manual/progress/parse-prd-analysis.js
Normal file
334
tests/manual/progress/parse-prd-analysis.js
Normal file
@@ -0,0 +1,334 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
|
||||||
|
/**
|
||||||
|
* parse-prd-analysis.js
|
||||||
|
*
|
||||||
|
* Detailed timing and accuracy analysis for parse-prd progress reporting.
|
||||||
|
* Tests different task generation complexities using the sample PRD from fixtures.
|
||||||
|
* Validates real-time characteristics and focuses on progress behavior and performance metrics.
|
||||||
|
* Uses tests/fixtures/sample-prd.txt for consistent testing across all scenarios.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import chalk from 'chalk';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = path.dirname(__filename);
|
||||||
|
|
||||||
|
import parsePRD from '../../../scripts/modules/task-manager/parse-prd/index.js';
|
||||||
|
|
||||||
|
// Use the same project root as the main test file
|
||||||
|
const PROJECT_ROOT = path.resolve(__dirname, '..', '..', '..');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the path to the sample PRD file
|
||||||
|
*/
|
||||||
|
function getSamplePRDPath() {
|
||||||
|
return path.resolve(PROJECT_ROOT, 'tests', 'fixtures', 'sample-prd.txt');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Detailed Progress Reporter for timing analysis
|
||||||
|
*/
|
||||||
|
class DetailedProgressReporter {
|
||||||
|
constructor() {
|
||||||
|
this.progressHistory = [];
|
||||||
|
this.startTime = Date.now();
|
||||||
|
this.lastProgress = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
async reportProgress(data) {
|
||||||
|
const timestamp = Date.now() - this.startTime;
|
||||||
|
const timeSinceLastProgress =
|
||||||
|
this.progressHistory.length > 0
|
||||||
|
? timestamp -
|
||||||
|
this.progressHistory[this.progressHistory.length - 1].timestamp
|
||||||
|
: timestamp;
|
||||||
|
|
||||||
|
const entry = {
|
||||||
|
timestamp,
|
||||||
|
timeSinceLastProgress,
|
||||||
|
...data
|
||||||
|
};
|
||||||
|
|
||||||
|
this.progressHistory.push(entry);
|
||||||
|
|
||||||
|
const percentage = data.total
|
||||||
|
? Math.round((data.progress / data.total) * 100)
|
||||||
|
: 0;
|
||||||
|
console.log(
|
||||||
|
chalk.blue(`[${timestamp}ms] (+${timeSinceLastProgress}ms)`),
|
||||||
|
chalk.green(`${percentage}%`),
|
||||||
|
`(${data.progress}/${data.total})`,
|
||||||
|
chalk.yellow(data.message)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
getAnalysis() {
|
||||||
|
if (this.progressHistory.length === 0) return null;
|
||||||
|
|
||||||
|
const totalDuration =
|
||||||
|
this.progressHistory[this.progressHistory.length - 1].timestamp;
|
||||||
|
const intervals = this.progressHistory
|
||||||
|
.slice(1)
|
||||||
|
.map((entry) => entry.timeSinceLastProgress);
|
||||||
|
const avgInterval =
|
||||||
|
intervals.length > 0
|
||||||
|
? intervals.reduce((a, b) => a + b, 0) / intervals.length
|
||||||
|
: 0;
|
||||||
|
const minInterval = intervals.length > 0 ? Math.min(...intervals) : 0;
|
||||||
|
const maxInterval = intervals.length > 0 ? Math.max(...intervals) : 0;
|
||||||
|
|
||||||
|
return {
|
||||||
|
totalReports: this.progressHistory.length,
|
||||||
|
totalDuration,
|
||||||
|
avgInterval: Math.round(avgInterval),
|
||||||
|
minInterval,
|
||||||
|
maxInterval,
|
||||||
|
intervals
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
printDetailedAnalysis() {
|
||||||
|
const analysis = this.getAnalysis();
|
||||||
|
if (!analysis) {
|
||||||
|
console.log(chalk.red('No progress data to analyze'));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(chalk.cyan('\n=== Detailed Progress Analysis ==='));
|
||||||
|
console.log(`Total Progress Reports: ${analysis.totalReports}`);
|
||||||
|
console.log(`Total Duration: ${analysis.totalDuration}ms`);
|
||||||
|
console.log(`Average Interval: ${analysis.avgInterval}ms`);
|
||||||
|
console.log(`Min Interval: ${analysis.minInterval}ms`);
|
||||||
|
console.log(`Max Interval: ${analysis.maxInterval}ms`);
|
||||||
|
|
||||||
|
console.log(chalk.cyan('\n=== Progress Timeline ==='));
|
||||||
|
this.progressHistory.forEach((entry, index) => {
|
||||||
|
const percentage = entry.total
|
||||||
|
? Math.round((entry.progress / entry.total) * 100)
|
||||||
|
: 0;
|
||||||
|
const intervalText =
|
||||||
|
index > 0 ? ` (+${entry.timeSinceLastProgress}ms)` : '';
|
||||||
|
console.log(
|
||||||
|
`${index + 1}. [${entry.timestamp}ms]${intervalText} ${percentage}% - ${entry.message}`
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check for real-time characteristics
|
||||||
|
console.log(chalk.cyan('\n=== Real-time Characteristics ==='));
|
||||||
|
const hasRealTimeUpdates = analysis.intervals.some(
|
||||||
|
(interval) => interval < 10000
|
||||||
|
); // Less than 10s
|
||||||
|
const hasConsistentUpdates = analysis.intervals.length > 3;
|
||||||
|
const hasProgressiveUpdates = this.progressHistory.every(
|
||||||
|
(entry, index) =>
|
||||||
|
index === 0 ||
|
||||||
|
entry.progress >= this.progressHistory[index - 1].progress
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(`✅ Real-time updates: ${hasRealTimeUpdates ? 'YES' : 'NO'}`);
|
||||||
|
console.log(
|
||||||
|
`✅ Consistent updates: ${hasConsistentUpdates ? 'YES' : 'NO'}`
|
||||||
|
);
|
||||||
|
console.log(
|
||||||
|
`✅ Progressive updates: ${hasProgressiveUpdates ? 'YES' : 'NO'}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get PRD path for complexity testing
|
||||||
|
* For complexity testing, we'll use the same sample PRD but request different numbers of tasks
|
||||||
|
* This provides more realistic testing since the AI will generate different complexity based on task count
|
||||||
|
*/
|
||||||
|
function getPRDPathForComplexity(complexity = 'medium') {
|
||||||
|
// Always use the same sample PRD file - complexity will be controlled by task count
|
||||||
|
return getSamplePRDPath();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test streaming with different task generation complexities
|
||||||
|
* Uses the same sample PRD but requests different numbers of tasks to test complexity scaling
|
||||||
|
*/
|
||||||
|
async function testStreamingComplexity() {
|
||||||
|
console.log(
|
||||||
|
chalk.cyan(
|
||||||
|
'🧪 Testing Streaming with Different Task Generation Complexities\n'
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
const complexities = ['simple', 'medium', 'complex'];
|
||||||
|
const results = [];
|
||||||
|
|
||||||
|
for (const complexity of complexities) {
|
||||||
|
console.log(
|
||||||
|
chalk.yellow(`\n--- Testing ${complexity.toUpperCase()} Complexity ---`)
|
||||||
|
);
|
||||||
|
|
||||||
|
const testPRDPath = getPRDPathForComplexity(complexity);
|
||||||
|
const testTasksPath = path.join(__dirname, `test-tasks-${complexity}.json`);
|
||||||
|
|
||||||
|
// Clean up existing file
|
||||||
|
if (fs.existsSync(testTasksPath)) {
|
||||||
|
fs.unlinkSync(testTasksPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
const progressReporter = new DetailedProgressReporter();
|
||||||
|
const expectedTasks =
|
||||||
|
complexity === 'simple' ? 3 : complexity === 'medium' ? 6 : 10;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const startTime = Date.now();
|
||||||
|
|
||||||
|
await parsePRD(testPRDPath, testTasksPath, expectedTasks, {
|
||||||
|
force: true,
|
||||||
|
append: false,
|
||||||
|
research: false,
|
||||||
|
reportProgress: progressReporter.reportProgress.bind(progressReporter),
|
||||||
|
projectRoot: PROJECT_ROOT
|
||||||
|
});
|
||||||
|
|
||||||
|
const endTime = Date.now();
|
||||||
|
const duration = endTime - startTime;
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
chalk.green(`✅ ${complexity} complexity completed in ${duration}ms`)
|
||||||
|
);
|
||||||
|
|
||||||
|
progressReporter.printDetailedAnalysis();
|
||||||
|
|
||||||
|
results.push({
|
||||||
|
complexity,
|
||||||
|
duration,
|
||||||
|
analysis: progressReporter.getAnalysis()
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
console.error(
|
||||||
|
chalk.red(`❌ ${complexity} complexity failed: ${error.message}`)
|
||||||
|
);
|
||||||
|
results.push({
|
||||||
|
complexity,
|
||||||
|
error: error.message
|
||||||
|
});
|
||||||
|
} finally {
|
||||||
|
// Clean up (only the tasks file, not the PRD since we're using the fixture)
|
||||||
|
if (fs.existsSync(testTasksPath)) fs.unlinkSync(testTasksPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Summary
|
||||||
|
console.log(chalk.cyan('\n=== Complexity Test Summary ==='));
|
||||||
|
results.forEach((result) => {
|
||||||
|
if (result.error) {
|
||||||
|
console.log(`${result.complexity}: ❌ FAILED - ${result.error}`);
|
||||||
|
} else {
|
||||||
|
console.log(
|
||||||
|
`${result.complexity}: ✅ ${result.duration}ms (${result.analysis.totalReports} reports)`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return results;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test progress accuracy
|
||||||
|
*/
|
||||||
|
async function testProgressAccuracy() {
|
||||||
|
console.log(chalk.cyan('🧪 Testing Progress Accuracy\n'));
|
||||||
|
|
||||||
|
const testPRDPath = getSamplePRDPath();
|
||||||
|
const testTasksPath = path.join(__dirname, 'test-accuracy-tasks.json');
|
||||||
|
|
||||||
|
// Clean up existing file
|
||||||
|
if (fs.existsSync(testTasksPath)) {
|
||||||
|
fs.unlinkSync(testTasksPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
const progressReporter = new DetailedProgressReporter();
|
||||||
|
|
||||||
|
try {
|
||||||
|
await parsePRD(testPRDPath, testTasksPath, 8, {
|
||||||
|
force: true,
|
||||||
|
append: false,
|
||||||
|
research: false,
|
||||||
|
reportProgress: progressReporter.reportProgress.bind(progressReporter),
|
||||||
|
projectRoot: PROJECT_ROOT
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(chalk.green('✅ Progress accuracy test completed'));
|
||||||
|
progressReporter.printDetailedAnalysis();
|
||||||
|
|
||||||
|
// Additional accuracy checks
|
||||||
|
const analysis = progressReporter.getAnalysis();
|
||||||
|
console.log(chalk.cyan('\n=== Accuracy Metrics ==='));
|
||||||
|
console.log(
|
||||||
|
`Progress consistency: ${analysis.intervals.every((i) => i > 0) ? 'PASS' : 'FAIL'}`
|
||||||
|
);
|
||||||
|
console.log(
|
||||||
|
`Reasonable intervals: ${analysis.intervals.every((i) => i < 30000) ? 'PASS' : 'FAIL'}`
|
||||||
|
);
|
||||||
|
console.log(
|
||||||
|
`Expected report count: ${analysis.totalReports >= 8 ? 'PASS' : 'FAIL'}`
|
||||||
|
);
|
||||||
|
} catch (error) {
|
||||||
|
console.error(
|
||||||
|
chalk.red(`❌ Progress accuracy test failed: ${error.message}`)
|
||||||
|
);
|
||||||
|
} finally {
|
||||||
|
// Clean up (only the tasks file, not the PRD since we're using the fixture)
|
||||||
|
if (fs.existsSync(testTasksPath)) fs.unlinkSync(testTasksPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Main test runner
|
||||||
|
*/
|
||||||
|
async function main() {
|
||||||
|
const args = process.argv.slice(2);
|
||||||
|
const testType = args[0] || 'accuracy';
|
||||||
|
|
||||||
|
console.log(chalk.bold.cyan('🚀 Task Master Detailed Progress Tests\n'));
|
||||||
|
console.log(chalk.blue(`Test type: ${testType}\n`));
|
||||||
|
|
||||||
|
try {
|
||||||
|
switch (testType.toLowerCase()) {
|
||||||
|
case 'accuracy':
|
||||||
|
await testProgressAccuracy();
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'complexity':
|
||||||
|
await testStreamingComplexity();
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'all':
|
||||||
|
console.log(chalk.yellow('Running all detailed tests...\n'));
|
||||||
|
await testProgressAccuracy();
|
||||||
|
console.log('\n' + '='.repeat(60) + '\n');
|
||||||
|
await testStreamingComplexity();
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
console.log(chalk.red(`Unknown test type: ${testType}`));
|
||||||
|
console.log(
|
||||||
|
chalk.yellow('Available options: accuracy, complexity, all')
|
||||||
|
);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(chalk.green('\n🎉 Detailed tests completed successfully!'));
|
||||||
|
} catch (error) {
|
||||||
|
console.error(chalk.red(`\n❌ Test failed: ${error.message}`));
|
||||||
|
console.error(chalk.red(error.stack));
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run if called directly
|
||||||
|
if (import.meta.url === `file://${process.argv[1]}`) {
|
||||||
|
// Top-level await is available in ESM; keep compatibility with Node ≥14
|
||||||
|
await main();
|
||||||
|
}
|
||||||
577
tests/manual/progress/test-parse-prd.js
Normal file
577
tests/manual/progress/test-parse-prd.js
Normal file
@@ -0,0 +1,577 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
|
||||||
|
/**
|
||||||
|
* test-parse-prd.js
|
||||||
|
*
|
||||||
|
* Comprehensive integration test for parse-prd functionality.
|
||||||
|
* Tests MCP streaming, CLI streaming, and non-streaming modes.
|
||||||
|
* Validates token tracking, message formats, and priority indicators across all contexts.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import chalk from 'chalk';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
|
||||||
|
// Get current directory
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = path.dirname(__filename);
|
||||||
|
|
||||||
|
// Get project root (three levels up from tests/manual/progress/)
|
||||||
|
const PROJECT_ROOT = path.resolve(__dirname, '..', '..', '..');
|
||||||
|
|
||||||
|
// Import the parse-prd function
|
||||||
|
import parsePRD from '../../../scripts/modules/task-manager/parse-prd/index.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Mock Progress Reporter for testing
|
||||||
|
*/
|
||||||
|
class MockProgressReporter {
|
||||||
|
constructor(enableDebug = true) {
|
||||||
|
this.enableDebug = enableDebug;
|
||||||
|
this.progressHistory = [];
|
||||||
|
this.startTime = Date.now();
|
||||||
|
}
|
||||||
|
|
||||||
|
async reportProgress(data) {
|
||||||
|
const timestamp = Date.now() - this.startTime;
|
||||||
|
|
||||||
|
const entry = {
|
||||||
|
timestamp,
|
||||||
|
...data
|
||||||
|
};
|
||||||
|
|
||||||
|
this.progressHistory.push(entry);
|
||||||
|
|
||||||
|
if (this.enableDebug) {
|
||||||
|
const percentage = data.total
|
||||||
|
? Math.round((data.progress / data.total) * 100)
|
||||||
|
: 0;
|
||||||
|
console.log(
|
||||||
|
chalk.blue(`[${timestamp}ms]`),
|
||||||
|
chalk.green(`${percentage}%`),
|
||||||
|
chalk.yellow(data.message)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
getProgressHistory() {
|
||||||
|
return this.progressHistory;
|
||||||
|
}
|
||||||
|
|
||||||
|
printSummary() {
|
||||||
|
console.log(chalk.green('\n=== Progress Summary ==='));
|
||||||
|
console.log(`Total progress reports: ${this.progressHistory.length}`);
|
||||||
|
console.log(
|
||||||
|
`Duration: ${this.progressHistory[this.progressHistory.length - 1]?.timestamp || 0}ms`
|
||||||
|
);
|
||||||
|
|
||||||
|
this.progressHistory.forEach((entry, index) => {
|
||||||
|
const percentage = entry.total
|
||||||
|
? Math.round((entry.progress / entry.total) * 100)
|
||||||
|
: 0;
|
||||||
|
console.log(
|
||||||
|
`${index + 1}. [${entry.timestamp}ms] ${percentage}% - ${entry.message}`
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check for expected message formats
|
||||||
|
const hasInitialMessage = this.progressHistory.some(
|
||||||
|
(entry) =>
|
||||||
|
entry.message.includes('Starting PRD analysis') &&
|
||||||
|
entry.message.includes('Input:') &&
|
||||||
|
entry.message.includes('tokens')
|
||||||
|
);
|
||||||
|
// Make regex more flexible to handle potential whitespace variations
|
||||||
|
const hasTaskMessages = this.progressHistory.some((entry) =>
|
||||||
|
/^[🔴🟠🟢⚪]{3} Task \d+\/\d+ - .+ \| ~Output: \d+ tokens/u.test(
|
||||||
|
entry.message.trim()
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
const hasCompletionMessage = this.progressHistory.some(
|
||||||
|
(entry) =>
|
||||||
|
entry.message.includes('✅ Task Generation Completed') &&
|
||||||
|
entry.message.includes('Tokens (I/O):')
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(chalk.cyan('\n=== Message Format Validation ==='));
|
||||||
|
console.log(
|
||||||
|
`✅ Initial message format: ${hasInitialMessage ? 'PASS' : 'FAIL'}`
|
||||||
|
);
|
||||||
|
console.log(`✅ Task message format: ${hasTaskMessages ? 'PASS' : 'FAIL'}`);
|
||||||
|
console.log(
|
||||||
|
`✅ Completion message format: ${hasCompletionMessage ? 'PASS' : 'FAIL'}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Mock MCP Logger for testing
|
||||||
|
*/
|
||||||
|
class MockMCPLogger {
|
||||||
|
constructor(enableDebug = true) {
|
||||||
|
this.enableDebug = enableDebug;
|
||||||
|
this.logs = [];
|
||||||
|
}
|
||||||
|
|
||||||
|
_log(level, ...args) {
|
||||||
|
const entry = {
|
||||||
|
level,
|
||||||
|
timestamp: Date.now(),
|
||||||
|
message: args.join(' ')
|
||||||
|
};
|
||||||
|
this.logs.push(entry);
|
||||||
|
|
||||||
|
if (this.enableDebug) {
|
||||||
|
const color =
|
||||||
|
{
|
||||||
|
info: chalk.blue,
|
||||||
|
warn: chalk.yellow,
|
||||||
|
error: chalk.red,
|
||||||
|
debug: chalk.gray,
|
||||||
|
success: chalk.green
|
||||||
|
}[level] || chalk.white;
|
||||||
|
|
||||||
|
console.log(color(`[${level.toUpperCase()}]`), ...args);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
info(...args) {
|
||||||
|
this._log('info', ...args);
|
||||||
|
}
|
||||||
|
warn(...args) {
|
||||||
|
this._log('warn', ...args);
|
||||||
|
}
|
||||||
|
error(...args) {
|
||||||
|
this._log('error', ...args);
|
||||||
|
}
|
||||||
|
debug(...args) {
|
||||||
|
this._log('debug', ...args);
|
||||||
|
}
|
||||||
|
success(...args) {
|
||||||
|
this._log('success', ...args);
|
||||||
|
}
|
||||||
|
|
||||||
|
getLogs() {
|
||||||
|
return this.logs;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the path to the sample PRD file
|
||||||
|
*/
|
||||||
|
function getSamplePRDPath() {
|
||||||
|
return path.resolve(PROJECT_ROOT, 'tests', 'fixtures', 'sample-prd.txt');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a basic test config file
|
||||||
|
*/
|
||||||
|
function createTestConfig() {
|
||||||
|
const testConfig = {
|
||||||
|
models: {
|
||||||
|
main: {
|
||||||
|
provider: 'anthropic',
|
||||||
|
modelId: 'claude-3-5-sonnet',
|
||||||
|
maxTokens: 64000,
|
||||||
|
temperature: 0.2
|
||||||
|
},
|
||||||
|
research: {
|
||||||
|
provider: 'perplexity',
|
||||||
|
modelId: 'sonar-pro',
|
||||||
|
maxTokens: 8700,
|
||||||
|
temperature: 0.1
|
||||||
|
},
|
||||||
|
fallback: {
|
||||||
|
provider: 'anthropic',
|
||||||
|
modelId: 'claude-3-5-sonnet',
|
||||||
|
maxTokens: 64000,
|
||||||
|
temperature: 0.2
|
||||||
|
}
|
||||||
|
},
|
||||||
|
global: {
|
||||||
|
logLevel: 'info',
|
||||||
|
debug: false,
|
||||||
|
defaultSubtasks: 5,
|
||||||
|
defaultPriority: 'medium',
|
||||||
|
projectName: 'Task Master Test',
|
||||||
|
ollamaBaseURL: 'http://localhost:11434/api',
|
||||||
|
bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com'
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const taskmasterDir = path.join(__dirname, '.taskmaster');
|
||||||
|
const configPath = path.join(taskmasterDir, 'config.json');
|
||||||
|
|
||||||
|
// Create .taskmaster directory if it doesn't exist
|
||||||
|
if (!fs.existsSync(taskmasterDir)) {
|
||||||
|
fs.mkdirSync(taskmasterDir, { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.writeFileSync(configPath, JSON.stringify(testConfig, null, 2));
|
||||||
|
return configPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Setup test files and configuration
|
||||||
|
*/
|
||||||
|
function setupTestFiles(testName) {
|
||||||
|
const testPRDPath = getSamplePRDPath();
|
||||||
|
const testTasksPath = path.join(__dirname, `test-${testName}-tasks.json`);
|
||||||
|
const configPath = createTestConfig();
|
||||||
|
|
||||||
|
// Clean up existing files
|
||||||
|
if (fs.existsSync(testTasksPath)) {
|
||||||
|
fs.unlinkSync(testTasksPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { testPRDPath, testTasksPath, configPath };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clean up test files
|
||||||
|
*/
|
||||||
|
function cleanupTestFiles(testTasksPath, configPath) {
|
||||||
|
if (fs.existsSync(testTasksPath)) fs.unlinkSync(testTasksPath);
|
||||||
|
if (fs.existsSync(configPath)) fs.unlinkSync(configPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run parsePRD with configurable options
|
||||||
|
*/
|
||||||
|
async function runParsePRD(testPRDPath, testTasksPath, numTasks, options = {}) {
|
||||||
|
const startTime = Date.now();
|
||||||
|
|
||||||
|
const result = await parsePRD(testPRDPath, testTasksPath, numTasks, {
|
||||||
|
force: true,
|
||||||
|
append: false,
|
||||||
|
research: false,
|
||||||
|
projectRoot: PROJECT_ROOT,
|
||||||
|
...options
|
||||||
|
});
|
||||||
|
|
||||||
|
const endTime = Date.now();
|
||||||
|
const duration = endTime - startTime;
|
||||||
|
|
||||||
|
return { result, duration };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verify task file existence and structure
|
||||||
|
*/
|
||||||
|
function verifyTaskResults(testTasksPath) {
|
||||||
|
if (fs.existsSync(testTasksPath)) {
|
||||||
|
const tasksData = JSON.parse(fs.readFileSync(testTasksPath, 'utf8'));
|
||||||
|
console.log(
|
||||||
|
chalk.green(
|
||||||
|
`\n✅ Tasks file created with ${tasksData.tasks.length} tasks`
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Verify task structure
|
||||||
|
const firstTask = tasksData.tasks[0];
|
||||||
|
if (firstTask && firstTask.id && firstTask.title && firstTask.description) {
|
||||||
|
console.log(chalk.green('✅ Task structure is valid'));
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
console.log(chalk.red('❌ Task structure is invalid'));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
console.log(chalk.red('❌ Tasks file was not created'));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Print MCP-specific logs and validation
|
||||||
|
*/
|
||||||
|
function printMCPResults(mcpLogger, progressReporter) {
|
||||||
|
// Print progress summary
|
||||||
|
progressReporter.printSummary();
|
||||||
|
|
||||||
|
// Print MCP logs
|
||||||
|
console.log(chalk.cyan('\n=== MCP Logs ==='));
|
||||||
|
const logs = mcpLogger.getLogs();
|
||||||
|
logs.forEach((log, index) => {
|
||||||
|
const color =
|
||||||
|
{
|
||||||
|
info: chalk.blue,
|
||||||
|
warn: chalk.yellow,
|
||||||
|
error: chalk.red,
|
||||||
|
debug: chalk.gray,
|
||||||
|
success: chalk.green
|
||||||
|
}[log.level] || chalk.white;
|
||||||
|
console.log(
|
||||||
|
`${index + 1}. ${color(`[${log.level.toUpperCase()}]`)} ${log.message}`
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Verify MCP-specific message formats (should use emoji indicators)
|
||||||
|
const hasEmojiIndicators = progressReporter
|
||||||
|
.getProgressHistory()
|
||||||
|
.some((entry) => /[🔴🟠🟢]/u.test(entry.message));
|
||||||
|
|
||||||
|
console.log(chalk.cyan('\n=== MCP-Specific Validation ==='));
|
||||||
|
console.log(
|
||||||
|
`✅ Emoji priority indicators: ${hasEmojiIndicators ? 'PASS' : 'FAIL'}`
|
||||||
|
);
|
||||||
|
|
||||||
|
return { hasEmojiIndicators, logs };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test MCP streaming with proper MCP context
|
||||||
|
*/
|
||||||
|
async function testMCPStreaming(numTasks = 10) {
|
||||||
|
console.log(chalk.cyan('🧪 Testing MCP Streaming Functionality\n'));
|
||||||
|
|
||||||
|
const { testPRDPath, testTasksPath, configPath } = setupTestFiles('mcp');
|
||||||
|
const progressReporter = new MockProgressReporter(true);
|
||||||
|
const mcpLogger = new MockMCPLogger(true); // Enable debug for MCP context
|
||||||
|
|
||||||
|
try {
|
||||||
|
console.log(chalk.yellow('Starting MCP streaming test...'));
|
||||||
|
|
||||||
|
const { result, duration } = await runParsePRD(
|
||||||
|
testPRDPath,
|
||||||
|
testTasksPath,
|
||||||
|
numTasks,
|
||||||
|
{
|
||||||
|
reportProgress: progressReporter.reportProgress.bind(progressReporter),
|
||||||
|
mcpLog: mcpLogger // Add MCP context - this is the key difference
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
chalk.green(`\n✅ MCP streaming test completed in ${duration}ms`)
|
||||||
|
);
|
||||||
|
|
||||||
|
const { hasEmojiIndicators, logs } = printMCPResults(
|
||||||
|
mcpLogger,
|
||||||
|
progressReporter
|
||||||
|
);
|
||||||
|
const isValidStructure = verifyTaskResults(testTasksPath);
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
duration,
|
||||||
|
progressHistory: progressReporter.getProgressHistory(),
|
||||||
|
mcpLogs: logs,
|
||||||
|
hasEmojiIndicators,
|
||||||
|
result
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.error(chalk.red(`❌ MCP streaming test failed: ${error.message}`));
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
error: error.message
|
||||||
|
};
|
||||||
|
} finally {
|
||||||
|
cleanupTestFiles(testTasksPath, configPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test CLI streaming (no reportProgress)
|
||||||
|
*/
|
||||||
|
async function testCLIStreaming(numTasks = 10) {
|
||||||
|
console.log(chalk.cyan('🧪 Testing CLI Streaming (No Progress Reporter)\n'));
|
||||||
|
|
||||||
|
const { testPRDPath, testTasksPath, configPath } = setupTestFiles('cli');
|
||||||
|
|
||||||
|
try {
|
||||||
|
console.log(chalk.yellow('Starting CLI streaming test...'));
|
||||||
|
|
||||||
|
// No reportProgress provided; CLI text mode uses the default streaming reporter
|
||||||
|
const { result, duration } = await runParsePRD(
|
||||||
|
testPRDPath,
|
||||||
|
testTasksPath,
|
||||||
|
numTasks
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
chalk.green(`\n✅ CLI streaming test completed in ${duration}ms`)
|
||||||
|
);
|
||||||
|
|
||||||
|
const isValidStructure = verifyTaskResults(testTasksPath);
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
duration,
|
||||||
|
result
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.error(chalk.red(`❌ CLI streaming test failed: ${error.message}`));
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
error: error.message
|
||||||
|
};
|
||||||
|
} finally {
|
||||||
|
cleanupTestFiles(testTasksPath, configPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test non-streaming functionality
|
||||||
|
*/
|
||||||
|
async function testNonStreaming(numTasks = 10) {
|
||||||
|
console.log(chalk.cyan('🧪 Testing Non-Streaming Functionality\n'));
|
||||||
|
|
||||||
|
const { testPRDPath, testTasksPath, configPath } =
|
||||||
|
setupTestFiles('non-streaming');
|
||||||
|
|
||||||
|
try {
|
||||||
|
console.log(chalk.yellow('Starting non-streaming test...'));
|
||||||
|
|
||||||
|
// Force non-streaming by not providing reportProgress
|
||||||
|
const { result, duration } = await runParsePRD(
|
||||||
|
testPRDPath,
|
||||||
|
testTasksPath,
|
||||||
|
numTasks
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
chalk.green(`\n✅ Non-streaming test completed in ${duration}ms`)
|
||||||
|
);
|
||||||
|
|
||||||
|
const isValidStructure = verifyTaskResults(testTasksPath);
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
duration,
|
||||||
|
result
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.error(chalk.red(`❌ Non-streaming test failed: ${error.message}`));
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
error: error.message
|
||||||
|
};
|
||||||
|
} finally {
|
||||||
|
cleanupTestFiles(testTasksPath, configPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compare results between streaming and non-streaming
|
||||||
|
*/
|
||||||
|
function compareResults(streamingResult, nonStreamingResult) {
|
||||||
|
console.log(chalk.cyan('\n=== Results Comparison ==='));
|
||||||
|
|
||||||
|
if (!streamingResult.success || !nonStreamingResult.success) {
|
||||||
|
console.log(chalk.red('❌ Cannot compare - one or both tests failed'));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(`Streaming duration: ${streamingResult.duration}ms`);
|
||||||
|
console.log(`Non-streaming duration: ${nonStreamingResult.duration}ms`);
|
||||||
|
|
||||||
|
const durationDiff = Math.abs(
|
||||||
|
streamingResult.duration - nonStreamingResult.duration
|
||||||
|
);
|
||||||
|
const durationDiffPercent = Math.round(
|
||||||
|
(durationDiff /
|
||||||
|
Math.max(streamingResult.duration, nonStreamingResult.duration)) *
|
||||||
|
100
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
`Duration difference: ${durationDiff}ms (${durationDiffPercent}%)`
|
||||||
|
);
|
||||||
|
|
||||||
|
if (streamingResult.progressHistory) {
|
||||||
|
console.log(
|
||||||
|
`Streaming progress reports: ${streamingResult.progressHistory.length}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(chalk.green('✅ Both methods completed successfully'));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Main test runner
|
||||||
|
*/
|
||||||
|
async function main() {
|
||||||
|
const args = process.argv.slice(2);
|
||||||
|
const testType = args[0] || 'streaming';
|
||||||
|
const numTasks = parseInt(args[1]) || 8;
|
||||||
|
|
||||||
|
console.log(chalk.bold.cyan('🚀 Task Master PRD Streaming Tests\n'));
|
||||||
|
console.log(chalk.blue(`Test type: ${testType}`));
|
||||||
|
console.log(chalk.blue(`Number of tasks: ${numTasks}\n`));
|
||||||
|
|
||||||
|
try {
|
||||||
|
switch (testType.toLowerCase()) {
|
||||||
|
case 'mcp':
|
||||||
|
case 'mcp-streaming':
|
||||||
|
await testMCPStreaming(numTasks);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'cli':
|
||||||
|
case 'cli-streaming':
|
||||||
|
await testCLIStreaming(numTasks);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'non-streaming':
|
||||||
|
case 'non':
|
||||||
|
await testNonStreaming(numTasks);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'both': {
|
||||||
|
console.log(
|
||||||
|
chalk.yellow(
|
||||||
|
'Running both MCP streaming and non-streaming tests...\n'
|
||||||
|
)
|
||||||
|
);
|
||||||
|
const mcpStreamingResult = await testMCPStreaming(numTasks);
|
||||||
|
console.log('\n' + '='.repeat(60) + '\n');
|
||||||
|
const nonStreamingResult = await testNonStreaming(numTasks);
|
||||||
|
compareResults(mcpStreamingResult, nonStreamingResult);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'all': {
|
||||||
|
console.log(chalk.yellow('Running all test types...\n'));
|
||||||
|
const mcpResult = await testMCPStreaming(numTasks);
|
||||||
|
console.log('\n' + '='.repeat(60) + '\n');
|
||||||
|
const cliResult = await testCLIStreaming(numTasks);
|
||||||
|
console.log('\n' + '='.repeat(60) + '\n');
|
||||||
|
const nonStreamResult = await testNonStreaming(numTasks);
|
||||||
|
|
||||||
|
console.log(chalk.cyan('\n=== All Tests Summary ==='));
|
||||||
|
console.log(
|
||||||
|
`MCP Streaming: ${mcpResult.success ? '✅ PASS' : '❌ FAIL'} ${mcpResult.hasEmojiIndicators ? '(✅ Emojis)' : '(❌ No Emojis)'}`
|
||||||
|
);
|
||||||
|
console.log(
|
||||||
|
`CLI Streaming: ${cliResult.success ? '✅ PASS' : '❌ FAIL'}`
|
||||||
|
);
|
||||||
|
console.log(
|
||||||
|
`Non-streaming: ${nonStreamResult.success ? '✅ PASS' : '❌ FAIL'}`
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
console.log(chalk.red(`Unknown test type: ${testType}`));
|
||||||
|
console.log(
|
||||||
|
chalk.yellow(
|
||||||
|
'Available options: mcp-streaming, cli-streaming, non-streaming, both, all'
|
||||||
|
)
|
||||||
|
);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(chalk.green('\n🎉 Tests completed successfully!'));
|
||||||
|
} catch (error) {
|
||||||
|
console.error(chalk.red(`\n❌ Test failed: ${error.message}`));
|
||||||
|
console.error(chalk.red(error.stack));
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run if called directly
|
||||||
|
if (import.meta.url === `file://${process.argv[1]}`) {
|
||||||
|
main();
|
||||||
|
}
|
||||||
@@ -391,7 +391,7 @@ describe('Unified AI Services', () => {
|
|||||||
expect.stringContaining('Service call failed for role main')
|
expect.stringContaining('Service call failed for role main')
|
||||||
);
|
);
|
||||||
expect(mockLog).toHaveBeenCalledWith(
|
expect(mockLog).toHaveBeenCalledWith(
|
||||||
'info',
|
'debug',
|
||||||
expect.stringContaining('New AI service call with role: fallback')
|
expect.stringContaining('New AI service call with role: fallback')
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
@@ -435,7 +435,7 @@ describe('Unified AI Services', () => {
|
|||||||
expect.stringContaining('Service call failed for role fallback')
|
expect.stringContaining('Service call failed for role fallback')
|
||||||
);
|
);
|
||||||
expect(mockLog).toHaveBeenCalledWith(
|
expect(mockLog).toHaveBeenCalledWith(
|
||||||
'info',
|
'debug',
|
||||||
expect.stringContaining('New AI service call with role: research')
|
expect.stringContaining('New AI service call with role: research')
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -1,68 +1,470 @@
|
|||||||
// In tests/unit/parse-prd.test.js
|
// In tests/unit/parse-prd.test.js
|
||||||
// Testing that parse-prd.js handles both .txt and .md files the same way
|
// Testing parse-prd.js file extension compatibility with real files
|
||||||
|
|
||||||
import { jest } from '@jest/globals';
|
import { jest } from '@jest/globals';
|
||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
import os from 'os';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = path.dirname(__filename);
|
||||||
|
|
||||||
|
// Mock the AI services to avoid real API calls
|
||||||
|
jest.unstable_mockModule(
|
||||||
|
'../../scripts/modules/ai-services-unified.js',
|
||||||
|
() => ({
|
||||||
|
streamTextService: jest.fn(),
|
||||||
|
generateObjectService: jest.fn(),
|
||||||
|
streamObjectService: jest.fn().mockImplementation(async () => {
|
||||||
|
return {
|
||||||
|
get partialObjectStream() {
|
||||||
|
return (async function* () {
|
||||||
|
yield { tasks: [] };
|
||||||
|
yield { tasks: [{ id: 1, title: 'Test Task', priority: 'high' }] };
|
||||||
|
})();
|
||||||
|
},
|
||||||
|
object: Promise.resolve({
|
||||||
|
tasks: [{ id: 1, title: 'Test Task', priority: 'high' }]
|
||||||
|
})
|
||||||
|
};
|
||||||
|
})
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Mock all config-manager exports comprehensively
|
||||||
|
jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
|
||||||
|
getDebugFlag: jest.fn(() => false),
|
||||||
|
getDefaultPriority: jest.fn(() => 'medium'),
|
||||||
|
getMainModelId: jest.fn(() => 'test-model'),
|
||||||
|
getResearchModelId: jest.fn(() => 'test-research-model'),
|
||||||
|
getParametersForRole: jest.fn(() => ({ maxTokens: 1000, temperature: 0.7 })),
|
||||||
|
getMainProvider: jest.fn(() => 'anthropic'),
|
||||||
|
getResearchProvider: jest.fn(() => 'perplexity'),
|
||||||
|
getFallbackProvider: jest.fn(() => 'anthropic'),
|
||||||
|
getResponseLanguage: jest.fn(() => 'English'),
|
||||||
|
getDefaultNumTasks: jest.fn(() => 10),
|
||||||
|
getDefaultSubtasks: jest.fn(() => 5),
|
||||||
|
getLogLevel: jest.fn(() => 'info'),
|
||||||
|
getConfig: jest.fn(() => ({})),
|
||||||
|
getAllProviders: jest.fn(() => ['anthropic', 'perplexity']),
|
||||||
|
MODEL_MAP: {},
|
||||||
|
VALID_PROVIDERS: ['anthropic', 'perplexity'],
|
||||||
|
validateProvider: jest.fn(() => true),
|
||||||
|
validateProviderModelCombination: jest.fn(() => true),
|
||||||
|
isApiKeySet: jest.fn(() => true)
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Mock utils comprehensively to prevent CLI behavior
|
||||||
|
jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
|
||||||
|
log: jest.fn(),
|
||||||
|
writeJSON: jest.fn(),
|
||||||
|
enableSilentMode: jest.fn(),
|
||||||
|
disableSilentMode: jest.fn(),
|
||||||
|
isSilentMode: jest.fn(() => false),
|
||||||
|
getCurrentTag: jest.fn(() => 'master'),
|
||||||
|
ensureTagMetadata: jest.fn(),
|
||||||
|
readJSON: jest.fn(() => ({ master: { tasks: [] } })),
|
||||||
|
findProjectRoot: jest.fn(() => '/tmp/test'),
|
||||||
|
resolveEnvVariable: jest.fn(() => 'mock-key'),
|
||||||
|
findTaskById: jest.fn(() => null),
|
||||||
|
findTaskByPattern: jest.fn(() => []),
|
||||||
|
validateTaskId: jest.fn(() => true),
|
||||||
|
createTask: jest.fn(() => ({ id: 1, title: 'Mock Task' })),
|
||||||
|
sortByDependencies: jest.fn((tasks) => tasks),
|
||||||
|
isEmpty: jest.fn(() => false),
|
||||||
|
truncate: jest.fn((text) => text),
|
||||||
|
slugify: jest.fn((text) => text.toLowerCase()),
|
||||||
|
getTagFromPath: jest.fn(() => 'master'),
|
||||||
|
isValidTag: jest.fn(() => true),
|
||||||
|
migrateToTaggedFormat: jest.fn(() => ({ master: { tasks: [] } })),
|
||||||
|
performCompleteTagMigration: jest.fn(),
|
||||||
|
resolveCurrentTag: jest.fn(() => 'master'),
|
||||||
|
getDefaultTag: jest.fn(() => 'master'),
|
||||||
|
performMigrationIfNeeded: jest.fn()
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Mock prompt manager
|
||||||
|
jest.unstable_mockModule('../../scripts/modules/prompt-manager.js', () => ({
|
||||||
|
getPromptManager: jest.fn(() => ({
|
||||||
|
loadPrompt: jest.fn(() => ({
|
||||||
|
systemPrompt: 'Test system prompt',
|
||||||
|
userPrompt: 'Test user prompt'
|
||||||
|
}))
|
||||||
|
}))
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Mock progress/UI components to prevent real CLI UI
|
||||||
|
jest.unstable_mockModule('../../src/progress/parse-prd-tracker.js', () => ({
|
||||||
|
createParsePrdTracker: jest.fn(() => ({
|
||||||
|
start: jest.fn(),
|
||||||
|
stop: jest.fn(),
|
||||||
|
cleanup: jest.fn(),
|
||||||
|
addTaskLine: jest.fn(),
|
||||||
|
updateTokens: jest.fn(),
|
||||||
|
complete: jest.fn(),
|
||||||
|
getSummary: jest.fn().mockReturnValue({
|
||||||
|
taskPriorities: { high: 0, medium: 0, low: 0 },
|
||||||
|
elapsedTime: 0,
|
||||||
|
actionVerb: 'generated'
|
||||||
|
})
|
||||||
|
}))
|
||||||
|
}));
|
||||||
|
|
||||||
|
jest.unstable_mockModule('../../src/ui/parse-prd.js', () => ({
|
||||||
|
displayParsePrdStart: jest.fn(),
|
||||||
|
displayParsePrdSummary: jest.fn()
|
||||||
|
}));
|
||||||
|
|
||||||
|
jest.unstable_mockModule('../../scripts/modules/ui.js', () => ({
|
||||||
|
displayAiUsageSummary: jest.fn()
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Mock task generation to prevent file operations
|
||||||
|
jest.unstable_mockModule(
|
||||||
|
'../../scripts/modules/task-manager/generate-task-files.js',
|
||||||
|
() => ({
|
||||||
|
default: jest.fn()
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Mock stream parser
|
||||||
|
jest.unstable_mockModule('../../src/utils/stream-parser.js', () => {
|
||||||
|
// Define mock StreamingError class
|
||||||
|
class StreamingError extends Error {
|
||||||
|
constructor(message, code) {
|
||||||
|
super(message);
|
||||||
|
this.name = 'StreamingError';
|
||||||
|
this.code = code;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Define mock error codes
|
||||||
|
const STREAMING_ERROR_CODES = {
|
||||||
|
NOT_ASYNC_ITERABLE: 'STREAMING_NOT_SUPPORTED',
|
||||||
|
STREAM_PROCESSING_FAILED: 'STREAM_PROCESSING_FAILED',
|
||||||
|
STREAM_NOT_ITERABLE: 'STREAM_NOT_ITERABLE'
|
||||||
|
};
|
||||||
|
|
||||||
|
return {
|
||||||
|
parseStream: jest.fn(),
|
||||||
|
StreamingError,
|
||||||
|
STREAMING_ERROR_CODES
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
// Mock other potential UI elements
|
||||||
|
jest.unstable_mockModule('ora', () => ({
|
||||||
|
default: jest.fn(() => ({
|
||||||
|
start: jest.fn(),
|
||||||
|
stop: jest.fn(),
|
||||||
|
succeed: jest.fn(),
|
||||||
|
fail: jest.fn()
|
||||||
|
}))
|
||||||
|
}));
|
||||||
|
|
||||||
|
jest.unstable_mockModule('chalk', () => ({
|
||||||
|
default: {
|
||||||
|
red: jest.fn((text) => text),
|
||||||
|
green: jest.fn((text) => text),
|
||||||
|
blue: jest.fn((text) => text),
|
||||||
|
yellow: jest.fn((text) => text),
|
||||||
|
cyan: jest.fn((text) => text),
|
||||||
|
white: {
|
||||||
|
bold: jest.fn((text) => text)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
red: jest.fn((text) => text),
|
||||||
|
green: jest.fn((text) => text),
|
||||||
|
blue: jest.fn((text) => text),
|
||||||
|
yellow: jest.fn((text) => text),
|
||||||
|
cyan: jest.fn((text) => text),
|
||||||
|
white: {
|
||||||
|
bold: jest.fn((text) => text)
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Mock boxen
|
||||||
|
jest.unstable_mockModule('boxen', () => ({
|
||||||
|
default: jest.fn((content) => content)
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Mock constants
|
||||||
|
jest.unstable_mockModule('../../src/constants/task-priority.js', () => ({
|
||||||
|
DEFAULT_TASK_PRIORITY: 'medium',
|
||||||
|
TASK_PRIORITY_OPTIONS: ['low', 'medium', 'high']
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Mock UI indicators
|
||||||
|
jest.unstable_mockModule('../../src/ui/indicators.js', () => ({
|
||||||
|
getPriorityIndicators: jest.fn(() => ({
|
||||||
|
high: '🔴',
|
||||||
|
medium: '🟡',
|
||||||
|
low: '🟢'
|
||||||
|
}))
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Import modules after mocking
|
||||||
|
const { generateObjectService } = await import(
|
||||||
|
'../../scripts/modules/ai-services-unified.js'
|
||||||
|
);
|
||||||
|
const parsePRD = (
|
||||||
|
await import('../../scripts/modules/task-manager/parse-prd/parse-prd.js')
|
||||||
|
).default;
|
||||||
|
|
||||||
describe('parse-prd file extension compatibility', () => {
|
describe('parse-prd file extension compatibility', () => {
|
||||||
// Test directly that the parse-prd functionality works with different extensions
|
let tempDir;
|
||||||
// by examining the parameter handling in mcp-server/src/tools/parse-prd.js
|
let testFiles;
|
||||||
|
|
||||||
test('Parameter description mentions support for .md files', () => {
|
const mockTasksResponse = {
|
||||||
// The parameter description for 'input' in parse-prd.js includes .md files
|
tasks: [
|
||||||
const description =
|
{
|
||||||
'Absolute path to the PRD document file (.txt, .md, etc.)';
|
id: 1,
|
||||||
|
title: 'Test Task 1',
|
||||||
// Verify the description explicitly mentions .md files
|
description: 'First test task',
|
||||||
expect(description).toContain('.md');
|
status: 'pending',
|
||||||
});
|
dependencies: [],
|
||||||
|
priority: 'high',
|
||||||
test('File extension validation is not restricted to .txt files', () => {
|
details: 'Implementation details for task 1',
|
||||||
// Check for absence of extension validation
|
testStrategy: 'Unit tests for task 1'
|
||||||
const fileValidator = (filePath) => {
|
},
|
||||||
// Return a boolean value to ensure the test passes
|
{
|
||||||
if (!filePath || filePath.length === 0) {
|
id: 2,
|
||||||
return false;
|
title: 'Test Task 2',
|
||||||
|
description: 'Second test task',
|
||||||
|
status: 'pending',
|
||||||
|
dependencies: [1],
|
||||||
|
priority: 'medium',
|
||||||
|
details: 'Implementation details for task 2',
|
||||||
|
testStrategy: 'Integration tests for task 2'
|
||||||
|
}
|
||||||
|
],
|
||||||
|
metadata: {
|
||||||
|
projectName: 'Test Project',
|
||||||
|
totalTasks: 2,
|
||||||
|
sourceFile: 'test-prd',
|
||||||
|
generatedAt: new Date().toISOString()
|
||||||
}
|
}
|
||||||
return true;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Test with different extensions
|
const samplePRDContent = `# Test Project PRD
|
||||||
expect(fileValidator('/path/to/prd.txt')).toBe(true);
|
|
||||||
expect(fileValidator('/path/to/prd.md')).toBe(true);
|
|
||||||
|
|
||||||
// Invalid cases should still fail regardless of extension
|
## Overview
|
||||||
expect(fileValidator('')).toBe(false);
|
Build a simple task management application.
|
||||||
});
|
|
||||||
|
|
||||||
test('Implementation handles all file types the same way', () => {
|
## Features
|
||||||
// This test confirms that the implementation treats all file types equally
|
1. Create and manage tasks
|
||||||
// by simulating the core functionality
|
2. Set task priorities
|
||||||
|
3. Track task dependencies
|
||||||
|
|
||||||
const mockImplementation = (filePath) => {
|
## Technical Requirements
|
||||||
// The parse-prd.js implementation only checks file existence,
|
- React frontend
|
||||||
// not the file extension, which is what we want to verify
|
- Node.js backend
|
||||||
|
- PostgreSQL database
|
||||||
|
|
||||||
if (!filePath) {
|
## Success Criteria
|
||||||
return { success: false, error: { code: 'MISSING_INPUT_FILE' } };
|
- Users can create tasks successfully
|
||||||
}
|
- Task dependencies work correctly`;
|
||||||
|
|
||||||
// In the real implementation, this would check if the file exists
|
beforeAll(() => {
|
||||||
// But for our test, we're verifying that the same logic applies
|
// Create temporary directory for test files
|
||||||
// regardless of file extension
|
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'parse-prd-test-'));
|
||||||
|
|
||||||
// No special handling for different extensions
|
// Create test files with different extensions
|
||||||
return { success: true };
|
testFiles = {
|
||||||
|
txt: path.join(tempDir, 'test-prd.txt'),
|
||||||
|
md: path.join(tempDir, 'test-prd.md'),
|
||||||
|
rst: path.join(tempDir, 'test-prd.rst'),
|
||||||
|
noExt: path.join(tempDir, 'test-prd')
|
||||||
};
|
};
|
||||||
|
|
||||||
// Verify same behavior for different extensions
|
// Write the same content to all test files
|
||||||
const txtResult = mockImplementation('/path/to/prd.txt');
|
Object.values(testFiles).forEach((filePath) => {
|
||||||
const mdResult = mockImplementation('/path/to/prd.md');
|
fs.writeFileSync(filePath, samplePRDContent);
|
||||||
|
});
|
||||||
|
|
||||||
// Both should succeed since there's no extension-specific logic
|
// Mock process.exit to prevent actual exit
|
||||||
expect(txtResult.success).toBe(true);
|
jest.spyOn(process, 'exit').mockImplementation(() => undefined);
|
||||||
expect(mdResult.success).toBe(true);
|
|
||||||
|
|
||||||
// Both should have the same structure
|
// Mock console methods to prevent output
|
||||||
expect(Object.keys(txtResult)).toEqual(Object.keys(mdResult));
|
jest.spyOn(console, 'log').mockImplementation(() => {});
|
||||||
|
jest.spyOn(console, 'error').mockImplementation(() => {});
|
||||||
|
});
|
||||||
|
|
||||||
|
afterAll(() => {
|
||||||
|
// Clean up temporary directory
|
||||||
|
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||||
|
|
||||||
|
// Restore mocks
|
||||||
|
jest.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
jest.clearAllMocks();
|
||||||
|
|
||||||
|
// Mock successful AI response
|
||||||
|
generateObjectService.mockResolvedValue({
|
||||||
|
mainResult: { object: mockTasksResponse },
|
||||||
|
telemetryData: {
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
userId: 'test-user',
|
||||||
|
commandName: 'parse-prd',
|
||||||
|
modelUsed: 'test-model',
|
||||||
|
providerName: 'test-provider',
|
||||||
|
inputTokens: 100,
|
||||||
|
outputTokens: 200,
|
||||||
|
totalTokens: 300,
|
||||||
|
totalCost: 0.01,
|
||||||
|
currency: 'USD'
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should accept and parse .txt files', async () => {
|
||||||
|
const outputPath = path.join(tempDir, 'tasks-txt.json');
|
||||||
|
|
||||||
|
const result = await parsePRD(testFiles.txt, outputPath, 2, {
|
||||||
|
force: true,
|
||||||
|
mcpLog: {
|
||||||
|
info: jest.fn(),
|
||||||
|
warn: jest.fn(),
|
||||||
|
error: jest.fn(),
|
||||||
|
debug: jest.fn(),
|
||||||
|
success: jest.fn()
|
||||||
|
},
|
||||||
|
projectRoot: tempDir
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.success).toBe(true);
|
||||||
|
expect(result.tasksPath).toBe(outputPath);
|
||||||
|
expect(fs.existsSync(outputPath)).toBe(true);
|
||||||
|
|
||||||
|
// Verify the content was parsed correctly
|
||||||
|
const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
|
||||||
|
expect(tasksData.master.tasks).toHaveLength(2);
|
||||||
|
expect(tasksData.master.tasks[0].title).toBe('Test Task 1');
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should accept and parse .md files', async () => {
|
||||||
|
const outputPath = path.join(tempDir, 'tasks-md.json');
|
||||||
|
|
||||||
|
const result = await parsePRD(testFiles.md, outputPath, 2, {
|
||||||
|
force: true,
|
||||||
|
mcpLog: {
|
||||||
|
info: jest.fn(),
|
||||||
|
warn: jest.fn(),
|
||||||
|
error: jest.fn(),
|
||||||
|
debug: jest.fn(),
|
||||||
|
success: jest.fn()
|
||||||
|
},
|
||||||
|
projectRoot: tempDir
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.success).toBe(true);
|
||||||
|
expect(result.tasksPath).toBe(outputPath);
|
||||||
|
expect(fs.existsSync(outputPath)).toBe(true);
|
||||||
|
|
||||||
|
// Verify the content was parsed correctly
|
||||||
|
const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
|
||||||
|
expect(tasksData.master.tasks).toHaveLength(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should accept and parse files with other text extensions', async () => {
|
||||||
|
const outputPath = path.join(tempDir, 'tasks-rst.json');
|
||||||
|
|
||||||
|
const result = await parsePRD(testFiles.rst, outputPath, 2, {
|
||||||
|
force: true,
|
||||||
|
mcpLog: {
|
||||||
|
info: jest.fn(),
|
||||||
|
warn: jest.fn(),
|
||||||
|
error: jest.fn(),
|
||||||
|
debug: jest.fn(),
|
||||||
|
success: jest.fn()
|
||||||
|
},
|
||||||
|
projectRoot: tempDir
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.success).toBe(true);
|
||||||
|
expect(result.tasksPath).toBe(outputPath);
|
||||||
|
expect(fs.existsSync(outputPath)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should accept and parse files with no extension', async () => {
|
||||||
|
const outputPath = path.join(tempDir, 'tasks-noext.json');
|
||||||
|
|
||||||
|
const result = await parsePRD(testFiles.noExt, outputPath, 2, {
|
||||||
|
force: true,
|
||||||
|
mcpLog: {
|
||||||
|
info: jest.fn(),
|
||||||
|
warn: jest.fn(),
|
||||||
|
error: jest.fn(),
|
||||||
|
debug: jest.fn(),
|
||||||
|
success: jest.fn()
|
||||||
|
},
|
||||||
|
projectRoot: tempDir
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.success).toBe(true);
|
||||||
|
expect(result.tasksPath).toBe(outputPath);
|
||||||
|
expect(fs.existsSync(outputPath)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should produce identical results regardless of file extension', async () => {
|
||||||
|
const outputs = {};
|
||||||
|
|
||||||
|
// Parse each file type with a unique project root to avoid ID conflicts
|
||||||
|
for (const [ext, filePath] of Object.entries(testFiles)) {
|
||||||
|
// Create a unique subdirectory for each test to isolate them
|
||||||
|
const testSubDir = path.join(tempDir, `test-${ext}`);
|
||||||
|
fs.mkdirSync(testSubDir, { recursive: true });
|
||||||
|
|
||||||
|
const outputPath = path.join(testSubDir, `tasks.json`);
|
||||||
|
|
||||||
|
await parsePRD(filePath, outputPath, 2, {
|
||||||
|
force: true,
|
||||||
|
mcpLog: {
|
||||||
|
info: jest.fn(),
|
||||||
|
warn: jest.fn(),
|
||||||
|
error: jest.fn(),
|
||||||
|
debug: jest.fn(),
|
||||||
|
success: jest.fn()
|
||||||
|
},
|
||||||
|
projectRoot: testSubDir
|
||||||
|
});
|
||||||
|
|
||||||
|
const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
|
||||||
|
outputs[ext] = tasksData;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare all outputs - they should be identical (except metadata timestamps)
|
||||||
|
const baseOutput = outputs.txt;
|
||||||
|
Object.values(outputs).forEach((output) => {
|
||||||
|
expect(output.master.tasks).toEqual(baseOutput.master.tasks);
|
||||||
|
expect(output.master.metadata.projectName).toEqual(
|
||||||
|
baseOutput.master.metadata.projectName
|
||||||
|
);
|
||||||
|
expect(output.master.metadata.totalTasks).toEqual(
|
||||||
|
baseOutput.master.metadata.totalTasks
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should handle non-existent files gracefully', async () => {
|
||||||
|
const nonExistentFile = path.join(tempDir, 'does-not-exist.txt');
|
||||||
|
const outputPath = path.join(tempDir, 'tasks-error.json');
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
parsePRD(nonExistentFile, outputPath, 2, {
|
||||||
|
force: true,
|
||||||
|
mcpLog: {
|
||||||
|
info: jest.fn(),
|
||||||
|
warn: jest.fn(),
|
||||||
|
error: jest.fn(),
|
||||||
|
debug: jest.fn(),
|
||||||
|
success: jest.fn()
|
||||||
|
},
|
||||||
|
projectRoot: tempDir
|
||||||
|
})
|
||||||
|
).rejects.toThrow();
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
134
tests/unit/progress/base-progress-tracker.test.js
Normal file
134
tests/unit/progress/base-progress-tracker.test.js
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
import { jest } from '@jest/globals';
|
||||||
|
|
||||||
|
// Mock cli-progress factory before importing BaseProgressTracker
|
||||||
|
jest.unstable_mockModule(
|
||||||
|
'../../../src/progress/cli-progress-factory.js',
|
||||||
|
() => ({
|
||||||
|
newMultiBar: jest.fn(() => ({
|
||||||
|
create: jest.fn(() => ({
|
||||||
|
update: jest.fn()
|
||||||
|
})),
|
||||||
|
stop: jest.fn()
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
const { newMultiBar } = await import(
|
||||||
|
'../../../src/progress/cli-progress-factory.js'
|
||||||
|
);
|
||||||
|
const { BaseProgressTracker } = await import(
|
||||||
|
'../../../src/progress/base-progress-tracker.js'
|
||||||
|
);
|
||||||
|
|
||||||
|
describe('BaseProgressTracker', () => {
|
||||||
|
let tracker;
|
||||||
|
let mockMultiBar;
|
||||||
|
let mockProgressBar;
|
||||||
|
let mockTimeTokensBar;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
jest.clearAllMocks();
|
||||||
|
jest.useFakeTimers();
|
||||||
|
|
||||||
|
// Setup mocks
|
||||||
|
mockProgressBar = { update: jest.fn() };
|
||||||
|
mockTimeTokensBar = { update: jest.fn() };
|
||||||
|
mockMultiBar = {
|
||||||
|
create: jest
|
||||||
|
.fn()
|
||||||
|
.mockReturnValueOnce(mockTimeTokensBar)
|
||||||
|
.mockReturnValueOnce(mockProgressBar),
|
||||||
|
stop: jest.fn()
|
||||||
|
};
|
||||||
|
newMultiBar.mockReturnValue(mockMultiBar);
|
||||||
|
|
||||||
|
tracker = new BaseProgressTracker({ numUnits: 10, unitName: 'task' });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
jest.useRealTimers();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('cleanup', () => {
|
||||||
|
it('should stop and clear timer interval', () => {
|
||||||
|
tracker.start();
|
||||||
|
expect(tracker._timerInterval).toBeTruthy();
|
||||||
|
|
||||||
|
tracker.cleanup();
|
||||||
|
expect(tracker._timerInterval).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should stop and null multibar reference', () => {
|
||||||
|
tracker.start();
|
||||||
|
expect(tracker.multibar).toBeTruthy();
|
||||||
|
|
||||||
|
tracker.cleanup();
|
||||||
|
expect(mockMultiBar.stop).toHaveBeenCalled();
|
||||||
|
expect(tracker.multibar).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should null progress bar references', () => {
|
||||||
|
tracker.start();
|
||||||
|
expect(tracker.timeTokensBar).toBeTruthy();
|
||||||
|
expect(tracker.progressBar).toBeTruthy();
|
||||||
|
|
||||||
|
tracker.cleanup();
|
||||||
|
expect(tracker.timeTokensBar).toBeNull();
|
||||||
|
expect(tracker.progressBar).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set finished state', () => {
|
||||||
|
tracker.start();
|
||||||
|
expect(tracker.isStarted).toBe(true);
|
||||||
|
expect(tracker.isFinished).toBe(false);
|
||||||
|
|
||||||
|
tracker.cleanup();
|
||||||
|
expect(tracker.isStarted).toBe(false);
|
||||||
|
expect(tracker.isFinished).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle cleanup when multibar.stop throws error', () => {
|
||||||
|
tracker.start();
|
||||||
|
mockMultiBar.stop.mockImplementation(() => {
|
||||||
|
throw new Error('Stop failed');
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(() => tracker.cleanup()).not.toThrow();
|
||||||
|
expect(tracker.multibar).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should be safe to call multiple times', () => {
|
||||||
|
tracker.start();
|
||||||
|
|
||||||
|
tracker.cleanup();
|
||||||
|
tracker.cleanup();
|
||||||
|
tracker.cleanup();
|
||||||
|
|
||||||
|
expect(mockMultiBar.stop).toHaveBeenCalledTimes(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should be safe to call without starting', () => {
|
||||||
|
expect(() => tracker.cleanup()).not.toThrow();
|
||||||
|
expect(tracker.multibar).toBeNull();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('stop vs cleanup', () => {
|
||||||
|
it('stop should call cleanup and null multibar reference', () => {
|
||||||
|
tracker.start();
|
||||||
|
tracker.stop();
|
||||||
|
|
||||||
|
// stop() now calls cleanup() which nulls the multibar
|
||||||
|
expect(tracker.multibar).toBeNull();
|
||||||
|
expect(tracker.isFinished).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('cleanup should null multibar preventing getSummary', () => {
|
||||||
|
tracker.start();
|
||||||
|
tracker.cleanup();
|
||||||
|
|
||||||
|
expect(tracker.multibar).toBeNull();
|
||||||
|
expect(tracker.isFinished).toBe(true);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
@@ -79,6 +79,38 @@ jest.unstable_mockModule(
|
|||||||
totalCost: 0.012414,
|
totalCost: 0.012414,
|
||||||
currency: 'USD'
|
currency: 'USD'
|
||||||
}
|
}
|
||||||
|
}),
|
||||||
|
streamTextService: jest.fn().mockResolvedValue({
|
||||||
|
mainResult: async function* () {
|
||||||
|
yield '{"tasks":[';
|
||||||
|
yield '{"id":1,"title":"Test Task","priority":"high"}';
|
||||||
|
yield ']}';
|
||||||
|
},
|
||||||
|
telemetryData: {
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
userId: '1234567890',
|
||||||
|
commandName: 'analyze-complexity',
|
||||||
|
modelUsed: 'claude-3-5-sonnet',
|
||||||
|
providerName: 'anthropic',
|
||||||
|
inputTokens: 1000,
|
||||||
|
outputTokens: 500,
|
||||||
|
totalTokens: 1500,
|
||||||
|
totalCost: 0.012414,
|
||||||
|
currency: 'USD'
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
streamObjectService: jest.fn().mockImplementation(async () => {
|
||||||
|
return {
|
||||||
|
get partialObjectStream() {
|
||||||
|
return (async function* () {
|
||||||
|
yield { tasks: [] };
|
||||||
|
yield { tasks: [{ id: 1, title: 'Test Task', priority: 'high' }] };
|
||||||
|
})();
|
||||||
|
},
|
||||||
|
object: Promise.resolve({
|
||||||
|
tasks: [{ id: 1, title: 'Test Task', priority: 'high' }]
|
||||||
|
})
|
||||||
|
};
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
@@ -189,9 +221,8 @@ const { readJSON, writeJSON, log, CONFIG, findTaskById } = await import(
|
|||||||
'../../../../../scripts/modules/utils.js'
|
'../../../../../scripts/modules/utils.js'
|
||||||
);
|
);
|
||||||
|
|
||||||
const { generateObjectService, generateTextService } = await import(
|
const { generateObjectService, generateTextService, streamTextService } =
|
||||||
'../../../../../scripts/modules/ai-services-unified.js'
|
await import('../../../../../scripts/modules/ai-services-unified.js');
|
||||||
);
|
|
||||||
|
|
||||||
const fs = await import('fs');
|
const fs = await import('fs');
|
||||||
|
|
||||||
|
|||||||
@@ -178,6 +178,24 @@ jest.unstable_mockModule(
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
}),
|
}),
|
||||||
|
streamTextService: jest.fn().mockResolvedValue({
|
||||||
|
mainResult: async function* () {
|
||||||
|
yield '{"tasks":[';
|
||||||
|
yield '{"id":1,"title":"Test Task","priority":"high"}';
|
||||||
|
yield ']}';
|
||||||
|
},
|
||||||
|
telemetryData: {
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
commandName: 'analyze-complexity',
|
||||||
|
modelUsed: 'claude-3-5-sonnet',
|
||||||
|
providerName: 'anthropic',
|
||||||
|
inputTokens: 1000,
|
||||||
|
outputTokens: 500,
|
||||||
|
totalTokens: 1500,
|
||||||
|
totalCost: 0.012414,
|
||||||
|
currency: 'USD'
|
||||||
|
}
|
||||||
|
}),
|
||||||
generateObjectService: jest.fn().mockResolvedValue({
|
generateObjectService: jest.fn().mockResolvedValue({
|
||||||
mainResult: {
|
mainResult: {
|
||||||
object: {
|
object: {
|
||||||
@@ -402,7 +420,7 @@ const { readJSON, writeJSON, getTagAwareFilePath } = await import(
|
|||||||
'../../../../../scripts/modules/utils.js'
|
'../../../../../scripts/modules/utils.js'
|
||||||
);
|
);
|
||||||
|
|
||||||
const { generateTextService } = await import(
|
const { generateTextService, streamTextService } = await import(
|
||||||
'../../../../../scripts/modules/ai-services-unified.js'
|
'../../../../../scripts/modules/ai-services-unified.js'
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
169
tests/unit/ui/indicators.test.js
Normal file
169
tests/unit/ui/indicators.test.js
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
/**
|
||||||
|
* Unit tests for indicators module (priority and complexity indicators)
|
||||||
|
*/
|
||||||
|
import { jest } from '@jest/globals';
|
||||||
|
|
||||||
|
// Mock chalk using unstable_mockModule for ESM compatibility
|
||||||
|
jest.unstable_mockModule('chalk', () => ({
|
||||||
|
default: {
|
||||||
|
red: jest.fn((str) => str),
|
||||||
|
yellow: jest.fn((str) => str),
|
||||||
|
green: jest.fn((str) => str),
|
||||||
|
white: jest.fn((str) => str),
|
||||||
|
hex: jest.fn(() => jest.fn((str) => str))
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Import after mocking
|
||||||
|
const {
|
||||||
|
getMcpPriorityIndicators,
|
||||||
|
getCliPriorityIndicators,
|
||||||
|
getPriorityIndicators,
|
||||||
|
getPriorityIndicator,
|
||||||
|
getStatusBarPriorityIndicators,
|
||||||
|
getPriorityColors,
|
||||||
|
getCliComplexityIndicators,
|
||||||
|
getStatusBarComplexityIndicators,
|
||||||
|
getComplexityColors,
|
||||||
|
getComplexityIndicator
|
||||||
|
} = await import('../../../src/ui/indicators.js');
|
||||||
|
|
||||||
|
describe('Priority Indicators', () => {
|
||||||
|
describe('getMcpPriorityIndicators', () => {
|
||||||
|
it('should return emoji indicators for MCP context', () => {
|
||||||
|
const indicators = getMcpPriorityIndicators();
|
||||||
|
expect(indicators).toEqual({
|
||||||
|
high: '🔴',
|
||||||
|
medium: '🟠',
|
||||||
|
low: '🟢'
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getCliPriorityIndicators', () => {
|
||||||
|
it('should return colored dot indicators for CLI context', () => {
|
||||||
|
const indicators = getCliPriorityIndicators();
|
||||||
|
expect(indicators).toHaveProperty('high');
|
||||||
|
expect(indicators).toHaveProperty('medium');
|
||||||
|
expect(indicators).toHaveProperty('low');
|
||||||
|
// Since chalk is mocked, we're just verifying structure
|
||||||
|
expect(indicators.high).toContain('●');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getPriorityIndicators', () => {
|
||||||
|
it('should return MCP indicators when isMcp is true', () => {
|
||||||
|
const indicators = getPriorityIndicators(true);
|
||||||
|
expect(indicators).toEqual({
|
||||||
|
high: '🔴',
|
||||||
|
medium: '🟠',
|
||||||
|
low: '🟢'
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return CLI indicators when isMcp is false', () => {
|
||||||
|
const indicators = getPriorityIndicators(false);
|
||||||
|
expect(indicators).toHaveProperty('high');
|
||||||
|
expect(indicators).toHaveProperty('medium');
|
||||||
|
expect(indicators).toHaveProperty('low');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should default to CLI indicators when no parameter provided', () => {
|
||||||
|
const indicators = getPriorityIndicators();
|
||||||
|
expect(indicators).toHaveProperty('high');
|
||||||
|
expect(indicators.high).toContain('●');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getPriorityIndicator', () => {
|
||||||
|
it('should return correct MCP indicator for valid priority', () => {
|
||||||
|
expect(getPriorityIndicator('high', true)).toBe('🔴');
|
||||||
|
expect(getPriorityIndicator('medium', true)).toBe('🟠');
|
||||||
|
expect(getPriorityIndicator('low', true)).toBe('🟢');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return correct CLI indicator for valid priority', () => {
|
||||||
|
const highIndicator = getPriorityIndicator('high', false);
|
||||||
|
const mediumIndicator = getPriorityIndicator('medium', false);
|
||||||
|
const lowIndicator = getPriorityIndicator('low', false);
|
||||||
|
|
||||||
|
expect(highIndicator).toContain('●');
|
||||||
|
expect(mediumIndicator).toContain('●');
|
||||||
|
expect(lowIndicator).toContain('●');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return medium indicator for invalid priority', () => {
|
||||||
|
expect(getPriorityIndicator('invalid', true)).toBe('🟠');
|
||||||
|
expect(getPriorityIndicator(null, true)).toBe('🟠');
|
||||||
|
expect(getPriorityIndicator(undefined, true)).toBe('🟠');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should default to CLI context when isMcp not provided', () => {
|
||||||
|
const indicator = getPriorityIndicator('high');
|
||||||
|
expect(indicator).toContain('●');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Complexity Indicators', () => {
|
||||||
|
describe('getCliComplexityIndicators', () => {
|
||||||
|
it('should return colored dot indicators for complexity levels', () => {
|
||||||
|
const indicators = getCliComplexityIndicators();
|
||||||
|
expect(indicators).toHaveProperty('high');
|
||||||
|
expect(indicators).toHaveProperty('medium');
|
||||||
|
expect(indicators).toHaveProperty('low');
|
||||||
|
expect(indicators.high).toContain('●');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getStatusBarComplexityIndicators', () => {
|
||||||
|
it('should return single character indicators for status bars', () => {
|
||||||
|
const indicators = getStatusBarComplexityIndicators();
|
||||||
|
// Since chalk is mocked, we need to check for the actual characters
|
||||||
|
expect(indicators.high).toContain('⋮');
|
||||||
|
expect(indicators.medium).toContain(':');
|
||||||
|
expect(indicators.low).toContain('.');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getComplexityColors', () => {
|
||||||
|
it('should return complexity color functions', () => {
|
||||||
|
const colors = getComplexityColors();
|
||||||
|
expect(colors).toHaveProperty('high');
|
||||||
|
expect(colors).toHaveProperty('medium');
|
||||||
|
expect(colors).toHaveProperty('low');
|
||||||
|
// Verify they are functions (mocked chalk functions)
|
||||||
|
expect(typeof colors.high).toBe('function');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getComplexityIndicator', () => {
|
||||||
|
it('should return high indicator for scores >= 7', () => {
|
||||||
|
const cliIndicators = getCliComplexityIndicators();
|
||||||
|
expect(getComplexityIndicator(7)).toBe(cliIndicators.high);
|
||||||
|
expect(getComplexityIndicator(8)).toBe(cliIndicators.high);
|
||||||
|
expect(getComplexityIndicator(10)).toBe(cliIndicators.high);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return low indicator for scores <= 3', () => {
|
||||||
|
const cliIndicators = getCliComplexityIndicators();
|
||||||
|
expect(getComplexityIndicator(1)).toBe(cliIndicators.low);
|
||||||
|
expect(getComplexityIndicator(2)).toBe(cliIndicators.low);
|
||||||
|
expect(getComplexityIndicator(3)).toBe(cliIndicators.low);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return medium indicator for scores 4-6', () => {
|
||||||
|
const cliIndicators = getCliComplexityIndicators();
|
||||||
|
expect(getComplexityIndicator(4)).toBe(cliIndicators.medium);
|
||||||
|
expect(getComplexityIndicator(5)).toBe(cliIndicators.medium);
|
||||||
|
expect(getComplexityIndicator(6)).toBe(cliIndicators.medium);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return status bar indicators when statusBar is true', () => {
|
||||||
|
const statusBarIndicators = getStatusBarComplexityIndicators();
|
||||||
|
expect(getComplexityIndicator(8, true)).toBe(statusBarIndicators.high);
|
||||||
|
expect(getComplexityIndicator(5, true)).toBe(statusBarIndicators.medium);
|
||||||
|
expect(getComplexityIndicator(2, true)).toBe(statusBarIndicators.low);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
Reference in New Issue
Block a user