feat: implement research command with enhanced context gathering

- Add comprehensive research command with AI-powered queries
- Implement ContextGatherer utility for reusable context extraction
- Support multiple context types: tasks, files, custom text, project tree
- Add fuzzy search integration for automatic task discovery
- Implement detailed token breakdown display with syntax highlighting
- Add enhanced UI with boxed output and code block formatting
- Support different detail levels (low, medium, high) for responses
- Include project-specific context for more relevant AI responses
- Add token counting with gpt-tokens library integration
- Create reusable patterns for future context-aware commands
- Task 94.4 completed
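A typical invocation of the new command, based on the options registered in the diff below. The prompt, IDs, paths, and save file are illustrative, and the binary name assumes the package's standard task-master CLI entry point:

    task-master research "How should we handle token limits for large contexts?" \
      --id 15,16.2 \
      --files scripts/modules/utils/contextGatherer.js \
      --context "We target Node 18+" \
      --project-tree \
      --detail high \
      --save research-notes.md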
.changeset/bright-windows-sing.md (new file, 5 lines)

@@ -0,0 +1,5 @@
+---
+'task-master-ai': minor
+---
+
+Introduces the 'research' command to give your agent or yourself the ability to quickly get an answer using context from tasks, files, your project tree, or all.
.gitignore (19 changes)

@@ -19,13 +19,26 @@ npm-debug.log*
 yarn-debug.log*
 yarn-error.log*
 lerna-debug.log*
-tests/e2e/_runs/
-tests/e2e/log/
 
 # Coverage directory used by tools like istanbul
-coverage
+coverage/
 *.lcov
 
+# Jest cache
+.jest/
+
+# Test temporary files and directories
+tests/temp/
+tests/e2e/_runs/
+tests/e2e/log/
+tests/**/*.log
+tests/**/coverage/
+
+# Test database files (if any)
+tests/**/*.db
+tests/**/*.sqlite
+tests/**/*.sqlite3
+
 # Optional npm cache directory
 .npm
 
package-lock.json (260 changes, generated)

@@ -21,6 +21,7 @@
 				"ai": "^4.3.10",
 				"boxen": "^8.0.1",
 				"chalk": "^5.4.1",
+				"cli-highlight": "^2.1.11",
 				"cli-table3": "^0.6.5",
 				"commander": "^11.1.0",
 				"cors": "^2.8.5",
@@ -29,6 +30,7 @@
 				"fastmcp": "^1.20.5",
 				"figlet": "^1.8.0",
 				"fuse.js": "^7.1.0",
+				"gpt-tokens": "^1.3.14",
 				"gradient-string": "^3.0.0",
 				"helmet": "^8.1.0",
 				"inquirer": "^12.5.0",
@@ -3417,6 +3419,12 @@
 				"url": "https://github.com/chalk/ansi-styles?sponsor=1"
 			}
 		},
+		"node_modules/any-promise": {
+			"version": "1.3.0",
+			"resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
+			"integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==",
+			"license": "MIT"
+		},
 		"node_modules/anymatch": {
 			"version": "3.1.3",
 			"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
@@ -3623,6 +3631,26 @@
 			"dev": true,
 			"license": "MIT"
 		},
+		"node_modules/base64-js": {
+			"version": "1.5.1",
+			"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
+			"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
+			"funding": [
+				{
+					"type": "github",
+					"url": "https://github.com/sponsors/feross"
+				},
+				{
+					"type": "patreon",
+					"url": "https://www.patreon.com/feross"
+				},
+				{
+					"type": "consulting",
+					"url": "https://feross.org/support"
+				}
+			],
+			"license": "MIT"
+		},
 		"node_modules/better-path-resolve": {
 			"version": "1.0.0",
 			"resolved": "https://registry.npmjs.org/better-path-resolve/-/better-path-resolve-1.0.0.tgz",
@@ -3951,6 +3979,139 @@
 				"url": "https://github.com/sponsors/sindresorhus"
 			}
 		},
+		"node_modules/cli-highlight": {
+			"version": "2.1.11",
+			"resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz",
+			"integrity": "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg==",
+			"license": "ISC",
+			"dependencies": {
+				"chalk": "^4.0.0",
+				"highlight.js": "^10.7.1",
+				"mz": "^2.4.0",
+				"parse5": "^5.1.1",
+				"parse5-htmlparser2-tree-adapter": "^6.0.0",
+				"yargs": "^16.0.0"
+			},
+			"bin": {
+				"highlight": "bin/highlight"
+			},
+			"engines": {
+				"node": ">=8.0.0",
+				"npm": ">=5.0.0"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/ansi-regex": {
+			"version": "5.0.1",
+			"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+			"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+			"license": "MIT",
+			"engines": {
+				"node": ">=8"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/chalk": {
+			"version": "4.1.2",
+			"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+			"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+			"license": "MIT",
+			"dependencies": {
+				"ansi-styles": "^4.1.0",
+				"supports-color": "^7.1.0"
+			},
+			"engines": {
+				"node": ">=10"
+			},
+			"funding": {
+				"url": "https://github.com/chalk/chalk?sponsor=1"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/cliui": {
+			"version": "7.0.4",
+			"resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz",
+			"integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==",
+			"license": "ISC",
+			"dependencies": {
+				"string-width": "^4.2.0",
+				"strip-ansi": "^6.0.0",
+				"wrap-ansi": "^7.0.0"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/emoji-regex": {
+			"version": "8.0.0",
+			"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+			"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+			"license": "MIT"
+		},
+		"node_modules/cli-highlight/node_modules/string-width": {
+			"version": "4.2.3",
+			"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+			"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+			"license": "MIT",
+			"dependencies": {
+				"emoji-regex": "^8.0.0",
+				"is-fullwidth-code-point": "^3.0.0",
+				"strip-ansi": "^6.0.1"
+			},
+			"engines": {
+				"node": ">=8"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/strip-ansi": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+			"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+			"license": "MIT",
+			"dependencies": {
+				"ansi-regex": "^5.0.1"
+			},
+			"engines": {
+				"node": ">=8"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/wrap-ansi": {
+			"version": "7.0.0",
+			"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+			"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+			"license": "MIT",
+			"dependencies": {
+				"ansi-styles": "^4.0.0",
+				"string-width": "^4.1.0",
+				"strip-ansi": "^6.0.0"
+			},
+			"engines": {
+				"node": ">=10"
+			},
+			"funding": {
+				"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/yargs": {
+			"version": "16.2.0",
+			"resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz",
+			"integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==",
+			"license": "MIT",
+			"dependencies": {
+				"cliui": "^7.0.2",
+				"escalade": "^3.1.1",
+				"get-caller-file": "^2.0.5",
+				"require-directory": "^2.1.1",
+				"string-width": "^4.2.0",
+				"y18n": "^5.0.5",
+				"yargs-parser": "^20.2.2"
+			},
+			"engines": {
+				"node": ">=10"
+			}
+		},
+		"node_modules/cli-highlight/node_modules/yargs-parser": {
+			"version": "20.2.9",
+			"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
+			"integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
+			"license": "ISC",
+			"engines": {
+				"node": ">=10"
+			}
+		},
 		"node_modules/cli-spinners": {
 			"version": "2.9.2",
 			"resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz",
@@ -4406,6 +4567,12 @@
 				}
 			}
 		},
+		"node_modules/decimal.js": {
+			"version": "10.5.0",
+			"resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.5.0.tgz",
+			"integrity": "sha512-8vDa8Qxvr/+d94hSh5P3IJwI5t8/c0KsMp+g8bNw9cY2icONa5aPfvKeieW1WlG0WQYwwhJ7mjui2xtiePQSXw==",
+			"license": "MIT"
+		},
 		"node_modules/dedent": {
 			"version": "1.5.3",
 			"resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.3.tgz",
@@ -5652,6 +5819,17 @@
 				"url": "https://github.com/sponsors/ljharb"
 			}
 		},
+		"node_modules/gpt-tokens": {
+			"version": "1.3.14",
+			"resolved": "https://registry.npmjs.org/gpt-tokens/-/gpt-tokens-1.3.14.tgz",
+			"integrity": "sha512-cFNErQQYGWRwYmew0wVqhCBZxTvGNr96/9pMwNXqSNu9afxqB5PNHOKHlWtUC/P4UW6Ne2UQHHaO2PaWWLpqWQ==",
+			"license": "MIT",
+			"dependencies": {
+				"decimal.js": "^10.4.3",
+				"js-tiktoken": "^1.0.15",
+				"openai-chat-tokens": "^0.2.8"
+			}
+		},
 		"node_modules/graceful-fs": {
 			"version": "4.2.11",
 			"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -5676,7 +5854,6 @@
 			"version": "4.0.0",
 			"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
 			"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
-			"dev": true,
 			"license": "MIT",
 			"engines": {
 				"node": ">=8"
@@ -5740,6 +5917,15 @@
 				"node": ">=8"
 			}
 		},
+		"node_modules/highlight.js": {
+			"version": "10.7.3",
+			"resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz",
+			"integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==",
+			"license": "BSD-3-Clause",
+			"engines": {
+				"node": "*"
+			}
+		},
 		"node_modules/html-escaper": {
 			"version": "2.0.2",
 			"resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
@@ -7283,6 +7469,15 @@
 				"url": "https://github.com/chalk/supports-color?sponsor=1"
 			}
 		},
+		"node_modules/js-tiktoken": {
+			"version": "1.0.20",
+			"resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.20.tgz",
+			"integrity": "sha512-Xlaqhhs8VfCd6Sh7a1cFkZHQbYTLCwVJJWiHVxBYzLPxW0XsoxBy1hitmjkdIjD3Aon5BXLHFwU5O8WUx6HH+A==",
+			"license": "MIT",
+			"dependencies": {
+				"base64-js": "^1.5.1"
+			}
+		},
 		"node_modules/js-tokens": {
 			"version": "4.0.0",
 			"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
@@ -7786,6 +7981,17 @@
 				"node": "^18.17.0 || >=20.5.0"
 			}
 		},
+		"node_modules/mz": {
+			"version": "2.7.0",
+			"resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
+			"integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
+			"license": "MIT",
+			"dependencies": {
+				"any-promise": "^1.0.0",
+				"object-assign": "^4.0.1",
+				"thenify-all": "^1.0.0"
+			}
+		},
 		"node_modules/nanoid": {
 			"version": "3.3.11",
 			"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
@@ -8019,6 +8225,15 @@
 				}
 			}
 		},
+		"node_modules/openai-chat-tokens": {
+			"version": "0.2.8",
+			"resolved": "https://registry.npmjs.org/openai-chat-tokens/-/openai-chat-tokens-0.2.8.tgz",
+			"integrity": "sha512-nW7QdFDIZlAYe6jsCT/VPJ/Lam3/w2DX9oxf/5wHpebBT49KI3TN43PPhYlq1klq2ajzXWKNOLY6U4FNZM7AoA==",
+			"license": "MIT",
+			"dependencies": {
+				"js-tiktoken": "^1.0.7"
+			}
+		},
 		"node_modules/ora": {
 			"version": "8.2.0",
 			"resolved": "https://registry.npmjs.org/ora/-/ora-8.2.0.tgz",
@@ -8177,6 +8392,27 @@
 				"url": "https://github.com/sponsors/sindresorhus"
 			}
 		},
+		"node_modules/parse5": {
+			"version": "5.1.1",
+			"resolved": "https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz",
+			"integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==",
+			"license": "MIT"
+		},
+		"node_modules/parse5-htmlparser2-tree-adapter": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz",
+			"integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==",
+			"license": "MIT",
+			"dependencies": {
+				"parse5": "^6.0.1"
+			}
+		},
+		"node_modules/parse5-htmlparser2-tree-adapter/node_modules/parse5": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
+			"integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==",
+			"license": "MIT"
+		},
 		"node_modules/parseurl": {
 			"version": "1.3.3",
 			"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
@@ -9286,7 +9522,6 @@
 			"version": "7.2.0",
 			"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
 			"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"has-flag": "^4.0.0"
@@ -9395,6 +9630,27 @@
 				"node": ">=8"
 			}
 		},
+		"node_modules/thenify": {
+			"version": "3.3.1",
+			"resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
+			"integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
+			"license": "MIT",
+			"dependencies": {
+				"any-promise": "^1.0.0"
+			}
+		},
+		"node_modules/thenify-all": {
+			"version": "1.6.0",
+			"resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
+			"integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
+			"license": "MIT",
+			"dependencies": {
+				"thenify": ">= 3.1.0 < 4"
+			},
+			"engines": {
+				"node": ">=0.8"
+			}
+		},
 		"node_modules/throttleit": {
 			"version": "2.1.0",
 			"resolved": "https://registry.npmjs.org/throttleit/-/throttleit-2.1.0.tgz",
package.json

@@ -51,6 +51,7 @@
 		"ai": "^4.3.10",
 		"boxen": "^8.0.1",
 		"chalk": "^5.4.1",
+		"cli-highlight": "^2.1.11",
 		"cli-table3": "^0.6.5",
 		"commander": "^11.1.0",
 		"cors": "^2.8.5",
@@ -59,6 +60,7 @@
 		"fastmcp": "^1.20.5",
 		"figlet": "^1.8.0",
 		"fuse.js": "^7.1.0",
+		"gpt-tokens": "^1.3.14",
 		"gradient-string": "^3.0.0",
 		"helmet": "^8.1.0",
 		"inquirer": "^12.5.0",
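Of the two new runtime dependencies, cli-highlight renders fenced code blocks in research answers and gpt-tokens backs the token counts shown in the context breakdown. A minimal sketch of the counting pattern, mirroring how contextGatherer.js below imports and uses the library (the sample string and printed value are illustrative):

import pkg from 'gpt-tokens';

const { encode } = pkg;

// Count BPE tokens; fall back to a rough ~4-chars-per-token estimate if the tokenizer fails.
function countTokens(text) {
	if (!text || typeof text !== 'string') return 0;
	try {
		return encode(text).length;
	} catch {
		return Math.ceil(text.length / 4);
	}
}

console.log(countTokens('Hello, research command!')); // small integer token count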
scripts/modules/commands.js

@@ -1374,6 +1374,244 @@ function registerCommands(programInstance) {
 			await analyzeTaskComplexity(options);
 		});
 
+	// research command
+	programInstance
+		.command('research')
+		.description('Perform AI-powered research queries with project context')
+		.argument('<prompt>', 'Research prompt to investigate')
+		.option('--file <file>', 'Path to the tasks file', 'tasks/tasks.json')
+		.option(
+			'-i, --id <ids>',
+			'Comma-separated task/subtask IDs to include as context (e.g., "15,16.2")'
+		)
+		.option(
+			'-f, --files <paths>',
+			'Comma-separated file paths to include as context'
+		)
+		.option(
+			'-c, --context <text>',
+			'Additional custom context to include in the research prompt'
+		)
+		.option(
+			'--project-tree',
+			'Include project file tree structure in the research context'
+		)
+		.option(
+			'-s, --save <file>',
+			'Save research results to the specified task/subtask(s)'
+		)
+		.option(
+			'-d, --detail <level>',
+			'Output detail level: low, medium, high',
+			'medium'
+		)
+		.action(async (prompt, options) => {
+			// Parameter validation
+			if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
+				console.error(
+					chalk.red('Error: Research prompt is required and cannot be empty')
+				);
+				process.exit(1);
+			}
+
+			// Validate detail level
+			const validDetailLevels = ['low', 'medium', 'high'];
+			if (
+				options.detail &&
+				!validDetailLevels.includes(options.detail.toLowerCase())
+			) {
+				console.error(
+					chalk.red(
+						`Error: Detail level must be one of: ${validDetailLevels.join(', ')}`
+					)
+				);
+				process.exit(1);
+			}
+
+			// Validate and parse task IDs if provided
+			let taskIds = [];
+			if (options.id) {
+				try {
+					taskIds = options.id.split(',').map((id) => {
+						const trimmedId = id.trim();
+						// Support both task IDs (e.g., "15") and subtask IDs (e.g., "15.2")
+						if (!/^\d+(\.\d+)?$/.test(trimmedId)) {
+							throw new Error(
+								`Invalid task ID format: "${trimmedId}". Expected format: "15" or "15.2"`
+							);
+						}
+						return trimmedId;
+					});
+				} catch (error) {
+					console.error(chalk.red(`Error parsing task IDs: ${error.message}`));
+					process.exit(1);
+				}
+			}
+
+			// Validate and parse file paths if provided
+			let filePaths = [];
+			if (options.files) {
+				try {
+					filePaths = options.files.split(',').map((filePath) => {
+						const trimmedPath = filePath.trim();
+						if (trimmedPath.length === 0) {
+							throw new Error('Empty file path provided');
+						}
+						return trimmedPath;
+					});
+				} catch (error) {
+					console.error(
+						chalk.red(`Error parsing file paths: ${error.message}`)
+					);
+					process.exit(1);
+				}
+			}
+
+			// Validate save option if provided
+			if (options.save) {
+				const saveTarget = options.save.trim();
+				if (saveTarget.length === 0) {
+					console.error(chalk.red('Error: Save target cannot be empty'));
+					process.exit(1);
+				}
+				// Check if it's a valid file path (basic validation)
+				if (saveTarget.includes('..') || saveTarget.startsWith('/')) {
+					console.error(
+						chalk.red(
+							'Error: Save path must be relative and cannot contain ".."'
+						)
+					);
+					process.exit(1);
+				}
+			}
+
+			// Determine project root and tasks file path
+			const projectRoot = findProjectRoot() || '.';
+			const tasksPath =
+				options.file || path.join(projectRoot, 'tasks', 'tasks.json');
+
+			// Validate tasks file exists if task IDs are specified
+			if (taskIds.length > 0) {
+				try {
+					const tasksData = readJSON(tasksPath);
+					if (!tasksData || !tasksData.tasks) {
+						console.error(
+							chalk.red(`Error: No valid tasks found in ${tasksPath}`)
+						);
+						process.exit(1);
+					}
+				} catch (error) {
+					console.error(
+						chalk.red(`Error reading tasks file: ${error.message}`)
+					);
+					process.exit(1);
+				}
+			}
+
+			// Validate file paths exist if specified
+			if (filePaths.length > 0) {
+				for (const filePath of filePaths) {
+					const fullPath = path.isAbsolute(filePath)
+						? filePath
+						: path.join(projectRoot, filePath);
+					if (!fs.existsSync(fullPath)) {
+						console.error(chalk.red(`Error: File not found: ${filePath}`));
+						process.exit(1);
+					}
+				}
+			}
+
+			// Create validated parameters object
+			const validatedParams = {
+				prompt: prompt.trim(),
+				taskIds: taskIds,
+				filePaths: filePaths,
+				customContext: options.context ? options.context.trim() : null,
+				includeProjectTree: !!options.projectTree,
+				saveTarget: options.save ? options.save.trim() : null,
+				detailLevel: options.detail ? options.detail.toLowerCase() : 'medium',
+				tasksPath: tasksPath,
+				projectRoot: projectRoot
+			};
+
+			// Display what we're about to do
+			console.log(chalk.blue(`Researching: "${validatedParams.prompt}"`));
+
+			if (validatedParams.taskIds.length > 0) {
+				console.log(
+					chalk.gray(`Task context: ${validatedParams.taskIds.join(', ')}`)
+				);
+			}
+
+			if (validatedParams.filePaths.length > 0) {
+				console.log(
+					chalk.gray(`File context: ${validatedParams.filePaths.join(', ')}`)
+				);
+			}
+
+			if (validatedParams.customContext) {
+				console.log(
+					chalk.gray(
+						`Custom context: ${validatedParams.customContext.substring(0, 50)}${validatedParams.customContext.length > 50 ? '...' : ''}`
+					)
+				);
+			}
+
+			if (validatedParams.includeProjectTree) {
+				console.log(chalk.gray('Including project file tree'));
+			}
+
+			console.log(chalk.gray(`Detail level: ${validatedParams.detailLevel}`));
+
+			try {
+				// Import the research function
+				const { performResearch } = await import('./task-manager/research.js');
+
+				// Prepare research options
+				const researchOptions = {
+					taskIds: validatedParams.taskIds,
+					filePaths: validatedParams.filePaths,
+					customContext: validatedParams.customContext || '',
+					includeProjectTree: validatedParams.includeProjectTree,
+					detailLevel: validatedParams.detailLevel,
+					projectRoot: validatedParams.projectRoot
+				};
+
+				// Execute research
+				const result = await performResearch(
+					validatedParams.prompt,
+					researchOptions,
+					{
+						commandName: 'research',
+						outputType: 'cli'
+					},
+					'text'
+				);
+
+				// Save results if requested
+				if (validatedParams.saveTarget) {
+					const saveContent = `# Research Query: ${validatedParams.prompt}
+
+**Detail Level:** ${result.detailLevel}
+**Context Size:** ${result.contextSize} characters
+**Timestamp:** ${new Date().toISOString()}
+
+## Results
+
+${result.result}
+`;
+
+					fs.writeFileSync(validatedParams.saveTarget, saveContent, 'utf-8');
+					console.log(
+						chalk.green(`\n💾 Results saved to: ${validatedParams.saveTarget}`)
+					);
+				}
+			} catch (error) {
+				console.error(chalk.red(`\n❌ Research failed: ${error.message}`));
+				process.exit(1);
+			}
+		});
+
 	// clear-subtasks command
 	programInstance
 		.command('clear-subtasks')
scripts/modules/task-manager.js

@@ -24,6 +24,7 @@ import removeTask from './task-manager/remove-task.js';
 import taskExists from './task-manager/task-exists.js';
 import isTaskDependentOn from './task-manager/is-task-dependent.js';
 import moveTask from './task-manager/move-task.js';
+import { performResearch } from './task-manager/research.js';
 import { readComplexityReport } from './utils.js';
 // Export task manager functions
 export {
@@ -48,5 +49,6 @@ export {
 	taskExists,
 	isTaskDependentOn,
 	moveTask,
+	performResearch,
 	readComplexityReport
 };
scripts/modules/task-manager/research.js (new file, 564 lines)

@@ -0,0 +1,564 @@
+/**
+ * research.js
+ * Core research functionality for AI-powered queries with project context
+ */
+
+import path from 'path';
+import chalk from 'chalk';
+import boxen from 'boxen';
+import { highlight } from 'cli-highlight';
+import { ContextGatherer } from '../utils/contextGatherer.js';
+import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
+import { generateTextService } from '../ai-services-unified.js';
+import { log as consoleLog, findProjectRoot, readJSON } from '../utils.js';
+import {
+	displayAiUsageSummary,
+	startLoadingIndicator,
+	stopLoadingIndicator
+} from '../ui.js';
+
+/**
+ * Perform AI-powered research with project context
+ * @param {string} query - Research query/prompt
+ * @param {Object} options - Research options
+ * @param {Array<string>} [options.taskIds] - Task/subtask IDs for context
+ * @param {Array<string>} [options.filePaths] - File paths for context
+ * @param {string} [options.customContext] - Additional custom context
+ * @param {boolean} [options.includeProjectTree] - Include project file tree
+ * @param {string} [options.detailLevel] - Detail level: 'low', 'medium', 'high'
+ * @param {string} [options.projectRoot] - Project root directory
+ * @param {Object} [context] - Execution context
+ * @param {Object} [context.session] - MCP session object
+ * @param {Object} [context.mcpLog] - MCP logger object
+ * @param {string} [context.commandName] - Command name for telemetry
+ * @param {string} [context.outputType] - Output type ('cli' or 'mcp')
+ * @param {string} [outputFormat] - Output format ('text' or 'json')
+ * @returns {Promise<Object>} Research results with telemetry data
+ */
+async function performResearch(
+	query,
+	options = {},
+	context = {},
+	outputFormat = 'text'
+) {
+	const {
+		taskIds = [],
+		filePaths = [],
+		customContext = '',
+		includeProjectTree = false,
+		detailLevel = 'medium',
+		projectRoot: providedProjectRoot
+	} = options;
+
+	const {
+		session,
+		mcpLog,
+		commandName = 'research',
+		outputType = 'cli'
+	} = context;
+	const isMCP = !!mcpLog;
+
+	// Determine project root
+	const projectRoot = providedProjectRoot || findProjectRoot();
+	if (!projectRoot) {
+		throw new Error('Could not determine project root directory');
+	}
+
+	// Create consistent logger
+	const logFn = isMCP
+		? mcpLog
+		: {
+				info: (...args) => consoleLog('info', ...args),
+				warn: (...args) => consoleLog('warn', ...args),
+				error: (...args) => consoleLog('error', ...args),
+				debug: (...args) => consoleLog('debug', ...args),
+				success: (...args) => consoleLog('success', ...args)
+			};
+
+	// Show UI banner for CLI mode
+	if (outputFormat === 'text') {
+		console.log(
+			boxen(chalk.cyan.bold(`🔍 AI Research Query`), {
+				padding: 1,
+				borderColor: 'cyan',
+				borderStyle: 'round',
+				margin: { top: 1, bottom: 1 }
+			})
+		);
+	}
+
+	try {
+		// Initialize context gatherer
+		const contextGatherer = new ContextGatherer(projectRoot);
+
+		// Auto-discover relevant tasks using fuzzy search to supplement provided tasks
+		let finalTaskIds = [...taskIds]; // Start with explicitly provided tasks
+		let autoDiscoveredIds = [];
+
+		try {
+			const tasksPath = path.join(projectRoot, 'tasks', 'tasks.json');
+			const tasksData = await readJSON(tasksPath);
+
+			if (tasksData && tasksData.tasks && tasksData.tasks.length > 0) {
+				const fuzzySearch = new FuzzyTaskSearch(tasksData.tasks, 'research');
+				const searchResults = fuzzySearch.findRelevantTasks(query, {
+					maxResults: 8,
+					includeRecent: true,
+					includeCategoryMatches: true
+				});
+
+				autoDiscoveredIds = fuzzySearch.getTaskIds(searchResults);
+
+				// Remove any auto-discovered tasks that were already explicitly provided
+				const uniqueAutoDiscovered = autoDiscoveredIds.filter(
+					(id) => !finalTaskIds.includes(id)
+				);
+
+				// Add unique auto-discovered tasks to the final list
+				finalTaskIds = [...finalTaskIds, ...uniqueAutoDiscovered];
+
+				if (outputFormat === 'text' && finalTaskIds.length > 0) {
+					// Sort task IDs numerically for better display
+					const sortedTaskIds = finalTaskIds
+						.map((id) => parseInt(id))
+						.sort((a, b) => a - b)
+						.map((id) => id.toString());
+
+					// Show different messages based on whether tasks were explicitly provided
+					if (taskIds.length > 0) {
+						const sortedProvidedIds = taskIds
+							.map((id) => parseInt(id))
+							.sort((a, b) => a - b)
+							.map((id) => id.toString());
+
+						console.log(
+							chalk.gray('Provided tasks: ') +
+								chalk.cyan(sortedProvidedIds.join(', '))
+						);
+
+						if (uniqueAutoDiscovered.length > 0) {
+							const sortedAutoIds = uniqueAutoDiscovered
+								.map((id) => parseInt(id))
+								.sort((a, b) => a - b)
+								.map((id) => id.toString());
+
+							console.log(
+								chalk.gray('+ Auto-discovered related tasks: ') +
+									chalk.cyan(sortedAutoIds.join(', '))
+							);
+						}
+					} else {
+						console.log(
+							chalk.gray('Auto-discovered relevant tasks: ') +
+								chalk.cyan(sortedTaskIds.join(', '))
+						);
+					}
+				}
+			}
+		} catch (error) {
+			// Silently continue without auto-discovered tasks if there's an error
+			logFn.debug(`Could not auto-discover tasks: ${error.message}`);
+		}
+
+		const contextResult = await contextGatherer.gather({
+			tasks: finalTaskIds,
+			files: filePaths,
+			customContext,
+			includeProjectTree,
+			format: 'research', // Use research format for AI consumption
+			includeTokenCounts: true
+		});
+
+		const gatheredContext = contextResult.context;
+		const tokenBreakdown = contextResult.tokenBreakdown;
+
+		// Build system prompt based on detail level
+		const systemPrompt = buildResearchSystemPrompt(detailLevel, projectRoot);
+
+		// Build user prompt with context
+		const userPrompt = buildResearchUserPrompt(
+			query,
+			gatheredContext,
+			detailLevel
+		);
+
+		// Count tokens for system and user prompts
+		const systemPromptTokens = contextGatherer.countTokens(systemPrompt);
+		const userPromptTokens = contextGatherer.countTokens(userPrompt);
+		const totalInputTokens = systemPromptTokens + userPromptTokens;
+
+		if (outputFormat === 'text') {
+			// Display detailed token breakdown in a clean box
+			displayDetailedTokenBreakdown(
+				tokenBreakdown,
+				systemPromptTokens,
+				userPromptTokens
+			);
+		}
+
+		// Only log detailed info in debug mode or MCP
+		if (outputFormat !== 'text') {
+			logFn.info(
+				`Calling AI service with research role, context size: ${tokenBreakdown.total} tokens (${gatheredContext.length} characters)`
+			);
+		}
+
+		// Start loading indicator for CLI mode
+		let loadingIndicator = null;
+		if (outputFormat === 'text') {
+			loadingIndicator = startLoadingIndicator('Researching with AI...\n');
+		}
+
+		let aiResult;
+		try {
+			// Call AI service with research role
+			aiResult = await generateTextService({
+				role: 'research', // Always use research role for research command
+				session,
+				projectRoot,
+				systemPrompt,
+				prompt: userPrompt,
+				commandName,
+				outputType
+			});
+		} catch (error) {
+			if (loadingIndicator) {
+				stopLoadingIndicator(loadingIndicator);
+			}
+			throw error;
+		} finally {
+			if (loadingIndicator) {
+				stopLoadingIndicator(loadingIndicator);
+			}
+		}
+
+		const researchResult = aiResult.mainResult;
+		const telemetryData = aiResult.telemetryData;
+
+		// Format and display results
+		if (outputFormat === 'text') {
+			displayResearchResults(
+				researchResult,
+				query,
+				detailLevel,
+				tokenBreakdown
+			);
+
+			// Display AI usage telemetry for CLI users
+			if (telemetryData) {
+				displayAiUsageSummary(telemetryData, 'cli');
+			}
+		}
+
+		logFn.success('Research query completed successfully');
+
+		return {
+			query,
+			result: researchResult,
+			contextSize: gatheredContext.length,
+			contextTokens: tokenBreakdown.total,
+			tokenBreakdown,
+			systemPromptTokens,
+			userPromptTokens,
+			totalInputTokens,
+			detailLevel,
+			telemetryData
+		};
+	} catch (error) {
+		logFn.error(`Research query failed: ${error.message}`);
+
+		if (outputFormat === 'text') {
+			console.error(chalk.red(`\n❌ Research failed: ${error.message}`));
+		}
+
+		throw error;
+	}
+}
+
+/**
+ * Build system prompt for research based on detail level
+ * @param {string} detailLevel - Detail level: 'low', 'medium', 'high'
+ * @param {string} projectRoot - Project root for context
+ * @returns {string} System prompt
+ */
+function buildResearchSystemPrompt(detailLevel, projectRoot) {
+	const basePrompt = `You are an expert AI research assistant helping with a software development project. You have access to project context including tasks, files, and project structure.
+
+Your role is to provide comprehensive, accurate, and actionable research responses based on the user's query and the provided project context.`;
+
+	const detailInstructions = {
+		low: `
+**Response Style: Concise & Direct**
+- Provide brief, focused answers (2-4 paragraphs maximum)
+- Focus on the most essential information
+- Use bullet points for key takeaways
+- Avoid lengthy explanations unless critical
+- Skip pleasantries, introductions, and conclusions
+- No phrases like "Based on your project context" or "I'll provide guidance"
+- No summary outros or alignment statements
+- Get straight to the actionable information
+- Use simple, direct language - users want info, not explanation`,
+
+		medium: `
+**Response Style: Balanced & Comprehensive**
+- Provide thorough but well-structured responses (4-8 paragraphs)
+- Include relevant examples and explanations
+- Balance depth with readability
+- Use headings and bullet points for organization`,
+
+		high: `
+**Response Style: Detailed & Exhaustive**
+- Provide comprehensive, in-depth analysis (8+ paragraphs)
+- Include multiple perspectives and approaches
+- Provide detailed examples, code snippets, and step-by-step guidance
+- Cover edge cases and potential pitfalls
+- Use clear structure with headings, subheadings, and lists`
+	};
+
+	return `${basePrompt}
+
+${detailInstructions[detailLevel]}
+
+**Guidelines:**
+- Always consider the project context when formulating responses
+- Reference specific tasks, files, or project elements when relevant
+- Provide actionable insights that can be applied to the project
+- If the query relates to existing project tasks, suggest how the research applies to those tasks
+- Use markdown formatting for better readability
+- Be precise and avoid speculation unless clearly marked as such
+
+**For LOW detail level specifically:**
+- Start immediately with the core information
+- No introductory phrases or context acknowledgments
+- No concluding summaries or project alignment statements
+- Focus purely on facts, steps, and actionable items`;
+}
+
+/**
+ * Build user prompt with query and context
+ * @param {string} query - User's research query
+ * @param {string} gatheredContext - Gathered project context
+ * @param {string} detailLevel - Detail level for response guidance
+ * @returns {string} Complete user prompt
+ */
+function buildResearchUserPrompt(query, gatheredContext, detailLevel) {
+	let prompt = `# Research Query
+
+${query}`;
+
+	if (gatheredContext && gatheredContext.trim()) {
+		prompt += `
+
+# Project Context
+
+${gatheredContext}`;
+	}
+
+	prompt += `
+
+# Instructions
+
+Please research and provide a ${detailLevel}-detail response to the query above. Consider the project context provided and make your response as relevant and actionable as possible for this specific project.`;
+
+	return prompt;
+}
+
+/**
+ * Display detailed token breakdown for context and prompts
+ * @param {Object} tokenBreakdown - Token breakdown from context gatherer
+ * @param {number} systemPromptTokens - System prompt token count
+ * @param {number} userPromptTokens - User prompt token count
+ */
+function displayDetailedTokenBreakdown(
+	tokenBreakdown,
+	systemPromptTokens,
+	userPromptTokens
+) {
+	const parts = [];
+
+	// Custom context
+	if (tokenBreakdown.customContext) {
+		parts.push(
+			chalk.cyan('Custom: ') +
+				chalk.yellow(tokenBreakdown.customContext.tokens.toLocaleString())
+		);
+	}
+
+	// Tasks breakdown
+	if (tokenBreakdown.tasks && tokenBreakdown.tasks.length > 0) {
+		const totalTaskTokens = tokenBreakdown.tasks.reduce(
+			(sum, task) => sum + task.tokens,
+			0
+		);
+		const taskDetails = tokenBreakdown.tasks
+			.map((task) => {
+				const titleDisplay =
+					task.title.length > 30
+						? task.title.substring(0, 30) + '...'
+						: task.title;
+				return `  ${chalk.gray(task.id)} ${chalk.white(titleDisplay)} ${chalk.yellow(task.tokens.toLocaleString())} tokens`;
+			})
+			.join('\n');
+
+		parts.push(
+			chalk.cyan('Tasks: ') +
+				chalk.yellow(totalTaskTokens.toLocaleString()) +
+				chalk.gray(` (${tokenBreakdown.tasks.length} items)`) +
+				'\n' +
+				taskDetails
+		);
+	}
+
+	// Files breakdown
+	if (tokenBreakdown.files && tokenBreakdown.files.length > 0) {
+		const totalFileTokens = tokenBreakdown.files.reduce(
+			(sum, file) => sum + file.tokens,
+			0
+		);
+		const fileDetails = tokenBreakdown.files
+			.map((file) => {
+				const pathDisplay =
+					file.path.length > 40
+						? '...' + file.path.substring(file.path.length - 37)
+						: file.path;
+				return `  ${chalk.gray(pathDisplay)} ${chalk.yellow(file.tokens.toLocaleString())} tokens ${chalk.gray(`(${file.sizeKB}KB)`)}`;
+			})
+			.join('\n');
+
+		parts.push(
+			chalk.cyan('Files: ') +
+				chalk.yellow(totalFileTokens.toLocaleString()) +
+				chalk.gray(` (${tokenBreakdown.files.length} files)`) +
+				'\n' +
+				fileDetails
+		);
+	}
+
+	// Project tree
+	if (tokenBreakdown.projectTree) {
+		parts.push(
+			chalk.cyan('Project Tree: ') +
+				chalk.yellow(tokenBreakdown.projectTree.tokens.toLocaleString()) +
+				chalk.gray(
+					` (${tokenBreakdown.projectTree.fileCount} files, ${tokenBreakdown.projectTree.dirCount} dirs)`
+				)
+		);
+	}
+
+	// Prompts breakdown
+	const totalPromptTokens = systemPromptTokens + userPromptTokens;
+	const promptDetails = [
+		`  ${chalk.gray('System:')} ${chalk.yellow(systemPromptTokens.toLocaleString())} tokens`,
+		`  ${chalk.gray('User:')} ${chalk.yellow(userPromptTokens.toLocaleString())} tokens`
+	].join('\n');
+
+	parts.push(
+		chalk.cyan('Prompts: ') +
+			chalk.yellow(totalPromptTokens.toLocaleString()) +
+			chalk.gray(' (generated)') +
+			'\n' +
+			promptDetails
+	);
+
+	// Display the breakdown in a clean box
+	if (parts.length > 0) {
+		const content = parts.join('\n\n');
+		const tokenBox = boxen(content, {
+			title: chalk.blue.bold('Context Analysis'),
+			titleAlignment: 'left',
+			padding: { top: 1, bottom: 1, left: 2, right: 2 },
+			margin: { top: 0, bottom: 1 },
+			borderStyle: 'single',
+			borderColor: 'blue'
+		});
+		console.log(tokenBox);
+	}
+}
+
+/**
+ * Process research result text to highlight code blocks
+ * @param {string} text - Raw research result text
+ * @returns {string} Processed text with highlighted code blocks
+ */
+function processCodeBlocks(text) {
+	// Regex to match code blocks with optional language specification
+	const codeBlockRegex = /```(\w+)?\n([\s\S]*?)```/g;
+
+	return text.replace(codeBlockRegex, (match, language, code) => {
+		try {
+			// Default to javascript if no language specified
+			const lang = language || 'javascript';
+
+			// Highlight the code using cli-highlight
+			const highlightedCode = highlight(code.trim(), {
+				language: lang,
+				ignoreIllegals: true // Don't fail on unrecognized syntax
+			});
+
+			// Add a subtle border around code blocks
+			const codeBox = boxen(highlightedCode, {
+				padding: { top: 0, bottom: 0, left: 1, right: 1 },
+				margin: { top: 0, bottom: 0 },
+				borderStyle: 'single',
+				borderColor: 'dim'
+			});
+
+			return '\n' + codeBox + '\n';
+		} catch (error) {
+			// If highlighting fails, return the original code block with basic formatting
+			return (
+				'\n' +
+				chalk.gray('```' + (language || '')) +
+				'\n' +
+				chalk.white(code.trim()) +
+				'\n' +
+				chalk.gray('```') +
+				'\n'
+			);
+		}
+	});
+}
+
+/**
+ * Display research results in formatted output
+ * @param {string} result - AI research result
+ * @param {string} query - Original query
+ * @param {string} detailLevel - Detail level used
+ * @param {Object} tokenBreakdown - Detailed token usage
+ */
+function displayResearchResults(result, query, detailLevel, tokenBreakdown) {
+	// Header with query info
+	const header = boxen(
+		chalk.green.bold('Research Results') +
+			'\n\n' +
+			chalk.gray('Query: ') +
+			chalk.white(query) +
+			'\n' +
+			chalk.gray('Detail Level: ') +
+			chalk.cyan(detailLevel),
+		{
+			padding: { top: 1, bottom: 1, left: 2, right: 2 },
+			margin: { top: 1, bottom: 0 },
+			borderStyle: 'round',
+			borderColor: 'green'
+		}
+	);
+	console.log(header);
+
+	// Process the result to highlight code blocks
+	const processedResult = processCodeBlocks(result);
+
+	// Main research content in a clean box
+	const contentBox = boxen(processedResult, {
+		padding: { top: 1, bottom: 1, left: 2, right: 2 },
+		margin: { top: 0, bottom: 1 },
+		borderStyle: 'single',
+		borderColor: 'gray'
+	});
+	console.log(contentBox);
+
+	// Success footer
+	console.log(chalk.green('✅ Research completed'));
+}
+
+export { performResearch };
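Because performResearch is exported (and re-exported from task-manager.js above), it can also be driven programmatically, for example from MCP tooling. A minimal sketch with illustrative arguments; the import path assumes execution from the repository root:

import { performResearch } from './scripts/modules/task-manager/research.js';

const result = await performResearch(
	'What is the best way to cache the gathered project tree?',
	{
		taskIds: ['15', '16.2'], // explicit context; fuzzy search supplements it
		filePaths: ['scripts/modules/utils/contextGatherer.js'],
		includeProjectTree: true,
		detailLevel: 'low'
	},
	{ commandName: 'research', outputType: 'cli' },
	'text'
);

console.log(`${result.totalInputTokens} input tokens`);
console.log(result.result);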
scripts/modules/ui.js

@@ -542,6 +542,11 @@ function displayHelp() {
 				name: 'expand --all',
 				args: '[--force] [--research]',
 				desc: 'Expand all pending tasks with subtasks'
+			},
+			{
+				name: 'research',
+				args: '"<prompt>" [-i=<task_ids>] [-f=<file_paths>] [-c="<context>"] [--project-tree] [-s=<save_file>] [-d=<detail_level>]',
+				desc: 'Perform AI-powered research queries with project context'
 			}
 		]
 	},
scripts/modules/utils/contextGatherer.js (new file, 659 lines)

@@ -0,0 +1,659 @@
+/**
+ * contextGatherer.js
+ * Comprehensive context gathering utility for Task Master AI operations
+ * Supports task context, file context, project tree, and custom context
+ */
+
+import fs from 'fs';
+import path from 'path';
+import pkg from 'gpt-tokens';
+import { readJSON, findTaskById, truncate } from '../utils.js';
+
+const { encode } = pkg;
+
+/**
+ * Context Gatherer class for collecting and formatting context from various sources
+ */
+export class ContextGatherer {
+	constructor(projectRoot) {
+		this.projectRoot = projectRoot;
+		this.tasksPath = path.join(projectRoot, 'tasks', 'tasks.json');
+	}
+
+	/**
+	 * Count tokens in a text string using gpt-tokens
+	 * @param {string} text - Text to count tokens for
+	 * @returns {number} Token count
+	 */
+	countTokens(text) {
+		if (!text || typeof text !== 'string') {
+			return 0;
+		}
+		try {
+			return encode(text).length;
+		} catch (error) {
+			// Fallback to rough character-based estimation if tokenizer fails
+			// Rough estimate: ~4 characters per token for English text
+			return Math.ceil(text.length / 4);
+		}
+	}
+
+	/**
+	 * Main method to gather context from multiple sources
+	 * @param {Object} options - Context gathering options
+	 * @param {Array<string>} [options.tasks] - Task/subtask IDs to include
+	 * @param {Array<string>} [options.files] - File paths to include
+	 * @param {string} [options.customContext] - Additional custom context
+	 * @param {boolean} [options.includeProjectTree] - Include project file tree
+	 * @param {string} [options.format] - Output format: 'research', 'chat', 'system-prompt'
+	 * @returns {Promise<string>} Formatted context string
+	 */
+	async gather(options = {}) {
+		const {
+			tasks = [],
+			files = [],
+			customContext = '',
+			includeProjectTree = false,
+			format = 'research',
+			includeTokenCounts = false
+		} = options;
+
+		const contextSections = [];
+		const tokenBreakdown = {
+			customContext: null,
+			tasks: [],
+			files: [],
+			projectTree: null,
+			total: 0
+		};
+
+		// Add custom context first if provided
+		if (customContext && customContext.trim()) {
+			const formattedCustom = this._formatCustomContext(customContext, format);
+			contextSections.push(formattedCustom);
+			if (includeTokenCounts) {
+				tokenBreakdown.customContext = {
+					tokens: this.countTokens(formattedCustom),
+					characters: formattedCustom.length
+				};
+			}
+		}
+
+		// Add task context
+		if (tasks.length > 0) {
+			const taskContextResult = await this._gatherTaskContext(
+				tasks,
+				format,
+				includeTokenCounts
+			);
+			if (taskContextResult.context) {
+				contextSections.push(taskContextResult.context);
+				if (includeTokenCounts) {
+					tokenBreakdown.tasks = taskContextResult.breakdown;
+				}
+			}
+		}
+
+		// Add file context
+		if (files.length > 0) {
+			const fileContextResult = await this._gatherFileContext(
+				files,
+				format,
+				includeTokenCounts
+			);
+			if (fileContextResult.context) {
+				contextSections.push(fileContextResult.context);
+				if (includeTokenCounts) {
+					tokenBreakdown.files = fileContextResult.breakdown;
+				}
+			}
+		}
+
+		// Add project tree context
+		if (includeProjectTree) {
+			const treeContextResult = await this._gatherProjectTreeContext(
+				format,
+				includeTokenCounts
+			);
+			if (treeContextResult.context) {
+				contextSections.push(treeContextResult.context);
+				if (includeTokenCounts) {
+					tokenBreakdown.projectTree = treeContextResult.breakdown;
+				}
+			}
+		}
+
+		// Join all sections based on format
+		const finalContext = this._joinContextSections(contextSections, format);
+
+		if (includeTokenCounts) {
+			tokenBreakdown.total = this.countTokens(finalContext);
+			return {
+				context: finalContext,
+				tokenBreakdown: tokenBreakdown
+			};
+		}
+
+		return finalContext;
+	}
+
+	/**
+	 * Parse task ID strings into structured format
+	 * Supports formats: "15", "15.2", "16,17.1"
+	 * @param {Array<string>} taskIds - Array of task ID strings
+	 * @returns {Array<Object>} Parsed task identifiers
+	 */
+	_parseTaskIds(taskIds) {
+		const parsed = [];
+
+		for (const idStr of taskIds) {
+			if (idStr.includes('.')) {
+				// Subtask format: "15.2"
+				const [parentId, subtaskId] = idStr.split('.');
+				parsed.push({
+					type: 'subtask',
+					parentId: parseInt(parentId, 10),
+					subtaskId: parseInt(subtaskId, 10),
+					fullId: idStr
+				});
+			} else {
+				// Task format: "15"
+				parsed.push({
+					type: 'task',
+					taskId: parseInt(idStr, 10),
+					fullId: idStr
+				});
+			}
+		}
+
+		return parsed;
+	}
+
+	/**
+	 * Gather context from tasks and subtasks
+	 * @param {Array<string>} taskIds - Task/subtask IDs
+	 * @param {string} format - Output format
+	 * @param {boolean} includeTokenCounts - Whether to include token breakdown
+	 * @returns {Promise<Object>} Task context result with breakdown
+	 */
+	async _gatherTaskContext(taskIds, format, includeTokenCounts = false) {
+		try {
+			const tasksData = readJSON(this.tasksPath);
+			if (!tasksData || !tasksData.tasks) {
+				return { context: null, breakdown: [] };
+			}
+
+			const parsedIds = this._parseTaskIds(taskIds);
+			const contextItems = [];
+			const breakdown = [];
+
+			for (const parsed of parsedIds) {
+				let formattedItem = null;
+				let itemInfo = null;
+
+				if (parsed.type === 'task') {
+					const result = findTaskById(tasksData.tasks, parsed.taskId);
+					if (result.task) {
+						formattedItem = this._formatTaskForContext(result.task, format);
+						itemInfo = {
+							id: parsed.fullId,
+							type: 'task',
+							title: result.task.title,
+							tokens: includeTokenCounts ? this.countTokens(formattedItem) : 0,
+							characters: formattedItem.length
||||||
|
};
|
||||||
|
}
|
||||||
|
} else if (parsed.type === 'subtask') {
|
||||||
|
const parentResult = findTaskById(tasksData.tasks, parsed.parentId);
|
||||||
|
if (parentResult.task && parentResult.task.subtasks) {
|
||||||
|
const subtask = parentResult.task.subtasks.find(
|
||||||
|
(st) => st.id === parsed.subtaskId
|
||||||
|
);
|
||||||
|
if (subtask) {
|
||||||
|
formattedItem = this._formatSubtaskForContext(
|
||||||
|
subtask,
|
||||||
|
parentResult.task,
|
||||||
|
format
|
||||||
|
);
|
||||||
|
itemInfo = {
|
||||||
|
id: parsed.fullId,
|
||||||
|
type: 'subtask',
|
||||||
|
title: subtask.title,
|
||||||
|
parentTitle: parentResult.task.title,
|
||||||
|
tokens: includeTokenCounts
|
||||||
|
? this.countTokens(formattedItem)
|
||||||
|
: 0,
|
||||||
|
characters: formattedItem.length
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (formattedItem && itemInfo) {
|
||||||
|
contextItems.push(formattedItem);
|
||||||
|
if (includeTokenCounts) {
|
||||||
|
breakdown.push(itemInfo);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (contextItems.length === 0) {
|
||||||
|
return { context: null, breakdown: [] };
|
||||||
|
}
|
||||||
|
|
||||||
|
const finalContext = this._formatTaskContextSection(contextItems, format);
|
||||||
|
return {
|
||||||
|
context: finalContext,
|
||||||
|
breakdown: includeTokenCounts ? breakdown : []
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.warn(`Warning: Could not gather task context: ${error.message}`);
|
||||||
|
return { context: null, breakdown: [] };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format a task for context inclusion
|
||||||
|
* @param {Object} task - Task object
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Formatted task context
|
||||||
|
*/
|
||||||
|
_formatTaskForContext(task, format) {
|
||||||
|
const sections = [];
|
||||||
|
|
||||||
|
sections.push(`**Task ${task.id}: ${task.title}**`);
|
||||||
|
sections.push(`Description: ${task.description}`);
|
||||||
|
sections.push(`Status: ${task.status || 'pending'}`);
|
||||||
|
sections.push(`Priority: ${task.priority || 'medium'}`);
|
||||||
|
|
||||||
|
if (task.dependencies && task.dependencies.length > 0) {
|
||||||
|
sections.push(`Dependencies: ${task.dependencies.join(', ')}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (task.details) {
|
||||||
|
const details = truncate(task.details, 500);
|
||||||
|
sections.push(`Implementation Details: ${details}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (task.testStrategy) {
|
||||||
|
const testStrategy = truncate(task.testStrategy, 300);
|
||||||
|
sections.push(`Test Strategy: ${testStrategy}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (task.subtasks && task.subtasks.length > 0) {
|
||||||
|
sections.push(`Subtasks: ${task.subtasks.length} subtasks defined`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return sections.join('\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format a subtask for context inclusion
|
||||||
|
* @param {Object} subtask - Subtask object
|
||||||
|
* @param {Object} parentTask - Parent task object
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Formatted subtask context
|
||||||
|
*/
|
||||||
|
_formatSubtaskForContext(subtask, parentTask, format) {
|
||||||
|
const sections = [];
|
||||||
|
|
||||||
|
sections.push(
|
||||||
|
`**Subtask ${parentTask.id}.${subtask.id}: ${subtask.title}**`
|
||||||
|
);
|
||||||
|
sections.push(`Parent Task: ${parentTask.title}`);
|
||||||
|
sections.push(`Description: ${subtask.description}`);
|
||||||
|
sections.push(`Status: ${subtask.status || 'pending'}`);
|
||||||
|
|
||||||
|
if (subtask.dependencies && subtask.dependencies.length > 0) {
|
||||||
|
sections.push(`Dependencies: ${subtask.dependencies.join(', ')}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (subtask.details) {
|
||||||
|
const details = truncate(subtask.details, 500);
|
||||||
|
sections.push(`Implementation Details: ${details}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return sections.join('\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gather context from files
|
||||||
|
* @param {Array<string>} filePaths - File paths to read
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @param {boolean} includeTokenCounts - Whether to include token breakdown
|
||||||
|
* @returns {Promise<Object>} File context result with breakdown
|
||||||
|
*/
|
||||||
|
async _gatherFileContext(filePaths, format, includeTokenCounts = false) {
|
||||||
|
const fileContents = [];
|
||||||
|
const breakdown = [];
|
||||||
|
|
||||||
|
for (const filePath of filePaths) {
|
||||||
|
try {
|
||||||
|
const fullPath = path.isAbsolute(filePath)
|
||||||
|
? filePath
|
||||||
|
: path.join(this.projectRoot, filePath);
|
||||||
|
|
||||||
|
if (!fs.existsSync(fullPath)) {
|
||||||
|
console.warn(`Warning: File not found: ${filePath}`);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const stats = fs.statSync(fullPath);
|
||||||
|
if (!stats.isFile()) {
|
||||||
|
console.warn(`Warning: Path is not a file: ${filePath}`);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check file size (limit to 50KB for context)
|
||||||
|
if (stats.size > 50 * 1024) {
|
||||||
|
console.warn(
|
||||||
|
`Warning: File too large, skipping: ${filePath} (${Math.round(stats.size / 1024)}KB)`
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const content = fs.readFileSync(fullPath, 'utf-8');
|
||||||
|
const relativePath = path.relative(this.projectRoot, fullPath);
|
||||||
|
|
||||||
|
const fileData = {
|
||||||
|
path: relativePath,
|
||||||
|
size: stats.size,
|
||||||
|
content: content,
|
||||||
|
lastModified: stats.mtime
|
||||||
|
};
|
||||||
|
|
||||||
|
fileContents.push(fileData);
|
||||||
|
|
||||||
|
// Calculate tokens for this individual file if requested
|
||||||
|
if (includeTokenCounts) {
|
||||||
|
const formattedFile = this._formatSingleFileForContext(
|
||||||
|
fileData,
|
||||||
|
format
|
||||||
|
);
|
||||||
|
breakdown.push({
|
||||||
|
path: relativePath,
|
||||||
|
sizeKB: Math.round(stats.size / 1024),
|
||||||
|
tokens: this.countTokens(formattedFile),
|
||||||
|
characters: formattedFile.length
|
||||||
|
});
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.warn(
|
||||||
|
`Warning: Could not read file ${filePath}: ${error.message}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fileContents.length === 0) {
|
||||||
|
return { context: null, breakdown: [] };
|
||||||
|
}
|
||||||
|
|
||||||
|
const finalContext = this._formatFileContextSection(fileContents, format);
|
||||||
|
return {
|
||||||
|
context: finalContext,
|
||||||
|
breakdown: includeTokenCounts ? breakdown : []
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate project file tree context
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @param {boolean} includeTokenCounts - Whether to include token breakdown
|
||||||
|
* @returns {Promise<Object>} Project tree context result with breakdown
|
||||||
|
*/
|
||||||
|
async _gatherProjectTreeContext(format, includeTokenCounts = false) {
|
||||||
|
try {
|
||||||
|
const tree = this._generateFileTree(this.projectRoot, 5); // Max depth 5
|
||||||
|
const finalContext = this._formatProjectTreeSection(tree, format);
|
||||||
|
|
||||||
|
const breakdown = includeTokenCounts
|
||||||
|
? {
|
||||||
|
tokens: this.countTokens(finalContext),
|
||||||
|
characters: finalContext.length,
|
||||||
|
fileCount: tree.fileCount || 0,
|
||||||
|
dirCount: tree.dirCount || 0
|
||||||
|
}
|
||||||
|
: null;
|
||||||
|
|
||||||
|
return {
|
||||||
|
context: finalContext,
|
||||||
|
breakdown: breakdown
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.warn(
|
||||||
|
`Warning: Could not generate project tree: ${error.message}`
|
||||||
|
);
|
||||||
|
return { context: null, breakdown: null };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format a single file for context (used for token counting)
|
||||||
|
* @param {Object} fileData - File data object
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Formatted file context
|
||||||
|
*/
|
||||||
|
_formatSingleFileForContext(fileData, format) {
|
||||||
|
const header = `**File: ${fileData.path}** (${Math.round(fileData.size / 1024)}KB)`;
|
||||||
|
const content = `\`\`\`\n${fileData.content}\n\`\`\``;
|
||||||
|
return `${header}\n\n${content}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate file tree structure
|
||||||
|
* @param {string} dirPath - Directory path
|
||||||
|
* @param {number} maxDepth - Maximum depth to traverse
|
||||||
|
* @param {number} currentDepth - Current depth
|
||||||
|
* @returns {Object} File tree structure
|
||||||
|
*/
|
||||||
|
_generateFileTree(dirPath, maxDepth, currentDepth = 0) {
|
||||||
|
const ignoreDirs = [
|
||||||
|
'.git',
|
||||||
|
'node_modules',
|
||||||
|
'.env',
|
||||||
|
'coverage',
|
||||||
|
'dist',
|
||||||
|
'build'
|
||||||
|
];
|
||||||
|
const ignoreFiles = ['.DS_Store', '.env', '.env.local', '.env.production'];
|
||||||
|
|
||||||
|
if (currentDepth >= maxDepth) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const items = fs.readdirSync(dirPath);
|
||||||
|
const tree = {
|
||||||
|
name: path.basename(dirPath),
|
||||||
|
type: 'directory',
|
||||||
|
children: [],
|
||||||
|
fileCount: 0,
|
||||||
|
dirCount: 0
|
||||||
|
};
|
||||||
|
|
||||||
|
for (const item of items) {
|
||||||
|
if (ignoreDirs.includes(item) || ignoreFiles.includes(item)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const itemPath = path.join(dirPath, item);
|
||||||
|
const stats = fs.statSync(itemPath);
|
||||||
|
|
||||||
|
if (stats.isDirectory()) {
|
||||||
|
tree.dirCount++;
|
||||||
|
if (currentDepth < maxDepth - 1) {
|
||||||
|
const subtree = this._generateFileTree(
|
||||||
|
itemPath,
|
||||||
|
maxDepth,
|
||||||
|
currentDepth + 1
|
||||||
|
);
|
||||||
|
if (subtree) {
|
||||||
|
tree.children.push(subtree);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
tree.fileCount++;
|
||||||
|
tree.children.push({
|
||||||
|
name: item,
|
||||||
|
type: 'file',
|
||||||
|
size: stats.size
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return tree;
|
||||||
|
} catch (error) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format custom context section
|
||||||
|
* @param {string} customContext - Custom context string
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Formatted custom context
|
||||||
|
*/
|
||||||
|
_formatCustomContext(customContext, format) {
|
||||||
|
switch (format) {
|
||||||
|
case 'research':
|
||||||
|
return `## Additional Context\n\n${customContext}`;
|
||||||
|
case 'chat':
|
||||||
|
return `**Additional Context:**\n${customContext}`;
|
||||||
|
case 'system-prompt':
|
||||||
|
return `Additional context: ${customContext}`;
|
||||||
|
default:
|
||||||
|
return customContext;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format task context section
|
||||||
|
* @param {Array<string>} taskItems - Formatted task items
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Formatted task context section
|
||||||
|
*/
|
||||||
|
_formatTaskContextSection(taskItems, format) {
|
||||||
|
switch (format) {
|
||||||
|
case 'research':
|
||||||
|
return `## Task Context\n\n${taskItems.join('\n\n---\n\n')}`;
|
||||||
|
case 'chat':
|
||||||
|
return `**Task Context:**\n\n${taskItems.join('\n\n')}`;
|
||||||
|
case 'system-prompt':
|
||||||
|
return `Task context: ${taskItems.join(' | ')}`;
|
||||||
|
default:
|
||||||
|
return taskItems.join('\n\n');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format file context section
|
||||||
|
* @param {Array<Object>} fileContents - File content objects
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Formatted file context section
|
||||||
|
*/
|
||||||
|
_formatFileContextSection(fileContents, format) {
|
||||||
|
const fileItems = fileContents.map((file) => {
|
||||||
|
const header = `**File: ${file.path}** (${Math.round(file.size / 1024)}KB)`;
|
||||||
|
const content = `\`\`\`\n${file.content}\n\`\`\``;
|
||||||
|
return `${header}\n\n${content}`;
|
||||||
|
});
|
||||||
|
|
||||||
|
switch (format) {
|
||||||
|
case 'research':
|
||||||
|
return `## File Context\n\n${fileItems.join('\n\n---\n\n')}`;
|
||||||
|
case 'chat':
|
||||||
|
return `**File Context:**\n\n${fileItems.join('\n\n')}`;
|
||||||
|
case 'system-prompt':
|
||||||
|
return `File context: ${fileContents.map((f) => `${f.path} (${f.content.substring(0, 200)}...)`).join(' | ')}`;
|
||||||
|
default:
|
||||||
|
return fileItems.join('\n\n');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format project tree section
|
||||||
|
* @param {Object} tree - File tree structure
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Formatted project tree section
|
||||||
|
*/
|
||||||
|
_formatProjectTreeSection(tree, format) {
|
||||||
|
const treeString = this._renderFileTree(tree);
|
||||||
|
|
||||||
|
switch (format) {
|
||||||
|
case 'research':
|
||||||
|
return `## Project Structure\n\n\`\`\`\n${treeString}\n\`\`\``;
|
||||||
|
case 'chat':
|
||||||
|
return `**Project Structure:**\n\`\`\`\n${treeString}\n\`\`\``;
|
||||||
|
case 'system-prompt':
|
||||||
|
return `Project structure: ${treeString.replace(/\n/g, ' | ')}`;
|
||||||
|
default:
|
||||||
|
return treeString;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Render file tree as string
|
||||||
|
* @param {Object} tree - File tree structure
|
||||||
|
* @param {string} prefix - Current prefix for indentation
|
||||||
|
* @returns {string} Rendered tree string
|
||||||
|
*/
|
||||||
|
_renderFileTree(tree, prefix = '') {
|
||||||
|
let result = `${prefix}${tree.name}/`;
|
||||||
|
|
||||||
|
if (tree.fileCount > 0 || tree.dirCount > 0) {
|
||||||
|
result += ` (${tree.fileCount} files, ${tree.dirCount} dirs)`;
|
||||||
|
}
|
||||||
|
|
||||||
|
result += '\n';
|
||||||
|
|
||||||
|
if (tree.children) {
|
||||||
|
tree.children.forEach((child, index) => {
|
||||||
|
const isLast = index === tree.children.length - 1;
|
||||||
|
const childPrefix = prefix + (isLast ? '└── ' : '├── ');
|
||||||
|
const nextPrefix = prefix + (isLast ? ' ' : '│ ');
|
||||||
|
|
||||||
|
if (child.type === 'directory') {
|
||||||
|
result += this._renderFileTree(child, childPrefix);
|
||||||
|
} else {
|
||||||
|
result += `${childPrefix}${child.name}\n`;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Join context sections based on format
|
||||||
|
* @param {Array<string>} sections - Context sections
|
||||||
|
* @param {string} format - Output format
|
||||||
|
* @returns {string} Joined context string
|
||||||
|
*/
|
||||||
|
_joinContextSections(sections, format) {
|
||||||
|
if (sections.length === 0) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (format) {
|
||||||
|
case 'research':
|
||||||
|
return sections.join('\n\n---\n\n');
|
||||||
|
case 'chat':
|
||||||
|
return sections.join('\n\n');
|
||||||
|
case 'system-prompt':
|
||||||
|
return sections.join(' ');
|
||||||
|
default:
|
||||||
|
return sections.join('\n\n');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Factory function to create a context gatherer instance
|
||||||
|
* @param {string} projectRoot - Project root directory
|
||||||
|
* @returns {ContextGatherer} Context gatherer instance
|
||||||
|
*/
|
||||||
|
export function createContextGatherer(projectRoot) {
|
||||||
|
return new ContextGatherer(projectRoot);
|
||||||
|
}
|
||||||
|
|
||||||
|
export default ContextGatherer;
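For readers skimming the diff, here is a minimal usage sketch of the class above; the project path and option values are illustrative, not taken from the commit:

```js
import { ContextGatherer } from './scripts/modules/utils/contextGatherer.js'; // path is approximate

const gatherer = new ContextGatherer('/path/to/project'); // placeholder root

// With includeTokenCounts, gather() resolves to { context, tokenBreakdown }
// rather than a plain string.
const { context, tokenBreakdown } = await gatherer.gather({
	tasks: ['15', '15.2'], // task 15 plus its subtask 15.2
	files: ['scripts/modules/utils.js'],
	customContext: 'Focus on error handling.',
	includeProjectTree: true,
	format: 'research',
	includeTokenCounts: true
});

console.log(`${tokenBreakdown.total} tokens of gathered context`);
```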
372 scripts/modules/utils/fuzzyTaskSearch.js Normal file
@@ -0,0 +1,372 @@
/**
 * fuzzyTaskSearch.js
 * Reusable fuzzy search utility for finding relevant tasks based on semantic similarity
 */

import Fuse from 'fuse.js';

/**
 * Configuration for different search contexts
 */
const SEARCH_CONFIGS = {
	research: {
		threshold: 0.5, // More lenient for research (broader context)
		limit: 20,
		keys: [
			{ name: 'title', weight: 2.0 },
			{ name: 'description', weight: 1.0 },
			{ name: 'details', weight: 0.5 },
			{ name: 'dependencyTitles', weight: 0.5 }
		]
	},
	addTask: {
		threshold: 0.4, // Stricter for add-task (more precise context)
		limit: 15,
		keys: [
			{ name: 'title', weight: 2.0 },
			{ name: 'description', weight: 1.5 },
			{ name: 'details', weight: 0.8 },
			{ name: 'dependencyTitles', weight: 0.5 }
		]
	},
	default: {
		threshold: 0.4,
		limit: 15,
		keys: [
			{ name: 'title', weight: 2.0 },
			{ name: 'description', weight: 1.5 },
			{ name: 'details', weight: 1.0 },
			{ name: 'dependencyTitles', weight: 0.5 }
		]
	}
};

/**
 * Purpose categories for pattern-based task matching
 */
const PURPOSE_CATEGORIES = [
	{ pattern: /(command|cli|flag)/i, label: 'CLI commands' },
	{ pattern: /(task|subtask|add)/i, label: 'Task management' },
	{ pattern: /(dependency|depend)/i, label: 'Dependency handling' },
	{ pattern: /(AI|model|prompt|research)/i, label: 'AI integration' },
	{ pattern: /(UI|display|show|interface)/i, label: 'User interface' },
	{ pattern: /(schedule|time|cron)/i, label: 'Scheduling' },
	{ pattern: /(config|setting|option)/i, label: 'Configuration' },
	{ pattern: /(test|testing|spec)/i, label: 'Testing' },
	{ pattern: /(auth|login|user)/i, label: 'Authentication' },
	{ pattern: /(database|db|data)/i, label: 'Data management' },
	{ pattern: /(api|endpoint|route)/i, label: 'API development' },
	{ pattern: /(deploy|build|release)/i, label: 'Deployment' },
	{ pattern: /(security|auth|login|user)/i, label: 'Security' },
	{ pattern: /.*/, label: 'Other' }
];

/**
 * Relevance score thresholds
 */
const RELEVANCE_THRESHOLDS = {
	high: 0.25,
	medium: 0.4,
	low: 0.6
};

/**
 * Fuzzy search utility class for finding relevant tasks
 */
export class FuzzyTaskSearch {
	constructor(tasks, searchType = 'default') {
		this.tasks = tasks;
		this.config = SEARCH_CONFIGS[searchType] || SEARCH_CONFIGS.default;
		this.searchableTasks = this._prepareSearchableTasks(tasks);
		this.fuse = new Fuse(this.searchableTasks, {
			includeScore: true,
			threshold: this.config.threshold,
			keys: this.config.keys,
			shouldSort: true,
			useExtendedSearch: true,
			limit: this.config.limit
		});
	}

	/**
	 * Prepare tasks for searching by expanding dependency titles
	 * @param {Array} tasks - Array of task objects
	 * @returns {Array} Tasks with expanded dependency information
	 */
	_prepareSearchableTasks(tasks) {
		return tasks.map((task) => {
			// Get titles of this task's dependencies if they exist
			const dependencyTitles =
				task.dependencies?.length > 0
					? task.dependencies
							.map((depId) => {
								const depTask = tasks.find((t) => t.id === depId);
								return depTask ? depTask.title : '';
							})
							.filter((title) => title)
							.join(' ')
					: '';

			return {
				...task,
				dependencyTitles
			};
		});
	}

	/**
	 * Extract significant words from a prompt
	 * @param {string} prompt - The search prompt
	 * @returns {Array<string>} Array of significant words
	 */
	_extractPromptWords(prompt) {
		return prompt
			.toLowerCase()
			.replace(/[^\w\s-]/g, ' ') // Replace non-alphanumeric chars with spaces
			.split(/\s+/)
			.filter((word) => word.length > 3); // Words at least 4 chars
	}

	/**
	 * Find tasks related to a prompt using fuzzy search
	 * @param {string} prompt - The search prompt
	 * @param {Object} options - Search options
	 * @param {number} [options.maxResults=8] - Maximum number of results to return
	 * @param {boolean} [options.includeRecent=true] - Include recent tasks in results
	 * @param {boolean} [options.includeCategoryMatches=true] - Include category-based matches
	 * @returns {Object} Search results with relevance breakdown
	 */
	findRelevantTasks(prompt, options = {}) {
		const {
			maxResults = 8,
			includeRecent = true,
			includeCategoryMatches = true
		} = options;

		// Extract significant words from prompt
		const promptWords = this._extractPromptWords(prompt);

		// Perform fuzzy search with full prompt
		const fuzzyResults = this.fuse.search(prompt);

		// Also search for each significant word to catch different aspects
		const wordResults = [];
		for (const word of promptWords) {
			if (word.length > 5) {
				// Only use significant words
				const results = this.fuse.search(word);
				if (results.length > 0) {
					wordResults.push(...results);
				}
			}
		}

		// Merge and deduplicate results
		const mergedResults = [...fuzzyResults];

		// Add word results that aren't already in fuzzyResults
		for (const wordResult of wordResults) {
			if (!mergedResults.some((r) => r.item.id === wordResult.item.id)) {
				mergedResults.push(wordResult);
			}
		}

		// Group search results by relevance
		const highRelevance = mergedResults
			.filter((result) => result.score < RELEVANCE_THRESHOLDS.high)
			.map((result) => ({ ...result.item, score: result.score }));

		const mediumRelevance = mergedResults
			.filter(
				(result) =>
					result.score >= RELEVANCE_THRESHOLDS.high &&
					result.score < RELEVANCE_THRESHOLDS.medium
			)
			.map((result) => ({ ...result.item, score: result.score }));

		const lowRelevance = mergedResults
			.filter(
				(result) =>
					result.score >= RELEVANCE_THRESHOLDS.medium &&
					result.score < RELEVANCE_THRESHOLDS.low
			)
			.map((result) => ({ ...result.item, score: result.score }));

		// Get recent tasks (newest first) if requested
		const recentTasks = includeRecent
			? [...this.tasks].sort((a, b) => b.id - a.id).slice(0, 5)
			: [];

		// Find category-based matches if requested
		let categoryTasks = [];
		let promptCategory = null;
		if (includeCategoryMatches) {
			promptCategory = PURPOSE_CATEGORIES.find((cat) =>
				cat.pattern.test(prompt)
			);
			categoryTasks = promptCategory
				? this.tasks
						.filter(
							(t) =>
								promptCategory.pattern.test(t.title) ||
								promptCategory.pattern.test(t.description) ||
								(t.details && promptCategory.pattern.test(t.details))
						)
						.slice(0, 3)
				: [];
		}

		// Combine all relevant tasks, prioritizing by relevance
		const allRelevantTasks = [...highRelevance];

		// Add medium relevance if not already included
		for (const task of mediumRelevance) {
			if (!allRelevantTasks.some((t) => t.id === task.id)) {
				allRelevantTasks.push(task);
			}
		}

		// Add low relevance if not already included
		for (const task of lowRelevance) {
			if (!allRelevantTasks.some((t) => t.id === task.id)) {
				allRelevantTasks.push(task);
			}
		}

		// Add category tasks if not already included
		for (const task of categoryTasks) {
			if (!allRelevantTasks.some((t) => t.id === task.id)) {
				allRelevantTasks.push(task);
			}
		}

		// Add recent tasks if not already included
		for (const task of recentTasks) {
			if (!allRelevantTasks.some((t) => t.id === task.id)) {
				allRelevantTasks.push(task);
			}
		}

		// Get top N results for final output
		const finalResults = allRelevantTasks.slice(0, maxResults);

		return {
			results: finalResults,
			breakdown: {
				highRelevance,
				mediumRelevance,
				lowRelevance,
				categoryTasks,
				recentTasks,
				promptCategory,
				promptWords
			},
			metadata: {
				totalSearched: this.tasks.length,
				fuzzyMatches: fuzzyResults.length,
				wordMatches: wordResults.length,
				finalCount: finalResults.length
			}
		};
	}

	/**
	 * Get task IDs from search results
	 * @param {Object} searchResults - Results from findRelevantTasks
	 * @returns {Array<string>} Array of task ID strings
	 */
	getTaskIds(searchResults) {
		return searchResults.results.map((task) => task.id.toString());
	}

	/**
	 * Get task IDs including subtasks from search results
	 * @param {Object} searchResults - Results from findRelevantTasks
	 * @param {boolean} [includeSubtasks=false] - Whether to include subtask IDs
	 * @returns {Array<string>} Array of task and subtask ID strings
	 */
	getTaskIdsWithSubtasks(searchResults, includeSubtasks = false) {
		const taskIds = [];

		for (const task of searchResults.results) {
			taskIds.push(task.id.toString());

			if (includeSubtasks && task.subtasks && task.subtasks.length > 0) {
				for (const subtask of task.subtasks) {
					taskIds.push(`${task.id}.${subtask.id}`);
				}
			}
		}

		return taskIds;
	}

	/**
	 * Format search results for display
	 * @param {Object} searchResults - Results from findRelevantTasks
	 * @param {Object} options - Formatting options
	 * @returns {string} Formatted search results summary
	 */
	formatSearchSummary(searchResults, options = {}) {
		const { includeScores = false, includeBreakdown = false } = options;
		const { results, breakdown, metadata } = searchResults;

		let summary = `Found ${results.length} relevant tasks from ${metadata.totalSearched} total tasks`;

		if (includeBreakdown && breakdown) {
			const parts = [];
			if (breakdown.highRelevance.length > 0)
				parts.push(`${breakdown.highRelevance.length} high relevance`);
			if (breakdown.mediumRelevance.length > 0)
				parts.push(`${breakdown.mediumRelevance.length} medium relevance`);
			if (breakdown.lowRelevance.length > 0)
				parts.push(`${breakdown.lowRelevance.length} low relevance`);
			if (breakdown.categoryTasks.length > 0)
				parts.push(`${breakdown.categoryTasks.length} category matches`);

			if (parts.length > 0) {
				summary += ` (${parts.join(', ')})`;
			}

			if (breakdown.promptCategory) {
				summary += `\nCategory detected: ${breakdown.promptCategory.label}`;
			}
		}

		return summary;
	}
}

/**
 * Factory function to create a fuzzy search instance
 * @param {Array} tasks - Array of task objects
 * @param {string} [searchType='default'] - Type of search configuration to use
 * @returns {FuzzyTaskSearch} Fuzzy search instance
 */
export function createFuzzyTaskSearch(tasks, searchType = 'default') {
	return new FuzzyTaskSearch(tasks, searchType);
}

/**
 * Quick utility function to find relevant task IDs for a prompt
 * @param {Array} tasks - Array of task objects
 * @param {string} prompt - Search prompt
 * @param {Object} options - Search options
 * @returns {Array<string>} Array of relevant task ID strings
 */
export function findRelevantTaskIds(tasks, prompt, options = {}) {
	const {
		searchType = 'default',
		maxResults = 8,
		includeSubtasks = false
	} = options;

	const fuzzySearch = new FuzzyTaskSearch(tasks, searchType);
	const results = fuzzySearch.findRelevantTasks(prompt, { maxResults });

	return includeSubtasks
		? fuzzySearch.getTaskIdsWithSubtasks(results, true)
		: fuzzySearch.getTaskIds(results);
}

export default FuzzyTaskSearch;
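A short usage sketch of the utility above; the sample tasks and prompt are made up:

```js
import {
	FuzzyTaskSearch,
	findRelevantTaskIds
} from './scripts/modules/utils/fuzzyTaskSearch.js'; // path is approximate

const tasks = [
	{ id: 1, title: 'Implement GitHub issue import', description: 'Import issues as tasks', details: '', dependencies: [], subtasks: [] },
	{ id: 2, title: 'Polish CLI help output', description: 'Improve --help text', details: '', dependencies: [1], subtasks: [] }
];

const search = new FuzzyTaskSearch(tasks, 'research'); // looser 0.5 threshold, limit 20
const results = search.findRelevantTasks('import github issues', { maxResults: 8 });
console.log(search.formatSearchSummary(results, { includeBreakdown: true }));

// One-shot helper when only the ID strings are needed:
const ids = findRelevantTaskIds(tasks, 'import github issues', {
	searchType: 'research',
	includeSubtasks: true
});
```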
tasks/task_045.txt
@@ -1,12 +1,13 @@
 # Task ID: 45
 # Title: Implement GitHub Issue Import Feature
 # Status: pending
-# Dependencies: None
+# Dependencies: 97
 # Priority: medium
-# Description: Add a '--from-github' flag to the add-task command that accepts a GitHub issue URL and automatically generates a corresponding task with relevant details.
+# Description: Add a '--from-github' flag to the add-task command that accepts a GitHub issue URL and automatically generates a corresponding task with relevant details. This feature works in conjunction with the GitHub export feature (Task #97) to provide bidirectional linking between Task Master tasks and GitHub issues.
 # Details:
-Implement a new flag '--from-github' for the add-task command that allows users to create tasks directly from GitHub issues. The implementation should:
+Implement a new flag '--from-github' for the add-task command that allows users to create tasks directly from GitHub issues. The implementation should work seamlessly with the GitHub export feature (Task #97) to provide bidirectional linking capabilities.
+
+Core functionality:
 1. Accept a GitHub issue URL as an argument (e.g., 'taskmaster add-task --from-github https://github.com/owner/repo/issues/123')
 2. Parse the URL to extract the repository owner, name, and issue number
 3. Use the GitHub API to fetch the issue details including:
@@ -16,16 +17,27 @@ Implement a new flag '--from-github' for the add-task command that allows users
 - Issue assignees (for reference)
 - Issue status (open/closed)
 4. Generate a well-formatted task with this information
-5. Include a reference link back to the original GitHub issue
-6. Handle authentication for private repositories using GitHub tokens from environment variables or config file
-7. Implement proper error handling for:
+5. **Automatically add GitHub link metadata** using the same schema as the export feature:
+   - Store the source GitHub issue URL in task metadata
+   - Use consistent metadata structure with export feature for bidirectional compatibility
+   - Enable future synchronization capabilities
+6. Include a reference link back to the original GitHub issue in the task description
+7. Handle authentication for private repositories using GitHub tokens from environment variables or config file
+8. Implement proper error handling for:
 - Invalid URLs
 - Non-existent issues
 - API rate limiting
 - Authentication failures
 - Network issues
-8. Allow users to override or supplement the imported details with additional command-line arguments
-9. Add appropriate documentation in help text and user guide
+9. **Validate GitHub links** during import to ensure they point to valid, accessible issues
+10. Allow users to override or supplement the imported details with additional command-line arguments
+11. Add appropriate documentation in help text and user guide, including information about the complementary export feature
+
+Bidirectional Integration:
+- Use the same metadata schema as Task #97 for GitHub links
+- Ensure imported tasks can be identified as GitHub-linked for future export operations
+- Prepare infrastructure for future synchronization between tasks and their source issues
+- Maintain consistency with export feature's link management approach

 # Test Strategy:
 Testing should cover the following scenarios:
@@ -34,11 +46,15 @@ Testing should cover the following scenarios:
 - Test URL parsing functionality with valid and invalid GitHub issue URLs
 - Test GitHub API response parsing with mocked API responses
 - Test error handling for various failure cases
+- **Test metadata schema consistency with export feature**
+- **Test GitHub link validation functionality**

 2. Integration tests:
 - Test with real GitHub public issues (use well-known repositories)
 - Test with both open and closed issues
 - Test with issues containing various elements (labels, assignees, comments)
+- **Test bidirectional compatibility with export feature**
+- **Verify metadata structure matches export feature requirements**

 3. Error case tests:
 - Invalid URL format
@@ -46,11 +62,19 @@ Testing should cover the following scenarios:
 - Non-existent issue number
 - API rate limit exceeded
 - Authentication failures for private repos
+- **Invalid or inaccessible GitHub links**

 4. End-to-end tests:
 - Verify that a task created from a GitHub issue contains all expected information
+- **Verify that imported tasks contain proper GitHub link metadata**
 - Verify that the task can be properly managed after creation
 - Test the interaction with other flags and commands
+- **Test compatibility with export feature workflows**
+
+5. **Bidirectional feature tests**:
+- Import a GitHub issue and verify it can be exported back
+- Test metadata consistency between import and export operations
+- Verify link validation works correctly

 Create mock GitHub API responses for testing to avoid hitting rate limits during development and testing. Use environment variables to configure test credentials if needed.
+
@@ -85,3 +109,15 @@ Map GitHub issue fields to task fields (title, description, etc.). Convert GitHu
 ### Details:
 Design and implement UI for URL input and import confirmation. Show loading states during API calls. Display meaningful error messages for various failure scenarios. Allow users to review and modify imported task details before saving. Add automated tests for the entire import flow.
+
+## 6. Implement GitHub metadata schema and link management [pending]
+### Dependencies: None
+### Description: Create a consistent metadata schema for GitHub links that works with both import and export features, ensuring bidirectional compatibility.
+### Details:
+Design and implement metadata structure that matches the export feature (Task #97). Include fields for GitHub issue URL, repository information, issue number, and sync status. Implement link validation to ensure GitHub URLs are accessible and valid. Create utilities for managing GitHub link metadata consistently across import and export operations.
+
+## 7. Add bidirectional integration with export feature [pending]
+### Dependencies: 45.6
+### Description: Ensure imported tasks work seamlessly with the GitHub export feature and maintain consistent link management.
+### Details:
+Verify that tasks imported from GitHub can be properly exported back to GitHub. Implement checks to prevent duplicate exports of imported issues. Add metadata flags to identify imported tasks and their source repositories. Test round-trip workflows (import → modify → export) to ensure data integrity.
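Step 2 of the details above leaves the URL parsing open. One plausible sketch, with the regex, function name, and error text as assumptions:

```js
// Parse "https://github.com/owner/repo/issues/123" into its parts.
// Illustrative only; the task does not commit to an implementation.
function parseGitHubIssueUrl(url) {
	const match = /^https?:\/\/github\.com\/([^/]+)\/([^/]+)\/issues\/(\d+)\/?$/.exec(
		url.trim()
	);
	if (!match) {
		throw new Error(`Invalid GitHub issue URL: ${url}`);
	}
	const [, owner, repo, issueNumber] = match;
	return { owner, repo, issueNumber: Number(issueNumber) };
}

// parseGitHubIssueUrl('https://github.com/owner/repo/issues/123')
// → { owner: 'owner', repo: 'repo', issueNumber: 123 }
```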
tasks/task_094.txt
@@ -26,19 +26,63 @@
 - Check help output and usage documentation for accuracy and completeness.

 # Subtasks:
-## 1. Command Registration [in-progress]
+## 1. Command Registration [done]
 ### Dependencies: None
 ### Description: Register the 'research' command with the CLI framework, ensuring it appears in the list of available commands and supports standard CLI conventions.
 ### Details:
 Integrate the new command into the CLI's command registry. Ensure it is discoverable via the CLI's help system and follows established naming and grouping conventions.

-## 2. Parameter and Flag Handling [pending]
+## 2. Parameter and Flag Handling [done]
 ### Dependencies: 94.1
 ### Description: Define and implement parsing for all arguments, flags, and options accepted by the 'research' command, including validation and default values.
 ### Details:
 Use a command-line parsing framework to handle parameters. Ensure support for optional and required arguments, order-independence, and clear error messages for invalid input.
+<info added on 2025-05-25T06:00:42.350Z>
+✅ **Parameter and Flag Handling Implementation Complete**
+
+Successfully implemented comprehensive parameter validation and processing for the research command:
+
+**✅ Implemented Features:**
+
+1. **Comprehensive Parameter Validation:**
+   - Prompt validation (required, non-empty string)
+   - Detail level validation (low, medium, high)
+   - Task ID format validation (supports "15" and "15.2" formats)
+   - File path validation (comma-separated, existence checks)
+   - Save target validation (security checks for path traversal)
+
+2. **Advanced Parameter Processing:**
+   - Comma-separated value parsing for task IDs and file paths
+   - Whitespace trimming and normalization
+   - Project root detection using `findProjectRoot()`
+   - Relative/absolute path handling for files
+   - Default value application
+
+3. **Informative Error Messages:**
+   - Specific error messages for each validation failure
+   - Clear examples of correct usage
+   - Helpful suggestions for fixing errors
+   - Much more informative than basic Commander.js errors
+
+4. **Structured Output:**
+   - Creates validated parameters object with all processed values
+   - Proper type conversion and normalization
+   - Ready for consumption by core research function
+
+**✅ Validation Examples Tested:**
+- Missing prompt: Shows Commander.js error (expected behavior)
+- Invalid detail level: "Error: Detail level must be one of: low, medium, high"
+- Invalid task ID format: "Error parsing task IDs: Invalid task ID format: "invalid_format". Expected format: "15" or "15.2""
+- Valid parameters: Successfully processes and creates structured parameter object
+
+**✅ Security Features:**
+- Path traversal protection for save targets
+- File existence validation
+- Input sanitization and trimming
+
+The parameter validation is now production-ready and follows the same patterns used throughout the Task Master codebase. Ready to proceed to subtask 94.3 (Context Gathering Utility).
+</info added on 2025-05-25T06:00:42.350Z>
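The task ID validation quoted in the note above could reduce to a small helper like this; the function name is an assumption, and the error wording simply mirrors the example quoted in the note:

```js
// Validates comma-separated task/subtask IDs such as "15" or "15.2".
function parseTaskIdList(raw) {
	return raw.split(',').map((part) => {
		const id = part.trim();
		if (!/^\d+(\.\d+)?$/.test(id)) {
			throw new Error(
				`Error parsing task IDs: Invalid task ID format: "${id}". Expected format: "15" or "15.2"`
			);
		}
		return id;
	});
}

// parseTaskIdList('16, 17.1') → ['16', '17.1']
```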
+
-## 3. Context Gathering [pending]
+## 3. Context Gathering [done]
 ### Dependencies: 94.2
 ### Description: Implement logic to gather necessary context for the research operation, such as reading from files, stdin, or other sources as specified by the user.
 ### Details:
@@ -90,22 +134,159 @@ const context = await contextGatherer.gather({

 This utility will eliminate code duplication between task 51 (explore REPL) and task 94 (research command) while providing a robust, extensible foundation for context gathering operations.
 </info added on 2025-05-25T05:24:58.107Z>
+<info added on 2025-05-25T06:13:19.991Z>
+✅ **Context Gathering Implementation Complete**
+
+Successfully implemented the comprehensive ContextGatherer utility in `scripts/modules/utils/contextGatherer.js`:
+
+**✅ Core Features Implemented:**
+
+1. **Task/Subtask Context Extraction:**
+   - ✅ Parse task IDs and subtask IDs (formats: "15", "15.2", "16,17.1")
+   - ✅ Extract task titles, descriptions, details, and dependencies from the task system
+   - ✅ Include parent task context automatically for subtasks
+   - ✅ Format task data optimally for AI consumption
+   - ✅ Proper integration with existing `findTaskById` utility function
+
+2. **File Path Context Processing:**
+   - ✅ Handle single or comma-separated file paths
+   - ✅ Implement safe file reading with comprehensive error handling
+   - ✅ Support multiple file types (JavaScript, markdown, text, JSON, etc.)
+   - ✅ Include file metadata (path, size, type, last modified)
+   - ✅ File size limits (50KB max) to prevent context explosion
+
+3. **Project File Tree Generation:**
+   - ✅ Create structured project overview with configurable depth (max 3 levels)
+   - ✅ Filter out irrelevant directories (.git, node_modules, .env files)
+   - ✅ Include file counts and directory statistics
+   - ✅ Support custom filtering patterns
+
+4. **Custom Context Integration:**
+   - ✅ Accept and merge custom context strings
+   - ✅ Maintain clear context hierarchy and organization
+   - ✅ Preserve context source attribution
+
+5. **Unified API Design:**
+   - ✅ Clean class-based API with factory function
+   - ✅ Flexible options object for configuration
+   - ✅ Multiple output formats (research, chat, system-prompt)
+   - ✅ Proper error handling and graceful degradation
+
+**✅ Output Formatting:**
+- ✅ Support for 'research', 'chat', and 'system-prompt' formats
+- ✅ Consistent context structure across different use cases
+- ✅ Optimized for AI model consumption and token efficiency
+
+**✅ Testing:**
+- ✅ Successfully tested with real task data (Task 94 and subtask 94.1)
+- ✅ Verified task context extraction works correctly
+- ✅ Confirmed proper formatting and structure
+
+**✅ Integration Ready:**
+- ✅ Designed to be shared between task 51 (explore REPL) and task 94 (research command)
+- ✅ Follows existing codebase patterns and utilities
+- ✅ Proper ES6 module exports for easy importing
+
+The ContextGatherer utility is now ready for integration into the core research function (subtask 94.4).
+</info added on 2025-05-25T06:13:19.991Z>
+
-## 4. AI Service Integration [pending]
-### Dependencies: 94.3
-### Description: Integrate with the AI service to process the gathered context and parameters, handling API calls, authentication, and error management.
+## 4. Core Function Implementation [done]
+### Dependencies: 94.2, 94.3
+### Description: Implement the core research function in scripts/modules/task-manager/ following the add-task.js pattern
 ### Details:
-Establish a robust interface to the AI backend, manage API keys or tokens securely, and handle network or service errors gracefully.
+Create a new core function (e.g., research.js) in scripts/modules/task-manager/ that:
+- Accepts parameters: query, context options (task IDs, file paths, custom context), project tree flag, detail level
+- Implements context gathering using the contextGatherer utility from subtask 94.3
+- Integrates with ai-services-unified.js using research role
+- Handles both CLI and MCP output formats
+- Returns structured results with telemetry data
+- Follows the same parameter validation and error handling patterns as add-task.js
+<info added on 2025-05-25T06:29:01.194Z>
+✅ COMPLETED: Added loading spinner to research command
+
+**Implementation Details:**
+- Imported `startLoadingIndicator` and `stopLoadingIndicator` from ui.js
+- Added loading indicator that shows "Researching with AI..." during AI service calls
+- Properly wrapped AI service call in try/catch/finally blocks to ensure spinner stops on both success and error
+- Loading indicator only shows in CLI mode (outputFormat === 'text'), not in MCP mode
+- Follows the same pattern as add-task.js for consistent user experience
+
+**Testing:**
+- Tested with `node bin/task-master.js research "What is TypeScript?" --detail=low`
+- Confirmed spinner appears during AI processing and disappears when complete
+- Telemetry display works correctly after spinner stops
+
+The research command now provides the same polished user experience as other AI-powered commands in the system.
+</info added on 2025-05-25T06:29:01.194Z>
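A sketch of the spinner pattern the note describes, assuming the `startLoadingIndicator`/`stopLoadingIndicator` helpers from ui.js behave as named; `runResearchQuery` is a stand-in for the unified AI-service call, and the import path is approximate:

```js
import {
	startLoadingIndicator,
	stopLoadingIndicator
} from './scripts/modules/ui.js';

async function researchWithSpinner(prompt, outputFormat) {
	// Spinner only in CLI mode; MCP mode must keep stdout clean.
	const indicator =
		outputFormat === 'text'
			? startLoadingIndicator('Researching with AI...')
			: null;
	try {
		return await runResearchQuery(prompt); // stand-in for the ai-services-unified call
	} finally {
		if (indicator) {
			stopLoadingIndicator(indicator); // stops on success and on error alike
		}
	}
}
```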
+
-## 5. Output Formatting [pending]
+## 5. Direct Function Implementation [pending]
 ### Dependencies: 94.4
-### Description: Format the AI-generated output according to user-specified modes (e.g., plain text, JSON), and support writing to stdout or files.
+### Description: Create the MCP direct function wrapper in mcp-server/src/core/direct-functions/ following the add-task pattern
 ### Details:
-Implement flexible output formatting, ensuring compatibility with piping and redirection. Provide clear, user-friendly output and support for machine-readable formats.
+Create a new direct function (e.g., research.js) in mcp-server/src/core/direct-functions/ that:
+- Follows the addTaskDirect pattern for parameter handling and error management
+- Uses enableSilentMode/disableSilentMode to prevent console output interference
+- Creates logger wrapper using createLogWrapper utility
+- Validates required parameters (query, projectRoot)
+- Calls the core research function with proper context (session, mcpLog, projectRoot)
+- Returns standardized result object with success/error structure
+- Handles telemetry data propagation
+- Export and register in task-master-core.js
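A rough shape for that wrapper, following the addTaskDirect pattern the list references. `researchDirect` and `performResearch` are assumed names, and the import paths are approximate:

```js
// mcp-server/src/core/direct-functions/research.js (sketch only)
import { createLogWrapper } from '../../tools/utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

export async function researchDirect(args, log, context = {}) {
	const { session } = context;
	const mcpLog = createLogWrapper(log); // wrap the MCP logger

	if (!args.query || !args.projectRoot) {
		return {
			success: false,
			error: {
				code: 'MISSING_PARAMETER',
				message: 'query and projectRoot are required'
			}
		};
	}

	try {
		enableSilentMode(); // keep console output out of the MCP stream
		const data = await performResearch(args.query, {
			...args,
			session,
			mcpLog
		});
		return { success: true, data }; // data carries telemetryData when present
	} catch (error) {
		return {
			success: false,
			error: { code: 'RESEARCH_ERROR', message: error.message }
		};
	} finally {
		disableSilentMode();
	}
}
```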
+
-## 6. Documentation and Help [pending]
-### Dependencies: 94.1, 94.2, 94.3, 94.4, 94.5
-### Description: Document the 'research' command, including usage examples, parameter descriptions, and integration with the CLI's --help system.
+## 6. MCP Tool Implementation [pending]
+### Dependencies: 94.5
+### Description: Create the MCP tool in mcp-server/src/tools/ following the add-task tool pattern
 ### Details:
-Update CLI documentation and ensure the --help flag provides comprehensive guidance on using the command, its options, and expected outputs.
+Create a new MCP tool (e.g., research.js) in mcp-server/src/tools/ that:
+- Defines zod schema for all research command parameters (query, id, files, context, project-tree, save, detail)
+- Uses withNormalizedProjectRoot HOF to handle project path normalization
+- Calls findTasksJsonPath to locate tasks.json file
+- Invokes the direct function with proper parameter mapping
+- Uses handleApiResult for standardized response formatting
+- Registers the tool as 'research' (snake_case) in the MCP server
+- Handles errors with createErrorResponse
+- Register in mcp-server/src/tools/index.js
+- Update .cursor/mcp.json with tool definition
|
|
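
A sketch of the tool registration under those constraints; the server API shape, utility import paths, and parameter descriptions are assumptions based on the add-task tool pattern rather than the shipped code:

```javascript
// mcp-server/src/tools/research.js -- illustrative sketch, not the shipped tool.
import { z } from 'zod';
import {
  handleApiResult,
  createErrorResponse,
  withNormalizedProjectRoot
} from './utils.js';
import { researchDirect } from '../core/task-master-core.js';
import { findTasksJsonPath } from '../core/utils/path-utils.js';

export function registerResearchTool(server) {
  server.addTool({
    name: 'research',
    description: 'Run an AI-powered research query with project context',
    parameters: z.object({
      query: z.string().describe('The research question to ask'),
      id: z.string().optional().describe('Comma-separated task IDs for context'),
      files: z.string().optional().describe('Comma-separated file paths for context'),
      context: z.string().optional().describe('Additional free-form context'),
      projectTree: z.boolean().optional().describe('Include the project tree'),
      save: z.boolean().optional().describe('Save results to the research folder'),
      detail: z.enum(['low', 'medium', 'high']).optional(),
      projectRoot: z.string().describe('Absolute path to the project root')
    }),
    execute: withNormalizedProjectRoot(async (args, { log, session }) => {
      try {
        // Locate tasks.json so task IDs can be resolved into context.
        const tasksJsonPath = findTasksJsonPath(args, log);
        const result = await researchDirect(
          { ...args, tasksJsonPath },
          log,
          { session }
        );
        return handleApiResult(result, log, 'Error performing research');
      } catch (error) {
        return createErrorResponse(error.message);
      }
    })
  });
}
```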

## 7. Add research save-to-file functionality [pending]
### Dependencies: None
### Description: Implement functionality to save research results to a /research/ folder with optional interactive prompts
### Details:
Add the capability to save research results to files in a /research/ directory at the project root. For CLI mode, use inquirer to prompt the user if they want to save the research. For MCP mode, accept a saveToFile parameter.

Key implementation details:
- Create the /research/ directory if it doesn't exist (similar to how tasks/ is handled)
- Generate meaningful filenames based on query and timestamp (see the sketch below)
- Support both CLI interactive mode (inquirer prompts) and MCP parameter mode
- Follow the project root detection pattern from the add-task.js stack
- Handle file writing with proper error handling
- Return the saved file path in the response for confirmation

File structure:
- /research/YYYY-MM-DD_query-summary.md (markdown format)
- Include query, timestamp, context used, and full AI response
- Add a metadata header with query details and context sources
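
A minimal sketch of the save step using only Node's fs/path modules; the function name, metadata header layout, and slug rules are assumptions for illustration:

```javascript
// Illustrative sketch of saving a research result to /research/.
import fs from 'fs';
import path from 'path';

function slugify(text, maxLength = 50) {
  // Reduce the query to a filesystem-safe summary for the filename.
  return text
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-+|-+$/g, '')
    .slice(0, maxLength);
}

export function saveResearchToFile(projectRoot, query, response, contextSummary) {
  const researchDir = path.join(projectRoot, 'research');
  // Create /research/ if it does not exist, mirroring tasks/ handling.
  fs.mkdirSync(researchDir, { recursive: true });

  const date = new Date().toISOString().slice(0, 10); // YYYY-MM-DD
  const fileName = `${date}_${slugify(query)}.md`;
  const filePath = path.join(researchDir, fileName);

  // Metadata header followed by the full AI response.
  const content = [
    '---',
    `query: ${query}`,
    `date: ${new Date().toISOString()}`,
    `context: ${contextSummary}`,
    '---',
    '',
    response
  ].join('\n');

  fs.writeFileSync(filePath, content, 'utf-8');
  return filePath; // Returned so the caller can confirm the save location.
}
```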

## 8. Add research-to-task linking functionality [pending]
### Dependencies: 94.7
### Description: Implement functionality to link saved research to specific tasks with interactive task selection
### Details:
Add the capability to link research results to specific tasks by updating task details with research references. For CLI mode, use inquirer to prompt the user if they want to link research to tasks and provide task selection. For MCP mode, accept a linkToTasks parameter.

Key implementation details:
- Prompt the user if they want to link research to existing tasks (CLI mode)
- Provide a task selection interface using inquirer with a task list (ID, title, status)
- Support multiple task selection (checkbox interface; see the sketch below)
- Update the selected tasks' details section with a research reference
- Add a timestamped research link in the format: "Research: [Query Title](file:///path/to/research.md) - YYYY-MM-DD"
- Follow the add-task.js pattern for task file updates and regeneration
- Handle tasks.json reading/writing with proper error handling
- Support both single and multiple task linking
- Return the list of updated task IDs in the response

Research link format in task details:

```
## Research References
- [How to implement authentication](file:///research/2024-01-15_authentication-research.md) - 2024-01-15
```
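
A sketch of the CLI prompt flow using inquirer (already a project dependency); the function name and the task object shape are assumptions for illustration:

```javascript
// Illustrative sketch of the interactive task-selection prompt.
import inquirer from 'inquirer';

export async function promptForTasksToLink(tasks) {
  const { wantsToLink } = await inquirer.prompt([
    {
      type: 'confirm',
      name: 'wantsToLink',
      message: 'Link this research to existing tasks?',
      default: false
    }
  ]);
  if (!wantsToLink) return [];

  // Checkbox interface supporting multiple task selection.
  const { selectedIds } = await inquirer.prompt([
    {
      type: 'checkbox',
      name: 'selectedIds',
      message: 'Select tasks to link:',
      choices: tasks.map((t) => ({
        name: `${t.id}: ${t.title} [${t.status}]`,
        value: t.id
      }))
    }
  ]);
  return selectedIds; // Caller updates these tasks and reports them back.
}
```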

639
tasks/task_095.txt
Normal file
@@ -0,0 +1,639 @@
# Task ID: 95
# Title: Enhance Parse-PRD with Intelligent Task Expansion and Detail Preservation
# Status: pending
# Dependencies: None
# Priority: high
# Description: Transform parse-prd from a simple task generator into an intelligent system that preserves PRD detail resolution through context-aware task expansion. This addresses the critical issue where highly detailed PRDs lose their specificity when parsed into too few top-level tasks, and ensures that task expansions are grounded in actual PRD content rather than generic AI assumptions.
# Details:
## Core Problem Statement

The current parse-prd implementation suffers from a fundamental resolution loss problem:

1. **Detail Compression**: Complex, detailed PRDs get compressed into a fixed number of top-level tasks (default 10), losing critical specificity
2. **Orphaned Expansions**: When tasks are later expanded via expand-task, the AI lacks the original PRD context, resulting in generic subtasks that don't reflect the PRD's specific requirements
3. **Binary Approach**: The system either creates too few high-level tasks OR requires manual expansion that loses PRD context

## Solution Architecture

### Phase 1: Enhanced PRD Analysis Engine
- Implement intelligent PRD segmentation that identifies natural task boundaries based on content structure
- Create a PRD context preservation system that maintains detailed mappings between PRD sections and generated tasks
- Develop adaptive task count determination based on PRD complexity metrics (length, technical depth, feature count)

### Phase 2: Context-Aware Task Generation
- Modify generateTasksFromPRD to create tasks with embedded PRD context references
- Implement a PRD section mapping system that links each task to its source PRD content
- Add metadata fields to tasks that preserve original PRD language and specifications

### Phase 3: Intelligent In-Flight Expansion
- Add optional `--expand-tasks` flag to parse-prd that triggers immediate expansion after initial task generation
- Implement context-aware expansion that uses the original PRD content for each task's expansion
- Create a two-pass system: first pass generates tasks with PRD context, second pass expands using that context

### Phase 4: PRD-Grounded Expansion Logic
- Enhance the expansion prompt generation to include relevant PRD excerpts for each task being expanded
- Implement smart context windowing that includes related PRD sections when expanding tasks
- Add validation to ensure expanded subtasks maintain fidelity to original PRD specifications

## Technical Implementation Details

### File Modifications Required:
1. **scripts/modules/task-manager/parse-prd.js**
   - Add PRD analysis functions for intelligent segmentation
   - Implement context preservation during task generation
   - Add optional expansion pipeline integration
   - Create PRD-to-task mapping system

2. **scripts/modules/task-manager/expand-task.js**
   - Enhance to accept PRD context as additional input
   - Modify expansion prompts to include relevant PRD excerpts
   - Add PRD-grounded validation for generated subtasks

3. **scripts/modules/ai-services-unified.js**
   - Add support for context-aware prompting with PRD excerpts
   - Implement intelligent context windowing for large PRDs
   - Add PRD analysis capabilities for complexity assessment

### New Data Structures:
```javascript
// Enhanced task structure with PRD context
{
  id: "1",
  title: "User Authentication System",
  description: "...",
  prdContext: {
    sourceSection: "Authentication Requirements (Lines 45-78)",
    originalText: "The system must implement OAuth 2.0...",
    relatedSections: ["Security Requirements", "User Management"],
    contextWindow: "Full PRD excerpt relevant to this task"
  },
  // ... existing fields
}

// PRD analysis metadata
{
  prdAnalysis: {
    totalComplexity: 8.5,
    naturalTaskBoundaries: [...],
    recommendedTaskCount: 15,
    sectionMappings: {...}
  }
}
```

### New CLI Options:
- `--expand-tasks`: Automatically expand generated tasks using PRD context
- `--preserve-detail`: Maximum detail preservation mode
- `--adaptive-count`: Let AI determine optimal task count based on PRD complexity
- `--context-window-size`: Control how much PRD context to include in expansions

## Implementation Strategy

### Step 1: PRD Analysis Enhancement
- Create PRD parsing utilities that identify natural section boundaries
- Implement complexity scoring for different PRD sections
- Build context extraction functions that preserve relevant details

### Step 2: Context-Aware Task Generation
- Modify the task generation prompt to include section-specific context
- Implement task-to-PRD mapping during generation
- Add metadata fields to preserve PRD relationships

### Step 3: Intelligent Expansion Pipeline
- Create expansion logic that uses preserved PRD context
- Implement smart prompt engineering that includes relevant PRD excerpts
- Add validation to ensure subtask fidelity to original requirements

### Step 4: Integration and Testing
- Integrate new functionality with existing parse-prd workflow
- Add comprehensive testing with various PRD types and complexities
- Implement telemetry for tracking detail preservation effectiveness

## Success Metrics
- PRD detail preservation rate (measured by semantic similarity between PRD and generated tasks)
- Reduction in manual task refinement needed post-parsing
- Improved accuracy of expanded subtasks compared to PRD specifications
- User satisfaction with task granularity and detail accuracy

## Edge Cases and Considerations
- Very large PRDs that exceed context windows
- PRDs with conflicting or ambiguous requirements
- Integration with existing task expansion workflows
- Performance impact of enhanced analysis
- Backward compatibility with existing parse-prd usage

# Test Strategy:

# Subtasks:
## 1. Implement PRD Analysis and Segmentation Engine [pending]
### Dependencies: None
### Description: Create intelligent PRD parsing that identifies natural task boundaries and complexity metrics
### Details:
## Implementation Requirements

### Core Functions to Implement:
1. **analyzePRDStructure(prdContent)**
   - Parse PRD into logical sections using headers, bullet points, and semantic breaks
   - Identify feature boundaries, technical requirements, and implementation sections
   - Return structured analysis with section metadata

2. **calculatePRDComplexity(prdContent)**
   - Analyze technical depth, feature count, integration requirements
   - Score complexity on a 1-10 scale for different aspects
   - Return recommended task count based on complexity

3. **extractTaskBoundaries(prdAnalysis)**
   - Identify natural breaking points for task creation
   - Group related requirements into logical task units
   - Preserve context relationships between sections

### Technical Approach:
- Use regex patterns and NLP techniques to identify section headers (see the segmentation sketch below)
- Implement keyword analysis for technical complexity assessment
- Create semantic grouping algorithms for related requirements
- Build context preservation mappings

### Output Structure:
```javascript
{
  sections: [
    {
      title: "User Authentication",
      content: "...",
      startLine: 45,
      endLine: 78,
      complexity: 7,
      relatedSections: ["Security", "User Management"]
    }
  ],
  overallComplexity: 8.5,
  recommendedTaskCount: 15,
  naturalBoundaries: [...],
  contextMappings: {...}
}
```

### Integration Points:
- Called at the beginning of the parse-prd process
- Results used to inform task generation strategy
- Analysis stored for later use in the expansion phase
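
As a starting point, header detection alone goes a long way. A minimal sketch of analyzePRDStructure using regex heuristics only; the keyword list and the task-count formula are placeholder assumptions, and a fuller implementation would add the semantic grouping described above:

```javascript
// Minimal regex-based PRD segmentation sketch (heuristics, not the final engine).
export function analyzePRDStructure(prdContent) {
  const lines = prdContent.split('\n');
  const sections = [];
  let current = null;

  lines.forEach((line, index) => {
    // Treat markdown headers as natural section boundaries.
    const header = line.match(/^#{1,4}\s+(.+)$/);
    if (header) {
      if (current) {
        current.endLine = index; // previous section ends on the line before this header
        sections.push(current);
      }
      current = {
        title: header[1].trim(),
        startLine: index + 1, // 1-based line numbers, matching the output structure
        endLine: lines.length,
        contentLines: []
      };
    } else if (current) {
      current.contentLines.push(line);
    }
  });
  if (current) sections.push(current);

  // Crude complexity heuristic: technical keyword density per section.
  const keywords = /\b(API|auth|database|integration|schema|protocol)\b/gi;
  sections.forEach((s) => {
    s.content = s.contentLines.join('\n');
    delete s.contentLines;
    const hits = (s.content.match(keywords) || []).length;
    s.complexity = Math.min(10, 1 + hits);
  });

  const overallComplexity =
    sections.reduce((sum, s) => sum + s.complexity, 0) / Math.max(1, sections.length);

  return {
    sections,
    overallComplexity,
    // One task per section as a floor, scaled up for more complex PRDs.
    recommendedTaskCount: Math.max(
      sections.length,
      Math.round(overallComplexity * 1.5)
    )
  };
}
```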

## 2. Enhance Task Generation with PRD Context Preservation [pending]
### Dependencies: 95.1
### Description: Modify generateTasksFromPRD to embed PRD context and maintain source mappings
### Details:
## Implementation Requirements

### Core Modifications to generateTasksFromPRD:
1. **Add PRD Context Embedding**
   - Modify task generation prompt to include relevant PRD excerpts
   - Ensure each generated task includes source section references
   - Preserve original PRD language and specifications in task metadata

2. **Implement Context Windowing**
   - For large PRDs, implement intelligent context windowing
   - Include relevant sections for each task being generated
   - Maintain context relationships between related tasks

3. **Enhanced Task Structure**
   - Add prdContext field to task objects
   - Include sourceSection, originalText, and relatedSections
   - Store contextWindow for later use in expansions

### Technical Implementation:
```javascript
// Enhanced task generation with context
const generateTaskWithContext = async (prdSection, relatedSections, fullPRD) => {
  const contextWindow = buildContextWindow(prdSection, relatedSections, fullPRD);
  const prompt = `
Generate a task based on this PRD section:

PRIMARY SECTION:
${prdSection.content}

RELATED CONTEXT:
${contextWindow}

Ensure the task preserves all specific requirements and technical details.
`;

  // Generate task with embedded context
  const task = await generateTask(prompt);
  task.prdContext = {
    sourceSection: prdSection.title,
    originalText: prdSection.content,
    relatedSections: relatedSections.map(s => s.title),
    contextWindow: contextWindow
  };

  return task;
};
```

(A sketch of the `buildContextWindow` helper this snippet assumes appears after the integration notes below.)

### Context Preservation Strategy:
- Map each task to its source PRD sections
- Preserve technical specifications and requirements language
- Maintain relationships between interdependent features
- Store context for later use in expansion phase

### Integration with Existing Flow:
- Modify existing generateTasksFromPRD function
- Maintain backward compatibility with simple PRDs
- Add new metadata fields without breaking existing structure
- Ensure context is available for subsequent operations
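
One way buildContextWindow could work, as a character-budgeted concatenation; the budget default and partial-excerpt threshold are arbitrary assumptions:

```javascript
// Illustrative sketch of the buildContextWindow helper assumed above.
const buildContextWindow = (prdSection, relatedSections, fullPRD, maxChars = 2000) => {
  // fullPRD is reserved for whole-document heuristics in a fuller implementation.
  const parts = [];

  // Always lead with the primary section so it is never truncated away.
  parts.push(`[${prdSection.title}]\n${prdSection.content}`);

  // Append related sections until the window budget is exhausted.
  for (const related of relatedSections) {
    const excerpt = `[${related.title}]\n${related.content}`;
    const used = parts.join('\n\n').length;
    if (used + excerpt.length > maxChars) {
      // Take a partial excerpt only if the leftover budget is worth using.
      const remaining = maxChars - used;
      if (remaining > 200) parts.push(excerpt.slice(0, remaining) + '…');
      break;
    }
    parts.push(excerpt);
  }

  return parts.join('\n\n');
};
```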

## 3. Implement In-Flight Task Expansion Pipeline [pending]
### Dependencies: 95.2
### Description: Add optional --expand-tasks flag and intelligent expansion using preserved PRD context
### Details:
## Implementation Requirements

### Core Features:
1. **Add --expand-tasks CLI Flag**
   - Optional flag for parse-prd command
   - Triggers automatic expansion after initial task generation
   - Configurable expansion depth and strategy

2. **Two-Pass Processing System**
   - First pass: Generate tasks with PRD context preservation
   - Second pass: Expand tasks using their embedded PRD context
   - Maintain context fidelity throughout the process

3. **Context-Aware Expansion Logic**
   - Use preserved PRD context for each task's expansion
   - Include relevant PRD excerpts in expansion prompts
   - Ensure subtasks maintain fidelity to original specifications

### Technical Implementation:
```javascript
// Enhanced parse-prd with expansion pipeline
const parsePRDWithExpansion = async (prdContent, options) => {
  // Phase 1: Analyze and generate tasks with context
  const prdAnalysis = await analyzePRDStructure(prdContent);
  const tasksWithContext = await generateTasksWithContext(prdAnalysis);

  // Phase 2: Expand tasks if requested
  if (options.expandTasks) {
    for (const task of tasksWithContext) {
      if (shouldExpandTask(task, prdAnalysis)) {
        const expandedSubtasks = await expandTaskWithPRDContext(task);
        task.subtasks = expandedSubtasks;
      }
    }
  }

  return tasksWithContext;
};

// Context-aware task expansion
const expandTaskWithPRDContext = async (task) => {
  const { prdContext } = task;
  const expansionPrompt = `
Expand this task into detailed subtasks using the original PRD context:

TASK: ${task.title}
DESCRIPTION: ${task.description}

ORIGINAL PRD CONTEXT:
${prdContext.originalText}

RELATED SECTIONS:
${prdContext.contextWindow}

Generate subtasks that preserve all technical details and requirements from the PRD.
`;

  return await generateSubtasks(expansionPrompt);
};
```

### CLI Integration:
- Add --expand-tasks flag to parse-prd command
- Add --expansion-depth option for controlling subtask levels
- Add --preserve-detail flag for maximum context preservation
- Maintain backward compatibility with existing parse-prd usage

### Expansion Strategy:
- Determine which tasks should be expanded based on complexity (see the shouldExpandTask sketch below)
- Use PRD context to generate accurate, detailed subtasks
- Preserve technical specifications and implementation details
- Validate subtask accuracy against original PRD content

### Performance Considerations:
- Implement batching for large numbers of tasks
- Add progress indicators for long-running expansions
- Optimize context window sizes for efficiency
- Cache PRD analysis results for reuse
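
The pipeline above leaves the shouldExpandTask predicate undefined. A minimal sketch, assuming a complexity threshold (the threshold value and option name are placeholders):

```javascript
// Illustrative sketch of the shouldExpandTask predicate assumed above.
const shouldExpandTask = (task, prdAnalysis, options = {}) => {
  const minComplexity = options.minComplexity ?? 6;

  // Never re-expand a task that already has subtasks.
  if (task.subtasks && task.subtasks.length > 0) return false;

  // Look up the complexity of the PRD section this task came from.
  const section = prdAnalysis.sections.find(
    (s) => s.title === task.prdContext?.sourceSection
  );
  if (!section) return false; // no PRD grounding, skip in-flight expansion

  return section.complexity >= minComplexity;
};
```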

## 4. Enhance Expand-Task with PRD Context Integration [pending]
### Dependencies: 95.2
### Description: Modify existing expand-task functionality to leverage preserved PRD context for more accurate expansions
### Details:
## Implementation Requirements

### Core Enhancements to expand-task.js:
1. **PRD Context Detection**
   - Check if task has embedded prdContext metadata
   - Extract relevant PRD sections for expansion
   - Fall back to existing expansion logic if no PRD context

2. **Context-Enhanced Expansion Prompts**
   - Include original PRD excerpts in expansion prompts
   - Add related section context for comprehensive understanding
   - Preserve technical specifications and requirements language

3. **Validation and Quality Assurance**
   - Validate generated subtasks against original PRD content
   - Ensure technical accuracy and requirement compliance
   - Flag potential discrepancies for review

### Technical Implementation:
```javascript
// Enhanced expand-task with PRD context
const expandTaskWithContext = async (taskId, options, context) => {
  const task = await getTask(taskId);

  // Check for PRD context
  if (task.prdContext) {
    return await expandWithPRDContext(task, options);
  } else {
    // Fall back to existing expansion logic
    return await expandTaskStandard(task, options);
  }
};

const expandWithPRDContext = async (task, options) => {
  const { prdContext } = task;

  const enhancedPrompt = `
Expand this task into detailed subtasks using the original PRD context:

TASK DETAILS:
Title: ${task.title}
Description: ${task.description}
Current Details: ${task.details}

ORIGINAL PRD CONTEXT:
Source Section: ${prdContext.sourceSection}
Original Requirements:
${prdContext.originalText}

RELATED CONTEXT:
${prdContext.contextWindow}

EXPANSION REQUIREMENTS:
- Preserve all technical specifications from the PRD
- Maintain requirement accuracy and completeness
- Generate ${options.num || 'appropriate number of'} subtasks
- Include implementation details that reflect PRD specifics

Generate subtasks that are grounded in the original PRD content.
`;

  const subtasks = await generateSubtasks(enhancedPrompt, options);

  // Add PRD context inheritance to subtasks
  subtasks.forEach(subtask => {
    subtask.prdContext = {
      inheritedFrom: task.id,
      sourceSection: prdContext.sourceSection,
      relevantExcerpt: extractRelevantExcerpt(prdContext, subtask)
    };
  });

  return subtasks;
};
```

(The `extractRelevantExcerpt` helper this snippet assumes is sketched after the quality assurance notes below.)

### Integration Points:
1. **Modify existing expand-task.js**
   - Add PRD context detection logic
   - Enhance prompt generation with context
   - Maintain backward compatibility

2. **Update expansion validation**
   - Add PRD compliance checking
   - Implement quality scoring for context fidelity
   - Flag potential accuracy issues

3. **CLI and MCP Integration**
   - Update expand-task command to leverage PRD context
   - Add options for context-aware expansion
   - Maintain existing command interface

### Context Inheritance Strategy:
- Pass relevant PRD context to generated subtasks
- Create context inheritance chain for nested expansions
- Preserve source traceability throughout expansion tree
- Enable future re-expansion with maintained context

### Quality Assurance Features:
- Semantic similarity checking between subtasks and PRD
- Technical requirement compliance validation
- Automated flagging of potential context drift
- User feedback integration for continuous improvement
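
A sketch of extractRelevantExcerpt using simple keyword overlap; a fuller implementation could use embeddings for the semantic similarity checking described above, and the length cap is an arbitrary assumption:

```javascript
// Illustrative sketch of the extractRelevantExcerpt helper assumed above.
const extractRelevantExcerpt = (prdContext, subtask, maxChars = 500) => {
  const paragraphs = prdContext.originalText
    .split(/\n{2,}/)
    .map((p) => p.trim())
    .filter(Boolean);

  const subtaskWords = new Set(
    `${subtask.title} ${subtask.description || ''}`
      .toLowerCase()
      .match(/[a-z0-9]{4,}/g) || []
  );

  // Score each PRD paragraph by how many subtask keywords it contains.
  let best = paragraphs[0] || '';
  let bestScore = -1;
  for (const paragraph of paragraphs) {
    const words = paragraph.toLowerCase().match(/[a-z0-9]{4,}/g) || [];
    const score = words.filter((w) => subtaskWords.has(w)).length;
    if (score > bestScore) {
      bestScore = score;
      best = paragraph;
    }
  }

  return best.slice(0, maxChars);
};
```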

## 5. Add New CLI Options and MCP Parameters [pending]
### Dependencies: 95.3
### Description: Implement new command-line flags and MCP tool parameters for enhanced PRD parsing
### Details:
## Implementation Requirements

### New CLI Options for parse-prd:
1. **--expand-tasks**
   - Automatically expand generated tasks using PRD context
   - Boolean flag, default false
   - Triggers in-flight expansion pipeline

2. **--preserve-detail**
   - Maximum detail preservation mode
   - Boolean flag, default false
   - Ensures highest fidelity to PRD content

3. **--adaptive-count**
   - Let AI determine optimal task count based on PRD complexity
   - Boolean flag, default false
   - Overrides --num-tasks when enabled

4. **--context-window-size**
   - Control how much PRD context to include in expansions
   - Integer value, default 2000 characters
   - Balances context richness with performance

5. **--expansion-depth**
   - Control how many levels deep to expand tasks
   - Integer value, default 1
   - Prevents excessive nesting

### MCP Tool Parameter Updates:
```javascript
// Enhanced parse_prd MCP tool parameters
{
  input: "Path to PRD file",
  output: "Output path for tasks.json",
  numTasks: "Number of top-level tasks (overridden by adaptiveCount)",
  expandTasks: "Boolean - automatically expand tasks with PRD context",
  preserveDetail: "Boolean - maximum detail preservation mode",
  adaptiveCount: "Boolean - AI determines optimal task count",
  contextWindowSize: "Integer - context size for expansions",
  expansionDepth: "Integer - levels of expansion to perform",
  research: "Boolean - use research model for enhanced analysis",
  force: "Boolean - overwrite existing files"
}
```

### CLI Command Updates:
```bash
# Enhanced parse-prd command examples
task-master parse-prd prd.txt --expand-tasks --preserve-detail
task-master parse-prd prd.txt --adaptive-count --expansion-depth=2
task-master parse-prd prd.txt --context-window-size=3000 --research
```

### Implementation Details:
1. **Update commands.js**
   - Add new option definitions
   - Update parse-prd command handler
   - Maintain backward compatibility

2. **Update MCP tool definition**
   - Add new parameter schemas
   - Update tool description and examples
   - Ensure parameter validation

3. **Parameter Processing Logic**
   - Validate parameter combinations
   - Set appropriate defaults
   - Handle conflicting options gracefully

### Validation Rules (a validation sketch follows this list):
- expansion-depth must be a positive integer ≤ 3
- context-window-size must be between 500-5000 characters
- adaptive-count overrides num-tasks when both are specified
- expand-tasks requires either adaptive-count or num-tasks > 5
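
The rules above translate directly into a small validation function; the function name and error-message wording are illustrative, but the bounds come from the rules as written:

```javascript
// Illustrative sketch of validating the new parse-prd option combinations.
function validateParsePrdOptions(options) {
  const errors = [];

  const depth = Number(options.expansionDepth ?? 1);
  if (!Number.isInteger(depth) || depth < 1 || depth > 3) {
    errors.push('--expansion-depth must be a positive integer <= 3');
  }

  const windowSize = Number(options.contextWindowSize ?? 2000);
  if (windowSize < 500 || windowSize > 5000) {
    errors.push('--context-window-size must be between 500 and 5000 characters');
  }

  // adaptive-count takes precedence over an explicit task count.
  if (options.adaptiveCount && options.numTasks) {
    options.numTasks = undefined;
  }

  if (
    options.expandTasks &&
    !options.adaptiveCount &&
    !(Number(options.numTasks) > 5)
  ) {
    errors.push('--expand-tasks requires either --adaptive-count or --num-tasks > 5');
  }

  return { valid: errors.length === 0, errors };
}
```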

### Help Documentation Updates:
- Update command help text with new options
- Add usage examples for different scenarios
- Document parameter interactions and constraints
- Include performance considerations for large PRDs

## 6. Implement Comprehensive Testing and Validation [pending]
### Dependencies: 95.4, 95.5
### Description: Create test suite for PRD analysis, context preservation, and expansion accuracy
### Details:
## Implementation Requirements

### Test Categories:
1. **PRD Analysis Testing**
   - Test section identification with various PRD formats
   - Validate complexity scoring accuracy
   - Test boundary detection for different document structures
   - Verify context mapping correctness

2. **Context Preservation Testing**
   - Validate PRD context embedding in generated tasks
   - Test context window generation and sizing
   - Verify source section mapping accuracy
   - Test context inheritance in subtasks

3. **Expansion Accuracy Testing**
   - Compare PRD-grounded vs standard expansions
   - Measure semantic similarity between PRD and subtasks
   - Test technical requirement preservation
   - Validate expansion depth and quality

4. **Integration Testing**
   - Test full parse-prd pipeline with expansion
   - Validate CLI option combinations
   - Test MCP tool parameter handling
   - Verify backward compatibility

### Test Data Requirements:
```javascript
// Test PRD samples
const testPRDs = {
  simple: "Basic PRD with minimal technical details",
  complex: "Detailed PRD with extensive technical specifications",
  structured: "Well-organized PRD with clear sections",
  unstructured: "Free-form PRD with mixed content",
  technical: "Highly technical PRD with specific requirements",
  large: "Very large PRD testing context window limits"
};
```

### Validation Metrics:
1. **Detail Preservation Score**
   - Semantic similarity between PRD and generated tasks
   - Technical requirement coverage percentage
   - Specification accuracy rating

2. **Context Fidelity Score**
   - Accuracy of source section mapping
   - Relevance of included context windows
   - Quality of context inheritance

3. **Expansion Quality Score**
   - Subtask relevance to parent task and PRD
   - Technical accuracy of implementation details
   - Completeness of requirement coverage

### Test Implementation:
```javascript
// Example test structure
describe('Enhanced Parse-PRD', () => {
  describe('PRD Analysis', () => {
    test('should identify sections correctly', async () => {
      const analysis = await analyzePRDStructure(testPRDs.structured);
      expect(analysis.sections).toHaveLength(expectedSectionCount);
      expect(analysis.overallComplexity).toBeGreaterThan(0);
    });

    test('should calculate appropriate task count', async () => {
      const analysis = await analyzePRDStructure(testPRDs.complex);
      expect(analysis.recommendedTaskCount).toBeGreaterThan(10);
    });
  });

  describe('Context Preservation', () => {
    test('should embed PRD context in tasks', async () => {
      const tasks = await generateTasksWithContext(testPRDs.technical);
      tasks.forEach(task => {
        expect(task.prdContext).toBeDefined();
        expect(task.prdContext.sourceSection).toBeTruthy();
        expect(task.prdContext.originalText).toBeTruthy();
      });
    });
  });

  describe('Expansion Accuracy', () => {
    test('should generate relevant subtasks from PRD context', async () => {
      const task = createTestTaskWithPRDContext();
      const subtasks = await expandTaskWithPRDContext(task);

      const relevanceScore = calculateRelevanceScore(subtasks, task.prdContext);
      expect(relevanceScore).toBeGreaterThan(0.8);
    });
  });
});
```

(A keyword-overlap sketch of the `calculateRelevanceScore` helper used above follows.)
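
A minimal sketch of calculateRelevanceScore, using keyword overlap as a cheap proxy; an embedding-based similarity would be more faithful to the "semantic similarity" metric named above, and the 0.8 test threshold assumes this normalized 0-1 score:

```javascript
// Illustrative sketch of the calculateRelevanceScore helper used in the tests.
function calculateRelevanceScore(subtasks, prdContext) {
  const tokenize = (text) =>
    new Set(text.toLowerCase().match(/[a-z0-9]{4,}/g) || []);

  const prdTokens = tokenize(
    `${prdContext.originalText} ${prdContext.contextWindow || ''}`
  );

  // Average, per subtask, the fraction of its keywords found in the PRD.
  const scores = subtasks.map((subtask) => {
    const tokens = [...tokenize(`${subtask.title} ${subtask.description || ''}`)];
    if (tokens.length === 0) return 0;
    const hits = tokens.filter((t) => prdTokens.has(t)).length;
    return hits / tokens.length;
  });

  return scores.reduce((a, b) => a + b, 0) / Math.max(1, scores.length);
}
```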

### Performance Testing:
- Test with large PRDs (>10,000 words)
- Measure processing time for different complexity levels
- Test memory usage with extensive context preservation
- Validate timeout handling for long-running operations

### Quality Assurance Tools:
- Automated semantic similarity checking
- Technical requirement compliance validation
- Context drift detection algorithms
- User acceptance testing framework

### Continuous Integration:
- Add tests to existing CI pipeline
- Set up performance benchmarking
- Implement quality gates for PRD processing
- Create regression testing for context preservation
1304
tasks/task_096.txt
Normal file
File diff suppressed because it is too large
1915
tasks/task_097.txt
Normal file
File diff suppressed because it is too large
315
tasks/tasks.json
File diff suppressed because one or more lines are too long