task management
9
.changeset/cuddly-walls-clap.md
Normal file
@@ -0,0 +1,9 @@
---
'task-master-ai': patch
---

Adds and updates supported AI models with costs:
- Added new OpenRouter models: GPT-4.1 series, O3, Codex Mini, Llama 4 Maverick, Llama 4 Scout, Qwen3-235b
- Added Mistral models: Devstral Small, Mistral Nemo
- Updated Ollama models with latest variants: Devstral, Qwen3, Mistral-small3.1, Llama3.3
- Updated Gemini model to latest 2.5 Flash preview version
@@ -1,32 +1,32 @@
{
"models": {
"main": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 100000,
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 8192,
"temperature": 0.2
}
},
"global": {
"logLevel": "info",
"debug": false,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseUrl": "http://localhost:11434/api",
"userId": "1234567890",
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
}
}
"models": {
"main": {
"provider": "ollama",
"modelId": "devstral:latest",
"maxTokens": 120000,
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 8192,
"temperature": 0.2
}
},
"global": {
"logLevel": "info",
"debug": false,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseUrl": "http://localhost:11434/api",
"userId": "1234567890",
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
}
}
@@ -191,43 +191,43 @@
],
"ollama": [
{
"id": "gemma3:27b",
"id": "devstral:latest",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gemma3:12b",
"id": "qwen3:latest",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "qwq",
"id": "qwen3:14b",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "deepseek-r1",
"id": "qwen3:32b",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "mistral-small3.1",
"id": "mistral-small3.1:latest",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "llama3.3",
"id": "llama3.3:latest",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "phi4",
"id": "phi4:latest",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"]
@@ -235,9 +235,16 @@
],
"openrouter": [
{
"id": "google/gemini-2.0-flash-001",
"id": "google/gemini-2.5-flash-preview-05-20",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 1048576
},
{
"id": "google/gemini-2.5-flash-preview-05-20:thinking",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.15, "output": 3.50 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 1048576
},
@@ -263,40 +270,25 @@
"max_tokens": 64000
},
{
"id": "deepseek/deepseek-r1:free",
"id": "openai/gpt-4.1",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"cost_per_1m_tokens": { "input": 2, "output": 8 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 163840
"max_tokens": 1000000
},
{
"id": "microsoft/mai-ds-r1:free",
"id": "openai/gpt-4.1-mini",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"cost_per_1m_tokens": { "input": 0.40, "output": 1.60 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 163840
"max_tokens": 1000000
},
{
"id": "google/gemini-2.5-pro-preview-03-25",
"id": "openai/gpt-4.1-nano",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 1.25, "output": 10 },
"cost_per_1m_tokens": { "input": 0.10, "output": 0.40 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 65535
},
{
"id": "google/gemini-2.5-flash-preview",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
"allowed_roles": ["main"],
"max_tokens": 65535
},
{
"id": "google/gemini-2.5-flash-preview:thinking",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.15, "output": 3.5 },
"allowed_roles": ["main"],
"max_tokens": 65535
"max_tokens": 1000000
},
{
"id": "openai/o3",
@@ -305,6 +297,20 @@
"allowed_roles": ["main", "fallback"],
"max_tokens": 200000
},
{
"id": "openai/codex-mini",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 1.5, "output": 6 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 100000
},
{
"id": "openai/gpt-4o-mini",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 100000
},
{
"id": "openai/o4-mini",
"swe_score": 0.45,
@@ -334,46 +340,18 @@
"max_tokens": 1048576
},
{
"id": "google/gemma-3-12b-it:free",
"id": "meta-llama/llama-4-maverick",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"cost_per_1m_tokens": { "input": 0.18, "output": 0.60 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 131072
"max_tokens": 1000000
},
{
"id": "google/gemma-3-12b-it",
"id": "meta-llama/llama-4-scout",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 50, "output": 100 },
"cost_per_1m_tokens": { "input": 0.08, "output": 0.30 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 131072
},
{
"id": "google/gemma-3-27b-it:free",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 96000
},
{
"id": "google/gemma-3-27b-it",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 100, "output": 200 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 131072
},
{
"id": "qwen/qwq-32b:free",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 40000
},
{
"id": "qwen/qwq-32b",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 150, "output": 200 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 131072
"max_tokens": 1000000
},
{
"id": "qwen/qwen-max",
@@ -389,6 +367,13 @@
"allowed_roles": ["main", "fallback"],
"max_tokens": 1000000
},
{
"id": "qwen/qwen3-235b-a22b",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.14, "output": 2 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 24000
},
{
"id": "mistralai/mistral-small-3.1-24b-instruct:free",
"swe_score": 0,
@@ -403,6 +388,20 @@
"allowed_roles": ["main", "fallback"],
"max_tokens": 128000
},
{
"id": "mistralai/devstral-small",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.10, "output": 0.30 },
"allowed_roles": ["main"],
"max_tokens": 110000
},
{
"id": "mistralai/mistral-nemo",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.03, "output": 0.07 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 100000
},
{
"id": "thudm/glm-4-32b:free",
"swe_score": 0,

@@ -1,6 +1,6 @@
# Task ID: 63
# Title: Add pnpm Support for the Taskmaster Package
# Status: pending
# Status: done
# Dependencies: None
# Priority: medium
# Description: Implement full support for pnpm as an alternative package manager in the Taskmaster application, ensuring users have the exact same experience as with npm when installing and managing the package. The installation process, including any CLI prompts or web interfaces, must serve the exact same content and user experience regardless of whether npm or pnpm is used. The project uses 'module' as the package type, defines binaries 'task-master' and 'task-master-mcp', and its core logic resides in 'scripts/modules/'. The 'init' command (via scripts/init.js) creates the directory structure (.cursor/rules, scripts, tasks), copies templates (.env.example, .gitignore, rule files, dev.js), manages package.json merging, and sets up MCP config (.cursor/mcp.json). All dependencies are standard npm dependencies listed in package.json, and manual modifications are being removed.
@@ -88,49 +88,49 @@ This implementation should maintain full feature parity and identical user exper
Success criteria: Taskmaster should install and function identically regardless of whether it was installed via npm or pnpm, with no degradation in functionality, performance, or user experience. All binaries should be properly linked, and the directory structure should be correctly created.

# Subtasks:
## 1. Update Documentation for pnpm Support [pending]
## 1. Update Documentation for pnpm Support [done]
### Dependencies: None
### Description: Revise installation and usage documentation to include pnpm commands and instructions for installing and managing Taskmaster with pnpm. Clearly state that the installation process, including any website or UI shown, is identical to npm. Ensure documentation reflects the use of 'module' package type, binaries, and the init process as defined in scripts/init.js.
### Details:
Add pnpm installation commands (e.g., `pnpm add taskmaster`) and update all relevant sections in the README and official docs to reflect pnpm as a supported package manager. Document that any installation website or prompt is the same as with npm. Include notes on the 'module' package type, binaries, and the directory/template setup performed by scripts/init.js.

## 2. Ensure Package Scripts Compatibility with pnpm [pending]
## 2. Ensure Package Scripts Compatibility with pnpm [done]
### Dependencies: 63.1
### Description: Review and update package.json scripts to ensure they work seamlessly with pnpm's execution model. Confirm that any scripts responsible for showing a website or prompt during install behave identically with pnpm and npm. Ensure compatibility with 'module' package type and correct binary definitions.
### Details:
Test all scripts using `pnpm run <script>`, address any pnpm-specific path or execution differences, and modify scripts as needed for compatibility. Pay special attention to any scripts that trigger a website or prompt during installation, ensuring they serve the same content as npm. Validate that scripts/init.js and binaries are referenced correctly for ESM ('module') projects.

## 3. Generate and Validate pnpm Lockfile [pending]
## 3. Generate and Validate pnpm Lockfile [done]
### Dependencies: 63.2
### Description: Install dependencies using pnpm to create a pnpm-lock.yaml file and ensure it accurately reflects the project's dependency tree, considering the 'module' package type.
### Details:
Run `pnpm install` to generate the lockfile, check it into version control, and verify that dependency resolution is correct and consistent. Ensure that all dependencies listed in package.json are resolved as expected for an ESM project.

## 4. Test Taskmaster Installation and Operation with pnpm [pending]
## 4. Test Taskmaster Installation and Operation with pnpm [done]
### Dependencies: 63.3
### Description: Thoroughly test Taskmaster's installation and CLI operation when installed via pnpm, both globally and locally. Confirm that any website or UI shown during installation is identical to npm. Validate that binaries and the init process (scripts/init.js) work as expected.
### Details:
Perform global (`pnpm add -g taskmaster`) and local installations, verify CLI commands, and check for any pnpm-specific issues or incompatibilities. Ensure any installation UIs or websites appear identical to npm installations, including any website or prompt shown during install. Test that binaries 'task-master' and 'task-master-mcp' are linked and that scripts/init.js creates the correct structure and templates.

## 5. Integrate pnpm into CI/CD Pipeline [pending]
## 5. Integrate pnpm into CI/CD Pipeline [done]
### Dependencies: 63.4
### Description: Update CI/CD workflows to include pnpm in the test matrix, ensuring all tests pass when dependencies are installed with pnpm. Confirm that tests cover the 'module' package type, binaries, and init process.
### Details:
Modify GitHub Actions or other CI configurations to use pnpm/action-setup, run tests with pnpm, and cache pnpm dependencies for efficiency. Ensure that CI covers CLI commands, binary linking, and the directory/template setup performed by scripts/init.js.

## 6. Verify Installation UI/Website Consistency [pending]
## 6. Verify Installation UI/Website Consistency [done]
### Dependencies: 63.4
### Description: Ensure any installation UIs, websites, or interactive prompts—including any website or prompt shown during install—appear and function identically when installing with pnpm compared to npm. Confirm that the experience is consistent for the 'module' package type and the init process.
### Details:
Identify all user-facing elements during the installation process, including any website or prompt shown during install, and verify they are consistent across package managers. If a website is shown during installation, ensure it appears the same regardless of package manager used. Validate that any prompts or UIs triggered by scripts/init.js are identical.

## 7. Test init.js Script with pnpm [pending]
## 7. Test init.js Script with pnpm [done]
### Dependencies: 63.4
### Description: Verify that the scripts/init.js file works correctly when Taskmaster is installed via pnpm, creating the proper directory structure and copying all required templates as defined in the project structure.
### Details:
Test the init command to ensure it properly creates .cursor/rules, scripts, and tasks directories, copies templates (.env.example, .gitignore, rule files, dev.js), handles package.json merging, and sets up MCP config (.cursor/mcp.json) as per scripts/init.js.

## 8. Verify Binary Links with pnpm [pending]
## 8. Verify Binary Links with pnpm [done]
### Dependencies: 63.4
### Description: Ensure that the task-master and task-master-mcp binaries are properly defined in package.json, linked, and executable when installed via pnpm, in both global and local installations.
### Details:

@@ -1,6 +1,6 @@
# Task ID: 64
# Title: Add Yarn Support for Taskmaster Installation
# Status: pending
# Status: done
# Dependencies: None
# Priority: medium
# Description: Implement full support for installing and managing Taskmaster using Yarn package manager, ensuring users have the exact same experience as with npm or pnpm. The installation process, including any CLI prompts or web interfaces, must serve the exact same content and user experience regardless of whether npm, pnpm, or Yarn is used. The project uses 'module' as the package type, defines binaries 'task-master' and 'task-master-mcp', and its core logic resides in 'scripts/modules/'. The 'init' command (via scripts/init.js) creates the directory structure (.cursor/rules, scripts, tasks), copies templates (.env.example, .gitignore, rule files, dev.js), manages package.json merging, and sets up MCP config (.cursor/mcp.json). All dependencies are standard npm dependencies listed in package.json, and manual modifications are being removed.
@@ -74,55 +74,55 @@ Testing should verify complete Yarn support through the following steps:
All tests should pass with the same results as when using npm, with identical user experience throughout the installation and usage process.

# Subtasks:
## 1. Update package.json for Yarn Compatibility [pending]
## 1. Update package.json for Yarn Compatibility [done]
### Dependencies: None
### Description: Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods. Confirm that any scripts responsible for showing a website or prompt during install behave identically with Yarn and npm. Ensure compatibility with 'module' package type and correct binary definitions.
### Details:
Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn. Pay special attention to any scripts that trigger a website or prompt during installation, ensuring they serve the same content as npm. Validate that scripts/init.js and binaries are referenced correctly for ESM ('module') projects.

## 2. Add Yarn-Specific Configuration Files [pending]
## 2. Add Yarn-Specific Configuration Files [done]
### Dependencies: 64.1
### Description: Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs for 'module' package type and binary definitions.
### Details:
Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly. Ensure configuration supports ESM and binary linking.

## 3. Test and Fix Yarn Compatibility for Scripts and CLI [pending]
## 3. Test and Fix Yarn Compatibility for Scripts and CLI [done]
### Dependencies: 64.2
### Description: Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn. Confirm that any website or UI shown during installation is identical to npm. Validate that binaries and the init process (scripts/init.js) work as expected.
### Details:
Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting. Ensure any website or prompt shown during install is the same as with npm. Validate that binaries 'task-master' and 'task-master-mcp' are linked and that scripts/init.js creates the correct structure and templates.

## 4. Update Documentation for Yarn Installation and Usage [pending]
## 4. Update Documentation for Yarn Installation and Usage [done]
### Dependencies: 64.3
### Description: Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn. Clearly state that the installation process, including any website or UI shown, is identical to npm. Ensure documentation reflects the use of 'module' package type, binaries, and the init process as defined in scripts/init.js. If the installation process includes a website component or requires account setup, document the steps users must follow. If not, explicitly state that no website or account setup is required.
### Details:
Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs. Document that any installation website or prompt is the same as with npm. Include notes on the 'module' package type, binaries, and the directory/template setup performed by scripts/init.js. If website or account setup is required during installation, provide clear instructions; otherwise, confirm and document that no such steps are needed.

## 5. Implement and Test Package Manager Detection Logic [pending]
## 5. Implement and Test Package Manager Detection Logic [done]
### Dependencies: 64.4
### Description: Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers. Ensure detection logic works for 'module' package type and binary definitions.
### Details:
Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues. Ensure detection logic supports ESM and binary linking.

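A minimal sketch of the detection logic described above (illustrative only, not code from this commit): it assumes detection runs inside a lifecycle script, where npm, Yarn (classic and Berry), pnpm, and Bun all set the `npm_config_user_agent` environment variable; `detectPackageManager` is a hypothetical helper name.

```js
// Hypothetical helper (not part of this commit): identify which package manager
// invoked the current script from the npm_config_user_agent env var, e.g.
//   "yarn/1.22.19 npm/? node/v20.11.1 linux x64"
//   "pnpm/9.1.0 npm/? node/v20.11.1 linux x64"
export function detectPackageManager(env = process.env) {
  const agent = env.npm_config_user_agent || '';
  const match = agent.match(/^(\w+)\/(\S+)/);
  if (!match) return { name: 'npm', version: null }; // default to npm when unknown
  return { name: match[1], version: match[2] };
}
```
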
## 6. Verify Installation UI/Website Consistency [pending]
## 6. Verify Installation UI/Website Consistency [done]
### Dependencies: 64.3
### Description: Ensure any installation UIs, websites, or interactive prompts—including any website or prompt shown during install—appear and function identically when installing with Yarn compared to npm. Confirm that the experience is consistent for the 'module' package type and the init process. If the installation process includes a website or account setup, verify that all required website actions (e.g., account creation, login) are consistent and documented. If not, confirm and document that no website or account setup is needed.
### Details:
Identify all user-facing elements during the installation process, including any website or prompt shown during install, and verify they are consistent across package managers. If a website is shown during installation or account setup is required, ensure it appears and functions the same regardless of package manager used, and document the steps. If not, confirm and document that no website or account setup is needed. Validate that any prompts or UIs triggered by scripts/init.js are identical.

## 7. Test init.js Script with Yarn [pending]
## 7. Test init.js Script with Yarn [done]
### Dependencies: 64.3
### Description: Verify that the scripts/init.js file works correctly when Taskmaster is installed via Yarn, creating the proper directory structure and copying all required templates as defined in the project structure.
### Details:
Test the init command to ensure it properly creates .cursor/rules, scripts, and tasks directories, copies templates (.env.example, .gitignore, rule files, dev.js), handles package.json merging, and sets up MCP config (.cursor/mcp.json) as per scripts/init.js.

## 8. Verify Binary Links with Yarn [pending]
## 8. Verify Binary Links with Yarn [done]
### Dependencies: 64.3
### Description: Ensure that the task-master and task-master-mcp binaries are properly defined in package.json, linked, and executable when installed via Yarn, in both global and local installations.
### Details:
Check that the binaries defined in package.json are correctly linked in node_modules/.bin when installed with Yarn, and that they can be executed without errors. Validate that binaries work for ESM ('module') projects and are accessible after both global and local installs.

## 9. Test Website Account Setup with Yarn [pending]
## 9. Test Website Account Setup with Yarn [done]
### Dependencies: 64.6
### Description: If the installation process includes a website component, verify that account setup, registration, or any other user-specific configurations work correctly when Taskmaster is installed via Yarn. If no website or account setup is required, confirm and document this explicitly.
### Details:

@@ -1,6 +1,6 @@
# Task ID: 65
# Title: Add Bun Support for Taskmaster Installation
# Status: pending
# Status: done
# Dependencies: None
# Priority: medium
# Description: Implement full support for installing and managing Taskmaster using the Bun package manager, ensuring the installation process and user experience are identical to npm, pnpm, and Yarn.
@@ -11,37 +11,37 @@ Update the Taskmaster installation scripts and documentation to support Bun as a
1. Install Taskmaster using Bun on macOS, Linux, and Windows (including WSL and PowerShell), following the updated documentation. 2. Run the full installation and initialization process, verifying that the directory structure, templates, and MCP config are set up identically to npm, pnpm, and Yarn. 3. Execute all CLI commands (including 'init') and confirm functional parity. 4. If a website or account setup is required, test these flows for consistency; if not, confirm and document this. 5. Check for Bun-specific issues (e.g., install hangs) and verify that troubleshooting steps are effective. 6. Ensure the documentation is clear, accurate, and up to date for all supported platforms.

# Subtasks:
## 1. Research Bun compatibility requirements [pending]
## 1. Research Bun compatibility requirements [done]
### Dependencies: None
### Description: Investigate Bun's JavaScript runtime environment and identify key differences from Node.js that may affect Taskmaster's installation and operation.
### Details:
Research Bun's package management, module resolution, and API compatibility with Node.js. Document any potential issues or limitations that might affect Taskmaster. Identify required changes to make Taskmaster compatible with Bun's execution model.

## 2. Update installation scripts for Bun compatibility [pending]
## 2. Update installation scripts for Bun compatibility [done]
### Dependencies: 65.1
### Description: Modify the existing installation scripts to detect and support Bun as a runtime environment.
### Details:
Add Bun detection logic to installation scripts. Update package management commands to use Bun equivalents where needed. Ensure all dependencies are compatible with Bun. Modify any Node.js-specific code to work with Bun's runtime.

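A minimal sketch of the Bun detection mentioned above (illustrative, not part of this commit); it relies on Bun exposing its version under `process.versions.bun`, which Node.js does not define.

```js
// Distinguish the Bun runtime from Node.js at script startup so install logic
// can branch where needed. Bun sets process.versions.bun; Node.js does not.
const isBun = typeof process !== 'undefined' && Boolean(process.versions?.bun);

if (isBun) {
  console.log(`Running under Bun ${process.versions.bun}`);
} else {
  console.log(`Running under Node.js ${process.versions.node}`);
}
```
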
## 3. Create Bun-specific installation path [pending]
## 3. Create Bun-specific installation path [done]
### Dependencies: 65.2
### Description: Implement a dedicated installation flow for Bun users that optimizes for Bun's capabilities.
### Details:
Create a Bun-specific installation script that leverages Bun's performance advantages. Update any environment detection logic to properly identify Bun environments. Ensure proper path resolution and environment variable handling for Bun.

## 4. Test Taskmaster installation with Bun [pending]
## 4. Test Taskmaster installation with Bun [done]
### Dependencies: 65.3
### Description: Perform comprehensive testing of the installation process using Bun across different operating systems.
### Details:
Test installation on Windows, macOS, and Linux using Bun. Verify that all Taskmaster features work correctly when installed via Bun. Document any issues encountered and implement fixes as needed.

## 5. Test Taskmaster operation with Bun [pending]
## 5. Test Taskmaster operation with Bun [done]
### Dependencies: 65.4
### Description: Ensure all Taskmaster functionality works correctly when running under Bun.
### Details:
Test all Taskmaster commands and features when running with Bun. Compare performance metrics between Node.js and Bun. Identify and fix any runtime issues specific to Bun. Ensure all plugins and extensions are compatible.

## 6. Update documentation for Bun support [pending]
## 6. Update documentation for Bun support [done]
### Dependencies: 65.4, 65.5
### Description: Update all relevant documentation to include information about installing and running Taskmaster with Bun.
### Details:

@@ -15,29 +15,92 @@ This task has two main components:\n\n1. Add `--json` flag to all relevant CLI c
### Dependencies: None
### Description: Modify the command handlers for `task-master next` and `task-master show <id>` to recognize and handle a `--json` flag. When the flag is present, output the raw data received from MCP tools directly as JSON.
### Details:
Use a CLI argument parsing library (e.g., argparse, click, commander) to add the `--json` boolean flag. In the command execution logic, check if the flag is set. If true, serialize the data object (before any human-readable formatting) into a JSON string and print it to stdout. If false, proceed with the existing formatting logic. Focus on these two commands first to establish the pattern.
1. Update the CLI argument parser to add the `--json` boolean flag to both commands
2. Create a `formatAsJson` utility function in `src/utils/output.js` that takes a data object and returns a properly formatted JSON string
3. In the command handler functions (`src/commands/next.js` and `src/commands/show.js`), add a conditional check for the `--json` flag
4. If the flag is set, call the `formatAsJson` function with the raw data object and print the result
5. If the flag is not set, continue with the existing human-readable formatting logic
6. Ensure proper error handling for JSON serialization failures
7. Update the command help text in both files to document the new flag

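A sketch of the pattern the steps above describe, assuming Commander-style option parsing; `handleNextCommand`, `fetchNextTask`, and `renderHumanReadable` are hypothetical names used only for illustration, while `formatAsJson` and the file paths come from the plan above.

```js
// src/utils/output.js (sketch of the planned utility, not code from this commit)
export function formatAsJson(data) {
  try {
    return JSON.stringify(data, null, 2);
  } catch (err) {
    // Step 6: report serialization failures as a JSON error object
    return JSON.stringify({ error: 'Failed to serialize output', message: err.message });
  }
}

// src/commands/next.js (handler excerpt; helper names are placeholders)
import { formatAsJson } from '../utils/output.js';

export async function handleNextCommand(options, fetchNextTask, renderHumanReadable) {
  const task = await fetchNextTask(); // raw data, before human-readable formatting
  if (options.json) {
    console.log(formatAsJson(task)); // --json path: print raw data as JSON
    return;
  }
  renderHumanReadable(task); // existing formatting path, unchanged
}
```
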
## 2. Extend JSON Output to All Relevant Commands and Ensure Schema Consistency [pending]
### Dependencies: 67.1
### Description: Apply the JSON output pattern established in subtask 1 to all other relevant Taskmaster CLI commands that display data (e.g., `list`, `status`, etc.). Ensure the JSON structure is consistent where applicable (e.g., task objects should have the same fields). Add help text mentioning the `--json` flag for each modified command.
### Details:
Identify all commands that output structured data. Refactor the JSON output logic into a reusable utility function if possible. Define a standard schema for common data types like tasks. Update the help documentation for each command to include the `--json` flag description. Ensure error outputs are also handled appropriately (e.g., potentially outputting JSON error objects).
1. Create a JSON schema definition file at `src/schemas/task.json` to define the standard structure for task objects
2. Modify the following command files to support the `--json` flag:
- `src/commands/list.js`
- `src/commands/status.js`
- `src/commands/search.js`
- `src/commands/summary.js`
3. Refactor the `formatAsJson` utility to handle different data types (single task, task array, status object, etc.)
4. Add a `validateJsonSchema` function in `src/utils/validation.js` to ensure output conforms to defined schemas
5. Update each command's help text documentation to include the `--json` flag description
6. Implement consistent error handling for JSON output (using a standard error object format)
7. For list-type commands, ensure array outputs are properly formatted as JSON arrays

## 3. Create `install-keybindings` Command Structure and OS Detection [pending]
### Dependencies: None
### Description: Set up the basic structure for the new `task-master install-keybindings` command. Implement logic to detect the user's operating system (Linux, macOS, Windows) and determine the default path to Cursor's `keybindings.json` file.
### Details:
Add a new command entry point using the CLI framework. Use standard library functions (e.g., `os.platform()` in Node, `platform.system()` in Python) to detect the OS. Define constants or a configuration map for the default `keybindings.json` paths for each supported OS. Handle cases where the path might vary (e.g., different installation methods for Cursor). Add basic help text for the new command.
1. Create a new command file at `src/commands/install-keybindings.js`
2. Register the command in the main CLI entry point (`src/index.js`)
3. Implement OS detection using `os.platform()` in Node.js
4. Define the following path constants in `src/config/paths.js`:
- Windows: `%APPDATA%\Cursor\User\keybindings.json`
- macOS: `~/Library/Application Support/Cursor/User/keybindings.json`
- Linux: `~/.config/Cursor/User/keybindings.json`
5. Create a `getCursorKeybindingsPath()` function that returns the appropriate path based on detected OS
6. Add path override capability via a `--path` command line option
7. Implement proper error handling for unsupported operating systems
8. Add detailed help text explaining the command's purpose and options

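A sketch of the planned `getCursorKeybindingsPath()` helper, using the three default paths listed above and Node's `os.platform()`; passing the `--path` override in as an argument is an illustrative choice, not something specified by the task.

```js
// Sketch of the OS detection described above (planned code, not from this commit).
import os from 'node:os';
import path from 'node:path';

export function getCursorKeybindingsPath(overridePath) {
  if (overridePath) return overridePath; // honour the planned --path option

  switch (os.platform()) {
    case 'win32':
      return path.join(process.env.APPDATA ?? '', 'Cursor', 'User', 'keybindings.json');
    case 'darwin':
      return path.join(os.homedir(), 'Library', 'Application Support', 'Cursor', 'User', 'keybindings.json');
    case 'linux':
      return path.join(os.homedir(), '.config', 'Cursor', 'User', 'keybindings.json');
    default:
      throw new Error(`Unsupported operating system: ${os.platform()}`);
  }
}
```
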
## 4. Implement Keybinding File Handling and Backup Logic [pending]
### Dependencies: 67.3
### Description: Implement the core logic within the `install-keybindings` command to read the target `keybindings.json` file. If it exists, create a backup. If it doesn't exist, create a new file with an empty JSON array `[]`. Prepare the structure to add new keybindings.
### Details:
Use file system modules to check for file existence, read, write, and copy files. Implement a backup mechanism (e.g., copy `keybindings.json` to `keybindings.json.bak`). Handle potential file I/O errors gracefully (e.g., permissions issues). Parse the existing JSON content; if parsing fails, report an error and potentially abort. Ensure the file is created with `[]` if it's missing.
1. Create a `KeybindingsManager` class in `src/utils/keybindings.js` with the following methods:
- `checkFileExists(path)`: Verify if the keybindings file exists
- `createBackup(path)`: Copy existing file to `keybindings.json.bak`
- `readKeybindings(path)`: Read and parse the JSON file
- `writeKeybindings(path, data)`: Serialize and write data to the file
- `createEmptyFile(path)`: Create a new file with `[]` content
2. In the command handler, use these methods to:
- Check if the target file exists
- Create a backup if it does (with timestamp in filename)
- Read existing keybindings or create an empty file
- Parse the JSON content with proper error handling
3. Add a `--no-backup` flag to skip backup creation
4. Implement verbose logging with a `--verbose` flag
5. Handle all potential file system errors (permissions, disk space, etc.)
6. Add a `--dry-run` option that shows what would be done without making changes

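A partial sketch of the planned `KeybindingsManager` (read and backup paths only), using Node's `fs/promises`; the method names follow the list above, while the timestamped backup filename is an assumption.

```js
// Sketch only, not code from this commit.
import { access, copyFile, readFile, writeFile } from 'node:fs/promises';

export class KeybindingsManager {
  async checkFileExists(filePath) {
    try {
      await access(filePath);
      return true;
    } catch {
      return false;
    }
  }

  async createBackup(filePath) {
    // Timestamped backup, e.g. keybindings.json.2025-05-20T12-00-00-000Z.bak
    const stamp = new Date().toISOString().replace(/[:.]/g, '-');
    const backupPath = `${filePath}.${stamp}.bak`;
    await copyFile(filePath, backupPath);
    return backupPath;
  }

  async readKeybindings(filePath) {
    if (!(await this.checkFileExists(filePath))) {
      await writeFile(filePath, '[]\n', 'utf8'); // create missing file with []
      return [];
    }
    const raw = await readFile(filePath, 'utf8');
    return JSON.parse(raw); // caller reports parse errors and aborts
  }
}
```
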
## 5. Add Taskmaster Keybindings, Prevent Duplicates, and Support Customization [pending]
### Dependencies: 67.4
### Description: Define the specific Taskmaster keybindings (e.g., next task to clipboard, status update, open agent chat) and implement the logic to merge them into the user's `keybindings.json` data. Prevent adding duplicate keybindings (based on command ID or key combination). Add support for custom key combinations via command flags.
### Details:
Define the desired keybindings as a list of JSON objects following Cursor's format. Before adding, iterate through the existing keybindings (parsed in subtask 4) to check if a Taskmaster keybinding with the same command or key combination already exists. If not, append the new keybinding to the list. Add command-line flags (e.g., `--next-key='ctrl+alt+n'`) to allow users to override default key combinations. Serialize the updated list back to JSON and write it to the `keybindings.json` file.
1. Define default Taskmaster keybindings in `src/config/default-keybindings.js` as an array of objects with:
- `key`: Default key combination (e.g., `"ctrl+alt+n"`)
- `command`: Cursor command ID (e.g., `"taskmaster.nextTask"`)
- `when`: Context when keybinding is active (e.g., `"editorTextFocus"`)
- `args`: Any command arguments as an object
- `description`: Human-readable description of what the keybinding does
2. Implement the following keybindings:
- Next task to clipboard: `ctrl+alt+n`
- Update task status: `ctrl+alt+u`
- Open agent chat with task context: `ctrl+alt+a`
- Show task details: `ctrl+alt+d`
3. Add command-line options to customize each keybinding:
- `--next-key="ctrl+alt+n"`
- `--update-key="ctrl+alt+u"`
- `--agent-key="ctrl+alt+a"`
- `--details-key="ctrl+alt+d"`
4. Implement a `mergeKeybindings(existing, new)` function that:
- Checks for duplicates based on command ID
- Checks for key combination conflicts
- Warns about conflicts but allows override with `--force` flag
- Preserves existing non-Taskmaster keybindings
5. Add a `--reset` flag to remove all existing Taskmaster keybindings before adding new ones
6. Add a `--list` option to display currently installed Taskmaster keybindings
7. Implement an `--uninstall` option to remove all Taskmaster keybindings

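A sketch of the merge logic described in item 4 above, assuming plain keybinding objects with `key` and `command` fields; note that `new` is a reserved word in JavaScript, so the second parameter is renamed `incoming` here.

```js
// Sketch of the planned mergeKeybindings behaviour, not code from this commit.
export function mergeKeybindings(existing, incoming, { force = false, warn = console.warn } = {}) {
  const merged = [...existing]; // preserve existing (including non-Taskmaster) entries

  for (const binding of incoming) {
    const duplicate = merged.find((b) => b.command === binding.command);
    const keyConflict = merged.find((b) => b.key === binding.key && b.command !== binding.command);

    if (duplicate) continue; // same command already installed, skip
    if (keyConflict && !force) {
      warn(`Key "${binding.key}" already bound to "${keyConflict.command}"; use --force to override`);
      continue;
    }
    merged.push(binding);
  }
  return merged;
}
```
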
@@ -1,6 +1,6 @@
# Task ID: 77
# Title: Implement AI Usage Telemetry for Taskmaster (with external analytics endpoint)
# Status: in-progress
# Status: done
# Dependencies: None
# Priority: medium
# Description: Capture detailed AI usage data (tokens, costs, models, commands) within Taskmaster and send this telemetry to an external, closed-source analytics backend for usage analysis, profitability measurement, and pricing optimization.
@@ -536,13 +536,13 @@ async function callAiService(params) {
### Details:
Update the provider functions in `src/ai-providers/google.js` to ensure they return telemetry-compatible results:\n\n1. **`generateGoogleText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateGoogleObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamGoogleText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.

## 14. Update openai.js for Telemetry Compatibility [pending]
## 14. Update openai.js for Telemetry Compatibility [done]
### Dependencies: None
### Description: Modify src/ai-providers/openai.js functions to return usage data.
### Details:
Update the provider functions in `src/ai-providers/openai.js` to ensure they return telemetry-compatible results:\n\n1. **`generateOpenAIText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateOpenAIObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamOpenAIText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.

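A sketch of the telemetry-compatible return shape these subtasks describe, not the actual provider code in this commit; it assumes the Vercel AI SDK's `generateText` result exposes token counts on `result.usage` (the exact field names vary between SDK versions), and model setup is elided.

```js
// Illustrative only: map the AI SDK usage object onto the
// { text, usage: { inputTokens, outputTokens } } shape the task requires.
import { generateText } from 'ai';

export async function generateOpenAIText({ model, messages }) {
  const result = await generateText({ model, messages });

  return {
    text: result.text,
    usage: {
      // Field names depend on the AI SDK version in use, hence the fallbacks.
      inputTokens: result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0,
      outputTokens: result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0
    }
  };
}
```
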
## 15. Update openrouter.js for Telemetry Compatibility [pending]
## 15. Update openrouter.js for Telemetry Compatibility [done]
### Dependencies: None
### Description: Modify src/ai-providers/openrouter.js functions to return usage data.
### Details:
@@ -554,13 +554,13 @@ Update the provider functions in `src/ai-providers/openrouter.js` to ensure they
### Details:
Update the provider functions in `src/ai-providers/perplexity.js` to ensure they return telemetry-compatible results:\n\n1. **`generatePerplexityText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generatePerplexityObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamPerplexityText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.

## 17. Update xai.js for Telemetry Compatibility [pending]
## 17. Update xai.js for Telemetry Compatibility [done]
### Dependencies: None
### Description: Modify src/ai-providers/xai.js functions to return usage data.
### Details:
Update the provider functions in `src/ai-providers/xai.js` to ensure they return telemetry-compatible results:\n\n1. **`generateXaiText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateXaiObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamXaiText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.

## 18. Create dedicated telemetry transmission module [pending]
## 18. Create dedicated telemetry transmission module [done]
### Dependencies: 77.1, 77.3
### Description: Implement a separate module for handling telemetry transmission logic
### Details:

@@ -1,144 +0,0 @@
# Task ID: 81
# Title: Task #81: Implement Comprehensive Local Telemetry System with Future Server Integration Capability
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Expand the existing telemetry system to capture additional metrics about feature usage, performance, and user behavior patterns, implementing local storage and aggregation of telemetry data with the capability for future server integration.
# Details:
This task builds upon the existing telemetry infrastructure (Tasks #77 and #80) to provide more comprehensive insights into how users interact with the application, while storing data locally until a server endpoint becomes available.

Key implementation details:
1. Identify and implement additional telemetry data points:
- Command execution frequency and timing metrics
- Feature usage patterns (which commands/features are most/least used)
- Performance metrics (execution time, memory usage, etc.)
- Error rates and types
- Session duration and activity patterns
- System environment information (OS, Node version, etc.)

2. Implement a local telemetry storage system:
- Create a robust local storage mechanism to hold telemetry data indefinitely
- Implement data aggregation to combine similar events and reduce storage size
- Add data retention policies to prevent excessive local storage usage
- Implement configurable storage limits and cleanup procedures
- Design the storage format to be compatible with future server transmission

3. Add privacy-preserving mechanisms:
- Ensure all personally identifiable information is properly anonymized
- Implement data minimization principles (only collect what's necessary)
- Add user-configurable telemetry levels (basic, enhanced, full)
- Provide clear documentation on what data is collected and how it's used

4. Design for future server integration:
- Create a pluggable transmission architecture that can be connected to a server later
- Define API contracts and data formats for future server endpoints
- Add configuration options for server URLs and authentication that will be used later
- Implement feature flags to easily enable server transmission when available

5. Add telemetry debugging capabilities:
- Create a developer mode to view telemetry data being collected
- Implement logging of telemetry events (when in debug mode)
- Add commands to export telemetry data for manual analysis
- Create visualization tools for local telemetry data

6. Focus on user-facing benefits:
- Implement personal usage dashboards showing the user's own patterns
- Add productivity insights based on collected telemetry
- Create features that allow users to optimize their workflow based on their usage data
- Ensure all telemetry collection provides immediate value to the user

# Test Strategy:
The testing strategy for the expanded telemetry system should be comprehensive and cover all aspects of the implementation:

1. Unit Tests:
- Test each telemetry collection function in isolation
- Verify proper anonymization of sensitive data
- Test aggregation logic with various input scenarios
- Validate local storage mechanisms with different data volumes
- Test data retention and cleanup policies

2. Integration Tests:
- Verify telemetry data is properly stored locally
- Test the complete flow from data collection to local storage
- Validate that the storage format is suitable for future server transmission
- Test different application states (startup, shutdown, crash recovery)
- Verify proper handling of storage failures

3. End-to-End Tests:
- Create automated E2E tests that perform various user actions and verify telemetry is captured
- Test with simulated long-term usage to verify storage efficiency
- Verify that aggregated data accurately represents the performed actions

4. Performance Tests:
- Measure the performance impact of the expanded telemetry system
- Test with large volumes of telemetry data to ensure efficient handling
- Verify memory usage remains within acceptable limits
- Test CPU utilization during telemetry collection and storage operations

5. Manual Testing:
- Verify telemetry debug mode correctly displays collected data
- Test different telemetry level configurations
- Manually verify the accuracy of collected metrics
- Test the export functionality and analyze the exported data
- Validate that user-facing insights and dashboards provide accurate and useful information

6. Privacy Compliance Testing:
- Verify no PII is stored without proper anonymization
- Test opt-out functionality works correctly
- Ensure telemetry levels properly restrict data collection as configured

7. Regression Testing:
- Verify existing functionality continues to work with the expanded telemetry
- Ensure the system is designed to be compatible with future server integration

8. User Experience Testing:
- Test the usability of personal dashboards and insights features
- Gather feedback on the usefulness of telemetry-based recommendations
- Verify users can easily understand their own usage patterns

# Subtasks:
## 1. Implement Additional Telemetry Data Collection Points [pending]
### Dependencies: None
### Description: Extend the telemetry system to capture new metrics including command execution frequency, feature usage patterns, performance metrics, error rates, session data, and system environment information. [Updated: 5/8/2025] [Updated: 5/8/2025] [Updated: 5/8/2025]
### Details:
Create new telemetry event types and collection points throughout the codebase. Implement hooks in the command execution pipeline to track timing and frequency. Add performance monitoring for key operations using high-resolution timers. Capture system environment data at startup. Implement error tracking that records error types and frequencies. Add session tracking with start/end events and periodic heartbeats.
<info added on 2025-05-08T22:57:23.259Z>
This is a test note added via the MCP tool. The telemetry collection system should be thoroughly tested before implementation.
</info added on 2025-05-08T22:57:23.259Z>
<info added on 2025-05-08T22:59:29.818Z>
For future server integration, Prometheus time-series database with its companion storage solutions (like Cortex or Thanos) would be an excellent choice for handling our telemetry data. The local telemetry collection system should be designed with compatible data structures and metrics formatting that will allow seamless export to Prometheus once server-side infrastructure is in place. This approach would provide powerful querying capabilities, visualization options through Grafana, and scalable long-term storage. Consider implementing the OpenMetrics format locally to ensure compatibility with the Prometheus ecosystem.
</info added on 2025-05-08T22:59:29.818Z>
<info added on 2025-05-08T23:02:59.692Z>
Prometheus would be an excellent choice for server-side telemetry storage and analysis. When designing the local telemetry collection system, we should structure our metrics and events to be compatible with Prometheus' data model (time series with key-value pairs). This would allow for straightforward export to Prometheus once server infrastructure is established. For long-term storage, companion solutions like Cortex or Thanos could extend Prometheus' capabilities, enabling historical analysis and scalable retention. Additionally, adopting the OpenMetrics format locally would ensure seamless integration with the broader Prometheus ecosystem, including visualization through Grafana dashboards.
</info added on 2025-05-08T23:02:59.692Z>

## 2. Build Robust Local Telemetry Storage System [pending]
### Dependencies: None
### Description: Create a persistent local storage mechanism to hold telemetry data indefinitely with aggregation capabilities to combine similar events and reduce storage requirements.
### Details:
Implement a persistent local store using SQLite or similar lightweight database. Create data schemas for different telemetry types. Develop aggregation functions that can combine similar events (e.g., multiple instances of the same command) into summary statistics. Implement data retention policies to prevent excessive storage usage. Add serialization/deserialization for telemetry objects. Design the storage format to be compatible with future server transmission needs.

## 3. Design Server Transmission Architecture for Future Implementation [pending]
### Dependencies: None
### Description: Create a pluggable architecture for future server transmission capabilities while maintaining local-only functionality for now.
### Details:
Design a modular transmission system with clear interfaces that can be implemented later when a server becomes available. Define data formats and API contracts for future server endpoints. Add configuration options for server URLs and authentication that will be used in the future. Implement feature flags to easily enable server transmission when available. Create a transmission queue design that can be activated later. Document the architecture for future implementation.

## 4. Implement Privacy Controls and User Configuration [pending]
### Dependencies: None
### Description: Add privacy-preserving mechanisms including data anonymization, minimization principles, and user-configurable telemetry levels.
### Details:
Create a telemetry sanitization layer that removes or hashes PII before storage. Implement three telemetry levels (basic, enhanced, full) with clear documentation of what each includes. Add user settings UI for controlling telemetry levels. Create a first-run experience that explains telemetry and requests user consent. Implement runtime filtering of telemetry events based on user settings.

## 5. Add Telemetry Debugging and Local Analysis Tools [pending]
### Dependencies: None
### Description: Create developer tools for debugging telemetry including a developer mode to view collected data, logging capabilities, and local data analysis features.
### Details:
Implement a developer console command to toggle telemetry debug mode. Create a UI panel that displays collected telemetry data when in debug mode. Add detailed logging of telemetry events to the application log when debugging is enabled. Create commands to export telemetry data in various formats (JSON, CSV) for manual analysis. Implement basic visualization tools for local telemetry data to help users understand their own usage patterns.

## 6. Develop User-Facing Telemetry Benefits [pending]
### Dependencies: 81.1, 81.2
### Description: Create features that provide immediate value to users based on their telemetry data, focusing on personal insights and workflow optimization.
### Details:
Implement a personal usage dashboard that visualizes the user's command usage patterns, feature adoption, and productivity trends. Create a 'productivity insights' feature that offers personalized recommendations based on usage patterns. Add workflow optimization suggestions that help users discover more efficient ways to use the application. Develop weekly/monthly usage reports that users can view to track their own progress. Ensure all telemetry collection has a direct benefit to the user in the absence of server-side analysis.

57
tasks/task_088.txt
Normal file
@@ -0,0 +1,57 @@
# Task ID: 88
# Title: Enhance Add-Task Functionality to Consider All Task Dependencies
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Improve the add-task feature to accurately account for all dependencies among tasks, ensuring proper task ordering and execution.
# Details:
1. Review current implementation of add-task functionality.
2. Identify existing mechanisms for handling task dependencies.
3. Modify add-task to recursively analyze and incorporate all dependencies.
4. Ensure that dependencies are resolved in the correct order during task execution.
5. Update documentation to reflect changes in dependency handling.
6. Consider edge cases such as circular dependencies and handle them appropriately.
7. Optimize performance to ensure efficient dependency resolution, especially for projects with a large number of tasks.
8. Integrate with existing validation and error handling mechanisms (from Task 87) to provide clear feedback if dependencies cannot be resolved.
9. Test thoroughly with various dependency scenarios to ensure robustness.

# Test Strategy:
1. Create test cases with simple linear dependencies to verify correct ordering.
2. Develop test cases with complex, nested dependencies to ensure recursive resolution works correctly.
3. Include tests for edge cases such as circular dependencies, verifying appropriate error messages are displayed.
4. Measure performance with large sets of tasks and dependencies to ensure efficiency.
5. Conduct integration testing with other components that rely on task dependencies.
6. Perform manual code reviews to validate implementation against requirements.
7. Execute automated tests to verify no regressions in existing functionality.

# Subtasks:
## 1. Review Current Add-Task Implementation and Identify Dependency Mechanisms [pending]
### Dependencies: None
### Description: Examine the existing add-task functionality to understand how task dependencies are currently handled.
### Details:
Conduct a code review of the add-task feature. Document any existing mechanisms for handling task dependencies.

## 2. Modify Add-Task to Recursively Analyze Dependencies [pending]
### Dependencies: 88.1
### Description: Update the add-task functionality to recursively analyze and incorporate all task dependencies.
### Details:
Implement a recursive algorithm that identifies and incorporates all dependencies for a given task. Ensure it handles nested dependencies correctly.

## 3. Ensure Correct Order of Dependency Resolution [pending]
### Dependencies: 88.2
### Description: Modify the add-task functionality to ensure that dependencies are resolved in the correct order during task execution.
### Details:
Implement logic to sort and execute tasks based on their dependency order. Handle cases where multiple tasks depend on each other.

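A sketch of one way to implement the ordering and circular-dependency handling described in subtasks 2 and 3 (illustrative, not code from this commit): a depth-first topological sort over a `{ id, dependencies }` task map, which is the task shape Taskmaster's tasks.json uses.

```js
// Resolve all (transitive) dependencies of rootId into execution order,
// throwing a descriptive error when a circular dependency is detected.
export function resolveDependencyOrder(tasksById, rootId) {
  const ordered = [];
  const visited = new Set(); // fully processed tasks
  const inStack = new Set(); // tasks on the current DFS path

  function visit(id, path) {
    if (visited.has(id)) return;
    if (inStack.has(id)) {
      throw new Error(`Circular dependency detected: ${[...path, id].join(' -> ')}`);
    }
    inStack.add(id);
    const task = tasksById[id];
    if (!task) throw new Error(`Unknown dependency: ${id}`);
    for (const dep of task.dependencies ?? []) {
      visit(dep, [...path, id]);
    }
    inStack.delete(id);
    visited.add(id);
    ordered.push(id); // dependencies land before the task that needs them
  }

  visit(rootId, []);
  return ordered;
}
```
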
## 4. Integrate with Existing Validation and Error Handling [pending]
### Dependencies: 88.3
### Description: Update the add-task functionality to integrate with existing validation and error handling mechanisms (from Task 87).
### Details:
Modify the code to provide clear feedback if dependencies cannot be resolved. Ensure that circular dependencies are detected and handled appropriately.

## 5. Optimize Performance for Large Projects [pending]
### Dependencies: 88.4
### Description: Optimize the add-task functionality to ensure efficient dependency resolution, especially for projects with a large number of tasks.
### Details:
Profile and optimize the recursive dependency analysis algorithm. Implement caching or other performance improvements as needed.

220
tasks/tasks.json
File diff suppressed because one or more lines are too long