Compare commits
12 Commits
docs/auto-
...
feat-gener
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
58cc143aef | ||
|
|
3aac7ac349 | ||
|
|
f68330efb3 | ||
|
|
1d197fe9c2 | ||
|
|
7660a29a1a | ||
|
|
0aaa105021 | ||
|
|
6b15788c58 | ||
|
|
a2de49dd90 | ||
|
|
2063dc4b7d | ||
|
|
7e6319a56f | ||
|
|
b0504a00d5 | ||
|
|
b16023ab2f |
@@ -1,11 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": minor
|
|
||||||
---
|
|
||||||
|
|
||||||
Add Codex CLI provider with OAuth authentication
|
|
||||||
|
|
||||||
- Added codex-cli provider for GPT-5 and GPT-5-Codex models (272K input / 128K output)
|
|
||||||
- OAuth-first authentication via `codex login` - no API key required
|
|
||||||
- Optional OPENAI_CODEX_API_KEY support
|
|
||||||
- Codebase analysis capabilities automatically enabled
|
|
||||||
- Command-specific settings and approval/sandbox modes
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": patch
|
|
||||||
---
|
|
||||||
|
|
||||||
Do a quick fix on build
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": patch
|
|
||||||
---
|
|
||||||
|
|
||||||
Fix MCP connection errors caused by deprecated generateTaskFiles calls. Resolves "Cannot read properties of null (reading 'toString')" errors when using MCP tools for task management operations.
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": patch
|
|
||||||
---
|
|
||||||
|
|
||||||
Fix MCP server error when file parameter not provided - now properly constructs default tasks.json path instead of failing with 'tasksJsonPath is required' error.
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": minor
|
|
||||||
---
|
|
||||||
|
|
||||||
Add RPG (Repository Planning Graph) method template for structured PRD creation. The new `example_prd_rpg.txt` template teaches AI agents and developers the RPG methodology through embedded instructions, inline good/bad examples, and XML-style tags for structure. This template enables creation of dependency-aware PRDs that automatically generate topologically-ordered task graphs when parsed with Task Master.
|
|
||||||
|
|
||||||
Key features:
|
|
||||||
- Method-as-template: teaches RPG principles (dual-semantics, explicit dependencies, topological order) while being used
|
|
||||||
- Inline instructions at decision points guide AI through each section
|
|
||||||
- Good/bad examples for immediate pattern matching
|
|
||||||
- Flexible plain-text format with XML-style tags for parseability
|
|
||||||
- Critical dependency-graph section ensures correct task ordering
|
|
||||||
- Automatic inclusion during `task-master init`
|
|
||||||
- Comprehensive documentation at [docs.task-master.dev/capabilities/rpg-method](https://docs.task-master.dev/capabilities/rpg-method)
|
|
||||||
- Tool recommendations for code-context-aware PRD creation (Claude Code, Cursor, Gemini CLI, Codex/Grok)
|
|
||||||
|
|
||||||
The RPG template complements the existing `example_prd.txt` and provides a more structured approach for complex projects requiring clear module boundaries and dependency chains.
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
{
|
|
||||||
"mode": "exit",
|
|
||||||
"tag": "rc",
|
|
||||||
"initialVersions": {
|
|
||||||
"task-master-ai": "0.27.3",
|
|
||||||
"docs": "0.0.4",
|
|
||||||
"extension": "0.25.4"
|
|
||||||
},
|
|
||||||
"changesets": [
|
|
||||||
"brave-lions-sing",
|
|
||||||
"chore-fix-docs",
|
|
||||||
"cursor-slash-commands",
|
|
||||||
"curvy-weeks-flow",
|
|
||||||
"easy-spiders-wave",
|
|
||||||
"fix-mcp-connection-errors",
|
|
||||||
"fix-mcp-default-tasks-path",
|
|
||||||
"flat-cities-say",
|
|
||||||
"forty-tables-invite",
|
|
||||||
"gentle-cats-dance",
|
|
||||||
"mcp-timeout-configuration",
|
|
||||||
"petite-ideas-grab",
|
|
||||||
"silly-pandas-find",
|
|
||||||
"sweet-maps-rule",
|
|
||||||
"whole-pigs-say"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": patch
|
|
||||||
---
|
|
||||||
|
|
||||||
Fix complexity score not showing for `task-master show` and `task-master list`
|
|
||||||
|
|
||||||
- Added complexity score on "next task" when running `task-master list`
|
|
||||||
- Added colors to complexity to reflect complexity (easy, medium, hard)
|
|
||||||
@@ -1,511 +0,0 @@
|
|||||||
<rpg-method>
|
|
||||||
# Repository Planning Graph (RPG) Method - PRD Template
|
|
||||||
|
|
||||||
This template teaches you (AI or human) how to create structured, dependency-aware PRDs using the RPG methodology from Microsoft Research. The key insight: separate WHAT (functional) from HOW (structural), then connect them with explicit dependencies.
|
|
||||||
|
|
||||||
## Core Principles
|
|
||||||
|
|
||||||
1. **Dual-Semantics**: Think functional (capabilities) AND structural (code organization) separately, then map them
|
|
||||||
2. **Explicit Dependencies**: Never assume - always state what depends on what
|
|
||||||
3. **Topological Order**: Build foundation first, then layers on top
|
|
||||||
4. **Progressive Refinement**: Start broad, refine iteratively
|
|
||||||
|
|
||||||
## How to Use This Template
|
|
||||||
|
|
||||||
- Follow the instructions in each `<instruction>` block
|
|
||||||
- Look at `<example>` blocks to see good vs bad patterns
|
|
||||||
- Fill in the content sections with your project details
|
|
||||||
- The AI reading this will learn the RPG method by following along
|
|
||||||
- Task Master will parse the resulting PRD into dependency-aware tasks
|
|
||||||
|
|
||||||
## Recommended Tools for Creating PRDs
|
|
||||||
|
|
||||||
When using this template to **create** a PRD (not parse it), use **code-context-aware AI assistants** for best results:
|
|
||||||
|
|
||||||
**Why?** The AI needs to understand your existing codebase to make good architectural decisions about modules, dependencies, and integration points.
|
|
||||||
|
|
||||||
**Recommended tools:**
|
|
||||||
- **Claude Code** (claude-code CLI) - Best for structured reasoning and large contexts
|
|
||||||
- **Cursor/Windsurf** - IDE integration with full codebase context
|
|
||||||
- **Gemini CLI** (gemini-cli) - Massive context window for large codebases
|
|
||||||
- **Codex/Grok CLI** - Strong code generation with context awareness
|
|
||||||
|
|
||||||
**Note:** Once your PRD is created, `task-master parse-prd` works with any configured AI model - it just needs to read the PRD text itself, not your codebase.
|
|
||||||
</rpg-method>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<overview>
|
|
||||||
<instruction>
|
|
||||||
Start with the problem, not the solution. Be specific about:
|
|
||||||
- What pain point exists?
|
|
||||||
- Who experiences it?
|
|
||||||
- Why existing solutions don't work?
|
|
||||||
- What success looks like (measurable outcomes)?
|
|
||||||
|
|
||||||
Keep this section focused - don't jump into implementation details yet.
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Problem Statement
|
|
||||||
[Describe the core problem. Be concrete about user pain points.]
|
|
||||||
|
|
||||||
## Target Users
|
|
||||||
[Define personas, their workflows, and what they're trying to achieve.]
|
|
||||||
|
|
||||||
## Success Metrics
|
|
||||||
[Quantifiable outcomes. Examples: "80% task completion via autopilot", "< 5% manual intervention rate"]
|
|
||||||
|
|
||||||
</overview>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<functional-decomposition>
|
|
||||||
<instruction>
|
|
||||||
Now think about CAPABILITIES (what the system DOES), not code structure yet.
|
|
||||||
|
|
||||||
Step 1: Identify high-level capability domains
|
|
||||||
- Think: "What major things does this system do?"
|
|
||||||
- Examples: Data Management, Core Processing, Presentation Layer
|
|
||||||
|
|
||||||
Step 2: For each capability, enumerate specific features
|
|
||||||
- Use explore-exploit strategy:
|
|
||||||
* Exploit: What features are REQUIRED for core value?
|
|
||||||
* Explore: What features make this domain COMPLETE?
|
|
||||||
|
|
||||||
Step 3: For each feature, define:
|
|
||||||
- Description: What it does in one sentence
|
|
||||||
- Inputs: What data/context it needs
|
|
||||||
- Outputs: What it produces/returns
|
|
||||||
- Behavior: Key logic or transformations
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Capability: Data Validation
|
|
||||||
Feature: Schema validation
|
|
||||||
- Description: Validate JSON payloads against defined schemas
|
|
||||||
- Inputs: JSON object, schema definition
|
|
||||||
- Outputs: Validation result (pass/fail) + error details
|
|
||||||
- Behavior: Iterate fields, check types, enforce constraints
|
|
||||||
|
|
||||||
Feature: Business rule validation
|
|
||||||
- Description: Apply domain-specific validation rules
|
|
||||||
- Inputs: Validated data object, rule set
|
|
||||||
- Outputs: Boolean + list of violated rules
|
|
||||||
- Behavior: Execute rules sequentially, short-circuit on failure
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
Capability: validation.js
|
|
||||||
(Problem: This is a FILE, not a CAPABILITY. Mixing structure into functional thinking.)
|
|
||||||
|
|
||||||
Capability: Validation
|
|
||||||
Feature: Make sure data is good
|
|
||||||
(Problem: Too vague. No inputs/outputs. Not actionable.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Capability Tree
|
|
||||||
|
|
||||||
### Capability: [Name]
|
|
||||||
[Brief description of what this capability domain covers]
|
|
||||||
|
|
||||||
#### Feature: [Name]
|
|
||||||
- **Description**: [One sentence]
|
|
||||||
- **Inputs**: [What it needs]
|
|
||||||
- **Outputs**: [What it produces]
|
|
||||||
- **Behavior**: [Key logic]
|
|
||||||
|
|
||||||
#### Feature: [Name]
|
|
||||||
- **Description**:
|
|
||||||
- **Inputs**:
|
|
||||||
- **Outputs**:
|
|
||||||
- **Behavior**:
|
|
||||||
|
|
||||||
### Capability: [Name]
|
|
||||||
...
|
|
||||||
|
|
||||||
</functional-decomposition>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<structural-decomposition>
|
|
||||||
<instruction>
|
|
||||||
NOW think about code organization. Map capabilities to actual file/folder structure.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
1. Each capability maps to a module (folder or file)
|
|
||||||
2. Features within a capability map to functions/classes
|
|
||||||
3. Use clear module boundaries - each module has ONE responsibility
|
|
||||||
4. Define what each module exports (public interface)
|
|
||||||
|
|
||||||
The goal: Create a clear mapping between "what it does" (functional) and "where it lives" (structural).
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Capability: Data Validation
|
|
||||||
→ Maps to: src/validation/
    ├── schema-validator.js   (Schema validation feature)
    ├── rule-validator.js     (Business rule validation feature)
    └── index.js              (Public exports)
|
|
||||||
|
|
||||||
Exports:
|
|
||||||
- validateSchema(data, schema)
|
|
||||||
- validateRules(data, rules)
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
Capability: Data Validation
|
|
||||||
→ Maps to: src/utils.js
|
|
||||||
(Problem: "utils" is not a clear module boundary. Where do I find validation logic?)
|
|
||||||
|
|
||||||
Capability: Data Validation
|
|
||||||
→ Maps to: src/validation/everything.js
|
|
||||||
(Problem: One giant file. Features should map to separate files for maintainability.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Repository Structure
|
|
||||||
|
|
||||||
```
|
|
||||||
project-root/
|
|
||||||
├── src/
|
|
||||||
│ ├── [module-name]/ # Maps to: [Capability Name]
|
|
||||||
│ │ ├── [file].js # Maps to: [Feature Name]
|
|
||||||
│ │ └── index.js # Public exports
|
|
||||||
│ └── [module-name]/
|
|
||||||
├── tests/
|
|
||||||
└── docs/
|
|
||||||
```
|
|
||||||
|
|
||||||
## Module Definitions
|
|
||||||
|
|
||||||
### Module: [Name]
|
|
||||||
- **Maps to capability**: [Capability from functional decomposition]
|
|
||||||
- **Responsibility**: [Single clear purpose]
|
|
||||||
- **File structure**:
|
|
||||||
```
|
|
||||||
module-name/
|
|
||||||
├── feature1.js
|
|
||||||
├── feature2.js
|
|
||||||
└── index.js
|
|
||||||
```
|
|
||||||
- **Exports**:
|
|
||||||
- `functionName()` - [what it does]
|
|
||||||
- `ClassName` - [what it does]
|
|
||||||
|
|
||||||
</structural-decomposition>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<dependency-graph>
|
|
||||||
<instruction>
|
|
||||||
This is THE CRITICAL SECTION for Task Master parsing.
|
|
||||||
|
|
||||||
Define explicit dependencies between modules. This creates the topological order for task execution.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
1. List modules in dependency order (foundation first)
|
|
||||||
2. For each module, state what it depends on
|
|
||||||
3. Foundation modules should have NO dependencies
|
|
||||||
4. Every non-foundation module should depend on at least one other module
|
|
||||||
5. Think: "What must EXIST before I can build this module?"
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Foundation Layer (no dependencies):
|
|
||||||
- error-handling: No dependencies
|
|
||||||
- config-manager: No dependencies
|
|
||||||
- base-types: No dependencies
|
|
||||||
|
|
||||||
Data Layer:
|
|
||||||
- schema-validator: Depends on [base-types, error-handling]
|
|
||||||
- data-ingestion: Depends on [schema-validator, config-manager]
|
|
||||||
|
|
||||||
Core Layer:
|
|
||||||
- algorithm-engine: Depends on [base-types, error-handling]
|
|
||||||
- pipeline-orchestrator: Depends on [algorithm-engine, data-ingestion]
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
- validation: Depends on API
|
|
||||||
- API: Depends on validation
|
|
||||||
(Problem: Circular dependency. This will cause build/runtime issues.)
|
|
||||||
|
|
||||||
- user-auth: Depends on everything
|
|
||||||
(Problem: Too many dependencies. Should be more focused.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Dependency Chain
|
|
||||||
|
|
||||||
### Foundation Layer (Phase 0)
|
|
||||||
No dependencies - these are built first.
|
|
||||||
|
|
||||||
- **[Module Name]**: [What it provides]
|
|
||||||
- **[Module Name]**: [What it provides]
|
|
||||||
|
|
||||||
### [Layer Name] (Phase 1)
|
|
||||||
- **[Module Name]**: Depends on [[module-from-phase-0], [module-from-phase-0]]
|
|
||||||
- **[Module Name]**: Depends on [[module-from-phase-0]]
|
|
||||||
|
|
||||||
### [Layer Name] (Phase 2)
|
|
||||||
- **[Module Name]**: Depends on [[module-from-phase-1], [module-from-foundation]]
|
|
||||||
|
|
||||||
[Continue building up layers...]
|
|
||||||
|
|
||||||
</dependency-graph>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<implementation-roadmap>
|
|
||||||
<instruction>
|
|
||||||
Turn the dependency graph into concrete development phases.
|
|
||||||
|
|
||||||
Each phase should:
|
|
||||||
1. Have clear entry criteria (what must exist before starting)
|
|
||||||
2. Contain tasks that can be parallelized (no inter-dependencies within phase)
|
|
||||||
3. Have clear exit criteria (how do we know phase is complete?)
|
|
||||||
4. Build toward something USABLE (not just infrastructure)
|
|
||||||
|
|
||||||
Phase ordering follows topological sort of dependency graph.
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Phase 0: Foundation
|
|
||||||
Entry: Clean repository
|
|
||||||
Tasks:
|
|
||||||
- Implement error handling utilities
|
|
||||||
- Create base type definitions
|
|
||||||
- Setup configuration system
|
|
||||||
Exit: Other modules can import foundation without errors
|
|
||||||
|
|
||||||
Phase 1: Data Layer
|
|
||||||
Entry: Phase 0 complete
|
|
||||||
Tasks:
|
|
||||||
- Implement schema validator (uses: base types, error handling)
|
|
||||||
- Build data ingestion pipeline (uses: validator, config)
|
|
||||||
Exit: End-to-end data flow from input to validated output
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
Phase 1: Build Everything
|
|
||||||
Tasks:
|
|
||||||
- API
|
|
||||||
- Database
|
|
||||||
- UI
|
|
||||||
- Tests
|
|
||||||
(Problem: No clear focus. Too broad. Dependencies not considered.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Development Phases
|
|
||||||
|
|
||||||
### Phase 0: [Foundation Name]
|
|
||||||
**Goal**: [What foundational capability this establishes]
|
|
||||||
|
|
||||||
**Entry Criteria**: [What must be true before starting]
|
|
||||||
|
|
||||||
**Tasks**:
|
|
||||||
- [ ] [Task name] (depends on: [none or list])
|
|
||||||
- Acceptance criteria: [How we know it's done]
|
|
||||||
- Test strategy: [What tests prove it works]
|
|
||||||
|
|
||||||
- [ ] [Task name] (depends on: [none or list])
|
|
||||||
|
|
||||||
**Exit Criteria**: [Observable outcome that proves phase complete]
|
|
||||||
|
|
||||||
**Delivers**: [What can users/developers do after this phase?]
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Phase 1: [Layer Name]
|
|
||||||
**Goal**:
|
|
||||||
|
|
||||||
**Entry Criteria**: Phase 0 complete
|
|
||||||
|
|
||||||
**Tasks**:
|
|
||||||
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])
|
|
||||||
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])
|
|
||||||
|
|
||||||
**Exit Criteria**:
|
|
||||||
|
|
||||||
**Delivers**:
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
[Continue with more phases...]
|
|
||||||
|
|
||||||
</implementation-roadmap>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<test-strategy>
|
|
||||||
<instruction>
|
|
||||||
Define how testing will be integrated throughout development (TDD approach).
|
|
||||||
|
|
||||||
Specify:
|
|
||||||
1. Test pyramid ratios (unit vs integration vs e2e)
|
|
||||||
2. Coverage requirements
|
|
||||||
3. Critical test scenarios
|
|
||||||
4. Test generation guidelines for Surgical Test Generator
|
|
||||||
|
|
||||||
This section guides the AI when generating tests during the RED phase of TDD.
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Critical Test Scenarios for Data Validation module:
|
|
||||||
- Happy path: Valid data passes all checks
|
|
||||||
- Edge cases: Empty strings, null values, boundary numbers
|
|
||||||
- Error cases: Invalid types, missing required fields
|
|
||||||
- Integration: Validator works with ingestion pipeline
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Test Pyramid
|
|
||||||
|
|
||||||
```
          /\
         /E2E\          ← [X]% (End-to-end, slow, comprehensive)
        /------\
       /Integration\    ← [Y]% (Module interactions)
      /------------\
     /  Unit Tests  \   ← [Z]% (Fast, isolated, deterministic)
    /----------------\
```
|
|
||||||
|
|
||||||
## Coverage Requirements
|
|
||||||
- Line coverage: [X]% minimum
|
|
||||||
- Branch coverage: [X]% minimum
|
|
||||||
- Function coverage: [X]% minimum
|
|
||||||
- Statement coverage: [X]% minimum
|
|
||||||
|
|
||||||
## Critical Test Scenarios
|
|
||||||
|
|
||||||
### [Module/Feature Name]
|
|
||||||
**Happy path**:
|
|
||||||
- [Scenario description]
|
|
||||||
- Expected: [What should happen]
|
|
||||||
|
|
||||||
**Edge cases**:
|
|
||||||
- [Scenario description]
|
|
||||||
- Expected: [What should happen]
|
|
||||||
|
|
||||||
**Error cases**:
|
|
||||||
- [Scenario description]
|
|
||||||
- Expected: [How system handles failure]
|
|
||||||
|
|
||||||
**Integration points**:
|
|
||||||
- [What interactions to test]
|
|
||||||
- Expected: [End-to-end behavior]
|
|
||||||
|
|
||||||
## Test Generation Guidelines
|
|
||||||
[Specific instructions for Surgical Test Generator about what to focus on, what patterns to follow, project-specific test conventions]
|
|
||||||
|
|
||||||
</test-strategy>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<architecture>
|
|
||||||
<instruction>
|
|
||||||
Describe technical architecture, data models, and key design decisions.
|
|
||||||
|
|
||||||
Keep this section AFTER functional/structural decomposition - implementation details come after understanding structure.
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## System Components
|
|
||||||
[Major architectural pieces and their responsibilities]
|
|
||||||
|
|
||||||
## Data Models
|
|
||||||
[Core data structures, schemas, database design]
|
|
||||||
|
|
||||||
## Technology Stack
|
|
||||||
[Languages, frameworks, key libraries]
|
|
||||||
|
|
||||||
**Decision: [Technology/Pattern]**
|
|
||||||
- **Rationale**: [Why chosen]
|
|
||||||
- **Trade-offs**: [What we're giving up]
|
|
||||||
- **Alternatives considered**: [What else we looked at]
|
|
||||||
|
|
||||||
</architecture>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<risks>
|
|
||||||
<instruction>
|
|
||||||
Identify risks that could derail development and how to mitigate them.
|
|
||||||
|
|
||||||
Categories:
|
|
||||||
- Technical risks (complexity, unknowns)
|
|
||||||
- Dependency risks (blocking issues)
|
|
||||||
- Scope risks (creep, underestimation)
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Technical Risks
|
|
||||||
**Risk**: [Description]
|
|
||||||
- **Impact**: [High/Medium/Low - effect on project]
|
|
||||||
- **Likelihood**: [High/Medium/Low]
|
|
||||||
- **Mitigation**: [How to address]
|
|
||||||
- **Fallback**: [Plan B if mitigation fails]
|
|
||||||
|
|
||||||
## Dependency Risks
|
|
||||||
[External dependencies, blocking issues]
|
|
||||||
|
|
||||||
## Scope Risks
|
|
||||||
[Scope creep, underestimation, unclear requirements]
|
|
||||||
|
|
||||||
</risks>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<appendix>
|
|
||||||
## References
|
|
||||||
[Papers, documentation, similar systems]
|
|
||||||
|
|
||||||
## Glossary
|
|
||||||
[Domain-specific terms]
|
|
||||||
|
|
||||||
## Open Questions
|
|
||||||
[Things to resolve during development]
|
|
||||||
</appendix>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<task-master-integration>
|
|
||||||
# How Task Master Uses This PRD
|
|
||||||
|
|
||||||
When you run `task-master parse-prd <file>.txt`, the parser:
|
|
||||||
|
|
||||||
1. **Extracts capabilities** → Main tasks
|
|
||||||
- Each `### Capability:` becomes a top-level task
|
|
||||||
|
|
||||||
2. **Extracts features** → Subtasks
|
|
||||||
- Each `#### Feature:` becomes a subtask under its capability
|
|
||||||
|
|
||||||
3. **Parses dependencies** → Task dependencies
|
|
||||||
- `Depends on: [X, Y]` sets task.dependencies = ["X", "Y"]
|
|
||||||
|
|
||||||
4. **Orders by phases** → Task priorities
|
|
||||||
- Phase 0 tasks = highest priority
|
|
||||||
- Phase N tasks = lower priority, properly sequenced
|
|
||||||
|
|
||||||
5. **Uses test strategy** → Test generation context
|
|
||||||
- Feeds test scenarios to Surgical Test Generator during implementation
|
|
||||||
|
|
||||||
**Result**: A dependency-aware task graph that can be executed in topological order.
|
|
||||||
|
|
||||||
## Why RPG Structure Matters
|
|
||||||
|
|
||||||
Traditional flat PRDs lead to:
|
|
||||||
- ❌ Unclear task dependencies
|
|
||||||
- ❌ Arbitrary task ordering
|
|
||||||
- ❌ Circular dependencies discovered late
|
|
||||||
- ❌ Poorly scoped tasks
|
|
||||||
|
|
||||||
RPG-structured PRDs provide:
|
|
||||||
- ✅ Explicit dependency chains
|
|
||||||
- ✅ Topological execution order
|
|
||||||
- ✅ Clear module boundaries
|
|
||||||
- ✅ Validated task graph before implementation
|
|
||||||
|
|
||||||
## Tips for Best Results
|
|
||||||
|
|
||||||
1. **Spend time on dependency graph** - This is the most valuable section for Task Master
|
|
||||||
2. **Keep features atomic** - Each feature should be independently testable
|
|
||||||
3. **Progressive refinement** - Start broad, use `task-master expand` to break down complex tasks
|
|
||||||
4. **Use research mode** - `task-master parse-prd --research` leverages AI for better task generation
|
|
||||||
</task-master-integration>
|
|
||||||
96
CHANGELOG.md
96
CHANGELOG.md
@@ -1,101 +1,5 @@
|
|||||||
# task-master-ai
|
# task-master-ai
|
||||||
|
|
||||||
## 0.28.0-rc.2
|
|
||||||
|
|
||||||
### Minor Changes
|
|
||||||
|
|
||||||
- [#1273](https://github.com/eyaltoledano/claude-task-master/pull/1273) [`b43b7ce`](https://github.com/eyaltoledano/claude-task-master/commit/b43b7ce201625eee956fb2f8cd332f238bb78c21) Thanks [@ben-vargas](https://github.com/ben-vargas)! - Add Codex CLI provider with OAuth authentication
|
|
||||||
- Added codex-cli provider for GPT-5 and GPT-5-Codex models (272K input / 128K output)
|
|
||||||
- OAuth-first authentication via `codex login` - no API key required
|
|
||||||
- Optional OPENAI_CODEX_API_KEY support
|
|
||||||
- Codebase analysis capabilities automatically enabled
|
|
||||||
- Command-specific settings and approval/sandbox modes
|
|
||||||
|
|
||||||
### Patch Changes
|
|
||||||
|
|
||||||
- [#1277](https://github.com/eyaltoledano/claude-task-master/pull/1277) [`7b5a7c4`](https://github.com/eyaltoledano/claude-task-master/commit/7b5a7c4495a68b782f7407fc5d0e0d3ae81f42f5) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix MCP connection errors caused by deprecated generateTaskFiles calls. Resolves "Cannot read properties of null (reading 'toString')" errors when using MCP tools for task management operations.
|
|
||||||
|
|
||||||
- [#1276](https://github.com/eyaltoledano/claude-task-master/pull/1276) [`caee040`](https://github.com/eyaltoledano/claude-task-master/commit/caee040907f856d31a660171c9e6d966f23c632e) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix MCP server error when file parameter not provided - now properly constructs default tasks.json path instead of failing with 'tasksJsonPath is required' error.
|
|
||||||
|
|
||||||
## 0.28.0-rc.1
|
|
||||||
|
|
||||||
### Patch Changes
|
|
||||||
|
|
||||||
- [#1274](https://github.com/eyaltoledano/claude-task-master/pull/1274) [`4f984f8`](https://github.com/eyaltoledano/claude-task-master/commit/4f984f8a6965da9f9c7edd60ddfd6560ac022917) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Do a quick fix on build
|
|
||||||
|
|
||||||
## 0.28.0-rc.0
|
|
||||||
|
|
||||||
### Minor Changes
|
|
||||||
|
|
||||||
- [#1215](https://github.com/eyaltoledano/claude-task-master/pull/1215) [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d) Thanks [@joedanz](https://github.com/joedanz)! - Add Cursor IDE custom slash command support
|
|
||||||
|
|
||||||
Expose Task Master commands as Cursor slash commands by copying assets/claude/commands to .cursor/commands on profile add and cleaning up on remove.
|
|
||||||
|
|
||||||
- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Added api keys page on docs website: docs.task-master.dev/getting-started/api-keys
|
|
||||||
|
|
||||||
- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Move to AI SDK v5:
|
|
||||||
- Works better with claude-code and gemini-cli as ai providers
|
|
||||||
- Improved openai model family compatibility
|
|
||||||
- Migrate ollama provider to v2
|
|
||||||
- Closes #1223, #1013, #1161, #1174
|
|
||||||
|
|
||||||
- [#1262](https://github.com/eyaltoledano/claude-task-master/pull/1262) [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Migrate AI services to use generateObject for structured data generation
|
|
||||||
|
|
||||||
This update migrates all AI service calls from generateText to generateObject, ensuring more reliable and structured responses across all commands.
|
|
||||||
|
|
||||||
### Key Changes:
|
|
||||||
- **Unified AI Service**: Replaced separate generateText implementations with a single generateObjectService that handles structured data generation
|
|
||||||
- **JSON Mode Support**: Added proper JSON mode configuration for providers that support it (OpenAI, Anthropic, Google, Groq)
|
|
||||||
- **Schema Validation**: Integrated Zod schemas for all AI-generated content with automatic validation
|
|
||||||
- **Provider Compatibility**: Maintained compatibility with all existing providers while leveraging their native structured output capabilities
|
|
||||||
- **Improved Reliability**: Structured output generation reduces parsing errors and ensures consistent data formats
|
|
||||||
|
|
||||||
### Technical Improvements:
|
|
||||||
- Centralized provider configuration in `ai-providers-unified.js`
|
|
||||||
- Added `generateObject` support detection for each provider
|
|
||||||
- Implemented proper error handling for schema validation failures
|
|
||||||
- Maintained backward compatibility with existing prompt structures
|
|
||||||
|
|
||||||
### Bug Fixes:
|
|
||||||
- Fixed subtask ID numbering issue where AI was generating inconsistent IDs (101-105, 601-603) instead of sequential numbering (1, 2, 3...)
|
|
||||||
- Enhanced prompt instructions to enforce proper ID generation patterns
|
|
||||||
- Ensured subtasks display correctly as X.1, X.2, X.3 format
|
|
||||||
|
|
||||||
This migration improves the reliability and consistency of AI-generated content throughout the Task Master application.
|
|
||||||
|
|
||||||
- [#1112](https://github.com/eyaltoledano/claude-task-master/pull/1112) [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541) Thanks [@olssonsten](https://github.com/olssonsten)! - Enhanced Roo Code profile with MCP timeout configuration for improved reliability during long-running AI operations. The Roo profile now automatically configures a 300-second timeout for MCP server operations, preventing timeouts during complex tasks like `parse-prd`, `expand-all`, `analyze-complexity`, and `research` operations. This change also replaces static MCP configuration files with programmatic generation for better maintainability.
|
|
||||||
|
|
||||||
**What's New:**
|
|
||||||
- 300-second timeout for MCP operations (up from default 60 seconds)
|
|
||||||
- Programmatic MCP configuration generation (replaces static asset files)
|
|
||||||
- Enhanced reliability for AI-powered operations
|
|
||||||
- Consistent with other AI coding assistant profiles
|
|
||||||
|
|
||||||
**Migration:** No user action required - existing Roo Code installations will automatically receive the enhanced MCP configuration on next initialization.
|
|
||||||
|
|
||||||
- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Upgrade grok-cli ai provider to ai sdk v5
|
|
||||||
|
|
||||||
### Patch Changes
|
|
||||||
|
|
||||||
- [#1235](https://github.com/eyaltoledano/claude-task-master/pull/1235) [`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Improve `analyze-complexity` cli docs and `--research` flag documentation
|
|
||||||
|
|
||||||
- [#1251](https://github.com/eyaltoledano/claude-task-master/pull/1251) [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Change parent task back to "pending" when all subtasks are in "pending" state
|
|
||||||
|
|
||||||
- [#1172](https://github.com/eyaltoledano/claude-task-master/pull/1172) [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d) Thanks [@jujax](https://github.com/jujax)! - Fix Claude Code settings validation for pathToClaudeCodeExecutable
|
|
||||||
|
|
||||||
- [#1192](https://github.com/eyaltoledano/claude-task-master/pull/1192) [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a) Thanks [@nukunga](https://github.com/nukunga)! - Fix sonar deep research model failing, should be called `sonar-deep-research`
|
|
||||||
|
|
||||||
- [#1270](https://github.com/eyaltoledano/claude-task-master/pull/1270) [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix complexity score not showing for `task-master show` and `task-master list`
|
|
||||||
- Added complexity score on "next task" when running `task-master list`
|
|
||||||
- Added colors to complexity to reflect complexity (easy, medium, hard)
|
|
||||||
|
|
||||||
## 0.27.3
|
|
||||||
|
|
||||||
### Patch Changes
|
|
||||||
|
|
||||||
- [#1254](https://github.com/eyaltoledano/claude-task-master/pull/1254) [`af53525`](https://github.com/eyaltoledano/claude-task-master/commit/af53525cbc660a595b67d4bb90d906911c71f45d) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fixed issue where `tm show` command could not find subtasks using dotted notation IDs (e.g., '8.1').
|
|
||||||
- The command now properly searches within parent task subtasks and returns the correct subtask information.
|
|
||||||
|
|
||||||
## 0.27.2
|
## 0.27.2
|
||||||
|
|
||||||
### Patch Changes
|
### Patch Changes
|
||||||
|
|||||||
22
CLAUDE.md
22
CLAUDE.md
@@ -4,28 +4,6 @@
|
|||||||
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
|
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
|
||||||
@./.taskmaster/CLAUDE.md
|
@./.taskmaster/CLAUDE.md
|
||||||
|
|
||||||
## Test Guidelines
|
|
||||||
|
|
||||||
### Synchronous Tests
|
|
||||||
- **NEVER use async/await in test functions** unless testing actual asynchronous operations
|
|
||||||
- Use synchronous top-level imports instead of dynamic `await import()`
|
|
||||||
- Test bodies should be synchronous whenever possible
|
|
||||||
- Example:
|
|
||||||
```javascript
|
|
||||||
// ✅ CORRECT - Synchronous imports
|
|
||||||
import { MyClass } from '../src/my-class.js';
|
|
||||||
|
|
||||||
it('should verify behavior', () => {
|
|
||||||
expect(new MyClass().property).toBe(value);
|
|
||||||
});
|
|
||||||
|
|
||||||
// ❌ INCORRECT - Async imports
|
|
||||||
it('should verify behavior', async () => {
|
|
||||||
const { MyClass } = await import('../src/my-class.js');
|
|
||||||
expect(new MyClass().property).toBe(value);
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Changeset Guidelines
|
## Changeset Guidelines
|
||||||
|
|
||||||
- When creating changesets, remember that it's user-facing, meaning we don't have to get into the specifics of the code, but rather mention what the end-user is getting or fixing from this changeset.
|
- When creating changesets, remember that it's user-facing, meaning we don't have to get into the specifics of the code, but rather mention what the end-user is getting or fixing from this changeset.
|
||||||
@@ -88,9 +88,8 @@ At least one (1) of the following is required:
|
|||||||
- xAI API Key (for research or main model)
|
- xAI API Key (for research or main model)
|
||||||
- OpenRouter API Key (for research or main model)
|
- OpenRouter API Key (for research or main model)
|
||||||
- Claude Code (no API key required - requires Claude Code CLI)
|
- Claude Code (no API key required - requires Claude Code CLI)
|
||||||
- Codex CLI (OAuth via ChatGPT subscription - requires Codex CLI)
|
|
||||||
|
|
||||||
Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code or Codex CLI with OAuth). Adding all API keys enables you to seamlessly switch between model providers at will.
|
Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code). Adding all API keys enables you to seamlessly switch between model providers at will.
|
||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
|
|||||||
@@ -1,255 +0,0 @@
|
|||||||
/**
|
|
||||||
* @fileoverview Centralized Command Registry
|
|
||||||
* Provides a single location for registering all CLI commands
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { Command } from 'commander';
|
|
||||||
|
|
||||||
// Import all commands
|
|
||||||
import { ListTasksCommand } from './commands/list.command.js';
|
|
||||||
import { ShowCommand } from './commands/show.command.js';
|
|
||||||
import { AuthCommand } from './commands/auth.command.js';
|
|
||||||
import { ContextCommand } from './commands/context.command.js';
|
|
||||||
import { StartCommand } from './commands/start.command.js';
|
|
||||||
import { SetStatusCommand } from './commands/set-status.command.js';
|
|
||||||
import { ExportCommand } from './commands/export.command.js';
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Command metadata for registration
|
|
||||||
*/
|
|
||||||
export interface CommandMetadata {
|
|
||||||
name: string;
|
|
||||||
description: string;
|
|
||||||
commandClass: typeof Command;
|
|
||||||
category?: 'task' | 'auth' | 'utility' | 'development';
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Registry of all available commands
|
|
||||||
*/
|
|
||||||
export class CommandRegistry {
|
|
||||||
/**
|
|
||||||
* All available commands with their metadata
|
|
||||||
*/
|
|
||||||
private static commands: CommandMetadata[] = [
|
|
||||||
// Task Management Commands
|
|
||||||
{
|
|
||||||
name: 'list',
|
|
||||||
description: 'List all tasks with filtering and status overview',
|
|
||||||
commandClass: ListTasksCommand as any,
|
|
||||||
category: 'task'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: 'show',
|
|
||||||
description: 'Display detailed information about a specific task',
|
|
||||||
commandClass: ShowCommand as any,
|
|
||||||
category: 'task'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: 'start',
|
|
||||||
description: 'Start working on a task with claude-code',
|
|
||||||
commandClass: StartCommand as any,
|
|
||||||
category: 'task'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: 'set-status',
|
|
||||||
description: 'Update the status of one or more tasks',
|
|
||||||
commandClass: SetStatusCommand as any,
|
|
||||||
category: 'task'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: 'export',
|
|
||||||
description: 'Export tasks to external systems',
|
|
||||||
commandClass: ExportCommand as any,
|
|
||||||
category: 'task'
|
|
||||||
},
|
|
||||||
|
|
||||||
// Authentication & Context Commands
|
|
||||||
{
|
|
||||||
name: 'auth',
|
|
||||||
description: 'Manage authentication with tryhamster.com',
|
|
||||||
commandClass: AuthCommand as any,
|
|
||||||
category: 'auth'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: 'context',
|
|
||||||
description: 'Manage workspace context (organization/brief)',
|
|
||||||
commandClass: ContextCommand as any,
|
|
||||||
category: 'auth'
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Register all commands on a program instance
|
|
||||||
* @param program - Commander program to register commands on
|
|
||||||
*/
|
|
||||||
static registerAll(program: Command): void {
|
|
||||||
for (const cmd of this.commands) {
|
|
||||||
this.registerCommand(program, cmd);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Register specific commands by category
|
|
||||||
* @param program - Commander program to register commands on
|
|
||||||
* @param category - Category of commands to register
|
|
||||||
*/
|
|
||||||
static registerByCategory(
|
|
||||||
program: Command,
|
|
||||||
category: 'task' | 'auth' | 'utility' | 'development'
|
|
||||||
): void {
|
|
||||||
const categoryCommands = this.commands.filter(
|
|
||||||
(cmd) => cmd.category === category
|
|
||||||
);
|
|
||||||
|
|
||||||
for (const cmd of categoryCommands) {
|
|
||||||
this.registerCommand(program, cmd);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Register a single command by name
|
|
||||||
* @param program - Commander program to register the command on
|
|
||||||
* @param name - Name of the command to register
|
|
||||||
*/
|
|
||||||
static registerByName(program: Command, name: string): void {
|
|
||||||
const cmd = this.commands.find((c) => c.name === name);
|
|
||||||
if (cmd) {
|
|
||||||
this.registerCommand(program, cmd);
|
|
||||||
} else {
|
|
||||||
throw new Error(`Command '${name}' not found in registry`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Register a single command
|
|
||||||
* @param program - Commander program to register the command on
|
|
||||||
* @param metadata - Command metadata
|
|
||||||
*/
|
|
||||||
private static registerCommand(
|
|
||||||
program: Command,
|
|
||||||
metadata: CommandMetadata
|
|
||||||
): void {
|
|
||||||
const CommandClass = metadata.commandClass as any;
|
|
||||||
|
|
||||||
// Use the static registration method that all commands have
|
|
||||||
if (CommandClass.registerOn) {
|
|
||||||
CommandClass.registerOn(program);
|
|
||||||
} else if (CommandClass.register) {
|
|
||||||
CommandClass.register(program);
|
|
||||||
} else {
|
|
||||||
// Fallback to creating instance and adding
|
|
||||||
const instance = new CommandClass();
|
|
||||||
program.addCommand(instance);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get all registered command names
|
|
||||||
*/
|
|
||||||
static getCommandNames(): string[] {
|
|
||||||
return this.commands.map((cmd) => cmd.name);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get commands by category
|
|
||||||
*/
|
|
||||||
static getCommandsByCategory(
|
|
||||||
category: 'task' | 'auth' | 'utility' | 'development'
|
|
||||||
): CommandMetadata[] {
|
|
||||||
return this.commands.filter((cmd) => cmd.category === category);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Add a new command to the registry
|
|
||||||
* @param metadata - Command metadata to add
|
|
||||||
*/
|
|
||||||
static addCommand(metadata: CommandMetadata): void {
|
|
||||||
// Check if command already exists
|
|
||||||
if (this.commands.some((cmd) => cmd.name === metadata.name)) {
|
|
||||||
throw new Error(`Command '${metadata.name}' already exists in registry`);
|
|
||||||
}
|
|
||||||
|
|
||||||
this.commands.push(metadata);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Remove a command from the registry
|
|
||||||
* @param name - Name of the command to remove
|
|
||||||
*/
|
|
||||||
static removeCommand(name: string): boolean {
|
|
||||||
const index = this.commands.findIndex((cmd) => cmd.name === name);
|
|
||||||
if (index >= 0) {
|
|
||||||
this.commands.splice(index, 1);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get command metadata by name
|
|
||||||
* @param name - Name of the command
|
|
||||||
*/
|
|
||||||
static getCommand(name: string): CommandMetadata | undefined {
|
|
||||||
return this.commands.find((cmd) => cmd.name === name);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check if a command exists
|
|
||||||
* @param name - Name of the command
|
|
||||||
*/
|
|
||||||
static hasCommand(name: string): boolean {
|
|
||||||
return this.commands.some((cmd) => cmd.name === name);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get a formatted list of all commands for display
|
|
||||||
*/
|
|
||||||
static getFormattedCommandList(): string {
|
|
||||||
const categories = {
|
|
||||||
task: 'Task Management',
|
|
||||||
auth: 'Authentication & Context',
|
|
||||||
utility: 'Utilities',
|
|
||||||
development: 'Development'
|
|
||||||
};
|
|
||||||
|
|
||||||
let output = '';
|
|
||||||
|
|
||||||
for (const [category, title] of Object.entries(categories)) {
|
|
||||||
const cmds = this.getCommandsByCategory(
|
|
||||||
category as keyof typeof categories
|
|
||||||
);
|
|
||||||
if (cmds.length > 0) {
|
|
||||||
output += `\n${title}:\n`;
|
|
||||||
for (const cmd of cmds) {
|
|
||||||
output += ` ${cmd.name.padEnd(20)} ${cmd.description}\n`;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return output;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Convenience function to register all CLI commands
|
|
||||||
* @param program - Commander program instance
|
|
||||||
*/
|
|
||||||
export function registerAllCommands(program: Command): void {
|
|
||||||
CommandRegistry.registerAll(program);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Convenience function to register commands by category
|
|
||||||
* @param program - Commander program instance
|
|
||||||
* @param category - Category to register
|
|
||||||
*/
|
|
||||||
export function registerCommandsByCategory(
|
|
||||||
program: Command,
|
|
||||||
category: 'task' | 'auth' | 'utility' | 'development'
|
|
||||||
): void {
|
|
||||||
CommandRegistry.registerByCategory(program, category);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Export the registry for direct access if needed
|
|
||||||
export default CommandRegistry;
|
|
||||||
@@ -493,7 +493,18 @@ export class AuthCommand extends Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register this command on an existing program
|
* Static method to register this command on an existing program
|
||||||
|
* This is for gradual migration - allows commands.js to use this
|
||||||
|
*/
|
||||||
|
static registerOn(program: Command): Command {
|
||||||
|
const authCommand = new AuthCommand();
|
||||||
|
program.addCommand(authCommand);
|
||||||
|
return authCommand;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative registration that returns the command for chaining
|
||||||
|
* Can also configure the command name if needed
|
||||||
*/
|
*/
|
||||||
static register(program: Command, name?: string): AuthCommand {
|
static register(program: Command, name?: string): AuthCommand {
|
||||||
const authCommand = new AuthCommand(name);
|
const authCommand = new AuthCommand(name);
|
||||||
|
|||||||
@@ -694,7 +694,16 @@ export class ContextCommand extends Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register this command on an existing program
|
* Static method to register this command on an existing program
|
||||||
|
*/
|
||||||
|
static registerOn(program: Command): Command {
|
||||||
|
const contextCommand = new ContextCommand();
|
||||||
|
program.addCommand(contextCommand);
|
||||||
|
return contextCommand;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative registration that returns the command for chaining
|
||||||
*/
|
*/
|
||||||
static register(program: Command, name?: string): ContextCommand {
|
static register(program: Command, name?: string): ContextCommand {
|
||||||
const contextCommand = new ContextCommand(name);
|
const contextCommand = new ContextCommand(name);
|
||||||
|
|||||||
@@ -1,379 +0,0 @@
|
|||||||
/**
 * @fileoverview Export command for exporting tasks to external systems
 * Provides functionality to export tasks to Hamster briefs
 */

import { Command } from 'commander';
import chalk from 'chalk';
import inquirer from 'inquirer';
import ora, { Ora } from 'ora';
import {
	AuthManager,
	AuthenticationError,
	type UserContext
} from '@tm/core/auth';
import { TaskMasterCore, type ExportResult } from '@tm/core';
import * as ui from '../utils/ui.js';

/**
 * Result type from export command
 */
export interface ExportCommandResult {
	// Whether the export (or validation) completed successfully.
	success: boolean;
	// What the command ended up doing; 'cancelled' means the user declined the prompt.
	action: 'export' | 'validate' | 'cancelled';
	// Raw result from TaskMasterCore.exportTasks, when an export was attempted.
	result?: ExportResult;
	// Optional human-readable note (e.g. cancellation reason).
	message?: string;
}

/**
 * ExportCommand extending Commander's Command class
 * Handles task export to external systems.
 *
 * Side effects: most failure paths call process.exit() directly rather
 * than throwing, so this command is not suitable for in-process reuse.
 */
export class ExportCommand extends Command {
	// Singleton auth manager; provides authentication state and context.
	private authManager: AuthManager;
	// Lazily created in initializeServices(); undefined until first export.
	private taskMasterCore?: TaskMasterCore;
	// Last outcome, exposed via getLastResult() (useful for testing).
	private lastResult?: ExportCommandResult;

	constructor(name?: string) {
		super(name || 'export');

		// Initialize auth manager
		this.authManager = AuthManager.getInstance();

		// Configure the command
		this.description('Export tasks to external systems (e.g., Hamster briefs)');

		// Add options
		this.option('--org <id>', 'Organization ID to export to');
		this.option('--brief <id>', 'Brief ID to export tasks to');
		this.option('--tag <tag>', 'Export tasks from a specific tag');
		this.option(
			'--status <status>',
			'Filter tasks by status (pending, in-progress, done, etc.)'
		);
		this.option('--exclude-subtasks', 'Exclude subtasks from export');
		this.option('-y, --yes', 'Skip confirmation prompt');

		// Accept optional positional argument for brief ID or Hamster URL
		this.argument('[briefOrUrl]', 'Brief ID or Hamster brief URL');

		// Default action
		this.action(async (briefOrUrl?: string, options?: any) => {
			await this.executeExport(briefOrUrl, options);
		});
	}

	/**
	 * Initialize the TaskMasterCore.
	 * Idempotent: returns immediately if already initialized.
	 * @throws Error wrapping the underlying failure message
	 */
	private async initializeServices(): Promise<void> {
		if (this.taskMasterCore) {
			return;
		}

		try {
			// Initialize TaskMasterCore rooted at the current working directory.
			this.taskMasterCore = await TaskMasterCore.create({
				projectPath: process.cwd()
			});
		} catch (error) {
			throw new Error(
				`Failed to initialize services: ${(error as Error).message}`
			);
		}
	}

	/**
	 * Execute the export command.
	 *
	 * Flow: auth check -> service init -> resolve org/brief (flags, positional
	 * arg, or saved context) -> optional confirmation -> export -> summary.
	 * Exits the process with code 1 on any failure, 0 on user cancellation.
	 * @param briefOrUrl - Optional positional brief ID or Hamster URL
	 * @param options - Parsed Commander options (org, brief, tag, status, excludeSubtasks, yes)
	 */
	private async executeExport(
		briefOrUrl?: string,
		options?: any
	): Promise<void> {
		let spinner: Ora | undefined;

		try {
			// Check authentication
			if (!this.authManager.isAuthenticated()) {
				ui.displayError('Not authenticated. Run "tm auth login" first.');
				process.exit(1);
			}

			// Initialize services
			await this.initializeServices();

			// Get current context
			const context = this.authManager.getContext();

			// Determine org and brief IDs.
			// Precedence: explicit flags > positional argument > saved context.
			// NOTE(review): uses || rather than ??, so an empty-string flag
			// falls through to the next source — presumably intentional.
			let orgId = options?.org || context?.orgId;
			let briefId = options?.brief || briefOrUrl || context?.briefId;

			// If a URL/ID was provided as argument, resolve it
			// (resolution also yields the owning org, overriding the above).
			if (briefOrUrl && !options?.brief) {
				spinner = ora('Resolving brief...').start();
				const resolvedBrief = await this.resolveBriefInput(briefOrUrl);
				if (resolvedBrief) {
					briefId = resolvedBrief.briefId;
					orgId = resolvedBrief.orgId;
					spinner.succeed('Brief resolved');
				} else {
					spinner.fail('Could not resolve brief');
					process.exit(1);
				}
			}

			// Validate we have necessary IDs
			if (!orgId) {
				ui.displayError(
					'No organization selected. Run "tm context org" or use --org flag.'
				);
				process.exit(1);
			}

			if (!briefId) {
				ui.displayError(
					'No brief specified. Run "tm context brief", provide a brief ID/URL, or use --brief flag.'
				);
				process.exit(1);
			}

			// Confirm export if not auto-confirmed (-y/--yes skips the prompt)
			if (!options?.yes) {
				const confirmed = await this.confirmExport(orgId, briefId, context);
				if (!confirmed) {
					ui.displayWarning('Export cancelled');
					this.lastResult = {
						success: false,
						action: 'cancelled',
						message: 'User cancelled export'
					};
					// Cancellation is not an error: exit 0.
					process.exit(0);
				}
			}

			// Perform export
			spinner = ora('Exporting tasks...').start();

			const exportResult = await this.taskMasterCore!.exportTasks({
				orgId,
				briefId,
				tag: options?.tag,
				status: options?.status,
				excludeSubtasks: options?.excludeSubtasks || false
			});

			if (exportResult.success) {
				spinner.succeed(
					`Successfully exported ${exportResult.taskCount} task(s) to brief`
				);

				// Display summary
				console.log(chalk.cyan('\n📤 Export Summary\n'));
				console.log(chalk.white(` Organization: ${orgId}`));
				console.log(chalk.white(` Brief: ${briefId}`));
				console.log(chalk.white(` Tasks exported: ${exportResult.taskCount}`));
				if (options?.tag) {
					console.log(chalk.gray(` Tag: ${options.tag}`));
				}
				if (options?.status) {
					console.log(chalk.gray(` Status filter: ${options.status}`));
				}

				if (exportResult.message) {
					console.log(chalk.gray(`\n ${exportResult.message}`));
				}
			} else {
				spinner.fail('Export failed');
				if (exportResult.error) {
					console.error(chalk.red(`\n✗ ${exportResult.error.message}`));
				}
			}

			this.lastResult = {
				success: exportResult.success,
				action: 'export',
				result: exportResult
			};
		} catch (error: any) {
			// Stop a still-running spinner before reporting, so the error
			// output is not interleaved with spinner frames.
			if (spinner?.isSpinning) spinner.fail('Export failed');
			this.handleError(error);
			process.exit(1);
		}
	}

	/**
	 * Resolve brief input to get brief and org IDs.
	 * Accepts either a bare brief ID or a Hamster brief URL.
	 * @returns The brief/org ID pair, or null if the input could not be
	 *          resolved (not found, no access, or unparseable input).
	 */
	private async resolveBriefInput(
		briefOrUrl: string
	): Promise<{ briefId: string; orgId: string } | null> {
		try {
			// Extract brief ID from input
			const briefId = this.extractBriefId(briefOrUrl);
			if (!briefId) {
				return null;
			}

			// Fetch brief to get organization
			const brief = await this.authManager.getBrief(briefId);
			if (!brief) {
				ui.displayError('Brief not found or you do not have access');
				return null;
			}

			return {
				briefId: brief.id,
				// NOTE(review): accountId is treated as the org ID here —
				// confirm the Hamster API contract if these ever diverge.
				orgId: brief.accountId
			};
		} catch (error) {
			console.error(chalk.red(`Failed to resolve brief: ${error}`));
			return null;
		}
	}

	/**
	 * Extract a brief ID from raw input (ID or URL).
	 *
	 * Tries, in order: URL query params (?id= / ?briefId=), the path segment
	 * after "/briefs/", the last path segment, and finally the raw input.
	 * @returns The extracted candidate ID, or null for empty input.
	 */
	private extractBriefId(input: string): string | null {
		const raw = input?.trim() ?? '';
		if (!raw) return null;

		// Parse as-is, then retry with an https:// prefix for scheme-less URLs.
		const parseUrl = (s: string): URL | null => {
			try {
				return new URL(s);
			} catch {}
			try {
				return new URL(`https://${s}`);
			} catch {}
			return null;
		};

		// Pick the segment after the last "briefs" path element,
		// or fall back to the final path segment.
		const fromParts = (path: string): string | null => {
			const parts = path.split('/').filter(Boolean);
			const briefsIdx = parts.lastIndexOf('briefs');
			const candidate =
				briefsIdx >= 0 && parts.length > briefsIdx + 1
					? parts[briefsIdx + 1]
					: parts[parts.length - 1];
			return candidate?.trim() || null;
		};

		// Try URL parsing
		const url = parseUrl(raw);
		if (url) {
			const qId = url.searchParams.get('id') || url.searchParams.get('briefId');
			const candidate = (qId || fromParts(url.pathname)) ?? null;
			if (candidate) {
				// Length >= 8 is a heuristic for opaque IDs that fail the
				// stricter UUID/ULID/slug patterns.
				if (this.isLikelyId(candidate) || candidate.length >= 8) {
					return candidate;
				}
			}
		}

		// Check if it looks like a path
		if (raw.includes('/')) {
			const candidate = fromParts(raw);
			if (candidate && (this.isLikelyId(candidate) || candidate.length >= 8)) {
				return candidate;
			}
		}

		// Return raw if it looks like an ID
		// NOTE(review): despite the comment above, raw is returned
		// unconditionally here — short/odd inputs are passed through and left
		// for getBrief() to reject. Confirm this is intentional.
		return raw;
	}

	/**
	 * Check if a string looks like a brief ID
	 * (UUID, ULID, or a long URL-safe slug of 16+ characters).
	 */
	private isLikelyId(value: string): boolean {
		const uuidRegex =
			/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;
		const ulidRegex = /^[0-9A-HJKMNP-TV-Z]{26}$/i;
		const slugRegex = /^[A-Za-z0-9_-]{16,}$/;
		return (
			uuidRegex.test(value) || ulidRegex.test(value) || slugRegex.test(value)
		);
	}

	/**
	 * Confirm export with the user via an interactive prompt.
	 * Shows friendly names from the saved context when available,
	 * falling back to raw IDs otherwise.
	 * @returns true if the user confirmed (default answer is yes)
	 */
	private async confirmExport(
		orgId: string,
		briefId: string,
		context: UserContext | null
	): Promise<boolean> {
		console.log(chalk.cyan('\n📤 Export Tasks\n'));

		// Show org name if available
		if (context?.orgName) {
			console.log(chalk.white(` Organization: ${context.orgName}`));
			console.log(chalk.gray(` ID: ${orgId}`));
		} else {
			console.log(chalk.white(` Organization ID: ${orgId}`));
		}

		// Show brief info
		// NOTE(review): context.briefName may describe a different brief than
		// briefId when the ID came from a flag/URL — confirm acceptable.
		if (context?.briefName) {
			console.log(chalk.white(`\n Brief: ${context.briefName}`));
			console.log(chalk.gray(` ID: ${briefId}`));
		} else {
			console.log(chalk.white(`\n Brief ID: ${briefId}`));
		}

		const { confirmed } = await inquirer.prompt([
			{
				type: 'confirm',
				name: 'confirmed',
				message: 'Do you want to proceed with export?',
				default: true
			}
		]);

		return confirmed;
	}

	/**
	 * Handle errors: print a message, with extra hints for authentication
	 * failures and a stack trace when DEBUG is set. Does not exit; callers do.
	 */
	private handleError(error: any): void {
		if (error instanceof AuthenticationError) {
			console.error(chalk.red(`\n✗ ${error.message}`));

			if (error.code === 'NOT_AUTHENTICATED') {
				ui.displayWarning('Please authenticate first: tm auth login');
			}
		} else {
			const msg = error?.message ?? String(error);
			console.error(chalk.red(`Error: ${msg}`));

			if (error.stack && process.env.DEBUG) {
				console.error(chalk.gray(error.stack));
			}
		}
	}

	/**
	 * Get the last export result (useful for testing)
	 */
	public getLastResult(): ExportCommandResult | undefined {
		return this.lastResult;
	}

	/**
	 * Clean up resources
	 */
	async cleanup(): Promise<void> {
		// No resources to clean up
	}

	/**
	 * Register this command on an existing program
	 * @param program - Commander program to attach to
	 * @param name - Optional override for the command name
	 * @returns The created command, for chaining
	 */
	static register(program: Command, name?: string): ExportCommand {
		const exportCommand = new ExportCommand(name);
		program.addCommand(exportCommand);
		return exportCommand;
	}
}
|
||||||
@@ -246,7 +246,7 @@ export class ListTasksCommand extends Command {
|
|||||||
task.subtasks.forEach((subtask) => {
|
task.subtasks.forEach((subtask) => {
|
||||||
const subIcon = STATUS_ICONS[subtask.status];
|
const subIcon = STATUS_ICONS[subtask.status];
|
||||||
console.log(
|
console.log(
|
||||||
` ${chalk.gray(String(subtask.id))} ${subIcon} ${chalk.gray(subtask.title)}`
|
` ${chalk.gray(`${task.id}.${subtask.id}`)} ${subIcon} ${chalk.gray(subtask.title)}`
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -281,14 +281,9 @@ export class ListTasksCommand extends Command {
|
|||||||
const priorityBreakdown = getPriorityBreakdown(tasks);
|
const priorityBreakdown = getPriorityBreakdown(tasks);
|
||||||
|
|
||||||
// Find next task following the same logic as findNextTask
|
// Find next task following the same logic as findNextTask
|
||||||
const nextTaskInfo = this.findNextTask(tasks);
|
const nextTask = this.findNextTask(tasks);
|
||||||
|
|
||||||
// Get the full task object with complexity data already included
|
// Display dashboard boxes
|
||||||
const nextTask = nextTaskInfo
|
|
||||||
? tasks.find((t) => String(t.id) === String(nextTaskInfo.id))
|
|
||||||
: undefined;
|
|
||||||
|
|
||||||
// Display dashboard boxes (nextTask already has complexity from storage enrichment)
|
|
||||||
displayDashboards(
|
displayDashboards(
|
||||||
taskStats,
|
taskStats,
|
||||||
subtaskStats,
|
subtaskStats,
|
||||||
@@ -297,7 +292,7 @@ export class ListTasksCommand extends Command {
|
|||||||
nextTask
|
nextTask
|
||||||
);
|
);
|
||||||
|
|
||||||
// Task table
|
// Task table - no title, just show the table directly
|
||||||
console.log(
|
console.log(
|
||||||
ui.createTaskTable(tasks, {
|
ui.createTaskTable(tasks, {
|
||||||
showSubtasks: withSubtasks,
|
showSubtasks: withSubtasks,
|
||||||
@@ -308,16 +303,14 @@ export class ListTasksCommand extends Command {
|
|||||||
|
|
||||||
// Display recommended next task section immediately after table
|
// Display recommended next task section immediately after table
|
||||||
if (nextTask) {
|
if (nextTask) {
|
||||||
const description = getTaskDescription(nextTask);
|
// Find the full task object to get description
|
||||||
|
const fullTask = tasks.find((t) => String(t.id) === String(nextTask.id));
|
||||||
|
const description = fullTask ? getTaskDescription(fullTask) : undefined;
|
||||||
|
|
||||||
displayRecommendedNextTask({
|
displayRecommendedNextTask({
|
||||||
id: nextTask.id,
|
...nextTask,
|
||||||
title: nextTask.title,
|
status: 'pending', // Next task is typically pending
|
||||||
priority: nextTask.priority,
|
description
|
||||||
status: nextTask.status,
|
|
||||||
dependencies: nextTask.dependencies,
|
|
||||||
description,
|
|
||||||
complexity: nextTask.complexity as number | undefined
|
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
displayRecommendedNextTask(undefined);
|
displayRecommendedNextTask(undefined);
|
||||||
@@ -474,7 +467,18 @@ export class ListTasksCommand extends Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register this command on an existing program
|
* Static method to register this command on an existing program
|
||||||
|
* This is for gradual migration - allows commands.js to use this
|
||||||
|
*/
|
||||||
|
static registerOn(program: Command): Command {
|
||||||
|
const listCommand = new ListTasksCommand();
|
||||||
|
program.addCommand(listCommand);
|
||||||
|
return listCommand;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative registration that returns the command for chaining
|
||||||
|
* Can also configure the command name if needed
|
||||||
*/
|
*/
|
||||||
static register(program: Command, name?: string): ListTasksCommand {
|
static register(program: Command, name?: string): ListTasksCommand {
|
||||||
const listCommand = new ListTasksCommand(name);
|
const listCommand = new ListTasksCommand(name);
|
||||||
|
|||||||
@@ -258,6 +258,9 @@ export class SetStatusCommand extends Command {
|
|||||||
)
|
)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Show storage info
|
||||||
|
console.log(chalk.gray(`\nUsing ${result.storageType} storage`));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -287,7 +290,18 @@ export class SetStatusCommand extends Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register this command on an existing program
|
* Static method to register this command on an existing program
|
||||||
|
* This is for gradual migration - allows commands.js to use this
|
||||||
|
*/
|
||||||
|
static registerOn(program: Command): Command {
|
||||||
|
const setStatusCommand = new SetStatusCommand();
|
||||||
|
program.addCommand(setStatusCommand);
|
||||||
|
return setStatusCommand;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative registration that returns the command for chaining
|
||||||
|
* Can also configure the command name if needed
|
||||||
*/
|
*/
|
||||||
static register(program: Command, name?: string): SetStatusCommand {
|
static register(program: Command, name?: string): SetStatusCommand {
|
||||||
const setStatusCommand = new SetStatusCommand(name);
|
const setStatusCommand = new SetStatusCommand(name);
|
||||||
|
|||||||
@@ -322,7 +322,18 @@ export class ShowCommand extends Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register this command on an existing program
|
* Static method to register this command on an existing program
|
||||||
|
* This is for gradual migration - allows commands.js to use this
|
||||||
|
*/
|
||||||
|
static registerOn(program: Command): Command {
|
||||||
|
const showCommand = new ShowCommand();
|
||||||
|
program.addCommand(showCommand);
|
||||||
|
return showCommand;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative registration that returns the command for chaining
|
||||||
|
* Can also configure the command name if needed
|
||||||
*/
|
*/
|
||||||
static register(program: Command, name?: string): ShowCommand {
|
static register(program: Command, name?: string): ShowCommand {
|
||||||
const showCommand = new ShowCommand(name);
|
const showCommand = new ShowCommand(name);
|
||||||
|
|||||||
@@ -493,7 +493,16 @@ export class StartCommand extends Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register this command on an existing program
|
* Static method to register this command on an existing program
|
||||||
|
*/
|
||||||
|
static registerOn(program: Command): Command {
|
||||||
|
const startCommand = new StartCommand();
|
||||||
|
program.addCommand(startCommand);
|
||||||
|
return startCommand;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative registration that returns the command for chaining
|
||||||
*/
|
*/
|
||||||
static register(program: Command, name?: string): StartCommand {
|
static register(program: Command, name?: string): StartCommand {
|
||||||
const startCommand = new StartCommand(name);
|
const startCommand = new StartCommand(name);
|
||||||
|
|||||||
@@ -10,15 +10,6 @@ export { AuthCommand } from './commands/auth.command.js';
|
|||||||
export { ContextCommand } from './commands/context.command.js';
|
export { ContextCommand } from './commands/context.command.js';
|
||||||
export { StartCommand } from './commands/start.command.js';
|
export { StartCommand } from './commands/start.command.js';
|
||||||
export { SetStatusCommand } from './commands/set-status.command.js';
|
export { SetStatusCommand } from './commands/set-status.command.js';
|
||||||
export { ExportCommand } from './commands/export.command.js';
|
|
||||||
|
|
||||||
// Command Registry
|
|
||||||
export {
|
|
||||||
CommandRegistry,
|
|
||||||
registerAllCommands,
|
|
||||||
registerCommandsByCategory,
|
|
||||||
type CommandMetadata
|
|
||||||
} from './command-registry.js';
|
|
||||||
|
|
||||||
// UI utilities (for other commands to use)
|
// UI utilities (for other commands to use)
|
||||||
export * as ui from './utils/ui.js';
|
export * as ui from './utils/ui.js';
|
||||||
|
|||||||
@@ -6,7 +6,6 @@
|
|||||||
import chalk from 'chalk';
|
import chalk from 'chalk';
|
||||||
import boxen from 'boxen';
|
import boxen from 'boxen';
|
||||||
import type { Task, TaskPriority } from '@tm/core/types';
|
import type { Task, TaskPriority } from '@tm/core/types';
|
||||||
import { getComplexityWithColor } from '../../utils/ui.js';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Statistics for task collection
|
* Statistics for task collection
|
||||||
@@ -480,7 +479,7 @@ export function displayDependencyDashboard(
|
|||||||
? chalk.cyan(nextTask.dependencies.join(', '))
|
? chalk.cyan(nextTask.dependencies.join(', '))
|
||||||
: chalk.gray('None')
|
: chalk.gray('None')
|
||||||
}\n` +
|
}\n` +
|
||||||
`Complexity: ${nextTask?.complexity !== undefined ? getComplexityWithColor(nextTask.complexity) : chalk.gray('N/A')}`;
|
`Complexity: ${nextTask?.complexity || chalk.gray('N/A')}`;
|
||||||
|
|
||||||
return content;
|
return content;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,7 +6,6 @@
|
|||||||
import chalk from 'chalk';
|
import chalk from 'chalk';
|
||||||
import boxen from 'boxen';
|
import boxen from 'boxen';
|
||||||
import type { Task } from '@tm/core/types';
|
import type { Task } from '@tm/core/types';
|
||||||
import { getComplexityWithColor } from '../../utils/ui.js';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Next task display options
|
* Next task display options
|
||||||
@@ -18,7 +17,6 @@ export interface NextTaskDisplayOptions {
|
|||||||
status?: string;
|
status?: string;
|
||||||
dependencies?: (string | number)[];
|
dependencies?: (string | number)[];
|
||||||
description?: string;
|
description?: string;
|
||||||
complexity?: number;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -84,11 +82,6 @@ export function displayRecommendedNextTask(
|
|||||||
: chalk.cyan(task.dependencies.join(', '));
|
: chalk.cyan(task.dependencies.join(', '));
|
||||||
content.push(`Dependencies: ${depsDisplay}`);
|
content.push(`Dependencies: ${depsDisplay}`);
|
||||||
|
|
||||||
// Complexity with color and label
|
|
||||||
if (typeof task.complexity === 'number') {
|
|
||||||
content.push(`Complexity: ${getComplexityWithColor(task.complexity)}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Description if available
|
// Description if available
|
||||||
if (task.description) {
|
if (task.description) {
|
||||||
content.push('');
|
content.push('');
|
||||||
|
|||||||
@@ -9,11 +9,7 @@ import Table from 'cli-table3';
|
|||||||
import { marked, MarkedExtension } from 'marked';
|
import { marked, MarkedExtension } from 'marked';
|
||||||
import { markedTerminal } from 'marked-terminal';
|
import { markedTerminal } from 'marked-terminal';
|
||||||
import type { Task } from '@tm/core/types';
|
import type { Task } from '@tm/core/types';
|
||||||
import {
|
import { getStatusWithColor, getPriorityWithColor } from '../../utils/ui.js';
|
||||||
getStatusWithColor,
|
|
||||||
getPriorityWithColor,
|
|
||||||
getComplexityWithColor
|
|
||||||
} from '../../utils/ui.js';
|
|
||||||
|
|
||||||
// Configure marked to use terminal renderer with subtle colors
|
// Configure marked to use terminal renderer with subtle colors
|
||||||
marked.use(
|
marked.use(
|
||||||
@@ -112,9 +108,7 @@ export function displayTaskProperties(task: Task): void {
|
|||||||
getStatusWithColor(task.status),
|
getStatusWithColor(task.status),
|
||||||
getPriorityWithColor(task.priority),
|
getPriorityWithColor(task.priority),
|
||||||
deps,
|
deps,
|
||||||
typeof task.complexity === 'number'
|
'N/A',
|
||||||
? getComplexityWithColor(task.complexity)
|
|
||||||
: chalk.gray('N/A'),
|
|
||||||
task.description || ''
|
task.description || ''
|
||||||
].join('\n');
|
].join('\n');
|
||||||
|
|
||||||
@@ -192,7 +186,8 @@ export function displaySubtasks(
|
|||||||
status: any;
|
status: any;
|
||||||
description?: string;
|
description?: string;
|
||||||
dependencies?: string[];
|
dependencies?: string[];
|
||||||
}>
|
}>,
|
||||||
|
parentId: string | number
|
||||||
): void {
|
): void {
|
||||||
const terminalWidth = process.stdout.columns * 0.95 || 100;
|
const terminalWidth = process.stdout.columns * 0.95 || 100;
|
||||||
// Display subtasks header
|
// Display subtasks header
|
||||||
@@ -227,7 +222,7 @@ export function displaySubtasks(
|
|||||||
});
|
});
|
||||||
|
|
||||||
subtasks.forEach((subtask) => {
|
subtasks.forEach((subtask) => {
|
||||||
const subtaskId = String(subtask.id);
|
const subtaskId = `${parentId}.${subtask.id}`;
|
||||||
|
|
||||||
// Format dependencies
|
// Format dependencies
|
||||||
const deps =
|
const deps =
|
||||||
@@ -328,7 +323,7 @@ export function displayTaskDetails(
|
|||||||
console.log(chalk.gray(` No subtasks with status '${statusFilter}'`));
|
console.log(chalk.gray(` No subtasks with status '${statusFilter}'`));
|
||||||
} else if (filteredSubtasks.length > 0) {
|
} else if (filteredSubtasks.length > 0) {
|
||||||
console.log(); // Empty line for spacing
|
console.log(); // Empty line for spacing
|
||||||
displaySubtasks(filteredSubtasks);
|
displaySubtasks(filteredSubtasks, task.id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -84,23 +84,7 @@ export function getPriorityWithColor(priority: TaskPriority): string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get complexity color and label based on score thresholds
|
* Get colored complexity display
|
||||||
*/
|
|
||||||
function getComplexityLevel(score: number): {
|
|
||||||
color: (text: string) => string;
|
|
||||||
label: string;
|
|
||||||
} {
|
|
||||||
if (score >= 7) {
|
|
||||||
return { color: chalk.hex('#CC0000'), label: 'High' };
|
|
||||||
} else if (score >= 4) {
|
|
||||||
return { color: chalk.hex('#FF8800'), label: 'Medium' };
|
|
||||||
} else {
|
|
||||||
return { color: chalk.green, label: 'Low' };
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get colored complexity display with dot indicator (simple format)
|
|
||||||
*/
|
*/
|
||||||
export function getComplexityWithColor(complexity: number | string): string {
|
export function getComplexityWithColor(complexity: number | string): string {
|
||||||
const score =
|
const score =
|
||||||
@@ -110,20 +94,13 @@ export function getComplexityWithColor(complexity: number | string): string {
|
|||||||
return chalk.gray('N/A');
|
return chalk.gray('N/A');
|
||||||
}
|
}
|
||||||
|
|
||||||
const { color } = getComplexityLevel(score);
|
if (score >= 8) {
|
||||||
return color(`● ${score}`);
|
return chalk.red.bold(`${score} (High)`);
|
||||||
}
|
} else if (score >= 5) {
|
||||||
|
return chalk.yellow(`${score} (Medium)`);
|
||||||
/**
|
} else {
|
||||||
* Get colored complexity display with /10 format (for dashboards)
|
return chalk.green(`${score} (Low)`);
|
||||||
*/
|
|
||||||
export function getComplexityWithScore(complexity: number | undefined): string {
|
|
||||||
if (typeof complexity !== 'number') {
|
|
||||||
return chalk.gray('N/A');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const { color, label } = getComplexityLevel(complexity);
|
|
||||||
return color(`${complexity}/10 (${label})`);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -286,12 +263,12 @@ export function createTaskTable(
|
|||||||
// Adjust column widths to better match the original layout
|
// Adjust column widths to better match the original layout
|
||||||
const baseColWidths = showComplexity
|
const baseColWidths = showComplexity
|
||||||
? [
|
? [
|
||||||
Math.floor(terminalWidth * 0.1),
|
Math.floor(terminalWidth * 0.06),
|
||||||
Math.floor(terminalWidth * 0.4),
|
Math.floor(terminalWidth * 0.4),
|
||||||
Math.floor(terminalWidth * 0.15),
|
Math.floor(terminalWidth * 0.15),
|
||||||
Math.floor(terminalWidth * 0.1),
|
Math.floor(terminalWidth * 0.12),
|
||||||
Math.floor(terminalWidth * 0.2),
|
Math.floor(terminalWidth * 0.2),
|
||||||
Math.floor(terminalWidth * 0.1)
|
Math.floor(terminalWidth * 0.12)
|
||||||
] // ID, Title, Status, Priority, Dependencies, Complexity
|
] // ID, Title, Status, Priority, Dependencies, Complexity
|
||||||
: [
|
: [
|
||||||
Math.floor(terminalWidth * 0.08),
|
Math.floor(terminalWidth * 0.08),
|
||||||
@@ -346,12 +323,8 @@ export function createTaskTable(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (showComplexity) {
|
if (showComplexity) {
|
||||||
// Show complexity score from report if available
|
// Show N/A if no complexity score
|
||||||
if (typeof task.complexity === 'number') {
|
row.push(chalk.gray('N/A'));
|
||||||
row.push(getComplexityWithColor(task.complexity));
|
|
||||||
} else {
|
|
||||||
row.push(chalk.gray('N/A'));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
table.push(row);
|
table.push(row);
|
||||||
@@ -377,11 +350,7 @@ export function createTaskTable(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (showComplexity) {
|
if (showComplexity) {
|
||||||
const complexityDisplay =
|
subRow.push(chalk.gray('--'));
|
||||||
typeof subtask.complexity === 'number'
|
|
||||||
? getComplexityWithColor(subtask.complexity)
|
|
||||||
: '--';
|
|
||||||
subRow.push(chalk.gray(complexityDisplay));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
table.push(subRow);
|
table.push(subRow);
|
||||||
|
|||||||
@@ -1,326 +0,0 @@
|
|||||||
---
|
|
||||||
title: RPG Method for PRD Creation
|
|
||||||
sidebarTitle: "RPG Method"
|
|
||||||
---
|
|
||||||
|
|
||||||
# Repository Planning Graph (RPG) Method
|
|
||||||
|
|
||||||
The RPG (Repository Planning Graph) method is an advanced approach to creating Product Requirements Documents that generate highly-structured, dependency-aware task graphs. It's based on Microsoft Research's methodology for scalable codebase generation.
|
|
||||||
|
|
||||||
## When to Use RPG
|
|
||||||
|
|
||||||
Use the RPG template (`example_prd_rpg.txt`) for:
|
|
||||||
|
|
||||||
- **Complex multi-module systems** with intricate dependencies
|
|
||||||
- **Large-scale codebases** being built from scratch
|
|
||||||
- **Projects requiring explicit architecture** and clear module boundaries
|
|
||||||
- **Teams needing dependency visibility** for parallel development
|
|
||||||
|
|
||||||
For simpler features or smaller projects, the standard `example_prd.txt` template may be more appropriate.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Core Principles
|
|
||||||
|
|
||||||
### 1. Dual-Semantics
|
|
||||||
|
|
||||||
Separate **functional** thinking (WHAT) from **structural** thinking (HOW):
|
|
||||||
|
|
||||||
```
|
|
||||||
Functional: "Data Validation capability with schema checking and rule enforcement"
|
|
||||||
↓
|
|
||||||
Structural: "src/validation/ with schema-validator.js and rule-validator.js"
|
|
||||||
```
|
|
||||||
|
|
||||||
This separation prevents mixing concerns and creates clearer module boundaries.
|
|
||||||
|
|
||||||
### 2. Explicit Dependencies
|
|
||||||
|
|
||||||
Never assume dependencies - always state them explicitly:
|
|
||||||
|
|
||||||
```
|
|
||||||
Good:
|
|
||||||
Module: data-ingestion
|
|
||||||
Depends on: [schema-validator, config-manager]
|
|
||||||
|
|
||||||
Bad:
|
|
||||||
Module: data-ingestion
|
|
||||||
(Assumes schema-validator exists somewhere)
|
|
||||||
```
|
|
||||||
|
|
||||||
Explicit dependencies enable:
|
|
||||||
- Topological ordering of implementation
|
|
||||||
- Parallel development of independent modules
|
|
||||||
- Clear build/test order
|
|
||||||
- Early detection of circular dependencies
|
|
||||||
|
|
||||||
### 3. Topological Order
|
|
||||||
|
|
||||||
Build foundation layers before higher layers:
|
|
||||||
|
|
||||||
```
|
|
||||||
Phase 0 (Foundation): error-handling, base-types, config
|
|
||||||
↓
|
|
||||||
Phase 1 (Data): validation, ingestion (depend on Phase 0)
|
|
||||||
↓
|
|
||||||
Phase 2 (Core): algorithms, pipelines (depend on Phase 1)
|
|
||||||
↓
|
|
||||||
Phase 3 (API): routes, handlers (depend on Phase 2)
|
|
||||||
```
|
|
||||||
|
|
||||||
Task Master automatically orders tasks based on this dependency chain.
|
|
||||||
|
|
||||||
### 4. Progressive Refinement
|
|
||||||
|
|
||||||
Start broad, refine iteratively:
|
|
||||||
|
|
||||||
1. High-level capabilities → Main tasks
|
|
||||||
2. Features per capability → Subtasks
|
|
||||||
3. Implementation details → Expanded subtasks
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Template Structure
|
|
||||||
|
|
||||||
The RPG template guides you through 7 key sections:
|
|
||||||
|
|
||||||
### 1. Overview
|
|
||||||
- Problem statement
|
|
||||||
- Target users
|
|
||||||
- Success metrics
|
|
||||||
|
|
||||||
### 2. Functional Decomposition (WHAT)
|
|
||||||
- High-level capability domains
|
|
||||||
- Features per capability
|
|
||||||
- Inputs/outputs/behavior for each feature
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
```
|
|
||||||
Capability: Data Management
|
|
||||||
Feature: Schema validation
|
|
||||||
Description: Validate JSON against defined schemas
|
|
||||||
Inputs: JSON object, schema definition
|
|
||||||
Outputs: Validation result + error details
|
|
||||||
Behavior: Iterate fields, check types, enforce constraints
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Structural Decomposition (HOW)
|
|
||||||
- Repository folder structure
|
|
||||||
- Module-to-capability mapping
|
|
||||||
- File organization
|
|
||||||
- Public interfaces/exports
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
```
|
|
||||||
Capability: Data Management
|
|
||||||
→ Maps to: src/data/
|
|
||||||
├── schema-validator.js (Schema validation feature)
|
|
||||||
├── rule-validator.js (Rule validation feature)
|
|
||||||
└── index.js (Exports)
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Dependency Graph (CRITICAL)
|
|
||||||
- Foundation layer (no dependencies)
|
|
||||||
- Each subsequent layer's dependencies
|
|
||||||
- Explicit "depends on" declarations
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
```
|
|
||||||
Foundation Layer (Phase 0):
|
|
||||||
- error-handling: No dependencies
|
|
||||||
- base-types: No dependencies
|
|
||||||
|
|
||||||
Data Layer (Phase 1):
|
|
||||||
- schema-validator: Depends on [base-types, error-handling]
|
|
||||||
- data-ingestion: Depends on [schema-validator]
|
|
||||||
```
|
|
||||||
|
|
||||||
### 5. Implementation Roadmap
|
|
||||||
- Phases with entry/exit criteria
|
|
||||||
- Tasks grouped by phase
|
|
||||||
- Clear deliverables per phase
|
|
||||||
|
|
||||||
### 6. Test Strategy
|
|
||||||
- Test pyramid ratios
|
|
||||||
- Coverage requirements
|
|
||||||
- Critical test scenarios per module
|
|
||||||
- Guidelines for test generation
|
|
||||||
|
|
||||||
### 7. Architecture & Risks
|
|
||||||
- Technical architecture
|
|
||||||
- Data models
|
|
||||||
- Technology decisions
|
|
||||||
- Risk mitigation strategies
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Using RPG with Task Master
|
|
||||||
|
|
||||||
### Step 1: Create PRD with RPG Template
|
|
||||||
|
|
||||||
Use a code-context-aware tool to fill out the template:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# In Claude Code, Cursor, or similar
|
|
||||||
"Create a PRD using @.taskmaster/templates/example_prd_rpg.txt for [your project]"
|
|
||||||
```
|
|
||||||
|
|
||||||
**Why code context matters:** The AI needs to understand your existing codebase to make informed decisions about:
|
|
||||||
- Module boundaries
|
|
||||||
- Dependency relationships
|
|
||||||
- Integration points
|
|
||||||
- Naming conventions
|
|
||||||
|
|
||||||
**Recommended tools:**
|
|
||||||
- Claude Code (claude-code CLI)
|
|
||||||
- Cursor/Windsurf
|
|
||||||
- Gemini CLI (large contexts)
|
|
||||||
- Codex/Grok CLI
|
|
||||||
|
|
||||||
### Step 2: Parse PRD into Tasks
|
|
||||||
|
|
||||||
```bash
|
|
||||||
task-master parse-prd .taskmaster/docs/your-prd.txt --research
|
|
||||||
```
|
|
||||||
|
|
||||||
Task Master will:
|
|
||||||
1. Extract capabilities → Main tasks
|
|
||||||
2. Extract features → Subtasks
|
|
||||||
3. Parse dependencies → Task dependencies
|
|
||||||
4. Order by phases → Task priorities
|
|
||||||
|
|
||||||
**Result:** A dependency-aware task graph ready for topological execution.
|
|
||||||
|
|
||||||
### Step 3: Analyze Complexity
|
|
||||||
|
|
||||||
```bash
|
|
||||||
task-master analyze-complexity --research
|
|
||||||
```
|
|
||||||
|
|
||||||
Review the complexity report to identify tasks that need expansion.
|
|
||||||
|
|
||||||
### Step 4: Expand Tasks
|
|
||||||
|
|
||||||
```bash
|
|
||||||
task-master expand --all --research
|
|
||||||
```
|
|
||||||
|
|
||||||
Break down complex tasks into manageable subtasks while preserving dependency chains.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## RPG Benefits
|
|
||||||
|
|
||||||
### For Solo Developers
|
|
||||||
- Clear roadmap for implementing complex features
|
|
||||||
- Prevents architectural mistakes early
|
|
||||||
- Explicit dependency tracking avoids integration issues
|
|
||||||
- Enables resuming work after interruptions
|
|
||||||
|
|
||||||
### For Teams
|
|
||||||
- Parallel development of independent modules
|
|
||||||
- Clear contracts between modules (explicit dependencies)
|
|
||||||
- Reduced merge conflicts (proper module boundaries)
|
|
||||||
- Onboarding aid (architectural overview in PRD)
|
|
||||||
|
|
||||||
### For AI Agents
|
|
||||||
- Structured context for code generation
|
|
||||||
- Clear scope boundaries per task
|
|
||||||
- Dependency awareness prevents incomplete implementations
|
|
||||||
- Test strategy guidance for TDD workflows
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## RPG vs Standard Template
|
|
||||||
|
|
||||||
| Aspect | Standard Template | RPG Template |
|
|
||||||
|--------|------------------|--------------|
|
|
||||||
| **Best for** | Simple features | Complex systems |
|
|
||||||
| **Dependency handling** | Implicit | Explicit graph |
|
|
||||||
| **Structure guidance** | Minimal | Step-by-step |
|
|
||||||
| **Examples** | Few | Inline good/bad examples |
|
|
||||||
| **Module boundaries** | Vague | Precise mapping |
|
|
||||||
| **Task ordering** | Manual | Automatic (topological) |
|
|
||||||
| **Learning curve** | Low | Medium |
|
|
||||||
| **Resulting task quality** | Good | Excellent |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Tips for Best Results
|
|
||||||
|
|
||||||
### 1. Spend Time on Dependencies
|
|
||||||
The dependency graph section is the most valuable. List all dependencies explicitly, even if they seem obvious.
|
|
||||||
|
|
||||||
### 2. Keep Features Atomic
|
|
||||||
Each feature should be independently testable. If a feature description is vague ("handle data"), break it into specific features.
|
|
||||||
|
|
||||||
### 3. Progressive Refinement
|
|
||||||
Don't try to get everything perfect on the first pass:
|
|
||||||
1. Fill out high-level sections
|
|
||||||
2. Review and refine
|
|
||||||
3. Add detail where needed
|
|
||||||
4. Let `task-master expand` break down complex tasks further
|
|
||||||
|
|
||||||
### 4. Use Research Mode
|
|
||||||
```bash
|
|
||||||
task-master parse-prd --research
|
|
||||||
```
|
|
||||||
The `--research` flag leverages AI to enhance task generation with domain knowledge.
|
|
||||||
|
|
||||||
### 5. Validate Early
|
|
||||||
```bash
|
|
||||||
task-master validate-dependencies
|
|
||||||
```
|
|
||||||
Check for circular dependencies or orphaned modules before starting implementation.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Common Pitfalls
|
|
||||||
|
|
||||||
### ❌ Mixing Functional and Structural
|
|
||||||
```
|
|
||||||
Bad: "Capability: validation.js"
|
|
||||||
Good: "Capability: Data Validation" → maps to "src/validation/"
|
|
||||||
```
|
|
||||||
|
|
||||||
### ❌ Vague Module Boundaries
|
|
||||||
```
|
|
||||||
Bad: "Module: utils"
|
|
||||||
Good: "Module: string-utilities" with clear exports
|
|
||||||
```
|
|
||||||
|
|
||||||
### ❌ Implicit Dependencies
|
|
||||||
```
|
|
||||||
Bad: "Module: API handlers (needs validation)"
|
|
||||||
Good: "Module: API handlers, Depends on: [validation, error-handling]"
|
|
||||||
```
|
|
||||||
|
|
||||||
### ❌ Skipping Test Strategy
|
|
||||||
Without test strategy, the AI won't know what to test during implementation.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Example Workflow
|
|
||||||
|
|
||||||
1. **Discuss idea with AI**: Explain your project concept
|
|
||||||
2. **Reference RPG template**: Show AI the `example_prd_rpg.txt`
|
|
||||||
3. **Co-create PRD**: Work through each section with AI guidance
|
|
||||||
4. **Save to docs**: Place in `.taskmaster/docs/your-project.txt`
|
|
||||||
5. **Parse PRD**: `task-master parse-prd .taskmaster/docs/your-project.txt --research`
|
|
||||||
6. **Analyze**: `task-master analyze-complexity --research`
|
|
||||||
7. **Expand**: `task-master expand --all --research`
|
|
||||||
8. **Start work**: `task-master next`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Further Reading
|
|
||||||
|
|
||||||
- [PRD Creation and Parsing Guide](/getting-started/quick-start/prd-quick)
|
|
||||||
- [Task Structure Documentation](/capabilities/task-structure)
|
|
||||||
- [Microsoft Research RPG Paper](https://arxiv.org/abs/2410.21376) (Original methodology)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
The RPG template includes inline `<instruction>` and `<example>` blocks that teach the method as you use it. Read these sections carefully - they provide valuable guidance at each decision point.
|
|
||||||
</Tip>
|
|
||||||
@@ -50,8 +50,7 @@
|
|||||||
"pages": [
|
"pages": [
|
||||||
"capabilities/mcp",
|
"capabilities/mcp",
|
||||||
"capabilities/cli-root-commands",
|
"capabilities/cli-root-commands",
|
||||||
"capabilities/task-structure",
|
"capabilities/task-structure"
|
||||||
"capabilities/rpg-method"
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -32,11 +32,7 @@ The more context you give the model, the better the breakdown and results.
|
|||||||
|
|
||||||
## Writing a PRD for Task Master
|
## Writing a PRD for Task Master
|
||||||
|
|
||||||
<Note>
|
<Note>An example PRD can be found in .taskmaster/templates/example_prd.txt</Note>
|
||||||
Two example PRD templates are available in `.taskmaster/templates/`:
|
|
||||||
- `example_prd.txt` - Simple template for straightforward projects
|
|
||||||
- `example_prd_rpg.txt` - Advanced RPG (Repository Planning Graph) template for complex projects with dependencies
|
|
||||||
</Note>
|
|
||||||
|
|
||||||
|
|
||||||
You can co-write your PRD with an LLM model using the following workflow:
|
You can co-write your PRD with an LLM model using the following workflow:
|
||||||
@@ -47,29 +43,6 @@ You can co-write your PRD with an LLM model using the following workflow:
|
|||||||
|
|
||||||
This approach works great in Cursor, or anywhere you use a chat-based LLM.
|
This approach works great in Cursor, or anywhere you use a chat-based LLM.
|
||||||
|
|
||||||
### Choosing Between Templates
|
|
||||||
|
|
||||||
**Use `example_prd.txt` when:**
|
|
||||||
- Building straightforward features
|
|
||||||
- Working on smaller projects
|
|
||||||
- Dependencies are simple and obvious
|
|
||||||
|
|
||||||
**Use `example_prd_rpg.txt` when:**
|
|
||||||
- Building complex systems with multiple modules
|
|
||||||
- Need explicit dependency management
|
|
||||||
- Want structured guidance on architecture decisions
|
|
||||||
- Planning a large codebase from scratch
|
|
||||||
|
|
||||||
The RPG template teaches you to think about:
|
|
||||||
1. **Functional decomposition** (WHAT the system does)
|
|
||||||
2. **Structural decomposition** (HOW it's organized in code)
|
|
||||||
3. **Explicit dependencies** (WHAT depends on WHAT)
|
|
||||||
4. **Topological ordering** (build foundation first, then layers)
|
|
||||||
|
|
||||||
<Tip>
|
|
||||||
For complex projects, using the RPG template with a code-context-aware ai agent produces the best results because the AI can understand your existing codebase structure. [Learn more about the RPG method →](/capabilities/rpg-method)
|
|
||||||
</Tip>
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Where to Save Your PRD
|
## Where to Save Your PRD
|
||||||
|
|||||||
@@ -1,19 +1,5 @@
|
|||||||
# Change Log
|
# Change Log
|
||||||
|
|
||||||
## 0.25.5-rc.0
|
|
||||||
|
|
||||||
### Patch Changes
|
|
||||||
|
|
||||||
- Updated dependencies [[`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3), [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d), [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede), [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541), [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d), [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a), [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca), [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055)]:
|
|
||||||
- task-master-ai@0.28.0-rc.0
|
|
||||||
|
|
||||||
## 0.25.4
|
|
||||||
|
|
||||||
### Patch Changes
|
|
||||||
|
|
||||||
- Updated dependencies [[`af53525`](https://github.com/eyaltoledano/claude-task-master/commit/af53525cbc660a595b67d4bb90d906911c71f45d)]:
|
|
||||||
- task-master-ai@0.27.3
|
|
||||||
|
|
||||||
## 0.25.3
|
## 0.25.3
|
||||||
|
|
||||||
### Patch Changes
|
### Patch Changes
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
"private": true,
|
"private": true,
|
||||||
"displayName": "TaskMaster",
|
"displayName": "TaskMaster",
|
||||||
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
|
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
|
||||||
"version": "0.25.5-rc.0",
|
"version": "0.25.3",
|
||||||
"publisher": "Hamster",
|
"publisher": "Hamster",
|
||||||
"icon": "assets/icon.png",
|
"icon": "assets/icon.png",
|
||||||
"engines": {
|
"engines": {
|
||||||
|
|||||||
@@ -1,511 +0,0 @@
|
|||||||
<rpg-method>
|
|
||||||
# Repository Planning Graph (RPG) Method - PRD Template
|
|
||||||
|
|
||||||
This template teaches you (AI or human) how to create structured, dependency-aware PRDs using the RPG methodology from Microsoft Research. The key insight: separate WHAT (functional) from HOW (structural), then connect them with explicit dependencies.
|
|
||||||
|
|
||||||
## Core Principles
|
|
||||||
|
|
||||||
1. **Dual-Semantics**: Think functional (capabilities) AND structural (code organization) separately, then map them
|
|
||||||
2. **Explicit Dependencies**: Never assume - always state what depends on what
|
|
||||||
3. **Topological Order**: Build foundation first, then layers on top
|
|
||||||
4. **Progressive Refinement**: Start broad, refine iteratively
|
|
||||||
|
|
||||||
## How to Use This Template
|
|
||||||
|
|
||||||
- Follow the instructions in each `<instruction>` block
|
|
||||||
- Look at `<example>` blocks to see good vs bad patterns
|
|
||||||
- Fill in the content sections with your project details
|
|
||||||
- The AI reading this will learn the RPG method by following along
|
|
||||||
- Task Master will parse the resulting PRD into dependency-aware tasks
|
|
||||||
|
|
||||||
## Recommended Tools for Creating PRDs
|
|
||||||
|
|
||||||
When using this template to **create** a PRD (not parse it), use **code-context-aware AI assistants** for best results:
|
|
||||||
|
|
||||||
**Why?** The AI needs to understand your existing codebase to make good architectural decisions about modules, dependencies, and integration points.
|
|
||||||
|
|
||||||
**Recommended tools:**
|
|
||||||
- **Claude Code** (claude-code CLI) - Best for structured reasoning and large contexts
|
|
||||||
- **Cursor/Windsurf** - IDE integration with full codebase context
|
|
||||||
- **Gemini CLI** (gemini-cli) - Massive context window for large codebases
|
|
||||||
- **Codex/Grok CLI** - Strong code generation with context awareness
|
|
||||||
|
|
||||||
**Note:** Once your PRD is created, `task-master parse-prd` works with any configured AI model - it just needs to read the PRD text itself, not your codebase.
|
|
||||||
</rpg-method>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<overview>
|
|
||||||
<instruction>
|
|
||||||
Start with the problem, not the solution. Be specific about:
|
|
||||||
- What pain point exists?
|
|
||||||
- Who experiences it?
|
|
||||||
- Why existing solutions don't work?
|
|
||||||
- What success looks like (measurable outcomes)?
|
|
||||||
|
|
||||||
Keep this section focused - don't jump into implementation details yet.
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Problem Statement
|
|
||||||
[Describe the core problem. Be concrete about user pain points.]
|
|
||||||
|
|
||||||
## Target Users
|
|
||||||
[Define personas, their workflows, and what they're trying to achieve.]
|
|
||||||
|
|
||||||
## Success Metrics
|
|
||||||
[Quantifiable outcomes. Examples: "80% task completion via autopilot", "< 5% manual intervention rate"]
|
|
||||||
|
|
||||||
</overview>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<functional-decomposition>
|
|
||||||
<instruction>
|
|
||||||
Now think about CAPABILITIES (what the system DOES), not code structure yet.
|
|
||||||
|
|
||||||
Step 1: Identify high-level capability domains
|
|
||||||
- Think: "What major things does this system do?"
|
|
||||||
- Examples: Data Management, Core Processing, Presentation Layer
|
|
||||||
|
|
||||||
Step 2: For each capability, enumerate specific features
|
|
||||||
- Use explore-exploit strategy:
|
|
||||||
* Exploit: What features are REQUIRED for core value?
|
|
||||||
* Explore: What features make this domain COMPLETE?
|
|
||||||
|
|
||||||
Step 3: For each feature, define:
|
|
||||||
- Description: What it does in one sentence
|
|
||||||
- Inputs: What data/context it needs
|
|
||||||
- Outputs: What it produces/returns
|
|
||||||
- Behavior: Key logic or transformations
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Capability: Data Validation
|
|
||||||
Feature: Schema validation
|
|
||||||
- Description: Validate JSON payloads against defined schemas
|
|
||||||
- Inputs: JSON object, schema definition
|
|
||||||
- Outputs: Validation result (pass/fail) + error details
|
|
||||||
- Behavior: Iterate fields, check types, enforce constraints
|
|
||||||
|
|
||||||
Feature: Business rule validation
|
|
||||||
- Description: Apply domain-specific validation rules
|
|
||||||
- Inputs: Validated data object, rule set
|
|
||||||
- Outputs: Boolean + list of violated rules
|
|
||||||
- Behavior: Execute rules sequentially, short-circuit on failure
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
Capability: validation.js
|
|
||||||
(Problem: This is a FILE, not a CAPABILITY. Mixing structure into functional thinking.)
|
|
||||||
|
|
||||||
Capability: Validation
|
|
||||||
Feature: Make sure data is good
|
|
||||||
(Problem: Too vague. No inputs/outputs. Not actionable.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Capability Tree
|
|
||||||
|
|
||||||
### Capability: [Name]
|
|
||||||
[Brief description of what this capability domain covers]
|
|
||||||
|
|
||||||
#### Feature: [Name]
|
|
||||||
- **Description**: [One sentence]
|
|
||||||
- **Inputs**: [What it needs]
|
|
||||||
- **Outputs**: [What it produces]
|
|
||||||
- **Behavior**: [Key logic]
|
|
||||||
|
|
||||||
#### Feature: [Name]
|
|
||||||
- **Description**:
|
|
||||||
- **Inputs**:
|
|
||||||
- **Outputs**:
|
|
||||||
- **Behavior**:
|
|
||||||
|
|
||||||
### Capability: [Name]
|
|
||||||
...
|
|
||||||
|
|
||||||
</functional-decomposition>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<structural-decomposition>
|
|
||||||
<instruction>
|
|
||||||
NOW think about code organization. Map capabilities to actual file/folder structure.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
1. Each capability maps to a module (folder or file)
|
|
||||||
2. Features within a capability map to functions/classes
|
|
||||||
3. Use clear module boundaries - each module has ONE responsibility
|
|
||||||
4. Define what each module exports (public interface)
|
|
||||||
|
|
||||||
The goal: Create a clear mapping between "what it does" (functional) and "where it lives" (structural).
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Capability: Data Validation
|
|
||||||
→ Maps to: src/validation/
|
|
||||||
├── schema-validator.js (Schema validation feature)
|
|
||||||
├── rule-validator.js (Business rule validation feature)
|
|
||||||
└── index.js (Public exports)
|
|
||||||
|
|
||||||
Exports:
|
|
||||||
- validateSchema(data, schema)
|
|
||||||
- validateRules(data, rules)
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
Capability: Data Validation
|
|
||||||
→ Maps to: src/utils.js
|
|
||||||
(Problem: "utils" is not a clear module boundary. Where do I find validation logic?)
|
|
||||||
|
|
||||||
Capability: Data Validation
|
|
||||||
→ Maps to: src/validation/everything.js
|
|
||||||
(Problem: One giant file. Features should map to separate files for maintainability.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Repository Structure
|
|
||||||
|
|
||||||
```
|
|
||||||
project-root/
|
|
||||||
├── src/
|
|
||||||
│ ├── [module-name]/ # Maps to: [Capability Name]
|
|
||||||
│ │ ├── [file].js # Maps to: [Feature Name]
|
|
||||||
│ │ └── index.js # Public exports
|
|
||||||
│ └── [module-name]/
|
|
||||||
├── tests/
|
|
||||||
└── docs/
|
|
||||||
```
|
|
||||||
|
|
||||||
## Module Definitions
|
|
||||||
|
|
||||||
### Module: [Name]
|
|
||||||
- **Maps to capability**: [Capability from functional decomposition]
|
|
||||||
- **Responsibility**: [Single clear purpose]
|
|
||||||
- **File structure**:
|
|
||||||
```
|
|
||||||
module-name/
|
|
||||||
├── feature1.js
|
|
||||||
├── feature2.js
|
|
||||||
└── index.js
|
|
||||||
```
|
|
||||||
- **Exports**:
|
|
||||||
- `functionName()` - [what it does]
|
|
||||||
- `ClassName` - [what it does]
|
|
||||||
|
|
||||||
</structural-decomposition>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<dependency-graph>
|
|
||||||
<instruction>
|
|
||||||
This is THE CRITICAL SECTION for Task Master parsing.
|
|
||||||
|
|
||||||
Define explicit dependencies between modules. This creates the topological order for task execution.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
1. List modules in dependency order (foundation first)
|
|
||||||
2. For each module, state what it depends on
|
|
||||||
3. Foundation modules should have NO dependencies
|
|
||||||
4. Every non-foundation module should depend on at least one other module
|
|
||||||
5. Think: "What must EXIST before I can build this module?"
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Foundation Layer (no dependencies):
|
|
||||||
- error-handling: No dependencies
|
|
||||||
- config-manager: No dependencies
|
|
||||||
- base-types: No dependencies
|
|
||||||
|
|
||||||
Data Layer:
|
|
||||||
- schema-validator: Depends on [base-types, error-handling]
|
|
||||||
- data-ingestion: Depends on [schema-validator, config-manager]
|
|
||||||
|
|
||||||
Core Layer:
|
|
||||||
- algorithm-engine: Depends on [base-types, error-handling]
|
|
||||||
- pipeline-orchestrator: Depends on [algorithm-engine, data-ingestion]
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
- validation: Depends on API
|
|
||||||
- API: Depends on validation
|
|
||||||
(Problem: Circular dependency. This will cause build/runtime issues.)
|
|
||||||
|
|
||||||
- user-auth: Depends on everything
|
|
||||||
(Problem: Too many dependencies. Should be more focused.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Dependency Chain
|
|
||||||
|
|
||||||
### Foundation Layer (Phase 0)
|
|
||||||
No dependencies - these are built first.
|
|
||||||
|
|
||||||
- **[Module Name]**: [What it provides]
|
|
||||||
- **[Module Name]**: [What it provides]
|
|
||||||
|
|
||||||
### [Layer Name] (Phase 1)
|
|
||||||
- **[Module Name]**: Depends on [[module-from-phase-0], [module-from-phase-0]]
|
|
||||||
- **[Module Name]**: Depends on [[module-from-phase-0]]
|
|
||||||
|
|
||||||
### [Layer Name] (Phase 2)
|
|
||||||
- **[Module Name]**: Depends on [[module-from-phase-1], [module-from-foundation]]
|
|
||||||
|
|
||||||
[Continue building up layers...]
|
|
||||||
|
|
||||||
</dependency-graph>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<implementation-roadmap>
|
|
||||||
<instruction>
|
|
||||||
Turn the dependency graph into concrete development phases.
|
|
||||||
|
|
||||||
Each phase should:
|
|
||||||
1. Have clear entry criteria (what must exist before starting)
|
|
||||||
2. Contain tasks that can be parallelized (no inter-dependencies within phase)
|
|
||||||
3. Have clear exit criteria (how do we know phase is complete?)
|
|
||||||
4. Build toward something USABLE (not just infrastructure)
|
|
||||||
|
|
||||||
Phase ordering follows topological sort of dependency graph.
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Phase 0: Foundation
|
|
||||||
Entry: Clean repository
|
|
||||||
Tasks:
|
|
||||||
- Implement error handling utilities
|
|
||||||
- Create base type definitions
|
|
||||||
- Setup configuration system
|
|
||||||
Exit: Other modules can import foundation without errors
|
|
||||||
|
|
||||||
Phase 1: Data Layer
|
|
||||||
Entry: Phase 0 complete
|
|
||||||
Tasks:
|
|
||||||
- Implement schema validator (uses: base types, error handling)
|
|
||||||
- Build data ingestion pipeline (uses: validator, config)
|
|
||||||
Exit: End-to-end data flow from input to validated output
|
|
||||||
</example>
|
|
||||||
|
|
||||||
<example type="bad">
|
|
||||||
Phase 1: Build Everything
|
|
||||||
Tasks:
|
|
||||||
- API
|
|
||||||
- Database
|
|
||||||
- UI
|
|
||||||
- Tests
|
|
||||||
(Problem: No clear focus. Too broad. Dependencies not considered.)
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Development Phases
|
|
||||||
|
|
||||||
### Phase 0: [Foundation Name]
|
|
||||||
**Goal**: [What foundational capability this establishes]
|
|
||||||
|
|
||||||
**Entry Criteria**: [What must be true before starting]
|
|
||||||
|
|
||||||
**Tasks**:
|
|
||||||
- [ ] [Task name] (depends on: [none or list])
|
|
||||||
- Acceptance criteria: [How we know it's done]
|
|
||||||
- Test strategy: [What tests prove it works]
|
|
||||||
|
|
||||||
- [ ] [Task name] (depends on: [none or list])
|
|
||||||
|
|
||||||
**Exit Criteria**: [Observable outcome that proves phase complete]
|
|
||||||
|
|
||||||
**Delivers**: [What can users/developers do after this phase?]
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Phase 1: [Layer Name]
|
|
||||||
**Goal**:
|
|
||||||
|
|
||||||
**Entry Criteria**: Phase 0 complete
|
|
||||||
|
|
||||||
**Tasks**:
|
|
||||||
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])
|
|
||||||
- [ ] [Task name] (depends on: [[tasks-from-phase-0]])
|
|
||||||
|
|
||||||
**Exit Criteria**:
|
|
||||||
|
|
||||||
**Delivers**:
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
[Continue with more phases...]
|
|
||||||
|
|
||||||
</implementation-roadmap>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<test-strategy>
|
|
||||||
<instruction>
|
|
||||||
Define how testing will be integrated throughout development (TDD approach).
|
|
||||||
|
|
||||||
Specify:
|
|
||||||
1. Test pyramid ratios (unit vs integration vs e2e)
|
|
||||||
2. Coverage requirements
|
|
||||||
3. Critical test scenarios
|
|
||||||
4. Test generation guidelines for Surgical Test Generator
|
|
||||||
|
|
||||||
This section guides the AI when generating tests during the RED phase of TDD.
|
|
||||||
|
|
||||||
<example type="good">
|
|
||||||
Critical Test Scenarios for Data Validation module:
|
|
||||||
- Happy path: Valid data passes all checks
|
|
||||||
- Edge cases: Empty strings, null values, boundary numbers
|
|
||||||
- Error cases: Invalid types, missing required fields
|
|
||||||
- Integration: Validator works with ingestion pipeline
|
|
||||||
</example>
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Test Pyramid
|
|
||||||
|
|
||||||
```
|
|
||||||
/\
|
|
||||||
/E2E\ ← [X]% (End-to-end, slow, comprehensive)
|
|
||||||
/------\
|
|
||||||
/Integration\ ← [Y]% (Module interactions)
|
|
||||||
/------------\
|
|
||||||
/ Unit Tests \ ← [Z]% (Fast, isolated, deterministic)
|
|
||||||
/----------------\
|
|
||||||
```
|
|
||||||
|
|
||||||
## Coverage Requirements
|
|
||||||
- Line coverage: [X]% minimum
|
|
||||||
- Branch coverage: [X]% minimum
|
|
||||||
- Function coverage: [X]% minimum
|
|
||||||
- Statement coverage: [X]% minimum
|
|
||||||
|
|
||||||
## Critical Test Scenarios
|
|
||||||
|
|
||||||
### [Module/Feature Name]
|
|
||||||
**Happy path**:
|
|
||||||
- [Scenario description]
|
|
||||||
- Expected: [What should happen]
|
|
||||||
|
|
||||||
**Edge cases**:
|
|
||||||
- [Scenario description]
|
|
||||||
- Expected: [What should happen]
|
|
||||||
|
|
||||||
**Error cases**:
|
|
||||||
- [Scenario description]
|
|
||||||
- Expected: [How system handles failure]
|
|
||||||
|
|
||||||
**Integration points**:
|
|
||||||
- [What interactions to test]
|
|
||||||
- Expected: [End-to-end behavior]
|
|
||||||
|
|
||||||
## Test Generation Guidelines
|
|
||||||
[Specific instructions for Surgical Test Generator about what to focus on, what patterns to follow, project-specific test conventions]
|
|
||||||
|
|
||||||
</test-strategy>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<architecture>
|
|
||||||
<instruction>
|
|
||||||
Describe technical architecture, data models, and key design decisions.
|
|
||||||
|
|
||||||
Keep this section AFTER functional/structural decomposition - implementation details come after understanding structure.
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## System Components
|
|
||||||
[Major architectural pieces and their responsibilities]
|
|
||||||
|
|
||||||
## Data Models
|
|
||||||
[Core data structures, schemas, database design]
|
|
||||||
|
|
||||||
## Technology Stack
|
|
||||||
[Languages, frameworks, key libraries]
|
|
||||||
|
|
||||||
**Decision: [Technology/Pattern]**
|
|
||||||
- **Rationale**: [Why chosen]
|
|
||||||
- **Trade-offs**: [What we're giving up]
|
|
||||||
- **Alternatives considered**: [What else we looked at]
|
|
||||||
|
|
||||||
</architecture>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<risks>
|
|
||||||
<instruction>
|
|
||||||
Identify risks that could derail development and how to mitigate them.
|
|
||||||
|
|
||||||
Categories:
|
|
||||||
- Technical risks (complexity, unknowns)
|
|
||||||
- Dependency risks (blocking issues)
|
|
||||||
- Scope risks (creep, underestimation)
|
|
||||||
</instruction>
|
|
||||||
|
|
||||||
## Technical Risks
|
|
||||||
**Risk**: [Description]
|
|
||||||
- **Impact**: [High/Medium/Low - effect on project]
|
|
||||||
- **Likelihood**: [High/Medium/Low]
|
|
||||||
- **Mitigation**: [How to address]
|
|
||||||
- **Fallback**: [Plan B if mitigation fails]
|
|
||||||
|
|
||||||
## Dependency Risks
|
|
||||||
[External dependencies, blocking issues]
|
|
||||||
|
|
||||||
## Scope Risks
|
|
||||||
[Scope creep, underestimation, unclear requirements]
|
|
||||||
|
|
||||||
</risks>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<appendix>
|
|
||||||
## References
|
|
||||||
[Papers, documentation, similar systems]
|
|
||||||
|
|
||||||
## Glossary
|
|
||||||
[Domain-specific terms]
|
|
||||||
|
|
||||||
## Open Questions
|
|
||||||
[Things to resolve during development]
|
|
||||||
</appendix>
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<task-master-integration>
|
|
||||||
# How Task Master Uses This PRD
|
|
||||||
|
|
||||||
When you run `task-master parse-prd <file>.txt`, the parser:
|
|
||||||
|
|
||||||
1. **Extracts capabilities** → Main tasks
|
|
||||||
- Each `### Capability:` becomes a top-level task
|
|
||||||
|
|
||||||
2. **Extracts features** → Subtasks
|
|
||||||
- Each `#### Feature:` becomes a subtask under its capability
|
|
||||||
|
|
||||||
3. **Parses dependencies** → Task dependencies
|
|
||||||
- `Depends on: [X, Y]` sets task.dependencies = ["X", "Y"]
|
|
||||||
|
|
||||||
4. **Orders by phases** → Task priorities
|
|
||||||
- Phase 0 tasks = highest priority
|
|
||||||
- Phase N tasks = lower priority, properly sequenced
|
|
||||||
|
|
||||||
5. **Uses test strategy** → Test generation context
|
|
||||||
- Feeds test scenarios to Surgical Test Generator during implementation
|
|
||||||
|
|
||||||
**Result**: A dependency-aware task graph that can be executed in topological order.
|
|
||||||
|
|
||||||
## Why RPG Structure Matters
|
|
||||||
|
|
||||||
Traditional flat PRDs lead to:
|
|
||||||
- ❌ Unclear task dependencies
|
|
||||||
- ❌ Arbitrary task ordering
|
|
||||||
- ❌ Circular dependencies discovered late
|
|
||||||
- ❌ Poorly scoped tasks
|
|
||||||
|
|
||||||
RPG-structured PRDs provide:
|
|
||||||
- ✅ Explicit dependency chains
|
|
||||||
- ✅ Topological execution order
|
|
||||||
- ✅ Clear module boundaries
|
|
||||||
- ✅ Validated task graph before implementation
|
|
||||||
|
|
||||||
## Tips for Best Results
|
|
||||||
|
|
||||||
1. **Spend time on dependency graph** - This is the most valuable section for Task Master
|
|
||||||
2. **Keep features atomic** - Each feature should be independently testable
|
|
||||||
3. **Progressive refinement** - Start broad, use `task-master expand` to break down complex tasks
|
|
||||||
4. **Use research mode** - `task-master parse-prd --research` leverages AI for better task generation
|
|
||||||
</task-master-integration>
|
|
||||||
@@ -383,12 +383,6 @@ task-master models --set-main=my-local-llama --ollama
|
|||||||
# Set a custom OpenRouter model for the research role
|
# Set a custom OpenRouter model for the research role
|
||||||
task-master models --set-research=google/gemini-pro --openrouter
|
task-master models --set-research=google/gemini-pro --openrouter
|
||||||
|
|
||||||
# Set Codex CLI model for the main role (uses ChatGPT subscription via OAuth)
|
|
||||||
task-master models --set-main=gpt-5-codex --codex-cli
|
|
||||||
|
|
||||||
# Set Codex CLI model for the fallback role
|
|
||||||
task-master models --set-fallback=gpt-5 --codex-cli
|
|
||||||
|
|
||||||
# Run interactive setup to configure models, including custom ones
|
# Run interactive setup to configure models, including custom ones
|
||||||
task-master models --setup
|
task-master models --setup
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -429,153 +429,3 @@ Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure c
|
|||||||
- Verify the deployment name matches your configuration exactly (case-sensitive)
|
- Verify the deployment name matches your configuration exactly (case-sensitive)
|
||||||
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
|
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
|
||||||
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
|
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
|
||||||
|
|
||||||
### Codex CLI Provider
|
|
||||||
|
|
||||||
The Codex CLI provider integrates Task Master with OpenAI's Codex CLI, allowing you to use ChatGPT subscription models via OAuth authentication.
|
|
||||||
|
|
||||||
1. **Prerequisites**:
|
|
||||||
- Node.js >= 18
|
|
||||||
- Codex CLI >= 0.42.0 (>= 0.44.0 recommended)
|
|
||||||
- ChatGPT subscription: Plus, Pro, Business, Edu, or Enterprise (for OAuth access to GPT-5 models)
|
|
||||||
|
|
||||||
2. **Installation**:
|
|
||||||
```bash
|
|
||||||
npm install -g @openai/codex
|
|
||||||
```
|
|
||||||
|
|
||||||
3. **Authentication** (OAuth - Primary Method):
|
|
||||||
```bash
|
|
||||||
codex login
|
|
||||||
```
|
|
||||||
This will open a browser window for OAuth authentication with your ChatGPT account. Once authenticated, Task Master will automatically use these credentials.
|
|
||||||
|
|
||||||
4. **Optional API Key Method**:
|
|
||||||
While OAuth is the primary and recommended authentication method, you can optionally set an OpenAI API key:
|
|
||||||
```bash
|
|
||||||
# In .env file
|
|
||||||
OPENAI_API_KEY=sk-your-openai-api-key-here
|
|
||||||
```
|
|
||||||
**Note**: The API key will only be injected if explicitly provided. OAuth is always preferred.
|
|
||||||
|
|
||||||
5. **Configuration**:
|
|
||||||
```json
|
|
||||||
// In .taskmaster/config.json
|
|
||||||
{
|
|
||||||
"models": {
|
|
||||||
"main": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5-codex",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
},
|
|
||||||
"fallback": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true,
|
|
||||||
"skipGitRepoCheck": true,
|
|
||||||
"approvalMode": "on-failure",
|
|
||||||
"sandboxMode": "workspace-write"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
6. **Available Models**:
|
|
||||||
- `gpt-5` - Latest GPT-5 model (272K max input, 128K max output)
|
|
||||||
- `gpt-5-codex` - GPT-5 optimized for agentic software engineering (272K max input, 128K max output)
|
|
||||||
|
|
||||||
7. **Codex CLI Settings (`codexCli` section)**:
|
|
||||||
|
|
||||||
The `codexCli` section in your configuration file supports the following options:
|
|
||||||
|
|
||||||
- **`allowNpx`** (boolean, default: `false`): Allow fallback to `npx @openai/codex` if CLI not found on PATH
|
|
||||||
- **`skipGitRepoCheck`** (boolean, default: `false`): Skip git repository safety check (recommended for CI/non-repo usage)
|
|
||||||
- **`approvalMode`** (string): Control command execution approval
|
|
||||||
- `"untrusted"`: Require approval for all commands
|
|
||||||
- `"on-failure"`: Only require approval after a command fails (default)
|
|
||||||
- `"on-request"`: Approve only when explicitly requested
|
|
||||||
- `"never"`: Never require approval (not recommended)
|
|
||||||
- **`sandboxMode`** (string): Control filesystem access
|
|
||||||
- `"read-only"`: Read-only access
|
|
||||||
- `"workspace-write"`: Allow writes to workspace (default)
|
|
||||||
- `"danger-full-access"`: Full filesystem access (use with caution)
|
|
||||||
- **`codexPath`** (string, optional): Custom path to codex CLI executable
|
|
||||||
- **`cwd`** (string, optional): Working directory for Codex CLI execution
|
|
||||||
- **`fullAuto`** (boolean, optional): Fully automatic mode (equivalent to `--full-auto` flag)
|
|
||||||
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional): Bypass all safety checks (dangerous!)
|
|
||||||
- **`color`** (string, optional): Color handling - `"always"`, `"never"`, or `"auto"`
|
|
||||||
- **`outputLastMessageFile`** (string, optional): Write last agent message to specified file
|
|
||||||
- **`verbose`** (boolean, optional): Enable verbose logging
|
|
||||||
- **`env`** (object, optional): Additional environment variables for Codex CLI
|
|
||||||
|
|
||||||
8. **Command-Specific Settings** (optional):
|
|
||||||
You can override settings for specific Task Master commands:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true,
|
|
||||||
"approvalMode": "on-failure",
|
|
||||||
"commandSpecific": {
|
|
||||||
"parse-prd": {
|
|
||||||
"approvalMode": "never",
|
|
||||||
"verbose": true
|
|
||||||
},
|
|
||||||
"expand": {
|
|
||||||
"sandboxMode": "read-only"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
9. **Codebase Features**:
|
|
||||||
The Codex CLI provider is codebase-capable, meaning it can analyze and interact with your project files. Codebase analysis features are automatically enabled when using `codex-cli` as your provider and `enableCodebaseAnalysis` is set to `true` in your global configuration (default).
|
|
||||||
|
|
||||||
10. **Setup Commands**:
|
|
||||||
```bash
|
|
||||||
# Set Codex CLI for main role
|
|
||||||
task-master models --set-main gpt-5-codex --codex-cli
|
|
||||||
|
|
||||||
# Set Codex CLI for fallback role
|
|
||||||
task-master models --set-fallback gpt-5 --codex-cli
|
|
||||||
|
|
||||||
# Verify configuration
|
|
||||||
task-master models
|
|
||||||
```
|
|
||||||
|
|
||||||
11. **Troubleshooting**:
|
|
||||||
|
|
||||||
**"codex: command not found" error:**
|
|
||||||
- Install Codex CLI globally: `npm install -g @openai/codex`
|
|
||||||
- Verify installation: `codex --version`
|
|
||||||
- Alternatively, enable `allowNpx: true` in your codexCli configuration
|
|
||||||
|
|
||||||
**"Not logged in" errors:**
|
|
||||||
- Run `codex login` to authenticate with your ChatGPT account
|
|
||||||
- Verify authentication status: `codex` (opens interactive CLI)
|
|
||||||
|
|
||||||
**"Old version" warnings:**
|
|
||||||
- Check version: `codex --version`
|
|
||||||
- Upgrade: `npm install -g @openai/codex@latest`
|
|
||||||
- Minimum version: 0.42.0, recommended: >= 0.44.0
|
|
||||||
|
|
||||||
**"Model not available" errors:**
|
|
||||||
- Only `gpt-5` and `gpt-5-codex` are available via OAuth subscription
|
|
||||||
- Verify your ChatGPT subscription is active
|
|
||||||
- For other OpenAI models, use the standard `openai` provider with an API key
|
|
||||||
|
|
||||||
**API key not being used:**
|
|
||||||
- API key is only injected when explicitly provided
|
|
||||||
- OAuth authentication is always preferred
|
|
||||||
- If you want to use an API key, ensure `OPENAI_API_KEY` is set in your `.env` file
|
|
||||||
|
|
||||||
12. **Important Notes**:
|
|
||||||
- OAuth subscription required for model access (no API key needed for basic operation)
|
|
||||||
- Limited to OAuth-available models only (`gpt-5` and `gpt-5-codex`)
|
|
||||||
- Pricing information is not available for OAuth models (shows as "Unknown" in cost calculations)
|
|
||||||
- See [Codex CLI Provider Documentation](./providers/codex-cli.md) for more details
|
|
||||||
|
|||||||
@@ -1,463 +0,0 @@
|
|||||||
# Codex CLI Provider Usage Examples
|
|
||||||
|
|
||||||
This guide provides practical examples of using Task Master with the Codex CLI provider.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
Before using these examples, ensure you have:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Codex CLI installed
|
|
||||||
npm install -g @openai/codex
|
|
||||||
|
|
||||||
# 2. Authenticated with ChatGPT
|
|
||||||
codex login
|
|
||||||
|
|
||||||
# 3. Codex CLI configured as your provider
|
|
||||||
task-master models --set-main gpt-5-codex --codex-cli
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example 1: Basic Task Creation
|
|
||||||
|
|
||||||
Use Codex CLI to create tasks from a simple description:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Add a task with AI-powered enhancement
|
|
||||||
task-master add-task --prompt="Implement user authentication with JWT" --research
|
|
||||||
```
|
|
||||||
|
|
||||||
**What happens**:
|
|
||||||
1. Task Master sends your prompt to GPT-5-Codex via the CLI
|
|
||||||
2. The AI analyzes your request and generates a detailed task
|
|
||||||
3. The task is added to your `.taskmaster/tasks/tasks.json`
|
|
||||||
4. OAuth credentials are automatically used (no API key needed)
|
|
||||||
|
|
||||||
## Example 2: Parsing a Product Requirements Document
|
|
||||||
|
|
||||||
Create a comprehensive task list from a PRD:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Create your PRD
|
|
||||||
cat > my-feature.txt <<EOF
|
|
||||||
# User Profile Feature
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
1. Users can view their profile
|
|
||||||
2. Users can edit their information
|
|
||||||
3. Profile pictures can be uploaded
|
|
||||||
4. Email verification required
|
|
||||||
|
|
||||||
## Technical Constraints
|
|
||||||
- Use React for frontend
|
|
||||||
- Node.js/Express backend
|
|
||||||
- PostgreSQL database
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Parse with Codex CLI
|
|
||||||
task-master parse-prd my-feature.txt --num-tasks 12
|
|
||||||
```
|
|
||||||
|
|
||||||
**What happens**:
|
|
||||||
1. GPT-5-Codex reads and analyzes your PRD
|
|
||||||
2. Generates structured tasks with dependencies
|
|
||||||
3. Creates subtasks for complex items
|
|
||||||
4. Saves everything to `.taskmaster/tasks/`
|
|
||||||
|
|
||||||
## Example 3: Expanding Tasks with Research
|
|
||||||
|
|
||||||
Break down a complex task into detailed subtasks:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# First, show your current tasks
|
|
||||||
task-master list
|
|
||||||
|
|
||||||
# Expand a specific task (e.g., task 1.2)
|
|
||||||
task-master expand --id=1.2 --research --force
|
|
||||||
```
|
|
||||||
|
|
||||||
**What happens**:
|
|
||||||
1. Codex CLI uses GPT-5 for research-level analysis
|
|
||||||
2. Breaks down the task into logical subtasks
|
|
||||||
3. Adds implementation details and test strategies
|
|
||||||
4. Updates the task with dependency information
|
|
||||||
|
|
||||||
## Example 4: Analyzing Project Complexity
|
|
||||||
|
|
||||||
Get AI-powered insights into your project's task complexity:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Analyze all tasks
|
|
||||||
task-master analyze-complexity --research
|
|
||||||
|
|
||||||
# View the complexity report
|
|
||||||
task-master complexity-report
|
|
||||||
```
|
|
||||||
|
|
||||||
**What happens**:
|
|
||||||
1. GPT-5 analyzes each task's scope and requirements
|
|
||||||
2. Assigns complexity scores and estimates subtask counts
|
|
||||||
3. Generates a detailed report
|
|
||||||
4. Saves to `.taskmaster/reports/task-complexity-report.json`
|
|
||||||
|
|
||||||
## Example 5: Using Custom Codex CLI Settings
|
|
||||||
|
|
||||||
Configure Codex CLI behavior for different commands:
|
|
||||||
|
|
||||||
```json
|
|
||||||
// In .taskmaster/config.json
|
|
||||||
{
|
|
||||||
"models": {
|
|
||||||
"main": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5-codex",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true,
|
|
||||||
"approvalMode": "on-failure",
|
|
||||||
"sandboxMode": "workspace-write",
|
|
||||||
"commandSpecific": {
|
|
||||||
"parse-prd": {
|
|
||||||
"verbose": true,
|
|
||||||
"approvalMode": "never"
|
|
||||||
},
|
|
||||||
"expand": {
|
|
||||||
"sandboxMode": "read-only",
|
|
||||||
"verbose": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Now parse-prd runs with verbose output and no approvals
|
|
||||||
task-master parse-prd requirements.txt
|
|
||||||
|
|
||||||
# Expand runs with read-only mode
|
|
||||||
task-master expand --id=2.1
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example 6: Workflow - Building a Feature End-to-End
|
|
||||||
|
|
||||||
Complete workflow from PRD to implementation tracking:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Step 1: Initialize project
|
|
||||||
task-master init
|
|
||||||
|
|
||||||
# Step 2: Set up Codex CLI
|
|
||||||
task-master models --set-main gpt-5-codex --codex-cli
|
|
||||||
task-master models --set-fallback gpt-5 --codex-cli
|
|
||||||
|
|
||||||
# Step 3: Create PRD
|
|
||||||
cat > feature-prd.txt <<EOF
|
|
||||||
# Authentication System
|
|
||||||
|
|
||||||
Implement a complete authentication system with:
|
|
||||||
- User registration
|
|
||||||
- Email verification
|
|
||||||
- Password reset
|
|
||||||
- Two-factor authentication
|
|
||||||
- Session management
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Step 4: Parse PRD into tasks
|
|
||||||
task-master parse-prd feature-prd.txt --num-tasks 8
|
|
||||||
|
|
||||||
# Step 5: Analyze complexity
|
|
||||||
task-master analyze-complexity --research
|
|
||||||
|
|
||||||
# Step 6: Expand complex tasks
|
|
||||||
task-master expand --all --research
|
|
||||||
|
|
||||||
# Step 7: Start working
|
|
||||||
task-master next
|
|
||||||
# Shows: Task 1.1: User registration database schema
|
|
||||||
|
|
||||||
# Step 8: Mark completed as you work
|
|
||||||
task-master set-status --id=1.1 --status=done
|
|
||||||
|
|
||||||
# Step 9: Continue to next task
|
|
||||||
task-master next
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example 7: Multi-Role Configuration
|
|
||||||
|
|
||||||
Use Codex CLI for main tasks, Perplexity for research:
|
|
||||||
|
|
||||||
```json
|
|
||||||
// In .taskmaster/config.json
|
|
||||||
{
|
|
||||||
"models": {
|
|
||||||
"main": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5-codex",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
},
|
|
||||||
"research": {
|
|
||||||
"provider": "perplexity",
|
|
||||||
"modelId": "sonar-pro",
|
|
||||||
"maxTokens": 8700,
|
|
||||||
"temperature": 0.1
|
|
||||||
},
|
|
||||||
"fallback": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Main task operations use GPT-5-Codex
|
|
||||||
task-master add-task --prompt="Build REST API endpoint"
|
|
||||||
|
|
||||||
# Research operations use Perplexity
|
|
||||||
task-master analyze-complexity --research
|
|
||||||
|
|
||||||
# Fallback to GPT-5 if needed
|
|
||||||
task-master expand --id=3.2 --force
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example 8: Troubleshooting Common Issues
|
|
||||||
|
|
||||||
### Issue: Codex CLI not found
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check if Codex is installed
|
|
||||||
codex --version
|
|
||||||
|
|
||||||
# If not found, install globally
|
|
||||||
npm install -g @openai/codex
|
|
||||||
|
|
||||||
# Or enable npx fallback in config
|
|
||||||
cat >> .taskmaster/config.json <<EOF
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
### Issue: Not authenticated
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check auth status
|
|
||||||
codex
|
|
||||||
# Use /about command to see auth info
|
|
||||||
|
|
||||||
# Re-authenticate if needed
|
|
||||||
codex login
|
|
||||||
```
|
|
||||||
|
|
||||||
### Issue: Want more verbose output
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Enable verbose mode in config
|
|
||||||
cat >> .taskmaster/config.json <<EOF
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"verbose": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Or for specific commands
|
|
||||||
task-master parse-prd my-prd.txt
|
|
||||||
# (verbose output shows detailed Codex CLI interactions)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example 9: CI/CD Integration
|
|
||||||
|
|
||||||
Use Codex CLI in automated workflows:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# .github/workflows/task-analysis.yml
|
|
||||||
name: Analyze Task Complexity
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
paths:
|
|
||||||
- '.taskmaster/**'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
analyze:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Setup Node.js
|
|
||||||
uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: '20'
|
|
||||||
|
|
||||||
- name: Install Task Master
|
|
||||||
run: npm install -g task-master-ai
|
|
||||||
|
|
||||||
- name: Configure Codex CLI
|
|
||||||
run: |
|
|
||||||
npm install -g @openai/codex
|
|
||||||
echo "${{ secrets.OPENAI_CODEX_API_KEY }}" > ~/.codex-auth
|
|
||||||
env:
|
|
||||||
OPENAI_CODEX_API_KEY: ${{ secrets.OPENAI_CODEX_API_KEY }}
|
|
||||||
|
|
||||||
- name: Configure Task Master
|
|
||||||
run: |
|
|
||||||
cat > .taskmaster/config.json <<EOF
|
|
||||||
{
|
|
||||||
"models": {
|
|
||||||
"main": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true,
|
|
||||||
"skipGitRepoCheck": true,
|
|
||||||
"approvalMode": "never",
|
|
||||||
"fullAuto": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
- name: Analyze Complexity
|
|
||||||
run: task-master analyze-complexity --research
|
|
||||||
|
|
||||||
- name: Upload Report
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: complexity-report
|
|
||||||
path: .taskmaster/reports/task-complexity-report.json
|
|
||||||
```
|
|
||||||
|
|
||||||
## Best Practices
|
|
||||||
|
|
||||||
### 1. Use OAuth for Development
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# For local development, use OAuth (no API key needed)
|
|
||||||
codex login
|
|
||||||
task-master models --set-main gpt-5-codex --codex-cli
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Configure Approval Modes Appropriately
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"approvalMode": "on-failure", // Safe default
|
|
||||||
"sandboxMode": "workspace-write" // Restricts to project directory
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Use Command-Specific Settings
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"commandSpecific": {
|
|
||||||
"parse-prd": {
|
|
||||||
"approvalMode": "never", // PRD parsing is safe
|
|
||||||
"verbose": true
|
|
||||||
},
|
|
||||||
"expand": {
|
|
||||||
"approvalMode": "on-request", // More cautious for task expansion
|
|
||||||
"verbose": false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Leverage Codebase Analysis
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"global": {
|
|
||||||
"enableCodebaseAnalysis": true // Let Codex analyze your code
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 5. Handle Errors Gracefully
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Always configure a fallback model
|
|
||||||
task-master models --set-fallback gpt-5 --codex-cli
|
|
||||||
|
|
||||||
# Or use a different provider as fallback
|
|
||||||
task-master models --set-fallback claude-3-5-sonnet
|
|
||||||
```
|
|
||||||
|
|
||||||
## Next Steps
|
|
||||||
|
|
||||||
- Read the [Codex CLI Provider Documentation](../providers/codex-cli.md)
|
|
||||||
- Explore [Configuration Options](../configuration.md#codex-cli-provider)
|
|
||||||
- Check out [Command Reference](../command-reference.md)
|
|
||||||
- Learn about [Task Structure](../task-structure.md)
|
|
||||||
|
|
||||||
## Common Patterns
|
|
||||||
|
|
||||||
### Pattern: Daily Development Workflow
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Morning: Review tasks
|
|
||||||
task-master list
|
|
||||||
|
|
||||||
# Get next task
|
|
||||||
task-master next
|
|
||||||
|
|
||||||
# Work on task...
|
|
||||||
|
|
||||||
# Update task with notes
|
|
||||||
task-master update-subtask --id=2.3 --prompt="Implemented authentication middleware"
|
|
||||||
|
|
||||||
# Mark complete
|
|
||||||
task-master set-status --id=2.3 --status=done
|
|
||||||
|
|
||||||
# Repeat
|
|
||||||
```
|
|
||||||
|
|
||||||
### Pattern: Feature Planning
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Write feature spec
|
|
||||||
vim new-feature.txt
|
|
||||||
|
|
||||||
# Generate tasks
|
|
||||||
task-master parse-prd new-feature.txt --num-tasks 10
|
|
||||||
|
|
||||||
# Analyze and expand
|
|
||||||
task-master analyze-complexity --research
|
|
||||||
task-master expand --all --research --force
|
|
||||||
|
|
||||||
# Review and adjust
|
|
||||||
task-master list
|
|
||||||
```
|
|
||||||
|
|
||||||
### Pattern: Sprint Planning
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Parse sprint requirements
|
|
||||||
task-master parse-prd sprint-requirements.txt
|
|
||||||
|
|
||||||
# Analyze complexity
|
|
||||||
task-master analyze-complexity --research
|
|
||||||
|
|
||||||
# View report
|
|
||||||
task-master complexity-report
|
|
||||||
|
|
||||||
# Adjust task estimates based on complexity scores
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
For more examples and advanced usage, see the [full documentation](https://docs.task-master.dev).
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
# Available Models as of October 5, 2025
|
# Available Models as of September 23, 2025
|
||||||
|
|
||||||
## Main Models
|
## Main Models
|
||||||
|
|
||||||
@@ -10,8 +10,6 @@
|
|||||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||||
| claude-code | opus | 0.725 | 0 | 0 |
|
| claude-code | opus | 0.725 | 0 | 0 |
|
||||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||||
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
|
||||||
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
|
||||||
| mcp | mcp-sampling | — | 0 | 0 |
|
| mcp | mcp-sampling | — | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||||
@@ -102,8 +100,6 @@
|
|||||||
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
|
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
|
||||||
| claude-code | opus | 0.725 | 0 | 0 |
|
| claude-code | opus | 0.725 | 0 | 0 |
|
||||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||||
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
|
||||||
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
|
||||||
| mcp | mcp-sampling | — | 0 | 0 |
|
| mcp | mcp-sampling | — | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||||
@@ -144,8 +140,6 @@
|
|||||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||||
| claude-code | opus | 0.725 | 0 | 0 |
|
| claude-code | opus | 0.725 | 0 | 0 |
|
||||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||||
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
|
||||||
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
|
||||||
| mcp | mcp-sampling | — | 0 | 0 |
|
| mcp | mcp-sampling | — | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||||
|
|||||||
@@ -1,510 +0,0 @@
|
|||||||
# Codex CLI Provider
|
|
||||||
|
|
||||||
The `codex-cli` provider integrates Task Master with OpenAI's Codex CLI via the community AI SDK provider [`ai-sdk-provider-codex-cli`](https://github.com/ben-vargas/ai-sdk-provider-codex-cli). It uses your ChatGPT subscription (OAuth) via `codex login`, with optional `OPENAI_CODEX_API_KEY` support.
|
|
||||||
|
|
||||||
## Why Use Codex CLI?
|
|
||||||
|
|
||||||
The primary benefits of using the `codex-cli` provider include:
|
|
||||||
|
|
||||||
- **Use Latest OpenAI Models**: Access to cutting-edge models like GPT-5 and GPT-5-Codex via ChatGPT subscription
|
|
||||||
- **OAuth Authentication**: No API key management needed - authenticate once with `codex login`
|
|
||||||
- **Built-in Tool Execution**: Native support for command execution, file changes, MCP tools, and web search
|
|
||||||
- **Native JSON Schema Support**: Structured output generation without post-processing
|
|
||||||
- **Approval/Sandbox Modes**: Fine-grained control over command execution and filesystem access for safety
|
|
||||||
|
|
||||||
## Quickstart
|
|
||||||
|
|
||||||
Get up and running with Codex CLI in 3 steps:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Install Codex CLI globally
|
|
||||||
npm install -g @openai/codex
|
|
||||||
|
|
||||||
# 2. Authenticate with your ChatGPT account
|
|
||||||
codex login
|
|
||||||
|
|
||||||
# 3. Configure Task Master to use Codex CLI
|
|
||||||
task-master models --set-main gpt-5-codex --codex-cli
|
|
||||||
```
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
- **Node.js**: >= 18.0.0
|
|
||||||
- **Codex CLI**: >= 0.42.0 (>= 0.44.0 recommended)
|
|
||||||
- **ChatGPT Subscription**: Required for OAuth access (Plus, Pro, Business, Edu, or Enterprise)
|
|
||||||
- **Task Master**: >= 0.27.3 (version with Codex CLI support)
|
|
||||||
|
|
||||||
### Checking Your Versions
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check Node.js version
|
|
||||||
node --version
|
|
||||||
|
|
||||||
# Check Codex CLI version
|
|
||||||
codex --version
|
|
||||||
|
|
||||||
# Check Task Master version
|
|
||||||
task-master --version
|
|
||||||
```
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
### Install Codex CLI
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Install globally via npm
|
|
||||||
npm install -g @openai/codex
|
|
||||||
|
|
||||||
# Verify installation
|
|
||||||
codex --version
|
|
||||||
```
|
|
||||||
|
|
||||||
Expected output: `v0.44.0` or higher
|
|
||||||
|
|
||||||
### Install Task Master (if not already installed)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Install globally
|
|
||||||
npm install -g task-master-ai
|
|
||||||
|
|
||||||
# Or install in your project
|
|
||||||
npm install --save-dev task-master-ai
|
|
||||||
```
|
|
||||||
|
|
||||||
## Authentication
|
|
||||||
|
|
||||||
### OAuth Authentication (Primary Method - Recommended)
|
|
||||||
|
|
||||||
The Codex CLI provider is designed to use OAuth authentication with your ChatGPT subscription:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Launch Codex CLI and authenticate
|
|
||||||
codex login
|
|
||||||
```
|
|
||||||
|
|
||||||
This will:
|
|
||||||
1. Open a browser window for OAuth authentication
|
|
||||||
2. Prompt you to log in with your ChatGPT account
|
|
||||||
3. Store authentication credentials locally
|
|
||||||
4. Allow Task Master to automatically use these credentials
|
|
||||||
|
|
||||||
To verify your authentication:
|
|
||||||
```bash
|
|
||||||
# Open interactive Codex CLI
|
|
||||||
codex
|
|
||||||
|
|
||||||
# Use /about command to see auth status
|
|
||||||
/about
|
|
||||||
```
|
|
||||||
|
|
||||||
### Optional: API Key Method
|
|
||||||
|
|
||||||
While OAuth is the primary and recommended method, you can optionally use an OpenAI API key:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# In your .env file
|
|
||||||
OPENAI_CODEX_API_KEY=sk-your-openai-api-key-here
|
|
||||||
```
|
|
||||||
|
|
||||||
**Important Notes**:
|
|
||||||
- The API key will **only** be injected when explicitly provided
|
|
||||||
- OAuth authentication is always preferred when available
|
|
||||||
- Using an API key doesn't provide access to subscription-only models like GPT-5-Codex
|
|
||||||
- For full OpenAI API access with non-subscription models, consider using the standard `openai` provider instead
|
|
||||||
- `OPENAI_CODEX_API_KEY` is specific to the codex-cli provider to avoid conflicts with the `openai` provider's `OPENAI_API_KEY`
|
|
||||||
|
|
||||||
## Available Models
|
|
||||||
|
|
||||||
The Codex CLI provider supports only models available through ChatGPT subscription:
|
|
||||||
|
|
||||||
| Model ID | Description | Max Input Tokens | Max Output Tokens |
|
|
||||||
|----------|-------------|------------------|-------------------|
|
|
||||||
| `gpt-5` | Latest GPT-5 model | 272K | 128K |
|
|
||||||
| `gpt-5-codex` | GPT-5 optimized for agentic software engineering | 272K | 128K |
|
|
||||||
|
|
||||||
**Note**: These models are only available via OAuth subscription through Codex CLI (ChatGPT Plus, Pro, Business, Edu, or Enterprise plans). For other OpenAI models, use the standard `openai` provider with an API key.
|
|
||||||
|
|
||||||
**Research Capabilities**: Both GPT-5 models support web search tools, making them suitable for the `research` role in addition to `main` and `fallback` roles.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
### Basic Configuration
|
|
||||||
|
|
||||||
Add Codex CLI to your `.taskmaster/config.json`:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"models": {
|
|
||||||
"main": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5-codex",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
},
|
|
||||||
"fallback": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Advanced Configuration with Codex CLI Settings
|
|
||||||
|
|
||||||
The `codexCli` section allows you to customize Codex CLI behavior:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"models": {
|
|
||||||
"main": {
|
|
||||||
"provider": "codex-cli",
|
|
||||||
"modelId": "gpt-5-codex",
|
|
||||||
"maxTokens": 128000,
|
|
||||||
"temperature": 0.2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true,
|
|
||||||
"skipGitRepoCheck": true,
|
|
||||||
"approvalMode": "on-failure",
|
|
||||||
"sandboxMode": "workspace-write",
|
|
||||||
"verbose": false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Codex CLI Settings Reference
|
|
||||||
|
|
||||||
#### Core Settings
|
|
||||||
|
|
||||||
- **`allowNpx`** (boolean, default: `false`)
|
|
||||||
- Allow fallback to `npx @openai/codex` if the CLI is not found on PATH
|
|
||||||
- Useful for CI environments or systems without global npm installations
|
|
||||||
- Example: `"allowNpx": true`
|
|
||||||
|
|
||||||
- **`skipGitRepoCheck`** (boolean, default: `false`)
|
|
||||||
- Skip git repository safety check before execution
|
|
||||||
- Recommended for CI environments or non-repository usage
|
|
||||||
- Example: `"skipGitRepoCheck": true`
|
|
||||||
|
|
||||||
#### Execution Control
|
|
||||||
|
|
||||||
- **`approvalMode`** (string)
|
|
||||||
- Controls when to require user approval for command execution
|
|
||||||
- Options:
|
|
||||||
- `"untrusted"`: Require approval for all commands
|
|
||||||
- `"on-failure"`: Only require approval after a command fails (default)
|
|
||||||
- `"on-request"`: Approve only when explicitly requested
|
|
||||||
- `"never"`: Never require approval (use with caution)
|
|
||||||
- Example: `"approvalMode": "on-failure"`
|
|
||||||
|
|
||||||
- **`sandboxMode`** (string)
|
|
||||||
- Controls filesystem access permissions
|
|
||||||
- Options:
|
|
||||||
- `"read-only"`: Read-only access to filesystem
|
|
||||||
- `"workspace-write"`: Allow writes to workspace directory (default)
|
|
||||||
- `"danger-full-access"`: Full filesystem access (use with extreme caution)
|
|
||||||
- Example: `"sandboxMode": "workspace-write"`
|
|
||||||
|
|
||||||
#### Path and Environment
|
|
||||||
|
|
||||||
- **`codexPath`** (string, optional)
|
|
||||||
- Custom path to Codex CLI executable
|
|
||||||
- Useful when Codex is installed in a non-standard location
|
|
||||||
- Example: `"codexPath": "/usr/local/bin/codex"`
|
|
||||||
|
|
||||||
- **`cwd`** (string, optional)
|
|
||||||
- Working directory for Codex CLI execution
|
|
||||||
- Defaults to current working directory
|
|
||||||
- Example: `"cwd": "/path/to/project"`
|
|
||||||
|
|
||||||
- **`env`** (object, optional)
|
|
||||||
- Additional environment variables for Codex CLI
|
|
||||||
- Example: `"env": { "DEBUG": "true" }`
|
|
||||||
|
|
||||||
#### Advanced Settings
|
|
||||||
|
|
||||||
- **`fullAuto`** (boolean, optional)
|
|
||||||
- Fully automatic mode (equivalent to `--full-auto` flag)
|
|
||||||
- Bypasses most approvals for fully automated workflows
|
|
||||||
- Example: `"fullAuto": true`
|
|
||||||
|
|
||||||
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional)
|
|
||||||
- Bypass all safety checks including approvals and sandbox
|
|
||||||
- **WARNING**: Use with extreme caution - can execute arbitrary code
|
|
||||||
- Example: `"dangerouslyBypassApprovalsAndSandbox": false`
|
|
||||||
|
|
||||||
- **`color`** (string, optional)
|
|
||||||
- Force color handling in Codex CLI output
|
|
||||||
- Options: `"always"`, `"never"`, `"auto"`
|
|
||||||
- Example: `"color": "auto"`
|
|
||||||
|
|
||||||
- **`outputLastMessageFile`** (string, optional)
|
|
||||||
- Write last agent message to specified file
|
|
||||||
- Useful for debugging or logging
|
|
||||||
- Example: `"outputLastMessageFile": "./last-message.txt"`
|
|
||||||
|
|
||||||
- **`verbose`** (boolean, optional)
|
|
||||||
- Enable verbose provider logging
|
|
||||||
- Helpful for debugging issues
|
|
||||||
- Example: `"verbose": true`
|
|
||||||
|
|
||||||
### Command-Specific Settings
|
|
||||||
|
|
||||||
Override settings for specific Task Master commands:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true,
|
|
||||||
"approvalMode": "on-failure",
|
|
||||||
"commandSpecific": {
|
|
||||||
"parse-prd": {
|
|
||||||
"approvalMode": "never",
|
|
||||||
"verbose": true
|
|
||||||
},
|
|
||||||
"expand": {
|
|
||||||
"sandboxMode": "read-only"
|
|
||||||
},
|
|
||||||
"add-task": {
|
|
||||||
"approvalMode": "untrusted"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
### Setting Codex CLI Models
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Set Codex CLI for main role
|
|
||||||
task-master models --set-main gpt-5-codex --codex-cli
|
|
||||||
|
|
||||||
# Set Codex CLI for fallback role
|
|
||||||
task-master models --set-fallback gpt-5 --codex-cli
|
|
||||||
|
|
||||||
# Set Codex CLI for research role
|
|
||||||
task-master models --set-research gpt-5 --codex-cli
|
|
||||||
|
|
||||||
# Verify configuration
|
|
||||||
task-master models
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using Codex CLI with Task Master Commands
|
|
||||||
|
|
||||||
Once configured, use Task Master commands as normal:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Parse a PRD with Codex CLI
|
|
||||||
task-master parse-prd my-requirements.txt
|
|
||||||
|
|
||||||
# Analyze project complexity
|
|
||||||
task-master analyze-complexity --research
|
|
||||||
|
|
||||||
# Expand a task into subtasks
|
|
||||||
task-master expand --id=1.2
|
|
||||||
|
|
||||||
# Add a new task with AI assistance
|
|
||||||
task-master add-task --prompt="Implement user authentication" --research
|
|
||||||
```
|
|
||||||
|
|
||||||
The provider will automatically use your OAuth credentials when Codex CLI is configured.
|
|
||||||
|
|
||||||
## Codebase Features
|
|
||||||
|
|
||||||
The Codex CLI provider is **codebase-capable**, meaning it can analyze and interact with your project files. This enables advanced features like:
|
|
||||||
|
|
||||||
- **Code Analysis**: Understanding your project structure and dependencies
|
|
||||||
- **Intelligent Suggestions**: Context-aware task recommendations
|
|
||||||
- **File Operations**: Reading and analyzing project files for better task generation
|
|
||||||
- **Pattern Recognition**: Identifying common patterns and best practices in your codebase
|
|
||||||
|
|
||||||
### Enabling Codebase Analysis
|
|
||||||
|
|
||||||
Codebase analysis is automatically enabled when:
|
|
||||||
1. Your provider is set to `codex-cli`
|
|
||||||
2. `enableCodebaseAnalysis` is `true` in your global configuration (default)
|
|
||||||
|
|
||||||
To verify or configure:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"global": {
|
|
||||||
"enableCodebaseAnalysis": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### "codex: command not found" Error
|
|
||||||
|
|
||||||
**Symptoms**: Task Master reports that the Codex CLI is not found.
|
|
||||||
|
|
||||||
**Solutions**:
|
|
||||||
1. **Install Codex CLI globally**:
|
|
||||||
```bash
|
|
||||||
npm install -g @openai/codex
|
|
||||||
```
|
|
||||||
|
|
||||||
2. **Verify installation**:
|
|
||||||
```bash
|
|
||||||
codex --version
|
|
||||||
```
|
|
||||||
|
|
||||||
3. **Alternative: Enable npx fallback**:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"allowNpx": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### "Not logged in" Errors
|
|
||||||
|
|
||||||
**Symptoms**: Authentication errors when trying to use Codex CLI.
|
|
||||||
|
|
||||||
**Solutions**:
|
|
||||||
1. **Authenticate with OAuth**:
|
|
||||||
```bash
|
|
||||||
codex login
|
|
||||||
```
|
|
||||||
|
|
||||||
2. **Verify authentication status**:
|
|
||||||
```bash
|
|
||||||
codex
|
|
||||||
# Then use /about command
|
|
||||||
```
|
|
||||||
|
|
||||||
3. **Re-authenticate if needed**:
|
|
||||||
```bash
|
|
||||||
# Logout first
|
|
||||||
codex
|
|
||||||
# Use /auth command to change auth method
|
|
||||||
|
|
||||||
# Then login again
|
|
||||||
codex login
|
|
||||||
```
|
|
||||||
|
|
||||||
### "Old version" Warnings
|
|
||||||
|
|
||||||
**Symptoms**: Warnings about Codex CLI version being outdated.
|
|
||||||
|
|
||||||
**Solutions**:
|
|
||||||
1. **Check current version**:
|
|
||||||
```bash
|
|
||||||
codex --version
|
|
||||||
```
|
|
||||||
|
|
||||||
2. **Upgrade to latest version**:
|
|
||||||
```bash
|
|
||||||
npm install -g @openai/codex@latest
|
|
||||||
```
|
|
||||||
|
|
||||||
3. **Verify upgrade**:
|
|
||||||
```bash
|
|
||||||
codex --version
|
|
||||||
```
|
|
||||||
Should show >= 0.44.0
|
|
||||||
|
|
||||||
### "Model not available" Errors
|
|
||||||
|
|
||||||
**Symptoms**: Error indicating the requested model is not available.
|
|
||||||
|
|
||||||
**Causes and Solutions**:
|
|
||||||
|
|
||||||
1. **Using unsupported model**:
|
|
||||||
- Only `gpt-5` and `gpt-5-codex` are available via Codex CLI
|
|
||||||
- For other OpenAI models, use the standard `openai` provider
|
|
||||||
|
|
||||||
2. **Subscription not active**:
|
|
||||||
- Verify your ChatGPT subscription is active
|
|
||||||
   - Check subscription status in your ChatGPT account settings at <https://chatgpt.com>
|
|
||||||
|
|
||||||
3. **Wrong provider selected**:
|
|
||||||
- Verify you're using `--codex-cli` flag when setting models
|
|
||||||
- Check `.taskmaster/config.json` shows `"provider": "codex-cli"`
|
|
||||||
|
|
||||||
### API Key Not Being Used
|
|
||||||
|
|
||||||
**Symptoms**: You've set `OPENAI_CODEX_API_KEY` but it's not being used.
|
|
||||||
|
|
||||||
**Expected Behavior**:
|
|
||||||
- OAuth authentication is always preferred
|
|
||||||
- API key is only injected when explicitly provided
|
|
||||||
- API key doesn't grant access to subscription-only models
|
|
||||||
|
|
||||||
**Solutions**:
|
|
||||||
1. **Verify OAuth is working**:
|
|
||||||
```bash
|
|
||||||
codex
|
|
||||||
# Check /about for auth status
|
|
||||||
```
|
|
||||||
|
|
||||||
2. **If you want to force API key usage**:
|
|
||||||
- This is not recommended with Codex CLI
|
|
||||||
- Consider using the standard `openai` provider instead
|
|
||||||
|
|
||||||
3. **Verify .env file is being loaded**:
|
|
||||||
```bash
|
|
||||||
# Check if .env exists in project root
|
|
||||||
ls -la .env
|
|
||||||
|
|
||||||
# Verify OPENAI_CODEX_API_KEY is set
|
|
||||||
grep OPENAI_CODEX_API_KEY .env
|
|
||||||
```
|
|
||||||
|
|
||||||
### Approval/Sandbox Issues
|
|
||||||
|
|
||||||
**Symptoms**: Commands are blocked or filesystem access is denied.
|
|
||||||
|
|
||||||
**Solutions**:
|
|
||||||
|
|
||||||
1. **Adjust approval mode**:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"approvalMode": "on-request"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
2. **Adjust sandbox mode**:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"sandboxMode": "workspace-write"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
3. **For fully automated workflows** (use cautiously):
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"codexCli": {
|
|
||||||
"fullAuto": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Important Notes
|
|
||||||
|
|
||||||
- **OAuth subscription required**: No API key needed for basic operation, but requires active ChatGPT subscription
|
|
||||||
- **Limited model selection**: Only `gpt-5` and `gpt-5-codex` available via OAuth
|
|
||||||
- **Pricing information**: Not available for OAuth models (shows as "Unknown" in cost calculations)
|
|
||||||
- **No automatic dependency**: The `@openai/codex` package is not added to Task Master's dependencies - install it globally or enable `allowNpx`
|
|
||||||
- **Codebase analysis**: Automatically enabled when using `codex-cli` provider
|
|
||||||
- **Safety first**: Default settings prioritize safety with `approvalMode: "on-failure"` and `sandboxMode: "workspace-write"`
|
|
||||||
|
|
||||||
## See Also
|
|
||||||
|
|
||||||
- [Configuration Guide](../configuration.md#codex-cli-provider) - Complete Codex CLI configuration reference
|
|
||||||
- [Command Reference](../command-reference.md) - Using `--codex-cli` flag with commands
|
|
||||||
- [Gemini CLI Provider](./gemini-cli.md) - Similar CLI-based provider for Google Gemini
|
|
||||||
- [Claude Code Integration](../claude-code-integration.md) - Another CLI-based provider
|
|
||||||
- [ai-sdk-provider-codex-cli](https://github.com/ben-vargas/ai-sdk-provider-codex-cli) - Source code for the provider package
|
|
||||||
@@ -69,29 +69,11 @@ export function resolveTasksPath(args, log = silentLogger) {
|
|||||||
|
|
||||||
// Use core findTasksPath with explicit path and normalized projectRoot context
|
// Use core findTasksPath with explicit path and normalized projectRoot context
|
||||||
if (projectRoot) {
|
if (projectRoot) {
|
||||||
const foundPath = coreFindTasksPath(explicitPath, { projectRoot }, log);
|
return coreFindTasksPath(explicitPath, { projectRoot }, log);
|
||||||
// If core function returns null and no explicit path was provided,
|
|
||||||
// construct the expected default path as documented
|
|
||||||
if (foundPath === null && !explicitPath) {
|
|
||||||
const defaultPath = path.join(
|
|
||||||
projectRoot,
|
|
||||||
'.taskmaster',
|
|
||||||
'tasks',
|
|
||||||
'tasks.json'
|
|
||||||
);
|
|
||||||
log?.info?.(
|
|
||||||
`Core findTasksPath returned null, using default path: ${defaultPath}`
|
|
||||||
);
|
|
||||||
return defaultPath;
|
|
||||||
}
|
|
||||||
return foundPath;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback to core function without projectRoot context
|
// Fallback to core function without projectRoot context
|
||||||
const foundPath = coreFindTasksPath(explicitPath, null, log);
|
return coreFindTasksPath(explicitPath, null, log);
|
||||||
// Note: When no projectRoot is available, we can't construct a default path
|
|
||||||
// so we return null and let the calling code handle the error
|
|
||||||
return foundPath;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
34
output.txt
34
output.txt
File diff suppressed because one or more lines are too long
6711
package-lock.json
generated
6711
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "task-master-ai",
|
"name": "task-master-ai",
|
||||||
"version": "0.28.0-rc.2",
|
"version": "0.27.2",
|
||||||
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
|
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
|
||||||
"main": "index.js",
|
"main": "index.js",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
@@ -71,7 +71,6 @@
|
|||||||
"@supabase/supabase-js": "^2.57.4",
|
"@supabase/supabase-js": "^2.57.4",
|
||||||
"ai": "^5.0.51",
|
"ai": "^5.0.51",
|
||||||
"ai-sdk-provider-claude-code": "^1.1.4",
|
"ai-sdk-provider-claude-code": "^1.1.4",
|
||||||
"ai-sdk-provider-codex-cli": "^0.3.0",
|
|
||||||
"ai-sdk-provider-gemini-cli": "^1.1.1",
|
"ai-sdk-provider-gemini-cli": "^1.1.1",
|
||||||
"ajv": "^8.17.1",
|
"ajv": "^8.17.1",
|
||||||
"ajv-formats": "^3.0.1",
|
"ajv-formats": "^3.0.1",
|
||||||
|
|||||||
@@ -33,9 +33,6 @@ export class TaskEntity implements Task {
|
|||||||
tags?: string[];
|
tags?: string[];
|
||||||
assignee?: string;
|
assignee?: string;
|
||||||
complexity?: Task['complexity'];
|
complexity?: Task['complexity'];
|
||||||
recommendedSubtasks?: number;
|
|
||||||
expansionPrompt?: string;
|
|
||||||
complexityReasoning?: string;
|
|
||||||
|
|
||||||
constructor(data: Task | (Omit<Task, 'id'> & { id: number | string })) {
|
constructor(data: Task | (Omit<Task, 'id'> & { id: number | string })) {
|
||||||
this.validate(data);
|
this.validate(data);
|
||||||
@@ -53,7 +50,7 @@ export class TaskEntity implements Task {
|
|||||||
// Normalize subtask IDs to strings
|
// Normalize subtask IDs to strings
|
||||||
this.subtasks = (data.subtasks || []).map((subtask) => ({
|
this.subtasks = (data.subtasks || []).map((subtask) => ({
|
||||||
...subtask,
|
...subtask,
|
||||||
id: String(subtask.id),
|
id: Number(subtask.id), // Keep subtask IDs as numbers per interface
|
||||||
parentId: String(subtask.parentId)
|
parentId: String(subtask.parentId)
|
||||||
}));
|
}));
|
||||||
|
|
||||||
@@ -65,9 +62,6 @@ export class TaskEntity implements Task {
|
|||||||
this.tags = data.tags;
|
this.tags = data.tags;
|
||||||
this.assignee = data.assignee;
|
this.assignee = data.assignee;
|
||||||
this.complexity = data.complexity;
|
this.complexity = data.complexity;
|
||||||
this.recommendedSubtasks = data.recommendedSubtasks;
|
|
||||||
this.expansionPrompt = data.expansionPrompt;
|
|
||||||
this.complexityReasoning = data.complexityReasoning;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -252,10 +246,7 @@ export class TaskEntity implements Task {
|
|||||||
actualEffort: this.actualEffort,
|
actualEffort: this.actualEffort,
|
||||||
tags: this.tags,
|
tags: this.tags,
|
||||||
assignee: this.assignee,
|
assignee: this.assignee,
|
||||||
complexity: this.complexity,
|
complexity: this.complexity
|
||||||
recommendedSubtasks: this.recommendedSubtasks,
|
|
||||||
expansionPrompt: this.expansionPrompt,
|
|
||||||
complexityReasoning: this.complexityReasoning
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -51,8 +51,7 @@ export const ERROR_CODES = {
|
|||||||
INTERNAL_ERROR: 'INTERNAL_ERROR',
|
INTERNAL_ERROR: 'INTERNAL_ERROR',
|
||||||
INVALID_INPUT: 'INVALID_INPUT',
|
INVALID_INPUT: 'INVALID_INPUT',
|
||||||
NOT_IMPLEMENTED: 'NOT_IMPLEMENTED',
|
NOT_IMPLEMENTED: 'NOT_IMPLEMENTED',
|
||||||
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
|
UNKNOWN_ERROR: 'UNKNOWN_ERROR'
|
||||||
NOT_FOUND: 'NOT_FOUND'
|
|
||||||
} as const;
|
} as const;
|
||||||
|
|
||||||
export type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];
|
export type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];
|
||||||
|
|||||||
@@ -11,9 +11,7 @@ export {
|
|||||||
type ListTasksResult,
|
type ListTasksResult,
|
||||||
type StartTaskOptions,
|
type StartTaskOptions,
|
||||||
type StartTaskResult,
|
type StartTaskResult,
|
||||||
type ConflictCheckResult,
|
type ConflictCheckResult
|
||||||
type ExportTasksOptions,
|
|
||||||
type ExportResult
|
|
||||||
} from './task-master-core.js';
|
} from './task-master-core.js';
|
||||||
|
|
||||||
// Re-export types
|
// Re-export types
|
||||||
@@ -63,12 +61,3 @@ export { getLogger, createLogger, setGlobalLogger } from './logger/index.js';
|
|||||||
|
|
||||||
// Re-export executors
|
// Re-export executors
|
||||||
export * from './executors/index.js';
|
export * from './executors/index.js';
|
||||||
|
|
||||||
// Re-export reports
|
|
||||||
export {
|
|
||||||
ComplexityReportManager,
|
|
||||||
type ComplexityReport,
|
|
||||||
type ComplexityReportMetadata,
|
|
||||||
type ComplexityAnalysis,
|
|
||||||
type TaskComplexityData
|
|
||||||
} from './reports/index.js';
|
|
||||||
|
|||||||
@@ -5,16 +5,6 @@
|
|||||||
|
|
||||||
import type { Task, TaskMetadata, TaskStatus } from '../types/index.js';
|
import type { Task, TaskMetadata, TaskStatus } from '../types/index.js';
|
||||||
|
|
||||||
/**
|
|
||||||
* Options for loading tasks from storage
|
|
||||||
*/
|
|
||||||
export interface LoadTasksOptions {
|
|
||||||
/** Filter tasks by status */
|
|
||||||
status?: TaskStatus;
|
|
||||||
/** Exclude subtasks from loaded tasks (default: false) */
|
|
||||||
excludeSubtasks?: boolean;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Result type for updateTaskStatus operations
|
* Result type for updateTaskStatus operations
|
||||||
*/
|
*/
|
||||||
@@ -31,12 +21,11 @@ export interface UpdateStatusResult {
|
|||||||
*/
|
*/
|
||||||
export interface IStorage {
|
export interface IStorage {
|
||||||
/**
|
/**
|
||||||
* Load all tasks from storage, optionally filtered by tag and other criteria
|
* Load all tasks from storage, optionally filtered by tag
|
||||||
* @param tag - Optional tag to filter tasks by
|
* @param tag - Optional tag to filter tasks by
|
||||||
* @param options - Optional filtering options (status, excludeSubtasks)
|
|
||||||
* @returns Promise that resolves to an array of tasks
|
* @returns Promise that resolves to an array of tasks
|
||||||
*/
|
*/
|
||||||
loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]>;
|
loadTasks(tag?: string): Promise<Task[]>;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Load a single task by ID
|
* Load a single task by ID
|
||||||
@@ -216,7 +205,7 @@ export abstract class BaseStorage implements IStorage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Abstract methods that must be implemented by concrete classes
|
// Abstract methods that must be implemented by concrete classes
|
||||||
abstract loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]>;
|
abstract loadTasks(tag?: string): Promise<Task[]>;
|
||||||
abstract loadTask(taskId: string, tag?: string): Promise<Task | null>;
|
abstract loadTask(taskId: string, tag?: string): Promise<Task | null>;
|
||||||
abstract saveTasks(tasks: Task[], tag?: string): Promise<void>;
|
abstract saveTasks(tasks: Task[], tag?: string): Promise<void>;
|
||||||
abstract appendTasks(tasks: Task[], tag?: string): Promise<void>;
|
abstract appendTasks(tasks: Task[], tag?: string): Promise<void>;
|
||||||
|
|||||||
@@ -1,148 +0,0 @@
|
|||||||
import { describe, it, expect, vi } from 'vitest';
|
|
||||||
import { TaskMapper } from './TaskMapper.js';
|
|
||||||
import type { Tables } from '../types/database.types.js';
|
|
||||||
|
|
||||||
type TaskRow = Tables<'tasks'>;
|
|
||||||
|
|
||||||
describe('TaskMapper', () => {
|
|
||||||
describe('extractMetadataField', () => {
|
|
||||||
it('should extract string field from metadata', () => {
|
|
||||||
const taskRow: TaskRow = {
|
|
||||||
id: '123',
|
|
||||||
display_id: '1',
|
|
||||||
title: 'Test Task',
|
|
||||||
description: 'Test description',
|
|
||||||
status: 'todo',
|
|
||||||
priority: 'medium',
|
|
||||||
parent_task_id: null,
|
|
||||||
subtask_position: 0,
|
|
||||||
created_at: new Date().toISOString(),
|
|
||||||
updated_at: new Date().toISOString(),
|
|
||||||
metadata: {
|
|
||||||
details: 'Some details',
|
|
||||||
testStrategy: 'Test with unit tests'
|
|
||||||
},
|
|
||||||
complexity: null,
|
|
||||||
assignee_id: null,
|
|
||||||
estimated_hours: null,
|
|
||||||
actual_hours: null,
|
|
||||||
due_date: null,
|
|
||||||
completed_at: null
|
|
||||||
};
|
|
||||||
|
|
||||||
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
|
|
||||||
|
|
||||||
expect(task.details).toBe('Some details');
|
|
||||||
expect(task.testStrategy).toBe('Test with unit tests');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should use default value when metadata field is missing', () => {
|
|
||||||
const taskRow: TaskRow = {
|
|
||||||
id: '123',
|
|
||||||
display_id: '1',
|
|
||||||
title: 'Test Task',
|
|
||||||
description: 'Test description',
|
|
||||||
status: 'todo',
|
|
||||||
priority: 'medium',
|
|
||||||
parent_task_id: null,
|
|
||||||
subtask_position: 0,
|
|
||||||
created_at: new Date().toISOString(),
|
|
||||||
updated_at: new Date().toISOString(),
|
|
||||||
metadata: {},
|
|
||||||
complexity: null,
|
|
||||||
assignee_id: null,
|
|
||||||
estimated_hours: null,
|
|
||||||
actual_hours: null,
|
|
||||||
due_date: null,
|
|
||||||
completed_at: null
|
|
||||||
};
|
|
||||||
|
|
||||||
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
|
|
||||||
|
|
||||||
expect(task.details).toBe('');
|
|
||||||
expect(task.testStrategy).toBe('');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should use default value when metadata is null', () => {
|
|
||||||
const taskRow: TaskRow = {
|
|
||||||
id: '123',
|
|
||||||
display_id: '1',
|
|
||||||
title: 'Test Task',
|
|
||||||
description: 'Test description',
|
|
||||||
status: 'todo',
|
|
||||||
priority: 'medium',
|
|
||||||
parent_task_id: null,
|
|
||||||
subtask_position: 0,
|
|
||||||
created_at: new Date().toISOString(),
|
|
||||||
updated_at: new Date().toISOString(),
|
|
||||||
metadata: null,
|
|
||||||
complexity: null,
|
|
||||||
assignee_id: null,
|
|
||||||
estimated_hours: null,
|
|
||||||
actual_hours: null,
|
|
||||||
due_date: null,
|
|
||||||
completed_at: null
|
|
||||||
};
|
|
||||||
|
|
||||||
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
|
|
||||||
|
|
||||||
expect(task.details).toBe('');
|
|
||||||
expect(task.testStrategy).toBe('');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should use default value and warn when metadata field has wrong type', () => {
|
|
||||||
const consoleWarnSpy = vi
|
|
||||||
.spyOn(console, 'warn')
|
|
||||||
.mockImplementation(() => {});
|
|
||||||
|
|
||||||
const taskRow: TaskRow = {
|
|
||||||
id: '123',
|
|
||||||
display_id: '1',
|
|
||||||
title: 'Test Task',
|
|
||||||
description: 'Test description',
|
|
||||||
status: 'todo',
|
|
||||||
priority: 'medium',
|
|
||||||
parent_task_id: null,
|
|
||||||
subtask_position: 0,
|
|
||||||
created_at: new Date().toISOString(),
|
|
||||||
updated_at: new Date().toISOString(),
|
|
||||||
metadata: {
|
|
||||||
details: 12345, // Wrong type: number instead of string
|
|
||||||
testStrategy: ['test1', 'test2'] // Wrong type: array instead of string
|
|
||||||
},
|
|
||||||
complexity: null,
|
|
||||||
assignee_id: null,
|
|
||||||
estimated_hours: null,
|
|
||||||
actual_hours: null,
|
|
||||||
due_date: null,
|
|
||||||
completed_at: null
|
|
||||||
};
|
|
||||||
|
|
||||||
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
|
|
||||||
|
|
||||||
// Should use empty string defaults when type doesn't match
|
|
||||||
expect(task.details).toBe('');
|
|
||||||
expect(task.testStrategy).toBe('');
|
|
||||||
|
|
||||||
// Should have logged warnings
|
|
||||||
expect(consoleWarnSpy).toHaveBeenCalledWith(
|
|
||||||
expect.stringContaining('Type mismatch in metadata field "details"')
|
|
||||||
);
|
|
||||||
expect(consoleWarnSpy).toHaveBeenCalledWith(
|
|
||||||
expect.stringContaining(
|
|
||||||
'Type mismatch in metadata field "testStrategy"'
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
consoleWarnSpy.mockRestore();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('mapStatus', () => {
|
|
||||||
it('should map database status to internal status', () => {
|
|
||||||
expect(TaskMapper.mapStatus('todo')).toBe('pending');
|
|
||||||
expect(TaskMapper.mapStatus('in_progress')).toBe('in-progress');
|
|
||||||
expect(TaskMapper.mapStatus('done')).toBe('done');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
@@ -2,32 +2,22 @@ import { Task, Subtask } from '../types/index.js';
|
|||||||
import { Database, Tables } from '../types/database.types.js';
|
import { Database, Tables } from '../types/database.types.js';
|
||||||
|
|
||||||
type TaskRow = Tables<'tasks'>;
|
type TaskRow = Tables<'tasks'>;
|
||||||
|
type DependencyRow = Tables<'task_dependencies'>;
|
||||||
// Legacy type for backward compatibility
|
|
||||||
type DependencyRow = Tables<'task_dependencies'> & {
|
|
||||||
depends_on_task?: { display_id: string } | null;
|
|
||||||
depends_on_task_id?: string;
|
|
||||||
};
|
|
||||||
|
|
||||||
export class TaskMapper {
|
export class TaskMapper {
|
||||||
/**
|
/**
|
||||||
* Maps database tasks to internal Task format
|
* Maps database tasks to internal Task format
|
||||||
* @param dbTasks - Array of tasks from database
|
|
||||||
* @param dependencies - Either a Map of task_id to display_ids or legacy array format
|
|
||||||
*/
|
*/
|
||||||
static mapDatabaseTasksToTasks(
|
static mapDatabaseTasksToTasks(
|
||||||
dbTasks: TaskRow[],
|
dbTasks: TaskRow[],
|
||||||
dependencies: Map<string, string[]> | DependencyRow[]
|
dbDependencies: DependencyRow[]
|
||||||
): Task[] {
|
): Task[] {
|
||||||
if (!dbTasks || dbTasks.length === 0) {
|
if (!dbTasks || dbTasks.length === 0) {
|
||||||
return [];
|
return [];
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle both Map and array formats for backward compatibility
|
// Group dependencies by task_id
|
||||||
const dependenciesByTaskId =
|
const dependenciesByTaskId = this.groupDependenciesByTaskId(dbDependencies);
|
||||||
dependencies instanceof Map
|
|
||||||
? dependencies
|
|
||||||
: this.groupDependenciesByTaskId(dependencies);
|
|
||||||
|
|
||||||
// Separate parent tasks and subtasks
|
// Separate parent tasks and subtasks
|
||||||
const parentTasks = dbTasks.filter((t) => !t.parent_task_id);
|
const parentTasks = dbTasks.filter((t) => !t.parent_task_id);
|
||||||
@@ -53,23 +43,21 @@ export class TaskMapper {
|
|||||||
): Task {
|
): Task {
|
||||||
// Map subtasks
|
// Map subtasks
|
||||||
const subtasks: Subtask[] = dbSubtasks.map((subtask, index) => ({
|
const subtasks: Subtask[] = dbSubtasks.map((subtask, index) => ({
|
||||||
id: subtask.display_id || String(index + 1), // Use display_id if available (API storage), fallback to numeric (file storage)
|
id: index + 1, // Use numeric ID for subtasks
|
||||||
parentId: dbTask.id,
|
parentId: dbTask.id,
|
||||||
title: subtask.title,
|
title: subtask.title,
|
||||||
description: subtask.description || '',
|
description: subtask.description || '',
|
||||||
status: this.mapStatus(subtask.status),
|
status: this.mapStatus(subtask.status),
|
||||||
priority: this.mapPriority(subtask.priority),
|
priority: this.mapPriority(subtask.priority),
|
||||||
dependencies: dependenciesByTaskId.get(subtask.id) || [],
|
dependencies: dependenciesByTaskId.get(subtask.id) || [],
|
||||||
details: this.extractMetadataField(subtask.metadata, 'details', ''),
|
details: (subtask.metadata as any)?.details || '',
|
||||||
testStrategy: this.extractMetadataField(
|
testStrategy: (subtask.metadata as any)?.testStrategy || '',
|
||||||
subtask.metadata,
|
|
||||||
'testStrategy',
|
|
||||||
''
|
|
||||||
),
|
|
||||||
createdAt: subtask.created_at,
|
createdAt: subtask.created_at,
|
||||||
updatedAt: subtask.updated_at,
|
updatedAt: subtask.updated_at,
|
||||||
assignee: subtask.assignee_id || undefined,
|
assignee: subtask.assignee_id || undefined,
|
||||||
complexity: subtask.complexity ?? undefined
|
complexity: subtask.complexity
|
||||||
|
? this.mapComplexityToInternal(subtask.complexity)
|
||||||
|
: undefined
|
||||||
}));
|
}));
|
||||||
|
|
||||||
return {
|
return {
|
||||||
@@ -79,25 +67,22 @@ export class TaskMapper {
|
|||||||
status: this.mapStatus(dbTask.status),
|
status: this.mapStatus(dbTask.status),
|
||||||
priority: this.mapPriority(dbTask.priority),
|
priority: this.mapPriority(dbTask.priority),
|
||||||
dependencies: dependenciesByTaskId.get(dbTask.id) || [],
|
dependencies: dependenciesByTaskId.get(dbTask.id) || [],
|
||||||
details: this.extractMetadataField(dbTask.metadata, 'details', ''),
|
details: (dbTask.metadata as any)?.details || '',
|
||||||
testStrategy: this.extractMetadataField(
|
testStrategy: (dbTask.metadata as any)?.testStrategy || '',
|
||||||
dbTask.metadata,
|
|
||||||
'testStrategy',
|
|
||||||
''
|
|
||||||
),
|
|
||||||
subtasks,
|
subtasks,
|
||||||
createdAt: dbTask.created_at,
|
createdAt: dbTask.created_at,
|
||||||
updatedAt: dbTask.updated_at,
|
updatedAt: dbTask.updated_at,
|
||||||
assignee: dbTask.assignee_id || undefined,
|
assignee: dbTask.assignee_id || undefined,
|
||||||
complexity: dbTask.complexity ?? undefined,
|
complexity: dbTask.complexity
|
||||||
|
? this.mapComplexityToInternal(dbTask.complexity)
|
||||||
|
: undefined,
|
||||||
effort: dbTask.estimated_hours || undefined,
|
effort: dbTask.estimated_hours || undefined,
|
||||||
actualEffort: dbTask.actual_hours || undefined
|
actualEffort: dbTask.actual_hours || undefined
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Groups dependencies by task ID (legacy method for backward compatibility)
|
* Groups dependencies by task ID
|
||||||
* @deprecated Use DependencyFetcher.fetchDependenciesWithDisplayIds instead
|
|
||||||
*/
|
*/
|
||||||
private static groupDependenciesByTaskId(
|
private static groupDependenciesByTaskId(
|
||||||
dependencies: DependencyRow[]
|
dependencies: DependencyRow[]
|
||||||
@@ -107,14 +92,7 @@ export class TaskMapper {
|
|||||||
if (dependencies) {
|
if (dependencies) {
|
||||||
for (const dep of dependencies) {
|
for (const dep of dependencies) {
|
||||||
const deps = dependenciesByTaskId.get(dep.task_id) || [];
|
const deps = dependenciesByTaskId.get(dep.task_id) || [];
|
||||||
// Handle both old format (UUID string) and new format (object with display_id)
|
deps.push(dep.depends_on_task_id);
|
||||||
const dependencyId =
|
|
||||||
typeof dep.depends_on_task === 'object'
|
|
||||||
? dep.depends_on_task?.display_id
|
|
||||||
: dep.depends_on_task_id;
|
|
||||||
if (dependencyId) {
|
|
||||||
deps.push(dependencyId);
|
|
||||||
}
|
|
||||||
dependenciesByTaskId.set(dep.task_id, deps);
|
dependenciesByTaskId.set(dep.task_id, deps);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -179,38 +157,14 @@ export class TaskMapper {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Safely extracts a field from metadata JSON with runtime type validation
|
* Maps numeric complexity to descriptive complexity
|
||||||
* @param metadata The metadata object (could be null or any type)
|
|
||||||
* @param field The field to extract
|
|
||||||
* @param defaultValue Default value if field doesn't exist
|
|
||||||
* @returns The extracted value if it matches the expected type, otherwise defaultValue
|
|
||||||
*/
|
*/
|
||||||
private static extractMetadataField<T>(
|
private static mapComplexityToInternal(
|
||||||
metadata: unknown,
|
complexity: number
|
||||||
field: string,
|
): Task['complexity'] {
|
||||||
defaultValue: T
|
if (complexity <= 2) return 'simple';
|
||||||
): T {
|
if (complexity <= 5) return 'moderate';
|
||||||
if (!metadata || typeof metadata !== 'object') {
|
if (complexity <= 8) return 'complex';
|
||||||
return defaultValue;
|
return 'very-complex';
|
||||||
}
|
|
||||||
|
|
||||||
const value = (metadata as Record<string, unknown>)[field];
|
|
||||||
|
|
||||||
if (value === undefined) {
|
|
||||||
return defaultValue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Runtime type validation: ensure value matches the type of defaultValue
|
|
||||||
const expectedType = typeof defaultValue;
|
|
||||||
const actualType = typeof value;
|
|
||||||
|
|
||||||
if (expectedType !== actualType) {
|
|
||||||
console.warn(
|
|
||||||
`Type mismatch in metadata field "${field}": expected ${expectedType}, got ${actualType}. Using default value.`
|
|
||||||
);
|
|
||||||
return defaultValue;
|
|
||||||
}
|
|
||||||
|
|
||||||
return value as T;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,185 +0,0 @@
|
|||||||
/**
|
|
||||||
* @fileoverview ComplexityReportManager - Handles loading and managing complexity analysis reports
|
|
||||||
* Follows the same pattern as ConfigManager and AuthManager
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { promises as fs } from 'fs';
|
|
||||||
import path from 'path';
|
|
||||||
import type {
|
|
||||||
ComplexityReport,
|
|
||||||
ComplexityAnalysis,
|
|
||||||
TaskComplexityData
|
|
||||||
} from './types.js';
|
|
||||||
import { getLogger } from '../logger/index.js';
|
|
||||||
|
|
||||||
const logger = getLogger('ComplexityReportManager');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Manages complexity analysis reports
|
|
||||||
* Handles loading, caching, and providing complexity data for tasks
|
|
||||||
*/
|
|
||||||
export class ComplexityReportManager {
|
|
||||||
private projectRoot: string;
|
|
||||||
private reportCache: Map<string, ComplexityReport> = new Map();
|
|
||||||
|
|
||||||
constructor(projectRoot: string) {
|
|
||||||
this.projectRoot = projectRoot;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the path to the complexity report file for a given tag
|
|
||||||
*/
|
|
||||||
private getReportPath(tag?: string): string {
|
|
||||||
const reportsDir = path.join(this.projectRoot, '.taskmaster', 'reports');
|
|
||||||
const tagSuffix = tag && tag !== 'master' ? `_${tag}` : '';
|
|
||||||
return path.join(reportsDir, `task-complexity-report${tagSuffix}.json`);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Load complexity report for a given tag
|
|
||||||
* Results are cached to avoid repeated file reads
|
|
||||||
*/
|
|
||||||
async loadReport(tag?: string): Promise<ComplexityReport | null> {
|
|
||||||
const resolvedTag = tag || 'master';
|
|
||||||
const cacheKey = resolvedTag;
|
|
||||||
|
|
||||||
// Check cache first
|
|
||||||
if (this.reportCache.has(cacheKey)) {
|
|
||||||
return this.reportCache.get(cacheKey)!;
|
|
||||||
}
|
|
||||||
|
|
||||||
const reportPath = this.getReportPath(tag);
|
|
||||||
|
|
||||||
try {
|
|
||||||
// Check if file exists
|
|
||||||
await fs.access(reportPath);
|
|
||||||
|
|
||||||
// Read and parse the report
|
|
||||||
const content = await fs.readFile(reportPath, 'utf-8');
|
|
||||||
const report = JSON.parse(content) as ComplexityReport;
|
|
||||||
|
|
||||||
// Validate basic structure
|
|
||||||
if (!report.meta || !Array.isArray(report.complexityAnalysis)) {
|
|
||||||
logger.warn(
|
|
||||||
`Invalid complexity report structure at ${reportPath}, ignoring`
|
|
||||||
);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cache the report
|
|
||||||
this.reportCache.set(cacheKey, report);
|
|
||||||
|
|
||||||
logger.debug(
|
|
||||||
`Loaded complexity report for tag '${resolvedTag}' with ${report.complexityAnalysis.length} analyses`
|
|
||||||
);
|
|
||||||
|
|
||||||
return report;
|
|
||||||
} catch (error: any) {
|
|
||||||
if (error.code === 'ENOENT') {
|
|
||||||
// File doesn't exist - this is normal, not all projects have complexity reports
|
|
||||||
logger.debug(`No complexity report found for tag '${resolvedTag}'`);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Other errors (parsing, permissions, etc.)
|
|
||||||
logger.warn(
|
|
||||||
`Failed to load complexity report for tag '${resolvedTag}': ${error.message}`
|
|
||||||
);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get complexity data for a specific task ID
|
|
||||||
*/
|
|
||||||
async getComplexityForTask(
|
|
||||||
taskId: string | number,
|
|
||||||
tag?: string
|
|
||||||
): Promise<TaskComplexityData | null> {
|
|
||||||
const report = await this.loadReport(tag);
|
|
||||||
if (!report) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the analysis for this task
|
|
||||||
const analysis = report.complexityAnalysis.find(
|
|
||||||
(a) => String(a.taskId) === String(taskId)
|
|
||||||
);
|
|
||||||
|
|
||||||
if (!analysis) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert to TaskComplexityData format
|
|
||||||
return {
|
|
||||||
complexityScore: analysis.complexityScore,
|
|
||||||
recommendedSubtasks: analysis.recommendedSubtasks,
|
|
||||||
expansionPrompt: analysis.expansionPrompt,
|
|
||||||
complexityReasoning: analysis.complexityReasoning
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get complexity data for multiple tasks at once
|
|
||||||
* More efficient than calling getComplexityForTask multiple times
|
|
||||||
*/
|
|
||||||
async getComplexityForTasks(
|
|
||||||
taskIds: (string | number)[],
|
|
||||||
tag?: string
|
|
||||||
): Promise<Map<string, TaskComplexityData>> {
|
|
||||||
const result = new Map<string, TaskComplexityData>();
|
|
||||||
const report = await this.loadReport(tag);
|
|
||||||
|
|
||||||
if (!report) {
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a map for fast lookups
|
|
||||||
const analysisMap = new Map<string, ComplexityAnalysis>();
|
|
||||||
report.complexityAnalysis.forEach((analysis) => {
|
|
||||||
analysisMap.set(String(analysis.taskId), analysis);
|
|
||||||
});
|
|
||||||
|
|
||||||
// Map each task ID to its complexity data
|
|
||||||
taskIds.forEach((taskId) => {
|
|
||||||
const analysis = analysisMap.get(String(taskId));
|
|
||||||
if (analysis) {
|
|
||||||
result.set(String(taskId), {
|
|
||||||
complexityScore: analysis.complexityScore,
|
|
||||||
recommendedSubtasks: analysis.recommendedSubtasks,
|
|
||||||
expansionPrompt: analysis.expansionPrompt,
|
|
||||||
complexityReasoning: analysis.complexityReasoning
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Clear the report cache
|
|
||||||
* @param tag - Specific tag to clear, or undefined to clear all cached reports
|
|
||||||
* Useful when reports are regenerated or modified externally
|
|
||||||
*/
|
|
||||||
clearCache(tag?: string): void {
|
|
||||||
if (tag) {
|
|
||||||
this.reportCache.delete(tag);
|
|
||||||
} else {
|
|
||||||
// Clear all cached reports
|
|
||||||
this.reportCache.clear();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check if a complexity report exists for a tag
|
|
||||||
*/
|
|
||||||
async hasReport(tag?: string): Promise<boolean> {
|
|
||||||
const reportPath = this.getReportPath(tag);
|
|
||||||
try {
|
|
||||||
await fs.access(reportPath);
|
|
||||||
return true;
|
|
||||||
} catch {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
/**
|
|
||||||
* @fileoverview Reports module exports
|
|
||||||
*/
|
|
||||||
|
|
||||||
export { ComplexityReportManager } from './complexity-report-manager.js';
|
|
||||||
export type {
|
|
||||||
ComplexityReport,
|
|
||||||
ComplexityReportMetadata,
|
|
||||||
ComplexityAnalysis,
|
|
||||||
TaskComplexityData
|
|
||||||
} from './types.js';
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
/**
|
|
||||||
* @fileoverview Type definitions for complexity analysis reports
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Analysis result for a single task
|
|
||||||
*/
|
|
||||||
export interface ComplexityAnalysis {
|
|
||||||
/** Task ID being analyzed */
|
|
||||||
taskId: string | number;
|
|
||||||
/** Task title */
|
|
||||||
taskTitle: string;
|
|
||||||
/** Complexity score (1-10 scale) */
|
|
||||||
complexityScore: number;
|
|
||||||
/** Recommended number of subtasks */
|
|
||||||
recommendedSubtasks: number;
|
|
||||||
/** AI-generated prompt for task expansion */
|
|
||||||
expansionPrompt: string;
|
|
||||||
/** Reasoning behind the complexity assessment */
|
|
||||||
complexityReasoning: string;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Metadata about the complexity report
|
|
||||||
*/
|
|
||||||
export interface ComplexityReportMetadata {
|
|
||||||
/** When the report was generated */
|
|
||||||
generatedAt: string;
|
|
||||||
/** Number of tasks analyzed in this run */
|
|
||||||
tasksAnalyzed: number;
|
|
||||||
/** Total number of tasks in the file */
|
|
||||||
totalTasks?: number;
|
|
||||||
/** Total analyses in the report (across all runs) */
|
|
||||||
analysisCount?: number;
|
|
||||||
/** Complexity threshold score used */
|
|
||||||
thresholdScore: number;
|
|
||||||
/** Project name */
|
|
||||||
projectName?: string;
|
|
||||||
/** Whether research mode was used */
|
|
||||||
usedResearch: boolean;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Complete complexity analysis report
|
|
||||||
*/
|
|
||||||
export interface ComplexityReport {
|
|
||||||
/** Report metadata */
|
|
||||||
meta: ComplexityReportMetadata;
|
|
||||||
/** Array of complexity analyses */
|
|
||||||
complexityAnalysis: ComplexityAnalysis[];
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Complexity data to be attached to a Task
|
|
||||||
*/
|
|
||||||
export interface TaskComplexityData {
|
|
||||||
/** Complexity score (1-10 scale) */
|
|
||||||
complexityScore?: number;
|
|
||||||
/** Recommended number of subtasks */
|
|
||||||
recommendedSubtasks?: number;
|
|
||||||
/** AI-generated expansion prompt */
|
|
||||||
expansionPrompt?: string;
|
|
||||||
/** Reasoning behind the assessment */
|
|
||||||
complexityReasoning?: string;
|
|
||||||
}
|
|
||||||
224
packages/tm-core/src/repositories/supabase-task-repository.ts
Normal file
224
packages/tm-core/src/repositories/supabase-task-repository.ts
Normal file
@@ -0,0 +1,224 @@
|
|||||||
|
import { SupabaseClient } from '@supabase/supabase-js';
|
||||||
|
import { Task } from '../types/index.js';
|
||||||
|
import { Database } from '../types/database.types.js';
|
||||||
|
import { TaskMapper } from '../mappers/TaskMapper.js';
|
||||||
|
import { AuthManager } from '../auth/auth-manager.js';
|
||||||
|
import { z } from 'zod';
|
||||||
|
|
||||||
|
// Zod schema for task status validation
|
||||||
|
const TaskStatusSchema = z.enum([
|
||||||
|
'pending',
|
||||||
|
'in-progress',
|
||||||
|
'done',
|
||||||
|
'review',
|
||||||
|
'deferred',
|
||||||
|
'cancelled',
|
||||||
|
'blocked'
|
||||||
|
]);
|
||||||
|
|
||||||
|
// Zod schema for task updates
|
||||||
|
const TaskUpdateSchema = z
|
||||||
|
.object({
|
||||||
|
title: z.string().min(1).optional(),
|
||||||
|
description: z.string().optional(),
|
||||||
|
status: TaskStatusSchema.optional(),
|
||||||
|
priority: z.enum(['low', 'medium', 'high', 'critical']).optional(),
|
||||||
|
details: z.string().optional(),
|
||||||
|
testStrategy: z.string().optional()
|
||||||
|
})
|
||||||
|
.partial();
|
||||||
|
|
||||||
|
export class SupabaseTaskRepository {
|
||||||
|
constructor(private supabase: SupabaseClient<Database>) {}
|
||||||
|
|
||||||
|
async getTasks(_projectId?: string): Promise<Task[]> {
|
||||||
|
// Get the current context to determine briefId
|
||||||
|
const authManager = AuthManager.getInstance();
|
||||||
|
const context = authManager.getContext();
|
||||||
|
|
||||||
|
if (!context || !context.briefId) {
|
||||||
|
throw new Error(
|
||||||
|
'No brief selected. Please select a brief first using: tm context brief'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get all tasks for the brief using the exact query structure
|
||||||
|
const { data: tasks, error } = await this.supabase
|
||||||
|
.from('tasks')
|
||||||
|
.select(`
|
||||||
|
*,
|
||||||
|
document:document_id (
|
||||||
|
id,
|
||||||
|
document_name,
|
||||||
|
title,
|
||||||
|
description
|
||||||
|
)
|
||||||
|
`)
|
||||||
|
.eq('brief_id', context.briefId)
|
||||||
|
.order('position', { ascending: true })
|
||||||
|
.order('subtask_position', { ascending: true })
|
||||||
|
.order('created_at', { ascending: true });
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
throw new Error(`Failed to fetch tasks: ${error.message}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!tasks || tasks.length === 0) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get all dependencies for these tasks
|
||||||
|
const taskIds = tasks.map((t: any) => t.id);
|
||||||
|
const { data: depsData, error: depsError } = await this.supabase
|
||||||
|
.from('task_dependencies')
|
||||||
|
.select('*')
|
||||||
|
.in('task_id', taskIds);
|
||||||
|
|
||||||
|
if (depsError) {
|
||||||
|
throw new Error(
|
||||||
|
`Failed to fetch task dependencies: ${depsError.message}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use mapper to convert to internal format
|
||||||
|
return TaskMapper.mapDatabaseTasksToTasks(tasks, depsData || []);
|
||||||
|
}
|
||||||
|
|
||||||
|
async getTask(_projectId: string, taskId: string): Promise<Task | null> {
|
||||||
|
// Get the current context to determine briefId (projectId not used in Supabase context)
|
||||||
|
const authManager = AuthManager.getInstance();
|
||||||
|
const context = authManager.getContext();
|
||||||
|
|
||||||
|
if (!context || !context.briefId) {
|
||||||
|
throw new Error(
|
||||||
|
'No brief selected. Please select a brief first using: tm context brief'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const { data, error } = await this.supabase
|
||||||
|
.from('tasks')
|
||||||
|
.select('*')
|
||||||
|
.eq('brief_id', context.briefId)
|
||||||
|
.eq('display_id', taskId.toUpperCase())
|
||||||
|
.single();
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
if (error.code === 'PGRST116') {
|
||||||
|
return null; // Not found
|
||||||
|
}
|
||||||
|
throw new Error(`Failed to fetch task: ${error.message}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get dependencies for this task
|
||||||
|
const { data: depsData } = await this.supabase
|
||||||
|
.from('task_dependencies')
|
||||||
|
.select('*')
|
||||||
|
.eq('task_id', taskId);
|
||||||
|
|
||||||
|
// Get subtasks if this is a parent task
|
||||||
|
const { data: subtasksData } = await this.supabase
|
||||||
|
.from('tasks')
|
||||||
|
.select('*')
|
||||||
|
.eq('parent_task_id', taskId)
|
||||||
|
.order('subtask_position', { ascending: true });
|
||||||
|
|
||||||
|
// Create dependency map
|
||||||
|
const dependenciesByTaskId = new Map<string, string[]>();
|
||||||
|
if (depsData) {
|
||||||
|
dependenciesByTaskId.set(
|
||||||
|
taskId,
|
||||||
|
depsData.map(
|
||||||
|
(d: Database['public']['Tables']['task_dependencies']['Row']) =>
|
||||||
|
d.depends_on_task_id
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use mapper to convert single task
|
||||||
|
return TaskMapper.mapDatabaseTaskToTask(
|
||||||
|
data,
|
||||||
|
subtasksData || [],
|
||||||
|
dependenciesByTaskId
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
async updateTask(
|
||||||
|
projectId: string,
|
||||||
|
taskId: string,
|
||||||
|
updates: Partial<Task>
|
||||||
|
): Promise<Task> {
|
||||||
|
// Get the current context to determine briefId
|
||||||
|
const authManager = AuthManager.getInstance();
|
||||||
|
const context = authManager.getContext();
|
||||||
|
|
||||||
|
if (!context || !context.briefId) {
|
||||||
|
throw new Error(
|
||||||
|
'No brief selected. Please select a brief first using: tm context brief'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate updates using Zod schema
|
||||||
|
try {
|
||||||
|
TaskUpdateSchema.parse(updates);
|
||||||
|
} catch (error) {
|
||||||
|
if (error instanceof z.ZodError) {
|
||||||
|
const errorMessages = error.errors
|
||||||
|
.map((err) => `${err.path.join('.')}: ${err.message}`)
|
||||||
|
.join(', ');
|
||||||
|
throw new Error(`Invalid task update data: ${errorMessages}`);
|
||||||
|
}
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert Task fields to database fields - only include fields that actually exist in the database
|
||||||
|
const dbUpdates: any = {};
|
||||||
|
|
||||||
|
if (updates.title !== undefined) dbUpdates.title = updates.title;
|
||||||
|
if (updates.description !== undefined)
|
||||||
|
dbUpdates.description = updates.description;
|
||||||
|
if (updates.status !== undefined)
|
||||||
|
dbUpdates.status = this.mapStatusToDatabase(updates.status);
|
||||||
|
if (updates.priority !== undefined) dbUpdates.priority = updates.priority;
|
||||||
|
// Skip fields that don't exist in database schema: details, testStrategy, etc.
|
||||||
|
|
||||||
|
// Update the task
|
||||||
|
const { error } = await this.supabase
|
||||||
|
.from('tasks')
|
||||||
|
.update(dbUpdates)
|
||||||
|
.eq('brief_id', context.briefId)
|
||||||
|
.eq('display_id', taskId.toUpperCase());
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
throw new Error(`Failed to update task: ${error.message}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the updated task by fetching it
|
||||||
|
const updatedTask = await this.getTask(projectId, taskId);
|
||||||
|
if (!updatedTask) {
|
||||||
|
throw new Error(`Failed to retrieve updated task ${taskId}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return updatedTask;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maps internal status to database status
|
||||||
|
*/
|
||||||
|
private mapStatusToDatabase(
|
||||||
|
status: string
|
||||||
|
): Database['public']['Enums']['task_status'] {
|
||||||
|
switch (status) {
|
||||||
|
case 'pending':
|
||||||
|
return 'todo';
|
||||||
|
case 'in-progress':
|
||||||
|
case 'in_progress': // Accept both formats
|
||||||
|
return 'in_progress';
|
||||||
|
case 'done':
|
||||||
|
return 'done';
|
||||||
|
default:
|
||||||
|
throw new Error(
|
||||||
|
`Invalid task status: ${status}. Valid statuses are: pending, in-progress, done`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
import { SupabaseClient } from '@supabase/supabase-js';
|
|
||||||
import { Database } from '../../types/database.types.js';
|
|
||||||
import { DependencyWithDisplayId } from '../../types/repository-types.js';
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Handles fetching and processing of task dependencies with display_ids
|
|
||||||
*/
|
|
||||||
export class DependencyFetcher {
|
|
||||||
constructor(private supabase: SupabaseClient<Database>) {}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Fetches dependencies for given task IDs with display_ids joined
|
|
||||||
* @param taskIds Array of task IDs to fetch dependencies for
|
|
||||||
* @returns Map of task ID to array of dependency display_ids
|
|
||||||
*/
|
|
||||||
async fetchDependenciesWithDisplayIds(
|
|
||||||
taskIds: string[]
|
|
||||||
): Promise<Map<string, string[]>> {
|
|
||||||
if (!taskIds || taskIds.length === 0) {
|
|
||||||
return new Map();
|
|
||||||
}
|
|
||||||
|
|
||||||
const { data, error } = await this.supabase
|
|
||||||
.from('task_dependencies')
|
|
||||||
.select(`
|
|
||||||
task_id,
|
|
||||||
depends_on_task:tasks!task_dependencies_depends_on_task_id_fkey (
|
|
||||||
display_id
|
|
||||||
)
|
|
||||||
`)
|
|
||||||
.in('task_id', taskIds);
|
|
||||||
|
|
||||||
if (error) {
|
|
||||||
throw new Error(`Failed to fetch task dependencies: ${error.message}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
return this.processDependencyData(data as DependencyWithDisplayId[]);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Processes raw dependency data into a map structure
|
|
||||||
*/
|
|
||||||
private processDependencyData(
|
|
||||||
dependencies: DependencyWithDisplayId[]
|
|
||||||
): Map<string, string[]> {
|
|
||||||
const dependenciesByTaskId = new Map<string, string[]>();
|
|
||||||
|
|
||||||
if (!dependencies) {
|
|
||||||
return dependenciesByTaskId;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const dep of dependencies) {
|
|
||||||
if (!dep.task_id) continue;
|
|
||||||
|
|
||||||
const currentDeps = dependenciesByTaskId.get(dep.task_id) || [];
|
|
||||||
|
|
||||||
// Extract display_id from the joined object
|
|
||||||
const displayId = dep.depends_on_task?.display_id;
|
|
||||||
if (displayId) {
|
|
||||||
currentDeps.push(displayId);
|
|
||||||
}
|
|
||||||
|
|
||||||
dependenciesByTaskId.set(dep.task_id, currentDeps);
|
|
||||||
}
|
|
||||||
|
|
||||||
return dependenciesByTaskId;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
/**
|
|
||||||
* Supabase repository implementations
|
|
||||||
*/
|
|
||||||
export { SupabaseTaskRepository } from './supabase-task-repository.js';
|
|
||||||
export { DependencyFetcher } from './dependency-fetcher.js';
|
|
||||||
@@ -1,275 +0,0 @@
|
|||||||
import { SupabaseClient } from '@supabase/supabase-js';
|
|
||||||
import { Task } from '../../types/index.js';
|
|
||||||
import { Database, Json } from '../../types/database.types.js';
|
|
||||||
import { TaskMapper } from '../../mappers/TaskMapper.js';
|
|
||||||
import { AuthManager } from '../../auth/auth-manager.js';
|
|
||||||
import { DependencyFetcher } from './dependency-fetcher.js';
|
|
||||||
import {
|
|
||||||
TaskWithRelations,
|
|
||||||
TaskDatabaseUpdate
|
|
||||||
} from '../../types/repository-types.js';
|
|
||||||
import { LoadTasksOptions } from '../../interfaces/storage.interface.js';
|
|
||||||
import { z } from 'zod';
|
|
||||||
|
|
||||||
// Zod schema for task status validation
|
|
||||||
const TaskStatusSchema = z.enum([
|
|
||||||
'pending',
|
|
||||||
'in-progress',
|
|
||||||
'done',
|
|
||||||
'review',
|
|
||||||
'deferred',
|
|
||||||
'cancelled',
|
|
||||||
'blocked'
|
|
||||||
]);
|
|
||||||
|
|
||||||
// Zod schema for task updates
|
|
||||||
const TaskUpdateSchema = z
|
|
||||||
.object({
|
|
||||||
title: z.string().min(1).optional(),
|
|
||||||
description: z.string().optional(),
|
|
||||||
status: TaskStatusSchema.optional(),
|
|
||||||
priority: z.enum(['low', 'medium', 'high', 'critical']).optional(),
|
|
||||||
details: z.string().optional(),
|
|
||||||
testStrategy: z.string().optional()
|
|
||||||
})
|
|
||||||
.partial();
|
|
||||||
|
|
||||||
export class SupabaseTaskRepository {
|
|
||||||
private dependencyFetcher: DependencyFetcher;
|
|
||||||
private authManager: AuthManager;
|
|
||||||
|
|
||||||
constructor(private supabase: SupabaseClient<Database>) {
|
|
||||||
this.dependencyFetcher = new DependencyFetcher(supabase);
|
|
||||||
this.authManager = AuthManager.getInstance();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the current brief ID from auth context
|
|
||||||
* @throws {Error} If no brief is selected
|
|
||||||
*/
|
|
||||||
private getBriefIdOrThrow(): string {
|
|
||||||
const context = this.authManager.getContext();
|
|
||||||
if (!context?.briefId) {
|
|
||||||
throw new Error(
|
|
||||||
'No brief selected. Please select a brief first using: tm context brief'
|
|
||||||
);
|
|
||||||
}
|
|
||||||
return context.briefId;
|
|
||||||
}
|
|
||||||
|
|
||||||
async getTasks(
|
|
||||||
_projectId?: string,
|
|
||||||
options?: LoadTasksOptions
|
|
||||||
): Promise<Task[]> {
|
|
||||||
const briefId = this.getBriefIdOrThrow();
|
|
||||||
|
|
||||||
// Build query with filters
|
|
||||||
let query = this.supabase
|
|
||||||
.from('tasks')
|
|
||||||
.select(`
|
|
||||||
*,
|
|
||||||
document:document_id (
|
|
||||||
id,
|
|
||||||
document_name,
|
|
||||||
title,
|
|
||||||
description
|
|
||||||
)
|
|
||||||
`)
|
|
||||||
.eq('brief_id', briefId);
|
|
||||||
|
|
||||||
// Apply status filter at database level if specified
|
|
||||||
if (options?.status) {
|
|
||||||
const dbStatus = this.mapStatusToDatabase(options.status);
|
|
||||||
query = query.eq('status', dbStatus);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply subtask exclusion at database level if specified
|
|
||||||
if (options?.excludeSubtasks) {
|
|
||||||
// Only fetch parent tasks (where parent_task_id is null)
|
|
||||||
query = query.is('parent_task_id', null);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute query with ordering
|
|
||||||
const { data: tasks, error } = await query
|
|
||||||
.order('position', { ascending: true })
|
|
||||||
.order('subtask_position', { ascending: true })
|
|
||||||
.order('created_at', { ascending: true });
|
|
||||||
|
|
||||||
if (error) {
|
|
||||||
throw new Error(`Failed to fetch tasks: ${error.message}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!tasks || tasks.length === 0) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type-safe task ID extraction
|
|
||||||
const typedTasks = tasks as TaskWithRelations[];
|
|
||||||
const taskIds = typedTasks.map((t) => t.id);
|
|
||||||
const dependenciesMap =
|
|
||||||
await this.dependencyFetcher.fetchDependenciesWithDisplayIds(taskIds);
|
|
||||||
|
|
||||||
// Use mapper to convert to internal format
|
|
||||||
return TaskMapper.mapDatabaseTasksToTasks(tasks, dependenciesMap);
|
|
||||||
}
|
|
||||||
|
|
||||||
async getTask(_projectId: string, taskId: string): Promise<Task | null> {
|
|
||||||
const briefId = this.getBriefIdOrThrow();
|
|
||||||
|
|
||||||
const { data, error } = await this.supabase
|
|
||||||
.from('tasks')
|
|
||||||
.select('*')
|
|
||||||
.eq('brief_id', briefId)
|
|
||||||
.eq('display_id', taskId.toUpperCase())
|
|
||||||
.single();
|
|
||||||
|
|
||||||
if (error) {
|
|
||||||
if (error.code === 'PGRST116') {
|
|
||||||
return null; // Not found
|
|
||||||
}
|
|
||||||
throw new Error(`Failed to fetch task: ${error.message}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get subtasks if this is a parent task
|
|
||||||
const { data: subtasksData } = await this.supabase
|
|
||||||
.from('tasks')
|
|
||||||
.select('*')
|
|
||||||
.eq('parent_task_id', data.id)
|
|
||||||
.order('subtask_position', { ascending: true });
|
|
||||||
|
|
||||||
// Get all task IDs (parent + subtasks) to fetch dependencies
|
|
||||||
const allTaskIds = [data.id, ...(subtasksData?.map((st) => st.id) || [])];
|
|
||||||
|
|
||||||
// Fetch dependencies using the dedicated fetcher
|
|
||||||
const dependenciesByTaskId =
|
|
||||||
await this.dependencyFetcher.fetchDependenciesWithDisplayIds(allTaskIds);
|
|
||||||
|
|
||||||
// Use mapper to convert single task
|
|
||||||
return TaskMapper.mapDatabaseTaskToTask(
|
|
||||||
data,
|
|
||||||
subtasksData || [],
|
|
||||||
dependenciesByTaskId
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
async updateTask(
|
|
||||||
projectId: string,
|
|
||||||
taskId: string,
|
|
||||||
updates: Partial<Task>
|
|
||||||
): Promise<Task> {
|
|
||||||
const briefId = this.getBriefIdOrThrow();
|
|
||||||
|
|
||||||
// Validate updates using Zod schema
|
|
||||||
try {
|
|
||||||
TaskUpdateSchema.parse(updates);
|
|
||||||
} catch (error) {
|
|
||||||
if (error instanceof z.ZodError) {
|
|
||||||
const errorMessages = error.issues
|
|
||||||
.map((err) => `${err.path.join('.')}: ${err.message}`)
|
|
||||||
.join(', ');
|
|
||||||
throw new Error(`Invalid task update data: ${errorMessages}`);
|
|
||||||
}
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert Task fields to database fields with proper typing
|
|
||||||
const dbUpdates: TaskDatabaseUpdate = {};
|
|
||||||
|
|
||||||
if (updates.title !== undefined) dbUpdates.title = updates.title;
|
|
||||||
if (updates.description !== undefined)
|
|
||||||
dbUpdates.description = updates.description;
|
|
||||||
if (updates.status !== undefined)
|
|
||||||
dbUpdates.status = this.mapStatusToDatabase(updates.status);
|
|
||||||
if (updates.priority !== undefined)
|
|
||||||
dbUpdates.priority = this.mapPriorityToDatabase(updates.priority);
|
|
||||||
|
|
||||||
// Handle metadata fields (details, testStrategy, etc.)
|
|
||||||
// Load existing metadata to preserve fields not being updated
|
|
||||||
const { data: existingMetadataRow, error: existingMetadataError } =
|
|
||||||
await this.supabase
|
|
||||||
.from('tasks')
|
|
||||||
.select('metadata')
|
|
||||||
.eq('brief_id', briefId)
|
|
||||||
.eq('display_id', taskId.toUpperCase())
|
|
||||||
.single();
|
|
||||||
|
|
||||||
if (existingMetadataError) {
|
|
||||||
throw new Error(
|
|
||||||
`Failed to load existing task metadata: ${existingMetadataError.message}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const metadata: Record<string, unknown> = {
|
|
||||||
...((existingMetadataRow?.metadata as Record<string, unknown>) ?? {})
|
|
||||||
};
|
|
||||||
|
|
||||||
if (updates.details !== undefined) metadata.details = updates.details;
|
|
||||||
if (updates.testStrategy !== undefined)
|
|
||||||
metadata.testStrategy = updates.testStrategy;
|
|
||||||
|
|
||||||
if (Object.keys(metadata).length > 0) {
|
|
||||||
dbUpdates.metadata = metadata as Json;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the task
|
|
||||||
const { error } = await this.supabase
|
|
||||||
.from('tasks')
|
|
||||||
.update(dbUpdates)
|
|
||||||
.eq('brief_id', briefId)
|
|
||||||
.eq('display_id', taskId.toUpperCase());
|
|
||||||
|
|
||||||
if (error) {
|
|
||||||
throw new Error(`Failed to update task: ${error.message}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return the updated task by fetching it
|
|
||||||
const updatedTask = await this.getTask(projectId, taskId);
|
|
||||||
if (!updatedTask) {
|
|
||||||
throw new Error(`Failed to retrieve updated task ${taskId}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
return updatedTask;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Maps internal status to database status
|
|
||||||
*/
|
|
||||||
private mapStatusToDatabase(
|
|
||||||
status: string
|
|
||||||
): Database['public']['Enums']['task_status'] {
|
|
||||||
switch (status) {
|
|
||||||
case 'pending':
|
|
||||||
return 'todo';
|
|
||||||
case 'in-progress':
|
|
||||||
case 'in_progress': // Accept both formats
|
|
||||||
return 'in_progress';
|
|
||||||
case 'done':
|
|
||||||
return 'done';
|
|
||||||
default:
|
|
||||||
throw new Error(
|
|
||||||
`Invalid task status: ${status}. Valid statuses are: pending, in-progress, done`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Maps internal priority to database priority
|
|
||||||
* Task Master uses 'critical', database uses 'urgent'
|
|
||||||
*/
|
|
||||||
private mapPriorityToDatabase(
|
|
||||||
priority: string
|
|
||||||
): Database['public']['Enums']['task_priority'] {
|
|
||||||
switch (priority) {
|
|
||||||
case 'critical':
|
|
||||||
return 'urgent';
|
|
||||||
case 'low':
|
|
||||||
case 'medium':
|
|
||||||
case 'high':
|
|
||||||
return priority as Database['public']['Enums']['task_priority'];
|
|
||||||
default:
|
|
||||||
throw new Error(
|
|
||||||
`Invalid task priority: ${priority}. Valid priorities are: low, medium, high, critical`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,9 +1,8 @@
|
|||||||
import { Task, TaskTag } from '../types/index.js';
|
import { Task, TaskTag } from '../types/index.js';
|
||||||
import { LoadTasksOptions } from '../interfaces/storage.interface.js';
|
|
||||||
|
|
||||||
export interface TaskRepository {
|
export interface TaskRepository {
|
||||||
// Task operations
|
// Task operations
|
||||||
getTasks(projectId: string, options?: LoadTasksOptions): Promise<Task[]>;
|
getTasks(projectId: string): Promise<Task[]>;
|
||||||
getTask(projectId: string, taskId: string): Promise<Task | null>;
|
getTask(projectId: string, taskId: string): Promise<Task | null>;
|
||||||
createTask(projectId: string, task: Omit<Task, 'id'>): Promise<Task>;
|
createTask(projectId: string, task: Omit<Task, 'id'>): Promise<Task>;
|
||||||
updateTask(
|
updateTask(
|
||||||
|
|||||||
@@ -1,496 +0,0 @@
|
|||||||
/**
|
|
||||||
* @fileoverview Export Service
|
|
||||||
* Core service for exporting tasks to external systems (e.g., Hamster briefs)
|
|
||||||
*/
|
|
||||||
|
|
||||||
import type { Task, TaskStatus } from '../types/index.js';
|
|
||||||
import type { UserContext } from '../auth/types.js';
|
|
||||||
import { ConfigManager } from '../config/config-manager.js';
|
|
||||||
import { AuthManager } from '../auth/auth-manager.js';
|
|
||||||
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
|
|
||||||
import { FileStorage } from '../storage/file-storage/index.js';
|
|
||||||
|
|
||||||
// Type definitions for the bulk API response
|
|
||||||
interface TaskImportResult {
|
|
||||||
externalId?: string;
|
|
||||||
index: number;
|
|
||||||
success: boolean;
|
|
||||||
taskId?: string;
|
|
||||||
error?: string;
|
|
||||||
validationErrors?: string[];
|
|
||||||
}
|
|
||||||
|
|
||||||
interface BulkTasksResponse {
|
|
||||||
dryRun: boolean;
|
|
||||||
totalTasks: number;
|
|
||||||
successCount: number;
|
|
||||||
failedCount: number;
|
|
||||||
skippedCount: number;
|
|
||||||
results: TaskImportResult[];
|
|
||||||
summary: {
|
|
||||||
message: string;
|
|
||||||
duration: number;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Options for exporting tasks
|
|
||||||
*/
|
|
||||||
export interface ExportTasksOptions {
|
|
||||||
/** Optional tag to export tasks from (uses active tag if not provided) */
|
|
||||||
tag?: string;
|
|
||||||
/** Brief ID to export to */
|
|
||||||
briefId?: string;
|
|
||||||
/** Organization ID (required if briefId is provided) */
|
|
||||||
orgId?: string;
|
|
||||||
/** Filter by task status */
|
|
||||||
status?: TaskStatus;
|
|
||||||
/** Exclude subtasks from export (default: false, subtasks included by default) */
|
|
||||||
excludeSubtasks?: boolean;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Result of the export operation
|
|
||||||
*/
|
|
||||||
export interface ExportResult {
|
|
||||||
/** Whether the export was successful */
|
|
||||||
success: boolean;
|
|
||||||
/** Number of tasks exported */
|
|
||||||
taskCount: number;
|
|
||||||
/** The brief ID tasks were exported to */
|
|
||||||
briefId: string;
|
|
||||||
/** The organization ID */
|
|
||||||
orgId: string;
|
|
||||||
/** Optional message */
|
|
||||||
message?: string;
|
|
||||||
/** Error details if export failed */
|
|
||||||
error?: {
|
|
||||||
code: string;
|
|
||||||
message: string;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Brief information from API
|
|
||||||
*/
|
|
||||||
export interface Brief {
|
|
||||||
id: string;
|
|
||||||
accountId: string;
|
|
||||||
createdAt: string;
|
|
||||||
name?: string;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* ExportService handles task export to external systems
|
|
||||||
*/
|
|
||||||
export class ExportService {
|
|
||||||
private configManager: ConfigManager;
|
|
||||||
private authManager: AuthManager;
|
|
||||||
|
|
||||||
constructor(configManager: ConfigManager, authManager: AuthManager) {
|
|
||||||
this.configManager = configManager;
|
|
||||||
this.authManager = authManager;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Export tasks to a brief
|
|
||||||
*/
|
|
||||||
async exportTasks(options: ExportTasksOptions): Promise<ExportResult> {
|
|
||||||
// Validate authentication
|
|
||||||
if (!this.authManager.isAuthenticated()) {
|
|
||||||
throw new TaskMasterError(
|
|
||||||
'Authentication required for export',
|
|
||||||
ERROR_CODES.AUTHENTICATION_ERROR
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get current context
|
|
||||||
const context = this.authManager.getContext();
|
|
||||||
|
|
||||||
// Determine org and brief IDs
|
|
||||||
let orgId = options.orgId || context?.orgId;
|
|
||||||
let briefId = options.briefId || context?.briefId;
|
|
||||||
|
|
||||||
// Validate we have necessary IDs
|
|
||||||
if (!orgId) {
|
|
||||||
throw new TaskMasterError(
|
|
||||||
'Organization ID is required for export. Use "tm context org" to select one.',
|
|
||||||
ERROR_CODES.MISSING_CONFIGURATION
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!briefId) {
|
|
||||||
throw new TaskMasterError(
|
|
||||||
'Brief ID is required for export. Use "tm context brief" or provide --brief flag.',
|
|
||||||
ERROR_CODES.MISSING_CONFIGURATION
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get tasks from the specified or active tag
|
|
||||||
const activeTag = this.configManager.getActiveTag();
|
|
||||||
const tag = options.tag || activeTag;
|
|
||||||
|
|
||||||
// Always read tasks from local file storage for export
|
|
||||||
// (we're exporting local tasks to a remote brief)
|
|
||||||
const fileStorage = new FileStorage(this.configManager.getProjectRoot());
|
|
||||||
await fileStorage.initialize();
|
|
||||||
|
|
||||||
// Load tasks with filters applied at storage layer
|
|
||||||
const filteredTasks = await fileStorage.loadTasks(tag, {
|
|
||||||
status: options.status,
|
|
||||||
excludeSubtasks: options.excludeSubtasks
|
|
||||||
});
|
|
||||||
|
|
||||||
// Get total count (without filters) for comparison
|
|
||||||
const allTasks = await fileStorage.loadTasks(tag);
|
|
||||||
|
|
||||||
const taskListResult = {
|
|
||||||
tasks: filteredTasks,
|
|
||||||
total: allTasks.length,
|
|
||||||
filtered: filteredTasks.length,
|
|
||||||
tag,
|
|
||||||
storageType: 'file' as const
|
|
||||||
};
|
|
||||||
|
|
||||||
if (taskListResult.tasks.length === 0) {
|
|
||||||
return {
|
|
||||||
success: false,
|
|
||||||
taskCount: 0,
|
|
||||||
briefId,
|
|
||||||
orgId,
|
|
||||||
message: 'No tasks found to export',
|
|
||||||
error: {
|
|
||||||
code: 'NO_TASKS',
|
|
||||||
message: 'No tasks match the specified criteria'
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
// Call the export API with the original tasks
|
|
||||||
// performExport will handle the transformation based on the method used
|
|
||||||
await this.performExport(orgId, briefId, taskListResult.tasks);
|
|
||||||
|
|
||||||
return {
|
|
||||||
success: true,
|
|
||||||
taskCount: taskListResult.tasks.length,
|
|
||||||
briefId,
|
|
||||||
orgId,
|
|
||||||
message: `Successfully exported ${taskListResult.tasks.length} task(s) to brief`
|
|
||||||
};
|
|
||||||
} catch (error) {
|
|
||||||
const errorMessage =
|
|
||||||
error instanceof Error ? error.message : String(error);
|
|
||||||
|
|
||||||
return {
|
|
||||||
success: false,
|
|
||||||
taskCount: 0,
|
|
||||||
briefId,
|
|
||||||
orgId,
|
|
||||||
error: {
|
|
||||||
code: 'EXPORT_FAILED',
|
|
||||||
message: errorMessage
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Export tasks from a brief ID or URL
|
|
||||||
*/
|
|
||||||
async exportFromBriefInput(briefInput: string): Promise<ExportResult> {
|
|
||||||
// Extract brief ID from input
|
|
||||||
const briefId = this.extractBriefId(briefInput);
|
|
||||||
if (!briefId) {
|
|
||||||
throw new TaskMasterError(
|
|
||||||
'Invalid brief ID or URL provided',
|
|
||||||
ERROR_CODES.VALIDATION_ERROR
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch brief to get organization
|
|
||||||
const brief = await this.authManager.getBrief(briefId);
|
|
||||||
if (!brief) {
|
|
||||||
throw new TaskMasterError(
|
|
||||||
'Brief not found or you do not have access',
|
|
||||||
ERROR_CODES.NOT_FOUND
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Export with the resolved org and brief
|
|
||||||
return this.exportTasks({
|
|
||||||
orgId: brief.accountId,
|
|
||||||
briefId: brief.id
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Validate export context before prompting
|
|
||||||
*/
|
|
||||||
async validateContext(): Promise<{
|
|
||||||
hasOrg: boolean;
|
|
||||||
hasBrief: boolean;
|
|
||||||
context: UserContext | null;
|
|
||||||
}> {
|
|
||||||
const context = this.authManager.getContext();
|
|
||||||
|
|
||||||
return {
|
|
||||||
hasOrg: !!context?.orgId,
|
|
||||||
hasBrief: !!context?.briefId,
|
|
||||||
context
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Transform tasks for API bulk import format (flat structure)
|
|
||||||
*/
|
|
||||||
private transformTasksForBulkImport(tasks: Task[]): any[] {
|
|
||||||
const flatTasks: any[] = [];
|
|
||||||
|
|
||||||
// Process each task and its subtasks
|
|
||||||
tasks.forEach((task) => {
|
|
||||||
// Add parent task
|
|
||||||
flatTasks.push({
|
|
||||||
externalId: String(task.id),
|
|
||||||
title: task.title,
|
|
||||||
description: this.enrichDescription(task),
|
|
||||||
status: this.mapStatusForAPI(task.status),
|
|
||||||
priority: task.priority || 'medium',
|
|
||||||
dependencies: task.dependencies?.map(String) || [],
|
|
||||||
details: task.details,
|
|
||||||
testStrategy: task.testStrategy,
|
|
||||||
complexity: task.complexity,
|
|
||||||
metadata: {
|
|
||||||
complexity: task.complexity,
|
|
||||||
originalId: task.id,
|
|
||||||
originalDescription: task.description,
|
|
||||||
originalDetails: task.details,
|
|
||||||
originalTestStrategy: task.testStrategy
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// Add subtasks if they exist
|
|
||||||
if (task.subtasks && task.subtasks.length > 0) {
|
|
||||||
task.subtasks.forEach((subtask) => {
|
|
||||||
flatTasks.push({
|
|
||||||
externalId: `${task.id}.${subtask.id}`,
|
|
||||||
parentExternalId: String(task.id),
|
|
||||||
title: subtask.title,
|
|
||||||
description: this.enrichDescription(subtask),
|
|
||||||
status: this.mapStatusForAPI(subtask.status),
|
|
||||||
priority: subtask.priority || 'medium',
|
|
||||||
dependencies:
|
|
||||||
subtask.dependencies?.map((dep) => {
|
|
||||||
// Convert subtask dependencies to full ID format
|
|
||||||
if (String(dep).includes('.')) {
|
|
||||||
return String(dep);
|
|
||||||
}
|
|
||||||
return `${task.id}.${dep}`;
|
|
||||||
}) || [],
|
|
||||||
details: subtask.details,
|
|
||||||
testStrategy: subtask.testStrategy,
|
|
||||||
complexity: subtask.complexity,
|
|
||||||
metadata: {
|
|
||||||
complexity: subtask.complexity,
|
|
||||||
originalId: subtask.id,
|
|
||||||
originalDescription: subtask.description,
|
|
||||||
originalDetails: subtask.details,
|
|
||||||
originalTestStrategy: subtask.testStrategy
|
|
||||||
}
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
return flatTasks;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Enrich task/subtask description with implementation details and test strategy
|
|
||||||
* Creates a comprehensive markdown-formatted description
|
|
||||||
*/
|
|
||||||
private enrichDescription(taskOrSubtask: Task | any): string {
|
|
||||||
const sections: string[] = [];
|
|
||||||
|
|
||||||
// Start with original description if it exists
|
|
||||||
if (taskOrSubtask.description) {
|
|
||||||
sections.push(taskOrSubtask.description);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add implementation details section
|
|
||||||
if (taskOrSubtask.details) {
|
|
||||||
sections.push('## Implementation Details\n');
|
|
||||||
sections.push(taskOrSubtask.details);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add test strategy section
|
|
||||||
if (taskOrSubtask.testStrategy) {
|
|
||||||
sections.push('## Test Strategy\n');
|
|
||||||
sections.push(taskOrSubtask.testStrategy);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Join sections with double newlines for better markdown formatting
|
|
||||||
return sections.join('\n\n').trim() || 'No description provided';
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Map internal status to API status format
|
|
||||||
*/
|
|
||||||
private mapStatusForAPI(status?: string): string {
|
|
||||||
switch (status) {
|
|
||||||
case 'pending':
|
|
||||||
return 'todo';
|
|
||||||
case 'in-progress':
|
|
||||||
return 'in_progress';
|
|
||||||
case 'done':
|
|
||||||
return 'done';
|
|
||||||
default:
|
|
||||||
return 'todo';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Perform the actual export API call
|
|
||||||
*/
|
|
||||||
private async performExport(
|
|
||||||
orgId: string,
|
|
||||||
briefId: string,
|
|
||||||
tasks: any[]
|
|
||||||
): Promise<void> {
|
|
||||||
// Check if we should use the API endpoint or direct Supabase
|
|
||||||
const useAPIEndpoint = process.env.TM_PUBLIC_BASE_DOMAIN;
|
|
||||||
|
|
||||||
if (useAPIEndpoint) {
|
|
||||||
// Use the new bulk import API endpoint
|
|
||||||
const apiUrl = `${process.env.TM_PUBLIC_BASE_DOMAIN}/ai/api/v1/briefs/${briefId}/tasks/bulk`;
|
|
||||||
|
|
||||||
// Transform tasks to flat structure for API
|
|
||||||
const flatTasks = this.transformTasksForBulkImport(tasks);
|
|
||||||
|
|
||||||
// Prepare request body
|
|
||||||
const requestBody = {
|
|
||||||
source: 'task-master-cli',
|
|
||||||
accountId: orgId,
|
|
||||||
options: {
|
|
||||||
dryRun: false,
|
|
||||||
stopOnError: false
|
|
||||||
},
|
|
||||||
tasks: flatTasks
|
|
||||||
};
|
|
||||||
|
|
||||||
// Get auth token
|
|
||||||
const credentials = this.authManager.getCredentials();
|
|
||||||
if (!credentials || !credentials.token) {
|
|
||||||
throw new Error('Not authenticated');
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make API request
|
|
||||||
const response = await fetch(apiUrl, {
|
|
||||||
method: 'POST',
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
Authorization: `Bearer ${credentials.token}`
|
|
||||||
},
|
|
||||||
body: JSON.stringify(requestBody)
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
|
||||||
const errorText = await response.text();
|
|
||||||
throw new Error(
|
|
||||||
`API request failed: ${response.status} - ${errorText}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const result = (await response.json()) as BulkTasksResponse;
|
|
||||||
|
|
||||||
if (result.failedCount > 0) {
|
|
||||||
const failedTasks = result.results
|
|
||||||
.filter((r) => !r.success)
|
|
||||||
.map((r) => `${r.externalId}: ${r.error}`)
|
|
||||||
.join(', ');
|
|
||||||
console.warn(
|
|
||||||
`Warning: ${result.failedCount} tasks failed to import: ${failedTasks}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(
|
|
||||||
`Successfully exported ${result.successCount} of ${result.totalTasks} tasks to brief ${briefId}`
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
// Direct Supabase approach is no longer supported
|
|
||||||
// The extractTasks method has been removed from SupabaseTaskRepository
|
|
||||||
// as we now exclusively use the API endpoint for exports
|
|
||||||
throw new Error(
|
|
||||||
'Export API endpoint not configured. Please set TM_PUBLIC_BASE_DOMAIN environment variable to enable task export.'
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract a brief ID from raw input (ID or URL)
|
|
||||||
*/
|
|
||||||
private extractBriefId(input: string): string | null {
|
|
||||||
const raw = input?.trim() ?? '';
|
|
||||||
if (!raw) return null;
|
|
||||||
|
|
||||||
const parseUrl = (s: string): URL | null => {
|
|
||||||
try {
|
|
||||||
return new URL(s);
|
|
||||||
} catch {}
|
|
||||||
try {
|
|
||||||
return new URL(`https://${s}`);
|
|
||||||
} catch {}
|
|
||||||
return null;
|
|
||||||
};
|
|
||||||
|
|
||||||
const fromParts = (path: string): string | null => {
|
|
||||||
const parts = path.split('/').filter(Boolean);
|
|
||||||
const briefsIdx = parts.lastIndexOf('briefs');
|
|
||||||
const candidate =
|
|
||||||
briefsIdx >= 0 && parts.length > briefsIdx + 1
|
|
||||||
? parts[briefsIdx + 1]
|
|
||||||
: parts[parts.length - 1];
|
|
||||||
return candidate?.trim() || null;
|
|
||||||
};
|
|
||||||
|
|
||||||
// Try to parse as URL
|
|
||||||
const url = parseUrl(raw);
|
|
||||||
if (url) {
|
|
||||||
const qId = url.searchParams.get('id') || url.searchParams.get('briefId');
|
|
||||||
const candidate = (qId || fromParts(url.pathname)) ?? null;
|
|
||||||
if (candidate) {
|
|
||||||
if (this.isLikelyId(candidate) || candidate.length >= 8) {
|
|
||||||
return candidate;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if it looks like a path without scheme
|
|
||||||
if (raw.includes('/')) {
|
|
||||||
const candidate = fromParts(raw);
|
|
||||||
if (candidate && (this.isLikelyId(candidate) || candidate.length >= 8)) {
|
|
||||||
return candidate;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return as-is if it looks like an ID
|
|
||||||
if (this.isLikelyId(raw) || raw.length >= 8) {
|
|
||||||
return raw;
|
|
||||||
}
|
|
||||||
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check if a string looks like a brief ID (UUID-like)
|
|
||||||
*/
|
|
||||||
private isLikelyId(value: string): boolean {
|
|
||||||
const uuidRegex =
|
|
||||||
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;
|
|
||||||
const ulidRegex = /^[0-9A-HJKMNP-TV-Z]{26}$/i;
|
|
||||||
const slugRegex = /^[A-Za-z0-9_-]{16,}$/;
|
|
||||||
return (
|
|
||||||
uuidRegex.test(value) || ulidRegex.test(value) || slugRegex.test(value)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -5,9 +5,4 @@
|
|||||||
|
|
||||||
export { TaskService } from './task-service.js';
|
export { TaskService } from './task-service.js';
|
||||||
export { OrganizationService } from './organization.service.js';
|
export { OrganizationService } from './organization.service.js';
|
||||||
export { ExportService } from './export.service.js';
|
|
||||||
export type { Organization, Brief } from './organization.service.js';
|
export type { Organization, Brief } from './organization.service.js';
|
||||||
export type {
|
|
||||||
ExportTasksOptions,
|
|
||||||
ExportResult
|
|
||||||
} from './export.service.js';
|
|
||||||
|
|||||||
@@ -14,7 +14,6 @@ import { ConfigManager } from '../config/config-manager.js';
|
|||||||
import { StorageFactory } from '../storage/storage-factory.js';
|
import { StorageFactory } from '../storage/storage-factory.js';
|
||||||
import { TaskEntity } from '../entities/task.entity.js';
|
import { TaskEntity } from '../entities/task.entity.js';
|
||||||
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
|
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
|
||||||
import { getLogger } from '../logger/factory.js';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Result returned by getTaskList
|
* Result returned by getTaskList
|
||||||
@@ -52,7 +51,6 @@ export class TaskService {
|
|||||||
private configManager: ConfigManager;
|
private configManager: ConfigManager;
|
||||||
private storage: IStorage;
|
private storage: IStorage;
|
||||||
private initialized = false;
|
private initialized = false;
|
||||||
private logger = getLogger('TaskService');
|
|
||||||
|
|
||||||
constructor(configManager: ConfigManager) {
|
constructor(configManager: ConfigManager) {
|
||||||
this.configManager = configManager;
|
this.configManager = configManager;
|
||||||
@@ -92,76 +90,37 @@ export class TaskService {
|
|||||||
const tag = options.tag || activeTag;
|
const tag = options.tag || activeTag;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Determine if we can push filters to storage layer
|
// Load raw tasks from storage - storage only knows about tags
|
||||||
const canPushStatusFilter =
|
const rawTasks = await this.storage.loadTasks(tag);
|
||||||
options.filter?.status &&
|
|
||||||
!options.filter.priority &&
|
|
||||||
!options.filter.tags &&
|
|
||||||
!options.filter.assignee &&
|
|
||||||
!options.filter.search &&
|
|
||||||
options.filter.hasSubtasks === undefined;
|
|
||||||
|
|
||||||
// Build storage-level options
|
|
||||||
const storageOptions: any = {};
|
|
||||||
|
|
||||||
// Push status filter to storage if it's the only filter
|
|
||||||
if (canPushStatusFilter) {
|
|
||||||
const statuses = Array.isArray(options.filter!.status)
|
|
||||||
? options.filter!.status
|
|
||||||
: [options.filter!.status];
|
|
||||||
// Only push single status to storage (multiple statuses need in-memory filtering)
|
|
||||||
if (statuses.length === 1) {
|
|
||||||
storageOptions.status = statuses[0];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Push subtask exclusion to storage
|
|
||||||
if (options.includeSubtasks === false) {
|
|
||||||
storageOptions.excludeSubtasks = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load tasks from storage with pushed-down filters
|
|
||||||
const rawTasks = await this.storage.loadTasks(tag, storageOptions);
|
|
||||||
|
|
||||||
// Get total count without status filters, but preserve subtask exclusion
|
|
||||||
const baseOptions: any = {};
|
|
||||||
if (options.includeSubtasks === false) {
|
|
||||||
baseOptions.excludeSubtasks = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
const allTasks =
|
|
||||||
storageOptions.status !== undefined
|
|
||||||
? await this.storage.loadTasks(tag, baseOptions)
|
|
||||||
: rawTasks;
|
|
||||||
|
|
||||||
// Convert to TaskEntity for business logic operations
|
// Convert to TaskEntity for business logic operations
|
||||||
const taskEntities = TaskEntity.fromArray(rawTasks);
|
const taskEntities = TaskEntity.fromArray(rawTasks);
|
||||||
|
|
||||||
// Apply remaining filters in-memory if needed
|
// Apply filters if provided
|
||||||
let filteredEntities = taskEntities;
|
let filteredEntities = taskEntities;
|
||||||
if (options.filter && !canPushStatusFilter) {
|
if (options.filter) {
|
||||||
filteredEntities = this.applyFilters(taskEntities, options.filter);
|
|
||||||
} else if (
|
|
||||||
options.filter?.status &&
|
|
||||||
Array.isArray(options.filter.status) &&
|
|
||||||
options.filter.status.length > 1
|
|
||||||
) {
|
|
||||||
// Multiple statuses - filter in-memory
|
|
||||||
filteredEntities = this.applyFilters(taskEntities, options.filter);
|
filteredEntities = this.applyFilters(taskEntities, options.filter);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert back to plain objects
|
// Convert back to plain objects
|
||||||
const tasks = filteredEntities.map((entity) => entity.toJSON());
|
let tasks = filteredEntities.map((entity) => entity.toJSON());
|
||||||
|
|
||||||
|
// Handle subtasks option
|
||||||
|
if (options.includeSubtasks === false) {
|
||||||
|
tasks = tasks.map((task) => ({
|
||||||
|
...task,
|
||||||
|
subtasks: []
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
tasks,
|
tasks,
|
||||||
total: allTasks.length,
|
total: rawTasks.length,
|
||||||
filtered: filteredEntities.length,
|
filtered: filteredEntities.length,
|
||||||
tag: tag, // Return the actual tag being used (either explicitly provided or active tag)
|
tag: tag, // Return the actual tag being used (either explicitly provided or active tag)
|
||||||
storageType: this.getStorageType()
|
storageType: this.getStorageType()
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
this.logger.error('Failed to get task list', error);
|
|
||||||
throw new TaskMasterError(
|
throw new TaskMasterError(
|
||||||
'Failed to get task list',
|
'Failed to get task list',
|
||||||
ERROR_CODES.INTERNAL_ERROR,
|
ERROR_CODES.INTERNAL_ERROR,
|
||||||
@@ -176,28 +135,15 @@ export class TaskService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get a single task by ID - delegates to storage layer
|
* Get a single task by ID
|
||||||
*/
|
*/
|
||||||
async getTask(taskId: string, tag?: string): Promise<Task | null> {
|
async getTask(taskId: string, tag?: string): Promise<Task | null> {
|
||||||
// Use provided tag or get active tag
|
const result = await this.getTaskList({
|
||||||
const activeTag = tag || this.getActiveTag();
|
tag,
|
||||||
|
includeSubtasks: true
|
||||||
|
});
|
||||||
|
|
||||||
try {
|
return result.tasks.find((t) => t.id === taskId) || null;
|
||||||
// Delegate to storage layer which handles the specific logic for tasks vs subtasks
|
|
||||||
return await this.storage.loadTask(String(taskId), activeTag);
|
|
||||||
} catch (error) {
|
|
||||||
throw new TaskMasterError(
|
|
||||||
`Failed to get task ${taskId}`,
|
|
||||||
ERROR_CODES.STORAGE_ERROR,
|
|
||||||
{
|
|
||||||
operation: 'getTask',
|
|
||||||
resource: 'task',
|
|
||||||
taskId: String(taskId),
|
|
||||||
tag: activeTag
|
|
||||||
},
|
|
||||||
error as Error
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -438,6 +384,16 @@ export class TaskService {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Complexity filter
|
||||||
|
if (filter.complexity) {
|
||||||
|
const complexities = Array.isArray(filter.complexity)
|
||||||
|
? filter.complexity
|
||||||
|
: [filter.complexity];
|
||||||
|
if (!task.complexity || !complexities.includes(task.complexity)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Search filter
|
// Search filter
|
||||||
if (filter.search) {
|
if (filter.search) {
|
||||||
const searchLower = filter.search.toLowerCase();
|
const searchLower = filter.search.toLowerCase();
|
||||||
|
|||||||
@@ -6,8 +6,7 @@
|
|||||||
import type {
|
import type {
|
||||||
IStorage,
|
IStorage,
|
||||||
StorageStats,
|
StorageStats,
|
||||||
UpdateStatusResult,
|
UpdateStatusResult
|
||||||
LoadTasksOptions
|
|
||||||
} from '../interfaces/storage.interface.js';
|
} from '../interfaces/storage.interface.js';
|
||||||
import type {
|
import type {
|
||||||
Task,
|
Task,
|
||||||
@@ -17,7 +16,7 @@ import type {
|
|||||||
} from '../types/index.js';
|
} from '../types/index.js';
|
||||||
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
|
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
|
||||||
import { TaskRepository } from '../repositories/task-repository.interface.js';
|
import { TaskRepository } from '../repositories/task-repository.interface.js';
|
||||||
import { SupabaseTaskRepository } from '../repositories/supabase/index.js';
|
import { SupabaseTaskRepository } from '../repositories/supabase-task-repository.js';
|
||||||
import { SupabaseClient } from '@supabase/supabase-js';
|
import { SupabaseClient } from '@supabase/supabase-js';
|
||||||
import { AuthManager } from '../auth/auth-manager.js';
|
import { AuthManager } from '../auth/auth-manager.js';
|
||||||
|
|
||||||
@@ -147,7 +146,7 @@ export class ApiStorage implements IStorage {
|
|||||||
* Load tasks from API
|
* Load tasks from API
|
||||||
* In our system, the tag parameter represents a brief ID
|
* In our system, the tag parameter represents a brief ID
|
||||||
*/
|
*/
|
||||||
async loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]> {
|
async loadTasks(tag?: string): Promise<Task[]> {
|
||||||
await this.ensureInitialized();
|
await this.ensureInitialized();
|
||||||
|
|
||||||
try {
|
try {
|
||||||
@@ -161,9 +160,9 @@ export class ApiStorage implements IStorage {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load tasks from the current brief context with filters pushed to repository
|
// Load tasks from the current brief context
|
||||||
const tasks = await this.retryOperation(() =>
|
const tasks = await this.retryOperation(() =>
|
||||||
this.repository.getTasks(this.projectId, options)
|
this.repository.getTasks(this.projectId)
|
||||||
);
|
);
|
||||||
|
|
||||||
// Update the tag cache with the loaded task IDs
|
// Update the tag cache with the loaded task IDs
|
||||||
|
|||||||
@@ -6,13 +6,11 @@ import type { Task, TaskMetadata, TaskStatus } from '../../types/index.js';
|
|||||||
import type {
|
import type {
|
||||||
IStorage,
|
IStorage,
|
||||||
StorageStats,
|
StorageStats,
|
||||||
UpdateStatusResult,
|
UpdateStatusResult
|
||||||
LoadTasksOptions
|
|
||||||
} from '../../interfaces/storage.interface.js';
|
} from '../../interfaces/storage.interface.js';
|
||||||
import { FormatHandler } from './format-handler.js';
|
import { FormatHandler } from './format-handler.js';
|
||||||
import { FileOperations } from './file-operations.js';
|
import { FileOperations } from './file-operations.js';
|
||||||
import { PathResolver } from './path-resolver.js';
|
import { PathResolver } from './path-resolver.js';
|
||||||
import { ComplexityReportManager } from '../../reports/complexity-report-manager.js';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* File-based storage implementation using a single tasks.json file with separated concerns
|
* File-based storage implementation using a single tasks.json file with separated concerns
|
||||||
@@ -21,13 +19,11 @@ export class FileStorage implements IStorage {
|
|||||||
private formatHandler: FormatHandler;
|
private formatHandler: FormatHandler;
|
||||||
private fileOps: FileOperations;
|
private fileOps: FileOperations;
|
||||||
private pathResolver: PathResolver;
|
private pathResolver: PathResolver;
|
||||||
private complexityManager: ComplexityReportManager;
|
|
||||||
|
|
||||||
constructor(projectPath: string) {
|
constructor(projectPath: string) {
|
||||||
this.formatHandler = new FormatHandler();
|
this.formatHandler = new FormatHandler();
|
||||||
this.fileOps = new FileOperations();
|
this.fileOps = new FileOperations();
|
||||||
this.pathResolver = new PathResolver(projectPath);
|
this.pathResolver = new PathResolver(projectPath);
|
||||||
this.complexityManager = new ComplexityReportManager(projectPath);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -91,33 +87,14 @@ export class FileStorage implements IStorage {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Load tasks from the single tasks.json file for a specific tag
|
* Load tasks from the single tasks.json file for a specific tag
|
||||||
* Enriches tasks with complexity data from the complexity report
|
|
||||||
*/
|
*/
|
||||||
async loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]> {
|
async loadTasks(tag?: string): Promise<Task[]> {
|
||||||
const filePath = this.pathResolver.getTasksPath();
|
const filePath = this.pathResolver.getTasksPath();
|
||||||
const resolvedTag = tag || 'master';
|
const resolvedTag = tag || 'master';
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const rawData = await this.fileOps.readJson(filePath);
|
const rawData = await this.fileOps.readJson(filePath);
|
||||||
let tasks = this.formatHandler.extractTasks(rawData, resolvedTag);
|
return this.formatHandler.extractTasks(rawData, resolvedTag);
|
||||||
|
|
||||||
// Apply filters if provided
|
|
||||||
if (options) {
|
|
||||||
// Filter by status if specified
|
|
||||||
if (options.status) {
|
|
||||||
tasks = tasks.filter((task) => task.status === options.status);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exclude subtasks if specified
|
|
||||||
if (options.excludeSubtasks) {
|
|
||||||
tasks = tasks.map((task) => ({
|
|
||||||
...task,
|
|
||||||
subtasks: []
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return await this.enrichTasksWithComplexity(tasks, resolvedTag);
|
|
||||||
} catch (error: any) {
|
} catch (error: any) {
|
||||||
if (error.code === 'ENOENT') {
|
if (error.code === 'ENOENT') {
|
||||||
return []; // File doesn't exist, return empty array
|
return []; // File doesn't exist, return empty array
|
||||||
@@ -128,65 +105,9 @@ export class FileStorage implements IStorage {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Load a single task by ID from the tasks.json file
|
* Load a single task by ID from the tasks.json file
|
||||||
* Handles both regular tasks and subtasks (with dotted notation like "1.2")
|
|
||||||
*/
|
*/
|
||||||
async loadTask(taskId: string, tag?: string): Promise<Task | null> {
|
async loadTask(taskId: string, tag?: string): Promise<Task | null> {
|
||||||
const tasks = await this.loadTasks(tag);
|
const tasks = await this.loadTasks(tag);
|
||||||
|
|
||||||
// Check if this is a subtask (contains a dot)
|
|
||||||
if (taskId.includes('.')) {
|
|
||||||
const [parentId, subtaskId] = taskId.split('.');
|
|
||||||
const parentTask = tasks.find((t) => String(t.id) === parentId);
|
|
||||||
|
|
||||||
if (!parentTask || !parentTask.subtasks) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
const subtask = parentTask.subtasks.find(
|
|
||||||
(st) => String(st.id) === subtaskId
|
|
||||||
);
|
|
||||||
if (!subtask) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
const toFullSubId = (maybeDotId: string | number): string => {
|
|
||||||
const depId = String(maybeDotId);
|
|
||||||
return depId.includes('.') ? depId : `${parentTask.id}.${depId}`;
|
|
||||||
};
|
|
||||||
const resolvedDependencies =
|
|
||||||
subtask.dependencies?.map((dep) => toFullSubId(dep)) ?? [];
|
|
||||||
|
|
||||||
// Return a Task-like object for the subtask with the full dotted ID
|
|
||||||
// Following the same pattern as findTaskById in utils.js
|
|
||||||
const subtaskResult = {
|
|
||||||
...subtask,
|
|
||||||
id: taskId, // Use the full dotted ID
|
|
||||||
title: subtask.title || `Subtask ${subtaskId}`,
|
|
||||||
description: subtask.description || '',
|
|
||||||
status: subtask.status || 'pending',
|
|
||||||
priority: subtask.priority || parentTask.priority || 'medium',
|
|
||||||
dependencies: resolvedDependencies,
|
|
||||||
details: subtask.details || '',
|
|
||||||
testStrategy: subtask.testStrategy || '',
|
|
||||||
subtasks: [],
|
|
||||||
tags: parentTask.tags || [],
|
|
||||||
assignee: subtask.assignee || parentTask.assignee,
|
|
||||||
complexity: subtask.complexity || parentTask.complexity,
|
|
||||||
createdAt: subtask.createdAt || parentTask.createdAt,
|
|
||||||
updatedAt: subtask.updatedAt || parentTask.updatedAt,
|
|
||||||
// Add reference to parent task for context (like utils.js does)
|
|
||||||
parentTask: {
|
|
||||||
id: parentTask.id,
|
|
||||||
title: parentTask.title,
|
|
||||||
status: parentTask.status
|
|
||||||
},
|
|
||||||
isSubtask: true
|
|
||||||
};
|
|
||||||
|
|
||||||
return subtaskResult;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle regular task lookup
|
|
||||||
return tasks.find((task) => String(task.id) === String(taskId)) || null;
|
return tasks.find((task) => String(task.id) === String(taskId)) || null;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -619,46 +540,6 @@ export class FileStorage implements IStorage {
|
|||||||
|
|
||||||
await this.saveTasks(tasks, targetTag);
|
await this.saveTasks(tasks, targetTag);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Enrich tasks with complexity data from the complexity report
|
|
||||||
* Private helper method called by loadTasks()
|
|
||||||
*/
|
|
||||||
private async enrichTasksWithComplexity(
|
|
||||||
tasks: Task[],
|
|
||||||
tag: string
|
|
||||||
): Promise<Task[]> {
|
|
||||||
// Get all task IDs for bulk lookup
|
|
||||||
const taskIds = tasks.map((t) => t.id);
|
|
||||||
|
|
||||||
// Load complexity data for all tasks at once (more efficient)
|
|
||||||
const complexityMap = await this.complexityManager.getComplexityForTasks(
|
|
||||||
taskIds,
|
|
||||||
tag
|
|
||||||
);
|
|
||||||
|
|
||||||
// If no complexity data found, return tasks as-is
|
|
||||||
if (complexityMap.size === 0) {
|
|
||||||
return tasks;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enrich each task with its complexity data
|
|
||||||
return tasks.map((task) => {
|
|
||||||
const complexityData = complexityMap.get(String(task.id));
|
|
||||||
if (!complexityData) {
|
|
||||||
return task;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge complexity data into the task
|
|
||||||
return {
|
|
||||||
...task,
|
|
||||||
complexity: complexityData.complexityScore,
|
|
||||||
recommendedSubtasks: complexityData.recommendedSubtasks,
|
|
||||||
expansionPrompt: complexityData.expansionPrompt,
|
|
||||||
complexityReasoning: complexityData.complexityReasoning
|
|
||||||
};
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Export as default for convenience
|
// Export as default for convenience
|
||||||
|
|||||||
@@ -14,14 +14,7 @@ import {
|
|||||||
type StartTaskResult,
|
type StartTaskResult,
|
||||||
type ConflictCheckResult
|
type ConflictCheckResult
|
||||||
} from './services/task-execution-service.js';
|
} from './services/task-execution-service.js';
|
||||||
import {
|
|
||||||
ExportService,
|
|
||||||
type ExportTasksOptions,
|
|
||||||
type ExportResult
|
|
||||||
} from './services/export.service.js';
|
|
||||||
import { AuthManager } from './auth/auth-manager.js';
|
|
||||||
import { ERROR_CODES, TaskMasterError } from './errors/task-master-error.js';
|
import { ERROR_CODES, TaskMasterError } from './errors/task-master-error.js';
|
||||||
import type { UserContext } from './auth/types.js';
|
|
||||||
import type { IConfiguration } from './interfaces/configuration.interface.js';
|
import type { IConfiguration } from './interfaces/configuration.interface.js';
|
||||||
import type {
|
import type {
|
||||||
Task,
|
Task,
|
||||||
@@ -54,10 +47,6 @@ export type {
|
|||||||
StartTaskResult,
|
StartTaskResult,
|
||||||
ConflictCheckResult
|
ConflictCheckResult
|
||||||
} from './services/task-execution-service.js';
|
} from './services/task-execution-service.js';
|
||||||
export type {
|
|
||||||
ExportTasksOptions,
|
|
||||||
ExportResult
|
|
||||||
} from './services/export.service.js';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* TaskMasterCore facade class
|
* TaskMasterCore facade class
|
||||||
@@ -67,7 +56,6 @@ export class TaskMasterCore {
|
|||||||
private configManager: ConfigManager;
|
private configManager: ConfigManager;
|
||||||
private taskService: TaskService;
|
private taskService: TaskService;
|
||||||
private taskExecutionService: TaskExecutionService;
|
private taskExecutionService: TaskExecutionService;
|
||||||
private exportService: ExportService;
|
|
||||||
private executorService: ExecutorService | null = null;
|
private executorService: ExecutorService | null = null;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -92,7 +80,6 @@ export class TaskMasterCore {
|
|||||||
this.configManager = null as any;
|
this.configManager = null as any;
|
||||||
this.taskService = null as any;
|
this.taskService = null as any;
|
||||||
this.taskExecutionService = null as any;
|
this.taskExecutionService = null as any;
|
||||||
this.exportService = null as any;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -122,10 +109,6 @@ export class TaskMasterCore {
|
|||||||
|
|
||||||
// Create task execution service
|
// Create task execution service
|
||||||
this.taskExecutionService = new TaskExecutionService(this.taskService);
|
this.taskExecutionService = new TaskExecutionService(this.taskService);
|
||||||
|
|
||||||
// Create export service
|
|
||||||
const authManager = AuthManager.getInstance();
|
|
||||||
this.exportService = new ExportService(this.configManager, authManager);
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
throw new TaskMasterError(
|
throw new TaskMasterError(
|
||||||
'Failed to initialize TaskMasterCore',
|
'Failed to initialize TaskMasterCore',
|
||||||
@@ -259,33 +242,6 @@ export class TaskMasterCore {
|
|||||||
return this.taskExecutionService.getNextAvailableTask();
|
return this.taskExecutionService.getNextAvailableTask();
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==================== Export Service Methods ====================
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Export tasks to an external system (e.g., Hamster brief)
|
|
||||||
*/
|
|
||||||
async exportTasks(options: ExportTasksOptions): Promise<ExportResult> {
|
|
||||||
return this.exportService.exportTasks(options);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Export tasks from a brief ID or URL
|
|
||||||
*/
|
|
||||||
async exportFromBriefInput(briefInput: string): Promise<ExportResult> {
|
|
||||||
return this.exportService.exportFromBriefInput(briefInput);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Validate export context before prompting
|
|
||||||
*/
|
|
||||||
async validateExportContext(): Promise<{
|
|
||||||
hasOrg: boolean;
|
|
||||||
hasBrief: boolean;
|
|
||||||
context: UserContext | null;
|
|
||||||
}> {
|
|
||||||
return this.exportService.validateContext();
|
|
||||||
}
|
|
||||||
|
|
||||||
// ==================== Executor Service Methods ====================
|
// ==================== Executor Service Methods ====================
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -72,21 +72,14 @@ export interface Task {
|
|||||||
actualEffort?: number;
|
actualEffort?: number;
|
||||||
tags?: string[];
|
tags?: string[];
|
||||||
assignee?: string;
|
assignee?: string;
|
||||||
|
complexity?: TaskComplexity;
|
||||||
// Complexity analysis (from complexity report)
|
|
||||||
// Can be either enum ('simple' | 'moderate' | 'complex' | 'very-complex') or numeric score (1-10)
|
|
||||||
complexity?: TaskComplexity | number;
|
|
||||||
recommendedSubtasks?: number;
|
|
||||||
expansionPrompt?: string;
|
|
||||||
complexityReasoning?: string;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Subtask interface extending Task
|
* Subtask interface extending Task with numeric ID
|
||||||
* ID can be number (file storage) or string (API storage with display_id)
|
|
||||||
*/
|
*/
|
||||||
export interface Subtask extends Omit<Task, 'id' | 'subtasks'> {
|
export interface Subtask extends Omit<Task, 'id' | 'subtasks'> {
|
||||||
id: number | string;
|
id: number;
|
||||||
parentId: string;
|
parentId: string;
|
||||||
subtasks?: never; // Subtasks cannot have their own subtasks
|
subtasks?: never; // Subtasks cannot have their own subtasks
|
||||||
}
|
}
|
||||||
@@ -152,6 +145,7 @@ export interface TaskFilter {
|
|||||||
hasSubtasks?: boolean;
|
hasSubtasks?: boolean;
|
||||||
search?: string;
|
search?: string;
|
||||||
assignee?: string;
|
assignee?: string;
|
||||||
|
complexity?: TaskComplexity | TaskComplexity[];
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -1,83 +0,0 @@
|
|||||||
/**
|
|
||||||
* Type definitions for repository operations
|
|
||||||
*/
|
|
||||||
import { Database, Tables } from './database.types.js';
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Task row from database with optional joined relations
|
|
||||||
*/
|
|
||||||
export interface TaskWithRelations extends Tables<'tasks'> {
|
|
||||||
document?: {
|
|
||||||
id: string;
|
|
||||||
document_name: string;
|
|
||||||
title: string;
|
|
||||||
description: string | null;
|
|
||||||
} | null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Dependency row with joined display_id
|
|
||||||
*/
|
|
||||||
export interface DependencyWithDisplayId {
|
|
||||||
task_id: string;
|
|
||||||
depends_on_task: {
|
|
||||||
display_id: string;
|
|
||||||
} | null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Task metadata structure
|
|
||||||
*/
|
|
||||||
export interface TaskMetadata {
|
|
||||||
details?: string;
|
|
||||||
testStrategy?: string;
|
|
||||||
[key: string]: unknown; // Allow additional fields but be explicit
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Database update payload for tasks
|
|
||||||
*/
|
|
||||||
export type TaskDatabaseUpdate =
|
|
||||||
Database['public']['Tables']['tasks']['Update'];
|
|
||||||
/**
|
|
||||||
* Configuration for task queries
|
|
||||||
*/
|
|
||||||
export interface TaskQueryConfig {
|
|
||||||
briefId: string;
|
|
||||||
includeSubtasks?: boolean;
|
|
||||||
includeDependencies?: boolean;
|
|
||||||
includeDocument?: boolean;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Result of a task fetch operation
|
|
||||||
*/
|
|
||||||
export interface TaskFetchResult {
|
|
||||||
task: Tables<'tasks'>;
|
|
||||||
subtasks: Tables<'tasks'>[];
|
|
||||||
dependencies: Map<string, string[]>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Task validation errors
|
|
||||||
*/
|
|
||||||
export class TaskValidationError extends Error {
|
|
||||||
constructor(
|
|
||||||
message: string,
|
|
||||||
public readonly field: string,
|
|
||||||
public readonly value: unknown
|
|
||||||
) {
|
|
||||||
super(message);
|
|
||||||
this.name = 'TaskValidationError';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Context validation errors
|
|
||||||
*/
|
|
||||||
export class ContextValidationError extends Error {
|
|
||||||
constructor(message: string) {
|
|
||||||
super(message);
|
|
||||||
this.name = 'ContextValidationError';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -628,12 +628,6 @@ function createProjectStructure(
|
|||||||
// Copy example_prd.txt to NEW location
|
// Copy example_prd.txt to NEW location
|
||||||
copyTemplateFile('example_prd.txt', path.join(targetDir, EXAMPLE_PRD_FILE));
|
copyTemplateFile('example_prd.txt', path.join(targetDir, EXAMPLE_PRD_FILE));
|
||||||
|
|
||||||
// Copy example_prd_rpg.txt to templates directory
|
|
||||||
copyTemplateFile(
|
|
||||||
'example_prd_rpg.txt',
|
|
||||||
path.join(targetDir, TASKMASTER_TEMPLATES_DIR, 'example_prd_rpg.txt')
|
|
||||||
);
|
|
||||||
|
|
||||||
// Initialize git repository if git is available
|
// Initialize git repository if git is available
|
||||||
try {
|
try {
|
||||||
if (initGit === false) {
|
if (initGit === false) {
|
||||||
@@ -862,10 +856,10 @@ function createProjectStructure(
|
|||||||
)}\n${chalk.white(' ├─ ')}${chalk.dim('Models: Use `task-master models` commands')}\n${chalk.white(' └─ ')}${chalk.dim(
|
)}\n${chalk.white(' ├─ ')}${chalk.dim('Models: Use `task-master models` commands')}\n${chalk.white(' └─ ')}${chalk.dim(
|
||||||
'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)'
|
'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)'
|
||||||
)}\n${chalk.white('2. ')}${chalk.yellow(
|
)}\n${chalk.white('2. ')}${chalk.yellow(
|
||||||
'Discuss your idea with AI and ask for a PRD, and save it to .taskmaster/docs/prd.txt'
|
'Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt'
|
||||||
)}\n${chalk.white(' ├─ ')}${chalk.dim('Simple projects: Use ')}${chalk.cyan('example_prd.txt')}${chalk.dim(' template')}\n${chalk.white(' └─ ')}${chalk.dim('Complex systems: Use ')}${chalk.cyan('example_prd_rpg.txt')}${chalk.dim(' template (for dependency-aware task graphs)')}\n${chalk.white('3. ')}${chalk.yellow(
|
)}\n${chalk.white('3. ')}${chalk.yellow(
|
||||||
'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:'
|
'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:'
|
||||||
)}\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('parse_prd')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master parse-prd .taskmaster/docs/prd.txt')}\n${chalk.white('4. ')}${chalk.yellow(
|
)}\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('parse_prd')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master parse-prd scripts/prd.txt')}\n${chalk.white('4. ')}${chalk.yellow(
|
||||||
'Ask Cursor to analyze the complexity of the tasks in your PRD using research'
|
'Ask Cursor to analyze the complexity of the tasks in your PRD using research'
|
||||||
)}\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('analyze_project_complexity')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master analyze-complexity')}\n${chalk.white('5. ')}${chalk.yellow(
|
)}\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('analyze_project_complexity')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master analyze-complexity')}\n${chalk.white('5. ')}${chalk.yellow(
|
||||||
'Ask Cursor to expand all of your tasks using the complexity analysis'
|
'Ask Cursor to expand all of your tasks using the complexity analysis'
|
||||||
|
|||||||
@@ -41,7 +41,6 @@ import {
|
|||||||
AzureProvider,
|
AzureProvider,
|
||||||
BedrockAIProvider,
|
BedrockAIProvider,
|
||||||
ClaudeCodeProvider,
|
ClaudeCodeProvider,
|
||||||
CodexCliProvider,
|
|
||||||
GeminiCliProvider,
|
GeminiCliProvider,
|
||||||
GoogleAIProvider,
|
GoogleAIProvider,
|
||||||
GrokCliProvider,
|
GrokCliProvider,
|
||||||
@@ -71,7 +70,6 @@ const PROVIDERS = {
|
|||||||
azure: new AzureProvider(),
|
azure: new AzureProvider(),
|
||||||
vertex: new VertexAIProvider(),
|
vertex: new VertexAIProvider(),
|
||||||
'claude-code': new ClaudeCodeProvider(),
|
'claude-code': new ClaudeCodeProvider(),
|
||||||
'codex-cli': new CodexCliProvider(),
|
|
||||||
'gemini-cli': new GeminiCliProvider(),
|
'gemini-cli': new GeminiCliProvider(),
|
||||||
'grok-cli': new GrokCliProvider()
|
'grok-cli': new GrokCliProvider()
|
||||||
};
|
};
|
||||||
@@ -95,55 +93,31 @@ function _getProvider(providerName) {
|
|||||||
|
|
||||||
// Helper function to get cost for a specific model
|
// Helper function to get cost for a specific model
|
||||||
function _getCostForModel(providerName, modelId) {
|
function _getCostForModel(providerName, modelId) {
|
||||||
const DEFAULT_COST = {
|
const DEFAULT_COST = { inputCost: 0, outputCost: 0, currency: 'USD' };
|
||||||
inputCost: 0,
|
|
||||||
outputCost: 0,
|
|
||||||
currency: 'USD',
|
|
||||||
isUnknown: false
|
|
||||||
};
|
|
||||||
|
|
||||||
if (!MODEL_MAP || !MODEL_MAP[providerName]) {
|
if (!MODEL_MAP || !MODEL_MAP[providerName]) {
|
||||||
log(
|
log(
|
||||||
'warn',
|
'warn',
|
||||||
`Provider "${providerName}" not found in MODEL_MAP. Cannot determine cost for model ${modelId}.`
|
`Provider "${providerName}" not found in MODEL_MAP. Cannot determine cost for model ${modelId}.`
|
||||||
);
|
);
|
||||||
return { ...DEFAULT_COST, isUnknown: true };
|
return DEFAULT_COST;
|
||||||
}
|
}
|
||||||
|
|
||||||
const modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);
|
const modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);
|
||||||
|
|
||||||
if (!modelData) {
|
if (!modelData?.cost_per_1m_tokens) {
|
||||||
log(
|
log(
|
||||||
'debug',
|
'debug',
|
||||||
`Model "${modelId}" not found under provider "${providerName}". Assuming unknown cost.`
|
`Cost data not found for model "${modelId}" under provider "${providerName}". Assuming zero cost.`
|
||||||
);
|
);
|
||||||
return { ...DEFAULT_COST, isUnknown: true };
|
return DEFAULT_COST;
|
||||||
}
|
|
||||||
|
|
||||||
// Check if cost_per_1m_tokens is explicitly null (unknown pricing)
|
|
||||||
if (modelData.cost_per_1m_tokens === null) {
|
|
||||||
log(
|
|
||||||
'debug',
|
|
||||||
`Cost data is null for model "${modelId}" under provider "${providerName}". Pricing unknown.`
|
|
||||||
);
|
|
||||||
return { ...DEFAULT_COST, isUnknown: true };
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if cost_per_1m_tokens is missing/undefined (also unknown)
|
|
||||||
if (modelData.cost_per_1m_tokens === undefined) {
|
|
||||||
log(
|
|
||||||
'debug',
|
|
||||||
`Cost data not found for model "${modelId}" under provider "${providerName}". Pricing unknown.`
|
|
||||||
);
|
|
||||||
return { ...DEFAULT_COST, isUnknown: true };
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const costs = modelData.cost_per_1m_tokens;
|
const costs = modelData.cost_per_1m_tokens;
|
||||||
return {
|
return {
|
||||||
inputCost: costs.input || 0,
|
inputCost: costs.input || 0,
|
||||||
outputCost: costs.output || 0,
|
outputCost: costs.output || 0,
|
||||||
currency: costs.currency || 'USD',
|
currency: costs.currency || 'USD'
|
||||||
isUnknown: false
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -893,8 +867,8 @@ async function logAiUsage({
|
|||||||
const timestamp = new Date().toISOString();
|
const timestamp = new Date().toISOString();
|
||||||
const totalTokens = (inputTokens || 0) + (outputTokens || 0);
|
const totalTokens = (inputTokens || 0) + (outputTokens || 0);
|
||||||
|
|
||||||
// Destructure currency along with costs and unknown flag
|
// Destructure currency along with costs
|
||||||
const { inputCost, outputCost, currency, isUnknown } = _getCostForModel(
|
const { inputCost, outputCost, currency } = _getCostForModel(
|
||||||
providerName,
|
providerName,
|
||||||
modelId
|
modelId
|
||||||
);
|
);
|
||||||
@@ -916,8 +890,7 @@ async function logAiUsage({
|
|||||||
outputTokens: outputTokens || 0,
|
outputTokens: outputTokens || 0,
|
||||||
totalTokens,
|
totalTokens,
|
||||||
totalCost,
|
totalCost,
|
||||||
currency, // Add currency to the telemetry data
|
currency // Add currency to the telemetry data
|
||||||
isUnknownCost: isUnknown // Flag to indicate if pricing is unknown
|
|
||||||
};
|
};
|
||||||
|
|
||||||
if (getDebugFlag()) {
|
if (getDebugFlag()) {
|
||||||
|
|||||||
@@ -12,11 +12,17 @@ import https from 'https';
|
|||||||
import http from 'http';
|
import http from 'http';
|
||||||
import inquirer from 'inquirer';
|
import inquirer from 'inquirer';
|
||||||
import search from '@inquirer/search';
|
import search from '@inquirer/search';
|
||||||
|
import ora from 'ora'; // Import ora
|
||||||
|
|
||||||
import { log, readJSON } from './utils.js';
|
import { log, readJSON } from './utils.js';
|
||||||
// Import command registry and utilities from @tm/cli
|
// Import new commands from @tm/cli
|
||||||
import {
|
import {
|
||||||
registerAllCommands,
|
ListTasksCommand,
|
||||||
|
ShowCommand,
|
||||||
|
AuthCommand,
|
||||||
|
ContextCommand,
|
||||||
|
StartCommand,
|
||||||
|
SetStatusCommand,
|
||||||
checkForUpdate,
|
checkForUpdate,
|
||||||
performAutoUpdate,
|
performAutoUpdate,
|
||||||
displayUpgradeNotification
|
displayUpgradeNotification
|
||||||
@@ -26,6 +32,7 @@ import {
|
|||||||
parsePRD,
|
parsePRD,
|
||||||
updateTasks,
|
updateTasks,
|
||||||
generateTaskFiles,
|
generateTaskFiles,
|
||||||
|
listTasks,
|
||||||
expandTask,
|
expandTask,
|
||||||
expandAllTasks,
|
expandAllTasks,
|
||||||
clearSubtasks,
|
clearSubtasks,
|
||||||
@@ -46,7 +53,11 @@ import {
|
|||||||
validateStrength
|
validateStrength
|
||||||
} from './task-manager.js';
|
} from './task-manager.js';
|
||||||
|
|
||||||
import { moveTasksBetweenTags } from './task-manager/move-task.js';
|
import {
|
||||||
|
moveTasksBetweenTags,
|
||||||
|
MoveTaskError,
|
||||||
|
MOVE_ERROR_CODES
|
||||||
|
} from './task-manager/move-task.js';
|
||||||
|
|
||||||
import {
|
import {
|
||||||
createTag,
|
createTag,
|
||||||
@@ -61,7 +72,9 @@ import {
|
|||||||
addDependency,
|
addDependency,
|
||||||
removeDependency,
|
removeDependency,
|
||||||
validateDependenciesCommand,
|
validateDependenciesCommand,
|
||||||
fixDependenciesCommand
|
fixDependenciesCommand,
|
||||||
|
DependencyError,
|
||||||
|
DEPENDENCY_ERROR_CODES
|
||||||
} from './dependency-manager.js';
|
} from './dependency-manager.js';
|
||||||
|
|
||||||
import {
|
import {
|
||||||
@@ -90,6 +103,7 @@ import {
|
|||||||
displayBanner,
|
displayBanner,
|
||||||
displayHelp,
|
displayHelp,
|
||||||
displayNextTask,
|
displayNextTask,
|
||||||
|
displayTaskById,
|
||||||
displayComplexityReport,
|
displayComplexityReport,
|
||||||
getStatusWithColor,
|
getStatusWithColor,
|
||||||
confirmTaskOverwrite,
|
confirmTaskOverwrite,
|
||||||
@@ -98,6 +112,8 @@ import {
|
|||||||
displayModelConfiguration,
|
displayModelConfiguration,
|
||||||
displayAvailableModels,
|
displayAvailableModels,
|
||||||
displayApiKeyStatus,
|
displayApiKeyStatus,
|
||||||
|
displayAiUsageSummary,
|
||||||
|
displayMultipleTasksSummary,
|
||||||
displayTaggedTasksFYI,
|
displayTaggedTasksFYI,
|
||||||
displayCurrentTagIndicator,
|
displayCurrentTagIndicator,
|
||||||
displayCrossTagDependencyError,
|
displayCrossTagDependencyError,
|
||||||
@@ -121,6 +137,10 @@ import {
|
|||||||
setModel,
|
setModel,
|
||||||
getApiKeyStatusReport
|
getApiKeyStatusReport
|
||||||
} from './task-manager/models.js';
|
} from './task-manager/models.js';
|
||||||
|
import {
|
||||||
|
isValidTaskStatus,
|
||||||
|
TASK_STATUS_OPTIONS
|
||||||
|
} from '../../src/constants/task-status.js';
|
||||||
import {
|
import {
|
||||||
isValidRulesAction,
|
isValidRulesAction,
|
||||||
RULES_ACTIONS,
|
RULES_ACTIONS,
|
||||||
@@ -1667,12 +1687,29 @@ function registerCommands(programInstance) {
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
// ========================================
|
// Register the set-status command from @tm/cli
|
||||||
// Register All Commands from @tm/cli
|
// Handles task status updates with proper error handling and validation
|
||||||
// ========================================
|
SetStatusCommand.registerOn(programInstance);
|
||||||
// Use the centralized command registry to register all CLI commands
|
|
||||||
// This replaces individual command registrations and reduces duplication
|
// NEW: Register the new list command from @tm/cli
|
||||||
registerAllCommands(programInstance);
|
// This command handles all its own configuration and logic
|
||||||
|
ListTasksCommand.registerOn(programInstance);
|
||||||
|
|
||||||
|
// Register the auth command from @tm/cli
|
||||||
|
// Handles authentication with tryhamster.com
|
||||||
|
AuthCommand.registerOn(programInstance);
|
||||||
|
|
||||||
|
// Register the context command from @tm/cli
|
||||||
|
// Manages workspace context (org/brief selection)
|
||||||
|
ContextCommand.registerOn(programInstance);
|
||||||
|
|
||||||
|
// Register the show command from @tm/cli
|
||||||
|
// Displays detailed information about tasks
|
||||||
|
ShowCommand.registerOn(programInstance);
|
||||||
|
|
||||||
|
// Register the start command from @tm/cli
|
||||||
|
// Starts working on a task by launching claude-code with a standardized prompt
|
||||||
|
StartCommand.registerOn(programInstance);
|
||||||
|
|
||||||
// expand command
|
// expand command
|
||||||
programInstance
|
programInstance
|
||||||
@@ -3549,10 +3586,6 @@ ${result.result}
|
|||||||
'--gemini-cli',
|
'--gemini-cli',
|
||||||
'Allow setting a Gemini CLI model ID (use with --set-*)'
|
'Allow setting a Gemini CLI model ID (use with --set-*)'
|
||||||
)
|
)
|
||||||
.option(
|
|
||||||
'--codex-cli',
|
|
||||||
'Allow setting a Codex CLI model ID (use with --set-*)'
|
|
||||||
)
|
|
||||||
.addHelpText(
|
.addHelpText(
|
||||||
'after',
|
'after',
|
||||||
`
|
`
|
||||||
@@ -3568,7 +3601,6 @@ Examples:
|
|||||||
$ task-master models --set-main gpt-4o --azure # Set custom Azure OpenAI model for main role
|
$ task-master models --set-main gpt-4o --azure # Set custom Azure OpenAI model for main role
|
||||||
$ task-master models --set-main claude-3-5-sonnet@20241022 --vertex # Set custom Vertex AI model for main role
|
$ task-master models --set-main claude-3-5-sonnet@20241022 --vertex # Set custom Vertex AI model for main role
|
||||||
$ task-master models --set-main gemini-2.5-pro --gemini-cli # Set Gemini CLI model for main role
|
$ task-master models --set-main gemini-2.5-pro --gemini-cli # Set Gemini CLI model for main role
|
||||||
$ task-master models --set-main gpt-5-codex --codex-cli # Set Codex CLI model for main role
|
|
||||||
$ task-master models --setup # Run interactive setup`
|
$ task-master models --setup # Run interactive setup`
|
||||||
)
|
)
|
||||||
.action(async (options) => {
|
.action(async (options) => {
|
||||||
@@ -3585,13 +3617,12 @@ Examples:
|
|||||||
options.ollama,
|
options.ollama,
|
||||||
options.bedrock,
|
options.bedrock,
|
||||||
options.claudeCode,
|
options.claudeCode,
|
||||||
options.geminiCli,
|
options.geminiCli
|
||||||
options.codexCli
|
|
||||||
].filter(Boolean).length;
|
].filter(Boolean).length;
|
||||||
if (providerFlags > 1) {
|
if (providerFlags > 1) {
|
||||||
console.error(
|
console.error(
|
||||||
chalk.red(
|
chalk.red(
|
||||||
'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock, --claude-code, --gemini-cli, --codex-cli) simultaneously.'
|
'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock, --claude-code, --gemini-cli) simultaneously.'
|
||||||
)
|
)
|
||||||
);
|
);
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
@@ -3637,9 +3668,7 @@ Examples:
|
|||||||
? 'claude-code'
|
? 'claude-code'
|
||||||
: options.geminiCli
|
: options.geminiCli
|
||||||
? 'gemini-cli'
|
? 'gemini-cli'
|
||||||
: options.codexCli
|
: undefined
|
||||||
? 'codex-cli'
|
|
||||||
: undefined
|
|
||||||
});
|
});
|
||||||
if (result.success) {
|
if (result.success) {
|
||||||
console.log(chalk.green(`✅ ${result.data.message}`));
|
console.log(chalk.green(`✅ ${result.data.message}`));
|
||||||
@@ -3665,9 +3694,7 @@ Examples:
|
|||||||
? 'claude-code'
|
? 'claude-code'
|
||||||
: options.geminiCli
|
: options.geminiCli
|
||||||
? 'gemini-cli'
|
? 'gemini-cli'
|
||||||
: options.codexCli
|
: undefined
|
||||||
? 'codex-cli'
|
|
||||||
: undefined
|
|
||||||
});
|
});
|
||||||
if (result.success) {
|
if (result.success) {
|
||||||
console.log(chalk.green(`✅ ${result.data.message}`));
|
console.log(chalk.green(`✅ ${result.data.message}`));
|
||||||
@@ -3695,9 +3722,7 @@ Examples:
|
|||||||
? 'claude-code'
|
? 'claude-code'
|
||||||
: options.geminiCli
|
: options.geminiCli
|
||||||
? 'gemini-cli'
|
? 'gemini-cli'
|
||||||
: options.codexCli
|
: undefined
|
||||||
? 'codex-cli'
|
|
||||||
: undefined
|
|
||||||
});
|
});
|
||||||
if (result.success) {
|
if (result.success) {
|
||||||
console.log(chalk.green(`✅ ${result.data.message}`));
|
console.log(chalk.green(`✅ ${result.data.message}`));
|
||||||
|
|||||||
@@ -58,7 +58,6 @@ const DEFAULTS = {
|
|||||||
enableCodebaseAnalysis: true
|
enableCodebaseAnalysis: true
|
||||||
},
|
},
|
||||||
claudeCode: {},
|
claudeCode: {},
|
||||||
codexCli: {},
|
|
||||||
grokCli: {
|
grokCli: {
|
||||||
timeout: 120000,
|
timeout: 120000,
|
||||||
workingDirectory: null,
|
workingDirectory: null,
|
||||||
@@ -139,7 +138,6 @@ function _loadAndValidateConfig(explicitRoot = null) {
|
|||||||
},
|
},
|
||||||
global: { ...defaults.global, ...parsedConfig?.global },
|
global: { ...defaults.global, ...parsedConfig?.global },
|
||||||
claudeCode: { ...defaults.claudeCode, ...parsedConfig?.claudeCode },
|
claudeCode: { ...defaults.claudeCode, ...parsedConfig?.claudeCode },
|
||||||
codexCli: { ...defaults.codexCli, ...parsedConfig?.codexCli },
|
|
||||||
grokCli: { ...defaults.grokCli, ...parsedConfig?.grokCli }
|
grokCli: { ...defaults.grokCli, ...parsedConfig?.grokCli }
|
||||||
};
|
};
|
||||||
configSource = `file (${configPath})`; // Update source info
|
configSource = `file (${configPath})`; // Update source info
|
||||||
@@ -186,9 +184,6 @@ function _loadAndValidateConfig(explicitRoot = null) {
|
|||||||
if (config.claudeCode && !isEmpty(config.claudeCode)) {
|
if (config.claudeCode && !isEmpty(config.claudeCode)) {
|
||||||
config.claudeCode = validateClaudeCodeSettings(config.claudeCode);
|
config.claudeCode = validateClaudeCodeSettings(config.claudeCode);
|
||||||
}
|
}
|
||||||
if (config.codexCli && !isEmpty(config.codexCli)) {
|
|
||||||
config.codexCli = validateCodexCliSettings(config.codexCli);
|
|
||||||
}
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// Use console.error for actual errors during parsing
|
// Use console.error for actual errors during parsing
|
||||||
console.error(
|
console.error(
|
||||||
@@ -371,57 +366,6 @@ function validateClaudeCodeSettings(settings) {
|
|||||||
return validatedSettings;
|
return validatedSettings;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Validates Codex CLI provider custom settings
|
|
||||||
* Mirrors the ai-sdk-provider-codex-cli options
|
|
||||||
* @param {object} settings The settings to validate
|
|
||||||
* @returns {object} The validated settings
|
|
||||||
*/
|
|
||||||
function validateCodexCliSettings(settings) {
|
|
||||||
const BaseSettingsSchema = z.object({
|
|
||||||
codexPath: z.string().optional(),
|
|
||||||
cwd: z.string().optional(),
|
|
||||||
approvalMode: z
|
|
||||||
.enum(['untrusted', 'on-failure', 'on-request', 'never'])
|
|
||||||
.optional(),
|
|
||||||
sandboxMode: z
|
|
||||||
.enum(['read-only', 'workspace-write', 'danger-full-access'])
|
|
||||||
.optional(),
|
|
||||||
fullAuto: z.boolean().optional(),
|
|
||||||
dangerouslyBypassApprovalsAndSandbox: z.boolean().optional(),
|
|
||||||
skipGitRepoCheck: z.boolean().optional(),
|
|
||||||
color: z.enum(['always', 'never', 'auto']).optional(),
|
|
||||||
allowNpx: z.boolean().optional(),
|
|
||||||
outputLastMessageFile: z.string().optional(),
|
|
||||||
env: z.record(z.string(), z.string()).optional(),
|
|
||||||
verbose: z.boolean().optional(),
|
|
||||||
logger: z.union([z.object({}).passthrough(), z.literal(false)]).optional()
|
|
||||||
});
|
|
||||||
|
|
||||||
const CommandSpecificSchema = z
|
|
||||||
.record(z.string(), BaseSettingsSchema)
|
|
||||||
.refine(
|
|
||||||
(obj) =>
|
|
||||||
Object.keys(obj || {}).every((k) => AI_COMMAND_NAMES.includes(k)),
|
|
||||||
{ message: 'Invalid command name in commandSpecific' }
|
|
||||||
);
|
|
||||||
|
|
||||||
const SettingsSchema = BaseSettingsSchema.extend({
|
|
||||||
commandSpecific: CommandSpecificSchema.optional()
|
|
||||||
});
|
|
||||||
|
|
||||||
try {
|
|
||||||
return SettingsSchema.parse(settings);
|
|
||||||
} catch (error) {
|
|
||||||
console.warn(
|
|
||||||
chalk.yellow(
|
|
||||||
`Warning: Invalid Codex CLI settings in config: ${error.message}. Falling back to default.`
|
|
||||||
)
|
|
||||||
);
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Claude Code Settings Getters ---
|
// --- Claude Code Settings Getters ---
|
||||||
|
|
||||||
function getClaudeCodeSettings(explicitRoot = null, forceReload = false) {
|
function getClaudeCodeSettings(explicitRoot = null, forceReload = false) {
|
||||||
@@ -430,23 +374,6 @@ function getClaudeCodeSettings(explicitRoot = null, forceReload = false) {
|
|||||||
return { ...DEFAULTS.claudeCode, ...(config?.claudeCode || {}) };
|
return { ...DEFAULTS.claudeCode, ...(config?.claudeCode || {}) };
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Codex CLI Settings Getters ---
|
|
||||||
|
|
||||||
function getCodexCliSettings(explicitRoot = null, forceReload = false) {
|
|
||||||
const config = getConfig(explicitRoot, forceReload);
|
|
||||||
return { ...DEFAULTS.codexCli, ...(config?.codexCli || {}) };
|
|
||||||
}
|
|
||||||
|
|
||||||
function getCodexCliSettingsForCommand(
|
|
||||||
commandName,
|
|
||||||
explicitRoot = null,
|
|
||||||
forceReload = false
|
|
||||||
) {
|
|
||||||
const settings = getCodexCliSettings(explicitRoot, forceReload);
|
|
||||||
const commandSpecific = settings?.commandSpecific || {};
|
|
||||||
return { ...settings, ...commandSpecific[commandName] };
|
|
||||||
}
|
|
||||||
|
|
||||||
function getClaudeCodeSettingsForCommand(
|
function getClaudeCodeSettingsForCommand(
|
||||||
commandName,
|
commandName,
|
||||||
explicitRoot = null,
|
explicitRoot = null,
|
||||||
@@ -564,8 +491,7 @@ function hasCodebaseAnalysis(
|
|||||||
return (
|
return (
|
||||||
currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE ||
|
currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE ||
|
||||||
currentProvider === CUSTOM_PROVIDERS.GEMINI_CLI ||
|
currentProvider === CUSTOM_PROVIDERS.GEMINI_CLI ||
|
||||||
currentProvider === CUSTOM_PROVIDERS.GROK_CLI ||
|
currentProvider === CUSTOM_PROVIDERS.GROK_CLI
|
||||||
currentProvider === CUSTOM_PROVIDERS.CODEX_CLI
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -795,8 +721,7 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
|
|||||||
CUSTOM_PROVIDERS.BEDROCK,
|
CUSTOM_PROVIDERS.BEDROCK,
|
||||||
CUSTOM_PROVIDERS.MCP,
|
CUSTOM_PROVIDERS.MCP,
|
||||||
CUSTOM_PROVIDERS.GEMINI_CLI,
|
CUSTOM_PROVIDERS.GEMINI_CLI,
|
||||||
CUSTOM_PROVIDERS.GROK_CLI,
|
CUSTOM_PROVIDERS.GROK_CLI
|
||||||
CUSTOM_PROVIDERS.CODEX_CLI
|
|
||||||
];
|
];
|
||||||
|
|
||||||
if (providersWithoutApiKeys.includes(providerName?.toLowerCase())) {
|
if (providersWithoutApiKeys.includes(providerName?.toLowerCase())) {
|
||||||
@@ -808,11 +733,6 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
|
|||||||
return true; // No API key needed
|
return true; // No API key needed
|
||||||
}
|
}
|
||||||
|
|
||||||
// Codex CLI supports OAuth via codex login; API key optional
|
|
||||||
if (providerName?.toLowerCase() === 'codex-cli') {
|
|
||||||
return true; // Treat as OK even without key
|
|
||||||
}
|
|
||||||
|
|
||||||
const keyMap = {
|
const keyMap = {
|
||||||
openai: 'OPENAI_API_KEY',
|
openai: 'OPENAI_API_KEY',
|
||||||
anthropic: 'ANTHROPIC_API_KEY',
|
anthropic: 'ANTHROPIC_API_KEY',
|
||||||
@@ -916,8 +836,6 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
|
|||||||
return true; // No key needed
|
return true; // No key needed
|
||||||
case 'claude-code':
|
case 'claude-code':
|
||||||
return true; // No key needed
|
return true; // No key needed
|
||||||
case 'codex-cli':
|
|
||||||
return true; // OAuth/subscription via Codex CLI
|
|
||||||
case 'mistral':
|
case 'mistral':
|
||||||
apiKeyToCheck = mcpEnv.MISTRAL_API_KEY;
|
apiKeyToCheck = mcpEnv.MISTRAL_API_KEY;
|
||||||
placeholderValue = 'YOUR_MISTRAL_API_KEY_HERE';
|
placeholderValue = 'YOUR_MISTRAL_API_KEY_HERE';
|
||||||
@@ -1110,8 +1028,7 @@ export const providersWithoutApiKeys = [
|
|||||||
CUSTOM_PROVIDERS.BEDROCK,
|
CUSTOM_PROVIDERS.BEDROCK,
|
||||||
CUSTOM_PROVIDERS.GEMINI_CLI,
|
CUSTOM_PROVIDERS.GEMINI_CLI,
|
||||||
CUSTOM_PROVIDERS.GROK_CLI,
|
CUSTOM_PROVIDERS.GROK_CLI,
|
||||||
CUSTOM_PROVIDERS.MCP,
|
CUSTOM_PROVIDERS.MCP
|
||||||
CUSTOM_PROVIDERS.CODEX_CLI
|
|
||||||
];
|
];
|
||||||
|
|
||||||
export {
|
export {
|
||||||
@@ -1123,9 +1040,6 @@ export {
|
|||||||
// Claude Code settings
|
// Claude Code settings
|
||||||
getClaudeCodeSettings,
|
getClaudeCodeSettings,
|
||||||
getClaudeCodeSettingsForCommand,
|
getClaudeCodeSettingsForCommand,
|
||||||
// Codex CLI settings
|
|
||||||
getCodexCliSettings,
|
|
||||||
getCodexCliSettingsForCommand,
|
|
||||||
// Grok CLI settings
|
// Grok CLI settings
|
||||||
getGrokCliSettings,
|
getGrokCliSettings,
|
||||||
getGrokCliSettingsForCommand,
|
getGrokCliSettingsForCommand,
|
||||||
@@ -1133,7 +1047,6 @@ export {
|
|||||||
validateProvider,
|
validateProvider,
|
||||||
validateProviderModelCombination,
|
validateProviderModelCombination,
|
||||||
validateClaudeCodeSettings,
|
validateClaudeCodeSettings,
|
||||||
validateCodexCliSettings,
|
|
||||||
VALIDATED_PROVIDERS,
|
VALIDATED_PROVIDERS,
|
||||||
CUSTOM_PROVIDERS,
|
CUSTOM_PROVIDERS,
|
||||||
ALL_PROVIDERS,
|
ALL_PROVIDERS,
|
||||||
|
|||||||
@@ -69,30 +69,6 @@
|
|||||||
"supported": true
|
"supported": true
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"codex-cli": [
|
|
||||||
{
|
|
||||||
"id": "gpt-5",
|
|
||||||
"swe_score": 0.749,
|
|
||||||
"cost_per_1m_tokens": {
|
|
||||||
"input": 0,
|
|
||||||
"output": 0
|
|
||||||
},
|
|
||||||
"allowed_roles": ["main", "fallback", "research"],
|
|
||||||
"max_tokens": 128000,
|
|
||||||
"supported": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "gpt-5-codex",
|
|
||||||
"swe_score": 0.749,
|
|
||||||
"cost_per_1m_tokens": {
|
|
||||||
"input": 0,
|
|
||||||
"output": 0
|
|
||||||
},
|
|
||||||
"allowed_roles": ["main", "fallback", "research"],
|
|
||||||
"max_tokens": 128000,
|
|
||||||
"supported": true
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"mcp": [
|
"mcp": [
|
||||||
{
|
{
|
||||||
"id": "mcp-sampling",
|
"id": "mcp-sampling",
|
||||||
|
|||||||
@@ -1,5 +1,8 @@
|
|||||||
|
import path from 'path';
|
||||||
|
|
||||||
import { log, readJSON, writeJSON, getCurrentTag } from '../utils.js';
|
import { log, readJSON, writeJSON, getCurrentTag } from '../utils.js';
|
||||||
import { isTaskDependentOn } from '../task-manager.js';
|
import { isTaskDependentOn } from '../task-manager.js';
|
||||||
|
import generateTaskFiles from './generate-task-files.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add a subtask to a parent task
|
* Add a subtask to a parent task
|
||||||
@@ -139,7 +142,11 @@ async function addSubtask(
|
|||||||
// Write the updated tasks back to the file with proper context
|
// Write the updated tasks back to the file with proper context
|
||||||
writeJSON(tasksPath, data, projectRoot, tag);
|
writeJSON(tasksPath, data, projectRoot, tag);
|
||||||
|
|
||||||
// Note: Task file generation is no longer supported and has been removed
|
// Generate task files if requested
|
||||||
|
if (generateFiles) {
|
||||||
|
log('info', 'Regenerating task files...');
|
||||||
|
await generateTaskFiles(tasksPath, path.dirname(tasksPath), context);
|
||||||
|
}
|
||||||
|
|
||||||
return newSubtask;
|
return newSubtask;
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@@ -539,22 +539,6 @@ async function setModel(role, modelId, options = {}) {
|
|||||||
warningMessage = `Warning: Gemini CLI model '${modelId}' not found in supported models. Setting without validation.`;
|
warningMessage = `Warning: Gemini CLI model '${modelId}' not found in supported models. Setting without validation.`;
|
||||||
report('warn', warningMessage);
|
report('warn', warningMessage);
|
||||||
}
|
}
|
||||||
} else if (providerHint === CUSTOM_PROVIDERS.CODEX_CLI) {
|
|
||||||
// Codex CLI provider - enforce supported model list
|
|
||||||
determinedProvider = CUSTOM_PROVIDERS.CODEX_CLI;
|
|
||||||
const codexCliModels = availableModels.filter(
|
|
||||||
(m) => m.provider === 'codex-cli'
|
|
||||||
);
|
|
||||||
const codexCliModelData = codexCliModels.find(
|
|
||||||
(m) => m.id === modelId
|
|
||||||
);
|
|
||||||
if (codexCliModelData) {
|
|
||||||
modelData = codexCliModelData;
|
|
||||||
report('info', `Setting Codex CLI model '${modelId}'.`);
|
|
||||||
} else {
|
|
||||||
warningMessage = `Warning: Codex CLI model '${modelId}' not found in supported models. Setting without validation.`;
|
|
||||||
report('warn', warningMessage);
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
// Invalid provider hint - should not happen with our constants
|
// Invalid provider hint - should not happen with our constants
|
||||||
throw new Error(`Invalid provider hint received: ${providerHint}`);
|
throw new Error(`Invalid provider hint received: ${providerHint}`);
|
||||||
@@ -575,7 +559,7 @@ async function setModel(role, modelId, options = {}) {
|
|||||||
success: false,
|
success: false,
|
||||||
error: {
|
error: {
|
||||||
code: 'MODEL_NOT_FOUND_NO_HINT',
|
code: 'MODEL_NOT_FOUND_NO_HINT',
|
||||||
message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, --vertex, --gemini-cli, or --codex-cli.`
|
message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.`
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import {
|
|||||||
setTasksForTag,
|
setTasksForTag,
|
||||||
traverseDependencies
|
traverseDependencies
|
||||||
} from '../utils.js';
|
} from '../utils.js';
|
||||||
|
import generateTaskFiles from './generate-task-files.js';
|
||||||
import {
|
import {
|
||||||
findCrossTagDependencies,
|
findCrossTagDependencies,
|
||||||
getDependentTaskIds,
|
getDependentTaskIds,
|
||||||
@@ -141,7 +142,13 @@ async function moveTask(
|
|||||||
results.push(result);
|
results.push(result);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note: Task file generation is no longer supported and has been removed
|
// Generate files once at the end if requested
|
||||||
|
if (generateFiles) {
|
||||||
|
await generateTaskFiles(tasksPath, path.dirname(tasksPath), {
|
||||||
|
tag: tag,
|
||||||
|
projectRoot: projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
message: `Successfully moved ${sourceIds.length} tasks/subtasks`,
|
message: `Successfully moved ${sourceIds.length} tasks/subtasks`,
|
||||||
@@ -202,7 +209,12 @@ async function moveTask(
|
|||||||
// The writeJSON function will filter out _rawTaggedData automatically
|
// The writeJSON function will filter out _rawTaggedData automatically
|
||||||
writeJSON(tasksPath, rawData, options.projectRoot, tag);
|
writeJSON(tasksPath, rawData, options.projectRoot, tag);
|
||||||
|
|
||||||
// Note: Task file generation is no longer supported and has been removed
|
if (generateFiles) {
|
||||||
|
await generateTaskFiles(tasksPath, path.dirname(tasksPath), {
|
||||||
|
tag: tag,
|
||||||
|
projectRoot: projectRoot
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,6 @@
|
|||||||
|
import path from 'path';
|
||||||
import { log, readJSON, writeJSON } from '../utils.js';
|
import { log, readJSON, writeJSON } from '../utils.js';
|
||||||
|
import generateTaskFiles from './generate-task-files.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Remove a subtask from its parent task
|
* Remove a subtask from its parent task
|
||||||
@@ -106,7 +108,11 @@ async function removeSubtask(
|
|||||||
// Write the updated tasks back to the file with proper context
|
// Write the updated tasks back to the file with proper context
|
||||||
writeJSON(tasksPath, data, projectRoot, tag);
|
writeJSON(tasksPath, data, projectRoot, tag);
|
||||||
|
|
||||||
// Note: Task file generation is no longer supported and has been removed
|
// Generate task files if requested
|
||||||
|
if (generateFiles) {
|
||||||
|
log('info', 'Regenerating task files...');
|
||||||
|
await generateTaskFiles(tasksPath, path.dirname(tasksPath), context);
|
||||||
|
}
|
||||||
|
|
||||||
return convertedTask;
|
return convertedTask;
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@@ -619,29 +619,9 @@ async function tags(
|
|||||||
headers.push(chalk.cyan.bold('Description'));
|
headers.push(chalk.cyan.bold('Description'));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Calculate dynamic column widths based on terminal width
|
|
||||||
const terminalWidth = Math.max(process.stdout.columns || 120, 80);
|
|
||||||
const usableWidth = Math.floor(terminalWidth * 0.95);
|
|
||||||
|
|
||||||
let colWidths;
|
|
||||||
if (showMetadata) {
|
|
||||||
// With metadata: Tag Name, Tasks, Completed, Created, Description
|
|
||||||
const widths = [0.25, 0.1, 0.12, 0.15, 0.38];
|
|
||||||
colWidths = widths.map((w, i) =>
|
|
||||||
Math.max(Math.floor(usableWidth * w), i === 0 ? 15 : 8)
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
// Without metadata: Tag Name, Tasks, Completed
|
|
||||||
const widths = [0.7, 0.15, 0.15];
|
|
||||||
colWidths = widths.map((w, i) =>
|
|
||||||
Math.max(Math.floor(usableWidth * w), i === 0 ? 20 : 10)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const table = new Table({
|
const table = new Table({
|
||||||
head: headers,
|
head: headers,
|
||||||
colWidths: colWidths,
|
colWidths: showMetadata ? [20, 10, 12, 15, 50] : [25, 10, 12]
|
||||||
wordWrap: true
|
|
||||||
});
|
});
|
||||||
|
|
||||||
// Add rows
|
// Add rows
|
||||||
|
|||||||
@@ -2310,8 +2310,7 @@ function displayAiUsageSummary(telemetryData, outputType = 'cli') {
|
|||||||
outputTokens,
|
outputTokens,
|
||||||
totalTokens,
|
totalTokens,
|
||||||
totalCost,
|
totalCost,
|
||||||
commandName,
|
commandName
|
||||||
isUnknownCost
|
|
||||||
} = telemetryData;
|
} = telemetryData;
|
||||||
|
|
||||||
let summary = chalk.bold.blue('AI Usage Summary:') + '\n';
|
let summary = chalk.bold.blue('AI Usage Summary:') + '\n';
|
||||||
@@ -2321,10 +2320,7 @@ function displayAiUsageSummary(telemetryData, outputType = 'cli') {
|
|||||||
summary += chalk.gray(
|
summary += chalk.gray(
|
||||||
` Tokens: ${totalTokens} (Input: ${inputTokens}, Output: ${outputTokens})\n`
|
` Tokens: ${totalTokens} (Input: ${inputTokens}, Output: ${outputTokens})\n`
|
||||||
);
|
);
|
||||||
|
summary += chalk.gray(` Est. Cost: $${totalCost.toFixed(6)}`);
|
||||||
// Show "Unknown" if pricing data is not available, otherwise show the cost
|
|
||||||
const costDisplay = isUnknownCost ? 'Unknown' : `$${totalCost.toFixed(6)}`;
|
|
||||||
summary += chalk.gray(` Est. Cost: ${costDisplay}`);
|
|
||||||
|
|
||||||
console.log(
|
console.log(
|
||||||
boxen(summary, {
|
boxen(summary, {
|
||||||
|
|||||||
@@ -28,13 +28,6 @@ export class BaseAIProvider {
|
|||||||
* @type {boolean}
|
* @type {boolean}
|
||||||
*/
|
*/
|
||||||
this.needsExplicitJsonSchema = false;
|
this.needsExplicitJsonSchema = false;
|
||||||
|
|
||||||
/**
|
|
||||||
* Whether this provider supports temperature parameter
|
|
||||||
* Can be overridden by subclasses
|
|
||||||
* @type {boolean}
|
|
||||||
*/
|
|
||||||
this.supportsTemperature = true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -175,9 +168,7 @@ export class BaseAIProvider {
|
|||||||
model: client(params.modelId),
|
model: client(params.modelId),
|
||||||
messages: params.messages,
|
messages: params.messages,
|
||||||
...this.prepareTokenParam(params.modelId, params.maxTokens),
|
...this.prepareTokenParam(params.modelId, params.maxTokens),
|
||||||
...(this.supportsTemperature && params.temperature !== undefined
|
temperature: params.temperature
|
||||||
? { temperature: params.temperature }
|
|
||||||
: {})
|
|
||||||
});
|
});
|
||||||
|
|
||||||
log(
|
log(
|
||||||
@@ -185,19 +176,12 @@ export class BaseAIProvider {
|
|||||||
`${this.name} generateText completed successfully for model: ${params.modelId}`
|
`${this.name} generateText completed successfully for model: ${params.modelId}`
|
||||||
);
|
);
|
||||||
|
|
||||||
const inputTokens =
|
|
||||||
result.usage?.inputTokens ?? result.usage?.promptTokens ?? 0;
|
|
||||||
const outputTokens =
|
|
||||||
result.usage?.outputTokens ?? result.usage?.completionTokens ?? 0;
|
|
||||||
const totalTokens =
|
|
||||||
result.usage?.totalTokens ?? inputTokens + outputTokens;
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
text: result.text,
|
text: result.text,
|
||||||
usage: {
|
usage: {
|
||||||
inputTokens,
|
inputTokens: result.usage?.promptTokens,
|
||||||
outputTokens,
|
outputTokens: result.usage?.completionTokens,
|
||||||
totalTokens
|
totalTokens: result.usage?.totalTokens
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
@@ -220,9 +204,7 @@ export class BaseAIProvider {
|
|||||||
model: client(params.modelId),
|
model: client(params.modelId),
|
||||||
messages: params.messages,
|
messages: params.messages,
|
||||||
...this.prepareTokenParam(params.modelId, params.maxTokens),
|
...this.prepareTokenParam(params.modelId, params.maxTokens),
|
||||||
...(this.supportsTemperature && params.temperature !== undefined
|
temperature: params.temperature
|
||||||
? { temperature: params.temperature }
|
|
||||||
: {})
|
|
||||||
});
|
});
|
||||||
|
|
||||||
log(
|
log(
|
||||||
@@ -260,9 +242,7 @@ export class BaseAIProvider {
|
|||||||
schema: zodSchema(params.schema),
|
schema: zodSchema(params.schema),
|
||||||
mode: params.mode || 'auto',
|
mode: params.mode || 'auto',
|
||||||
maxOutputTokens: params.maxTokens,
|
maxOutputTokens: params.maxTokens,
|
||||||
...(this.supportsTemperature && params.temperature !== undefined
|
temperature: params.temperature
|
||||||
? { temperature: params.temperature }
|
|
||||||
: {})
|
|
||||||
});
|
});
|
||||||
|
|
||||||
log(
|
log(
|
||||||
@@ -308,9 +288,7 @@ export class BaseAIProvider {
|
|||||||
schemaName: params.objectName,
|
schemaName: params.objectName,
|
||||||
schemaDescription: `Generate a valid JSON object for ${params.objectName}`,
|
schemaDescription: `Generate a valid JSON object for ${params.objectName}`,
|
||||||
maxTokens: params.maxTokens,
|
maxTokens: params.maxTokens,
|
||||||
...(this.supportsTemperature && params.temperature !== undefined
|
temperature: params.temperature
|
||||||
? { temperature: params.temperature }
|
|
||||||
: {})
|
|
||||||
});
|
});
|
||||||
|
|
||||||
log(
|
log(
|
||||||
@@ -318,19 +296,12 @@ export class BaseAIProvider {
|
|||||||
`${this.name} generateObject completed successfully for model: ${params.modelId}`
|
`${this.name} generateObject completed successfully for model: ${params.modelId}`
|
||||||
);
|
);
|
||||||
|
|
||||||
const inputTokens =
|
|
||||||
result.usage?.inputTokens ?? result.usage?.promptTokens ?? 0;
|
|
||||||
const outputTokens =
|
|
||||||
result.usage?.outputTokens ?? result.usage?.completionTokens ?? 0;
|
|
||||||
const totalTokens =
|
|
||||||
result.usage?.totalTokens ?? inputTokens + outputTokens;
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
object: result.object,
|
object: result.object,
|
||||||
usage: {
|
usage: {
|
||||||
inputTokens,
|
inputTokens: result.usage?.promptTokens,
|
||||||
outputTokens,
|
outputTokens: result.usage?.completionTokens,
|
||||||
totalTokens
|
totalTokens: result.usage?.totalTokens
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@@ -34,8 +34,6 @@ export class ClaudeCodeProvider extends BaseAIProvider {
|
|||||||
this.supportedModels = ['sonnet', 'opus'];
|
this.supportedModels = ['sonnet', 'opus'];
|
||||||
// Claude Code requires explicit JSON schema mode
|
// Claude Code requires explicit JSON schema mode
|
||||||
this.needsExplicitJsonSchema = true;
|
this.needsExplicitJsonSchema = true;
|
||||||
// Claude Code does not support temperature parameter
|
|
||||||
this.supportsTemperature = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -1,106 +0,0 @@
|
|||||||
/**
|
|
||||||
* src/ai-providers/codex-cli.js
|
|
||||||
*
|
|
||||||
* Codex CLI provider implementation using the ai-sdk-provider-codex-cli package.
|
|
||||||
* This provider uses the local OpenAI Codex CLI with OAuth (preferred) or
|
|
||||||
* an optional OPENAI_CODEX_API_KEY if provided.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { createCodexCli } from 'ai-sdk-provider-codex-cli';
|
|
||||||
import { BaseAIProvider } from './base-provider.js';
|
|
||||||
import { execSync } from 'child_process';
|
|
||||||
import { log } from '../../scripts/modules/utils.js';
|
|
||||||
import { getCodexCliSettingsForCommand } from '../../scripts/modules/config-manager.js';
|
|
||||||
|
|
||||||
export class CodexCliProvider extends BaseAIProvider {
|
|
||||||
constructor() {
|
|
||||||
super();
|
|
||||||
this.name = 'Codex CLI';
|
|
||||||
// Codex CLI has native schema support, no explicit JSON schema mode required
|
|
||||||
this.needsExplicitJsonSchema = false;
|
|
||||||
// Codex CLI does not support temperature parameter
|
|
||||||
this.supportsTemperature = false;
|
|
||||||
// Restrict to supported models for OAuth subscription usage
|
|
||||||
this.supportedModels = ['gpt-5', 'gpt-5-codex'];
|
|
||||||
// CLI availability check cache
|
|
||||||
this._codexCliChecked = false;
|
|
||||||
this._codexCliAvailable = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Codex CLI does not require an API key when using OAuth via `codex login`.
|
|
||||||
* @returns {boolean}
|
|
||||||
*/
|
|
||||||
isRequiredApiKey() {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the environment variable name used when an API key is provided.
|
|
||||||
* Even though the API key is optional for Codex CLI (OAuth-first),
|
|
||||||
* downstream resolution expects a non-throwing implementation.
|
|
||||||
* Uses OPENAI_CODEX_API_KEY to avoid conflicts with OpenAI provider.
|
|
||||||
* @returns {string}
|
|
||||||
*/
|
|
||||||
getRequiredApiKeyName() {
|
|
||||||
return 'OPENAI_CODEX_API_KEY';
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Optional CLI availability check; provide helpful guidance if missing.
|
|
||||||
*/
|
|
||||||
validateAuth() {
|
|
||||||
if (process.env.NODE_ENV === 'test') return;
|
|
||||||
|
|
||||||
if (!this._codexCliChecked) {
|
|
||||||
try {
|
|
||||||
execSync('codex --version', { stdio: 'pipe', timeout: 1000 });
|
|
||||||
this._codexCliAvailable = true;
|
|
||||||
} catch (error) {
|
|
||||||
this._codexCliAvailable = false;
|
|
||||||
log(
|
|
||||||
'warn',
|
|
||||||
'Codex CLI not detected. Install with: npm i -g @openai/codex or enable fallback with allowNpx.'
|
|
||||||
);
|
|
||||||
} finally {
|
|
||||||
this._codexCliChecked = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a Codex CLI client instance
|
|
||||||
* @param {object} params
|
|
||||||
* @param {string} [params.commandName] - Command name for settings lookup
|
|
||||||
* @param {string} [params.apiKey] - Optional API key (injected as OPENAI_API_KEY for Codex CLI)
|
|
||||||
* @returns {Function}
|
|
||||||
*/
|
|
||||||
getClient(params = {}) {
|
|
||||||
try {
|
|
||||||
// Merge global + command-specific settings from config
|
|
||||||
const settings = getCodexCliSettingsForCommand(params.commandName) || {};
|
|
||||||
|
|
||||||
// Inject API key only if explicitly provided; OAuth is the primary path
|
|
||||||
const defaultSettings = {
|
|
||||||
...settings,
|
|
||||||
...(params.apiKey
|
|
||||||
? { env: { ...(settings.env || {}), OPENAI_API_KEY: params.apiKey } }
|
|
||||||
: {})
|
|
||||||
};
|
|
||||||
|
|
||||||
return createCodexCli({ defaultSettings });
|
|
||||||
} catch (error) {
|
|
||||||
const msg = String(error?.message || '');
|
|
||||||
const code = error?.code;
|
|
||||||
if (code === 'ENOENT' || /codex/i.test(msg)) {
|
|
||||||
const enhancedError = new Error(
|
|
||||||
`Codex CLI not available. Please install Codex CLI first. Original error: ${error.message}`
|
|
||||||
);
|
|
||||||
enhancedError.cause = error;
|
|
||||||
this.handleError('Codex CLI initialization', enhancedError);
|
|
||||||
} else {
|
|
||||||
this.handleError('client initialization', error);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -17,8 +17,6 @@ export class GeminiCliProvider extends BaseAIProvider {
|
|||||||
this.name = 'Gemini CLI';
|
this.name = 'Gemini CLI';
|
||||||
// Gemini CLI requires explicit JSON schema mode
|
// Gemini CLI requires explicit JSON schema mode
|
||||||
this.needsExplicitJsonSchema = true;
|
this.needsExplicitJsonSchema = true;
|
||||||
// Gemini CLI does not support temperature parameter
|
|
||||||
this.supportsTemperature = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -13,8 +13,6 @@ export class GrokCliProvider extends BaseAIProvider {
|
|||||||
this.name = 'Grok CLI';
|
this.name = 'Grok CLI';
|
||||||
// Grok CLI requires explicit JSON schema mode
|
// Grok CLI requires explicit JSON schema mode
|
||||||
this.needsExplicitJsonSchema = true;
|
this.needsExplicitJsonSchema = true;
|
||||||
// Grok CLI does not support temperature parameter
|
|
||||||
this.supportsTemperature = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -17,4 +17,3 @@ export { VertexAIProvider } from './google-vertex.js';
|
|||||||
export { ClaudeCodeProvider } from './claude-code.js';
|
export { ClaudeCodeProvider } from './claude-code.js';
|
||||||
export { GeminiCliProvider } from './gemini-cli.js';
|
export { GeminiCliProvider } from './gemini-cli.js';
|
||||||
export { GrokCliProvider } from './grok-cli.js';
|
export { GrokCliProvider } from './grok-cli.js';
|
||||||
export { CodexCliProvider } from './codex-cli.js';
|
|
||||||
|
|||||||
@@ -24,8 +24,7 @@ export const CUSTOM_PROVIDERS = {
|
|||||||
CLAUDE_CODE: 'claude-code',
|
CLAUDE_CODE: 'claude-code',
|
||||||
MCP: 'mcp',
|
MCP: 'mcp',
|
||||||
GEMINI_CLI: 'gemini-cli',
|
GEMINI_CLI: 'gemini-cli',
|
||||||
GROK_CLI: 'grok-cli',
|
GROK_CLI: 'grok-cli'
|
||||||
CODEX_CLI: 'codex-cli'
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Custom providers array (for backward compatibility and iteration)
|
// Custom providers array (for backward compatibility and iteration)
|
||||||
|
|||||||
@@ -1,62 +0,0 @@
|
|||||||
/**
|
|
||||||
* Integration Tests for Provider Temperature Support
|
|
||||||
*
|
|
||||||
* This test suite verifies that all providers correctly declare their
|
|
||||||
* temperature support capabilities. CLI providers should have
|
|
||||||
* supportsTemperature = false, while standard API providers should
|
|
||||||
* have supportsTemperature = true.
|
|
||||||
*
|
|
||||||
* These tests are separated from unit tests to avoid coupling
|
|
||||||
* base provider tests with concrete provider implementations.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { ClaudeCodeProvider } from '../../../src/ai-providers/claude-code.js';
|
|
||||||
import { CodexCliProvider } from '../../../src/ai-providers/codex-cli.js';
|
|
||||||
import { GeminiCliProvider } from '../../../src/ai-providers/gemini-cli.js';
|
|
||||||
import { GrokCliProvider } from '../../../src/ai-providers/grok-cli.js';
|
|
||||||
import { AnthropicAIProvider } from '../../../src/ai-providers/anthropic.js';
|
|
||||||
import { OpenAIProvider } from '../../../src/ai-providers/openai.js';
|
|
||||||
import { GoogleAIProvider } from '../../../src/ai-providers/google.js';
|
|
||||||
import { PerplexityAIProvider } from '../../../src/ai-providers/perplexity.js';
|
|
||||||
import { XAIProvider } from '../../../src/ai-providers/xai.js';
|
|
||||||
import { GroqProvider } from '../../../src/ai-providers/groq.js';
|
|
||||||
import { OpenRouterAIProvider } from '../../../src/ai-providers/openrouter.js';
|
|
||||||
import { OllamaAIProvider } from '../../../src/ai-providers/ollama.js';
|
|
||||||
import { BedrockAIProvider } from '../../../src/ai-providers/bedrock.js';
|
|
||||||
import { AzureProvider } from '../../../src/ai-providers/azure.js';
|
|
||||||
import { VertexAIProvider } from '../../../src/ai-providers/google-vertex.js';
|
|
||||||
|
|
||||||
describe('Provider Temperature Support', () => {
|
|
||||||
describe('CLI Providers', () => {
|
|
||||||
it('should verify CLI providers have supportsTemperature = false', () => {
|
|
||||||
expect(new ClaudeCodeProvider().supportsTemperature).toBe(false);
|
|
||||||
expect(new CodexCliProvider().supportsTemperature).toBe(false);
|
|
||||||
expect(new GeminiCliProvider().supportsTemperature).toBe(false);
|
|
||||||
expect(new GrokCliProvider().supportsTemperature).toBe(false);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('Standard API Providers', () => {
|
|
||||||
it('should verify standard providers have supportsTemperature = true', () => {
|
|
||||||
expect(new AnthropicAIProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new OpenAIProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new GoogleAIProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new PerplexityAIProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new XAIProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new GroqProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new OpenRouterAIProvider().supportsTemperature).toBe(true);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('Special Case Providers', () => {
|
|
||||||
it('should verify Ollama provider has supportsTemperature = true', () => {
|
|
||||||
expect(new OllamaAIProvider().supportsTemperature).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should verify cloud providers have supportsTemperature = true', () => {
|
|
||||||
expect(new BedrockAIProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new AzureProvider().supportsTemperature).toBe(true);
|
|
||||||
expect(new VertexAIProvider().supportsTemperature).toBe(true);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
@@ -1,669 +0,0 @@
|
|||||||
import { jest } from '@jest/globals';
|
|
||||||
|
|
||||||
// Mock the 'ai' SDK
|
|
||||||
const mockGenerateText = jest.fn();
|
|
||||||
const mockGenerateObject = jest.fn();
|
|
||||||
const mockNoObjectGeneratedError = class NoObjectGeneratedError extends Error {
|
|
||||||
static isInstance(error) {
|
|
||||||
return error instanceof mockNoObjectGeneratedError;
|
|
||||||
}
|
|
||||||
constructor(cause) {
|
|
||||||
super('No object generated');
|
|
||||||
this.cause = cause;
|
|
||||||
this.usage = cause.usage;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
const mockJSONParseError = class JSONParseError extends Error {
|
|
||||||
constructor(text) {
|
|
||||||
super('JSON parse error');
|
|
||||||
this.text = text;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
jest.unstable_mockModule('ai', () => ({
|
|
||||||
generateText: mockGenerateText,
|
|
||||||
streamText: jest.fn(),
|
|
||||||
generateObject: mockGenerateObject,
|
|
||||||
streamObject: jest.fn(),
|
|
||||||
zodSchema: jest.fn((schema) => schema),
|
|
||||||
NoObjectGeneratedError: mockNoObjectGeneratedError,
|
|
||||||
JSONParseError: mockJSONParseError
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Mock jsonrepair
|
|
||||||
const mockJsonrepair = jest.fn();
|
|
||||||
jest.unstable_mockModule('jsonrepair', () => ({
|
|
||||||
jsonrepair: mockJsonrepair
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Mock logging and utilities
|
|
||||||
jest.unstable_mockModule('../../../scripts/modules/utils.js', () => ({
|
|
||||||
log: jest.fn(),
|
|
||||||
findProjectRoot: jest.fn(() => '/mock/project/root'),
|
|
||||||
isEmpty: jest.fn(
|
|
||||||
(val) =>
|
|
||||||
!val ||
|
|
||||||
(Array.isArray(val) && val.length === 0) ||
|
|
||||||
(typeof val === 'object' && Object.keys(val).length === 0)
|
|
||||||
),
|
|
||||||
resolveEnvVariable: jest.fn((key) => process.env[key])
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Import after mocking
|
|
||||||
const { BaseAIProvider } = await import(
|
|
||||||
'../../../src/ai-providers/base-provider.js'
|
|
||||||
);
|
|
||||||
|
|
||||||
describe('BaseAIProvider', () => {
|
|
||||||
let testProvider;
|
|
||||||
let mockClient;
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
// Create a concrete test provider
|
|
||||||
class TestProvider extends BaseAIProvider {
|
|
||||||
constructor() {
|
|
||||||
super();
|
|
||||||
this.name = 'TestProvider';
|
|
||||||
}
|
|
||||||
|
|
||||||
getRequiredApiKeyName() {
|
|
||||||
return 'TEST_API_KEY';
|
|
||||||
}
|
|
||||||
|
|
||||||
async getClient() {
|
|
||||||
return mockClient;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mockClient = jest.fn((modelId) => ({ modelId }));
|
|
||||||
jest.clearAllMocks();
|
|
||||||
testProvider = new TestProvider();
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('1. Parameter Validation - Catches Invalid Inputs', () => {
|
|
||||||
describe('validateAuth', () => {
|
|
||||||
it('should throw when API key is missing', () => {
|
|
||||||
expect(() => testProvider.validateAuth({})).toThrow(
|
|
||||||
'TestProvider API key is required'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should pass when API key is provided', () => {
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateAuth({ apiKey: 'test-key' })
|
|
||||||
).not.toThrow();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('validateParams', () => {
|
|
||||||
it('should throw when model ID is missing', () => {
|
|
||||||
expect(() => testProvider.validateParams({ apiKey: 'key' })).toThrow(
|
|
||||||
'TestProvider Model ID is required'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw when both API key and model ID are missing', () => {
|
|
||||||
expect(() => testProvider.validateParams({})).toThrow(
|
|
||||||
'TestProvider API key is required'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('validateOptionalParams', () => {
|
|
||||||
it('should throw for temperature below 0', () => {
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ temperature: -0.1 })
|
|
||||||
).toThrow('Temperature must be between 0 and 1');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw for temperature above 1', () => {
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ temperature: 1.1 })
|
|
||||||
).toThrow('Temperature must be between 0 and 1');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should accept temperature at boundaries', () => {
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ temperature: 0 })
|
|
||||||
).not.toThrow();
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ temperature: 1 })
|
|
||||||
).not.toThrow();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw for invalid maxTokens values', () => {
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ maxTokens: 0 })
|
|
||||||
).toThrow('maxTokens must be a finite number greater than 0');
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ maxTokens: -100 })
|
|
||||||
).toThrow('maxTokens must be a finite number greater than 0');
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ maxTokens: Infinity })
|
|
||||||
).toThrow('maxTokens must be a finite number greater than 0');
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ maxTokens: 'invalid' })
|
|
||||||
).toThrow('maxTokens must be a finite number greater than 0');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('validateMessages', () => {
|
|
||||||
it('should throw for null/undefined messages', async () => {
|
|
||||||
await expect(
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: null
|
|
||||||
})
|
|
||||||
).rejects.toThrow('Invalid or empty messages array provided');
|
|
||||||
|
|
||||||
await expect(
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: undefined
|
|
||||||
})
|
|
||||||
).rejects.toThrow('Invalid or empty messages array provided');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw for empty messages array', async () => {
|
|
||||||
await expect(
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: []
|
|
||||||
})
|
|
||||||
).rejects.toThrow('Invalid or empty messages array provided');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw for messages without role or content', async () => {
|
|
||||||
await expect(
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ content: 'test' }] // missing role
|
|
||||||
})
|
|
||||||
).rejects.toThrow(
|
|
||||||
'Invalid message format. Each message must have role and content'
|
|
||||||
);
|
|
||||||
|
|
||||||
await expect(
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user' }] // missing content
|
|
||||||
})
|
|
||||||
).rejects.toThrow(
|
|
||||||
'Invalid message format. Each message must have role and content'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('2. Error Handling - Proper Error Context', () => {
|
|
||||||
it('should wrap API errors with context', async () => {
|
|
||||||
const apiError = new Error('API rate limit exceeded');
|
|
||||||
mockGenerateText.mockRejectedValue(apiError);
|
|
||||||
|
|
||||||
await expect(
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
})
|
|
||||||
).rejects.toThrow(
|
|
||||||
'TestProvider API error during text generation: API rate limit exceeded'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle errors without message property', async () => {
|
|
||||||
const apiError = { code: 'NETWORK_ERROR' };
|
|
||||||
mockGenerateText.mockRejectedValue(apiError);
|
|
||||||
|
|
||||||
await expect(
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
})
|
|
||||||
).rejects.toThrow(
|
|
||||||
'TestProvider API error during text generation: Unknown error occurred'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('3. Abstract Class Protection', () => {
|
|
||||||
it('should prevent direct instantiation of BaseAIProvider', () => {
|
|
||||||
expect(() => new BaseAIProvider()).toThrow(
|
|
||||||
'BaseAIProvider cannot be instantiated directly'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw when abstract methods are not implemented', () => {
|
|
||||||
class IncompleteProvider extends BaseAIProvider {
|
|
||||||
constructor() {
|
|
||||||
super();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
const provider = new IncompleteProvider();
|
|
||||||
|
|
||||||
expect(() => provider.getClient()).toThrow(
|
|
||||||
'getClient must be implemented by provider'
|
|
||||||
);
|
|
||||||
expect(() => provider.getRequiredApiKeyName()).toThrow(
|
|
||||||
'getRequiredApiKeyName must be implemented by provider'
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('4. Token Parameter Preparation', () => {
|
|
||||||
it('should convert maxTokens to maxOutputTokens as integer', () => {
|
|
||||||
const result = testProvider.prepareTokenParam('model', 1000.7);
|
|
||||||
expect(result).toEqual({ maxOutputTokens: 1000 });
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle string numbers', () => {
|
|
||||||
const result = testProvider.prepareTokenParam('model', '500');
|
|
||||||
expect(result).toEqual({ maxOutputTokens: 500 });
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return empty object when maxTokens is undefined', () => {
|
|
||||||
const result = testProvider.prepareTokenParam('model', undefined);
|
|
||||||
expect(result).toEqual({});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should floor decimal values', () => {
|
|
||||||
const result = testProvider.prepareTokenParam('model', 999.99);
|
|
||||||
expect(result).toEqual({ maxOutputTokens: 999 });
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('5. JSON Repair for Malformed Responses', () => {
|
|
||||||
it('should repair malformed JSON in generateObject errors', async () => {
|
|
||||||
const malformedJson = '{"key": "value",,}'; // Double comma
|
|
||||||
const repairedJson = '{"key": "value"}';
|
|
||||||
|
|
||||||
const parseError = new mockJSONParseError(malformedJson);
|
|
||||||
const noObjectError = new mockNoObjectGeneratedError(parseError);
|
|
||||||
noObjectError.usage = {
|
|
||||||
promptTokens: 100,
|
|
||||||
completionTokens: 50,
|
|
||||||
totalTokens: 150
|
|
||||||
};
|
|
||||||
|
|
||||||
mockGenerateObject.mockRejectedValue(noObjectError);
|
|
||||||
mockJsonrepair.mockReturnValue(repairedJson);
|
|
||||||
|
|
||||||
const result = await testProvider.generateObject({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
schema: { type: 'object' },
|
|
||||||
objectName: 'TestObject'
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(mockJsonrepair).toHaveBeenCalledWith(malformedJson);
|
|
||||||
expect(result).toEqual({
|
|
||||||
object: { key: 'value' },
|
|
||||||
usage: {
|
|
||||||
inputTokens: 100,
|
|
||||||
outputTokens: 50,
|
|
||||||
totalTokens: 150
|
|
||||||
}
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw original error when JSON repair fails', async () => {
|
|
||||||
const malformedJson = 'not even close to JSON';
|
|
||||||
const parseError = new mockJSONParseError(malformedJson);
|
|
||||||
const noObjectError = new mockNoObjectGeneratedError(parseError);
|
|
||||||
|
|
||||||
mockGenerateObject.mockRejectedValue(noObjectError);
|
|
||||||
mockJsonrepair.mockImplementation(() => {
|
|
||||||
throw new Error('Cannot repair this JSON');
|
|
||||||
});
|
|
||||||
|
|
||||||
await expect(
|
|
||||||
testProvider.generateObject({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
schema: { type: 'object' },
|
|
||||||
objectName: 'TestObject'
|
|
||||||
})
|
|
||||||
).rejects.toThrow('TestProvider API error during object generation');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle non-JSON parse errors normally', async () => {
|
|
||||||
const regularError = new Error('Network timeout');
|
|
||||||
mockGenerateObject.mockRejectedValue(regularError);
|
|
||||||
|
|
||||||
await expect(
|
|
||||||
testProvider.generateObject({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
schema: { type: 'object' },
|
|
||||||
objectName: 'TestObject'
|
|
||||||
})
|
|
||||||
).rejects.toThrow(
|
|
||||||
'TestProvider API error during object generation: Network timeout'
|
|
||||||
);
|
|
||||||
|
|
||||||
expect(mockJsonrepair).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('6. Usage Token Normalization', () => {
|
|
||||||
it('should normalize different token formats in generateText', async () => {
|
|
||||||
// Test promptTokens/completionTokens format (older format)
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { promptTokens: 10, completionTokens: 5 }
|
|
||||||
});
|
|
||||||
|
|
||||||
let result = await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.usage).toEqual({
|
|
||||||
inputTokens: 10,
|
|
||||||
outputTokens: 5,
|
|
||||||
totalTokens: 15
|
|
||||||
});
|
|
||||||
|
|
||||||
// Test inputTokens/outputTokens format (newer format)
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 20, outputTokens: 10, totalTokens: 30 }
|
|
||||||
});
|
|
||||||
|
|
||||||
result = await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.usage).toEqual({
|
|
||||||
inputTokens: 20,
|
|
||||||
outputTokens: 10,
|
|
||||||
totalTokens: 30
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle missing usage data gracefully', async () => {
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: undefined
|
|
||||||
});
|
|
||||||
|
|
||||||
const result = await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.usage).toEqual({
|
|
||||||
inputTokens: 0,
|
|
||||||
outputTokens: 0,
|
|
||||||
totalTokens: 0
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should calculate totalTokens when missing', async () => {
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 15, outputTokens: 25 }
|
|
||||||
});
|
|
||||||
|
|
||||||
const result = await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.usage.totalTokens).toBe(40);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('7. Schema Validation for Object Methods', () => {
|
|
||||||
it('should throw when schema is missing for generateObject', async () => {
|
|
||||||
await expect(
|
|
||||||
testProvider.generateObject({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
objectName: 'TestObject'
|
|
||||||
// missing schema
|
|
||||||
})
|
|
||||||
).rejects.toThrow('Schema is required for object generation');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw when objectName is missing for generateObject', async () => {
|
|
||||||
await expect(
|
|
||||||
testProvider.generateObject({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
schema: { type: 'object' }
|
|
||||||
// missing objectName
|
|
||||||
})
|
|
||||||
).rejects.toThrow('Object name is required for object generation');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should throw when schema is missing for streamObject', async () => {
|
|
||||||
await expect(
|
|
||||||
testProvider.streamObject({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
// missing schema
|
|
||||||
})
|
|
||||||
).rejects.toThrow('Schema is required for object streaming');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should use json mode when needsExplicitJsonSchema is true', async () => {
|
|
||||||
testProvider.needsExplicitJsonSchema = true;
|
|
||||||
mockGenerateObject.mockResolvedValue({
|
|
||||||
object: { test: 'value' },
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
await testProvider.generateObject({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
schema: { type: 'object' },
|
|
||||||
objectName: 'TestObject'
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(mockGenerateObject).toHaveBeenCalledWith(
|
|
||||||
expect.objectContaining({
|
|
||||||
mode: 'json' // Should be 'json' not 'auto'
|
|
||||||
})
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('8. Integration Points - Client Creation', () => {
|
|
||||||
it('should pass params to getClient method', async () => {
|
|
||||||
const getClientSpy = jest.spyOn(testProvider, 'getClient');
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
const params = {
|
|
||||||
apiKey: 'test-key',
|
|
||||||
modelId: 'test-model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
customParam: 'custom-value'
|
|
||||||
};
|
|
||||||
|
|
||||||
await testProvider.generateText(params);
|
|
||||||
|
|
||||||
expect(getClientSpy).toHaveBeenCalledWith(params);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should use client with correct model ID', async () => {
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'gpt-4-turbo',
|
|
||||||
messages: [{ role: 'user', content: 'test' }]
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(mockClient).toHaveBeenCalledWith('gpt-4-turbo');
|
|
||||||
expect(mockGenerateText).toHaveBeenCalledWith(
|
|
||||||
expect.objectContaining({
|
|
||||||
model: { modelId: 'gpt-4-turbo' }
|
|
||||||
})
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('9. Edge Cases - Boundary Conditions', () => {
|
|
||||||
it('should handle zero maxTokens gracefully', () => {
|
|
||||||
// This should throw in validation
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ maxTokens: 0 })
|
|
||||||
).toThrow('maxTokens must be a finite number greater than 0');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle very large maxTokens', () => {
|
|
||||||
const result = testProvider.prepareTokenParam('model', 999999999);
|
|
||||||
expect(result).toEqual({ maxOutputTokens: 999999999 });
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle NaN temperature gracefully', () => {
|
|
||||||
// NaN fails the range check (NaN < 0 is false, NaN > 1 is also false)
|
|
||||||
// But NaN is not between 0 and 1, so we need to check the actual behavior
|
|
||||||
// The current implementation doesn't explicitly check for NaN,
|
|
||||||
// it passes because NaN < 0 and NaN > 1 are both false
|
|
||||||
expect(() =>
|
|
||||||
testProvider.validateOptionalParams({ temperature: NaN })
|
|
||||||
).not.toThrow();
|
|
||||||
// This is actually a bug - NaN should be rejected
|
|
||||||
// But we're testing current behavior, not desired behavior
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle concurrent calls safely', async () => {
|
|
||||||
mockGenerateText.mockImplementation(async () => ({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
}));
|
|
||||||
|
|
||||||
const promises = Array.from({ length: 10 }, (_, i) =>
|
|
||||||
testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: `model-${i}`,
|
|
||||||
messages: [{ role: 'user', content: `test-${i}` }]
|
|
||||||
})
|
|
||||||
);
|
|
||||||
|
|
||||||
const results = await Promise.all(promises);
|
|
||||||
expect(results).toHaveLength(10);
|
|
||||||
expect(mockClient).toHaveBeenCalledTimes(10);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('10. Default Behavior - isRequiredApiKey', () => {
|
|
||||||
it('should return true by default for isRequiredApiKey', () => {
|
|
||||||
expect(testProvider.isRequiredApiKey()).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should allow override of isRequiredApiKey', () => {
|
|
||||||
class NoAuthProvider extends BaseAIProvider {
|
|
||||||
constructor() {
|
|
||||||
super();
|
|
||||||
}
|
|
||||||
isRequiredApiKey() {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
validateAuth() {
|
|
||||||
// Override to not require API key
|
|
||||||
}
|
|
||||||
getClient() {
|
|
||||||
return mockClient;
|
|
||||||
}
|
|
||||||
getRequiredApiKeyName() {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const provider = new NoAuthProvider();
|
|
||||||
expect(provider.isRequiredApiKey()).toBe(false);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('11. Temperature Filtering - CLI vs Standard Providers', () => {
|
|
||||||
const mockStreamText = jest.fn();
|
|
||||||
const mockStreamObject = jest.fn();
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
mockStreamText.mockReset();
|
|
||||||
mockStreamObject.mockReset();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should include temperature in generateText when supported', async () => {
|
|
||||||
testProvider.supportsTemperature = true;
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(mockGenerateText).toHaveBeenCalledWith(
|
|
||||||
expect.objectContaining({ temperature: 0.7 })
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should exclude temperature in generateText when not supported', async () => {
|
|
||||||
testProvider.supportsTemperature = false;
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
|
|
||||||
const callArgs = mockGenerateText.mock.calls[0][0];
|
|
||||||
expect(callArgs).not.toHaveProperty('temperature');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should exclude temperature when undefined even if supported', async () => {
|
|
||||||
testProvider.supportsTemperature = true;
|
|
||||||
mockGenerateText.mockResolvedValue({
|
|
||||||
text: 'response',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
await testProvider.generateText({
|
|
||||||
apiKey: 'key',
|
|
||||||
modelId: 'model',
|
|
||||||
messages: [{ role: 'user', content: 'test' }],
|
|
||||||
temperature: undefined
|
|
||||||
});
|
|
||||||
|
|
||||||
const callArgs = mockGenerateText.mock.calls[0][0];
|
|
||||||
expect(callArgs).not.toHaveProperty('temperature');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
@@ -1,92 +0,0 @@
|
|||||||
import { jest } from '@jest/globals';
|
|
||||||
|
|
||||||
// Mock the ai module
|
|
||||||
jest.unstable_mockModule('ai', () => ({
|
|
||||||
generateObject: jest.fn(),
|
|
||||||
generateText: jest.fn(),
|
|
||||||
streamText: jest.fn()
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Mock the codex-cli SDK module
|
|
||||||
jest.unstable_mockModule('ai-sdk-provider-codex-cli', () => ({
|
|
||||||
createCodexCli: jest.fn((options) => {
|
|
||||||
const provider = (modelId, settings) => ({ id: modelId, settings });
|
|
||||||
provider.languageModel = jest.fn((id, settings) => ({ id, settings }));
|
|
||||||
provider.chat = provider.languageModel;
|
|
||||||
return provider;
|
|
||||||
})
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Mock config getters
|
|
||||||
jest.unstable_mockModule('../../../scripts/modules/config-manager.js', () => ({
|
|
||||||
getCodexCliSettingsForCommand: jest.fn(() => ({ allowNpx: true })),
|
|
||||||
// Provide commonly imported getters to satisfy other module imports if any
|
|
||||||
getDebugFlag: jest.fn(() => false),
|
|
||||||
getLogLevel: jest.fn(() => 'info')
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Mock base provider
|
|
||||||
jest.unstable_mockModule('../../../src/ai-providers/base-provider.js', () => ({
|
|
||||||
BaseAIProvider: class {
|
|
||||||
constructor() {
|
|
||||||
this.name = 'Base Provider';
|
|
||||||
}
|
|
||||||
handleError(_ctx, err) {
|
|
||||||
throw err;
|
|
||||||
}
|
|
||||||
validateParams(params) {
|
|
||||||
if (!params.modelId) throw new Error('Model ID is required');
|
|
||||||
}
|
|
||||||
validateMessages(msgs) {
|
|
||||||
if (!Array.isArray(msgs)) throw new Error('Invalid messages array');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}));
|
|
||||||
|
|
||||||
const { CodexCliProvider } = await import(
|
|
||||||
'../../../src/ai-providers/codex-cli.js'
|
|
||||||
);
|
|
||||||
const { createCodexCli } = await import('ai-sdk-provider-codex-cli');
|
|
||||||
const { getCodexCliSettingsForCommand } = await import(
|
|
||||||
'../../../scripts/modules/config-manager.js'
|
|
||||||
);
|
|
||||||
|
|
||||||
describe('CodexCliProvider', () => {
|
|
||||||
let provider;
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
jest.clearAllMocks();
|
|
||||||
provider = new CodexCliProvider();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('sets provider name and supported models', () => {
|
|
||||||
expect(provider.name).toBe('Codex CLI');
|
|
||||||
expect(provider.supportedModels).toEqual(['gpt-5', 'gpt-5-codex']);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('does not require API key', () => {
|
|
||||||
expect(provider.isRequiredApiKey()).toBe(false);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('creates client with merged default settings', async () => {
|
|
||||||
const client = await provider.getClient({ commandName: 'parse-prd' });
|
|
||||||
expect(client).toBeDefined();
|
|
||||||
expect(createCodexCli).toHaveBeenCalledWith({
|
|
||||||
defaultSettings: expect.objectContaining({ allowNpx: true })
|
|
||||||
});
|
|
||||||
expect(getCodexCliSettingsForCommand).toHaveBeenCalledWith('parse-prd');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('injects OPENAI_API_KEY only when apiKey provided', async () => {
|
|
||||||
const client = await provider.getClient({
|
|
||||||
commandName: 'expand',
|
|
||||||
apiKey: 'sk-test'
|
|
||||||
});
|
|
||||||
const call = createCodexCli.mock.calls[0][0];
|
|
||||||
expect(call.defaultSettings.env.OPENAI_API_KEY).toBe('sk-test');
|
|
||||||
// Ensure env is not set when apiKey not provided
|
|
||||||
await provider.getClient({ commandName: 'expand' });
|
|
||||||
const second = createCodexCli.mock.calls[1][0];
|
|
||||||
expect(second.defaultSettings.env).toBeUndefined();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
@@ -122,7 +122,7 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
|
|||||||
getMcpApiKeyStatus: mockGetMcpApiKeyStatus,
|
getMcpApiKeyStatus: mockGetMcpApiKeyStatus,
|
||||||
|
|
||||||
// Providers without API keys
|
// Providers without API keys
|
||||||
providersWithoutApiKeys: ['ollama', 'bedrock', 'gemini-cli', 'codex-cli']
|
providersWithoutApiKeys: ['ollama', 'bedrock', 'gemini-cli']
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// Mock AI Provider Classes with proper methods
|
// Mock AI Provider Classes with proper methods
|
||||||
@@ -158,24 +158,6 @@ const mockOllamaProvider = {
|
|||||||
isRequiredApiKey: jest.fn(() => false)
|
isRequiredApiKey: jest.fn(() => false)
|
||||||
};
|
};
|
||||||
|
|
||||||
// Codex CLI mock provider instance
|
|
||||||
const mockCodexProvider = {
|
|
||||||
generateText: jest.fn(),
|
|
||||||
streamText: jest.fn(),
|
|
||||||
generateObject: jest.fn(),
|
|
||||||
getRequiredApiKeyName: jest.fn(() => 'OPENAI_API_KEY'),
|
|
||||||
isRequiredApiKey: jest.fn(() => false)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Claude Code mock provider instance
|
|
||||||
const mockClaudeProvider = {
|
|
||||||
generateText: jest.fn(),
|
|
||||||
streamText: jest.fn(),
|
|
||||||
generateObject: jest.fn(),
|
|
||||||
getRequiredApiKeyName: jest.fn(() => 'CLAUDE_CODE_API_KEY'),
|
|
||||||
isRequiredApiKey: jest.fn(() => false)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Mock the provider classes to return our mock instances
|
// Mock the provider classes to return our mock instances
|
||||||
jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({
|
jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({
|
||||||
AnthropicAIProvider: jest.fn(() => mockAnthropicProvider),
|
AnthropicAIProvider: jest.fn(() => mockAnthropicProvider),
|
||||||
@@ -231,7 +213,13 @@ jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({
|
|||||||
getRequiredApiKeyName: jest.fn(() => null),
|
getRequiredApiKeyName: jest.fn(() => null),
|
||||||
isRequiredApiKey: jest.fn(() => false)
|
isRequiredApiKey: jest.fn(() => false)
|
||||||
})),
|
})),
|
||||||
ClaudeCodeProvider: jest.fn(() => mockClaudeProvider),
|
ClaudeCodeProvider: jest.fn(() => ({
|
||||||
|
generateText: jest.fn(),
|
||||||
|
streamText: jest.fn(),
|
||||||
|
generateObject: jest.fn(),
|
||||||
|
getRequiredApiKeyName: jest.fn(() => 'CLAUDE_CODE_API_KEY'),
|
||||||
|
isRequiredApiKey: jest.fn(() => false)
|
||||||
|
})),
|
||||||
GeminiCliProvider: jest.fn(() => ({
|
GeminiCliProvider: jest.fn(() => ({
|
||||||
generateText: jest.fn(),
|
generateText: jest.fn(),
|
||||||
streamText: jest.fn(),
|
streamText: jest.fn(),
|
||||||
@@ -239,7 +227,6 @@ jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({
|
|||||||
getRequiredApiKeyName: jest.fn(() => 'GEMINI_API_KEY'),
|
getRequiredApiKeyName: jest.fn(() => 'GEMINI_API_KEY'),
|
||||||
isRequiredApiKey: jest.fn(() => false)
|
isRequiredApiKey: jest.fn(() => false)
|
||||||
})),
|
})),
|
||||||
CodexCliProvider: jest.fn(() => mockCodexProvider),
|
|
||||||
GrokCliProvider: jest.fn(() => ({
|
GrokCliProvider: jest.fn(() => ({
|
||||||
generateText: jest.fn(),
|
generateText: jest.fn(),
|
||||||
streamText: jest.fn(),
|
streamText: jest.fn(),
|
||||||
@@ -822,112 +809,5 @@ describe('Unified AI Services', () => {
|
|||||||
// Should have gotten the anthropic response
|
// Should have gotten the anthropic response
|
||||||
expect(result.mainResult).toBe('Anthropic response with session key');
|
expect(result.mainResult).toBe('Anthropic response with session key');
|
||||||
});
|
});
|
||||||
|
|
||||||
// --- Codex CLI specific tests ---
|
|
||||||
test('should use codex-cli provider without API key (OAuth)', async () => {
|
|
||||||
// Arrange codex-cli as main provider
|
|
||||||
mockGetMainProvider.mockReturnValue('codex-cli');
|
|
||||||
mockGetMainModelId.mockReturnValue('gpt-5-codex');
|
|
||||||
mockGetParametersForRole.mockReturnValue({
|
|
||||||
maxTokens: 128000,
|
|
||||||
temperature: 1
|
|
||||||
});
|
|
||||||
mockGetResponseLanguage.mockReturnValue('English');
|
|
||||||
// No API key in env
|
|
||||||
mockResolveEnvVariable.mockReturnValue(null);
|
|
||||||
// Mock codex generateText response
|
|
||||||
mockCodexProvider.generateText.mockResolvedValueOnce({
|
|
||||||
text: 'ok',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
const { generateTextService } = await import(
|
|
||||||
'../../scripts/modules/ai-services-unified.js'
|
|
||||||
);
|
|
||||||
|
|
||||||
const result = await generateTextService({
|
|
||||||
role: 'main',
|
|
||||||
prompt: 'Hello Codex',
|
|
||||||
projectRoot: fakeProjectRoot
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.mainResult).toBe('ok');
|
|
||||||
expect(mockCodexProvider.generateText).toHaveBeenCalledWith(
|
|
||||||
expect.objectContaining({
|
|
||||||
modelId: 'gpt-5-codex',
|
|
||||||
apiKey: null,
|
|
||||||
maxTokens: 128000
|
|
||||||
})
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should pass apiKey to codex-cli when provided', async () => {
|
|
||||||
// Arrange codex-cli as main provider
|
|
||||||
mockGetMainProvider.mockReturnValue('codex-cli');
|
|
||||||
mockGetMainModelId.mockReturnValue('gpt-5-codex');
|
|
||||||
mockGetParametersForRole.mockReturnValue({
|
|
||||||
maxTokens: 128000,
|
|
||||||
temperature: 1
|
|
||||||
});
|
|
||||||
mockGetResponseLanguage.mockReturnValue('English');
|
|
||||||
// Provide API key via env resolver
|
|
||||||
mockResolveEnvVariable.mockReturnValue('sk-test');
|
|
||||||
// Mock codex generateText response
|
|
||||||
mockCodexProvider.generateText.mockResolvedValueOnce({
|
|
||||||
text: 'ok-with-key',
|
|
||||||
usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 }
|
|
||||||
});
|
|
||||||
|
|
||||||
const { generateTextService } = await import(
|
|
||||||
'../../scripts/modules/ai-services-unified.js'
|
|
||||||
);
|
|
||||||
|
|
||||||
const result = await generateTextService({
|
|
||||||
role: 'main',
|
|
||||||
prompt: 'Hello Codex',
|
|
||||||
projectRoot: fakeProjectRoot
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.mainResult).toBe('ok-with-key');
|
|
||||||
expect(mockCodexProvider.generateText).toHaveBeenCalledWith(
|
|
||||||
expect.objectContaining({
|
|
||||||
modelId: 'gpt-5-codex',
|
|
||||||
apiKey: 'sk-test'
|
|
||||||
})
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
// --- Claude Code specific test ---
|
|
||||||
test('should pass temperature to claude-code provider (provider handles filtering)', async () => {
|
|
||||||
mockGetMainProvider.mockReturnValue('claude-code');
|
|
||||||
mockGetMainModelId.mockReturnValue('sonnet');
|
|
||||||
mockGetParametersForRole.mockReturnValue({
|
|
||||||
maxTokens: 64000,
|
|
||||||
temperature: 0.7
|
|
||||||
});
|
|
||||||
mockGetResponseLanguage.mockReturnValue('English');
|
|
||||||
mockResolveEnvVariable.mockReturnValue(null);
|
|
||||||
|
|
||||||
mockClaudeProvider.generateText.mockResolvedValueOnce({
|
|
||||||
text: 'ok-claude',
|
|
||||||
usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
|
|
||||||
});
|
|
||||||
|
|
||||||
const { generateTextService } = await import(
|
|
||||||
'../../scripts/modules/ai-services-unified.js'
|
|
||||||
);
|
|
||||||
|
|
||||||
const result = await generateTextService({
|
|
||||||
role: 'main',
|
|
||||||
prompt: 'Hello Claude',
|
|
||||||
projectRoot: fakeProjectRoot
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.mainResult).toBe('ok-claude');
|
|
||||||
// The provider (BaseAIProvider) is responsible for filtering it based on supportsTemperature
|
|
||||||
const callArgs = mockClaudeProvider.generateText.mock.calls[0][0];
|
|
||||||
expect(callArgs).toHaveProperty('temperature', 0.7);
|
|
||||||
expect(callArgs.maxTokens).toBe(64000);
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -149,7 +149,6 @@ const DEFAULT_CONFIG = {
|
|||||||
responseLanguage: 'English'
|
responseLanguage: 'English'
|
||||||
},
|
},
|
||||||
claudeCode: {},
|
claudeCode: {},
|
||||||
codexCli: {},
|
|
||||||
grokCli: {
|
grokCli: {
|
||||||
timeout: 120000,
|
timeout: 120000,
|
||||||
workingDirectory: null,
|
workingDirectory: null,
|
||||||
@@ -643,8 +642,7 @@ describe('getConfig Tests', () => {
|
|||||||
...DEFAULT_CONFIG.claudeCode,
|
...DEFAULT_CONFIG.claudeCode,
|
||||||
...VALID_CUSTOM_CONFIG.claudeCode
|
...VALID_CUSTOM_CONFIG.claudeCode
|
||||||
},
|
},
|
||||||
grokCli: { ...DEFAULT_CONFIG.grokCli },
|
grokCli: { ...DEFAULT_CONFIG.grokCli }
|
||||||
codexCli: { ...DEFAULT_CONFIG.codexCli }
|
|
||||||
};
|
};
|
||||||
expect(config).toEqual(expectedMergedConfig);
|
expect(config).toEqual(expectedMergedConfig);
|
||||||
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
|
expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
|
||||||
@@ -687,8 +685,7 @@ describe('getConfig Tests', () => {
|
|||||||
...DEFAULT_CONFIG.claudeCode,
|
...DEFAULT_CONFIG.claudeCode,
|
||||||
...VALID_CUSTOM_CONFIG.claudeCode
|
...VALID_CUSTOM_CONFIG.claudeCode
|
||||||
},
|
},
|
||||||
grokCli: { ...DEFAULT_CONFIG.grokCli },
|
grokCli: { ...DEFAULT_CONFIG.grokCli }
|
||||||
codexCli: { ...DEFAULT_CONFIG.codexCli }
|
|
||||||
};
|
};
|
||||||
expect(config).toEqual(expectedMergedConfig);
|
expect(config).toEqual(expectedMergedConfig);
|
||||||
expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
|
expect(fsReadFileSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH, 'utf-8');
|
||||||
@@ -797,8 +794,7 @@ describe('getConfig Tests', () => {
|
|||||||
...DEFAULT_CONFIG.claudeCode,
|
...DEFAULT_CONFIG.claudeCode,
|
||||||
...VALID_CUSTOM_CONFIG.claudeCode
|
...VALID_CUSTOM_CONFIG.claudeCode
|
||||||
},
|
},
|
||||||
grokCli: { ...DEFAULT_CONFIG.grokCli },
|
grokCli: { ...DEFAULT_CONFIG.grokCli }
|
||||||
codexCli: { ...DEFAULT_CONFIG.codexCli }
|
|
||||||
};
|
};
|
||||||
expect(config).toEqual(expectedMergedConfig);
|
expect(config).toEqual(expectedMergedConfig);
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -94,6 +94,7 @@ describe('addSubtask function', () => {
|
|||||||
const parentTask = writeCallArgs.tasks.find((t) => t.id === 1);
|
const parentTask = writeCallArgs.tasks.find((t) => t.id === 1);
|
||||||
expect(parentTask.subtasks).toHaveLength(1);
|
expect(parentTask.subtasks).toHaveLength(1);
|
||||||
expect(parentTask.subtasks[0].title).toBe('New Subtask');
|
expect(parentTask.subtasks[0].title).toBe('New Subtask');
|
||||||
|
expect(mockGenerateTaskFiles).toHaveBeenCalled();
|
||||||
});
|
});
|
||||||
|
|
||||||
test('should convert an existing task to a subtask', async () => {
|
test('should convert an existing task to a subtask', async () => {
|
||||||
|
|||||||
@@ -88,6 +88,11 @@ describe('moveTask (unit)', () => {
|
|||||||
).rejects.toThrow(/Number of source IDs/);
|
).rejects.toThrow(/Number of source IDs/);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('batch move calls generateTaskFiles once when flag true', async () => {
|
||||||
|
await moveTask('tasks.json', '1,2', '3,4', true, { tag: 'master' });
|
||||||
|
expect(generateTaskFiles).toHaveBeenCalledTimes(1);
|
||||||
|
});
|
||||||
|
|
||||||
test('error when tag invalid', async () => {
|
test('error when tag invalid', async () => {
|
||||||
await expect(
|
await expect(
|
||||||
moveTask('tasks.json', '1', '2', false, { tag: 'ghost' })
|
moveTask('tasks.json', '1', '2', false, { tag: 'ghost' })
|
||||||
|
|||||||
@@ -1,10 +1,6 @@
|
|||||||
import { defineConfig } from 'tsdown';
|
import { defineConfig } from 'tsdown';
|
||||||
import { baseConfig, mergeConfig } from '@tm/build-config';
|
import { baseConfig, mergeConfig } from '@tm/build-config';
|
||||||
import { config } from 'dotenv';
|
import 'dotenv/config';
|
||||||
import { resolve } from 'path';
|
|
||||||
|
|
||||||
// Load .env file explicitly with absolute path
|
|
||||||
config({ path: resolve(process.cwd(), '.env') });
|
|
||||||
|
|
||||||
// Get all TM_PUBLIC_* env variables for build-time injection
|
// Get all TM_PUBLIC_* env variables for build-time injection
|
||||||
const getBuildTimeEnvs = () => {
|
const getBuildTimeEnvs = () => {
|
||||||
@@ -23,10 +19,10 @@ const getBuildTimeEnvs = () => {
|
|||||||
|
|
||||||
for (const [key, value] of Object.entries(process.env)) {
|
for (const [key, value] of Object.entries(process.env)) {
|
||||||
if (key.startsWith('TM_PUBLIC_')) {
|
if (key.startsWith('TM_PUBLIC_')) {
|
||||||
|
// Return the actual value, not JSON.stringify'd
|
||||||
envs[key] = value || '';
|
envs[key] = value || '';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return envs;
|
return envs;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -5,8 +5,7 @@
|
|||||||
"build": {
|
"build": {
|
||||||
"dependsOn": ["^build"],
|
"dependsOn": ["^build"],
|
||||||
"outputs": ["dist/**"],
|
"outputs": ["dist/**"],
|
||||||
"outputLogs": "new-only",
|
"outputLogs": "new-only"
|
||||||
"env": ["NODE_ENV", "TM_PUBLIC_*"]
|
|
||||||
},
|
},
|
||||||
"dev": {
|
"dev": {
|
||||||
"cache": false,
|
"cache": false,
|
||||||
|
|||||||
Reference in New Issue
Block a user