Compare commits
1 Commits
docs/auto-...docs/auto-

| Author | SHA1 | Date | |
| --- | --- | --- | --- |
|  | 1cfdaa3a65 |  |  |
.changeset/easy-deer-heal.md (Normal file, 5 lines)
@@ -0,0 +1,5 @@
+---
+"task-master-ai": minor
+---
+
+Testing one more pre-release iteration
.changeset/moody-oranges-slide.md (Normal file, 5 lines)
@@ -0,0 +1,5 @@
+---
+"task-master-ai": minor
+---
+
+Test out the RC
.changeset/odd-otters-tan.md (Normal file, 5 lines)
@@ -0,0 +1,5 @@
+---
+"@tm/cli": minor
+---
+
+testing this stuff out to see how the release candidate works with monorepo
@@ -2,7 +2,7 @@
 "task-master-ai": minor
 ---
 
-Add grok-cli as a provider with full codebase context support. You can now use Grok models (grok-2, grok-3, grok-4, etc.) with Task Master for AI operations that have access to your entire codebase context, enabling more informed task generation and PRD parsing.
+Add grok-cli as a provider. You can now use Grok models with Task Master by setting the `GROK_CLI_API_KEY` environment variable.
 
 ## Setup Instructions
 
@@ -20,17 +20,11 @@ Add grok-cli as a provider with full codebase context support. You can now use G
 task-master models --set-fallback grok-beta
 ```
 
-## Key Features
-- **Full codebase context**: Grok models can analyze your entire project when generating tasks or parsing PRDs
-- **xAI model access**: Support for latest Grok models (grok-2, grok-3, grok-4, etc.)
-- **Code-aware task generation**: Create more accurate and contextual tasks based on your actual codebase
-- **Intelligent PRD parsing**: Parse requirements with understanding of your existing code structure
-
 ## Available Models
-- `grok-beta` - Latest Grok model with codebase context
-- `grok-vision-beta` - Grok with vision capabilities and codebase context
+- `grok-beta` - Latest Grok model
+- `grok-vision-beta` - Grok with vision capabilities
 
-The Grok CLI provider integrates with xAI's Grok models via grok-cli and can also use the local Grok CLI configuration file (`~/.grok/user-settings.json`) if available.
+The Grok CLI provider integrates with xAI's Grok models and can also use the local Grok CLI configuration file (`~/.grok/user-settings.json`) if available.
 
 ## Credits
 Built using the [grok-cli](https://github.com/superagent-ai/grok-cli) by Superagent AI for seamless integration with xAI's Grok models.
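As an illustration of the setup the new changeset text above describes, a minimal sketch assuming a `.env` file at the project root (the key value is a placeholder; real keys come from console.x.ai):

```bash
# .env (placeholder value)
GROK_CLI_API_KEY=your-grok-api-key-here
```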
@@ -1,8 +0,0 @@
----
-"task-master-ai": minor
----
-
-Improve taskmaster ai provider defaults
-
-- moving from main anthropic 3.7 to anthropic sonnet 4
-- moving from fallback anthropic 3.5 to anthropic 3.7
@@ -1,9 +1,9 @@
 {
   "models": {
     "main": {
-      "provider": "anthropic",
-      "modelId": "claude-sonnet-4-20250514",
-      "maxTokens": 64000,
+      "provider": "grok-cli",
+      "modelId": "grok-4-latest",
+      "maxTokens": 131072,
       "temperature": 0.2
     },
     "research": {
@@ -14,8 +14,8 @@
     },
     "fallback": {
       "provider": "anthropic",
-      "modelId": "claude-3-7-sonnet-20250219",
-      "maxTokens": 120000,
+      "modelId": "claude-sonnet-4-20250514",
+      "maxTokens": 64000,
       "temperature": 0.2
     }
   },
@@ -23,7 +23,7 @@ description: "This guide walks you through setting up Task Master in your develo
 "env": {
   "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
   "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
-  "MODEL": "claude-sonnet-4-20250514",
+  "MODEL": "claude-3-7-sonnet-20250219",
   "PERPLEXITY_MODEL": "sonar-pro",
   "MAX_TOKENS": 128000,
   "TEMPERATURE": 0.2,
@@ -19,7 +19,7 @@ description: "Configure Task Master through environment variables in a .env file
 
 | Variable | Default Value | Description | Example |
 | --- | --- | --- | --- |
-| `MODEL` | `"claude-sonnet-4-20250514"` | Claude model to use | `MODEL=claude-3-opus-20240229` |
+| `MODEL` | `"claude-3-7-sonnet-20250219"` | Claude model to use | `MODEL=claude-3-opus-20240229` |
 | `MAX_TOKENS` | `"4000"` | Maximum tokens for responses | `MAX_TOKENS=8000` |
 | `TEMPERATURE` | `"0.7"` | Temperature for model responses | `TEMPERATURE=0.5` |
 | `DEBUG` | `"false"` | Enable debug logging | `DEBUG=true` |
@@ -38,7 +38,7 @@ description: "Configure Task Master through environment variables in a .env file
 ANTHROPIC_API_KEY=sk-ant-api03-your-api-key
 
 # Optional - Claude Configuration
-MODEL=claude-sonnet-4-20250514
+MODEL=claude-3-7-sonnet-20250219
 MAX_TOKENS=4000
 TEMPERATURE=0.7
 
@@ -18,7 +18,7 @@ Taskmaster uses two primary methods for configuration:
 "models": {
   "main": {
     "provider": "anthropic",
-    "modelId": "claude-sonnet-4-20250514",
+    "modelId": "claude-3-7-sonnet-20250219",
     "maxTokens": 64000,
     "temperature": 0.2,
     "baseURL": "https://api.anthropic.com/v1"
@@ -32,7 +32,7 @@ Taskmaster uses two primary methods for configuration:
   },
   "fallback": {
     "provider": "anthropic",
-    "modelId": "claude-3-7-sonnet-20250219",
+    "modelId": "claude-3-5-sonnet",
     "maxTokens": 64000,
     "temperature": 0.2
   }
@@ -75,7 +75,7 @@ Taskmaster uses two primary methods for configuration:
 - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
 - `OPENROUTER_API_KEY`: Your OpenRouter API key.
 - `XAI_API_KEY`: Your X-AI API key.
-- `GROK_CLI_API_KEY`: Your Grok API key for grok-cli provider.
+- `GROK_CLI_API_KEY`: Your Grok API key from console.x.ai.
 - **Optional Endpoint Overrides:**
   - **Per-role `baseURL` in `.taskmasterconfig`:** You can add a `baseURL` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
   - **Environment Variable Overrides (`<PROVIDER>_BASE_URL`):** For greater flexibility, especially with third-party services, you can set an environment variable like `OPENAI_BASE_URL` or `MISTRAL_BASE_URL`. This will override any `baseURL` set in the configuration file for that provider. This is the recommended way to connect to OpenAI-compatible APIs.
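As an illustration of the endpoint override described above, a minimal sketch assuming an OpenAI-compatible gateway (the URL is a placeholder); the environment variable takes precedence over any per-role `baseURL` set in `.taskmasterconfig`:

```bash
# .env (placeholder URL for an OpenAI-compatible endpoint)
OPENAI_BASE_URL=https://my-gateway.example.com/v1
```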
@@ -138,7 +138,6 @@ PERPLEXITY_API_KEY=pplx-your-key-here
 # OPENAI_API_KEY=sk-your-key-here
 # GOOGLE_API_KEY=AIzaSy...
 # AZURE_OPENAI_API_KEY=your-azure-openai-api-key-here
-# GROK_CLI_API_KEY=your-grok-api-key-here
 # etc.
 
 # Optional Endpoint Overrides
@@ -320,18 +319,22 @@ Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure c
 - Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
 - Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
 
-### Grok CLI Configuration
+### Grok AI Configuration
 
-The Grok CLI provider integrates with xAI's Grok models and provides full codebase context support for enhanced task generation and analysis.
+Grok AI provides access to xAI's Grok models with enhanced reasoning capabilities and requires minimal configuration:
 
 1. **Prerequisites**:
-   - A Grok API key from [console.x.ai](https://console.x.ai)
-   - The `grok-cli` package will be automatically used when this provider is configured
+   - An xAI account with API access
+   - Grok API key from [console.x.ai](https://console.x.ai)
 
 2. **Authentication**:
    - Set the `GROK_CLI_API_KEY` environment variable with your Grok API key
 
-3. **Configuration**:
+3. **Available Models**:
+   - `grok-beta`: Latest Grok model with advanced reasoning
+   - `grok-vision-beta`: Grok with vision capabilities for image analysis
+
+4. **Configuration Example**:
   ```json
   // In .taskmaster/config.json
   {
@@ -339,41 +342,43 @@ The Grok CLI provider integrates with xAI's Grok models and provides full codeba
     "main": {
       "provider": "grok-cli",
       "modelId": "grok-beta",
-      "maxTokens": 64000,
-      "temperature": 0.2
+      "maxTokens": 131072,
+      "temperature": 0.3
     },
     "research": {
       "provider": "grok-cli",
       "modelId": "grok-vision-beta",
-      "maxTokens": 8700,
+      "maxTokens": 131072,
       "temperature": 0.1
     }
   },
+  "grokCli": {
+    "timeout": 120000,
+    "workingDirectory": null,
+    "defaultModel": "grok-4-latest"
+  }
 }
 ```
 
-4. **Available Models**:
-   - `grok-beta`: Latest Grok model with codebase context
-   - `grok-vision-beta`: Grok with vision capabilities and codebase context
-   - `grok-2`, `grok-3`, `grok-4`: Standard Grok models
-
-5. **Key Features**:
-   - **Full codebase context**: Grok models can analyze your entire project when generating tasks or parsing PRDs
-   - **Code-aware task generation**: Create more accurate and contextual tasks based on your actual codebase
-   - **Intelligent PRD parsing**: Parse requirements with understanding of your existing code structure
-
-6. **Environment Variables**:
+5. **Environment Variables**:
   ```bash
   # In .env file
   GROK_CLI_API_KEY=your-grok-api-key-here
   ```
 
-7. **Configuration Options**:
-   - `timeout`: Request timeout in milliseconds (default: 120000)
-   - `workingDirectory`: Override working directory for grok-cli (default: null, uses current directory)
-   - `defaultModel`: Default Grok model to use (default: "grok-4-latest")
+6. **Setup Commands**:
+  ```bash
+  # Set Grok as your main model
+  task-master models --set-main grok-beta
+
+  # Set Grok as your research model
+  task-master models --set-research grok-beta
+
+  # Set Grok as your fallback model
+  task-master models --set-fallback grok-beta
+  ```
+
+7. **Integration Features**:
+   - **Local Configuration Support**: The Grok CLI provider can use your local Grok CLI configuration file (`~/.grok/user-settings.json`) if available
+   - **Full Token Capacity**: Supports Grok's full 131K token capacity for large context operations
+   - **Built on Grok CLI**: Uses the [grok-cli](https://github.com/superagent-ai/grok-cli) by Superagent AI for reliable integration
+
 8. **Troubleshooting**:
   - **API Key Issues**: Verify your `GROK_CLI_API_KEY` is correctly set and valid
   - **Model Availability**: Ensure you have access to the specified Grok model variant
   - **Rate Limits**: Grok models have generous rate limits, but large contexts may take longer to process
@@ -38,6 +38,39 @@ sidebarTitle: "CLI Commands"
 ```
 </Accordion>
 
+<Accordion title="Start Working on a Task">
+```bash
+# Start working on a specific task with Claude Code
+task-master start <id>
+
+# Start the next available task automatically
+task-master start
+
+# Show what would be executed without launching Claude Code
+task-master start <id> --dry-run
+
+# Force start even if another task is in-progress
+task-master start <id> --force
+
+# Don't automatically update task status to in-progress
+task-master start <id> --no-status-update
+
+# Specify project root directory
+task-master start <id> --project /path/to/project
+
+# Get results in JSON format
+task-master start <id> --format json
+```
+
+The `start` command automatically launches Claude Code with comprehensive context about the task, including:
+- Task details and requirements
+- Implementation guidelines
+- Related subtasks and dependencies
+- Project-specific context
+
+When no task ID is provided, it automatically finds and starts the next available task based on dependencies and status.
+</Accordion>
+
 <Accordion title="Show Specific Task">
 ```bash
 # Show details of a specific task
@@ -30,7 +30,6 @@ For MCP/Cursor usage: Configure keys in the env section of your .cursor/mcp.json
   "MISTRAL_API_KEY": "MISTRAL_API_KEY_HERE",
   "AZURE_OPENAI_API_KEY": "AZURE_OPENAI_API_KEY_HERE",
   "OLLAMA_API_KEY": "OLLAMA_API_KEY_HERE",
-  "GROK_CLI_API_KEY": "GROK_CLI_API_KEY_HERE",
   "GITHUB_API_KEY": "GITHUB_API_KEY_HERE"
   }
 }
@@ -51,7 +50,6 @@ PERPLEXITY_API_KEY=pplx-your-key-here
 # OPENAI_API_KEY=sk-your-key-here
 # GOOGLE_API_KEY=AIzaSy...
 # AZURE_OPENAI_API_KEY=your-azure-openai-api-key-here
-# GROK_CLI_API_KEY=your-grok-api-key-here
 # etc.
 
 # Optional Endpoint Overrides
@@ -16,6 +16,24 @@ Alternatively you can use the CLI to show the next task
 task-master next
 ```
 
+### Quick Start with `task-master start`
+
+For immediate task execution, you can use the new `start` command to automatically launch Claude Code with full task context:
+
+```bash
+# Start the next available task automatically
+task-master start
+
+# Or start a specific task
+task-master start 1.2
+```
+
+This command will:
+- Find the next available task (if no ID is provided)
+- Update the task status to "in-progress"
+- Launch Claude Code with comprehensive task context
+- Provide all necessary implementation details and project context
+
 ## Discuss Task
 When you know what task to work on next, you can then start chatting with the agent to make sure it understands the plan of action.
@@ -3,4 +3,70 @@ title: "What's New"
 sidebarTitle: "What's New"
 ---
 
+## Latest Features (January 2025)
+
+### 🚀 New `task-master start` Command
+
+**Automated Task Execution with Claude Code Integration**
+
+The new `start` command revolutionizes your development workflow by automatically launching Claude Code with comprehensive task context:
+
+```bash
+# Start a specific task
+task-master start 1.2
+
+# Start the next available task automatically
+task-master start
+
+# Preview what would be executed without launching Claude Code
+task-master start 1.2 --dry-run
+```
+
+**Key Features:**
+- **Automatic Task Discovery** - When no ID is provided, finds the next available task based on dependencies and status
+- **Rich Context Injection** - Provides Claude Code with task details, requirements, subtasks, and project context
+- **Status Management** - Automatically updates task status to "in-progress" when starting
+- **Flexible Options** - Support for dry-run, force mode, custom project paths, and JSON output
+
+### 🤖 Grok AI Provider Support
+
+**Enhanced AI Model Options**
+
+Task Master now supports xAI's Grok models with full 131K token capacity:
+
+```bash
+# Configure Grok as your main model
+task-master models --set-main grok-beta
+
+# Use Grok with vision capabilities
+task-master models --set-research grok-vision-beta
+```
+
+**Setup:**
+1. Get your API key from [console.x.ai](https://console.x.ai)
+2. Set the `GROK_CLI_API_KEY` environment variable
+3. Configure using `task-master models --setup`
+
+**Available Models:**
+- `grok-beta` - Latest Grok model with advanced reasoning
+- `grok-vision-beta` - Grok with vision capabilities
+
+### 📱 VS Code Extension "Start Task" Button
+
+**Seamless VS Code Integration**
+
+The Task Master VS Code extension now includes a "Start Task" button for one-click task execution:
+
+- **Direct Integration** - Launch Claude Code directly from task cards in VS Code
+- **No Terminal Switching** - Automatic terminal management and command execution
+- **Full Context** - Same rich context injection as the CLI command
+- **Visual Workflow** - Seamless transition from task planning to implementation
+
+### 🔧 Technical Improvements
+
+- **TypeScript Migration** - Core components now use TypeScript for better type safety
+- **Model Configuration Updates** - Upgraded fallback model to Claude Sonnet 4
+- **Token Capacity Fixes** - Grok models now properly support their full 131K token capacity
+- **Enhanced Error Handling** - Improved error messages and debugging capabilities
+
 An easy way to see the latest releases
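As an illustration, the three Grok setup steps above condense into a short shell session; a sketch only, assuming a POSIX shell and a key obtained from console.x.ai (the value is a placeholder):

```bash
# Make the key available to Task Master, then pick Grok models interactively
export GROK_CLI_API_KEY=your-grok-api-key-here
task-master models --setup
```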
@@ -22,6 +22,7 @@ Taskmaster AI is an intelligent task management system designed for AI-assisted
 
 
 ### 🤖 **AI-Powered Features**
+- **One-Click Task Start** - Launch Claude Code directly from task cards with full context
 - **Task Content Generation** - Regenerate task descriptions using AI
 - **Smart Task Updates** - Append findings and progress notes automatically
 - **MCP Integration** - Seamless connection to Taskmaster AI via Model Context Protocol
@@ -83,6 +84,7 @@ The extension automatically handles the Taskmaster MCP server connection:
 | **View Kanban Board** | `Ctrl/Cmd + Shift + P` → "Taskmaster: Show Board" |
 | **Change Task Status** | Drag task card to different column |
 | **View Task Details** | Click on any task card |
+| **Start Working on Task** | Click "Start Task" button to launch Claude Code automatically |
 | **Edit Task Content** | Click task → Use edit buttons in details panel |
 | **Add Subtasks** | Click the + button on parent task cards |
 | **Use AI Features** | Open task details → Click AI action buttons |
output.txt (153 lines): File diff suppressed because one or more lines are too long
@@ -5,7 +5,6 @@
 
 import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
 import { ConfigManager } from './config-manager.js';
-import { DEFAULT_CONFIG_VALUES } from '../interfaces/configuration.interface.js';
 import { ConfigLoader } from './services/config-loader.service.js';
 import { ConfigMerger } from './services/config-merger.service.js';
 import { RuntimeStateManager } from './services/runtime-state-manager.service.js';
@@ -70,8 +69,8 @@ describe('ConfigManager', () => {
 ({
   loadState: vi.fn().mockResolvedValue({ activeTag: 'master' }),
   saveState: vi.fn().mockResolvedValue(undefined),
-  getCurrentTag: vi.fn().mockReturnValue('master'),
-  setCurrentTag: vi.fn().mockResolvedValue(undefined),
+  getActiveTag: vi.fn().mockReturnValue('master'),
+  setActiveTag: vi.fn().mockResolvedValue(undefined),
   getState: vi.fn().mockReturnValue({ activeTag: 'master' }),
   updateMetadata: vi.fn().mockResolvedValue(undefined),
   clearState: vi.fn().mockResolvedValue(undefined)
@@ -228,8 +227,8 @@ describe('ConfigManager', () => {
 
   const models = manager.getModelConfig();
   expect(models).toEqual({
-    main: DEFAULT_CONFIG_VALUES.MODELS.MAIN,
-    fallback: DEFAULT_CONFIG_VALUES.MODELS.FALLBACK
+    main: 'claude-3-5-sonnet-20241022',
+    fallback: 'gpt-4o-mini'
   });
 });
 
@@ -282,7 +281,7 @@ describe('ConfigManager', () => {
   await manager.setActiveTag('feature-branch');
 
   const stateManager = (manager as any).stateManager;
-  expect(stateManager.setCurrentTag).toHaveBeenCalledWith('feature-branch');
+  expect(stateManager.setActiveTag).toHaveBeenCalledWith('feature-branch');
 });
 });
@@ -10,7 +10,6 @@ import type {
   PartialConfiguration,
   RuntimeStorageConfig
 } from '../interfaces/configuration.interface.js';
-import { DEFAULT_CONFIG_VALUES as DEFAULTS } from '../interfaces/configuration.interface.js';
 import { ConfigLoader } from './services/config-loader.service.js';
 import {
   ConfigMerger,
@@ -168,8 +167,8 @@ export class ConfigManager {
 getModelConfig() {
   return (
     this.config.models || {
-      main: DEFAULTS.MODELS.MAIN,
-      fallback: DEFAULTS.MODELS.FALLBACK
+      main: 'claude-3-5-sonnet-20241022',
+      fallback: 'gpt-4o-mini'
     }
   );
 }
@@ -399,8 +399,8 @@ export interface IConfigurationManager {
  */
 export const DEFAULT_CONFIG_VALUES = {
   MODELS: {
-    MAIN: 'claude-sonnet-4-20250514',
-    FALLBACK: 'claude-3-7-sonnet-20250219'
+    MAIN: 'claude-3-5-sonnet-20241022',
+    FALLBACK: 'gpt-4o-mini'
   },
   TASKS: {
     DEFAULT_PRIORITY: 'medium' as TaskPriority,
@@ -27,21 +27,21 @@ const DEFAULTS = {
 models: {
   main: {
     provider: 'anthropic',
-    modelId: 'claude-sonnet-4-20250514',
+    modelId: 'claude-3-7-sonnet-20250219',
     maxTokens: 64000,
     temperature: 0.2
   },
   research: {
     provider: 'perplexity',
-    modelId: 'sonar',
+    modelId: 'sonar-pro',
     maxTokens: 8700,
     temperature: 0.1
   },
   fallback: {
     // No default fallback provider/model initially
     provider: 'anthropic',
-    modelId: 'claude-3-7-sonnet-20250219',
-    maxTokens: 120000, // Default parameters if fallback IS configured
+    modelId: 'claude-3-5-sonnet',
+    maxTokens: 8192, // Default parameters if fallback IS configured
     temperature: 0.2
   }
 },
@@ -119,20 +119,20 @@ const DEFAULT_CONFIG = {
 models: {
   main: {
     provider: 'anthropic',
-    modelId: 'claude-sonnet-4-20250514',
+    modelId: 'claude-3-7-sonnet-20250219',
     maxTokens: 64000,
     temperature: 0.2
   },
   research: {
     provider: 'perplexity',
-    modelId: 'sonar',
+    modelId: 'sonar-pro',
     maxTokens: 8700,
     temperature: 0.1
   },
   fallback: {
     provider: 'anthropic',
-    modelId: 'claude-3-7-sonnet-20250219',
-    maxTokens: 120000,
+    modelId: 'claude-3-5-sonnet',
+    maxTokens: 8192,
     temperature: 0.2
   }
 },