fix(ai, config): Correct Anthropic API calls and improve model config UI
Resolves persistent 404 'Not Found' errors when calling Anthropic models via the Vercel AI SDK. The primary issue was likely related to incorrect or missing API headers.

- Refactors the Anthropic provider (src/ai-providers/anthropic.js) to use the standard 'anthropic-version' header instead of potentially outdated or incorrect beta headers when creating the client instance.
- Updates the default fallback model ID in .taskmasterconfig to 'claude-3-5-sonnet-20241022'.
- Fixes the interactive model setup (task-master models --setup) in scripts/modules/commands.js to correctly filter and default the main model selection.
- Improves the cost display in the 'task-master models' command output to explicitly show 'Free' for models with zero cost.
- Clarifies the description of the 'id' parameter in the 'set_task_status' MCP tool definition.
- Updates the list of supported models and their costs.
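As a rough illustration of the client setup the message describes (not code from this commit), the Vercel AI SDK lets a version header be attached once when the provider instance is created; the header value and prompt below are assumptions:

```js
// Minimal sketch of the standard-version-header approach described above.
// The 'anthropic-version' value is the documented stable API version; it is
// illustrative here, not taken from this diff.
import { createAnthropic } from '@ai-sdk/anthropic';
import { generateText } from 'ai';

const anthropic = createAnthropic({
	apiKey: process.env.ANTHROPIC_API_KEY,
	headers: { 'anthropic-version': '2023-06-01' }
});

const { text } = await generateText({
	model: anthropic('claude-3-5-sonnet-20241022'), // new default fallback model ID
	messages: [{ role: 'user', content: 'Say hello.' }]
});
console.log(text);
```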
New file: .cursor/rules/ai_providers.mdc
.taskmasterconfig:

@@ -1,30 +1,30 @@
 {
   "models": {
     "main": {
       "provider": "anthropic",
       "modelId": "claude-3-7-sonnet-20250219",
       "maxTokens": 120000,
       "temperature": 0.2
     },
     "research": {
       "provider": "perplexity",
       "modelId": "sonar-pro",
       "maxTokens": 8700,
       "temperature": 0.1
     },
     "fallback": {
       "provider": "anthropic",
-      "modelId": "claude-3.5-sonnet-20240620",
+      "modelId": "claude-3-5-sonnet-20241022",
       "maxTokens": 120000,
       "temperature": 0.2
     }
   },
   "global": {
     "logLevel": "info",
     "debug": false,
     "defaultSubtasks": 5,
     "defaultPriority": "medium",
     "projectName": "Taskmaster",
     "ollamaBaseUrl": "http://localhost:11434/api"
   }
 }
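For context, a hypothetical sketch of how the per-role settings above can be read through config-manager-style getters. The getter names mirror those referenced later in this commit; the implementation here is illustrative, not the project's actual config-manager.js:

```js
// Hypothetical reader for the per-role model settings shown above.
import fs from 'fs';

const config = JSON.parse(fs.readFileSync('.taskmasterconfig', 'utf8'));

export const getMainProvider = () => config.models.main.provider; // "anthropic"
export const getMainModelId = () => config.models.main.modelId; // "claude-3-7-sonnet-20250219"
export const getMainMaxTokens = () => config.models.main.maxTokens; // 120000
export const getMainTemperature = () => config.models.main.temperature; // 0.2
export const getFallbackModelId = () => config.models.fallback.modelId; // "claude-3-5-sonnet-20241022"
```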
set_task_status MCP tool definition:

@@ -24,7 +24,7 @@ export function registerSetTaskStatusTool(server) {
       id: z
         .string()
         .describe(
-          "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated for multiple updates."
+          "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
         ),
       status: z
         .string()
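The clarified description allows a single call to target several tasks or subtasks. A small illustrative sketch (not from this commit) of splitting such an `id` argument:

```js
// Illustrative only: expanding a comma-separated `id` argument like the one
// described above into individual task/subtask IDs before updating.
const idArg = '15,15.2,16';
const ids = idArg
	.split(',')
	.map((id) => id.trim())
	.filter(Boolean);

for (const id of ids) {
	// A subtask ID contains a dot ('15.2'); a plain task ID does not ('15').
	const isSubtask = id.includes('.');
	console.log(`${isSubtask ? 'subtask' : 'task'} ${id} -> status update`);
}
```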
scripts/modules/commands.js (interactive model setup):

@@ -1659,6 +1659,18 @@ function registerCommands(programInstance) {
 
       console.log(chalk.cyan.bold('\nInteractive Model Setup:'));
 
+      const getMainChoicesAndDefault = () => {
+        const mainChoices = allModelsForSetup.filter((modelChoice) =>
+          availableModelsForSetup
+            .find((m) => m.modelId === modelChoice.value.id)
+            ?.allowedRoles?.includes('main')
+        );
+        const defaultIndex = mainChoices.findIndex(
+          (m) => m.value.id === currentModels.main?.modelId
+        );
+        return { choices: mainChoices, default: defaultIndex };
+      };
+
       // Get all available models, including active ones
       const allModelsForSetup = availableModelsForSetup.map((model) => ({
         name: `${model.provider} / ${model.modelId}`,
@@ -1716,6 +1728,8 @@ function registerCommands(programInstance) {
 
       const researchPromptData = getResearchChoicesAndDefault();
       const fallbackPromptData = getFallbackChoicesAndDefault();
+      // Call the helper function for main model choices
+      const mainPromptData = getMainChoicesAndDefault();
 
       // Add cancel option for all prompts
       const cancelOption = {
@@ -1726,7 +1740,7 @@ function registerCommands(programInstance) {
       const mainModelChoices = [
         cancelOption,
         new inquirer.Separator(),
-        ...allModelsForSetup
+        ...mainPromptData.choices
       ];
 
       const researchModelChoices = [
@@ -1758,7 +1772,7 @@ function registerCommands(programInstance) {
           name: 'mainModel',
           message: 'Select the main model for generation/updates:',
           choices: mainModelChoices,
-          default: findDefaultIndex(currentModels.main?.modelId) + 2 // +2 for cancel option and separator
+          default: mainPromptData.default + 2 // +2 for cancel option and separator
         },
         {
           type: 'list',
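The `+2` offset in the prompt default exists because the choices array is built as `[cancelOption, separator, ...choices]`, so an index into the filtered choices has to be shifted past the first two entries. A minimal sketch of that index arithmetic with illustrative values:

```js
// Why the prompt default is offset by 2: the first two entries of the list
// are the cancel option and a separator, not model choices.
const choices = ['claude-3-7-sonnet-20250219', 'claude-3-5-sonnet-20241022'];
const currentModelId = 'claude-3-5-sonnet-20241022';

const defaultIndex = choices.findIndex((id) => id === currentModelId); // 1
const promptDefault = defaultIndex + 2; // +2 for cancel option and separator
console.log(promptDefault); // 3
```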
scripts/modules/commands.js (cost display):

@@ -2001,6 +2015,12 @@ function registerCommands(programInstance) {
 
       const formatCost = (costObj) => {
         if (!costObj) return 'N/A';
+
+        // Check if both input and output costs are 0 and return "Free"
+        if (costObj.input === 0 && costObj.output === 0) {
+          return chalk.green('Free');
+        }
+
         const formatSingleCost = (costValue) => {
           if (costValue === null || costValue === undefined) return 'N/A';
           const isInteger = Number.isInteger(costValue);
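The cost display change above boils down to a simple rule: when both rates are zero, print 'Free'. A standalone sketch of that rule, with the chalk colouring omitted:

```js
// Standalone sketch of the cost display rule added above (colouring omitted).
const formatCost = (costObj) => {
	if (!costObj) return 'N/A';
	if (costObj.input === 0 && costObj.output === 0) return 'Free';
	return `$${costObj.input} in / $${costObj.output} out per 1M tokens`;
};

console.log(formatCost({ input: 0, output: 0 })); // Free
console.log(formatCost({ input: 3, output: 15 })); // $3 in / $15 out per 1M tokens
console.log(formatCost(null)); // N/A
```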
Supported models list (model IDs, SWE scores, cost per 1M tokens, and allowed roles per provider):

@@ -1,11 +1,5 @@
 {
   "anthropic": [
-    {
-      "id": "claude-3.5-sonnet-20240620",
-      "swe_score": 0.49,
-      "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
-      "allowed_roles": ["main", "fallback"]
-    },
     {
       "id": "claude-3-7-sonnet-20250219",
       "swe_score": 0.623,
@@ -13,21 +7,21 @@
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "claude-3.5-haiku-20241022",
+      "id": "claude-3-5-sonnet-20241022",
+      "swe_score": 0.49,
+      "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
+      "allowed_roles": ["main", "fallback"]
+    },
+    {
+      "id": "claude-3-5-haiku-20241022",
       "swe_score": 0.406,
       "cost_per_1m_tokens": { "input": 0.8, "output": 4.0 },
       "allowed_roles": ["main", "fallback"]
     },
-    {
-      "id": "claude-3-haiku-20240307",
-      "swe_score": 0,
-      "cost_per_1m_tokens": { "input": 0.25, "output": 1.25 },
-      "allowed_roles": ["main", "fallback"]
-    },
     {
       "id": "claude-3-opus-20240229",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 15, "output": 75 },
       "allowed_roles": ["main", "fallback"]
     }
   ],
@@ -35,13 +29,7 @@
     {
       "id": "gpt-4o",
       "swe_score": 0.332,
-      "cost_per_1m_tokens": { "input": 5.0, "output": 15.0 },
-      "allowed_roles": ["main", "fallback"]
-    },
-    {
-      "id": "gpt-4-turbo",
-      "swe_score": 0,
-      "cost_per_1m_tokens": { "input": 10.0, "output": 30.0 },
+      "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
@@ -50,12 +38,30 @@
       "cost_per_1m_tokens": { "input": 15.0, "output": 60.0 },
       "allowed_roles": ["main", "fallback"]
     },
+    {
+      "id": "o3",
+      "swe_score": 0.5,
+      "cost_per_1m_tokens": { "input": 10.0, "output": 40.0 },
+      "allowed_roles": ["main", "fallback"]
+    },
     {
       "id": "o3-mini",
       "swe_score": 0.493,
       "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
       "allowed_roles": ["main", "fallback"]
     },
+    {
+      "id": "o4-mini",
+      "swe_score": 0.45,
+      "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
+      "allowed_roles": ["main", "fallback"]
+    },
+    {
+      "id": "o1-mini",
+      "swe_score": 0.4,
+      "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
+      "allowed_roles": ["main", "fallback"]
+    },
     {
       "id": "o1-pro",
       "swe_score": 0,
@@ -63,51 +69,63 @@
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "gpt-4.1",
+      "id": "gpt-4-1",
       "swe_score": 0.55,
       "cost_per_1m_tokens": { "input": 2.0, "output": 8.0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "gpt-4.5-preview",
+      "id": "gpt-4-5-preview",
       "swe_score": 0.38,
       "cost_per_1m_tokens": { "input": 75.0, "output": 150.0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "gpt-4.1-mini",
+      "id": "gpt-4-1-mini",
       "swe_score": 0,
       "cost_per_1m_tokens": { "input": 0.4, "output": 1.6 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "gpt-4.1-nano",
+      "id": "gpt-4-1-nano",
       "swe_score": 0,
       "cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "gpt-3.5-turbo",
-      "swe_score": 0,
-      "cost_per_1m_tokens": { "input": 0.5, "output": 1.5 },
+      "id": "gpt-4o-mini",
+      "swe_score": 0.3,
+      "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
       "allowed_roles": ["main", "fallback"]
+    },
+    {
+      "id": "gpt-4o-search-preview",
+      "swe_score": 0.33,
+      "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
+      "allowed_roles": ["main", "fallback", "research"]
+    },
+    {
+      "id": "gpt-4o-mini-search-preview",
+      "swe_score": 0.3,
+      "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
+      "allowed_roles": ["main", "fallback", "research"]
     }
   ],
   "google": [
     {
-      "id": "gemini-2.5-pro-latest",
+      "id": "gemini-2.5-pro-exp-03-25",
       "swe_score": 0.638,
       "cost_per_1m_tokens": null,
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "gemini-1.5-flash-latest",
+      "id": "gemini-2.5-flash-preview-04-17",
       "swe_score": 0,
       "cost_per_1m_tokens": null,
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "gemini-2.0-flash-experimental",
+      "id": "gemini-2.0-flash",
       "swe_score": 0.754,
       "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
       "allowed_roles": ["main", "fallback"]
@@ -123,134 +141,146 @@
       "swe_score": 0,
       "cost_per_1m_tokens": null,
       "allowed_roles": ["main", "fallback"]
-    },
-    {
-      "id": "gemma-3-7b",
-      "swe_score": 0,
-      "cost_per_1m_tokens": null,
-      "allowed_roles": ["main", "fallback"]
     }
   ],
   "perplexity": [
     {
       "id": "sonar-pro",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
-      "allowed_roles": ["main", "fallback", "research"]
+      "cost_per_1m_tokens": { "input": 3, "output": 15 },
+      "allowed_roles": ["research"]
     },
     {
-      "id": "sonar-mini",
+      "id": "sonar",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
-      "allowed_roles": ["main", "fallback", "research"]
+      "cost_per_1m_tokens": { "input": 1, "output": 1 },
+      "allowed_roles": ["research"]
     },
     {
       "id": "deep-research",
       "swe_score": 0.211,
-      "cost_per_1m_tokens": { "input": 2.0, "output": 8.0 },
-      "allowed_roles": ["main", "fallback", "research"]
+      "cost_per_1m_tokens": { "input": 2, "output": 8 },
+      "allowed_roles": ["research"]
+    },
+    {
+      "id": "sonar-reasoning-pro",
+      "swe_score": 0.211,
+      "cost_per_1m_tokens": { "input": 2, "output": 8 },
+      "allowed_roles": ["main", "fallback"]
+    },
+    {
+      "id": "sonar-reasoning",
+      "swe_score": 0.211,
+      "cost_per_1m_tokens": { "input": 1, "output": 5 },
+      "allowed_roles": ["main", "fallback"]
     }
   ],
   "ollama": [
     {
-      "id": "llava",
+      "id": "gemma3:27b",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "deepseek-coder-v2",
+      "id": "gemma3:12b",
      "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "dolphin3",
+      "id": "qwq",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "olmo2-7b",
+      "id": "deepseek-r1",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "olmo2-13b",
+      "id": "mistral-small3.1",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
+      "allowed_roles": ["main", "fallback"]
+    },
+    {
+      "id": "llama3.3",
+      "swe_score": 0,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
+      "allowed_roles": ["main", "fallback"]
+    },
+    {
+      "id": "phi4",
+      "swe_score": 0,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     }
   ],
   "openrouter": [
     {
-      "id": "meta-llama/llama-4-scout",
+      "id": "google/gemini-2.0-flash-001",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "google/gemini-2.5-pro-exp-03-25",
+      "id": "google/gemini-2.5-pro-exp-03-25:free",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "openrouter/optimus-alpha",
+      "id": "deepseek/deepseek-chat-v3-0324:free",
       "swe_score": 0,
-      "cost_per_1m_tokens": { "input": 30.0, "output": 60.0 },
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "openrouter/quasar-alpha",
+      "id": "google/gemini-2.5-pro-preview-03-25",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 1.25, "output": 10 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "kimi-vl-a3b-thinking",
+      "id": "deepseek/deepseek-chat-v3-0324",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0.27, "output": 1.1 },
       "allowed_roles": ["main", "fallback"]
     },
     {
-      "id": "qwen2.5-max",
+      "id": "deepseek/deepseek-r1:free",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 0, "output": 0 },
       "allowed_roles": ["main", "fallback"]
     }
   ],
   "grok": [
     {
-      "id": "grok3-beta",
+      "id": "grok3",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
+      "cost_per_1m_tokens": { "input": 3, "output": 15 },
       "allowed_roles": ["main", "fallback", "research"]
     },
     {
       "id": "grok-3-mini",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
-      "allowed_roles": ["main", "fallback"]
+      "cost_per_1m_tokens": { "input": 0.3, "output": 0.5 },
+      "allowed_roles": ["main", "fallback", "research"]
     },
     {
-      "id": "grok-2",
+      "id": "grok3-fast",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
-      "allowed_roles": ["main", "fallback"]
+      "cost_per_1m_tokens": { "input": 5, "output": 25 },
+      "allowed_roles": ["main", "fallback", "research"]
     },
     {
-      "id": "grok-2-mini",
+      "id": "grok-3-mini-fast",
       "swe_score": 0,
-      "cost_per_1m_tokens": null,
-      "allowed_roles": ["main", "fallback"]
-    },
-    {
-      "id": "grok-1.5",
-      "swe_score": 0,
-      "cost_per_1m_tokens": null,
-      "allowed_roles": ["main", "fallback"]
+      "cost_per_1m_tokens": { "input": 0.6, "output": 4 },
+      "allowed_roles": ["main", "fallback", "research"]
     }
   ]
 }
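The `cost_per_1m_tokens` values above read as USD per million tokens. A small illustrative estimate for a single request (the token counts below are made up):

```js
// Rough cost estimate from a cost_per_1m_tokens entry.
const sonnet = { input: 3.0, output: 15.0 }; // claude-3-5-sonnet-20241022 entry above

const estimateCost = (rates, inputTokens, outputTokens) =>
	(inputTokens / 1_000_000) * rates.input + (outputTokens / 1_000_000) * rates.output;

console.log(estimateCost(sonnet, 12_000, 2_000).toFixed(4)); // "0.0660"
```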
PRD parsing module (`parsePRD`, refactored to the unified AI service):

@@ -2,6 +2,7 @@ import fs from 'fs';
 import path from 'path';
 import chalk from 'chalk';
 import boxen from 'boxen';
+import { z } from 'zod';
 
 import {
   log,
@@ -11,10 +12,33 @@ import {
   isSilentMode
 } from '../utils.js';
 
-import { callClaude } from '../ai-services.js';
+import { generateObjectService } from '../ai-services-unified.js';
 import { getDebugFlag } from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
+// Define Zod schema for task validation
+const TaskSchema = z.object({
+  id: z.number(),
+  title: z.string(),
+  description: z.string(),
+  status: z.string().default('pending'),
+  dependencies: z.array(z.number()).default([]),
+  priority: z.string().default('medium'),
+  details: z.string().optional(),
+  testStrategy: z.string().optional()
+});
+
+// Define Zod schema for the complete tasks data
+const TasksDataSchema = z.object({
+  tasks: z.array(TaskSchema),
+  metadata: z.object({
+    projectName: z.string(),
+    totalTasks: z.number(),
+    sourceFile: z.string(),
+    generatedAt: z.string()
+  })
+});
+
 /**
  * Parse a PRD file and generate tasks
  * @param {string} prdPath - Path to the PRD file
@@ -24,17 +48,8 @@ import generateTaskFiles from './generate-task-files.js';
  * @param {Object} options.reportProgress - Function to report progress to MCP server (optional)
  * @param {Object} options.mcpLog - MCP logger object (optional)
  * @param {Object} options.session - Session object from MCP server (optional)
- * @param {Object} aiClient - AI client to use (optional)
- * @param {Object} modelConfig - Model configuration (optional)
  */
-async function parsePRD(
-  prdPath,
-  tasksPath,
-  numTasks,
-  options = {},
-  aiClient = null,
-  modelConfig = null
-) {
+async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
   const { reportProgress, mcpLog, session } = options;
 
   // Determine output format based on mcpLog presence (simplification)
@@ -56,22 +71,79 @@ async function parsePRD(
   // Read the PRD content
   const prdContent = fs.readFileSync(prdPath, 'utf8');
 
-  // Call Claude to generate tasks, passing the provided AI client if available
-  const tasksData = await callClaude(
-    prdContent,
-    prdPath,
-    numTasks,
-    0,
-    { reportProgress, mcpLog, session },
-    aiClient,
-    modelConfig
-  );
+  // Build system prompt for PRD parsing
+  const systemPrompt = `You are an AI assistant helping to break down a Product Requirements Document (PRD) into a set of sequential development tasks.
+Your goal is to create ${numTasks} well-structured, actionable development tasks based on the PRD provided.
+
+Each task should follow this JSON structure:
+{
+  "id": number,
+  "title": string,
+  "description": string,
+  "status": "pending",
+  "dependencies": number[] (IDs of tasks this depends on),
+  "priority": "high" | "medium" | "low",
+  "details": string (implementation details),
+  "testStrategy": string (validation approach)
+}
+
+Guidelines:
+1. Create exactly ${numTasks} tasks, numbered from 1 to ${numTasks}
+2. Each task should be atomic and focused on a single responsibility
+3. Order tasks logically - consider dependencies and implementation sequence
+4. Early tasks should focus on setup, core functionality first, then advanced features
+5. Include clear validation/testing approach for each task
+6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs)
+7. Assign priority (high/medium/low) based on criticality and dependency order
+8. Include detailed implementation guidance in the "details" field
+9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance
+10. Focus on filling in any gaps left by the PRD or areas that aren't fully specified, while preserving all explicit requirements
+11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches`;
+
+  // Build user prompt with PRD content
+  const userPrompt = `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks:
+
+${prdContent}
+
+Return your response in this format:
+{
+  "tasks": [
+    {
+      "id": 1,
+      "title": "Setup Project Repository",
+      "description": "...",
+      ...
+    },
+    ...
+  ],
+  "metadata": {
+    "projectName": "PRD Implementation",
+    "totalTasks": ${numTasks},
+    "sourceFile": "${prdPath}",
+    "generatedAt": "YYYY-MM-DD"
+  }
+}`;
+
+  // Call the unified AI service
+  report('Calling AI service to generate tasks from PRD...', 'info');
+
+  // Call generateObjectService with proper parameters
+  const tasksData = await generateObjectService({
+    role: 'main', // Use 'main' role to get the model from config
+    session: session, // Pass session for API key resolution
+    schema: TasksDataSchema, // Pass the schema for validation
+    objectName: 'tasks_data', // Name the generated object
+    systemPrompt: systemPrompt, // System instructions
+    prompt: userPrompt, // User prompt with PRD content
+    reportProgress // Progress reporting function
+  });
+
   // Create the directory if it doesn't exist
   const tasksDir = path.dirname(tasksPath);
   if (!fs.existsSync(tasksDir)) {
     fs.mkdirSync(tasksDir, { recursive: true });
   }
 
   // Write the tasks to the file
   writeJSON(tasksPath, tasksData);
   report(
@@ -125,7 +197,7 @@ async function parsePRD(
     if (outputFormat === 'text') {
       console.error(chalk.red(`Error: ${error.message}`));
 
-      if (getDebugFlag()) {
+      if (getDebugFlag(session)) {
         // Use getter
         console.error(error);
       }
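Since `generateObjectService` is asked to return data shaped like `TasksDataSchema`, the same schema can validate the result. A self-contained sketch; the sample object and file name are illustrative, not taken from this commit:

```js
// Sketch of validating AI output against the TasksDataSchema defined above
// (re-declared here so the snippet runs on its own).
import { z } from 'zod';

const TaskSchema = z.object({
	id: z.number(),
	title: z.string(),
	description: z.string(),
	status: z.string().default('pending'),
	dependencies: z.array(z.number()).default([]),
	priority: z.string().default('medium'),
	details: z.string().optional(),
	testStrategy: z.string().optional()
});

const TasksDataSchema = z.object({
	tasks: z.array(TaskSchema),
	metadata: z.object({
		projectName: z.string(),
		totalTasks: z.number(),
		sourceFile: z.string(),
		generatedAt: z.string()
	})
});

// Made-up candidate object standing in for the AI service's response.
const candidate = {
	tasks: [{ id: 1, title: 'Setup Project Repository', description: '...' }],
	metadata: {
		projectName: 'PRD Implementation',
		totalTasks: 1,
		sourceFile: 'prd.txt',
		generatedAt: '2025-04-24'
	}
};

const parsed = TasksDataSchema.safeParse(candidate);
console.log(parsed.success); // true; defaults fill in status, dependencies, priority
```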
src/ai-providers/anthropic.js:

@@ -27,9 +27,14 @@ function getClient(apiKey) {
   // Remove the check for anthropicClient
   // if (!anthropicClient) {
   // TODO: Explore passing options like default headers if needed
-  // Create and return a new instance directly
+  // Create and return a new instance directly with standard version header
   return createAnthropic({
-    apiKey: apiKey
+    apiKey: apiKey,
+    baseURL: 'https://api.anthropic.com/v1',
+    // Use standard version header instead of beta
+    headers: {
+      'anthropic-beta': 'output-128k-2025-02-19'
+    }
   });
   // }
   // return anthropicClient;
@@ -63,10 +68,8 @@ export async function generateAnthropicText({
     model: client(modelId),
     messages: messages,
     maxTokens: maxTokens,
-    temperature: temperature,
-    headers: {
-      'anthropic-beta': 'output-128k-2025-02-19'
-    }
+    temperature: temperature
+    // Beta header moved to client initialization
     // TODO: Add other relevant parameters like topP, topK if needed
   });
   log(
@@ -125,10 +128,8 @@ export async function streamAnthropicText({
     model: client(modelId),
     messages: messages,
     maxTokens: maxTokens,
-    temperature: temperature,
-    headers: {
-      'anthropic-beta': 'output-128k-2025-02-19'
-    }
+    temperature: temperature
+    // Beta header moved to client initialization
     // TODO: Add other relevant parameters
   });
 
@@ -178,6 +179,13 @@ export async function generateAnthropicObject({
   );
   try {
     const client = getClient(apiKey);
+
+    // Log basic debug info
+    log(
+      'debug',
+      `Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
+    );
+
     const result = await generateObject({
       model: client(modelId),
       mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
@@ -191,12 +199,14 @@ export async function generateAnthropicObject({
       temperature: temperature,
       maxRetries: maxRetries
     });
+
    log(
      'debug',
      `Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
    );
    return result.object;
  } catch (error) {
+    // Simple error logging
    log(
      'error',
      `Anthropic generateObject ('${objectName}') failed: ${error.message}`
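With the beta header now set when the client is created, callers no longer pass per-request headers. A hedged usage sketch of the exported helper; the exact parameter names and return shape are inferred from the diff above and may differ from the final export:

```js
// Assumed usage of the refactored provider helper (parameter names inferred).
import { generateAnthropicText } from './src/ai-providers/anthropic.js';

const result = await generateAnthropicText({
	apiKey: process.env.ANTHROPIC_API_KEY,
	modelId: 'claude-3-7-sonnet-20250219',
	messages: [{ role: 'user', content: 'Summarize this PRD in one sentence.' }],
	maxTokens: 1024,
	temperature: 0.2
	// No per-call headers: the beta header now lives on the client created in getClient().
});
console.log(result);
```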
Task 61 subtask file (status updates and new implementation notes for subtask 20):

@@ -486,7 +486,7 @@ The existing `ai-services.js` should be refactored to:
 7. Add verbose output option for debugging
 8. Testing approach: Create integration tests that verify model setting functionality with various inputs
 
-## 8. Update Main Task Processing Logic [pending]
+## 8. Update Main Task Processing Logic [deferred]
 ### Dependencies: 61.4, 61.5, 61.18
 ### Description: Refactor the main task processing logic to use the new AI services module and support dynamic model selection.
 ### Details:
@@ -554,7 +554,7 @@ When updating the main task processing logic, implement the following changes to
 ```
 </info added on 2025-04-20T03:55:56.310Z>
 
-## 9. Update Research Processing Logic [pending]
+## 9. Update Research Processing Logic [deferred]
 ### Dependencies: 61.4, 61.5, 61.8, 61.18
 ### Description: Refactor the research processing logic to use the new AI services module and support dynamic model selection for research operations.
 ### Details:
@@ -712,7 +712,7 @@ When implementing the refactored research processing logic, ensure the following
 - How to verify configuration is correctly loaded
 </info added on 2025-04-20T03:55:20.433Z>
 
-## 11. Refactor PRD Parsing to use generateObjectService [pending]
+## 11. Refactor PRD Parsing to use generateObjectService [in-progress]
 ### Dependencies: 61.23
 ### Description: Update PRD processing logic (callClaude, processClaudeResponse, handleStreamingRequest in ai-services.js) to use the new `generateObjectService` from `ai-services-unified.js` with an appropriate Zod schema.
 ### Details:
@@ -961,7 +961,7 @@ To implement this refactoring, you'll need to:
 4. Update any error handling to match the new service's error patterns.
 </info added on 2025-04-20T03:53:27.455Z>
 
-## 17. Refactor General Chat/Update AI Calls [pending]
+## 17. Refactor General Chat/Update AI Calls [deferred]
 ### Dependencies: 61.23
 ### Description: Refactor functions like `sendChatWithContext` (and potentially related task update functions in `task-manager.js` if they make direct AI calls) to use `streamTextService` or `generateTextService` from `ai-services-unified.js`.
 ### Details:
@@ -1008,7 +1008,7 @@ When refactoring `sendChatWithContext` and related functions, ensure they align
 5. Ensure any default behaviors respect configuration defaults rather than hardcoded values.
 </info added on 2025-04-20T03:53:03.709Z>
 
-## 18. Refactor Callers of AI Parsing Utilities [pending]
+## 18. Refactor Callers of AI Parsing Utilities [deferred]
 ### Dependencies: 61.11,61.12,61.13,61.14,61.15,61.16,61.17,61.19
 ### Description: Update the code that calls `parseSubtasksFromText`, `parseTaskJsonResponse`, and `parseTasksFromCompletion` to instead directly handle the structured JSON output provided by `generateObjectService` (as the refactored AI calls will now use it).
 ### Details:
@@ -1276,12 +1276,60 @@ When testing the non-streaming `generateTextService` call in `updateSubtaskById`
 </info added on 2025-04-22T06:35:14.892Z>
 </info added on 2025-04-22T06:23:23.247Z>
 
-## 20. Implement `anthropic.js` Provider Module using Vercel AI SDK [done]
+## 20. Implement `anthropic.js` Provider Module using Vercel AI SDK [in-progress]
 ### Dependencies: None
 ### Description: Create and implement the `anthropic.js` module within `src/ai-providers/`. This module should contain functions to interact with the Anthropic API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
 ### Details:
+
+<info added on 2025-04-24T02:54:40.326Z>
+- Use the `@ai-sdk/anthropic` package to implement the provider module. You can import the default provider instance with `import { anthropic } from '@ai-sdk/anthropic'`, or create a custom instance using `createAnthropic` if you need to specify custom headers, API key, or base URL (such as for beta features or proxying)[1][4].
+
+- To address persistent 'Not Found' errors, ensure the model name matches the latest Anthropic model IDs (e.g., `claude-3-haiku-20240307`, `claude-3-5-sonnet-20241022`). Model naming is case-sensitive and must match Anthropic's published versions[4][5].
+
+- If you require custom headers (such as for beta features), use the `createAnthropic` function and pass a `headers` object. For example:
+```js
+import { createAnthropic } from '@ai-sdk/anthropic';
+const anthropic = createAnthropic({
+  apiKey: process.env.ANTHROPIC_API_KEY,
+  headers: { 'anthropic-beta': 'tools-2024-04-04' }
+});
+```
+
+- For streaming and non-streaming support, the Vercel AI SDK provides both `generateText` (non-streaming) and `streamText` (streaming) functions. Use these with the Anthropic provider instance as the `model` parameter[5].
+
+- Example usage for non-streaming:
+```js
+import { generateText } from 'ai';
+import { anthropic } from '@ai-sdk/anthropic';
+
+const result = await generateText({
+  model: anthropic('claude-3-haiku-20240307'),
+  messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }]
+});
+```
+
+- Example usage for streaming:
+```js
+import { streamText } from 'ai';
+import { anthropic } from '@ai-sdk/anthropic';
+
+const stream = await streamText({
+  model: anthropic('claude-3-haiku-20240307'),
+  messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }]
+});
+```
+
+- Ensure that your implementation adheres to the standardized input/output format defined for `ai-services-unified.js`, mapping the SDK's response structure to your unified format.
+
+- If you continue to encounter 'Not Found' errors, verify:
+  - The API key is valid and has access to the requested models.
+  - The model name is correct and available to your Anthropic account.
+  - Any required beta headers are included if using beta features or models[1].
+
+- Prefer direct provider instantiation with explicit headers and API key configuration for maximum compatibility and to avoid SDK-level abstraction issues[1].
+</info added on 2025-04-24T02:54:40.326Z>
 
 ## 21. Implement `perplexity.js` Provider Module using Vercel AI SDK [done]
 ### Dependencies: None
 ### Description: Create and implement the `perplexity.js` module within `src/ai-providers/`. This module should contain functions to interact with the Perplexity API (likely using their OpenAI-compatible endpoint) via the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
@@ -1673,7 +1721,7 @@ The new AI architecture introduces a clear separation between sensitive credentials
 </info added on 2025-04-20T03:51:04.461Z>
 
 ## 33. Cleanup Old AI Service Files [pending]
-### Dependencies: 61.32
+### Dependencies: 61.31, 61.32
 ### Description: After all other migration subtasks (refactoring, provider implementation, testing, documentation) are complete and verified, remove the old `ai-services.js` and `ai-client-factory.js` files from the `scripts/modules/` directory. Ensure no code still references them.
 ### Details:
 
Task 61 entries in the tasks data file (status changes; the long "details" strings below are unchanged context):

@@ -2838,7 +2838,7 @@
           "61.18"
         ],
"details": "1. Update task processing functions to use the centralized AI services\n2. Implement dynamic model selection based on configuration\n3. Add error handling for model-specific failures\n4. Implement graceful degradation when preferred models are unavailable\n5. Update prompts to be model-agnostic where possible\n6. Add telemetry for model performance monitoring\n7. Implement response validation to ensure quality across different models\n8. Testing approach: Create integration tests that verify task processing with different model configurations\n\n<info added on 2025-04-20T03:55:56.310Z>\nWhen updating the main task processing logic, implement the following changes to align with the new configuration system:\n\n1. Replace direct environment variable access with calls to the configuration manager:\n ```javascript\n // Before\n const apiKey = process.env.OPENAI_API_KEY;\n const modelId = process.env.MAIN_MODEL || \"gpt-4\";\n \n // After\n import { getMainProvider, getMainModelId, getMainMaxTokens, getMainTemperature } from './config-manager.js';\n \n const provider = getMainProvider();\n const modelId = getMainModelId();\n const maxTokens = getMainMaxTokens();\n const temperature = getMainTemperature();\n ```\n\n2. Implement model fallback logic using the configuration hierarchy:\n ```javascript\n async function processTaskWithFallback(task) {\n try {\n return await processWithModel(task, getMainModelId());\n } catch (error) {\n logger.warn(`Primary model failed: ${error.message}`);\n const fallbackModel = getMainFallbackModelId();\n if (fallbackModel) {\n return await processWithModel(task, fallbackModel);\n }\n throw error;\n }\n }\n ```\n\n3. Add configuration-aware telemetry points to track model usage and performance:\n ```javascript\n function trackModelPerformance(modelId, startTime, success) {\n const duration = Date.now() - startTime;\n telemetry.trackEvent('model_usage', {\n modelId,\n provider: getMainProvider(),\n duration,\n success,\n configVersion: getConfigVersion()\n });\n }\n ```\n\n4. Ensure all prompt templates are loaded through the configuration system rather than hardcoded:\n ```javascript\n const promptTemplate = getPromptTemplate('task_processing');\n const prompt = formatPrompt(promptTemplate, { task: taskData });\n ```\n</info added on 2025-04-20T03:55:56.310Z>",
-        "status": "pending",
+        "status": "deferred",
         "parentTaskId": 61
       },
       {
@@ -2852,7 +2852,7 @@
           "61.18"
         ],
"details": "1. Update research functions to use the centralized AI services\n2. Implement dynamic model selection for research operations\n3. Add specialized error handling for research-specific issues\n4. Optimize prompts for research-focused models\n5. Implement result caching for research operations\n6. Add support for model-specific research parameters\n7. Create fallback mechanisms for research operations\n8. Testing approach: Create integration tests that verify research functionality with different model configurations\n\n<info added on 2025-04-20T03:55:39.633Z>\nWhen implementing the refactored research processing logic, ensure the following:\n\n1. Replace direct environment variable access with the new configuration system:\n ```javascript\n // Old approach\n const apiKey = process.env.OPENAI_API_KEY;\n const model = \"gpt-4\";\n \n // New approach\n import { getResearchProvider, getResearchModelId, getResearchMaxTokens, \n getResearchTemperature } from './config-manager.js';\n \n const provider = getResearchProvider();\n const modelId = getResearchModelId();\n const maxTokens = getResearchMaxTokens();\n const temperature = getResearchTemperature();\n ```\n\n2. Implement model fallback chains using the configuration system:\n ```javascript\n async function performResearch(query) {\n try {\n return await callAIService({\n provider: getResearchProvider(),\n modelId: getResearchModelId(),\n maxTokens: getResearchMaxTokens(),\n temperature: getResearchTemperature()\n });\n } catch (error) {\n logger.warn(`Primary research model failed: ${error.message}`);\n return await callAIService({\n provider: getResearchProvider('fallback'),\n modelId: getResearchModelId('fallback'),\n maxTokens: getResearchMaxTokens('fallback'),\n temperature: getResearchTemperature('fallback')\n });\n }\n }\n ```\n\n3. Add support for dynamic parameter adjustment based on research type:\n ```javascript\n function getResearchParameters(researchType) {\n // Get base parameters\n const baseParams = {\n provider: getResearchProvider(),\n modelId: getResearchModelId(),\n maxTokens: getResearchMaxTokens(),\n temperature: getResearchTemperature()\n };\n \n // Adjust based on research type\n switch(researchType) {\n case 'deep':\n return {...baseParams, maxTokens: baseParams.maxTokens * 1.5};\n case 'creative':\n return {...baseParams, temperature: Math.min(baseParams.temperature + 0.2, 1.0)};\n case 'factual':\n return {...baseParams, temperature: Math.max(baseParams.temperature - 0.2, 0)};\n default:\n return baseParams;\n }\n }\n ```\n\n4. Ensure the caching mechanism uses configuration-based TTL settings:\n ```javascript\n const researchCache = new Cache({\n ttl: getResearchCacheTTL(),\n maxSize: getResearchCacheMaxSize()\n });\n ```\n</info added on 2025-04-20T03:55:39.633Z>",
-        "status": "pending",
+        "status": "deferred",
         "parentTaskId": 61
       },
       {
@@ -2874,7 +2874,7 @@
         "title": "Refactor PRD Parsing to use generateObjectService",
         "description": "Update PRD processing logic (callClaude, processClaudeResponse, handleStreamingRequest in ai-services.js) to use the new `generateObjectService` from `ai-services-unified.js` with an appropriate Zod schema.",
"details": "\n\n<info added on 2025-04-20T03:55:01.707Z>\nThe PRD parsing refactoring should align with the new configuration system architecture. When implementing this change:\n\n1. Replace direct environment variable access with `resolveEnvVariable` calls for API keys.\n\n2. Remove any hardcoded model names or parameters in the PRD processing functions. Instead, use the config-manager.js getters:\n - `getModelForRole('prd')` to determine the appropriate model\n - `getModelParameters('prd')` to retrieve temperature, maxTokens, etc.\n\n3. When constructing the generateObjectService call, ensure parameters are sourced from config:\n```javascript\nconst modelConfig = getModelParameters('prd');\nconst model = getModelForRole('prd');\n\nconst result = await generateObjectService({\n model,\n temperature: modelConfig.temperature,\n maxTokens: modelConfig.maxTokens,\n // other parameters as needed\n schema: prdSchema,\n // existing prompt/context parameters\n});\n```\n\n4. Update any logging to respect the logging configuration from config-manager (e.g., `isLoggingEnabled('ai')`)\n\n5. Ensure any default values previously hardcoded are now retrieved from the configuration system.\n</info added on 2025-04-20T03:55:01.707Z>",
"status": "pending",
|
"status": "in-progress",
|
||||||
"dependencies": [
"61.23"
],
@@ -2940,7 +2940,7 @@
"title": "Refactor General Chat/Update AI Calls",
"description": "Refactor functions like `sendChatWithContext` (and potentially related task update functions in `task-manager.js` if they make direct AI calls) to use `streamTextService` or `generateTextService` from `ai-services-unified.js`.",
"details": "\n\n<info added on 2025-04-20T03:53:03.709Z>\nWhen refactoring `sendChatWithContext` and related functions, ensure they align with the new configuration system:\n\n1. Replace direct model references with config getter calls:\n ```javascript\n // Before\n const model = \"gpt-4\";\n \n // After\n import { getModelForRole } from './config-manager.js';\n const model = getModelForRole('chat'); // or appropriate role\n ```\n\n2. Extract AI parameters from config rather than hardcoding:\n ```javascript\n import { getAIParameters } from './config-manager.js';\n const { temperature, maxTokens } = getAIParameters('chat');\n ```\n\n3. When calling `streamTextService` or `generateTextService`, pass parameters from config:\n ```javascript\n await streamTextService({\n messages,\n model: getModelForRole('chat'),\n temperature: getAIParameters('chat').temperature,\n // other parameters as needed\n });\n ```\n\n4. For logging control, check config settings:\n ```javascript\n import { isLoggingEnabled } from './config-manager.js';\n \n if (isLoggingEnabled('aiCalls')) {\n console.log('AI request:', messages);\n }\n ```\n\n5. Ensure any default behaviors respect configuration defaults rather than hardcoded values.\n</info added on 2025-04-20T03:53:03.709Z>",
"status": "pending",
|
"status": "deferred",
|
||||||
"dependencies": [
"61.23"
],
@@ -2951,7 +2951,7 @@
"title": "Refactor Callers of AI Parsing Utilities",
"description": "Update the code that calls `parseSubtasksFromText`, `parseTaskJsonResponse`, and `parseTasksFromCompletion` to instead directly handle the structured JSON output provided by `generateObjectService` (as the refactored AI calls will now use it).",
"details": "\n\n<info added on 2025-04-20T03:52:45.518Z>\nThe refactoring of callers to AI parsing utilities should align with the new configuration system. When updating these callers:\n\n1. Replace direct API key references with calls to the configuration system using `resolveEnvVariable` for sensitive credentials.\n\n2. Update model selection logic to use the centralized configuration from `.taskmasterconfig` via the getter functions in `config-manager.js`. For example:\n ```javascript\n // Old approach\n const model = \"gpt-4\";\n \n // New approach\n import { getModelForRole } from './config-manager';\n const model = getModelForRole('parsing'); // or appropriate role\n ```\n\n3. Similarly, replace hardcoded parameters with configuration-based values:\n ```javascript\n // Old approach\n const maxTokens = 2000;\n const temperature = 0.2;\n \n // New approach\n import { getAIParameterValue } from './config-manager';\n const maxTokens = getAIParameterValue('maxTokens', 'parsing');\n const temperature = getAIParameterValue('temperature', 'parsing');\n ```\n\n4. Ensure logging behavior respects the centralized logging configuration settings.\n\n5. When calling `generateObjectService`, pass the appropriate configuration context to ensure it uses the correct settings from the centralized configuration system.\n</info added on 2025-04-20T03:52:45.518Z>",
"status": "pending",
|
"status": "deferred",
|
||||||
"dependencies": [
"61.11,61.12,61.13,61.14,61.15,61.16,61.17,61.19"
],
@@ -2972,8 +2972,8 @@
"id": 20,
"title": "Implement `anthropic.js` Provider Module using Vercel AI SDK",
"description": "Create and implement the `anthropic.js` module within `src/ai-providers/`. This module should contain functions to interact with the Anthropic API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`.",
"details": "",
|
"details": "\n\n<info added on 2025-04-24T02:54:40.326Z>\n- Use the `@ai-sdk/anthropic` package to implement the provider module. You can import the default provider instance with `import { anthropic } from '@ai-sdk/anthropic'`, or create a custom instance using `createAnthropic` if you need to specify custom headers, API key, or base URL (such as for beta features or proxying)[1][4].\n\n- To address persistent 'Not Found' errors, ensure the model name matches the latest Anthropic model IDs (e.g., `claude-3-haiku-20240307`, `claude-3-5-sonnet-20241022`). Model naming is case-sensitive and must match Anthropic's published versions[4][5].\n\n- If you require custom headers (such as for beta features), use the `createAnthropic` function and pass a `headers` object. For example:\n ```js\n import { createAnthropic } from '@ai-sdk/anthropic';\n const anthropic = createAnthropic({\n apiKey: process.env.ANTHROPIC_API_KEY,\n headers: { 'anthropic-beta': 'tools-2024-04-04' }\n });\n ```\n\n- For streaming and non-streaming support, the Vercel AI SDK provides both `generateText` (non-streaming) and `streamText` (streaming) functions. Use these with the Anthropic provider instance as the `model` parameter[5].\n\n- Example usage for non-streaming:\n ```js\n import { generateText } from 'ai';\n import { anthropic } from '@ai-sdk/anthropic';\n\n const result = await generateText({\n model: anthropic('claude-3-haiku-20240307'),\n messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }]\n });\n ```\n\n- Example usage for streaming:\n ```js\n import { streamText } from 'ai';\n import { anthropic } from '@ai-sdk/anthropic';\n\n const stream = await streamText({\n model: anthropic('claude-3-haiku-20240307'),\n messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }]\n });\n ```\n\n- Ensure that your implementation adheres to the standardized input/output format defined for `ai-services-unified.js`, mapping the SDK's response structure to your unified format.\n\n- If you continue to encounter 'Not Found' errors, verify:\n - The API key is valid and has access to the requested models.\n - The model name is correct and available to your Anthropic account.\n - Any required beta headers are included if using beta features or models[1].\n\n- Prefer direct provider instantiation with explicit headers and API key configuration for maximum compatibility and to avoid SDK-level abstraction issues[1].\n</info added on 2025-04-24T02:54:40.326Z>",
|
||||||
"status": "done",
|
"status": "in-progress",
|
||||||
"dependencies": [],
"parentTaskId": 61
},