Compare commits

..

31 Commits

Author SHA1 Message Date
Ralph Khreish 834dfb86ac chore: cleanup 2025-05-16 23:22:34 +02:00
Ralph Khreish b984af0606 Merge remote-tracking branch 'origin/next' into add-complexity-score-to-task 2025-05-16 23:07:16 +02:00
Shrey Paharia d7ebfe30fc fix: fixed mcp server project root input 2025-05-07 23:00:42 +05:30
Shrey Paharia 126abb9631 fix: fixed addComplexityToTask util 2025-05-07 11:30:37 +05:30
Shrey Paharia e917fd16c0 fix: fixed findTaskById and tests 2025-05-07 11:17:55 +05:30
Shrey Paharia 07a710d88e fix: fixed findTaskById 2025-05-07 11:03:20 +05:30
Shrey Paharia 0ca41443de Merge branch 'next' of github.com:eyaltoledano/claude-task-master into add-complexity-score-to-task 2025-05-07 10:21:09 +05:30
Shrey Paharia 7c543cd8c3 fix: fixed running tests 2025-05-03 18:38:36 +05:30
Shrey Paharia c2865b81f8 feat: update list and find next task 2025-05-03 18:15:25 +05:30
Shrey Paharia a0ac50ffd7 Merge branch 'next' of github.com:eyaltoledano/claude-task-master into add-complexity-score-to-task 2025-05-03 16:34:47 +05:30
Shrey Paharia b6b0dd1e29 Merge branch 'next' of github.com:eyaltoledano/claude-task-master into add-complexity-score-to-task 2025-05-01 22:21:02 +05:30
Shrey Paharia 0f37cf0851 ref: remove unecessary comments 2025-04-24 00:33:09 +05:30
Shrey Paharia abb5063b3e chore: remove unecessary changeset 2025-04-24 00:29:47 +05:30
Shrey Paharia 6b0ec458e8 feat: add handling for report path override 2025-04-24 00:27:25 +05:30
Shrey Paharia 8047ec756c feat: added handling for next-task in mcp 2025-04-24 00:17:19 +05:30
Shrey Paharia be8fe8092f feat: added handling to get-task 2025-04-24 00:13:09 +05:30
Shrey Paharia 33d2569ace fix: fixed handling for complexity report path in mcp 2025-04-23 23:39:03 +05:30
Shrey Paharia fdbb25e185 fix: fixed next cli command handling 2025-04-23 21:25:42 +05:30
Shrey Paharia deaf4a6ff4 fix: added handling for show cli 2025-04-23 21:11:49 +05:30
Shrey Paharia 3628acab78 fix: add complexity handling to next task in list command 2025-04-23 20:51:53 +05:30
Shrey Paharia 463de0035c fix: moved complexity report handling to list tasks rather than list tasks direct 2025-04-23 10:08:57 +05:30
Shrey Paharia fffcc5a89d chore: add changeset 2025-04-19 13:29:35 +05:30
Shrey Paharia 11506ddc0e fix: added handling for complexity report path 2025-04-19 13:28:36 +05:30
Shrey Paharia dcb3f2f9f9 test: fix findTaskById complexity report testcases 2025-04-19 13:24:50 +05:30
Shrey Paharia e045a5268c feat: updated handling for findTaskById to take complexityReport as input 2025-04-19 13:09:14 +05:30
Shrey Paharia 8911bf4d49 ref: reorder imports 2025-04-19 04:24:16 +05:30
Shrey Paharia 5e5e20391a format: fixed formatting issues 2025-04-19 04:06:14 +05:30
Shrey Paharia 521cf0e5f0 chore: add changeset 2025-04-19 04:03:05 +05:30
Shrey Paharia 92bd0e4395 test: remove console dir 2025-04-19 03:45:43 +05:30
Shrey Paharia 3162ac49ec feat: added handling for complexity score in find task by id 2025-04-19 03:45:22 +05:30
Shrey Paharia b98af1541e feat: added complexity score handling to list tasks 2025-04-19 02:17:38 +05:30
6 changed files with 11 additions and 184 deletions

View File

@@ -1,11 +0,0 @@
---
'task-master-ai': minor
---
Add Ollama as a supported AI provider.
- You can now add it by running `task-master models --setup` and selecting it.
- Ollama is a local model provider, so no API key is required.
- Ollama models are available at `http://localhost:11434/api` by default.
- You can change the default URL by setting the `OLLAMA_BASE_URL` environment variable or by adding a `baseUrl` property to the `ollama` model role in `.taskmasterconfig`.
- If you want to use a custom API key, you can set it in the `OLLAMA_API_KEY` environment variable.
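The bullets above describe two ways to point Task Master at a non-default Ollama endpoint. A minimal sketch of both options follows; only the `OLLAMA_BASE_URL` variable and the `baseUrl` property are taken from the changeset, the surrounding `.taskmasterconfig` structure is not shown in this diff and is assumed here for illustration only.

Option 1, environment variable (for example in .env):

OLLAMA_BASE_URL="http://localhost:11434/api"

Option 2, a `baseUrl` on the Ollama-backed model role in `.taskmasterconfig` (shape assumed):

{
  "models": {
    "main": {
      "provider": "ollama",
      "modelId": "llama3",
      "baseUrl": "http://localhost:11434/api"
    }
  }
}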

View File

@@ -36,8 +36,7 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
"OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
}
}
}

View File

@@ -5,5 +5,4 @@ OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/Ope
GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
OLLAMA_API_KEY="your_ollama_api_key_here" # Optional: For remote Ollama servers that require authentication.
AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).

View File

@@ -49,9 +49,6 @@
"@anthropic-ai/sdk": "^0.39.0",
"@openrouter/ai-sdk-provider": "^0.4.5",
"ai": "^4.3.10",
"boxen": "^8.0.1",
"chalk": "^5.4.1",
"cli-table3": "^0.6.5",
"commander": "^11.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
@@ -68,6 +65,9 @@
"openai": "^4.89.0",
"ora": "^8.2.0",
"uuid": "^11.1.0",
"boxen": "^8.0.1",
"chalk": "^5.4.1",
"cli-table3": "^0.6.5",
"zod": "^3.23.8"
},
"engines": {

View File

@@ -25,7 +25,6 @@ import * as google from '../../src/ai-providers/google.js';
import * as openai from '../../src/ai-providers/openai.js';
import * as xai from '../../src/ai-providers/xai.js';
import * as openrouter from '../../src/ai-providers/openrouter.js';
- import * as ollama from '../../src/ai-providers/ollama.js';
// TODO: Import other provider modules when implemented (ollama, etc.)
// --- Provider Function Map ---
@@ -64,11 +63,6 @@ const PROVIDER_FUNCTIONS = {
generateText: openrouter.generateOpenRouterText,
streamText: openrouter.streamOpenRouterText,
generateObject: openrouter.generateOpenRouterObject
- },
- ollama: {
- generateText: ollama.generateOllamaText,
- streamText: ollama.streamOllamaText,
- generateObject: ollama.generateOllamaObject
- }
+ }
// TODO: Add entries for ollama, etc. when implemented
};
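For context, a provider map like PROVIDER_FUNCTIONS above is typically consumed by a small dispatcher that looks up the provider entry and calls the requested operation. A minimal sketch, not the repository's actual code; the function name and the `provider`, `operation`, and `params` parameters are illustrative:

async function callProvider(provider, operation, params) {
	// operation is one of 'generateText' | 'streamText' | 'generateObject'
	const fns = PROVIDER_FUNCTIONS[provider];
	if (!fns || typeof fns[operation] !== 'function') {
		throw new Error(`Unsupported provider or operation: ${provider}.${operation}`);
	}
	return fns[operation](params);
}

// e.g. const text = await callProvider('openrouter', 'generateText', { modelId, messages, temperature });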
@@ -156,10 +150,14 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
mistral: 'MISTRAL_API_KEY',
azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
- xai: 'XAI_API_KEY',
- ollama: 'OLLAMA_API_KEY'
+ xai: 'XAI_API_KEY'
};
+ // Double check this -- I have had to use an api key for ollama in the past
+ // if (providerName === 'ollama') {
+ // return null; // Ollama typically doesn't require an API key for basic setup
+ // }
const envVarName = keyMap[providerName];
if (!envVarName) {
throw new Error(
@@ -168,13 +166,6 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
}
const apiKey = resolveEnvVariable(envVarName, session, projectRoot);
- // Special handling for Ollama - API key is optional
- if (providerName === 'ollama') {
- return apiKey || null;
- }
// For all other providers, API key is required
if (!apiKey) {
throw new Error(
`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`

View File

@@ -1,151 +0,0 @@
/**
* ollama.js
* AI provider implementation for Ollama models using the ollama-ai-provider package.
*/
import { createOllama, ollama } from 'ollama-ai-provider';
import { log } from '../../scripts/modules/utils.js'; // Import logging utility
import { generateObject, generateText, streamText } from 'ai';
// Consider making model configurable via config-manager.js later
const DEFAULT_MODEL = 'llama3'; // Or a suitable default for Ollama
const DEFAULT_TEMPERATURE = 0.2;
function getClient(baseUrl) {
// baseUrl is optional, defaults to http://localhost:11434
return createOllama({
baseUrl: baseUrl || undefined
});
}
/**
* Generates text using an Ollama model.
*
* @param {object} params - Parameters for the generation.
* @param {string} params.modelId - Specific model ID to use (overrides default).
* @param {number} params.temperature - Generation temperature.
* @param {Array<object>} params.messages - The conversation history (system/user prompts).
* @param {number} [params.maxTokens] - Optional max tokens.
* @param {string} [params.baseUrl] - Optional Ollama base URL.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If API call fails.
*/
async function generateOllamaText({
modelId = DEFAULT_MODEL,
messages,
maxTokens,
temperature = DEFAULT_TEMPERATURE,
baseUrl
}) {
log('info', `Generating text with Ollama model: ${modelId}`);
try {
const client = getClient(baseUrl);
const result = await generateText({
model: client(modelId),
messages,
maxTokens,
temperature
});
log('debug', `Ollama generated text: ${result.text}`);
return result.text;
} catch (error) {
log(
'error',
`Error generating text with Ollama (${modelId}): ${error.message}`
);
throw error;
}
}
/**
* Streams text using an Ollama model.
*
* @param {object} params - Parameters for the streaming.
* @param {string} params.modelId - Specific model ID to use (overrides default).
* @param {number} params.temperature - Generation temperature.
* @param {Array<object>} params.messages - The conversation history.
* @param {number} [params.maxTokens] - Optional max tokens.
* @param {string} [params.baseUrl] - Optional Ollama base URL.
* @returns {Promise<ReadableStream>} A readable stream of text deltas.
* @throws {Error} If API call fails.
*/
async function streamOllamaText({
modelId = DEFAULT_MODEL,
temperature = DEFAULT_TEMPERATURE,
messages,
maxTokens,
baseUrl
}) {
log('info', `Streaming text with Ollama model: ${modelId}`);
try {
const ollama = getClient(baseUrl);
const stream = await streamText({
model: modelId,
messages,
temperature,
maxTokens
});
return stream;
} catch (error) {
log(
'error',
`Error streaming text with Ollama (${modelId}): ${error.message}`
);
throw error;
}
}
/**
* Generates a structured object using an Ollama model using the Vercel AI SDK's generateObject.
*
* @param {object} params - Parameters for the object generation.
* @param {string} params.modelId - Specific model ID to use (overrides default).
* @param {number} params.temperature - Generation temperature.
* @param {Array<object>} params.messages - The conversation history.
* @param {import('zod').ZodSchema} params.schema - Zod schema for the expected object.
* @param {string} params.objectName - Name for the object generation context.
* @param {number} [params.maxTokens] - Optional max tokens.
* @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - Optional Ollama base URL.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails.
*/
async function generateOllamaObject({
modelId = DEFAULT_MODEL,
temperature = DEFAULT_TEMPERATURE,
messages,
schema,
objectName = 'generated_object',
maxTokens,
maxRetries = 3,
baseUrl
}) {
log('info', `Generating object with Ollama model: ${modelId}`);
try {
const ollama = getClient(baseUrl);
const result = await generateObject({
model: ollama(modelId),
mode: 'tool',
schema: schema,
messages: messages,
tool: {
name: objectName,
description: `Generate a ${objectName} based on the prompt.`
},
maxOutputTokens: maxTokens,
temperature: temperature,
maxRetries: maxRetries
});
return result.object;
} catch (error) {
log(
'error',
`Ollama generateObject ('${objectName}') failed: ${error.message}`
);
throw error;
}
}
export { generateOllamaText, streamOllamaText, generateOllamaObject };
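For reference, the provider functions removed above follow the Vercel AI SDK calling convention: a messages array plus model options. A minimal usage sketch, assuming a local Ollama server with the llama3 model available; the values and the import path are illustrative:

import { generateOllamaText } from './src/ai-providers/ollama.js';

const text = await generateOllamaText({
	modelId: 'llama3',
	temperature: 0.2,
	maxTokens: 256,
	baseUrl: 'http://localhost:11434/api',
	messages: [
		{ role: 'system', content: 'You are a helpful assistant.' },
		{ role: 'user', content: 'Summarize the current task list.' }
	]
});
console.log(text);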