Compare commits
5 Commits
telemetry...fix/ollama

| Author | SHA1 | Date |
|---|---|---|
|  | db6fdebdcd |  |
|  | 60b8e97a1c |  |
|  | 3a6d6dd671 |  |
|  | f4a83ec047 |  |
|  | 0699f64299 |  |
.changeset/free-bikes-smile.md (new file, 11 lines)
@@ -0,0 +1,11 @@
---
'task-master-ai': minor
---

Add Ollama as a supported AI provider.

- You can now add it by running `task-master models --setup` and selecting it.
- Ollama is a local model provider, so no API key is required.
- Ollama models are available at `http://localhost:11434/api` by default.
- You can change the default URL by setting the `OLLAMA_BASE_URL` environment variable or by adding a `baseUrl` property to the `ollama` model role in `.taskmasterconfig`.
- If you want to use a custom API key, you can set it in the `OLLAMA_API_KEY` environment variable.
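For illustration only, here is roughly what the `baseUrl` override described in the changeset could look like in `.taskmasterconfig`. The surrounding `models`/`main` keys are an assumption about the file's layout and are not taken from this diff; only the `provider`/`baseUrl` idea comes from the changeset text.

```json
{
  "models": {
    "main": {
      "provider": "ollama",
      "modelId": "llama3",
      "baseUrl": "http://localhost:11434/api"
    }
  }
}
```

Equivalently, per the changeset, setting the `OLLAMA_BASE_URL` environment variable overrides the default URL without editing the config file.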
@@ -2,18 +2,21 @@
  "mode": "exit",
  "tag": "rc",
  "initialVersions": {
-    "task-master-ai": "0.13.2"
+    "task-master-ai": "0.14.0-rc.0"
  },
  "changesets": [
    "beige-doodles-type",
    "floppy-plants-marry",
    "forty-plums-stay",
    "free-bikes-smile",
    "many-wasps-sell",
    "nice-lies-cover",
    "red-oranges-attend",
    "red-suns-wash",
    "sharp-dingos-melt",
    "six-cloths-happen",
    "slow-singers-swim",
    "small-toys-fly",
    "social-masks-fold",
    "soft-zoos-flow",
    "ten-ways-mate",
CHANGELOG.md
@@ -1,5 +1,23 @@
# task-master-ai

+## 0.14.0-rc.1
+
+### Minor Changes
+
+- [#536](https://github.com/eyaltoledano/claude-task-master/pull/536) [`f4a83ec`](https://github.com/eyaltoledano/claude-task-master/commit/f4a83ec047b057196833e3a9b861d4bceaec805d) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add Ollama as a supported AI provider.
+
+  - You can now add it by running `task-master models --setup` and selecting it.
+  - Ollama is a local model provider, so no API key is required.
+  - Ollama models are available at `http://localhost:11434/api` by default.
+  - You can change the default URL by setting the `OLLAMA_BASE_URL` environment variable or by adding a `baseUrl` property to the `ollama` model role in `.taskmasterconfig`.
+  - If you want to use a custom API key, you can set it in the `OLLAMA_API_KEY` environment variable.
+
+### Patch Changes
+
+- [#442](https://github.com/eyaltoledano/claude-task-master/pull/442) [`2b3ae8b`](https://github.com/eyaltoledano/claude-task-master/commit/2b3ae8bf89dc471c4ce92f3a12ded57f61faa449) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds costs information to AI commands using input/output tokens and model costs.
+
+- [#442](https://github.com/eyaltoledano/claude-task-master/pull/442) [`0288311`](https://github.com/eyaltoledano/claude-task-master/commit/0288311965ae2a343ebee4a0c710dde94d2ae7e7) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Small fixes - `next` command no longer incorrectly suggests that subtasks be broken down into subtasks in the CLI - fixes the `append` flag so it properly works in the CLI
+
## 0.14.0-rc.0

### Minor Changes
@@ -48,7 +48,8 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
        "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
        "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
        "XAI_API_KEY": "YOUR_XAI_KEY_HERE",
-        "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
+        "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
+        "OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
      }
    }
  }
@@ -6,3 +6,4 @@ GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gem
MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
+OLLAMA_API_KEY="your_ollama_api_key_here" # Optional: For remote Ollama servers that require authentication.
@@ -39,7 +39,7 @@
  {
    "slug": "debug",
    "name": "Debug",
-    "roleDefinition": "You are Roo, an expert software debugger specializing in systematic problem diagnosis and resolution. When activated by another mdode, your task is to meticulously analyze the provided debugging request (potentially referencing Taskmaster tasks, logs, or metrics), use diagnostic tools as instructed to investigate the issue, identify the root cause, and report your findings and recommended next steps back via `attempt_completion`. You focus solely on diagnostics within the scope defined by the delegated task.",
+    "roleDefinition": "You are Roo, an expert software debugger specializing in systematic problem diagnosis and resolution. When activated by another mode, your task is to meticulously analyze the provided debugging request (potentially referencing Taskmaster tasks, logs, or metrics), use diagnostic tools as instructed to investigate the issue, identify the root cause, and report your findings and recommended next steps back via `attempt_completion`. You focus solely on diagnostics within the scope defined by the delegated task.",
    "customInstructions": "Reflect on 5-7 different possible sources of the problem, distill those down to 1-2 most likely sources, and then add logs to validate your assumptions. Explicitly ask the user to confirm the diagnosis before fixing the problem.",
    "groups": [
      "read",
@@ -1,6 +1,6 @@
{
  "name": "task-master-ai",
-  "version": "0.14.0-rc.0",
+  "version": "0.14.0-rc.1",
  "description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
  "main": "index.js",
  "type": "module",
@@ -49,6 +49,9 @@
    "@anthropic-ai/sdk": "^0.39.0",
    "@openrouter/ai-sdk-provider": "^0.4.5",
    "ai": "^4.3.10",
+    "boxen": "^8.0.1",
+    "chalk": "^5.4.1",
+    "cli-table3": "^0.6.5",
    "commander": "^11.1.0",
    "cors": "^2.8.5",
    "dotenv": "^16.3.1",
@@ -65,9 +68,6 @@
    "openai": "^4.89.0",
    "ora": "^8.2.0",
    "uuid": "^11.1.0",
-    "boxen": "^8.0.1",
-    "chalk": "^5.4.1",
-    "cli-table3": "^0.6.5",
    "zod": "^3.23.8"
  },
  "engines": {
@@ -28,6 +28,7 @@ import * as google from '../../src/ai-providers/google.js';
import * as openai from '../../src/ai-providers/openai.js';
import * as xai from '../../src/ai-providers/xai.js';
import * as openrouter from '../../src/ai-providers/openrouter.js';
+import * as ollama from '../../src/ai-providers/ollama.js';
// TODO: Import other provider modules when implemented (ollama, etc.)

// Helper function to get cost for a specific model
@@ -96,6 +97,11 @@ const PROVIDER_FUNCTIONS = {
    generateText: openrouter.generateOpenRouterText,
    streamText: openrouter.streamOpenRouterText,
    generateObject: openrouter.generateOpenRouterObject
  },
+  ollama: {
+    generateText: ollama.generateOllamaText,
+    streamText: ollama.streamOllamaText,
+    generateObject: ollama.generateOllamaObject
+  }
  // TODO: Add entries for ollama, etc. when implemented
};
@@ -183,14 +189,10 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
    mistral: 'MISTRAL_API_KEY',
    azure: 'AZURE_OPENAI_API_KEY',
    openrouter: 'OPENROUTER_API_KEY',
-    xai: 'XAI_API_KEY'
+    xai: 'XAI_API_KEY',
+    ollama: 'OLLAMA_API_KEY'
  };

-  // Double check this -- I have had to use an api key for ollama in the past
-  // if (providerName === 'ollama') {
-  //   return null; // Ollama typically doesn't require an API key for basic setup
-  // }
-
  const envVarName = keyMap[providerName];
  if (!envVarName) {
    throw new Error(
@@ -199,6 +201,13 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
  }

  const apiKey = resolveEnvVariable(envVarName, session, projectRoot);
+
+  // Special handling for Ollama - API key is optional
+  if (providerName === 'ollama') {
+    return apiKey || null;
+  }
+
+  // For all other providers, API key is required
  if (!apiKey) {
    throw new Error(
      `Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`
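Taken together, the two hunks above register Ollama in `PROVIDER_FUNCTIONS` and make its API key optional in `_resolveApiKey`. As a rough sketch of how the unified service layer might then dispatch a request, under the assumption that a wrapper like the hypothetical `callProvider` below exists (it is not code from this diff):

```js
// Illustrative sketch only; not the actual ai-services-unified.js implementation.
async function callProvider(providerName, params, session, projectRoot) {
	const fns = PROVIDER_FUNCTIONS[providerName];
	if (!fns) {
		throw new Error(`Unsupported provider: ${providerName}`);
	}

	// For 'ollama' this may legitimately resolve to null (local server, no auth);
	// every other provider throws here if its key is missing.
	const apiKey = _resolveApiKey(providerName, session, projectRoot);

	// Provider modules receive the resolved key (unused by Ollama) plus the call params.
	return fns.generateText({ apiKey, ...params });
}
```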
src/ai-providers/ollama.js (new file, 163 lines)
@@ -0,0 +1,163 @@
/**
 * ollama.js
 * AI provider implementation for Ollama models using the ollama-ai-provider package.
 */

import { createOllama } from 'ollama-ai-provider';
import { log } from '../../scripts/modules/utils.js'; // Import logging utility
import { generateObject, generateText, streamText } from 'ai';

// Consider making model configurable via config-manager.js later
const DEFAULT_MODEL = 'llama3'; // Or a suitable default for Ollama
const DEFAULT_TEMPERATURE = 0.2;

function getClient(baseUrl) {
	// baseUrl is optional, defaults to http://localhost:11434
	return createOllama({
		baseUrl: baseUrl || undefined
	});
}
/**
 * Generates text using an Ollama model.
 *
 * @param {object} params - Parameters for the generation.
 * @param {string} params.modelId - Specific model ID to use (overrides default).
 * @param {number} params.temperature - Generation temperature.
 * @param {Array<object>} params.messages - The conversation history (system/user prompts).
 * @param {number} [params.maxTokens] - Optional max tokens.
 * @param {string} [params.baseUrl] - Optional Ollama base URL.
 * @returns {Promise<{text: string, usage: object}>} The generated text content and token usage.
 * @throws {Error} If API call fails.
 */
async function generateOllamaText({
	modelId = DEFAULT_MODEL,
	messages,
	maxTokens,
	temperature = DEFAULT_TEMPERATURE,
	baseUrl
}) {
	log('info', `Generating text with Ollama model: ${modelId}`);

	try {
		const client = getClient(baseUrl);
		const result = await generateText({
			model: client(modelId),
			messages,
			maxTokens,
			temperature
		});
		log('debug', `Ollama generated text: ${result.text}`);
		return {
			text: result.text,
			usage: {
				inputTokens: result.usage.promptTokens,
				outputTokens: result.usage.completionTokens
			}
		};
	} catch (error) {
		log(
			'error',
			`Error generating text with Ollama (${modelId}): ${error.message}`
		);
		throw error;
	}
}
/**
 * Streams text using an Ollama model.
 *
 * @param {object} params - Parameters for the streaming.
 * @param {string} params.modelId - Specific model ID to use (overrides default).
 * @param {number} params.temperature - Generation temperature.
 * @param {Array<object>} params.messages - The conversation history.
 * @param {number} [params.maxTokens] - Optional max tokens.
 * @param {string} [params.baseUrl] - Optional Ollama base URL.
 * @returns {Promise<object>} The streamText result, which exposes the stream of text deltas.
 * @throws {Error} If API call fails.
 */
async function streamOllamaText({
	modelId = DEFAULT_MODEL,
	temperature = DEFAULT_TEMPERATURE,
	messages,
	maxTokens,
	baseUrl
}) {
	log('info', `Streaming text with Ollama model: ${modelId}`);

	try {
		const ollama = getClient(baseUrl);
		const stream = await streamText({
			model: ollama(modelId), // Use the client-bound model, not the raw model ID string
			messages,
			temperature,
			maxTokens
		});
		return stream;
	} catch (error) {
		log(
			'error',
			`Error streaming text with Ollama (${modelId}): ${error.message}`
		);
		throw error;
	}
}
/**
 * Generates a structured object with an Ollama model via the Vercel AI SDK's generateObject.
 *
 * @param {object} params - Parameters for the object generation.
 * @param {string} params.modelId - Specific model ID to use (overrides default).
 * @param {number} params.temperature - Generation temperature.
 * @param {Array<object>} params.messages - The conversation history.
 * @param {import('zod').ZodSchema} params.schema - Zod schema for the expected object.
 * @param {string} params.objectName - Name for the object generation context.
 * @param {number} [params.maxTokens] - Optional max tokens.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
 * @param {string} [params.baseUrl] - Optional Ollama base URL.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails.
 */
async function generateOllamaObject({
	modelId = DEFAULT_MODEL,
	temperature = DEFAULT_TEMPERATURE,
	messages,
	schema,
	objectName = 'generated_object',
	maxTokens,
	maxRetries = 3,
	baseUrl
}) {
	log('info', `Generating object with Ollama model: ${modelId}`);
	try {
		const ollama = getClient(baseUrl);
		const result = await generateObject({
			model: ollama(modelId),
			mode: 'tool',
			schema: schema,
			messages: messages,
			// Name and description give the model context for the object it emits.
			schemaName: objectName,
			schemaDescription: `Generate a ${objectName} based on the prompt.`,
			maxTokens: maxTokens,
			temperature: temperature,
			maxRetries: maxRetries
		});
		return {
			object: result.object,
			usage: {
				inputTokens: result.usage.promptTokens,
				outputTokens: result.usage.completionTokens
			}
		};
	} catch (error) {
		log(
			'error',
			`Ollama generateObject ('${objectName}') failed: ${error.message}`
		);
		throw error;
	}
}

export { generateOllamaText, streamOllamaText, generateOllamaObject };
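To make the new provider's contract concrete, here is a minimal usage sketch of the `generateOllamaText` export above. It assumes a local Ollama server with the `llama3` model already pulled; the script itself and its import path are illustrative and not part of the diff.

```js
import { generateOllamaText } from './src/ai-providers/ollama.js';

// Minimal smoke test against a local Ollama instance (assumes `llama3` is available).
const { text, usage } = await generateOllamaText({
	modelId: 'llama3',
	messages: [
		{ role: 'system', content: 'You are a terse assistant.' },
		{ role: 'user', content: 'Say hello in five words or fewer.' }
	],
	maxTokens: 50,
	temperature: 0.2
	// baseUrl: 'http://localhost:11434/api' // optional override, per the changeset
});

console.log(text, usage);
```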