From 9ec3733d22e628b8c57206223b3fd700391ee900 Mon Sep 17 00:00:00 2001
From: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
Date: Sat, 17 May 2025 23:48:21 +0200
Subject: [PATCH] feat: add ollama support

---
 .changeset/free-bikes-smile.md         |  11 ++
 README.md                              |   3 +-
 assets/env.example                     |   3 +-
 package.json                           |   6 +-
 scripts/modules/ai-services-unified.js |  21 +++-
 src/ai-providers/ollama.js             | 151 +++++++++++++++++++++++++
 6 files changed, 184 insertions(+), 11 deletions(-)
 create mode 100644 .changeset/free-bikes-smile.md
 create mode 100644 src/ai-providers/ollama.js

diff --git a/.changeset/free-bikes-smile.md b/.changeset/free-bikes-smile.md
new file mode 100644
index 00000000..669437e6
--- /dev/null
+++ b/.changeset/free-bikes-smile.md
@@ -0,0 +1,11 @@
+---
+'task-master-ai': minor
+---
+
+Add Ollama as a supported AI provider.
+
+- You can now add it by running `task-master models --setup` and selecting it.
+- Ollama is a local model provider, so no API key is required.
+- Ollama models are available at `http://localhost:11434/api` by default.
+- You can change the default URL by setting the `OLLAMA_BASE_URL` environment variable or by adding a `baseUrl` property to the `ollama` model role in `.taskmasterconfig`.
+  - If you want to use a custom API key, you can set it in the `OLLAMA_API_KEY` environment variable.
diff --git a/README.md b/README.md
index 2949f682..c95e208d 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,8 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
         "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
         "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
         "XAI_API_KEY": "YOUR_XAI_KEY_HERE",
-        "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
+        "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
+        "OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
       }
     }
   }
diff --git a/assets/env.example b/assets/env.example
index 1c9b41e4..c7142879 100644
--- a/assets/env.example
+++ b/assets/env.example
@@ -5,4 +5,5 @@ OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/Ope
 GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
 MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
 XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
-AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
\ No newline at end of file
+AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
+OLLAMA_API_KEY="your_ollama_api_key_here" # Optional: For remote Ollama servers that require authentication.
\ No newline at end of file
diff --git a/package.json b/package.json
index 039bcf41..5e39e4c1 100644
--- a/package.json
+++ b/package.json
@@ -49,6 +49,9 @@
 		"@anthropic-ai/sdk": "^0.39.0",
 		"@openrouter/ai-sdk-provider": "^0.4.5",
 		"ai": "^4.3.10",
+		"boxen": "^8.0.1",
+		"chalk": "^5.4.1",
+		"cli-table3": "^0.6.5",
 		"commander": "^11.1.0",
 		"cors": "^2.8.5",
 		"dotenv": "^16.3.1",
@@ -65,9 +68,6 @@
 		"openai": "^4.89.0",
 		"ora": "^8.2.0",
 		"uuid": "^11.1.0",
-		"boxen": "^8.0.1",
-		"chalk": "^5.4.1",
-		"cli-table3": "^0.6.5",
 		"zod": "^3.23.8"
 	},
 	"engines": {
diff --git a/scripts/modules/ai-services-unified.js b/scripts/modules/ai-services-unified.js
index da958986..359c6219 100644
--- a/scripts/modules/ai-services-unified.js
+++ b/scripts/modules/ai-services-unified.js
@@ -25,6 +25,7 @@ import * as google from '../../src/ai-providers/google.js';
 import * as openai from '../../src/ai-providers/openai.js';
 import * as xai from '../../src/ai-providers/xai.js';
 import * as openrouter from '../../src/ai-providers/openrouter.js';
+import * as ollama from '../../src/ai-providers/ollama.js';
 // TODO: Import other provider modules when implemented (ollama, etc.)

 // --- Provider Function Map ---
@@ -63,6 +64,11 @@
 		generateText: openrouter.generateOpenRouterText,
 		streamText: openrouter.streamOpenRouterText,
 		generateObject: openrouter.generateOpenRouterObject
+	},
+	ollama: {
+		generateText: ollama.generateOllamaText,
+		streamText: ollama.streamOllamaText,
+		generateObject: ollama.generateOllamaObject
 	}
 	// TODO: Add entries for ollama, etc. when implemented
 };
@@ -150,14 +156,10 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
 		mistral: 'MISTRAL_API_KEY',
 		azure: 'AZURE_OPENAI_API_KEY',
 		openrouter: 'OPENROUTER_API_KEY',
-		xai: 'XAI_API_KEY'
+		xai: 'XAI_API_KEY',
+		ollama: 'OLLAMA_API_KEY'
 	};

-	// Double check this -- I have had to use an api key for ollama in the past
-	// if (providerName === 'ollama') {
-	// 	return null; // Ollama typically doesn't require an API key for basic setup
-	// }
-
 	const envVarName = keyMap[providerName];
 	if (!envVarName) {
 		throw new Error(
@@ -166,6 +168,13 @@
 	}

 	const apiKey = resolveEnvVariable(envVarName, session, projectRoot);
+
+	// Special handling for Ollama - API key is optional
+	if (providerName === 'ollama') {
+		return apiKey || null;
+	}
+
+	// For all other providers, API key is required
 	if (!apiKey) {
 		throw new Error(
 			`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`
diff --git a/src/ai-providers/ollama.js b/src/ai-providers/ollama.js
new file mode 100644
index 00000000..28779c48
--- /dev/null
+++ b/src/ai-providers/ollama.js
@@ -0,0 +1,151 @@
+/**
+ * ollama.js
+ * AI provider implementation for Ollama models using the ollama-ai-provider package.
+ */
+
+import { createOllama } from 'ollama-ai-provider';
+import { log } from '../../scripts/modules/utils.js'; // Import logging utility
+import { generateObject, generateText, streamText } from 'ai';
+
+// Consider making model configurable via config-manager.js later
+const DEFAULT_MODEL = 'llama3'; // Or a suitable default for Ollama
+const DEFAULT_TEMPERATURE = 0.2;
+
+function getClient(baseUrl) {
+	// baseUrl is optional; ollama-ai-provider takes it as `baseURL` and defaults to http://localhost:11434/api
+	return createOllama({
+		baseURL: baseUrl || undefined
+	});
+}
+
+/**
+ * Generates text using an Ollama model.
+ *
+ * @param {object} params - Parameters for the generation.
+ * @param {string} params.modelId - Specific model ID to use (overrides default).
+ * @param {number} params.temperature - Generation temperature.
+ * @param {Array} params.messages - The conversation history (system/user prompts).
+ * @param {number} [params.maxTokens] - Optional max tokens.
+ * @param {string} [params.baseUrl] - Optional Ollama base URL.
+ * @returns {Promise} The generated text content.
+ * @throws {Error} If API call fails.
+ */
+async function generateOllamaText({
+	modelId = DEFAULT_MODEL,
+	messages,
+	maxTokens,
+	temperature = DEFAULT_TEMPERATURE,
+	baseUrl
+}) {
+	log('info', `Generating text with Ollama model: ${modelId}`);
+
+	try {
+		const client = getClient(baseUrl);
+		const result = await generateText({
+			model: client(modelId),
+			messages,
+			maxTokens,
+			temperature
+		});
+		log('debug', `Ollama generated text: ${result.text}`);
+		return result.text;
+	} catch (error) {
+		log(
+			'error',
+			`Error generating text with Ollama (${modelId}): ${error.message}`
+		);
+		throw error;
+	}
+}
+
+/**
+ * Streams text using an Ollama model.
+ *
+ * @param {object} params - Parameters for the streaming.
+ * @param {string} params.modelId - Specific model ID to use (overrides default).
+ * @param {number} params.temperature - Generation temperature.
+ * @param {Array} params.messages - The conversation history.
+ * @param {number} [params.maxTokens] - Optional max tokens.
+ * @param {string} [params.baseUrl] - Optional Ollama base URL.
+ * @returns {Promise} A readable stream of text deltas.
+ * @throws {Error} If API call fails.
+ */
+async function streamOllamaText({
+	modelId = DEFAULT_MODEL,
+	temperature = DEFAULT_TEMPERATURE,
+	messages,
+	maxTokens,
+	baseUrl
+}) {
+	log('info', `Streaming text with Ollama model: ${modelId}`);
+
+	try {
+		const client = getClient(baseUrl);
+		const stream = await streamText({
+			model: client(modelId), // pass a model instance, not the bare model ID string
+			messages,
+			temperature,
+			maxTokens
+		});
+		return stream;
+	} catch (error) {
+		log(
+			'error',
+			`Error streaming text with Ollama (${modelId}): ${error.message}`
+		);
+		throw error;
+	}
+}
+
+/**
+ * Generates a structured object with an Ollama model using the Vercel AI SDK's generateObject.
+ *
+ * @param {object} params - Parameters for the object generation.
+ * @param {string} params.modelId - Specific model ID to use (overrides default).
+ * @param {number} params.temperature - Generation temperature.
+ * @param {Array} params.messages - The conversation history.
+ * @param {import('zod').ZodSchema} params.schema - Zod schema for the expected object.
+ * @param {string} params.objectName - Name for the object generation context.
+ * @param {number} [params.maxTokens] - Optional max tokens.
+ * @param {number} [params.maxRetries] - Max retries for validation/generation.
+ * @param {string} [params.baseUrl] - Optional Ollama base URL.
+ * @returns {Promise} The generated object matching the schema.
+ * @throws {Error} If generation or validation fails.
+ */
+async function generateOllamaObject({
+	modelId = DEFAULT_MODEL,
+	temperature = DEFAULT_TEMPERATURE,
+	messages,
+	schema,
+	objectName = 'generated_object',
+	maxTokens,
+	maxRetries = 3,
+	baseUrl
+}) {
+	log('info', `Generating object with Ollama model: ${modelId}`);
+	try {
+		const client = getClient(baseUrl);
+		const result = await generateObject({
+			model: client(modelId),
+			mode: 'tool',
+			schema: schema,
+			messages: messages,
+			// Label the tool-mode generation with the object name/description via the
+			// AI SDK's schemaName/schemaDescription options.
+			schemaName: objectName,
+			schemaDescription: `Generate a ${objectName} based on the prompt.`,
+			maxTokens: maxTokens,
+			temperature: temperature,
+			maxRetries: maxRetries
+		});
+		return result.object;
+	} catch (error) {
+		log(
+			'error',
+			`Ollama generateObject ('${objectName}') failed: ${error.message}`
+		);
+		throw error;
+	}
+}
+
+export { generateOllamaText, streamOllamaText, generateOllamaObject };
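
For reference, a minimal usage sketch of the new provider module (illustrative, not part of the patch). It assumes a running local Ollama server at the default `http://localhost:11434/api` with the `llama3` model pulled; the relative import path is hypothetical.

```js
// Sketch: call the new Ollama provider directly (assumes a local Ollama server
// with the `llama3` model available; import path is illustrative).
import { generateOllamaText } from './src/ai-providers/ollama.js';

const text = await generateOllamaText({
	modelId: 'llama3',
	temperature: 0.2,
	messages: [
		{ role: 'system', content: 'You are a terse assistant.' },
		{ role: 'user', content: 'Say hello in five words or fewer.' }
	],
	maxTokens: 64
});
console.log(text);
```

Within Task Master itself, these functions are reached through the `PROVIDER_FUNCTIONS` map in `ai-services-unified.js` (see the hunk above) rather than imported directly.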