Compare commits
1 Commit
feat/imple...

| Author | SHA1 | Date |
|---|---|---|
| crunchyman | 9ec3733d22 | |
.changeset/free-bikes-smile.md (new file, +11 lines)
@@ -0,0 +1,11 @@
---
'task-master-ai': minor
---

Add Ollama as a supported AI provider:

- You can now add it by running `task-master models --setup` and selecting it.
- Ollama is a local model provider, so no API key is required by default.
- Ollama models are served at `http://localhost:11434/api` by default.
- You can change the default URL by setting the `OLLAMA_BASE_URL` environment variable or by adding a `baseUrl` property to the `ollama` model role in `.taskmasterconfig`.
- If your Ollama server requires an API key, you can set it in the `OLLAMA_API_KEY` environment variable.
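For illustration, the `baseUrl` override mentioned above might look like this in `.taskmasterconfig`. This is only a sketch: the `models`/`main` layout and the `modelId` field are assumed from Task Master's role-based config and are not shown in this diff.

```json
{
	"models": {
		"main": {
			"provider": "ollama",
			"modelId": "llama3",
			"baseUrl": "http://localhost:11434/api"
		}
	}
}
```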
@@ -36,7 +36,8 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task Master
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
-"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
+"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
+"OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
}
}
}
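Applied, this hunk leaves the tail of the MCP `env` block looking roughly like the sketch below. Only the `env` contents come from the diff; the outer `mcpServers`/`taskmaster-ai` keys are assumed from a typical MCP client config.

```json
{
	"mcpServers": {
		"taskmaster-ai": {
			"env": {
				"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
				"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
				"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
				"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
				"OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
			}
		}
	}
}
```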
@@ -6,3 +6,4 @@ GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
+OLLAMA_API_KEY="your_ollama_api_key_here" # Optional: For remote Ollama servers that require authentication.
@@ -49,6 +49,9 @@
"@anthropic-ai/sdk": "^0.39.0",
"@openrouter/ai-sdk-provider": "^0.4.5",
"ai": "^4.3.10",
+"boxen": "^8.0.1",
+"chalk": "^5.4.1",
+"cli-table3": "^0.6.5",
"commander": "^11.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
@@ -65,9 +68,6 @@
"openai": "^4.89.0",
"ora": "^8.2.0",
"uuid": "^11.1.0",
-"boxen": "^8.0.1",
-"chalk": "^5.4.1",
-"cli-table3": "^0.6.5",
"zod": "^3.23.8"
},
"engines": {
@@ -25,6 +25,7 @@ import * as google from '../../src/ai-providers/google.js';
import * as openai from '../../src/ai-providers/openai.js';
import * as xai from '../../src/ai-providers/xai.js';
import * as openrouter from '../../src/ai-providers/openrouter.js';
+import * as ollama from '../../src/ai-providers/ollama.js';
// TODO: Import other provider modules when implemented (ollama, etc.)

// --- Provider Function Map ---
@@ -63,6 +64,11 @@ const PROVIDER_FUNCTIONS = {
generateText: openrouter.generateOpenRouterText,
streamText: openrouter.streamOpenRouterText,
generateObject: openrouter.generateOpenRouterObject
},
+ollama: {
+	generateText: ollama.generateOllamaText,
+	streamText: ollama.streamOllamaText,
+	generateObject: ollama.generateOllamaObject
+}
// TODO: Add entries for ollama, etc. when implemented
};
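As context for this map, here is a minimal sketch of how a unified layer can dispatch on it. The function and parameter names are illustrative; only the `PROVIDER_FUNCTIONS` shape comes from the diff above.

```js
// Illustrative dispatcher over the PROVIDER_FUNCTIONS map shown above.
async function callProviderService(providerName, serviceType, params) {
	const providerFns = PROVIDER_FUNCTIONS[providerName];
	const fn = providerFns && providerFns[serviceType];
	if (!fn) {
		throw new Error(
			`Unsupported provider or service: ${providerName}/${serviceType}`
		);
	}
	// e.g. callProviderService('ollama', 'generateText', { modelId: 'llama3', messages })
	return fn(params);
}
```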
@@ -150,14 +156,10 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
mistral: 'MISTRAL_API_KEY',
azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
-xai: 'XAI_API_KEY'
+xai: 'XAI_API_KEY',
+ollama: 'OLLAMA_API_KEY'
};

-// Double check this -- I have had to use an api key for ollama in the past
-// if (providerName === 'ollama') {
-// return null; // Ollama typically doesn't require an API key for basic setup
-// }
-
const envVarName = keyMap[providerName];
if (!envVarName) {
throw new Error(
@@ -166,6 +168,13 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
}

const apiKey = resolveEnvVariable(envVarName, session, projectRoot);
+
+// Special handling for Ollama - API key is optional
+if (providerName === 'ollama') {
+	return apiKey || null;
+}
+
+// For all other providers, API key is required
if (!apiKey) {
throw new Error(
`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`
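The practical effect of this hunk, sketched below with hypothetical calls (not code from the PR):

```js
// With OLLAMA_API_KEY unset in env, session, and .env, Ollama no longer throws:
const ollamaKey = _resolveApiKey('ollama', session); // -> null
// Every other provider still requires its key:
const mistralKey = _resolveApiKey('mistral', session); // -> throws if MISTRAL_API_KEY is unset
```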
src/ai-providers/ollama.js (new file, +151 lines)
@@ -0,0 +1,151 @@
/**
 * ollama.js
 * AI provider implementation for Ollama models using the ollama-ai-provider package.
 */

import { createOllama } from 'ollama-ai-provider';
import { log } from '../../scripts/modules/utils.js'; // Import logging utility
import { generateObject, generateText, streamText } from 'ai';

// Consider making model configurable via config-manager.js later
const DEFAULT_MODEL = 'llama3'; // Or a suitable default for Ollama
const DEFAULT_TEMPERATURE = 0.2;

function getClient(baseUrl) {
	// baseUrl is optional; note ollama-ai-provider spells the option `baseURL`
	// and defaults to http://localhost:11434/api when it is not set.
	return createOllama({
		baseURL: baseUrl || undefined
	});
}

/**
 * Generates text using an Ollama model.
 *
 * @param {object} params - Parameters for the generation.
 * @param {string} params.modelId - Specific model ID to use (overrides default).
 * @param {number} params.temperature - Generation temperature.
 * @param {Array<object>} params.messages - The conversation history (system/user prompts).
 * @param {number} [params.maxTokens] - Optional max tokens.
 * @param {string} [params.baseUrl] - Optional Ollama base URL.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If API call fails.
 */
async function generateOllamaText({
	modelId = DEFAULT_MODEL,
	messages,
	maxTokens,
	temperature = DEFAULT_TEMPERATURE,
	baseUrl
}) {
	log('info', `Generating text with Ollama model: ${modelId}`);

	try {
		const client = getClient(baseUrl);
		const result = await generateText({
			model: client(modelId),
			messages,
			maxTokens,
			temperature
		});
		log('debug', `Ollama generated text: ${result.text}`);
		return result.text;
	} catch (error) {
		log(
			'error',
			`Error generating text with Ollama (${modelId}): ${error.message}`
		);
		throw error;
	}
}

/**
 * Streams text using an Ollama model.
 *
 * @param {object} params - Parameters for the streaming.
 * @param {string} params.modelId - Specific model ID to use (overrides default).
 * @param {number} params.temperature - Generation temperature.
 * @param {Array<object>} params.messages - The conversation history.
 * @param {number} [params.maxTokens] - Optional max tokens.
 * @param {string} [params.baseUrl] - Optional Ollama base URL.
 * @returns {Promise<object>} The streamText result object (exposes the text stream).
 * @throws {Error} If API call fails.
 */
async function streamOllamaText({
	modelId = DEFAULT_MODEL,
	temperature = DEFAULT_TEMPERATURE,
	messages,
	maxTokens,
	baseUrl
}) {
	log('info', `Streaming text with Ollama model: ${modelId}`);

	try {
		const client = getClient(baseUrl);
		const stream = await streamText({
			model: client(modelId), // wrap the model ID in the client, not the bare string
			messages,
			temperature,
			maxTokens
		});
		return stream;
	} catch (error) {
		log(
			'error',
			`Error streaming text with Ollama (${modelId}): ${error.message}`
		);
		throw error;
	}
}

/**
 * Generates a structured object using an Ollama model via the Vercel AI SDK's generateObject.
 *
 * @param {object} params - Parameters for the object generation.
 * @param {string} params.modelId - Specific model ID to use (overrides default).
 * @param {number} params.temperature - Generation temperature.
 * @param {Array<object>} params.messages - The conversation history.
 * @param {import('zod').ZodSchema} params.schema - Zod schema for the expected object.
 * @param {string} params.objectName - Name for the object generation context.
 * @param {number} [params.maxTokens] - Optional max tokens.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
 * @param {string} [params.baseUrl] - Optional Ollama base URL.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails.
 */
async function generateOllamaObject({
	modelId = DEFAULT_MODEL,
	temperature = DEFAULT_TEMPERATURE,
	messages,
	schema,
	objectName = 'generated_object',
	maxTokens,
	maxRetries = 3,
	baseUrl
}) {
	log('info', `Generating object with Ollama model: ${modelId}`);
	try {
		const client = getClient(baseUrl);
		const result = await generateObject({
			model: client(modelId),
			mode: 'tool',
			schema,
			messages,
			// generateObject in ai v4 takes schemaName/schemaDescription (there is
			// no `tool` parameter) and maxTokens (maxOutputTokens is the v5 name).
			schemaName: objectName,
			schemaDescription: `Generate a ${objectName} based on the prompt.`,
			maxTokens,
			temperature,
			maxRetries
		});
		return result.object;
	} catch (error) {
		log(
			'error',
			`Ollama generateObject ('${objectName}') failed: ${error.message}`
		);
		throw error;
	}
}

export { generateOllamaText, streamOllamaText, generateOllamaObject };
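For reference, a short usage sketch of the new module's exports. It is illustrative only: it assumes an ESM context with top-level await, a local `ollama serve` with the `llama3` model pulled, and a zod schema invented for the example.

```js
import { z } from 'zod';
import {
	generateOllamaText,
	generateOllamaObject
} from './src/ai-providers/ollama.js';

// Plain text generation against the default local endpoint.
const text = await generateOllamaText({
	modelId: 'llama3',
	messages: [{ role: 'user', content: 'Summarize what a changeset is.' }]
});

// Structured output: the schema below is made up for this example.
const task = await generateOllamaObject({
	modelId: 'llama3',
	objectName: 'task',
	schema: z.object({
		title: z.string(),
		priority: z.enum(['low', 'medium', 'high'])
	}),
	messages: [{ role: 'user', content: 'Create a task for writing docs.' }]
});

console.log(text, task);
```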