Merge branch 'next' of github.com:eyaltoledano/claude-task-master into v017-adds

Eyal Toledano
2025-05-28 10:08:14 -04:00
62 changed files with 23532 additions and 15326 deletions

View File

@@ -19,18 +19,41 @@ import {
MODEL_MAP,
getDebugFlag,
getBaseUrlForRole,
isApiKeySet
isApiKeySet,
getOllamaBaseURL,
getAzureBaseURL,
getVertexProjectId,
getVertexLocation
} from './config-manager.js';
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
import * as anthropic from '../../src/ai-providers/anthropic.js';
import * as perplexity from '../../src/ai-providers/perplexity.js';
import * as google from '../../src/ai-providers/google.js';
import * as openai from '../../src/ai-providers/openai.js';
import * as xai from '../../src/ai-providers/xai.js';
import * as openrouter from '../../src/ai-providers/openrouter.js';
import * as ollama from '../../src/ai-providers/ollama.js';
// TODO: Import other provider modules when implemented (ollama, etc.)
// Import provider classes
import {
AnthropicAIProvider,
PerplexityAIProvider,
GoogleAIProvider,
OpenAIProvider,
XAIProvider,
OpenRouterAIProvider,
OllamaAIProvider,
BedrockAIProvider,
AzureProvider,
VertexAIProvider
} from '../../src/ai-providers/index.js';
// Create provider instances
const PROVIDERS = {
anthropic: new AnthropicAIProvider(),
perplexity: new PerplexityAIProvider(),
google: new GoogleAIProvider(),
openai: new OpenAIProvider(),
xai: new XAIProvider(),
openrouter: new OpenRouterAIProvider(),
ollama: new OllamaAIProvider(),
bedrock: new BedrockAIProvider(),
azure: new AzureProvider(),
vertex: new VertexAIProvider()
};
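Each entry in PROVIDERS is an instance rather than a bare function map, so every provider exposes the same generateText / streamText / generateObject surface and the runner can dispatch by method name. A minimal sketch of the assumed shape (the class below and its internals are hypothetical; only the imported provider names come from the source):

// Hypothetical shape each provider class is assumed to follow.
class ExampleAIProvider {
  // params: { apiKey, modelId, messages, maxTokens, temperature, baseURL?, ... }
  async generateText(params) {
    // call the vendor SDK here and return { text, usage }
  }
  async streamText(params) {
    // return a stream of text chunks from the vendor SDK
  }
  async generateObject(params) {
    // return { object, usage }, validated against params.schema
  }
}

// Dispatch then reduces to a property lookup, e.g.:
// const result = await PROVIDERS[providerName][serviceType](callParams);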
// Helper function to get cost for a specific model
function _getCostForModel(providerName, modelId) {
@@ -62,51 +85,6 @@ function _getCostForModel(providerName, modelId) {
};
}
// --- Provider Function Map ---
// Maps provider names (lowercase) to their respective service functions
const PROVIDER_FUNCTIONS = {
anthropic: {
generateText: anthropic.generateAnthropicText,
streamText: anthropic.streamAnthropicText,
generateObject: anthropic.generateAnthropicObject
},
perplexity: {
generateText: perplexity.generatePerplexityText,
streamText: perplexity.streamPerplexityText,
generateObject: perplexity.generatePerplexityObject
},
google: {
// Add Google entry
generateText: google.generateGoogleText,
streamText: google.streamGoogleText,
generateObject: google.generateGoogleObject
},
openai: {
// ADD: OpenAI entry
generateText: openai.generateOpenAIText,
streamText: openai.streamOpenAIText,
generateObject: openai.generateOpenAIObject
},
xai: {
// ADD: xAI entry
generateText: xai.generateXaiText,
streamText: xai.streamXaiText,
generateObject: xai.generateXaiObject // Note: Object generation might be unsupported
},
openrouter: {
// ADD: OpenRouter entry
generateText: openrouter.generateOpenRouterText,
streamText: openrouter.streamOpenRouterText,
generateObject: openrouter.generateOpenRouterObject
},
ollama: {
generateText: ollama.generateOllamaText,
streamText: ollama.streamOllamaText,
generateObject: ollama.generateOllamaObject
}
// TODO: Add entries for ollama, etc. when implemented
};
// --- Configuration for Retries ---
const MAX_RETRIES = 2;
const INITIAL_RETRY_DELAY_MS = 1000;
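Together with the retry loop in _attemptProviderCallWithRetries below, these constants imply exponential backoff: up to two retries, waiting 1000 ms and then 2000 ms. A standalone sketch of that pattern (the real helper presumably also checks whether an error is retryable before looping):

async function withRetries(fn, maxRetries = 2, initialDelayMs = 1000) {
  let retries = 0;
  while (true) {
    try {
      return await fn();
    } catch (err) {
      if (retries >= maxRetries) throw err;
      const delayMs = initialDelayMs * 2 ** retries; // 1000 ms, then 2000 ms
      await new Promise((resolve) => setTimeout(resolve, delayMs));
      retries++;
    }
  }
}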
@@ -191,7 +169,9 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
xai: 'XAI_API_KEY',
ollama: 'OLLAMA_API_KEY'
ollama: 'OLLAMA_API_KEY',
bedrock: 'AWS_ACCESS_KEY_ID',
vertex: 'GOOGLE_API_KEY'
};
const envVarName = keyMap[providerName];
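From here the function defers to resolveEnvVariable, which looks the key up across the session, the process environment, and the project's .env file. A sketch of that lookup; the exact precedence order is an assumption, and readDotEnv is a hypothetical helper:

function resolveEnvVariableSketch(key, session, projectRoot) {
  if (session?.env?.[key]) return session.env[key]; // MCP session env, if present
  if (process.env[key]) return process.env[key]; // running process env
  // finally, a .env file at the project root (parsing elided):
  // return readDotEnv(projectRoot)?.[key];
  return undefined;
}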
@@ -203,12 +183,11 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
const apiKey = resolveEnvVariable(envVarName, session, projectRoot);
// Special handling for Ollama - API key is optional
if (providerName === 'ollama') {
// Special handling for providers that can use alternative auth
if (providerName === 'ollama' || providerName === 'bedrock') {
return apiKey || null;
}
// For all other providers, API key is required
if (!apiKey) {
throw new Error(
`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`
@@ -229,14 +208,15 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
* @throws {Error} If the call fails after all retries.
*/
async function _attemptProviderCallWithRetries(
providerApiFn,
provider,
serviceType,
callParams,
providerName,
modelId,
attemptRole
) {
let retries = 0;
const fnName = providerApiFn.name;
const fnName = serviceType;
while (retries <= MAX_RETRIES) {
try {
@@ -247,8 +227,8 @@ async function _attemptProviderCallWithRetries(
);
}
// Call the specific provider function directly
const result = await providerApiFn(callParams);
// Call the appropriate method on the provider instance
const result = await provider[serviceType](callParams);
if (getDebugFlag()) {
log(
@@ -350,9 +330,8 @@ async function _unifiedServiceRunner(serviceType, params) {
modelId,
apiKey,
roleParams,
providerFnSet,
providerApiFn,
baseUrl,
provider,
baseURL,
providerResponse,
telemetryData = null;
@@ -391,7 +370,20 @@ async function _unifiedServiceRunner(serviceType, params) {
continue;
}
// Check if API key is set for the current provider and role (excluding 'ollama')
// Get provider instance
provider = PROVIDERS[providerName?.toLowerCase()];
if (!provider) {
log(
'warn',
`Skipping role '${currentRole}': Provider '${providerName}' not supported.`
);
lastError =
lastError ||
new Error(`Unsupported provider configured: ${providerName}`);
continue;
}
// Check API key if needed
if (providerName?.toLowerCase() !== 'ollama') {
if (!isApiKeySet(providerName, session, effectiveProjectRoot)) {
log(
@@ -407,40 +399,70 @@ async function _unifiedServiceRunner(serviceType, params) {
}
}
// Get base URL if configured (optional for most providers)
baseURL = getBaseUrlForRole(currentRole, effectiveProjectRoot);
// For Azure, use the global Azure base URL if role-specific URL is not configured
if (providerName?.toLowerCase() === 'azure' && !baseURL) {
baseURL = getAzureBaseURL(effectiveProjectRoot);
log('debug', `Using global Azure base URL: ${baseURL}`);
} else if (providerName?.toLowerCase() === 'ollama' && !baseURL) {
// For Ollama, use the global Ollama base URL if role-specific URL is not configured
baseURL = getOllamaBaseURL(effectiveProjectRoot);
log('debug', `Using global Ollama base URL: ${baseURL}`);
}
// Get AI parameters for the current role
roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
if (!providerFnSet) {
log(
'warn',
`Skipping role '${currentRole}': Provider '${providerName}' not supported or map entry missing.`
);
lastError =
lastError ||
new Error(`Unsupported provider configured: ${providerName}`);
continue;
}
providerApiFn = providerFnSet[serviceType];
if (typeof providerApiFn !== 'function') {
log(
'warn',
`Skipping role '${currentRole}': Service type '${serviceType}' not implemented for provider '${providerName}'.`
);
lastError =
lastError ||
new Error(
`Service '${serviceType}' not implemented for provider ${providerName}`
);
continue;
}
apiKey = _resolveApiKey(
providerName?.toLowerCase(),
session,
effectiveProjectRoot
);
// Prepare provider-specific configuration
let providerSpecificParams = {};
// Handle Vertex AI specific configuration
if (providerName?.toLowerCase() === 'vertex') {
// Get Vertex project ID and location
const projectId =
getVertexProjectId(effectiveProjectRoot) ||
resolveEnvVariable(
'VERTEX_PROJECT_ID',
session,
effectiveProjectRoot
);
const location =
getVertexLocation(effectiveProjectRoot) ||
resolveEnvVariable(
'VERTEX_LOCATION',
session,
effectiveProjectRoot
) ||
'us-central1';
// Get credentials path if available
const credentialsPath = resolveEnvVariable(
'GOOGLE_APPLICATION_CREDENTIALS',
session,
effectiveProjectRoot
);
// Add Vertex-specific parameters
providerSpecificParams = {
projectId,
location,
...(credentialsPath && { credentials: { credentialsFromEnv: true } })
};
log(
'debug',
`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`
);
}
const messages = [];
if (systemPrompt) {
messages.push({ role: 'system', content: systemPrompt });
@@ -476,13 +498,15 @@ async function _unifiedServiceRunner(serviceType, params) {
maxTokens: roleParams.maxTokens,
temperature: roleParams.temperature,
messages,
baseUrl,
...(baseURL && { baseURL }),
...(serviceType === 'generateObject' && { schema, objectName }),
...providerSpecificParams,
...restApiParams
};
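The ...(baseURL && { baseURL }) construction relies on the fact that spreading a falsy value into an object literal is a no-op, so the key is only present when a value exists. For example:

const baseURL = undefined;
const callParams = {
  modelId: 'example-model', // hypothetical value
  ...(baseURL && { baseURL }) // contributes nothing while baseURL is falsy
};
console.log('baseURL' in callParams); // false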
providerResponse = await _attemptProviderCallWithRetries(
providerApiFn,
provider,
serviceType,
callParams,
providerName,
modelId,

View File

@@ -13,7 +13,7 @@ import http from 'http';
import inquirer from 'inquirer';
import ora from 'ora'; // Import ora
import { log, readJSON } from './utils.js';
import { log, readJSON, findProjectRoot } from './utils.js';
import {
parsePRD,
updateTasks,
@@ -77,7 +77,6 @@ import {
setModel,
getApiKeyStatusReport
} from './task-manager/models.js';
import { findProjectRoot } from './utils.js';
import {
isValidTaskStatus,
TASK_STATUS_OPTIONS
@@ -157,11 +156,11 @@ async function runInteractiveSetup(projectRoot) {
}
// Helper function to fetch Ollama models (duplicated for CLI context)
function fetchOllamaModelsCLI(baseUrl = 'http://localhost:11434/api') {
function fetchOllamaModelsCLI(baseURL = 'http://localhost:11434/api') {
return new Promise((resolve) => {
try {
// Parse the base URL to extract hostname, port, and base path
const url = new URL(baseUrl);
const url = new URL(baseURL);
const isHttps = url.protocol === 'https:';
const port = url.port || (isHttps ? 443 : 80);
const basePath = url.pathname.endsWith('/')
@@ -246,6 +245,11 @@ async function runInteractiveSetup(projectRoot) {
value: '__CUSTOM_OLLAMA__'
};
const customBedrockOption = {
name: '* Custom Bedrock model', // Add Bedrock custom option
value: '__CUSTOM_BEDROCK__'
};
let choices = [];
let defaultIndex = 0; // Default to 'Cancel'
@@ -292,6 +296,7 @@ async function runInteractiveSetup(projectRoot) {
commonPrefix.push(cancelOption);
commonPrefix.push(customOpenRouterOption);
commonPrefix.push(customOllamaOption);
commonPrefix.push(customBedrockOption);
let prefixLength = commonPrefix.length; // Initial prefix length
@@ -438,13 +443,13 @@ async function runInteractiveSetup(projectRoot) {
modelIdToSet = customId;
providerHint = 'ollama';
// Get the Ollama base URL from config for this role
const ollamaBaseUrl = getBaseUrlForRole(role, projectRoot);
const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
// Validate against live Ollama list
const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseUrl);
const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
if (ollamaModels === null) {
console.error(
chalk.red(
`Error: Unable to connect to Ollama server at ${ollamaBaseUrl}. Please ensure Ollama is running and try again.`
`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
)
);
setupSuccess = false;
@@ -457,12 +462,47 @@ async function runInteractiveSetup(projectRoot) {
);
console.log(
chalk.yellow(
`You can check available models with: curl ${ollamaBaseUrl}/tags`
`You can check available models with: curl ${ollamaBaseURL}/tags`
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
} else if (selectedValue === '__CUSTOM_BEDROCK__') {
isCustomSelection = true;
const { customId } = await inquirer.prompt([
{
type: 'input',
name: 'customId',
message: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = 'bedrock';
// Check if AWS environment variables exist
if (
!process.env.AWS_ACCESS_KEY_ID ||
!process.env.AWS_SECRET_ACCESS_KEY
) {
console.error(
chalk.red(
`Error: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Please set them before using custom Bedrock models.`
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
console.log(
chalk.blue(
`Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.`
)
);
} else if (
selectedValue &&
typeof selectedValue === 'object' &&
@@ -2553,6 +2593,10 @@ ${result.result}
'--ollama',
'Allow setting a custom Ollama model ID (use with --set-*)'
)
.option(
'--bedrock',
'Allow setting a custom Bedrock model ID (use with --set-*)'
)
.addHelpText(
'after',
`
@@ -2562,17 +2606,26 @@ Examples:
$ task-master models --set-research sonar-pro # Set research model
$ task-master models --set-fallback claude-3-5-sonnet-20241022 # Set fallback
$ task-master models --set-main my-custom-model --ollama # Set custom Ollama model for main role
$ task-master models --set-main anthropic.claude-3-sonnet-20240229-v1:0 --bedrock # Set custom Bedrock model for main role
$ task-master models --set-main some/other-model --openrouter # Set custom OpenRouter model for main role
$ task-master models --setup # Run interactive setup`
)
.action(async (options) => {
const projectRoot = findProjectRoot(); // Find project root for context
// Validate flags: cannot use both --openrouter and --ollama simultaneously
if (options.openrouter && options.ollama) {
const projectRoot = findProjectRoot();
if (!projectRoot) {
console.error(chalk.red('Error: Could not find project root.'));
process.exit(1);
}
// Validate flags: cannot use multiple provider flags simultaneously
const providerFlags = [
options.openrouter,
options.ollama,
options.bedrock
].filter(Boolean).length;
if (providerFlags > 1) {
console.error(
chalk.red(
'Error: Cannot use both --openrouter and --ollama flags simultaneously.'
'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock) simultaneously.'
)
);
process.exit(1);
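The nested ternaries below map whichever single flag survived validation to a provider hint. An equivalent table-driven alternative, shown only as a sketch (the flag values are hypothetical):

const options = { openrouter: false, ollama: true, bedrock: false };
const providerHint = [
  ['openrouter', options.openrouter],
  ['ollama', options.ollama],
  ['bedrock', options.bedrock]
].find(([, isSet]) => isSet)?.[0]; // 'ollama' here; undefined when no flag is given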
@@ -2612,7 +2665,9 @@ Examples:
? 'openrouter'
: options.ollama
? 'ollama'
: undefined
: options.bedrock
? 'bedrock'
: undefined
});
if (result.success) {
console.log(chalk.green(`${result.data.message}`));
@@ -2632,7 +2687,9 @@ Examples:
? 'openrouter'
: options.ollama
? 'ollama'
: undefined
: options.bedrock
? 'bedrock'
: undefined
});
if (result.success) {
console.log(chalk.green(`${result.data.message}`));
@@ -2654,7 +2711,9 @@ Examples:
? 'openrouter'
: options.ollama
? 'ollama'
: undefined
: options.bedrock
? 'bedrock'
: undefined
});
if (result.success) {
console.log(chalk.green(`${result.data.message}`));

View File

@@ -2,7 +2,7 @@ import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { fileURLToPath } from 'url';
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
// Calculate __dirname in ESM
const __filename = fileURLToPath(import.meta.url);
@@ -61,7 +61,7 @@ const DEFAULTS = {
defaultSubtasks: 5,
defaultPriority: 'medium',
projectName: 'Task Master',
ollamaBaseUrl: 'http://localhost:11434/api'
ollamaBaseURL: 'http://localhost:11434/api'
}
};
@@ -361,9 +361,34 @@ function getProjectName(explicitRoot = null) {
return getGlobalConfig(explicitRoot).projectName;
}
function getOllamaBaseUrl(explicitRoot = null) {
function getOllamaBaseURL(explicitRoot = null) {
// Directly return value from config
return getGlobalConfig(explicitRoot).ollamaBaseUrl;
return getGlobalConfig(explicitRoot).ollamaBaseURL;
}
function getAzureBaseURL(explicitRoot = null) {
// Directly return value from config
return getGlobalConfig(explicitRoot).azureBaseURL;
}
/**
* Gets the Google Cloud project ID for Vertex AI from configuration
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string|null} The project ID or null if not configured
*/
function getVertexProjectId(explicitRoot = null) {
// Return value from config
return getGlobalConfig(explicitRoot).vertexProjectId;
}
/**
* Gets the Google Cloud location for Vertex AI from configuration
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string} The location or default value of "us-central1"
*/
function getVertexLocation(explicitRoot = null) {
// Return value from config or default
return getGlobalConfig(explicitRoot).vertexLocation || 'us-central1';
}
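A usage sketch for the new getters, assuming vertexProjectId and vertexLocation live in the config's global section alongside ollamaBaseURL (the project ID below is hypothetical):

import { getVertexProjectId, getVertexLocation } from './config-manager.js';

const projectId = getVertexProjectId(); // e.g. 'my-gcp-project', or null/undefined when unset
const location = getVertexLocation(); // falls back to 'us-central1' when unset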
/**
@@ -450,7 +475,8 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
mistral: 'MISTRAL_API_KEY',
azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
xai: 'XAI_API_KEY'
xai: 'XAI_API_KEY',
vertex: 'GOOGLE_API_KEY' // Vertex uses the same key as Google
// Add other providers as needed
};
@@ -542,6 +568,10 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
apiKeyToCheck = mcpEnv.AZURE_OPENAI_API_KEY;
placeholderValue = 'YOUR_AZURE_OPENAI_API_KEY_HERE';
break;
case 'vertex':
apiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses Google API key
placeholderValue = 'YOUR_GOOGLE_API_KEY_HERE';
break;
default:
return false; // Unknown provider
}
@@ -707,8 +737,8 @@ function getAllProviders() {
function getBaseUrlForRole(role, explicitRoot = null) {
const roleConfig = getModelConfigForRole(role, explicitRoot);
return roleConfig && typeof roleConfig.baseUrl === 'string'
? roleConfig.baseUrl
return roleConfig && typeof roleConfig.baseURL === 'string'
? roleConfig.baseURL
: undefined;
}
@@ -718,14 +748,12 @@ export {
writeConfig,
ConfigurationError,
isConfigFilePresent,
// Validation
validateProvider,
validateProviderModelCombination,
VALID_PROVIDERS,
MODEL_MAP,
getAvailableModels,
// Role-specific getters (No env var overrides)
getMainProvider,
getMainModelId,
@@ -740,7 +768,6 @@ export {
getFallbackMaxTokens,
getFallbackTemperature,
getBaseUrlForRole,
// Global setting getters (No env var overrides)
getLogLevel,
getDebugFlag,
@@ -748,13 +775,15 @@ export {
getDefaultSubtasks,
getDefaultPriority,
getProjectName,
getOllamaBaseUrl,
getOllamaBaseURL,
getAzureBaseURL,
getParametersForRole,
getUserId,
// API Key Checkers (still relevant)
isApiKeySet,
getMcpApiKeyStatus,
// ADD: Function to get all provider names
getAllProviders
getAllProviders,
getVertexProjectId,
getVertexLocation
};

View File

@@ -5,14 +5,14 @@
"swe_score": 0.727,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 120000
"max_tokens": 64000
},
{
"id": "claude-opus-4-20250514",
"swe_score": 0.725,
"cost_per_1m_tokens": { "input": 15.0, "output": 75.0 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 120000
"max_tokens": 32000
},
{
"id": "claude-3-7-sonnet-20250219",

View File

@@ -308,7 +308,8 @@ function parseSubtasksFromText(
logger.error(
`Advanced extraction: Problematic JSON string for parse (first 500 chars): ${jsonToParse.substring(0, 500)}`
);
throw new Error( // Re-throw a more specific error if advanced also fails
throw new Error(
// Re-throw a more specific error if advanced also fails
`Failed to parse JSON response object after both simple and advanced attempts: ${parseError.message}`
);
}
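The surrounding parser (not shown in full) evidently attempts a plain JSON.parse before falling back to an 'advanced' extraction. A generic sketch of that two-stage pattern with a deliberately simple fallback; the project's actual extraction logic is assumed to be more involved:

function parseJsonWithFallback(text) {
  try {
    return JSON.parse(text); // simple attempt on the raw response
  } catch (simpleError) {
    try {
      // naive fallback: take the outermost {...} span and retry
      const start = text.indexOf('{');
      const end = text.lastIndexOf('}');
      if (start === -1 || end <= start) throw simpleError;
      return JSON.parse(text.slice(start, end + 1));
    } catch (advancedError) {
      throw new Error(
        `Failed to parse JSON response object after both simple and advanced attempts: ${advancedError.message}`
      );
    }
  }
}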

View File

@@ -72,14 +72,14 @@ function fetchOpenRouterModels() {
/**
* Fetches the list of models from Ollama instance.
* @param {string} baseUrl - The base URL for the Ollama API (e.g., "http://localhost:11434/api")
* @param {string} baseURL - The base URL for the Ollama API (e.g., "http://localhost:11434/api")
* @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails.
*/
function fetchOllamaModels(baseUrl = 'http://localhost:11434/api') {
function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
return new Promise((resolve) => {
try {
// Parse the base URL to extract hostname, port, and base path
const url = new URL(baseUrl);
const url = new URL(baseURL);
const isHttps = url.protocol === 'https:';
const port = url.port || (isHttps ? 443 : 80);
const basePath = url.pathname.endsWith('/')
@@ -484,13 +484,13 @@ async function setModel(role, modelId, options = {}) {
report('info', `Checking Ollama for ${modelId} (as hinted)...`);
// Get the Ollama base URL from config
const ollamaBaseUrl = getBaseUrlForRole(role, projectRoot);
const ollamaModels = await fetchOllamaModels(ollamaBaseUrl);
const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
const ollamaModels = await fetchOllamaModels(ollamaBaseURL);
if (ollamaModels === null) {
// Connection failed - server probably not running
throw new Error(
`Unable to connect to Ollama server at ${ollamaBaseUrl}. Please ensure Ollama is running and try again.`
`Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
);
} else if (ollamaModels.some((m) => m.model === modelId)) {
determinedProvider = 'ollama';
@@ -498,7 +498,7 @@ async function setModel(role, modelId, options = {}) {
report('warn', warningMessage);
} else {
// Server is running but model not found
const tagsUrl = `${ollamaBaseUrl}/tags`;
const tagsUrl = `${ollamaBaseURL}/tags`;
throw new Error(
`Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}`
);
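The WHATWG URL parsing used above splits the configured base URL into the pieces the raw HTTP request needs. For the default value:

const url = new URL('http://localhost:11434/api');
console.log(url.protocol); // 'http:'
console.log(url.hostname); // 'localhost'
console.log(url.port); // '11434'
console.log(url.pathname); // '/api'
// When a URL carries no explicit port, url.port is '' and the code
// falls back to 443 or 80 (the host below is hypothetical):
console.log(new URL('https://ollama.example.com/api').port); // ''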

View File

@@ -60,8 +60,7 @@ function resolveEnvVariable(key, session = null, projectRoot = null) {
// --- Project Root Finding Utility ---
/**
* Finds the project root directory by searching upwards from a given starting point
* for a marker file or directory (e.g., 'package.json', '.git').
* Finds the project root directory by searching for marker files/directories.
* @param {string} [startPath=process.cwd()] - The directory to start searching from.
* @param {string[]} [markers=['package.json', '.git', '.taskmasterconfig']] - Marker files/dirs to look for.
* @returns {string|null} The path to the project root directory, or null if not found.
@@ -71,27 +70,35 @@ function findProjectRoot(
markers = ['package.json', '.git', '.taskmasterconfig']
) {
let currentPath = path.resolve(startPath);
while (true) {
for (const marker of markers) {
if (fs.existsSync(path.join(currentPath, marker))) {
return currentPath;
}
const rootPath = path.parse(currentPath).root;
while (currentPath !== rootPath) {
// Check if any marker exists in the current directory
const hasMarker = markers.some((marker) => {
const markerPath = path.join(currentPath, marker);
return fs.existsSync(markerPath);
});
if (hasMarker) {
return currentPath;
}
const parentPath = path.dirname(currentPath);
if (parentPath === currentPath) {
// Reached the filesystem root
return null;
}
currentPath = parentPath;
// Move up one directory
currentPath = path.dirname(currentPath);
}
// Check the root directory as well
const hasMarkerInRoot = markers.some((marker) => {
const markerPath = path.join(rootPath, marker);
return fs.existsSync(markerPath);
});
return hasMarkerInRoot ? rootPath : null;
}
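A quick behavioral sketch of the rewritten helper (the paths are hypothetical):

import { findProjectRoot } from './utils.js';

// Walks upward from the start path, checking the filesystem root last:
const root = findProjectRoot('/home/user/my-project/src/lib');
// → '/home/user/my-project' if that directory holds package.json, .git,
//   or .taskmasterconfig
// → null when no directory up to and including '/' carries a marker
console.log(root);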
// --- Dynamic Configuration Function --- (REMOVED)
/*
function getConfig(session = null) {
// ... implementation removed ...
}
*/
// --- Logging and Utility Functions ---
// Set up logging based on log level
const LOG_LEVELS = {