feat(telemetry): Integrate AI usage telemetry into update-tasks
This commit applies the standard telemetry pattern to the command and its corresponding MCP tool.
Key Changes:
1. **Core Logic (`updateTasks`):**
- The call to `generateTextService` now includes `commandName: 'update-tasks'` and `outputType`.
- The full `aiServiceResponse` is captured.
- `aiServiceResponse.mainResult` (the AI-generated text) is used for parsing the updated task JSON.
- If running in CLI mode (`outputFormat === 'text'`), `displayAiUsageSummary` is called with the `telemetryData`.
- The function now returns `telemetryData` in its result object.
2. **Direct Function (`updateTasksDirect`):**
- The call to the core function now passes the necessary context for telemetry (`session`, `projectRoot`).
- The successful response object now correctly extracts `result.telemetryData` and includes it in the `data.telemetryData` field returned to the MCP client.
This commit is contained in:
@@ -81,7 +81,6 @@ export async function updateTasksDirect(args, log, context = {}) {
|
|||||||
'json'
|
'json'
|
||||||
);
|
);
|
||||||
|
|
||||||
// updateTasks returns { success: true, updatedTasks: [...] } on success
|
|
||||||
if (result && result.success && Array.isArray(result.updatedTasks)) {
|
if (result && result.success && Array.isArray(result.updatedTasks)) {
|
||||||
logWrapper.success(
|
logWrapper.success(
|
||||||
`Successfully updated ${result.updatedTasks.length} tasks.`
|
`Successfully updated ${result.updatedTasks.length} tasks.`
|
||||||
@@ -91,7 +90,8 @@ export async function updateTasksDirect(args, log, context = {}) {
|
|||||||
data: {
|
data: {
|
||||||
message: `Successfully updated ${result.updatedTasks.length} tasks.`,
|
message: `Successfully updated ${result.updatedTasks.length} tasks.`,
|
||||||
tasksFile,
|
tasksFile,
|
||||||
updatedCount: result.updatedTasks.length
|
updatedCount: result.updatedTasks.length,
|
||||||
|
telemetryData: result.telemetryData
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -15,7 +15,8 @@ import {
|
|||||||
import {
|
import {
|
||||||
getStatusWithColor,
|
getStatusWithColor,
|
||||||
startLoadingIndicator,
|
startLoadingIndicator,
|
||||||
stopLoadingIndicator
|
stopLoadingIndicator,
|
||||||
|
displayAiUsageSummary
|
||||||
} from '../ui.js';
|
} from '../ui.js';
|
||||||
|
|
||||||
import { getDebugFlag } from '../config-manager.js';
|
import { getDebugFlag } from '../config-manager.js';
|
||||||
@@ -350,26 +351,100 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
|
|||||||
const userPrompt = `Here are the tasks to update:\n${taskDataString}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`;
|
const userPrompt = `Here are the tasks to update:\n${taskDataString}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`;
|
||||||
// --- End Build Prompts ---
|
// --- End Build Prompts ---
|
||||||
|
|
||||||
|
// --- AI Call ---
|
||||||
let loadingIndicator = null;
|
let loadingIndicator = null;
|
||||||
if (outputFormat === 'text') {
|
let aiServiceResponse = null;
|
||||||
loadingIndicator = startLoadingIndicator('Updating tasks...\n');
|
|
||||||
|
if (!isMCP && outputFormat === 'text') {
|
||||||
|
loadingIndicator = startLoadingIndicator('Updating tasks with AI...\n');
|
||||||
}
|
}
|
||||||
|
|
||||||
let responseText = '';
|
|
||||||
let updatedTasks;
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// --- Call Unified AI Service ---
|
// Determine role based on research flag
|
||||||
const role = useResearch ? 'research' : 'main';
|
const serviceRole = useResearch ? 'research' : 'main';
|
||||||
|
|
||||||
responseText = await generateTextService({
|
// Call the unified AI service
|
||||||
prompt: userPrompt,
|
aiServiceResponse = await generateTextService({
|
||||||
|
role: serviceRole,
|
||||||
|
session: session,
|
||||||
|
projectRoot: projectRoot,
|
||||||
systemPrompt: systemPrompt,
|
systemPrompt: systemPrompt,
|
||||||
role,
|
prompt: userPrompt,
|
||||||
session,
|
commandName: 'update-tasks',
|
||||||
projectRoot
|
outputType: isMCP ? 'mcp' : 'cli'
|
||||||
});
|
});
|
||||||
// --- End AI Service Call ---
|
|
||||||
|
if (loadingIndicator)
|
||||||
|
stopLoadingIndicator(loadingIndicator, 'AI update complete.');
|
||||||
|
|
||||||
|
// Use the mainResult (text) for parsing
|
||||||
|
const parsedUpdatedTasks = parseUpdatedTasksFromText(
|
||||||
|
aiServiceResponse.mainResult,
|
||||||
|
tasksToUpdate.length,
|
||||||
|
logFn,
|
||||||
|
isMCP
|
||||||
|
);
|
||||||
|
|
||||||
|
// --- Update Tasks Data (Unchanged) ---
|
||||||
|
if (!Array.isArray(parsedUpdatedTasks)) {
|
||||||
|
// Should be caught by parser, but extra check
|
||||||
|
throw new Error(
|
||||||
|
'Parsed AI response for updated tasks was not an array.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if (isMCP)
|
||||||
|
logFn.info(
|
||||||
|
`Received ${parsedUpdatedTasks.length} updated tasks from AI.`
|
||||||
|
);
|
||||||
|
else
|
||||||
|
logFn(
|
||||||
|
'info',
|
||||||
|
`Received ${parsedUpdatedTasks.length} updated tasks from AI.`
|
||||||
|
);
|
||||||
|
// Create a map for efficient lookup
|
||||||
|
const updatedTasksMap = new Map(
|
||||||
|
parsedUpdatedTasks.map((task) => [task.id, task])
|
||||||
|
);
|
||||||
|
|
||||||
|
let actualUpdateCount = 0;
|
||||||
|
data.tasks.forEach((task, index) => {
|
||||||
|
if (updatedTasksMap.has(task.id)) {
|
||||||
|
// Only update if the task was part of the set sent to AI
|
||||||
|
data.tasks[index] = updatedTasksMap.get(task.id);
|
||||||
|
actualUpdateCount++;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if (isMCP)
|
||||||
|
logFn.info(
|
||||||
|
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
|
||||||
|
);
|
||||||
|
else
|
||||||
|
logFn(
|
||||||
|
'info',
|
||||||
|
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
|
||||||
|
);
|
||||||
|
|
||||||
|
writeJSON(tasksPath, data);
|
||||||
|
if (isMCP)
|
||||||
|
logFn.info(
|
||||||
|
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
|
||||||
|
);
|
||||||
|
else
|
||||||
|
logFn(
|
||||||
|
'success',
|
||||||
|
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
|
||||||
|
);
|
||||||
|
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||||
|
|
||||||
|
if (outputFormat === 'text' && aiServiceResponse.telemetryData) {
|
||||||
|
displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
updatedTasks: parsedUpdatedTasks,
|
||||||
|
telemetryData: aiServiceResponse.telemetryData
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||||
if (isMCP) logFn.error(`Error during AI service call: ${error.message}`);
|
if (isMCP) logFn.error(`Error during AI service call: ${error.message}`);
|
||||||
@@ -385,98 +460,10 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
|
|||||||
'Please ensure API keys are configured correctly in .env or mcp.json.'
|
'Please ensure API keys are configured correctly in .env or mcp.json.'
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
throw error; // Re-throw error
|
throw error;
|
||||||
} finally {
|
} finally {
|
||||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Parse and Validate Response ---
|
|
||||||
try {
|
|
||||||
updatedTasks = parseUpdatedTasksFromText(
|
|
||||||
responseText,
|
|
||||||
tasksToUpdate.length,
|
|
||||||
logFn,
|
|
||||||
isMCP
|
|
||||||
);
|
|
||||||
} catch (parseError) {
|
|
||||||
if (isMCP)
|
|
||||||
logFn.error(
|
|
||||||
`Failed to parse updated tasks from AI response: ${parseError.message}`
|
|
||||||
);
|
|
||||||
else
|
|
||||||
logFn(
|
|
||||||
'error',
|
|
||||||
`Failed to parse updated tasks from AI response: ${parseError.message}`
|
|
||||||
);
|
|
||||||
if (getDebugFlag(session)) {
|
|
||||||
if (isMCP) logFn.error(`Raw AI Response:\n${responseText}`);
|
|
||||||
else logFn('error', `Raw AI Response:\n${responseText}`);
|
|
||||||
}
|
|
||||||
throw new Error(
|
|
||||||
`Failed to parse valid updated tasks from AI response: ${parseError.message}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
// --- End Parse/Validate ---
|
|
||||||
|
|
||||||
// --- Update Tasks Data (Unchanged) ---
|
|
||||||
if (!Array.isArray(updatedTasks)) {
|
|
||||||
// Should be caught by parser, but extra check
|
|
||||||
throw new Error('Parsed AI response for updated tasks was not an array.');
|
|
||||||
}
|
|
||||||
if (isMCP)
|
|
||||||
logFn.info(`Received ${updatedTasks.length} updated tasks from AI.`);
|
|
||||||
else
|
|
||||||
logFn('info', `Received ${updatedTasks.length} updated tasks from AI.`);
|
|
||||||
// Create a map for efficient lookup
|
|
||||||
const updatedTasksMap = new Map(
|
|
||||||
updatedTasks.map((task) => [task.id, task])
|
|
||||||
);
|
|
||||||
|
|
||||||
// Iterate through the original data and update based on the map
|
|
||||||
let actualUpdateCount = 0;
|
|
||||||
data.tasks.forEach((task, index) => {
|
|
||||||
if (updatedTasksMap.has(task.id)) {
|
|
||||||
// Only update if the task was part of the set sent to AI
|
|
||||||
data.tasks[index] = updatedTasksMap.get(task.id);
|
|
||||||
actualUpdateCount++;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
if (isMCP)
|
|
||||||
logFn.info(
|
|
||||||
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
|
|
||||||
);
|
|
||||||
else
|
|
||||||
logFn(
|
|
||||||
'info',
|
|
||||||
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
|
|
||||||
);
|
|
||||||
// --- End Update Tasks Data ---
|
|
||||||
|
|
||||||
// --- Write File and Generate (Unchanged) ---
|
|
||||||
writeJSON(tasksPath, data);
|
|
||||||
if (isMCP)
|
|
||||||
logFn.info(
|
|
||||||
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
|
|
||||||
);
|
|
||||||
else
|
|
||||||
logFn(
|
|
||||||
'success',
|
|
||||||
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
|
|
||||||
);
|
|
||||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
|
||||||
// --- End Write File ---
|
|
||||||
|
|
||||||
// --- Final CLI Output (Unchanged) ---
|
|
||||||
if (outputFormat === 'text') {
|
|
||||||
console.log(
|
|
||||||
boxen(chalk.green(`Successfully updated ${actualUpdateCount} tasks`), {
|
|
||||||
padding: 1,
|
|
||||||
borderColor: 'green',
|
|
||||||
borderStyle: 'round'
|
|
||||||
})
|
|
||||||
);
|
|
||||||
}
|
|
||||||
// --- End Final CLI Output ---
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// --- General Error Handling (Unchanged) ---
|
// --- General Error Handling (Unchanged) ---
|
||||||
if (isMCP) logFn.error(`Error updating tasks: ${error.message}`);
|
if (isMCP) logFn.error(`Error updating tasks: ${error.message}`);
|
||||||
|
|||||||
@@ -183,7 +183,7 @@ Apply telemetry pattern from telemetry.mdc:
|
|||||||
* Verify `handleApiResult` correctly passes `data.telemetryData` through.
|
* Verify `handleApiResult` correctly passes `data.telemetryData` through.
|
||||||
|
|
||||||
|
|
||||||
## 9. Telemetry Integration for update-tasks [pending]
|
## 9. Telemetry Integration for update-tasks [done]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
### Description: Integrate AI usage telemetry capture and propagation for the update-tasks (bulk update) functionality.
|
### Description: Integrate AI usage telemetry capture and propagation for the update-tasks (bulk update) functionality.
|
||||||
### Details:
|
### Details:
|
||||||
@@ -277,3 +277,33 @@ Apply telemetry pattern from telemetry.mdc:
|
|||||||
* Verify `handleApiResult` correctly passes `data.telemetryData` through.
|
* Verify `handleApiResult` correctly passes `data.telemetryData` through.
|
||||||
|
|
||||||
|
|
||||||
|
## 13. Update google.js for Telemetry Compatibility [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Modify src/ai-providers/google.js functions to return usage data.
|
||||||
|
### Details:
|
||||||
|
Update the provider functions in `src/ai-providers/google.js` to ensure they return telemetry-compatible results:\n\n1. **`generateGoogleText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateGoogleObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamGoogleText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.
|
||||||
|
|
||||||
|
## 14. Update openai.js for Telemetry Compatibility [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Modify src/ai-providers/openai.js functions to return usage data.
|
||||||
|
### Details:
|
||||||
|
Update the provider functions in `src/ai-providers/openai.js` to ensure they return telemetry-compatible results:\n\n1. **`generateOpenAIText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateOpenAIObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamOpenAIText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.
|
||||||
|
|
||||||
|
## 15. Update openrouter.js for Telemetry Compatibility [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Modify src/ai-providers/openrouter.js functions to return usage data.
|
||||||
|
### Details:
|
||||||
|
Update the provider functions in `src/ai-providers/openrouter.js` to ensure they return telemetry-compatible results:\n\n1. **`generateOpenRouterText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateOpenRouterObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamOpenRouterText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.
|
||||||
|
|
||||||
|
## 16. Update perplexity.js for Telemetry Compatibility [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Modify src/ai-providers/perplexity.js functions to return usage data.
|
||||||
|
### Details:
|
||||||
|
Update the provider functions in `src/ai-providers/perplexity.js` to ensure they return telemetry-compatible results:\n\n1. **`generatePerplexityText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generatePerplexityObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamPerplexityText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.
|
||||||
|
|
||||||
|
## 17. Update xai.js for Telemetry Compatibility [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Modify src/ai-providers/xai.js functions to return usage data.
|
||||||
|
### Details:
|
||||||
|
Update the provider functions in `src/ai-providers/xai.js` to ensure they return telemetry-compatible results:\n\n1. **`generateXaiText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateXaiObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamXaiText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.
|
||||||
|
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ The implementation should respect user privacy by:
|
|||||||
- Making it clear in documentation how users can opt out of telemetry
|
- Making it clear in documentation how users can opt out of telemetry
|
||||||
- Ensuring the ID cannot be traced back to specific users or installations
|
- Ensuring the ID cannot be traced back to specific users or installations
|
||||||
|
|
||||||
This user ID will serve as the foundation for anonymous usage tracking, helping to understand how Taskmaster is used without compromising user privacy.
|
This user ID will serve as the foundation for anonymous usage tracking, helping to understand how Taskmaster is used without compromising user privacy. Note that while we're implementing the ID generation now, the actual server-side collection is not yet available, so this data will initially only be stored locally.
|
||||||
|
|
||||||
# Test Strategy:
|
# Test Strategy:
|
||||||
Testing for this feature should include:
|
Testing for this feature should include:
|
||||||
@@ -88,5 +88,5 @@ Combine the UUID generation and config handling functions to: 1) Check if a user
|
|||||||
### Dependencies: 80.4
|
### Dependencies: 80.4
|
||||||
### Description: Document the user ID system and create an API for the telemetry system to access the user ID.
|
### Description: Document the user ID system and create an API for the telemetry system to access the user ID.
|
||||||
### Details:
|
### Details:
|
||||||
Create comprehensive documentation explaining: 1) The purpose of the anonymous ID, 2) How user privacy is protected, 3) How to opt out of telemetry, and 4) Technical details of the implementation. Implement a simple API function 'getUserId()' that reads the ID from config for use by the telemetry system. Update the README and user documentation to include information about anonymous usage tracking. Ensure cross-platform compatibility by testing on all supported operating systems.
|
Create comprehensive documentation explaining: 1) The purpose of the anonymous ID, 2) How user privacy is protected, 3) How to opt out of telemetry, and 4) Technical details of the implementation. Implement a simple API function 'getUserId()' that reads the ID from config for use by the telemetry system. Update the README and user documentation to include information about anonymous usage tracking. Ensure cross-platform compatibility by testing on all supported operating systems. Make it clear in the documentation that while we're collecting this ID, the server-side collection is not yet implemented, so data remains local for now.
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
# Task ID: 81
|
# Task ID: 81
|
||||||
# Title: Task #81: Implement Comprehensive Expansion Telemetry System with Aggregated Metrics
|
# Title: Task #81: Implement Comprehensive Local Telemetry System with Future Server Integration Capability
|
||||||
# Status: pending
|
# Status: pending
|
||||||
# Dependencies: None
|
# Dependencies: None
|
||||||
# Priority: medium
|
# Priority: medium
|
||||||
# Description: Expand the existing telemetry system to capture additional metrics about feature usage, performance, and user behavior patterns, then implement aggregation and batching of telemetry data to minimize network overhead.
|
# Description: Expand the existing telemetry system to capture additional metrics about feature usage, performance, and user behavior patterns, implementing local storage and aggregation of telemetry data with the capability for future server integration.
|
||||||
# Details:
|
# Details:
|
||||||
This task builds upon the existing telemetry infrastructure (Tasks #77 and #80) to provide more comprehensive insights into how users interact with the application.
|
This task builds upon the existing telemetry infrastructure (Tasks #77 and #80) to provide more comprehensive insights into how users interact with the application, while storing data locally until a server endpoint becomes available.
|
||||||
|
|
||||||
Key implementation details:
|
Key implementation details:
|
||||||
1. Identify and implement additional telemetry data points:
|
1. Identify and implement additional telemetry data points:
|
||||||
@@ -16,11 +16,12 @@ Key implementation details:
|
|||||||
- Session duration and activity patterns
|
- Session duration and activity patterns
|
||||||
- System environment information (OS, Node version, etc.)
|
- System environment information (OS, Node version, etc.)
|
||||||
|
|
||||||
2. Implement a local telemetry aggregation system:
|
2. Implement a local telemetry storage system:
|
||||||
- Create a local storage mechanism to temporarily hold telemetry data
|
- Create a robust local storage mechanism to hold telemetry data indefinitely
|
||||||
- Implement data aggregation to combine similar events and reduce payload size
|
- Implement data aggregation to combine similar events and reduce storage size
|
||||||
- Add batching logic to send multiple telemetry events in a single network request
|
- Add data retention policies to prevent excessive local storage usage
|
||||||
- Implement configurable batch sizes and transmission intervals
|
- Implement configurable storage limits and cleanup procedures
|
||||||
|
- Design the storage format to be compatible with future server transmission
|
||||||
|
|
||||||
3. Add privacy-preserving mechanisms:
|
3. Add privacy-preserving mechanisms:
|
||||||
- Ensure all personally identifiable information is properly anonymized
|
- Ensure all personally identifiable information is properly anonymized
|
||||||
@@ -28,15 +29,17 @@ Key implementation details:
|
|||||||
- Add user-configurable telemetry levels (basic, enhanced, full)
|
- Add user-configurable telemetry levels (basic, enhanced, full)
|
||||||
- Provide clear documentation on what data is collected and how it's used
|
- Provide clear documentation on what data is collected and how it's used
|
||||||
|
|
||||||
4. Enhance the analytics backend integration:
|
4. Design for future server integration:
|
||||||
- Update the API endpoints to handle the new telemetry data types
|
- Create a pluggable transmission architecture that can be connected to a server later
|
||||||
- Implement proper error handling and retry logic for failed transmissions
|
- Define API contracts and data formats for future server endpoints
|
||||||
- Add compression for telemetry payloads to reduce bandwidth usage
|
- Add configuration options for server URLs and authentication that will be used later
|
||||||
|
- Implement feature flags to easily enable server transmission when available
|
||||||
|
|
||||||
5. Add telemetry debugging capabilities:
|
5. Add telemetry debugging capabilities:
|
||||||
- Create a developer mode to view telemetry data being collected
|
- Create a developer mode to view telemetry data being collected
|
||||||
- Implement logging of telemetry events (when in debug mode)
|
- Implement logging of telemetry events (when in debug mode)
|
||||||
- Add command to manually trigger telemetry transmission
|
- Add commands to export telemetry data for manual analysis
|
||||||
|
- Create visualization tools for local telemetry data
|
||||||
|
|
||||||
# Test Strategy:
|
# Test Strategy:
|
||||||
The testing strategy for the expanded telemetry system should be comprehensive and cover all aspects of the implementation:
|
The testing strategy for the expanded telemetry system should be comprehensive and cover all aspects of the implementation:
|
||||||
@@ -45,40 +48,41 @@ The testing strategy for the expanded telemetry system should be comprehensive a
|
|||||||
- Test each telemetry collection function in isolation
|
- Test each telemetry collection function in isolation
|
||||||
- Verify proper anonymization of sensitive data
|
- Verify proper anonymization of sensitive data
|
||||||
- Test aggregation logic with various input scenarios
|
- Test aggregation logic with various input scenarios
|
||||||
- Validate batching mechanisms with different batch sizes
|
- Validate local storage mechanisms with different data volumes
|
||||||
- Test retry logic and error handling
|
- Test data retention and cleanup policies
|
||||||
|
|
||||||
2. Integration Tests:
|
2. Integration Tests:
|
||||||
- Verify telemetry data is properly stored locally before transmission
|
- Verify telemetry data is properly stored locally
|
||||||
- Test the complete flow from data collection to transmission
|
- Test the complete flow from data collection to local storage
|
||||||
- Validate that the analytics backend correctly processes the new data types
|
- Validate that the storage format is suitable for future server transmission
|
||||||
- Test different network conditions (slow, intermittent, offline)
|
- Test different application states (startup, shutdown, crash recovery)
|
||||||
- Verify proper handling of transmission failures
|
- Verify proper handling of storage failures
|
||||||
|
|
||||||
3. End-to-End Tests:
|
3. End-to-End Tests:
|
||||||
- Create automated E2E tests that perform various user actions and verify telemetry is captured
|
- Create automated E2E tests that perform various user actions and verify telemetry is captured
|
||||||
- Test with the actual analytics backend to ensure compatibility
|
- Test with simulated long-term usage to verify storage efficiency
|
||||||
- Verify that aggregated data accurately represents the performed actions
|
- Verify that aggregated data accurately represents the performed actions
|
||||||
|
|
||||||
4. Performance Tests:
|
4. Performance Tests:
|
||||||
- Measure the performance impact of the expanded telemetry system
|
- Measure the performance impact of the expanded telemetry system
|
||||||
- Test with large volumes of telemetry data to ensure efficient handling
|
- Test with large volumes of telemetry data to ensure efficient handling
|
||||||
- Verify memory usage remains within acceptable limits
|
- Verify memory usage remains within acceptable limits
|
||||||
- Test CPU utilization during telemetry collection and transmission
|
- Test CPU utilization during telemetry collection and storage operations
|
||||||
|
|
||||||
5. Manual Testing:
|
5. Manual Testing:
|
||||||
- Verify telemetry debug mode correctly displays collected data
|
- Verify telemetry debug mode correctly displays collected data
|
||||||
- Test different telemetry level configurations
|
- Test different telemetry level configurations
|
||||||
- Manually verify the accuracy of collected metrics
|
- Manually verify the accuracy of collected metrics
|
||||||
|
- Test the export functionality and analyze the exported data
|
||||||
|
|
||||||
6. Privacy Compliance Testing:
|
6. Privacy Compliance Testing:
|
||||||
- Verify no PII is transmitted without proper anonymization
|
- Verify no PII is stored without proper anonymization
|
||||||
- Test opt-out functionality works correctly
|
- Test opt-out functionality works correctly
|
||||||
- Ensure telemetry levels properly restrict data collection as configured
|
- Ensure telemetry levels properly restrict data collection as configured
|
||||||
|
|
||||||
7. Regression Testing:
|
7. Regression Testing:
|
||||||
- Verify existing functionality continues to work with the expanded telemetry
|
- Verify existing functionality continues to work with the expanded telemetry
|
||||||
- Ensure backward compatibility with the existing analytics backend
|
- Ensure the system is designed to be compatible with future server integration
|
||||||
|
|
||||||
# Subtasks:
|
# Subtasks:
|
||||||
## 1. Implement Additional Telemetry Data Collection Points [pending]
|
## 1. Implement Additional Telemetry Data Collection Points [pending]
|
||||||
@@ -87,17 +91,17 @@ The testing strategy for the expanded telemetry system should be comprehensive a
|
|||||||
### Details:
|
### Details:
|
||||||
Create new telemetry event types and collection points throughout the codebase. Implement hooks in the command execution pipeline to track timing and frequency. Add performance monitoring for key operations using high-resolution timers. Capture system environment data at startup. Implement error tracking that records error types and frequencies. Add session tracking with start/end events and periodic heartbeats.
|
Create new telemetry event types and collection points throughout the codebase. Implement hooks in the command execution pipeline to track timing and frequency. Add performance monitoring for key operations using high-resolution timers. Capture system environment data at startup. Implement error tracking that records error types and frequencies. Add session tracking with start/end events and periodic heartbeats.
|
||||||
|
|
||||||
## 2. Build Local Telemetry Storage and Aggregation System [pending]
|
## 2. Build Robust Local Telemetry Storage System [pending]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
### Description: Create a local storage mechanism to temporarily hold telemetry data with aggregation capabilities to combine similar events and reduce payload size.
|
### Description: Create a persistent local storage mechanism to hold telemetry data indefinitely with aggregation capabilities to combine similar events and reduce storage requirements.
|
||||||
### Details:
|
### Details:
|
||||||
Implement a persistent local store using SQLite or similar lightweight database. Create data schemas for different telemetry types. Develop aggregation functions that can combine similar events (e.g., multiple instances of the same command) into summary statistics. Implement data retention policies to prevent excessive storage usage. Add serialization/deserialization for telemetry objects.
|
Implement a persistent local store using SQLite or similar lightweight database. Create data schemas for different telemetry types. Develop aggregation functions that can combine similar events (e.g., multiple instances of the same command) into summary statistics. Implement data retention policies to prevent excessive storage usage. Add serialization/deserialization for telemetry objects. Design the storage format to be compatible with future server transmission needs.
|
||||||
|
|
||||||
## 3. Implement Batching and Transmission Logic [pending]
|
## 3. Design Server Transmission Architecture for Future Implementation [pending]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
### Description: Add batching capabilities to group multiple telemetry events into single network requests with configurable batch sizes and transmission intervals.
|
### Description: Create a pluggable architecture for future server transmission capabilities while maintaining local-only functionality for now.
|
||||||
### Details:
|
### Details:
|
||||||
Create a background service that periodically checks for pending telemetry data. Implement configurable settings for batch size, transmission frequency, and retry behavior. Add compression for telemetry payloads using gzip or similar. Implement network-aware transmission that detects connectivity and adjusts accordingly. Add retry logic with exponential backoff for failed transmissions. Create hooks for manual transmission triggering.
|
Design a modular transmission system with clear interfaces that can be implemented later when a server becomes available. Define data formats and API contracts for future server endpoints. Add configuration options for server URLs and authentication that will be used in the future. Implement feature flags to easily enable server transmission when available. Create a transmission queue design that can be activated later. Document the architecture for future implementation.
|
||||||
|
|
||||||
## 4. Implement Privacy Controls and User Configuration [pending]
|
## 4. Implement Privacy Controls and User Configuration [pending]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
@@ -105,9 +109,9 @@ Create a background service that periodically checks for pending telemetry data.
|
|||||||
### Details:
|
### Details:
|
||||||
Create a telemetry sanitization layer that removes or hashes PII before storage. Implement three telemetry levels (basic, enhanced, full) with clear documentation of what each includes. Add user settings UI for controlling telemetry levels. Create a first-run experience that explains telemetry and requests user consent. Implement runtime filtering of telemetry events based on user settings.
|
Create a telemetry sanitization layer that removes or hashes PII before storage. Implement three telemetry levels (basic, enhanced, full) with clear documentation of what each includes. Add user settings UI for controlling telemetry levels. Create a first-run experience that explains telemetry and requests user consent. Implement runtime filtering of telemetry events based on user settings.
|
||||||
|
|
||||||
## 5. Add Telemetry Debugging and Monitoring Capabilities [pending]
|
## 5. Add Telemetry Debugging and Local Analysis Tools [pending]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
### Description: Create developer tools for debugging telemetry including a developer mode to view collected data, logging capabilities, and manual transmission controls.
|
### Description: Create developer tools for debugging telemetry including a developer mode to view collected data, logging capabilities, and local data analysis features.
|
||||||
### Details:
|
### Details:
|
||||||
Implement a developer console command to toggle telemetry debug mode. Create a UI panel that displays pending and recently sent telemetry data when in debug mode. Add detailed logging of telemetry events to the application log when debugging is enabled. Create commands to manually trigger transmission or clear pending telemetry. Add telemetry health metrics to monitor system performance impact.
|
Implement a developer console command to toggle telemetry debug mode. Create a UI panel that displays collected telemetry data when in debug mode. Add detailed logging of telemetry events to the application log when debugging is enabled. Create commands to export telemetry data in various formats (JSON, CSV) for manual analysis. Implement basic visualization tools for local telemetry data to help users understand their own usage patterns.
|
||||||
|
|
||||||
|
|||||||
@@ -4997,7 +4997,7 @@
|
|||||||
"title": "Telemetry Integration for update-tasks",
|
"title": "Telemetry Integration for update-tasks",
|
||||||
"description": "Integrate AI usage telemetry capture and propagation for the update-tasks (bulk update) functionality.",
|
"description": "Integrate AI usage telemetry capture and propagation for the update-tasks (bulk update) functionality.",
|
||||||
"details": "\\\nApply telemetry pattern from telemetry.mdc:\n\n1. **Core (`scripts/modules/task-manager/update-tasks.js`):**\n * Modify AI service call (likely within a loop) to include `commandName: \\'update-tasks\\'` and `outputType`.\n * Receive `{ mainResult, telemetryData }` for each AI call.\n * Aggregate or handle `telemetryData` appropriately for multiple calls.\n * Return object including aggregated/relevant `telemetryData`.\n * Handle CLI display via `displayAiUsageSummary` if applicable.\n\n2. **Direct (`mcp-server/src/core/direct-functions/update-tasks.js`):**\n * Pass `commandName`, `outputType: \\'mcp\\'` to core.\n * Pass `outputFormat: \\'json\\'` if applicable.\n * Receive `{ ..., telemetryData }` from core.\n * Return `{ success: true, data: { ..., telemetryData } }`.\n\n3. **Tool (`mcp-server/src/tools/update.js`):**\n * Verify `handleApiResult` correctly passes `data.telemetryData` through.\n",
|
"details": "\\\nApply telemetry pattern from telemetry.mdc:\n\n1. **Core (`scripts/modules/task-manager/update-tasks.js`):**\n * Modify AI service call (likely within a loop) to include `commandName: \\'update-tasks\\'` and `outputType`.\n * Receive `{ mainResult, telemetryData }` for each AI call.\n * Aggregate or handle `telemetryData` appropriately for multiple calls.\n * Return object including aggregated/relevant `telemetryData`.\n * Handle CLI display via `displayAiUsageSummary` if applicable.\n\n2. **Direct (`mcp-server/src/core/direct-functions/update-tasks.js`):**\n * Pass `commandName`, `outputType: \\'mcp\\'` to core.\n * Pass `outputFormat: \\'json\\'` if applicable.\n * Receive `{ ..., telemetryData }` from core.\n * Return `{ success: true, data: { ..., telemetryData } }`.\n\n3. **Tool (`mcp-server/src/tools/update.js`):**\n * Verify `handleApiResult` correctly passes `data.telemetryData` through.\n",
|
||||||
"status": "pending",
|
"status": "done",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"parentTaskId": 77
|
"parentTaskId": 77
|
||||||
},
|
},
|
||||||
@@ -5027,6 +5027,51 @@
|
|||||||
"status": "pending",
|
"status": "pending",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"parentTaskId": 77
|
"parentTaskId": 77
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 13,
|
||||||
|
"title": "Update google.js for Telemetry Compatibility",
|
||||||
|
"description": "Modify src/ai-providers/google.js functions to return usage data.",
|
||||||
|
"details": "Update the provider functions in `src/ai-providers/google.js` to ensure they return telemetry-compatible results:\\n\\n1. **`generateGoogleText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\\n2. **`generateGoogleObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\\n3. **`streamGoogleText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\\n\\nReference `anthropic.js` for the pattern.",
|
||||||
|
"status": "pending",
|
||||||
|
"dependencies": [],
|
||||||
|
"parentTaskId": 77
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 14,
|
||||||
|
"title": "Update openai.js for Telemetry Compatibility",
|
||||||
|
"description": "Modify src/ai-providers/openai.js functions to return usage data.",
|
||||||
|
"details": "Update the provider functions in `src/ai-providers/openai.js` to ensure they return telemetry-compatible results:\\n\\n1. **`generateOpenAIText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\\n2. **`generateOpenAIObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\\n3. **`streamOpenAIText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\\n\\nReference `anthropic.js` for the pattern.",
|
||||||
|
"status": "pending",
|
||||||
|
"dependencies": [],
|
||||||
|
"parentTaskId": 77
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 15,
|
||||||
|
"title": "Update openrouter.js for Telemetry Compatibility",
|
||||||
|
"description": "Modify src/ai-providers/openrouter.js functions to return usage data.",
|
||||||
|
"details": "Update the provider functions in `src/ai-providers/openrouter.js` to ensure they return telemetry-compatible results:\\n\\n1. **`generateOpenRouterText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\\n2. **`generateOpenRouterObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\\n3. **`streamOpenRouterText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\\n\\nReference `anthropic.js` for the pattern.",
|
||||||
|
"status": "pending",
|
||||||
|
"dependencies": [],
|
||||||
|
"parentTaskId": 77
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 16,
|
||||||
|
"title": "Update perplexity.js for Telemetry Compatibility",
|
||||||
|
"description": "Modify src/ai-providers/perplexity.js functions to return usage data.",
|
||||||
|
"details": "Update the provider functions in `src/ai-providers/perplexity.js` to ensure they return telemetry-compatible results:\\n\\n1. **`generatePerplexityText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\\n2. **`generatePerplexityObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\\n3. **`streamPerplexityText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\\n\\nReference `anthropic.js` for the pattern.",
|
||||||
|
"status": "pending",
|
||||||
|
"dependencies": [],
|
||||||
|
"parentTaskId": 77
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 17,
|
||||||
|
"title": "Update xai.js for Telemetry Compatibility",
|
||||||
|
"description": "Modify src/ai-providers/xai.js functions to return usage data.",
|
||||||
|
"details": "Update the provider functions in `src/ai-providers/xai.js` to ensure they return telemetry-compatible results:\\n\\n1. **`generateXaiText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\\n2. **`generateXaiObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\\n3. **`streamXaiText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\\n\\nReference `anthropic.js` for the pattern.",
|
||||||
|
"status": "pending",
|
||||||
|
"dependencies": [],
|
||||||
|
"parentTaskId": 77
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -5034,11 +5079,11 @@
|
|||||||
"id": 80,
|
"id": 80,
|
||||||
"title": "Implement Unique User ID Generation and Storage During Installation",
|
"title": "Implement Unique User ID Generation and Storage During Installation",
|
||||||
"description": "Generate a unique user identifier during npm installation and store it in the .taskmasterconfig globals to enable anonymous usage tracking and telemetry without requiring user registration.",
|
"description": "Generate a unique user identifier during npm installation and store it in the .taskmasterconfig globals to enable anonymous usage tracking and telemetry without requiring user registration.",
|
||||||
"details": "This task involves implementing a mechanism to generate and store a unique user identifier during the npm installation process of Taskmaster. The implementation should:\n\n1. Create a post-install script that runs automatically after npm install completes\n2. Generate a cryptographically secure random UUID v4 as the unique user identifier\n3. Check if a user ID already exists in the .taskmasterconfig file before generating a new one\n4. Add the generated user ID to the globals section of the .taskmasterconfig file\n5. Ensure the user ID persists across updates but is regenerated on fresh installations\n6. Handle edge cases such as failed installations, manual deletions of the config file, or permission issues\n7. Add appropriate logging to notify users that an anonymous ID is being generated (with clear privacy messaging)\n8. Document the purpose of this ID in the codebase and user documentation\n9. Ensure the ID generation is compatible with all supported operating systems\n10. Make the ID accessible to the telemetry system implemented in Task #77\n\nThe implementation should respect user privacy by:\n- Not collecting any personally identifiable information\n- Making it clear in documentation how users can opt out of telemetry\n- Ensuring the ID cannot be traced back to specific users or installations\n\nThis user ID will serve as the foundation for anonymous usage tracking, helping to understand how Taskmaster is used without compromising user privacy.",
|
|
||||||
"testStrategy": "Testing for this feature should include:\n\n1. **Unit Tests**:\n - Verify the UUID generation produces valid UUIDs\n - Test the config file reading and writing functionality\n - Ensure proper error handling for file system operations\n - Verify the ID remains consistent across multiple reads\n\n2. **Integration Tests**:\n - Run a complete npm installation in a clean environment and verify a new ID is generated\n - Simulate an update installation and verify the existing ID is preserved\n - Test the interaction between the ID generation and the telemetry system\n - Verify the ID is correctly stored in the expected location in .taskmasterconfig\n\n3. **Manual Testing**:\n - Perform fresh installations on different operating systems (Windows, macOS, Linux)\n - Verify the installation process completes without errors\n - Check that the .taskmasterconfig file contains the generated ID\n - Test scenarios where the config file is manually deleted or corrupted\n\n4. **Edge Case Testing**:\n - Test behavior when the installation is run without sufficient permissions\n - Verify handling of network disconnections during installation\n - Test with various npm versions to ensure compatibility\n - Verify behavior when .taskmasterconfig already exists but doesn't contain a user ID section\n\n5. **Validation**:\n - Create a simple script to extract and analyze generated IDs to ensure uniqueness\n - Verify the ID format meets UUID v4 specifications\n - Confirm the ID is accessible to the telemetry system from Task #77\n\nThe test plan should include documentation of all test cases, expected results, and actual outcomes. A successful implementation will generate unique IDs for each installation while maintaining that ID across updates.",
|
|
||||||
"status": "pending",
|
"status": "pending",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"priority": "medium",
|
"priority": "medium",
|
||||||
|
"details": "This task involves implementing a mechanism to generate and store a unique user identifier during the npm installation process of Taskmaster. The implementation should:\n\n1. Create a post-install script that runs automatically after npm install completes\n2. Generate a cryptographically secure random UUID v4 as the unique user identifier\n3. Check if a user ID already exists in the .taskmasterconfig file before generating a new one\n4. Add the generated user ID to the globals section of the .taskmasterconfig file\n5. Ensure the user ID persists across updates but is regenerated on fresh installations\n6. Handle edge cases such as failed installations, manual deletions of the config file, or permission issues\n7. Add appropriate logging to notify users that an anonymous ID is being generated (with clear privacy messaging)\n8. Document the purpose of this ID in the codebase and user documentation\n9. Ensure the ID generation is compatible with all supported operating systems\n10. Make the ID accessible to the telemetry system implemented in Task #77\n\nThe implementation should respect user privacy by:\n- Not collecting any personally identifiable information\n- Making it clear in documentation how users can opt out of telemetry\n- Ensuring the ID cannot be traced back to specific users or installations\n\nThis user ID will serve as the foundation for anonymous usage tracking, helping to understand how Taskmaster is used without compromising user privacy. Note that while we're implementing the ID generation now, the actual server-side collection is not yet available, so this data will initially only be stored locally.",
|
||||||
|
"testStrategy": "Testing for this feature should include:\n\n1. **Unit Tests**:\n - Verify the UUID generation produces valid UUIDs\n - Test the config file reading and writing functionality\n - Ensure proper error handling for file system operations\n - Verify the ID remains consistent across multiple reads\n\n2. **Integration Tests**:\n - Run a complete npm installation in a clean environment and verify a new ID is generated\n - Simulate an update installation and verify the existing ID is preserved\n - Test the interaction between the ID generation and the telemetry system\n - Verify the ID is correctly stored in the expected location in .taskmasterconfig\n\n3. **Manual Testing**:\n - Perform fresh installations on different operating systems (Windows, macOS, Linux)\n - Verify the installation process completes without errors\n - Check that the .taskmasterconfig file contains the generated ID\n - Test scenarios where the config file is manually deleted or corrupted\n\n4. **Edge Case Testing**:\n - Test behavior when the installation is run without sufficient permissions\n - Verify handling of network disconnections during installation\n - Test with various npm versions to ensure compatibility\n - Verify behavior when .taskmasterconfig already exists but doesn't contain a user ID section\n\n5. **Validation**:\n - Create a simple script to extract and analyze generated IDs to ensure uniqueness\n - Verify the ID format meets UUID v4 specifications\n - Confirm the ID is accessible to the telemetry system from Task #77\n\nThe test plan should include documentation of all test cases, expected results, and actual outcomes. A successful implementation will generate unique IDs for each installation while maintaining that ID across updates.",
|
||||||
"subtasks": [
|
"subtasks": [
|
||||||
{
|
{
|
||||||
"id": 1,
|
"id": 1,
|
||||||
@@ -5090,7 +5135,7 @@
|
|||||||
"dependencies": [
|
"dependencies": [
|
||||||
4
|
4
|
||||||
],
|
],
|
||||||
"details": "Create comprehensive documentation explaining: 1) The purpose of the anonymous ID, 2) How user privacy is protected, 3) How to opt out of telemetry, and 4) Technical details of the implementation. Implement a simple API function 'getUserId()' that reads the ID from config for use by the telemetry system. Update the README and user documentation to include information about anonymous usage tracking. Ensure cross-platform compatibility by testing on all supported operating systems.",
|
"details": "Create comprehensive documentation explaining: 1) The purpose of the anonymous ID, 2) How user privacy is protected, 3) How to opt out of telemetry, and 4) Technical details of the implementation. Implement a simple API function 'getUserId()' that reads the ID from config for use by the telemetry system. Update the README and user documentation to include information about anonymous usage tracking. Ensure cross-platform compatibility by testing on all supported operating systems. Make it clear in the documentation that while we're collecting this ID, the server-side collection is not yet implemented, so data remains local for now.",
|
||||||
"status": "pending",
|
"status": "pending",
|
||||||
"testStrategy": "Verify documentation accuracy and completeness. Test the getUserId() function across platforms to ensure consistent behavior. Create a mock telemetry system to verify proper ID access."
|
"testStrategy": "Verify documentation accuracy and completeness. Test the getUserId() function across platforms to ensure consistent behavior. Create a mock telemetry system to verify proper ID access."
|
||||||
}
|
}
|
||||||
@@ -5098,13 +5143,13 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": 81,
|
"id": 81,
|
||||||
"title": "Task #81: Implement Comprehensive Expansion Telemetry System with Aggregated Metrics",
|
"title": "Task #81: Implement Comprehensive Local Telemetry System with Future Server Integration Capability",
|
||||||
"description": "Expand the existing telemetry system to capture additional metrics about feature usage, performance, and user behavior patterns, then implement aggregation and batching of telemetry data to minimize network overhead.",
|
"description": "Expand the existing telemetry system to capture additional metrics about feature usage, performance, and user behavior patterns, implementing local storage and aggregation of telemetry data with the capability for future server integration.",
|
||||||
"details": "This task builds upon the existing telemetry infrastructure (Tasks #77 and #80) to provide more comprehensive insights into how users interact with the application.\n\nKey implementation details:\n1. Identify and implement additional telemetry data points:\n - Command execution frequency and timing metrics\n - Feature usage patterns (which commands/features are most/least used)\n - Performance metrics (execution time, memory usage, etc.)\n - Error rates and types\n - Session duration and activity patterns\n - System environment information (OS, Node version, etc.)\n\n2. Implement a local telemetry aggregation system:\n - Create a local storage mechanism to temporarily hold telemetry data\n - Implement data aggregation to combine similar events and reduce payload size\n - Add batching logic to send multiple telemetry events in a single network request\n - Implement configurable batch sizes and transmission intervals\n\n3. Add privacy-preserving mechanisms:\n - Ensure all personally identifiable information is properly anonymized\n - Implement data minimization principles (only collect what's necessary)\n - Add user-configurable telemetry levels (basic, enhanced, full)\n - Provide clear documentation on what data is collected and how it's used\n\n4. Enhance the analytics backend integration:\n - Update the API endpoints to handle the new telemetry data types\n - Implement proper error handling and retry logic for failed transmissions\n - Add compression for telemetry payloads to reduce bandwidth usage\n\n5. Add telemetry debugging capabilities:\n - Create a developer mode to view telemetry data being collected\n - Implement logging of telemetry events (when in debug mode)\n - Add command to manually trigger telemetry transmission",
|
|
||||||
"testStrategy": "The testing strategy for the expanded telemetry system should be comprehensive and cover all aspects of the implementation:\n\n1. Unit Tests:\n - Test each telemetry collection function in isolation\n - Verify proper anonymization of sensitive data\n - Test aggregation logic with various input scenarios\n - Validate batching mechanisms with different batch sizes\n - Test retry logic and error handling\n\n2. Integration Tests:\n - Verify telemetry data is properly stored locally before transmission\n - Test the complete flow from data collection to transmission\n - Validate that the analytics backend correctly processes the new data types\n - Test different network conditions (slow, intermittent, offline)\n - Verify proper handling of transmission failures\n\n3. End-to-End Tests:\n - Create automated E2E tests that perform various user actions and verify telemetry is captured\n - Test with the actual analytics backend to ensure compatibility\n - Verify that aggregated data accurately represents the performed actions\n\n4. Performance Tests:\n - Measure the performance impact of the expanded telemetry system\n - Test with large volumes of telemetry data to ensure efficient handling\n - Verify memory usage remains within acceptable limits\n - Test CPU utilization during telemetry collection and transmission\n\n5. Manual Testing:\n - Verify telemetry debug mode correctly displays collected data\n - Test different telemetry level configurations\n - Manually verify the accuracy of collected metrics\n\n6. Privacy Compliance Testing:\n - Verify no PII is transmitted without proper anonymization\n - Test opt-out functionality works correctly\n - Ensure telemetry levels properly restrict data collection as configured\n\n7. Regression Testing:\n - Verify existing functionality continues to work with the expanded telemetry\n - Ensure backward compatibility with the existing analytics backend",
|
|
||||||
"status": "pending",
|
"status": "pending",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"priority": "medium",
|
"priority": "medium",
|
||||||
|
"details": "This task builds upon the existing telemetry infrastructure (Tasks #77 and #80) to provide more comprehensive insights into how users interact with the application, while storing data locally until a server endpoint becomes available.\n\nKey implementation details:\n1. Identify and implement additional telemetry data points:\n - Command execution frequency and timing metrics\n - Feature usage patterns (which commands/features are most/least used)\n - Performance metrics (execution time, memory usage, etc.)\n - Error rates and types\n - Session duration and activity patterns\n - System environment information (OS, Node version, etc.)\n\n2. Implement a local telemetry storage system:\n - Create a robust local storage mechanism to hold telemetry data indefinitely\n - Implement data aggregation to combine similar events and reduce storage size\n - Add data retention policies to prevent excessive local storage usage\n - Implement configurable storage limits and cleanup procedures\n - Design the storage format to be compatible with future server transmission\n\n3. Add privacy-preserving mechanisms:\n - Ensure all personally identifiable information is properly anonymized\n - Implement data minimization principles (only collect what's necessary)\n - Add user-configurable telemetry levels (basic, enhanced, full)\n - Provide clear documentation on what data is collected and how it's used\n\n4. Design for future server integration:\n - Create a pluggable transmission architecture that can be connected to a server later\n - Define API contracts and data formats for future server endpoints\n - Add configuration options for server URLs and authentication that will be used later\n - Implement feature flags to easily enable server transmission when available\n\n5. Add telemetry debugging capabilities:\n - Create a developer mode to view telemetry data being collected\n - Implement logging of telemetry events (when in debug mode)\n - Add commands to export telemetry data for manual analysis\n - Create visualization tools for local telemetry data",
|
||||||
|
"testStrategy": "The testing strategy for the expanded telemetry system should be comprehensive and cover all aspects of the implementation:\n\n1. Unit Tests:\n - Test each telemetry collection function in isolation\n - Verify proper anonymization of sensitive data\n - Test aggregation logic with various input scenarios\n - Validate local storage mechanisms with different data volumes\n - Test data retention and cleanup policies\n\n2. Integration Tests:\n - Verify telemetry data is properly stored locally\n - Test the complete flow from data collection to local storage\n - Validate that the storage format is suitable for future server transmission\n - Test different application states (startup, shutdown, crash recovery)\n - Verify proper handling of storage failures\n\n3. End-to-End Tests:\n - Create automated E2E tests that perform various user actions and verify telemetry is captured\n - Test with simulated long-term usage to verify storage efficiency\n - Verify that aggregated data accurately represents the performed actions\n\n4. Performance Tests:\n - Measure the performance impact of the expanded telemetry system\n - Test with large volumes of telemetry data to ensure efficient handling\n - Verify memory usage remains within acceptable limits\n - Test CPU utilization during telemetry collection and storage operations\n\n5. Manual Testing:\n - Verify telemetry debug mode correctly displays collected data\n - Test different telemetry level configurations\n - Manually verify the accuracy of collected metrics\n - Test the export functionality and analyze the exported data\n\n6. Privacy Compliance Testing:\n - Verify no PII is stored without proper anonymization\n - Test opt-out functionality works correctly\n - Ensure telemetry levels properly restrict data collection as configured\n\n7. Regression Testing:\n - Verify existing functionality continues to work with the expanded telemetry\n - Ensure the system is designed to be compatible with future server integration",
|
||||||
"subtasks": [
|
"subtasks": [
|
||||||
{
|
{
|
||||||
"id": 1,
|
"id": 1,
|
||||||
@@ -5117,21 +5162,21 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": 2,
|
"id": 2,
|
||||||
"title": "Build Local Telemetry Storage and Aggregation System",
|
"title": "Build Robust Local Telemetry Storage System",
|
||||||
"description": "Create a local storage mechanism to temporarily hold telemetry data with aggregation capabilities to combine similar events and reduce payload size.",
|
"description": "Create a persistent local storage mechanism to hold telemetry data indefinitely with aggregation capabilities to combine similar events and reduce storage requirements.",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"details": "Implement a persistent local store using SQLite or similar lightweight database. Create data schemas for different telemetry types. Develop aggregation functions that can combine similar events (e.g., multiple instances of the same command) into summary statistics. Implement data retention policies to prevent excessive storage usage. Add serialization/deserialization for telemetry objects.",
|
"details": "Implement a persistent local store using SQLite or similar lightweight database. Create data schemas for different telemetry types. Develop aggregation functions that can combine similar events (e.g., multiple instances of the same command) into summary statistics. Implement data retention policies to prevent excessive storage usage. Add serialization/deserialization for telemetry objects. Design the storage format to be compatible with future server transmission needs.",
|
||||||
"status": "pending",
|
"status": "pending",
|
||||||
"testStrategy": "Test storage persistence across application restarts. Verify aggregation logic correctly combines similar events. Benchmark storage performance with large volumes of telemetry data."
|
"testStrategy": "Test storage persistence across application restarts. Verify aggregation logic correctly combines similar events. Benchmark storage performance with large volumes of telemetry data. Test data retention policies with simulated long-term usage."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": 3,
|
"id": 3,
|
||||||
"title": "Implement Batching and Transmission Logic",
|
"title": "Design Server Transmission Architecture for Future Implementation",
|
||||||
"description": "Add batching capabilities to group multiple telemetry events into single network requests with configurable batch sizes and transmission intervals.",
|
"description": "Create a pluggable architecture for future server transmission capabilities while maintaining local-only functionality for now.",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"details": "Create a background service that periodically checks for pending telemetry data. Implement configurable settings for batch size, transmission frequency, and retry behavior. Add compression for telemetry payloads using gzip or similar. Implement network-aware transmission that detects connectivity and adjusts accordingly. Add retry logic with exponential backoff for failed transmissions. Create hooks for manual transmission triggering.",
|
"details": "Design a modular transmission system with clear interfaces that can be implemented later when a server becomes available. Define data formats and API contracts for future server endpoints. Add configuration options for server URLs and authentication that will be used in the future. Implement feature flags to easily enable server transmission when available. Create a transmission queue design that can be activated later. Document the architecture for future implementation.",
|
||||||
"status": "pending",
|
"status": "pending",
|
||||||
"testStrategy": "Test batching with various configurations. Verify compression reduces payload size. Simulate network failures to test retry logic. Measure transmission efficiency with different batch sizes."
|
"testStrategy": "Create interface tests to verify the design is implementable. Test configuration loading for future server settings. Verify the architecture allows for easy extension without major refactoring. Create mock implementations to validate the design."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": 4,
|
"id": 4,
|
||||||
@@ -5144,12 +5189,12 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": 5,
|
"id": 5,
|
||||||
"title": "Add Telemetry Debugging and Monitoring Capabilities",
|
"title": "Add Telemetry Debugging and Local Analysis Tools",
|
||||||
"description": "Create developer tools for debugging telemetry including a developer mode to view collected data, logging capabilities, and manual transmission controls.",
|
"description": "Create developer tools for debugging telemetry including a developer mode to view collected data, logging capabilities, and local data analysis features.",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"details": "Implement a developer console command to toggle telemetry debug mode. Create a UI panel that displays pending and recently sent telemetry data when in debug mode. Add detailed logging of telemetry events to the application log when debugging is enabled. Create commands to manually trigger transmission or clear pending telemetry. Add telemetry health metrics to monitor system performance impact.",
|
"details": "Implement a developer console command to toggle telemetry debug mode. Create a UI panel that displays collected telemetry data when in debug mode. Add detailed logging of telemetry events to the application log when debugging is enabled. Create commands to export telemetry data in various formats (JSON, CSV) for manual analysis. Implement basic visualization tools for local telemetry data to help users understand their own usage patterns.",
|
||||||
"status": "pending",
|
"status": "pending",
|
||||||
"testStrategy": "Verify debug mode correctly displays all telemetry events. Test manual transmission triggers. Ensure logging provides sufficient detail for debugging without excessive verbosity."
|
"testStrategy": "Verify debug mode correctly displays all telemetry events. Test data export functionality with various output formats. Ensure visualizations accurately represent the underlying data. Test with large datasets to verify performance."
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ const mockGetFallbackProvider = jest.fn();
|
|||||||
const mockGetFallbackModelId = jest.fn();
|
const mockGetFallbackModelId = jest.fn();
|
||||||
const mockGetParametersForRole = jest.fn();
|
const mockGetParametersForRole = jest.fn();
|
||||||
const mockGetUserId = jest.fn();
|
const mockGetUserId = jest.fn();
|
||||||
|
const mockGetDebugFlag = jest.fn();
|
||||||
|
|
||||||
// --- Mock MODEL_MAP Data ---
|
// --- Mock MODEL_MAP Data ---
|
||||||
// Provide a simplified structure sufficient for cost calculation tests
|
// Provide a simplified structure sufficient for cost calculation tests
|
||||||
@@ -41,6 +42,7 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
|
|||||||
getFallbackModelId: mockGetFallbackModelId,
|
getFallbackModelId: mockGetFallbackModelId,
|
||||||
getParametersForRole: mockGetParametersForRole,
|
getParametersForRole: mockGetParametersForRole,
|
||||||
getUserId: mockGetUserId,
|
getUserId: mockGetUserId,
|
||||||
|
getDebugFlag: mockGetDebugFlag,
|
||||||
MODEL_MAP: mockModelMap
|
MODEL_MAP: mockModelMap
|
||||||
}));
|
}));
|
||||||
|
|
||||||
@@ -70,12 +72,14 @@ const mockLog = jest.fn();
|
|||||||
const mockResolveEnvVariable = jest.fn();
|
const mockResolveEnvVariable = jest.fn();
|
||||||
const mockFindProjectRoot = jest.fn();
|
const mockFindProjectRoot = jest.fn();
|
||||||
const mockIsSilentMode = jest.fn();
|
const mockIsSilentMode = jest.fn();
|
||||||
|
const mockLogAiUsage = jest.fn();
|
||||||
|
|
||||||
jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
|
jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
|
||||||
log: mockLog,
|
log: mockLog,
|
||||||
resolveEnvVariable: mockResolveEnvVariable,
|
resolveEnvVariable: mockResolveEnvVariable,
|
||||||
findProjectRoot: mockFindProjectRoot,
|
findProjectRoot: mockFindProjectRoot,
|
||||||
isSilentMode: mockIsSilentMode
|
isSilentMode: mockIsSilentMode,
|
||||||
|
logAiUsage: mockLogAiUsage
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// Import the module to test (AFTER mocks)
|
// Import the module to test (AFTER mocks)
|
||||||
@@ -111,11 +115,16 @@ describe('Unified AI Services', () => {
|
|||||||
|
|
||||||
// Set a default behavior for the new mock
|
// Set a default behavior for the new mock
|
||||||
mockFindProjectRoot.mockReturnValue(fakeProjectRoot);
|
mockFindProjectRoot.mockReturnValue(fakeProjectRoot);
|
||||||
|
mockGetDebugFlag.mockReturnValue(false);
|
||||||
|
mockGetUserId.mockReturnValue('test-user-id'); // Add default mock for getUserId
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('generateTextService', () => {
|
describe('generateTextService', () => {
|
||||||
test('should use main provider/model and succeed', async () => {
|
test('should use main provider/model and succeed', async () => {
|
||||||
mockGenerateAnthropicText.mockResolvedValue('Main provider response');
|
mockGenerateAnthropicText.mockResolvedValue({
|
||||||
|
text: 'Main provider response',
|
||||||
|
usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 }
|
||||||
|
});
|
||||||
|
|
||||||
const params = {
|
const params = {
|
||||||
role: 'main',
|
role: 'main',
|
||||||
@@ -156,7 +165,10 @@ describe('Unified AI Services', () => {
|
|||||||
const mainError = new Error('Main provider failed');
|
const mainError = new Error('Main provider failed');
|
||||||
mockGenerateAnthropicText
|
mockGenerateAnthropicText
|
||||||
.mockRejectedValueOnce(mainError)
|
.mockRejectedValueOnce(mainError)
|
||||||
.mockResolvedValueOnce('Fallback provider response');
|
.mockResolvedValueOnce({
|
||||||
|
text: 'Fallback provider response',
|
||||||
|
usage: { inputTokens: 15, outputTokens: 25, totalTokens: 40 }
|
||||||
|
});
|
||||||
|
|
||||||
const explicitRoot = '/explicit/test/root';
|
const explicitRoot = '/explicit/test/root';
|
||||||
const params = {
|
const params = {
|
||||||
@@ -203,9 +215,10 @@ describe('Unified AI Services', () => {
|
|||||||
mockGenerateAnthropicText
|
mockGenerateAnthropicText
|
||||||
.mockRejectedValueOnce(mainError)
|
.mockRejectedValueOnce(mainError)
|
||||||
.mockRejectedValueOnce(fallbackError);
|
.mockRejectedValueOnce(fallbackError);
|
||||||
mockGeneratePerplexityText.mockResolvedValue(
|
mockGeneratePerplexityText.mockResolvedValue({
|
||||||
'Research provider response'
|
text: 'Research provider response',
|
||||||
);
|
usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
|
||||||
|
});
|
||||||
|
|
||||||
const params = { role: 'main', prompt: 'Research fallback test' };
|
const params = { role: 'main', prompt: 'Research fallback test' };
|
||||||
const result = await generateTextService(params);
|
const result = await generateTextService(params);
|
||||||
@@ -278,7 +291,11 @@ describe('Unified AI Services', () => {
|
|||||||
const retryableError = new Error('Rate limit');
|
const retryableError = new Error('Rate limit');
|
||||||
mockGenerateAnthropicText
|
mockGenerateAnthropicText
|
||||||
.mockRejectedValueOnce(retryableError) // Fails once
|
.mockRejectedValueOnce(retryableError) // Fails once
|
||||||
.mockResolvedValue('Success after retry'); // Succeeds on retry
|
.mockResolvedValueOnce({
|
||||||
|
// Succeeds on retry
|
||||||
|
text: 'Success after retry',
|
||||||
|
usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 }
|
||||||
|
});
|
||||||
|
|
||||||
const params = { role: 'main', prompt: 'Retry success test' };
|
const params = { role: 'main', prompt: 'Retry success test' };
|
||||||
const result = await generateTextService(params);
|
const result = await generateTextService(params);
|
||||||
@@ -294,7 +311,10 @@ describe('Unified AI Services', () => {
|
|||||||
|
|
||||||
test('should use default project root or handle null if findProjectRoot returns null', async () => {
|
test('should use default project root or handle null if findProjectRoot returns null', async () => {
|
||||||
mockFindProjectRoot.mockReturnValue(null); // Simulate not finding root
|
mockFindProjectRoot.mockReturnValue(null); // Simulate not finding root
|
||||||
mockGenerateAnthropicText.mockResolvedValue('Response with no root');
|
mockGenerateAnthropicText.mockResolvedValue({
|
||||||
|
text: 'Response with no root',
|
||||||
|
usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 }
|
||||||
|
});
|
||||||
|
|
||||||
const params = { role: 'main', prompt: 'No root test' }; // No explicit root passed
|
const params = { role: 'main', prompt: 'No root test' }; // No explicit root passed
|
||||||
await generateTextService(params);
|
await generateTextService(params);
|
||||||
|
|||||||
Reference in New Issue
Block a user