feat(telemetry): Integrate usage telemetry for expand-task, fix return types
This commit integrates AI usage telemetry for the `expand-task` command/tool and resolves issues related to incorrect return type handling and logging.
Key Changes:
1. **Telemetry Integration for `expand-task` (Subtask 77.7):**
   - Applied the standard telemetry pattern to the `expandTask` core logic (`scripts/modules/task-manager/expand-task.js`) and the `expandTaskDirect` wrapper (`mcp-server/src/core/direct-functions/expand-task.js`).
   - AI service calls now pass `commandName` and `outputType`.
   - Core function returns `{ task, telemetryData }`.
   - Direct function correctly extracts `task` and passes `telemetryData` in the MCP response `data` field.
   - Telemetry summary is now displayed in the CLI output for the `expand` command.
2. **Fix AI Service Return Type Handling (`ai-services-unified.js`):**
   - Corrected the `_unifiedServiceRunner` function to properly handle the return objects from provider-specific functions (`generateText`, `generateObject`).
   - It now correctly extracts `providerResponse.text` or `providerResponse.object` into the `mainResult` field based on `serviceType`, resolving the "text.trim is not a function" error encountered during `expand-task`.
3. **Log Cleanup:**
   - Removed various redundant or excessive `console.log` statements across multiple files (as indicated by recent changes) to reduce noise and improve clarity, particularly for MCP interactions.
This commit is contained in:
@@ -193,13 +193,19 @@ export async function expandTaskDirect(args, log, context = {}) {
|
||||
if (!wasSilent) enableSilentMode();
|
||||
|
||||
// Call the core expandTask function with the wrapped logger and projectRoot
|
||||
const updatedTaskResult = await expandTask(
|
||||
const coreResult = await expandTask(
|
||||
tasksPath,
|
||||
taskId,
|
||||
numSubtasks,
|
||||
useResearch,
|
||||
additionalContext,
|
||||
{ mcpLog, session, projectRoot },
|
||||
{
|
||||
mcpLog,
|
||||
session,
|
||||
projectRoot,
|
||||
commandName: 'expand-task',
|
||||
outputType: 'mcp'
|
||||
},
|
||||
forceFlag
|
||||
);
|
||||
|
||||
@@ -215,16 +221,17 @@ export async function expandTaskDirect(args, log, context = {}) {
|
||||
? updatedTask.subtasks.length - subtasksCountBefore
|
||||
: 0;
|
||||
|
||||
// Return the result
|
||||
// Return the result, including telemetryData
|
||||
log.info(
|
||||
`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`
|
||||
);
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
task: updatedTask,
|
||||
task: coreResult.task,
|
||||
subtasksAdded,
|
||||
hasExistingSubtasks
|
||||
hasExistingSubtasks,
|
||||
telemetryData: coreResult.telemetryData
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
|
||||
@@ -16,7 +16,8 @@ import {
|
||||
getFallbackModelId,
|
||||
getParametersForRole,
|
||||
getUserId,
|
||||
MODEL_MAP
|
||||
MODEL_MAP,
|
||||
getDebugFlag
|
||||
} from './config-manager.js';
|
||||
import { log, resolveEnvVariable, isSilentMode } from './utils.js';
|
||||
|
||||
@@ -298,12 +299,14 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
outputType,
|
||||
...restApiParams
|
||||
} = params;
|
||||
log('info', `${serviceType}Service called`, {
|
||||
role: initialRole,
|
||||
commandName,
|
||||
outputType,
|
||||
projectRoot
|
||||
});
|
||||
if (getDebugFlag()) {
|
||||
log('info', `${serviceType}Service called`, {
|
||||
role: initialRole,
|
||||
commandName,
|
||||
outputType,
|
||||
projectRoot
|
||||
});
|
||||
}
|
||||
|
||||
// Determine the effective project root (passed in or detected if needed by config getters)
|
||||
const { findProjectRoot: detectProjectRoot } = await import('./utils.js'); // Dynamically import if needed
|
||||
@@ -333,7 +336,7 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
|
||||
for (const currentRole of sequence) {
|
||||
let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
|
||||
let aiCallResult;
|
||||
let providerResponse;
|
||||
let telemetryData = null;
|
||||
|
||||
try {
|
||||
@@ -456,7 +459,7 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
};
|
||||
|
||||
// 6. Attempt the call with retries
|
||||
aiCallResult = await _attemptProviderCallWithRetries(
|
||||
providerResponse = await _attemptProviderCallWithRetries(
|
||||
providerApiFn,
|
||||
callParams,
|
||||
providerName,
|
||||
@@ -464,36 +467,51 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
currentRole
|
||||
);
|
||||
|
||||
log('info', `${serviceType}Service succeeded using role: ${currentRole}`);
|
||||
|
||||
// --- Log Telemetry & Capture Data ---
|
||||
// TODO: Add telemetry logic gate in case user doesn't accept telemetry
|
||||
if (userId && aiCallResult && aiCallResult.usage) {
|
||||
// Use providerResponse which contains the usage data directly for text/object
|
||||
if (userId && providerResponse && providerResponse.usage) {
|
||||
try {
|
||||
telemetryData = await logAiUsage({
|
||||
userId,
|
||||
commandName,
|
||||
providerName,
|
||||
modelId,
|
||||
inputTokens: aiCallResult.usage.inputTokens,
|
||||
outputTokens: aiCallResult.usage.outputTokens,
|
||||
inputTokens: providerResponse.usage.inputTokens,
|
||||
outputTokens: providerResponse.usage.outputTokens,
|
||||
outputType
|
||||
});
|
||||
} catch (telemetryError) {
|
||||
// logAiUsage already logs its own errors and returns null on failure
|
||||
// No need to log again here, telemetryData will remain null
|
||||
}
|
||||
} else if (userId && aiCallResult && !aiCallResult.usage) {
|
||||
} else if (userId && providerResponse && !providerResponse.usage) {
|
||||
log(
|
||||
'warn',
|
||||
`Cannot log telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data.`
|
||||
`Cannot log telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data. (May be expected for streams)`
|
||||
);
|
||||
}
|
||||
// --- End Log Telemetry ---
|
||||
|
||||
// Return a composite object including the main AI result and telemetry data
|
||||
// --- Extract the correct main result based on serviceType ---
|
||||
let finalMainResult;
|
||||
if (serviceType === 'generateText') {
|
||||
finalMainResult = providerResponse.text;
|
||||
} else if (serviceType === 'generateObject') {
|
||||
finalMainResult = providerResponse.object;
|
||||
} else if (serviceType === 'streamText') {
|
||||
finalMainResult = providerResponse; // Return the whole stream object
|
||||
} else {
|
||||
log(
|
||||
'error',
|
||||
`Unknown serviceType in _unifiedServiceRunner: ${serviceType}`
|
||||
);
|
||||
finalMainResult = providerResponse; // Default to returning the whole object as fallback
|
||||
}
|
||||
// --- End Main Result Extraction ---
|
||||
|
||||
// Return a composite object including the extracted main result and telemetry data
|
||||
return {
|
||||
mainResult: aiCallResult,
|
||||
mainResult: finalMainResult,
|
||||
telemetryData: telemetryData
|
||||
};
|
||||
} catch (error) {
|
||||
@@ -653,7 +671,9 @@ async function logAiUsage({
|
||||
currency // Add currency to the telemetry data
|
||||
};
|
||||
|
||||
log('info', 'AI Usage Telemetry:', telemetryData);
|
||||
if (getDebugFlag()) {
|
||||
log('info', 'AI Usage Telemetry:', telemetryData);
|
||||
}
|
||||
|
||||
// TODO (Subtask 77.2): Send telemetryData securely to the external endpoint.
|
||||
|
||||
|
||||
@@ -209,7 +209,6 @@ async function analyzeTaskComplexity(options, context = {}) {
|
||||
|
||||
try {
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
reportLog(`Using AI service with role: ${role}`, 'info');
|
||||
|
||||
fullResponse = await generateTextService({
|
||||
prompt,
|
||||
@@ -219,11 +218,6 @@ async function analyzeTaskComplexity(options, context = {}) {
|
||||
projectRoot
|
||||
});
|
||||
|
||||
reportLog(
|
||||
'Successfully received text response via AI service',
|
||||
'success'
|
||||
);
|
||||
|
||||
// --- Stop Loading Indicator (Unchanged) ---
|
||||
if (loadingIndicator) {
|
||||
stopLoadingIndicator(loadingIndicator);
|
||||
|
||||
@@ -4,7 +4,11 @@ import { z } from 'zod';
|
||||
|
||||
import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';
|
||||
|
||||
import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
|
||||
import {
|
||||
startLoadingIndicator,
|
||||
stopLoadingIndicator,
|
||||
displayAiUsageSummary
|
||||
} from '../ui.js';
|
||||
|
||||
import { generateTextService } from '../ai-services-unified.js';
|
||||
|
||||
@@ -182,9 +186,16 @@ function parseSubtasksFromText(
|
||||
parentTaskId,
|
||||
logger
|
||||
) {
|
||||
logger.info('Attempting to parse subtasks object from text response...');
|
||||
// Add a type check for 'text' before attempting to call .trim()
|
||||
if (typeof text !== 'string') {
|
||||
logger.error(
|
||||
`AI response text is not a string. Received type: ${typeof text}, Value: ${text}`
|
||||
);
|
||||
throw new Error('AI response text is not a string.');
|
||||
}
|
||||
|
||||
if (!text || text.trim() === '') {
|
||||
throw new Error('AI response text is empty.');
|
||||
throw new Error('AI response text is empty after trimming.'); // Clarified error message
|
||||
}
|
||||
|
||||
let cleanedResponse = text.trim();
|
||||
@@ -196,14 +207,12 @@ function parseSubtasksFromText(
|
||||
);
|
||||
if (codeBlockMatch) {
|
||||
cleanedResponse = codeBlockMatch[1].trim();
|
||||
logger.info('Extracted JSON content from Markdown code block.');
|
||||
} else {
|
||||
// 2. If no code block, find first '{' and last '}' for the object
|
||||
const firstBrace = cleanedResponse.indexOf('{');
|
||||
const lastBrace = cleanedResponse.lastIndexOf('}');
|
||||
if (firstBrace !== -1 && lastBrace > firstBrace) {
|
||||
cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
|
||||
logger.info('Extracted content between first { and last }.');
|
||||
} else {
|
||||
logger.warn(
|
||||
'Response does not appear to contain a JSON object structure. Parsing raw response.'
|
||||
@@ -243,9 +252,6 @@ function parseSubtasksFromText(
|
||||
}
|
||||
const parsedSubtasks = parsedObject.subtasks; // Extract the array
|
||||
|
||||
logger.info(
|
||||
`Successfully parsed ${parsedSubtasks.length} potential subtasks from the object.`
|
||||
);
|
||||
if (expectedCount && parsedSubtasks.length !== expectedCount) {
|
||||
logger.warn(
|
||||
`Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`
|
||||
@@ -336,9 +342,13 @@ async function expandTask(
|
||||
context = {},
|
||||
force = false
|
||||
) {
|
||||
const { session, mcpLog } = context;
|
||||
const { session, mcpLog, projectRoot: contextProjectRoot } = context;
|
||||
const outputFormat = mcpLog ? 'json' : 'text';
|
||||
|
||||
// Determine projectRoot: Use from context if available, otherwise derive from tasksPath
|
||||
const projectRoot =
|
||||
contextProjectRoot || path.dirname(path.dirname(tasksPath));
|
||||
|
||||
// Use mcpLog if available, otherwise use the default console log wrapper
|
||||
const logger = mcpLog || {
|
||||
info: (msg) => !isSilentMode() && log('info', msg),
|
||||
@@ -381,7 +391,6 @@ async function expandTask(
|
||||
let complexityReasoningContext = '';
|
||||
let systemPrompt; // Declare systemPrompt here
|
||||
|
||||
const projectRoot = path.dirname(path.dirname(tasksPath));
|
||||
const complexityReportPath = path.join(
|
||||
projectRoot,
|
||||
'scripts/task-complexity-report.json'
|
||||
@@ -488,28 +497,27 @@ async function expandTask(
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
`Generating ${finalSubtaskCount} subtasks...`
|
||||
`Generating ${finalSubtaskCount} subtasks...\n`
|
||||
);
|
||||
}
|
||||
|
||||
let responseText = '';
|
||||
let aiServiceResponse = null;
|
||||
|
||||
try {
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
logger.info(`Using AI service with role: ${role}`);
|
||||
|
||||
// Call generateTextService with the determined prompts
|
||||
responseText = await generateTextService({
|
||||
// Call generateTextService with the determined prompts and telemetry params
|
||||
aiServiceResponse = await generateTextService({
|
||||
prompt: promptContent,
|
||||
systemPrompt: systemPrompt, // Use the determined system prompt
|
||||
systemPrompt: systemPrompt,
|
||||
role,
|
||||
session,
|
||||
projectRoot
|
||||
projectRoot,
|
||||
commandName: 'expand-task',
|
||||
outputType: outputFormat
|
||||
});
|
||||
logger.info(
|
||||
'Successfully received text response from AI service',
|
||||
'success'
|
||||
);
|
||||
responseText = aiServiceResponse.mainResult;
|
||||
|
||||
// Parse Subtasks
|
||||
generatedSubtasks = parseSubtasksFromText(
|
||||
@@ -550,14 +558,23 @@ async function expandTask(
|
||||
// --- End Change: Append instead of replace ---
|
||||
|
||||
data.tasks[taskIndex] = task; // Assign the modified task back
|
||||
logger.info(`Writing updated tasks to ${tasksPath}`);
|
||||
writeJSON(tasksPath, data);
|
||||
logger.info(`Generating individual task files...`);
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
logger.info(`Task files generated.`);
|
||||
// --- End Task Update & File Writing ---
|
||||
|
||||
return task; // Return the updated task object
|
||||
// Display AI Usage Summary for CLI
|
||||
if (
|
||||
outputFormat === 'text' &&
|
||||
aiServiceResponse &&
|
||||
aiServiceResponse.telemetryData
|
||||
) {
|
||||
displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
|
||||
}
|
||||
|
||||
// Return the updated task object AND telemetry data
|
||||
return {
|
||||
task,
|
||||
telemetryData: aiServiceResponse?.telemetryData
|
||||
};
|
||||
} catch (error) {
|
||||
// Catches errors from file reading, parsing, AI call etc.
|
||||
logger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');
|
||||
|
||||
@@ -19,8 +19,6 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
|
||||
// Determine if we're in MCP mode by checking for mcpLog
|
||||
const isMcpMode = !!options?.mcpLog;
|
||||
|
||||
log('info', `Preparing to regenerate task files in ${tasksPath}`);
|
||||
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error(`No valid tasks found in ${tasksPath}`);
|
||||
@@ -31,7 +29,7 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
|
||||
fs.mkdirSync(outputDir, { recursive: true });
|
||||
}
|
||||
|
||||
log('info', `Found ${data.tasks.length} tasks to regenerate`);
|
||||
log('info', `Preparing to regenerate ${data.tasks.length} task files`);
|
||||
|
||||
// Validate and fix dependencies before generating files
|
||||
log('info', `Validating and fixing dependencies`);
|
||||
|
||||
@@ -388,7 +388,6 @@ The changes described in the prompt should be thoughtfully applied to make the t
|
||||
try {
|
||||
// --- Call Unified AI Service (generateTextService) ---
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
report('info', `Using AI service with role: ${role}`);
|
||||
|
||||
responseText = await generateTextService({
|
||||
prompt: userPrompt,
|
||||
@@ -397,7 +396,6 @@ The changes described in the prompt should be thoughtfully applied to make the t
|
||||
session,
|
||||
projectRoot
|
||||
});
|
||||
report('success', 'Successfully received text response from AI service');
|
||||
// --- End AI Service Call ---
|
||||
} catch (error) {
|
||||
// Catch errors from generateTextService
|
||||
|
||||
@@ -361,8 +361,6 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
|
||||
try {
|
||||
// --- Call Unified AI Service ---
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
if (isMCP) logFn.info(`Using AI service with role: ${role}`);
|
||||
else logFn('info', `Using AI service with role: ${role}`);
|
||||
|
||||
responseText = await generateTextService({
|
||||
prompt: userPrompt,
|
||||
@@ -371,9 +369,6 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
|
||||
session,
|
||||
projectRoot
|
||||
});
|
||||
if (isMCP) logFn.info('Successfully received text response');
|
||||
else
|
||||
logFn('success', 'Successfully received text response via AI service');
|
||||
// --- End AI Service Call ---
|
||||
} catch (error) {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
|
||||
@@ -84,13 +84,13 @@ For each command category, we'll need to:
|
||||
- Establish validation procedures to ensure data accuracy
|
||||
</info added on 2025-05-07T17:28:57.361Z>
|
||||
|
||||
## 2. Implement secure telemetry transmission [pending]
|
||||
## 2. Implement secure telemetry transmission [deferred]
|
||||
### Dependencies: 77.1
|
||||
### Description: Create a secure mechanism to transmit telemetry data to the external analytics endpoint
|
||||
### Details:
|
||||
Implement HTTPS POST request functionality to securely send the telemetry payload to the closed-source analytics API. Include proper encryption in transit using TLS. Implement retry logic and graceful fallback mechanisms for handling transmission failures due to connectivity issues.
|
||||
|
||||
## 3. Develop user consent and privacy notice system [pending]
|
||||
## 3. Develop user consent and privacy notice system [deferred]
|
||||
### Dependencies: None
|
||||
### Description: Create a privacy notice and explicit consent mechanism during Taskmaster setup
|
||||
### Details:
|
||||
|
||||
113
tasks/task_081.txt
Normal file
113
tasks/task_081.txt
Normal file
@@ -0,0 +1,113 @@
|
||||
# Task ID: 81
|
||||
# Title: Task #81: Implement Comprehensive Expansion Telemetry System with Aggregated Metrics
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Expand the existing telemetry system to capture additional metrics about feature usage, performance, and user behavior patterns, then implement aggregation and batching of telemetry data to minimize network overhead.
|
||||
# Details:
|
||||
This task builds upon the existing telemetry infrastructure (Tasks #77 and #80) to provide more comprehensive insights into how users interact with the application.
|
||||
|
||||
Key implementation details:
|
||||
1. Identify and implement additional telemetry data points:
|
||||
- Command execution frequency and timing metrics
|
||||
- Feature usage patterns (which commands/features are most/least used)
|
||||
- Performance metrics (execution time, memory usage, etc.)
|
||||
- Error rates and types
|
||||
- Session duration and activity patterns
|
||||
- System environment information (OS, Node version, etc.)
|
||||
|
||||
2. Implement a local telemetry aggregation system:
|
||||
- Create a local storage mechanism to temporarily hold telemetry data
|
||||
- Implement data aggregation to combine similar events and reduce payload size
|
||||
- Add batching logic to send multiple telemetry events in a single network request
|
||||
- Implement configurable batch sizes and transmission intervals
|
||||
|
||||
3. Add privacy-preserving mechanisms:
|
||||
- Ensure all personally identifiable information is properly anonymized
|
||||
- Implement data minimization principles (only collect what's necessary)
|
||||
- Add user-configurable telemetry levels (basic, enhanced, full)
|
||||
- Provide clear documentation on what data is collected and how it's used
|
||||
|
||||
4. Enhance the analytics backend integration:
|
||||
- Update the API endpoints to handle the new telemetry data types
|
||||
- Implement proper error handling and retry logic for failed transmissions
|
||||
- Add compression for telemetry payloads to reduce bandwidth usage
|
||||
|
||||
5. Add telemetry debugging capabilities:
|
||||
- Create a developer mode to view telemetry data being collected
|
||||
- Implement logging of telemetry events (when in debug mode)
|
||||
- Add command to manually trigger telemetry transmission
|
||||
|
||||
# Test Strategy:
|
||||
The testing strategy for the expanded telemetry system should be comprehensive and cover all aspects of the implementation:
|
||||
|
||||
1. Unit Tests:
|
||||
- Test each telemetry collection function in isolation
|
||||
- Verify proper anonymization of sensitive data
|
||||
- Test aggregation logic with various input scenarios
|
||||
- Validate batching mechanisms with different batch sizes
|
||||
- Test retry logic and error handling
|
||||
|
||||
2. Integration Tests:
|
||||
- Verify telemetry data is properly stored locally before transmission
|
||||
- Test the complete flow from data collection to transmission
|
||||
- Validate that the analytics backend correctly processes the new data types
|
||||
- Test different network conditions (slow, intermittent, offline)
|
||||
- Verify proper handling of transmission failures
|
||||
|
||||
3. End-to-End Tests:
|
||||
- Create automated E2E tests that perform various user actions and verify telemetry is captured
|
||||
- Test with the actual analytics backend to ensure compatibility
|
||||
- Verify that aggregated data accurately represents the performed actions
|
||||
|
||||
4. Performance Tests:
|
||||
- Measure the performance impact of the expanded telemetry system
|
||||
- Test with large volumes of telemetry data to ensure efficient handling
|
||||
- Verify memory usage remains within acceptable limits
|
||||
- Test CPU utilization during telemetry collection and transmission
|
||||
|
||||
5. Manual Testing:
|
||||
- Verify telemetry debug mode correctly displays collected data
|
||||
- Test different telemetry level configurations
|
||||
- Manually verify the accuracy of collected metrics
|
||||
|
||||
6. Privacy Compliance Testing:
|
||||
- Verify no PII is transmitted without proper anonymization
|
||||
- Test opt-out functionality works correctly
|
||||
- Ensure telemetry levels properly restrict data collection as configured
|
||||
|
||||
7. Regression Testing:
|
||||
- Verify existing functionality continues to work with the expanded telemetry
|
||||
- Ensure backward compatibility with the existing analytics backend
|
||||
|
||||
# Subtasks:
|
||||
## 1. Implement Additional Telemetry Data Collection Points [pending]
|
||||
### Dependencies: None
|
||||
### Description: Extend the telemetry system to capture new metrics including command execution frequency, feature usage patterns, performance metrics, error rates, session data, and system environment information.
|
||||
### Details:
|
||||
Create new telemetry event types and collection points throughout the codebase. Implement hooks in the command execution pipeline to track timing and frequency. Add performance monitoring for key operations using high-resolution timers. Capture system environment data at startup. Implement error tracking that records error types and frequencies. Add session tracking with start/end events and periodic heartbeats.
|
||||
|
||||
## 2. Build Local Telemetry Storage and Aggregation System [pending]
|
||||
### Dependencies: None
|
||||
### Description: Create a local storage mechanism to temporarily hold telemetry data with aggregation capabilities to combine similar events and reduce payload size.
|
||||
### Details:
|
||||
Implement a persistent local store using SQLite or similar lightweight database. Create data schemas for different telemetry types. Develop aggregation functions that can combine similar events (e.g., multiple instances of the same command) into summary statistics. Implement data retention policies to prevent excessive storage usage. Add serialization/deserialization for telemetry objects.
|
||||
|
||||
## 3. Implement Batching and Transmission Logic [pending]
|
||||
### Dependencies: None
|
||||
### Description: Add batching capabilities to group multiple telemetry events into single network requests with configurable batch sizes and transmission intervals.
|
||||
### Details:
|
||||
Create a background service that periodically checks for pending telemetry data. Implement configurable settings for batch size, transmission frequency, and retry behavior. Add compression for telemetry payloads using gzip or similar. Implement network-aware transmission that detects connectivity and adjusts accordingly. Add retry logic with exponential backoff for failed transmissions. Create hooks for manual transmission triggering.
|
||||
|
||||
## 4. Implement Privacy Controls and User Configuration [pending]
|
||||
### Dependencies: None
|
||||
### Description: Add privacy-preserving mechanisms including data anonymization, minimization principles, and user-configurable telemetry levels.
|
||||
### Details:
|
||||
Create a telemetry sanitization layer that removes or hashes PII before storage. Implement three telemetry levels (basic, enhanced, full) with clear documentation of what each includes. Add user settings UI for controlling telemetry levels. Create a first-run experience that explains telemetry and requests user consent. Implement runtime filtering of telemetry events based on user settings.
|
||||
|
||||
## 5. Add Telemetry Debugging and Monitoring Capabilities [pending]
|
||||
### Dependencies: None
|
||||
### Description: Create developer tools for debugging telemetry including a developer mode to view collected data, logging capabilities, and manual transmission controls.
|
||||
### Details:
|
||||
Implement a developer console command to toggle telemetry debug mode. Create a UI panel that displays pending and recently sent telemetry data when in debug mode. Add detailed logging of telemetry events to the application log when debugging is enabled. Create commands to manually trigger transmission or clear pending telemetry. Add telemetry health metrics to monitor system performance impact.
|
||||
|
||||
@@ -4028,7 +4028,7 @@
|
||||
1
|
||||
],
|
||||
"details": "Implement HTTPS POST request functionality to securely send the telemetry payload to the closed-source analytics API. Include proper encryption in transit using TLS. Implement retry logic and graceful fallback mechanisms for handling transmission failures due to connectivity issues.",
|
||||
"status": "pending",
|
||||
"status": "deferred",
|
||||
"testStrategy": "Test with mock endpoints to verify secure transmission and proper handling of various response scenarios"
|
||||
},
|
||||
{
|
||||
@@ -4037,7 +4037,7 @@
|
||||
"description": "Create a privacy notice and explicit consent mechanism during Taskmaster setup",
|
||||
"dependencies": [],
|
||||
"details": "Design and implement a clear privacy notice explaining what data is collected and how it's used. Create a user consent prompt during initial installation/setup that requires explicit opt-in. Store the consent status in the .taskmasterconfig file and respect this setting throughout the application.",
|
||||
"status": "pending",
|
||||
"status": "deferred",
|
||||
"testStrategy": "Test the consent flow to ensure users can opt in/out and that their preference is properly stored and respected"
|
||||
},
|
||||
{
|
||||
@@ -4139,6 +4139,63 @@
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"subtasks": []
|
||||
},
|
||||
{
|
||||
"id": 81,
|
||||
"title": "Task #81: Implement Comprehensive Expansion Telemetry System with Aggregated Metrics",
|
||||
"description": "Expand the existing telemetry system to capture additional metrics about feature usage, performance, and user behavior patterns, then implement aggregation and batching of telemetry data to minimize network overhead.",
|
||||
"details": "This task builds upon the existing telemetry infrastructure (Tasks #77 and #80) to provide more comprehensive insights into how users interact with the application.\n\nKey implementation details:\n1. Identify and implement additional telemetry data points:\n - Command execution frequency and timing metrics\n - Feature usage patterns (which commands/features are most/least used)\n - Performance metrics (execution time, memory usage, etc.)\n - Error rates and types\n - Session duration and activity patterns\n - System environment information (OS, Node version, etc.)\n\n2. Implement a local telemetry aggregation system:\n - Create a local storage mechanism to temporarily hold telemetry data\n - Implement data aggregation to combine similar events and reduce payload size\n - Add batching logic to send multiple telemetry events in a single network request\n - Implement configurable batch sizes and transmission intervals\n\n3. Add privacy-preserving mechanisms:\n - Ensure all personally identifiable information is properly anonymized\n - Implement data minimization principles (only collect what's necessary)\n - Add user-configurable telemetry levels (basic, enhanced, full)\n - Provide clear documentation on what data is collected and how it's used\n\n4. Enhance the analytics backend integration:\n - Update the API endpoints to handle the new telemetry data types\n - Implement proper error handling and retry logic for failed transmissions\n - Add compression for telemetry payloads to reduce bandwidth usage\n\n5. Add telemetry debugging capabilities:\n - Create a developer mode to view telemetry data being collected\n - Implement logging of telemetry events (when in debug mode)\n - Add command to manually trigger telemetry transmission",
|
||||
"testStrategy": "The testing strategy for the expanded telemetry system should be comprehensive and cover all aspects of the implementation:\n\n1. Unit Tests:\n - Test each telemetry collection function in isolation\n - Verify proper anonymization of sensitive data\n - Test aggregation logic with various input scenarios\n - Validate batching mechanisms with different batch sizes\n - Test retry logic and error handling\n\n2. Integration Tests:\n - Verify telemetry data is properly stored locally before transmission\n - Test the complete flow from data collection to transmission\n - Validate that the analytics backend correctly processes the new data types\n - Test different network conditions (slow, intermittent, offline)\n - Verify proper handling of transmission failures\n\n3. End-to-End Tests:\n - Create automated E2E tests that perform various user actions and verify telemetry is captured\n - Test with the actual analytics backend to ensure compatibility\n - Verify that aggregated data accurately represents the performed actions\n\n4. Performance Tests:\n - Measure the performance impact of the expanded telemetry system\n - Test with large volumes of telemetry data to ensure efficient handling\n - Verify memory usage remains within acceptable limits\n - Test CPU utilization during telemetry collection and transmission\n\n5. Manual Testing:\n - Verify telemetry debug mode correctly displays collected data\n - Test different telemetry level configurations\n - Manually verify the accuracy of collected metrics\n\n6. Privacy Compliance Testing:\n - Verify no PII is transmitted without proper anonymization\n - Test opt-out functionality works correctly\n - Ensure telemetry levels properly restrict data collection as configured\n\n7. Regression Testing:\n - Verify existing functionality continues to work with the expanded telemetry\n - Ensure backward compatibility with the existing analytics backend",
|
||||
"status": "pending",
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Implement Additional Telemetry Data Collection Points",
|
||||
"description": "Extend the telemetry system to capture new metrics including command execution frequency, feature usage patterns, performance metrics, error rates, session data, and system environment information.",
|
||||
"dependencies": [],
|
||||
"details": "Create new telemetry event types and collection points throughout the codebase. Implement hooks in the command execution pipeline to track timing and frequency. Add performance monitoring for key operations using high-resolution timers. Capture system environment data at startup. Implement error tracking that records error types and frequencies. Add session tracking with start/end events and periodic heartbeats.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Create unit tests for each new telemetry point. Implement integration tests that verify telemetry is captured during normal application usage. Add mock services to verify data format correctness."
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Build Local Telemetry Storage and Aggregation System",
|
||||
"description": "Create a local storage mechanism to temporarily hold telemetry data with aggregation capabilities to combine similar events and reduce payload size.",
|
||||
"dependencies": [],
|
||||
"details": "Implement a persistent local store using SQLite or similar lightweight database. Create data schemas for different telemetry types. Develop aggregation functions that can combine similar events (e.g., multiple instances of the same command) into summary statistics. Implement data retention policies to prevent excessive storage usage. Add serialization/deserialization for telemetry objects.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Test storage persistence across application restarts. Verify aggregation logic correctly combines similar events. Benchmark storage performance with large volumes of telemetry data."
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Implement Batching and Transmission Logic",
|
||||
"description": "Add batching capabilities to group multiple telemetry events into single network requests with configurable batch sizes and transmission intervals.",
|
||||
"dependencies": [],
|
||||
"details": "Create a background service that periodically checks for pending telemetry data. Implement configurable settings for batch size, transmission frequency, and retry behavior. Add compression for telemetry payloads using gzip or similar. Implement network-aware transmission that detects connectivity and adjusts accordingly. Add retry logic with exponential backoff for failed transmissions. Create hooks for manual transmission triggering.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Test batching with various configurations. Verify compression reduces payload size. Simulate network failures to test retry logic. Measure transmission efficiency with different batch sizes."
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Implement Privacy Controls and User Configuration",
|
||||
"description": "Add privacy-preserving mechanisms including data anonymization, minimization principles, and user-configurable telemetry levels.",
|
||||
"dependencies": [],
|
||||
"details": "Create a telemetry sanitization layer that removes or hashes PII before storage. Implement three telemetry levels (basic, enhanced, full) with clear documentation of what each includes. Add user settings UI for controlling telemetry levels. Create a first-run experience that explains telemetry and requests user consent. Implement runtime filtering of telemetry events based on user settings.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Verify PII is properly anonymized in all telemetry events. Test that changing telemetry levels correctly adjusts what data is collected. Ensure user preferences persist across application updates."
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Add Telemetry Debugging and Monitoring Capabilities",
|
||||
"description": "Create developer tools for debugging telemetry including a developer mode to view collected data, logging capabilities, and manual transmission controls.",
|
||||
"dependencies": [],
|
||||
"details": "Implement a developer console command to toggle telemetry debug mode. Create a UI panel that displays pending and recently sent telemetry data when in debug mode. Add detailed logging of telemetry events to the application log when debugging is enabled. Create commands to manually trigger transmission or clear pending telemetry. Add telemetry health metrics to monitor system performance impact.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Verify debug mode correctly displays all telemetry events. Test manual transmission triggers. Ensure logging provides sufficient detail for debugging without excessive verbosity."
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
Reference in New Issue
Block a user