#!/usr/bin/env node

/**
 * Claude Task Master CLI
 * Main entry point for globally installed package
 */

import { fileURLToPath } from 'url';
import { dirname, resolve } from 'path';
import { createRequire } from 'module';
import { spawn } from 'child_process';
import { Command } from 'commander';
import { displayHelp, displayBanner } from '../scripts/modules/ui.js';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const require = createRequire(import.meta.url);

// Get package information
const packageJson = require('../package.json');
const version = packageJson.version;

// Get paths to script files
const devScriptPath = resolve(__dirname, '../scripts/dev.js');
const initScriptPath = resolve(__dirname, '../scripts/init.js');

// Helper function to run dev.js with arguments
function runDevScript(args) {
  const child = spawn('node', [devScriptPath, ...args], {
    stdio: 'inherit',
    cwd: process.cwd()
  });

  child.on('close', (code) => {
    process.exit(code);
  });
}
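
// Example: `task-master list --status pending` ends up running
// `node <packageRoot>/scripts/dev.js list --status pending` in the caller's
// working directory, with the child's exit code mirrored back to the shell.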

// Set up the command-line interface
const program = new Command();

program
  .name('task-master')
  .description('Claude Task Master CLI')
  .version(version)
  .addHelpText('afterAll', () => {
    // Use the same help display function as dev.js for consistency
    displayHelp();
    return ''; // Return an empty string so commander appends no extra text
  });
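
// Note: 'afterAll' help text also applies to subcommands, so per-command
// help (e.g. `task-master list --help`) shows the same summary.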

// Add custom help option to directly call our help display
program.helpOption('-h, --help', 'Display help information');
program.on('--help', () => {
  displayHelp();
});

program
  .command('init')
  .description('Initialize a new project')
  .option('-y, --yes', 'Skip prompts and use default values')
  .option('-n, --name <name>', 'Project name')
  .option('-d, --description <description>', 'Project description')
  .option('-v, --version <version>', 'Project version')
  .option('-a, --author <author>', 'Author name')
  .option('--skip-install', 'Skip installing dependencies')
  .option('--dry-run', 'Show what would be done without making changes')
  .action((options) => {
    // Pass through any options to the init script. Commander stores
    // multi-word flags in camelCase (skipInstall, dryRun), so filter on the
    // camelCase keys and map them back to their kebab-case CLI flags.
    const args = ['yes', 'name', 'description', 'version', 'author', 'skipInstall', 'dryRun']
      .filter(opt => options[opt])
      .map(opt => {
        const flag = opt.replace(/[A-Z]/g, c => `-${c.toLowerCase()}`);
        if (opt === 'yes' || opt === 'skipInstall' || opt === 'dryRun') {
          return `--${flag}`;
        }
        return `--${flag}=${options[opt]}`;
      });

    const child = spawn('node', [initScriptPath, ...args], {
      stdio: 'inherit',
      cwd: process.cwd()
    });

    child.on('close', (code) => {
      process.exit(code);
    });
  });

program
  .command('dev')
  .description('Run the dev.js script')
  .allowUnknownOption(true)
  .action(() => {
    const args = process.argv.slice(process.argv.indexOf('dev') + 1);
    runDevScript(args);
  });
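
// Note: slicing argv at the first literal 'dev' assumes no earlier argument
// shares that value; everything after it is forwarded to dev.js verbatim.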

// Add shortcuts for common dev.js commands
program
  .command('list')
  .description('List all tasks')
  .option('-s, --status <status>', 'Filter by status')
  .option('--with-subtasks', 'Show subtasks for each task')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .action((options) => {
    const args = ['list'];
    if (options.status) args.push('--status', options.status);
    if (options.withSubtasks) args.push('--with-subtasks');
    if (options.file) args.push('--file', options.file);
    runDevScript(args);
  });

program
  .command('next')
  .description('Show the next task to work on')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .action((options) => {
    const args = ['next'];
    if (options.file) args.push('--file', options.file);
    runDevScript(args);
  });

program
  .command('generate')
  .description('Generate task files')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-o, --output <dir>', 'Output directory', 'tasks')
  .action((options) => {
    const args = ['generate'];
    if (options.file) args.push('--file', options.file);
    if (options.output) args.push('--output', options.output);
    runDevScript(args);
  });

// Add all other commands from dev.js
program
  .command('parse-prd')
  .description('Parse a PRD file and generate tasks')
  .argument('[file]', 'Path to the PRD file')
  .option('-o, --output <file>', 'Output file path', 'tasks/tasks.json')
  .option('-n, --num-tasks <number>', 'Number of tasks to generate', '10')
  .action((file, options) => {
    const args = ['parse-prd'];
    if (file) args.push(file);
    if (options.output) args.push('--output', options.output);
    if (options.numTasks) args.push('--num-tasks', options.numTasks);
    runDevScript(args);
  });
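
// Example: `task-master parse-prd <prd-file> --num-tasks 15` forwards to
// `dev.js parse-prd <prd-file> --num-tasks 15`.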

program
  .command('update')
  .description('Update tasks based on new information or implementation changes')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('--from <id>', 'Task ID to start updating from', '1')
  .option('-p, --prompt <text>', 'Prompt explaining the changes or new context (required)')
  .option('-r, --research', 'Use Perplexity AI for research-backed task updates')
  .action((options) => {
    const args = ['update'];
    if (options.file) args.push('--file', options.file);
    if (options.from) args.push('--from', options.from);
    if (options.prompt) args.push('--prompt', options.prompt);
    if (options.research) args.push('--research');
    runDevScript(args);
  });
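
// Note: --prompt is documented as "(required)" but declared with .option()
// rather than .requiredOption(), so enforcement presumably happens in dev.js
// (the add-task command below follows the same pattern).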

program
  .command('set-status')
  .description('Set the status of a task')
  .option('-i, --id <id>', 'Task ID (can be comma-separated for multiple tasks)')
  .option('-s, --status <status>', 'New status (todo, in-progress, review, done)')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .action((options) => {
    const args = ['set-status'];
    if (options.id) args.push('--id', options.id);
    if (options.status) args.push('--status', options.status);
    if (options.file) args.push('--file', options.file);
    runDevScript(args);
  });

program
  .command('expand')
  .description('Break down tasks into detailed subtasks')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-i, --id <id>', 'Task ID to expand')
  .option('-a, --all', 'Expand all tasks')
  .option('-n, --num <number>', 'Number of subtasks to generate')
  .option('--research', 'Enable Perplexity AI for research-backed subtask generation')
  .option('-p, --prompt <text>', 'Additional context to guide subtask generation')
  .option('--force', 'Force regeneration of subtasks for tasks that already have them')
  .action((options) => {
    const args = ['expand'];
    if (options.file) args.push('--file', options.file);
    if (options.id) args.push('--id', options.id);
    if (options.all) args.push('--all');
    if (options.num) args.push('--num', options.num);
    if (options.research) args.push('--research');
    if (options.prompt) args.push('--prompt', options.prompt);
    if (options.force) args.push('--force');
    runDevScript(args);
  });
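
// Example: `task-master expand --id 3 --num 5 --research` asks dev.js to
// break task 3 into five research-backed subtasks.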

program
  .command('analyze-complexity')
  .description('Analyze tasks and generate complexity-based expansion recommendations')
  .option('-o, --output <file>', 'Output file path for the report', 'scripts/task-complexity-report.json')
  .option('-m, --model <model>', 'LLM model to use for analysis')
  .option('-t, --threshold <number>', 'Minimum complexity score to recommend expansion (1-10)', '5')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-r, --research', 'Use Perplexity AI for research-backed complexity analysis')
  .action((options) => {
    const args = ['analyze-complexity'];
    if (options.output) args.push('--output', options.output);
    if (options.model) args.push('--model', options.model);
    if (options.threshold) args.push('--threshold', options.threshold);
    if (options.file) args.push('--file', options.file);
    if (options.research) args.push('--research');
    runDevScript(args);
  });

program
  .command('clear-subtasks')
  .description('Clear subtasks from specified tasks')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-i, --id <ids>', 'Task IDs (comma-separated) to clear subtasks from')
  .option('--all', 'Clear subtasks from all tasks')
  .action((options) => {
    const args = ['clear-subtasks'];
    if (options.file) args.push('--file', options.file);
    if (options.id) args.push('--id', options.id);
    if (options.all) args.push('--all');
    runDevScript(args);
  });

program
  .command('add-task')
  .description('Add a new task to tasks.json using AI')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-p, --prompt <text>', 'Description of the task to add (required)')
  .option('-d, --dependencies <ids>', 'Comma-separated list of task IDs this task depends on')
  .option('--priority <priority>', 'Task priority (high, medium, low)', 'medium')
  .action((options) => {
    const args = ['add-task'];
    if (options.file) args.push('--file', options.file);
    if (options.prompt) args.push('--prompt', options.prompt);
    if (options.dependencies) args.push('--dependencies', options.dependencies);
    if (options.priority) args.push('--priority', options.priority);
    runDevScript(args);
  });

program
  .command('show')
  .description('Display detailed information about a specific task')
  .argument('[id]', 'Task ID to show')
  .option('-i, --id <id>', 'Task ID to show (alternative to argument)')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .action((id, options) => {
    const args = ['show'];
    if (id) args.push(id);
    else if (options.id) args.push('--id', options.id);
    if (options.file) args.push('--file', options.file);
    runDevScript(args);
  });

program
  .command('add-dependency')
  .description('Add a dependency to a task')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-i, --id <id>', 'ID of the task to add a dependency to')
  .option('-d, --depends-on <id>', 'ID of the task to add as a dependency')
  .action((options) => {
    const args = ['add-dependency'];
    if (options.file) args.push('--file', options.file);
    if (options.id) args.push('--id', options.id);
    if (options.dependsOn) args.push('--depends-on', options.dependsOn);
    runDevScript(args);
  });

program
  .command('remove-dependency')
  .description('Remove a dependency from a task')
  .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-i, --id <id>', 'ID of the task to remove a dependency from')
  .option('-d, --depends-on <id>', 'ID of the dependency task to remove')
  .action((options) => {
    const args = ['remove-dependency'];
    if (options.file) args.push('--file', options.file);
    if (options.id) args.push('--id', options.id);
    if (options.dependsOn) args.push('--depends-on', options.dependsOn);
    runDevScript(args);
  });

program
  .command('validate-dependencies')
  .description('Check for and identify invalid dependencies in tasks')
  .option('-f, --file <path>', 'Path to the tasks.json file', 'tasks/tasks.json')
  .action((options) => {
    const args = ['validate-dependencies'];
    if (options.file) args.push('--file', options.file);
    runDevScript(args);
  });

program
  .command('fix-dependencies')
  .description('Find and fix all invalid dependencies in tasks.json and task files')
  .option('-f, --file <path>', 'Path to the tasks.json file', 'tasks/tasks.json')
  .action((options) => {
    const args = ['fix-dependencies'];
    if (options.file) args.push('--file', options.file);
    runDevScript(args);
  });

program
  .command('complexity-report')
  .description('Display the complexity analysis report')
  .option('-f, --file <path>', 'Path to the complexity report file', 'scripts/task-complexity-report.json')
  .action((options) => {
    const args = ['complexity-report'];
    if (options.file) args.push('--file', options.file);
    runDevScript(args);
  });

// Parse the command line arguments
program.parse(process.argv);

// Show help if no command was provided (just 'task-master' with no args)
if (process.argv.length <= 2) {
  displayBanner();
  displayHelp();
  process.exit(0);
}
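
// Example invocations once installed globally:
//   task-master                 # banner plus full help
//   task-master init -y         # scaffold a project with defaults
//   task-master list --status pending --with-subtasks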