Mirror of https://github.com/eyaltoledano/claude-task-master.git (synced 2025-06-27 00:29:58 +00:00)
fix: Improve CLI flag validation for single-word flags
Fix issue with kebab-case validator incorrectly flagging single-word flags like --prompt. Refactor detectCamelCaseFlags to properly handle all single-word flags. Update tests to verify correct behavior with single-word and camelCase flags. Add support for alternative flag formats in init command (e.g., -my_name). This fixes a bug where users couldn't use the --prompt flag directly and had to use -p instead.
parent 6322a1a66f
commit c59bd8c9fc
.cursorignore (new file, 6 lines)
@@ -0,0 +1,6 @@
+# Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv)
+node_modules/
+package-lock.json
@@ -30,8 +30,12 @@ program
   .version('1.0.0') // Will be replaced by prepare-package script
   .option('-y, --yes', 'Skip prompts and use default values')
   .option('-n, --name <name>', 'Project name')
+  .option('-my_name <name>', 'Project name (alias for --name)')
   .option('-d, --description <description>', 'Project description')
+  .option('-my_description <description>', 'Project description (alias for --description)')
   .option('-v, --version <version>', 'Project version')
+  .option('-my_version <version>', 'Project version (alias for --version)')
+  .option('--my_name <name>', 'Project name (alias for --name)')
   .option('-a, --author <author>', 'Author name')
   .option('--skip-install', 'Skip installing dependencies')
   .option('--dry-run', 'Show what would be done without making changes')
@@ -39,6 +43,17 @@ program

 const options = program.opts();

+// Map custom aliases to standard options
+if (options.my_name && !options.name) {
+  options.name = options.my_name;
+}
+if (options.my_description && !options.description) {
+  options.description = options.my_description;
+}
+if (options.my_version && !options.version) {
+  options.version = options.my_version;
+}
+
 // Define log levels
 const LOG_LEVELS = {
   debug: 0,
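The three if-blocks above repeat the same shape, so a table-driven version is easy to imagine. The sketch below is our illustration, not part of the commit: `aliasMap` is an assumed name, and it presumes `options` comes from `program.opts()` exactly as in the diff.

```js
import { program } from 'commander';

// Sketch only: the same alias-to-canonical mapping as the three if-blocks
// above, expressed as a lookup table. `aliasMap` is our name, not the repo's.
const aliasMap = {
  my_name: 'name',
  my_description: 'description',
  my_version: 'version'
};

const options = program.opts(); // as in the surrounding diff

for (const [alias, canonical] of Object.entries(aliasMap)) {
  // Copy the alias value over only when the canonical flag wasn't set directly
  if (options[alias] && !options[canonical]) {
    options[canonical] = options[alias];
  }
}
```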
@@ -562,6 +562,29 @@ function registerCommands(programInstance) {
     }
   });

+  // init command (documentation only, implementation is in init.js)
+  programInstance
+    .command('init')
+    .description('Initialize a new project with Task Master structure')
+    .option('-n, --name <name>', 'Project name')
+    .option('-my_name <name>', 'Project name (alias for --name)')
+    .option('--my_name <name>', 'Project name (alias for --name)')
+    .option('-d, --description <description>', 'Project description')
+    .option('-my_description <description>', 'Project description (alias for --description)')
+    .option('-v, --version <version>', 'Project version')
+    .option('-my_version <version>', 'Project version (alias for --version)')
+    .option('-a, --author <author>', 'Author name')
+    .option('-y, --yes', 'Skip prompts and use default values')
+    .option('--skip-install', 'Skip installing dependencies')
+    .action(() => {
+      console.log(chalk.yellow('The init command must be run as a standalone command: task-master init'));
+      console.log(chalk.cyan('Example usage:'));
+      console.log(chalk.white('  task-master init -n "My Project" -d "Project description"'));
+      console.log(chalk.white('  task-master init -my_name "My Project" -my_description "Project description"'));
+      console.log(chalk.white('  task-master init -y'));
+      process.exit(0);
+    });
+
   // Add more commands as needed...

   return programInstance;
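Design note: the `init` registration above exists only so the command shows up in `task-master --help`; its action prints usage and exits, because the actual implementation runs as the standalone init.js script rather than through commander's normal dispatch.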
@@ -301,8 +301,8 @@ function detectCamelCaseFlags(args) {
     if (arg.startsWith('--')) {
      const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

-      // Skip if it's a single word (no hyphens) or already in kebab-case
-      if (!flagName.includes('-')) {
+      // Skip single-word flags - they can't be camelCase
+      if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
        continue;
      }

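For readers skimming the diff, here is the fixed check as a self-contained sketch. The `toKebabCase` body is our assumption of a conventional implementation; the repo's actual helper may differ.

```js
// Assumed conventional camelCase -> kebab-case conversion; not from the commit.
function toKebabCase(str) {
  return str.replace(/([a-z0-9])([A-Z])/g, '$1-$2').toLowerCase();
}

function detectCamelCaseFlags(args) {
  const camelCaseFlags = [];
  for (const arg of args) {
    if (!arg.startsWith('--')) continue;
    const flagName = arg.split('=')[0].slice(2); // strip leading -- and any =value

    // The fix: a flag with no hyphens AND no uppercase letters is a plain
    // single word (e.g. --prompt) and can never be camelCase, so skip it.
    if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) continue;

    // Flag anything with a lowercase-to-uppercase transition, e.g. --promptText
    if (/[a-z][A-Z]/.test(flagName)) {
      const kebabVersion = toKebabCase(flagName);
      if (kebabVersion !== flagName) {
        camelCaseFlags.push({ original: flagName, kebabCase: kebabVersion });
      }
    }
  }
  return camelCaseFlags;
}

// '--prompt' passes through untouched; '--promptText' is flagged.
console.log(detectCamelCaseFlags(['--prompt=test', '--promptText=test']));
// => [ { original: 'promptText', kebabCase: 'prompt-text' } ]
```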
tasks/task_031.txt (new file, 42 lines)
@@ -0,0 +1,42 @@
+# Task ID: 31
+# Title: Add Config Flag Support to task-master init Command
+# Status: pending
+# Dependencies: None
+# Priority: low
+# Description: Enhance the 'task-master init' command to accept configuration flags that allow users to bypass the interactive CLI questions and directly provide configuration values.
+# Details:
+Currently, the 'task-master init' command prompts users with a series of questions to set up the configuration. This task involves modifying the init command to accept command-line flags that can pre-populate these configuration values, allowing for a non-interactive setup process.
+
+Implementation steps:
+1. Identify all configuration options that are currently collected through CLI prompts during initialization
+2. Create corresponding command-line flags for each configuration option (e.g., --project-name, --ai-provider, etc.)
+3. Modify the init command handler to check for these flags before starting the interactive prompts
+4. If a flag is provided, skip the corresponding prompt and use the provided value instead
+5. If all required configuration values are provided via flags, skip the interactive process entirely
+6. Update the command's help text to document all available flags and their usage
+7. Ensure backward compatibility so the command still works with the interactive approach when no flags are provided
+8. Consider adding a --non-interactive flag that will fail if any required configuration is missing rather than prompting for it (useful for scripts and CI/CD)
+
+The implementation should follow the existing command structure and use the same configuration file format. Make sure to validate flag values with the same validation logic used for interactive inputs.
+
+# Test Strategy:
+Testing should verify both the interactive and non-interactive paths work correctly:
+
+1. Unit tests:
+   - Test each flag individually to ensure it correctly overrides the corresponding prompt
+   - Test combinations of flags to ensure they work together properly
+   - Test validation of flag values to ensure invalid values are rejected
+   - Test the --non-interactive flag to ensure it fails when required values are missing
+
+2. Integration tests:
+   - Test a complete initialization with all flags provided
+   - Test partial initialization with some flags and some interactive prompts
+   - Test initialization with no flags (fully interactive)
+
+3. Manual testing scenarios:
+   - Run 'task-master init --project-name="Test Project" --ai-provider="openai"' and verify it skips those prompts
+   - Run 'task-master init --help' and verify all flags are documented
+   - Run 'task-master init --non-interactive' without required flags and verify it fails with a helpful error message
+   - Run a complete non-interactive initialization and verify the resulting configuration file matches expectations
+
+Ensure the command's documentation is updated to reflect the new functionality, and verify that the help text accurately describes all available options.
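Task 31 is only a spec at this point. The sketch below shows one way its flag-or-prompt pattern could look with commander and inquirer; the `--project-name` and `--non-interactive` flags come from the task text, while the structure and remaining names are our assumptions.

```js
import { program } from 'commander';
import inquirer from 'inquirer';

program
  .option('--project-name <name>', 'Project name (skips the prompt)')
  .option('--non-interactive', 'Fail instead of prompting for missing values')
  .parse(process.argv);

async function resolveConfig() {
  const opts = program.opts(); // commander camelCases --project-name to projectName
  const config = {};

  if (opts.projectName) {
    config.projectName = opts.projectName; // flag provided: skip the prompt
  } else if (opts.nonInteractive) {
    // Useful for scripts and CI/CD, as the task suggests
    throw new Error('--project-name is required in non-interactive mode');
  } else {
    const answers = await inquirer.prompt([
      { type: 'input', name: 'projectName', message: 'Project name:' }
    ]);
    config.projectName = answers.projectName;
  }
  return config;
}

resolveConfig().then((config) => console.log(config));
```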
tasks/task_032.txt (new file, 56 lines)
@@ -0,0 +1,56 @@
+# Task ID: 32
+# Title: Implement 'learn' Command for Automatic Cursor Rule Generation
+# Status: pending
+# Dependencies: None
+# Priority: high
+# Description: Create a new 'learn' command that analyzes code changes and chat history to automatically generate or update Cursor rules in the .cursor/rules directory based on successful implementation patterns.
+# Details:
+Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns:
+
+1. Create a new module `commands/learn.js` that implements the command logic
+2. Update `index.js` to register the new command
+3. The command should:
+   - Accept an optional parameter for specifying which patterns to focus on
+   - Use git diff to extract code changes since the last commit
+   - Access the Cursor chat history if possible (investigate API or file storage location)
+   - Call Claude via ai-services.js with the following context:
+     * Code diffs
+     * Chat history excerpts showing challenges and solutions
+     * Existing rules from .cursor/rules if present
+   - Parse Claude's response to extract rule definitions
+   - Create or update .mdc files in the .cursor/rules directory
+   - Provide a summary of what was learned and which rules were updated
+
+4. Create helper functions to:
+   - Extract relevant patterns from diffs
+   - Format the prompt for Claude to focus on identifying reusable patterns
+   - Parse Claude's response into valid rule definitions
+   - Handle rule conflicts or duplications
+
+5. Ensure the command handles errors gracefully, especially if chat history is inaccessible
+6. Add appropriate logging to show the learning process
+7. Document the command in the README.md file
+
+# Test Strategy:
+1. Unit tests:
+   - Create tests for each helper function in isolation
+   - Mock git diff responses and chat history data
+   - Verify rule extraction logic works with different input patterns
+   - Test error handling for various failure scenarios
+
+2. Integration tests:
+   - Test the command in a repository with actual code changes
+   - Verify it correctly generates .mdc files in the .cursor/rules directory
+   - Check that generated rules follow the correct format
+   - Verify the command correctly updates existing rules without losing custom modifications
+
+3. Manual testing scenarios:
+   - Run the command after implementing a feature with specific patterns
+   - Verify the generated rules capture the intended patterns
+   - Test the command with and without existing rules
+   - Verify the command works when chat history is available and when it isn't
+   - Test with large diffs to ensure performance remains acceptable
+
+4. Validation:
+   - After generating rules, use them in Cursor to verify they correctly guide future implementations
+   - Have multiple team members test the command to ensure consistent results
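Likewise for task 32, here is a rough skeleton of the flow its steps describe. `callClaude` and `parseRules` are hypothetical stand-ins (the task delegates these to ai-services.js and an unspecified response parser); only the git and filesystem calls are standard Node APIs.

```js
import { execSync } from 'child_process';
import fs from 'fs';
import path from 'path';

// Hypothetical stand-ins: the task leaves the AI call to ai-services.js and
// the response parsing unspecified. Real signatures may differ.
async function callClaude({ diff, existingRules }) { return ''; }
function parseRules(response) { return []; }

const RULES_DIR = path.join(process.cwd(), '.cursor', 'rules');

async function learn() {
  // Extract code changes since the last commit (per step 3 of the task)
  const diff = execSync('git diff HEAD~1', { encoding: 'utf8' });

  // Load existing rules so the model can update rather than duplicate them
  const existingRules = fs.existsSync(RULES_DIR)
    ? fs.readdirSync(RULES_DIR)
        .filter((f) => f.endsWith('.mdc'))
        .map((f) => fs.readFileSync(path.join(RULES_DIR, f), 'utf8'))
    : [];

  const response = await callClaude({ diff, existingRules });

  // Write parsed rule definitions back as .mdc files
  fs.mkdirSync(RULES_DIR, { recursive: true });
  for (const rule of parseRules(response)) {
    fs.writeFileSync(path.join(RULES_DIR, `${rule.name}.mdc`), rule.body);
  }
}
```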
@@ -1651,6 +1651,16 @@
       "priority": "low",
       "details": "Currently, the 'task-master init' command prompts users with a series of questions to set up the configuration. This task involves modifying the init command to accept command-line flags that can pre-populate these configuration values, allowing for a non-interactive setup process.\n\nImplementation steps:\n1. Identify all configuration options that are currently collected through CLI prompts during initialization\n2. Create corresponding command-line flags for each configuration option (e.g., --project-name, --ai-provider, etc.)\n3. Modify the init command handler to check for these flags before starting the interactive prompts\n4. If a flag is provided, skip the corresponding prompt and use the provided value instead\n5. If all required configuration values are provided via flags, skip the interactive process entirely\n6. Update the command's help text to document all available flags and their usage\n7. Ensure backward compatibility so the command still works with the interactive approach when no flags are provided\n8. Consider adding a --non-interactive flag that will fail if any required configuration is missing rather than prompting for it (useful for scripts and CI/CD)\n\nThe implementation should follow the existing command structure and use the same configuration file format. Make sure to validate flag values with the same validation logic used for interactive inputs.",
       "testStrategy": "Testing should verify both the interactive and non-interactive paths work correctly:\n\n1. Unit tests:\n   - Test each flag individually to ensure it correctly overrides the corresponding prompt\n   - Test combinations of flags to ensure they work together properly\n   - Test validation of flag values to ensure invalid values are rejected\n   - Test the --non-interactive flag to ensure it fails when required values are missing\n\n2. Integration tests:\n   - Test a complete initialization with all flags provided\n   - Test partial initialization with some flags and some interactive prompts\n   - Test initialization with no flags (fully interactive)\n\n3. Manual testing scenarios:\n   - Run 'task-master init --project-name=\"Test Project\" --ai-provider=\"openai\"' and verify it skips those prompts\n   - Run 'task-master init --help' and verify all flags are documented\n   - Run 'task-master init --non-interactive' without required flags and verify it fails with a helpful error message\n   - Run a complete non-interactive initialization and verify the resulting configuration file matches expectations\n\nEnsure the command's documentation is updated to reflect the new functionality, and verify that the help text accurately describes all available options."
-    }
+    },
+    {
+      "id": 32,
+      "title": "Implement 'learn' Command for Automatic Cursor Rule Generation",
+      "description": "Create a new 'learn' command that analyzes code changes and chat history to automatically generate or update Cursor rules in the .cursor/rules directory based on successful implementation patterns.",
+      "status": "pending",
+      "dependencies": [],
+      "priority": "high",
+      "details": "Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns:\n\n1. Create a new module `commands/learn.js` that implements the command logic\n2. Update `index.js` to register the new command\n3. The command should:\n   - Accept an optional parameter for specifying which patterns to focus on\n   - Use git diff to extract code changes since the last commit\n   - Access the Cursor chat history if possible (investigate API or file storage location)\n   - Call Claude via ai-services.js with the following context:\n     * Code diffs\n     * Chat history excerpts showing challenges and solutions\n     * Existing rules from .cursor/rules if present\n   - Parse Claude's response to extract rule definitions\n   - Create or update .mdc files in the .cursor/rules directory\n   - Provide a summary of what was learned and which rules were updated\n\n4. Create helper functions to:\n   - Extract relevant patterns from diffs\n   - Format the prompt for Claude to focus on identifying reusable patterns\n   - Parse Claude's response into valid rule definitions\n   - Handle rule conflicts or duplications\n\n5. Ensure the command handles errors gracefully, especially if chat history is inaccessible\n6. Add appropriate logging to show the learning process\n7. Document the command in the README.md file",
+      "testStrategy": "1. Unit tests:\n   - Create tests for each helper function in isolation\n   - Mock git diff responses and chat history data\n   - Verify rule extraction logic works with different input patterns\n   - Test error handling for various failure scenarios\n\n2. Integration tests:\n   - Test the command in a repository with actual code changes\n   - Verify it correctly generates .mdc files in the .cursor/rules directory\n   - Check that generated rules follow the correct format\n   - Verify the command correctly updates existing rules without losing custom modifications\n\n3. Manual testing scenarios:\n   - Run the command after implementing a feature with specific patterns\n   - Verify the generated rules capture the intended patterns\n   - Test the command with and without existing rules\n   - Verify the command works when chat history is available and when it isn't\n   - Test with large diffs to ensure performance remains acceptable\n\n4. Validation:\n   - After generating rules, use them in Cursor to verify they correctly guide future implementations\n   - Have multiple team members test the command to ensure consistent results"
+    }
   ]
 }
@@ -12,17 +12,19 @@ function testDetectCamelCaseFlags(args) {
     if (arg.startsWith('--')) {
       const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

-      // Skip if it's a single word (no hyphens) or already in kebab-case
-      if (!flagName.includes('-')) {
-        // Check for camelCase pattern (lowercase followed by uppercase)
-        if (/[a-z][A-Z]/.test(flagName)) {
-          const kebabVersion = toKebabCase(flagName);
-          if (kebabVersion !== flagName) {
-            camelCaseFlags.push({
-              original: flagName,
-              kebabCase: kebabVersion
-            });
-          }
-        }
-      }
+      // Skip single-word flags - they can't be camelCase
+      if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
+        continue;
+      }
+
+      // Check for camelCase pattern (lowercase followed by uppercase)
+      if (/[a-z][A-Z]/.test(flagName)) {
+        const kebabVersion = toKebabCase(flagName);
+        if (kebabVersion !== flagName) {
+          camelCaseFlags.push({
+            original: flagName,
+            kebabCase: kebabVersion
+          });
+        }
+      }
     }
@@ -43,17 +43,19 @@ function testDetectCamelCaseFlags(args) {
     if (arg.startsWith('--')) {
       const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

-      // Skip if it's a single word (no hyphens) or already in kebab-case
-      if (!flagName.includes('-')) {
-        // Check for camelCase pattern (lowercase followed by uppercase)
-        if (/[a-z][A-Z]/.test(flagName)) {
-          const kebabVersion = toKebabCase(flagName);
-          if (kebabVersion !== flagName) {
-            camelCaseFlags.push({
-              original: flagName,
-              kebabCase: kebabVersion
-            });
-          }
-        }
-      }
+      // Skip single-word flags - they can't be camelCase
+      if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
+        continue;
+      }
+
+      // Check for camelCase pattern (lowercase followed by uppercase)
+      if (/[a-z][A-Z]/.test(flagName)) {
+        const kebabVersion = toKebabCase(flagName);
+        if (kebabVersion !== flagName) {
+          camelCaseFlags.push({
+            original: flagName,
+            kebabCase: kebabVersion
+          });
+        }
+      }
     }
@@ -538,10 +540,15 @@ describe('CLI Flag Format Validation', () => {
     expect(flags).toHaveLength(0);
   });

-  test('detectCamelCaseFlags should not flag simple lowercase flags', () => {
-    const args = ['node', 'task-master', 'add-task', '--prompt=test', '--file=tasks.json'];
+  test('detectCamelCaseFlags should respect single-word flags', () => {
+    const args = ['node', 'task-master', 'add-task', '--prompt=test', '--file=test.json', '--priority=high', '--promptText=test'];
     const flags = testDetectCamelCaseFlags(args);

-    expect(flags).toHaveLength(0);
+    // Should only flag promptText, not the single-word flags
+    expect(flags).toHaveLength(1);
+    expect(flags).toContainEqual({
+      original: 'promptText',
+      kebabCase: 'prompt-text'
+    });
   });
 });
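One detail worth noting in the rewritten test: it keeps a positive case (`--promptText`) alongside the single-word flags, so the relaxed skip condition cannot silently disable camelCase detection altogether.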