/**
 * Tests for the expand-task.js module
 */
import { jest } from '@jest/globals';
import fs from 'fs';

import {
	createGetTagAwareFilePathMock,
	createSlugifyTagForFilePathMock
} from './setup.js';

// Mock the dependencies before importing the module under test
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
	readJSON: jest.fn(),
	writeJSON: jest.fn(),
	log: jest.fn(),
	CONFIG: {
		model: 'mock-claude-model',
		maxTokens: 4000,
		temperature: 0.7,
		debug: false
	},
	sanitizePrompt: jest.fn((prompt) => prompt),
	truncate: jest.fn((text) => text),
	isSilentMode: jest.fn(() => false),
	findTaskById: jest.fn(),
	findProjectRoot: jest.fn((tasksPath) => '/mock/project/root'),
	getCurrentTag: jest.fn(() => 'master'),
	ensureTagMetadata: jest.fn((tagObj) => tagObj),
	flattenTasksWithSubtasks: jest.fn((tasks) => {
		const allTasks = [];
		const queue = [...(tasks || [])];
		while (queue.length > 0) {
			const task = queue.shift();
			allTasks.push(task);
			if (task.subtasks) {
				for (const subtask of task.subtasks) {
					queue.push({ ...subtask, id: `${task.id}.${subtask.id}` });
				}
			}
		}
		return allTasks;
	}),
	getTagAwareFilePath: createGetTagAwareFilePathMock(),
	slugifyTagForFilePath: createSlugifyTagForFilePathMock(),
	readComplexityReport: jest.fn(),
	markMigrationForNotice: jest.fn(),
	performCompleteTagMigration: jest.fn(),
	setTasksForTag: jest.fn(),
	getTasksForTag: jest.fn((data, tag) => data[tag]?.tasks || [])
}));
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
	displayBanner: jest.fn(),
	getStatusWithColor: jest.fn((status) => status),
	startLoadingIndicator: jest.fn(),
	stopLoadingIndicator: jest.fn(),
	succeedLoadingIndicator: jest.fn(),
	failLoadingIndicator: jest.fn(),
	warnLoadingIndicator: jest.fn(),
	infoLoadingIndicator: jest.fn(),
	displayAiUsageSummary: jest.fn(),
	displayContextAnalysis: jest.fn()
}));
jest.unstable_mockModule(
	'../../../../../scripts/modules/ai-services-unified.js',
	() => ({
		generateTextService: jest.fn().mockResolvedValue({
			mainResult: JSON.stringify({
				subtasks: [
					{
						id: 1,
						title: 'Set up project structure',
						description:
							'Create the basic project directory structure and configuration files',
						dependencies: [],
						details:
							'Initialize package.json, create src/ and test/ directories, set up linting configuration',
						status: 'pending',
						testStrategy:
							'Verify all expected files and directories are created'
					},
					{
						id: 2,
						title: 'Implement core functionality',
						description: 'Develop the main application logic and core features',
						dependencies: [1],
						details:
							'Create main classes, implement business logic, set up data models',
						status: 'pending',
						testStrategy: 'Unit tests for all core functions and classes'
					},
					{
						id: 3,
						title: 'Add user interface',
						description: 'Create the user interface components and layouts',
						dependencies: [2],
						details:
							'Design UI components, implement responsive layouts, add user interactions',
						status: 'pending',
						testStrategy: 'UI tests and visual regression testing'
					}
				]
			}),
			telemetryData: {
				timestamp: new Date().toISOString(),
				userId: '1234567890',
				commandName: 'expand-task',
				modelUsed: 'claude-3-5-sonnet',
				providerName: 'anthropic',
				inputTokens: 1000,
				outputTokens: 500,
				totalTokens: 1500,
				totalCost: 0.012414,
				currency: 'USD'
			}
		})
	})
);
jest.unstable_mockModule(
	'../../../../../scripts/modules/config-manager.js',
	() => ({
		getDefaultSubtasks: jest.fn(() => 3),
		getDebugFlag: jest.fn(() => false),
		getDefaultNumTasks: jest.fn(() => 10),
		getMainProvider: jest.fn(() => 'openai'),
		getResearchProvider: jest.fn(() => 'perplexity')
	})
);
jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/contextGatherer.js',
	() => ({
		ContextGatherer: jest.fn().mockImplementation(() => ({
			gather: jest.fn().mockResolvedValue({
				context: 'Mock project context from files'
			})
		}))
	})
);
jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/fuzzyTaskSearch.js',
	() => ({
		FuzzyTaskSearch: jest.fn().mockImplementation(() => ({
			findRelevantTasks: jest.fn().mockReturnValue([]),
			getTaskIds: jest.fn().mockReturnValue([])
		}))
	})
);
jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager/generate-task-files.js',
	() => ({
		default: jest.fn().mockResolvedValue()
	})
);
jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);
// Mock external UI libraries
jest.unstable_mockModule('chalk', () => ({
	default: {
		white: { bold: jest.fn((text) => text) },
		cyan: Object.assign(
			jest.fn((text) => text),
			{
				bold: jest.fn((text) => text)
			}
		),
		green: jest.fn((text) => text),
		yellow: jest.fn((text) => text),
		bold: jest.fn((text) => text)
	}
}));
jest.unstable_mockModule('boxen', () => ({
	default: jest.fn((text) => text)
}));
jest.unstable_mockModule('cli-table3', () => ({
	default: jest.fn().mockImplementation(() => ({
		push: jest.fn(),
		toString: jest.fn(() => 'mocked table')
	}))
}));
// Mock process.exit to prevent Jest worker crashes
const mockExit = jest.spyOn(process, 'exit').mockImplementation((code) => {
	throw new Error(`process.exit called with "${code}"`);
});
// Import the mocked modules
const {
	readJSON,
	writeJSON,
	log,
	findTaskById,
	ensureTagMetadata,
	readComplexityReport,
	findProjectRoot
} = await import('../../../../../scripts/modules/utils.js');
const { generateTextService } = await import(
	'../../../../../scripts/modules/ai-services-unified.js'
);
const generateTaskFiles = (
	await import(
		'../../../../../scripts/modules/task-manager/generate-task-files.js'
	)
).default;
const { getDefaultSubtasks } = await import(
	'../../../../../scripts/modules/config-manager.js'
);
// Import the module under test
const { default: expandTask } = await import(
	'../../../../../scripts/modules/task-manager/expand-task.js'
);
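// Positional call signature exercised throughout these tests (inferred from the
// call sites below, not restated from the implementation):
// expandTask(tasksPath, taskId, numSubtasks, useResearch, additionalContext, context, force)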
describe('expandTask', () => {
	const sampleTasks = {
		master: {
			tasks: [
				{
					id: 1,
					title: 'Task 1',
					description: 'First task',
					status: 'done',
					dependencies: [],
					details: 'Already completed task',
					subtasks: []
				},
				{
					id: 2,
					title: 'Task 2',
					description: 'Second task',
					status: 'pending',
					dependencies: [],
					details: 'Task ready for expansion',
					subtasks: []
				},
				{
					id: 3,
					title: 'Complex Task',
					description: 'A complex task that needs breakdown',
					status: 'pending',
					dependencies: [1],
					details: 'This task involves multiple steps',
					subtasks: []
				},
				{
					id: 4,
					title: 'Task with existing subtasks',
					description: 'Task that already has subtasks',
					status: 'pending',
					dependencies: [],
					details: 'Has existing subtasks',
					subtasks: [
						{
							id: 1,
							title: 'Existing subtask',
							description: 'Already exists',
							status: 'pending',
							dependencies: []
						}
					]
				}
			]
		},
		'feature-branch': {
			tasks: [
				{
					id: 1,
					title: 'Feature Task 1',
					description: 'Task in feature branch',
					status: 'pending',
					dependencies: [],
					details: 'Feature-specific task',
					subtasks: []
				}
			]
		}
	};
	// Create a helper function for consistent mcpLog mock
	const createMcpLogMock = () => ({
		info: jest.fn(),
		warn: jest.fn(),
		error: jest.fn(),
		debug: jest.fn(),
		success: jest.fn()
	});
	beforeEach(() => {
		jest.clearAllMocks();
		mockExit.mockClear();
		// Default readJSON implementation - returns tagged structure
		readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
			const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
			const selectedTag = tag || 'master';
			return {
				...sampleTasksCopy[selectedTag],
				tag: selectedTag,
				_rawTaggedData: sampleTasksCopy
			};
		});
		// Default findTaskById implementation
		findTaskById.mockImplementation((tasks, taskId) => {
			const id = parseInt(taskId, 10);
			return tasks.find((t) => t.id === id);
		});
		// Default complexity report (no report available)
		readComplexityReport.mockReturnValue(null);
		// Mock findProjectRoot to return consistent path for complexity report
		findProjectRoot.mockReturnValue('/mock/project/root');
		writeJSON.mockResolvedValue();
		generateTaskFiles.mockResolvedValue();
		log.mockImplementation(() => {});
		// Mock console.log to avoid output during tests
		jest.spyOn(console, 'log').mockImplementation(() => {});
	});
	afterEach(() => {
		console.log.mockRestore();
	});
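	// Note: the readJSON mock above resolves the requested tag to a flat view
	// ({ tasks, tag, _rawTaggedData }); the "Tag Handling" tests below assert
	// that expandTask writes this same shape back via writeJSON instead of
	// flattening the tagged structure.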
	describe('Basic Functionality', () => {
		test('should expand a task with AI-generated subtasks', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const numSubtasks = 3;
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			const result = await expandTask(
				tasksPath,
				taskId,
				numSubtasks,
				false,
				'',
				context,
				false
			);
			// Assert
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				undefined
			);
			expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 2,
							subtasks: expect.arrayContaining([
								expect.objectContaining({
									id: 1,
									title: 'Set up project structure',
									status: 'pending'
								}),
								expect.objectContaining({
									id: 2,
									title: 'Implement core functionality',
									status: 'pending'
								}),
								expect.objectContaining({
									id: 3,
									title: 'Add user interface',
									status: 'pending'
								})
							])
						})
					]),
					tag: 'master',
					_rawTaggedData: expect.objectContaining({
						master: expect.objectContaining({
							tasks: expect.any(Array)
						})
					})
				}),
				'/mock/project/root',
				undefined
			);
			expect(result).toEqual(
				expect.objectContaining({
					task: expect.objectContaining({
						id: 2,
						subtasks: expect.arrayContaining([
							expect.objectContaining({
								id: 1,
								title: 'Set up project structure',
								status: 'pending'
							}),
							expect.objectContaining({
								id: 2,
								title: 'Implement core functionality',
								status: 'pending'
							}),
							expect.objectContaining({
								id: 3,
								title: 'Add user interface',
								status: 'pending'
							})
						])
					}),
					telemetryData: expect.any(Object)
				})
			);
		});
		test('should handle research flag correctly', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const numSubtasks = 3;
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			await expandTask(
				tasksPath,
				taskId,
				numSubtasks,
				true, // useResearch = true
				'Additional context for research',
				context,
				false
			);
			// Assert
			expect(generateTextService).toHaveBeenCalledWith(
				expect.objectContaining({
					role: 'research',
					commandName: expect.any(String)
				})
			);
		});
		test('should handle complexity report integration without errors', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act & Assert - Should complete without errors
			const result = await expandTask(
				tasksPath,
				taskId,
				undefined, // numSubtasks not specified
				false,
				'',
				context,
				false
			);
			// Assert - Should successfully expand and return expected structure
			expect(result).toEqual(
				expect.objectContaining({
					task: expect.objectContaining({
						id: 2,
						subtasks: expect.any(Array)
					}),
					telemetryData: expect.any(Object)
				})
			);
			expect(generateTextService).toHaveBeenCalled();
		});
	});
	describe('Tag Handling (The Critical Bug Fix)', () => {
		test('should preserve tagged structure when expanding with default tag', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root',
				tag: 'master' // Explicit tag context
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - CRITICAL: Check tag is passed to readJSON and writeJSON
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				'master'
			);
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tag: 'master',
					_rawTaggedData: expect.objectContaining({
						master: expect.any(Object),
						'feature-branch': expect.any(Object)
					})
				}),
				'/mock/project/root',
				'master' // CRITICAL: Tag must be passed to writeJSON
			);
		});
		test('should preserve tagged structure when expanding with non-default tag', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '1'; // Task in feature-branch
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root',
				tag: 'feature-branch' // Different tag context
			};
			// Configure readJSON to return feature-branch data
			readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
				const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
				return {
					...sampleTasksCopy['feature-branch'],
					tag: 'feature-branch',
					_rawTaggedData: sampleTasksCopy
				};
			});
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - CRITICAL: Check tag preservation for non-default tag
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				'feature-branch'
			);
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tag: 'feature-branch',
					_rawTaggedData: expect.objectContaining({
						master: expect.any(Object),
						'feature-branch': expect.any(Object)
					})
				}),
				'/mock/project/root',
				'feature-branch' // CRITICAL: Correct tag passed to writeJSON
			);
		});
		test('should NOT corrupt tagged structure when tag is undefined', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
				// No tag specified - should default gracefully
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - Should still preserve structure with undefined tag
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				undefined
			);
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					_rawTaggedData: expect.objectContaining({
						master: expect.any(Object)
					})
				}),
				'/mock/project/root',
				undefined
			);
			// CRITICAL: Verify structure is NOT flattened to old format
			const writeCallArgs = writeJSON.mock.calls[0][1];
			expect(writeCallArgs).toHaveProperty('tasks'); // Should have tasks property from readJSON mock
			expect(writeCallArgs).toHaveProperty('_rawTaggedData'); // Should preserve tagged structure
		});
	});
	describe('Force Flag Handling', () => {
		test('should replace existing subtasks when force=true', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '4'; // Task with existing subtasks
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, true);
			// Assert - Should replace existing subtasks
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 4,
							subtasks: expect.arrayContaining([
								expect.objectContaining({
									id: 1,
									title: 'Set up project structure'
								})
							])
						})
					])
				}),
				'/mock/project/root',
				undefined
			);
		});
		test('should append to existing subtasks when force=false', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '4'; // Task with existing subtasks
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - Should append to existing subtasks with proper ID increments
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 4,
							subtasks: expect.arrayContaining([
								// Should contain both existing and new subtasks
								expect.any(Object),
								expect.any(Object),
								expect.any(Object),
								expect.any(Object) // 1 existing + 3 new = 4 total
							])
						})
					])
				}),
				'/mock/project/root',
				undefined
			);
		});
	});
	describe('Complexity Report Integration (Tag-Specific)', () => {
		test('should use tag-specific complexity report when available', async () => {
			// Arrange
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt: 'Generate exactly 5 subtasks for complexity report',
				userPrompt:
					'Please break this task into 5 parts\n\nUser provided context'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});
			const tasksPath = 'tasks/tasks.json';
			const taskId = '1'; // Task in feature-branch
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root',
				tag: 'feature-branch',
				complexityReportPath:
					'/mock/project/root/task-complexity-report_feature-branch.json'
			};
			// Stub fs.existsSync to simulate complexity report exists for this tag
			const existsSpy = jest
				.spyOn(fs, 'existsSync')
				.mockImplementation((filepath) =>
					filepath.endsWith('task-complexity-report_feature-branch.json')
				);
			// Stub readJSON to return complexity report when reading the report path
			readJSON.mockImplementation((filepath, projectRootParam, tagParam) => {
				if (filepath.includes('task-complexity-report_feature-branch.json')) {
					return {
						complexityAnalysis: [
							{
								taskId: 1,
								complexityScore: 8,
								recommendedSubtasks: 5,
								reasoning: 'Needs five detailed steps',
								expansionPrompt: 'Please break this task into 5 parts'
							}
						]
					};
				}
				// Default tasks data for tasks.json
				const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
				const selectedTag = tagParam || 'master';
				return {
					...sampleTasksCopy[selectedTag],
					tag: selectedTag,
					_rawTaggedData: sampleTasksCopy
				};
			});
			// Act
			await expandTask(tasksPath, taskId, undefined, false, '', context, false);
			// Assert - generateTextService called with systemPrompt for 5 subtasks
			const callArg = generateTextService.mock.calls[0][0];
			expect(callArg.systemPrompt).toContain('Generate exactly 5 subtasks');
			// Assert - Should use complexity-report variant with expansion prompt
			expect(mockLoadPrompt).toHaveBeenCalledWith(
				'expand-task',
				expect.objectContaining({
					subtaskCount: 5,
					expansionPrompt: 'Please break this task into 5 parts'
				}),
				'complexity-report'
			);
			// Clean up stub
			existsSpy.mockRestore();
		});
	});
	describe('Error Handling', () => {
		test('should handle non-existent task ID', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '999'; // Non-existent task
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			findTaskById.mockReturnValue(null);
			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow('Task 999 not found');
			expect(writeJSON).not.toHaveBeenCalled();
		});
		test('should expand tasks regardless of status (including done tasks)', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '1'; // Task with 'done' status
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			const result = await expandTask(
				tasksPath,
				taskId,
				3,
				false,
				'',
				context,
				false
			);
			// Assert - Should successfully expand even 'done' tasks
			expect(writeJSON).toHaveBeenCalled();
			expect(result).toEqual(
				expect.objectContaining({
					task: expect.objectContaining({
						id: 1,
						status: 'done', // Status unchanged
						subtasks: expect.arrayContaining([
							expect.objectContaining({
								id: 1,
								title: 'Set up project structure',
								status: 'pending'
							})
						])
					}),
					telemetryData: expect.any(Object)
				})
			);
		});
		test('should handle AI service failures', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			generateTextService.mockRejectedValueOnce(new Error('AI service error'));
			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow('AI service error');
			expect(writeJSON).not.toHaveBeenCalled();
		});
		test('should handle file read errors', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			readJSON.mockImplementation(() => {
				throw new Error('File read failed');
			});
			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow('File read failed');
			expect(writeJSON).not.toHaveBeenCalled();
		});
		test('should handle invalid tasks data', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			readJSON.mockReturnValue(null);
			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow();
		});
	});
	describe('Output Format Handling', () => {
		test('should display telemetry for CLI output format', async () => {
			// Arrange
			const { displayAiUsageSummary } = await import(
				'../../../../../scripts/modules/ui.js'
			);
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				projectRoot: '/mock/project/root'
				// No mcpLog - should trigger CLI mode
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - Should display telemetry for CLI users
			expect(displayAiUsageSummary).toHaveBeenCalledWith(
				expect.objectContaining({
					commandName: 'expand-task',
					modelUsed: 'claude-3-5-sonnet',
					totalCost: 0.012414
				}),
				'cli'
			);
		});
		test('should not display telemetry for MCP output format', async () => {
			// Arrange
			const { displayAiUsageSummary } = await import(
				'../../../../../scripts/modules/ui.js'
			);
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - Should NOT display telemetry for MCP (handled at higher level)
			expect(displayAiUsageSummary).not.toHaveBeenCalled();
		});
	});
	describe('Edge Cases', () => {
		test('should handle empty additional context', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - Should work with empty context (but may include project context)
			expect(generateTextService).toHaveBeenCalledWith(
				expect.objectContaining({
					prompt: expect.stringMatching(/.*/) // Just ensure prompt exists
				})
			);
		});
		test('should handle additional context correctly', async () => {
			// Arrange
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt with context'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const additionalContext = 'Use React hooks and TypeScript';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};
			// Act
			await expandTask(
				tasksPath,
				taskId,
				3,
				false,
				additionalContext,
				context,
				false
			);
			// Assert - Should pass separate context parameters to prompt manager
			expect(mockLoadPrompt).toHaveBeenCalledWith(
				'expand-task',
				expect.objectContaining({
					additionalContext: expect.stringContaining(
						'Use React hooks and TypeScript'
					),
					gatheredContext: expect.stringContaining(
						'Mock project context from files'
					)
				}),
				expect.any(String)
			);
			// Additional assertion to verify the context parameters are passed separately
			const call = mockLoadPrompt.mock.calls[0];
			const parameters = call[1];
			expect(parameters.additionalContext).toContain(
				'Use React hooks and TypeScript'
			);
			expect(parameters.gatheredContext).toContain(
				'Mock project context from files'
			);
		});
		test('should handle missing project root in context', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock()
				// No projectRoot in context
			};
			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);
			// Assert - Should derive project root from tasksPath
			expect(findProjectRoot).toHaveBeenCalledWith(tasksPath);
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				undefined
			);
		});
	});
	describe('Dynamic Subtask Generation', () => {
		const tasksPath = 'tasks/tasks.json';
		const taskId = 1;
		const context = { session: null, mcpLog: null };
		beforeEach(() => {
			// Reset all mocks
			jest.clearAllMocks();
			// Setup default mocks
			readJSON.mockReturnValue({
				tasks: [
					{
						id: 1,
						title: 'Test Task',
						description: 'A test task',
						status: 'pending',
						subtasks: []
					}
				]
			});
			findTaskById.mockReturnValue({
				id: 1,
				title: 'Test Task',
				description: 'A test task',
				status: 'pending',
				subtasks: []
			});
			findProjectRoot.mockReturnValue('/mock/project/root');
		});
		test('should accept 0 as valid numSubtasks value for dynamic generation', async () => {
			// Act - Call with numSubtasks=0 (should not throw error)
			const result = await expandTask(
				tasksPath,
				taskId,
				0,
				false,
				'',
				context,
				false
			);
			// Assert - Should complete successfully
			expect(result).toBeDefined();
			expect(generateTextService).toHaveBeenCalled();
		});
		test('should use dynamic prompting when numSubtasks is 0', async () => {
			// Mock getPromptManager to return realistic prompt with dynamic content
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt:
					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into an appropriate number of specific subtasks that can be implemented one by one.',
				userPrompt:
					'Break down this task into an appropriate number of specific subtasks'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});
			// Act
			await expandTask(tasksPath, taskId, 0, false, '', context, false);
			// Assert - Verify generateTextService was called
			expect(generateTextService).toHaveBeenCalled();
			// Get the call arguments to verify the system prompt
			const callArgs = generateTextService.mock.calls[0][0];
			expect(callArgs.systemPrompt).toContain(
				'an appropriate number of specific subtasks'
			);
		});
		test('should use specific count prompting when numSubtasks is positive', async () => {
			// Mock getPromptManager to return realistic prompt with specific count
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt:
					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 5 specific subtasks that can be implemented one by one.',
				userPrompt: 'Break down this task into exactly 5 specific subtasks'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});
			// Act
			await expandTask(tasksPath, taskId, 5, false, '', context, false);
			// Assert - Verify generateTextService was called
			expect(generateTextService).toHaveBeenCalled();
			// Get the call arguments to verify the system prompt
			const callArgs = generateTextService.mock.calls[0][0];
			expect(callArgs.systemPrompt).toContain('5 specific subtasks');
		});
		test('should reject negative numSubtasks values and fallback to default', async () => {
			// Mock getDefaultSubtasks to return a specific value
			getDefaultSubtasks.mockReturnValue(4);
			// Mock getPromptManager to return realistic prompt with default count
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt:
					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 4 specific subtasks that can be implemented one by one.',
				userPrompt: 'Break down this task into exactly 4 specific subtasks'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});
			// Act
			await expandTask(tasksPath, taskId, -3, false, '', context, false);
			// Assert - Should use default value instead of negative
			expect(generateTextService).toHaveBeenCalled();
			const callArgs = generateTextService.mock.calls[0][0];
			expect(callArgs.systemPrompt).toContain('4 specific subtasks');
		});
		test('should use getDefaultSubtasks when numSubtasks is undefined', async () => {
			// Mock getDefaultSubtasks to return a specific value
			getDefaultSubtasks.mockReturnValue(6);
			// Mock getPromptManager to return realistic prompt with default count
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt:
					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 6 specific subtasks that can be implemented one by one.',
				userPrompt: 'Break down this task into exactly 6 specific subtasks'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});
			// Act - Call without specifying numSubtasks (undefined)
			await expandTask(tasksPath, taskId, undefined, false, '', context, false);
			// Assert - Should use default value
			expect(generateTextService).toHaveBeenCalled();
			const callArgs = generateTextService.mock.calls[0][0];
			expect(callArgs.systemPrompt).toContain('6 specific subtasks');
		});
		test('should use getDefaultSubtasks when numSubtasks is null', async () => {
			// Mock getDefaultSubtasks to return a specific value
			getDefaultSubtasks.mockReturnValue(7);
			// Mock getPromptManager to return realistic prompt with default count
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt:
					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 7 specific subtasks that can be implemented one by one.',
				userPrompt: 'Break down this task into exactly 7 specific subtasks'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});
			// Act - Call with null numSubtasks
			await expandTask(tasksPath, taskId, null, false, '', context, false);
			// Assert - Should use default value
			expect(generateTextService).toHaveBeenCalled();
			const callArgs = generateTextService.mock.calls[0][0];
			expect(callArgs.systemPrompt).toContain('7 specific subtasks');
		});
	});
});