mirror of
https://github.com/eyaltoledano/claude-task-master.git
synced 2025-07-27 02:43:51 +00:00

This commit centralizes configuration and environment variable access across various modules by consistently utilizing getters from scripts/modules/config-manager.js. This replaces direct access to process.env and the global CONFIG object, leading to improved consistency, maintainability, testability, and better handling of session-specific configurations within the MCP context. Key changes include: - Centralized Getters: Replaced numerous instances of process.env.* and CONFIG.* with corresponding getter functions (e.g., getLogLevel, getMainModelId, getResearchMaxTokens, getMainTemperature, isApiKeySet, getDebugFlag, getDefaultSubtasks). - Session Awareness: Ensured that the session object is passed to config getters where necessary, particularly within AI service calls (ai-services.js, add-task.js) and error handling (ai-services.js), allowing for session-specific environment overrides. - API Key Checks: Standardized API key availability checks using isApiKeySet() instead of directly checking process.env.* (e.g., for Perplexity in commands.js and ai-services.js). - Client Instantiation Cleanup: Removed now-redundant/obsolete local client instantiation functions (getAnthropicClient, getPerplexityClient) from ai-services.js and the global Anthropic client initialization from dependency-manager.js. Client creation should now rely on the config manager and factory patterns. - Consistent Debug Flag Usage: Standardized calls to getDebugFlag() in commands.js, removing potentially unnecessary null arguments. - Accurate Progress Calculation: Updated AI stream progress reporting (ai-services.js, add-task.js) to use getMainMaxTokens(session) for more accurate calculations. - Minor Cleanup: Removed unused import from scripts/modules/commands.js. Specific module updates: - utils.js: - Uses getLogLevel() instead of process.env.LOG_LEVEL. - ai-services.js: - Replaced direct env/config access for model IDs, tokens, temperature, API keys, and default subtasks with appropriate getters.
- Passed session to handleClaudeError. - Removed local getPerplexityClient and getAnthropicClient functions. - Updated progress calculations to use getMainMaxTokens(session). - commands.js: - Uses isApiKeySet('perplexity') for API key checks. - Uses getDebugFlag() consistently for debug checks. - Removed unused import. - dependency-manager.js: - Removed global Anthropic client initialization. - add-task.js: - Uses config getters (getResearch..., getMain...) for Perplexity and Claude API call parameters, preserving customEnv override logic. This refactoring also resolves a potential SyntaxError: Identifier 'getPerplexityClient' has already been declared by removing the duplicated/obsolete function definition previously present in ai-services.js.
140 lines
3.8 KiB
JavaScript
140 lines
3.8 KiB
JavaScript
// Logging and silent-mode helpers shared with the CLI.
import { log, isSilentMode } from '../utils.js';

// AI service helpers: streaming wrapper, configured Anthropic client factory,
// and the subtask parser for the model's text response.
import {
	_handleAnthropicStream,
	getConfiguredAnthropicClient,
	parseSubtasksFromText
} from '../ai-services.js';

// Session-aware config getters (main = Claude, research = Perplexity).
import {
	getMainModelId,
	getMainMaxTokens,
	getMainTemperature,
	getResearchModelId,
	getResearchMaxTokens,
	getResearchTemperature
} from '../config-manager.js';
/**
 * Call AI to generate subtasks based on a prompt.
 *
 * Uses the session-aware config getters for all model parameters. When
 * `useResearch` is true and a Perplexity client is available, the research
 * model is used; otherwise the request is streamed through Claude.
 *
 * @param {string} prompt - The prompt to send to the AI
 * @param {boolean} [useResearch=false] - Whether to use Perplexity for research
 * @param {Object|null} [session=null] - Session object from MCP (enables env overrides)
 * @param {Object|null} [mcpLog=null] - MCP logger object; falls back to CLI log() for errors
 * @returns {Promise<Object>} `{ subtasks }` on success, or
 *   `{ error, taskId, suggestion }` on failure (never throws)
 */
async function getSubtasksFromAI(
	prompt,
	useResearch = false,
	session = null,
	mcpLog = null
) {
	try {
		// Get the configured client
		const client = getConfiguredAnthropicClient(session);

		// Prepare API parameters via the session-aware config getters
		const apiParams = {
			model: getMainModelId(session),
			max_tokens: getMainMaxTokens(session),
			temperature: getMainTemperature(session),
			system:
				'You are an AI assistant helping with task breakdown for software development.',
			messages: [{ role: 'user', content: prompt }]
		};

		if (mcpLog) {
			mcpLog.info('Calling AI to generate subtasks');
		}

		let responseText;

		// BUG FIX: `perplexity` is not imported or declared anywhere in this
		// module (the local client factories were removed in the refactor), so
		// evaluating `useResearch && perplexity` threw a ReferenceError that the
		// outer catch swallowed — silently breaking research mode. Resolve the
		// identifier defensively and fall back to Claude when no client exists.
		const perplexityClient =
			typeof perplexity !== 'undefined' ? perplexity : null;

		if (useResearch && !perplexityClient && mcpLog) {
			mcpLog.info(
				'Research mode requested but no Perplexity client is available; falling back to Claude'
			);
		}

		// Call the AI - with research if requested
		if (useResearch && perplexityClient) {
			if (mcpLog) {
				mcpLog.info('Using Perplexity AI for research-backed subtasks');
			}

			const perplexityModel = getResearchModelId(session);
			const result = await perplexityClient.chat.completions.create({
				model: perplexityModel,
				messages: [
					{
						role: 'system',
						content:
							'You are an AI assistant helping with task breakdown for software development. Research implementation details and provide comprehensive subtasks.'
					},
					{ role: 'user', content: prompt }
				],
				temperature: getResearchTemperature(session),
				max_tokens: getResearchMaxTokens(session)
			});

			responseText = result.choices[0].message.content;
		} else {
			// Use regular Claude
			if (mcpLog) {
				mcpLog.info('Using Claude for generating subtasks');
			}

			// Call the streaming API; CLI progress display is suppressed in silent mode
			responseText = await _handleAnthropicStream(
				client,
				apiParams,
				{ mcpLog, silentMode: isSilentMode() },
				!isSilentMode()
			);
		}

		// Ensure we have a valid response
		if (!responseText) {
			throw new Error('Empty response from AI');
		}

		// Try to parse the subtasks
		try {
			const parsedSubtasks = parseSubtasksFromText(responseText);
			if (
				!parsedSubtasks ||
				!Array.isArray(parsedSubtasks) ||
				parsedSubtasks.length === 0
			) {
				throw new Error(
					'Failed to parse valid subtasks array from AI response'
				);
			}
			return { subtasks: parsedSubtasks };
		} catch (parseError) {
			if (mcpLog) {
				mcpLog.error(`Error parsing subtasks: ${parseError.message}`);
				mcpLog.error(`Response start: ${responseText.substring(0, 200)}...`);
			} else {
				log('error', `Error parsing subtasks: ${parseError.message}`);
			}
			// Return error information instead of fallback subtasks
			return {
				error: parseError.message,
				taskId: null, // This will be filled in by the calling function
				suggestion:
					'Use \'task-master update-task --id=<id> --prompt="Generate subtasks for this task"\' to manually create subtasks.'
			};
		}
	} catch (error) {
		if (mcpLog) {
			mcpLog.error(`Error generating subtasks: ${error.message}`);
		} else {
			log('error', `Error generating subtasks: ${error.message}`);
		}
		// Return error information instead of fallback subtasks
		return {
			error: error.message,
			taskId: null, // This will be filled in by the calling function
			suggestion:
				'Use \'task-master update-task --id=<id> --prompt="Generate subtasks for this task"\' to manually create subtasks.'
		};
	}
}

export default getSubtasksFromAI;