mirror of
https://github.com/eyaltoledano/claude-task-master.git
synced 2025-07-27 02:43:51 +00:00

This commit focuses on standardizing configuration and API key access patterns across key modules as part of subtask 61.34. Key changes include: - Refactored `ai-services.js` to remove global AI clients and use `resolveEnvVariable` for API key checks. Client instantiation now relies on `getAnthropicClient`/`getPerplexityClient` accepting a session object. - Refactored `task-manager.js` (`analyzeTaskComplexity` function) to use the unified `generateTextService` from `ai-services-unified.js`, removing direct AI client calls. - Replaced direct `process.env` access for model parameters and other configurations (`PERPLEXITY_MODEL`, `CONFIG.*`) in `task-manager.js` with calls to the appropriate getters from `config-manager.js` (e.g., `getResearchModelId(session)`, `getMainMaxTokens(session)`). - Ensured `utils.js` (`resolveEnvVariable`) correctly handles potentially undefined session objects. - Updated function signatures where necessary to propagate the `session` object for correct context-aware configuration/key retrieval. This moves towards the goal of using `ai-client-factory.js` and `ai-services-unified.js` as the standard pattern for AI interactions and centralizing configuration management through `config-manager.js`.
133 lines
3.8 KiB
JavaScript
import { log, isSilentMode } from '../utils.js';
|
|
|
|
import {
|
|
_handleAnthropicStream,
|
|
getConfiguredAnthropicClient,
|
|
parseSubtasksFromText
|
|
} from '../ai-services.js';
|
|
|
|
/**
 * Call AI to generate subtasks based on a prompt.
 *
 * Uses Perplexity (research-backed) when requested and available, otherwise
 * streams a response from Claude. Never throws to the caller: on any failure
 * it returns an error object instead of a subtasks array.
 *
 * @param {string} prompt - The prompt to send to the AI
 * @param {boolean} useResearch - Whether to use Perplexity for research
 * @param {Object} session - Session object from MCP (may carry env overrides)
 * @param {Object} mcpLog - MCP logger object (falls back to CLI `log` if null)
 * @returns {Promise<Object>} `{ subtasks: Array }` on success, or
 *   `{ error: string, taskId: null, suggestion: string }` on failure
 */
async function getSubtasksFromAI(
	prompt,
	useResearch = false,
	session = null,
	mcpLog = null
) {
	try {
		// Get the configured client
		const client = getConfiguredAnthropicClient(session);

		// Prepare API parameters.
		// Use ?? for numeric settings so a legitimate falsy value (e.g.
		// temperature 0) supplied by the session is not silently replaced
		// by the config default; keep || for the model string so an empty
		// string still falls back to a usable model id.
		// NOTE(review): CONFIG is referenced but not imported in this file —
		// presumably a module-scope global; confirm it is actually in scope.
		const apiParams = {
			model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
			max_tokens: session?.env?.MAX_TOKENS ?? CONFIG.maxTokens,
			temperature: session?.env?.TEMPERATURE ?? CONFIG.temperature,
			system:
				'You are an AI assistant helping with task breakdown for software development.',
			messages: [{ role: 'user', content: prompt }]
		};

		if (mcpLog) {
			mcpLog.info('Calling AI to generate subtasks');
		}

		let responseText;

		// Call the AI - with research if requested.
		// NOTE(review): `perplexity` is also not imported here — confirm it is
		// defined at module scope; otherwise this condition throws a
		// ReferenceError rather than falling back to Claude.
		if (useResearch && perplexity) {
			if (mcpLog) {
				mcpLog.info('Using Perplexity AI for research-backed subtasks');
			}

			// NOTE(review): precedence here (process.env before session) is the
			// reverse of the Anthropic params above — verify which is intended.
			const perplexityModel =
				process.env.PERPLEXITY_MODEL ||
				session?.env?.PERPLEXITY_MODEL ||
				'sonar-pro';

			const result = await perplexity.chat.completions.create({
				model: perplexityModel,
				messages: [
					{
						role: 'system',
						content:
							'You are an AI assistant helping with task breakdown for software development. Research implementation details and provide comprehensive subtasks.'
					},
					{ role: 'user', content: prompt }
				],
				// Same ??-over-|| rationale as the Anthropic params above.
				temperature: session?.env?.TEMPERATURE ?? CONFIG.temperature,
				max_tokens: session?.env?.MAX_TOKENS ?? CONFIG.maxTokens
			});

			responseText = result.choices[0].message.content;
		} else {
			// Use regular Claude
			if (mcpLog) {
				mcpLog.info('Using Claude for generating subtasks');
			}

			// Call the streaming API
			responseText = await _handleAnthropicStream(
				client,
				apiParams,
				{ mcpLog, silentMode: isSilentMode() },
				!isSilentMode()
			);
		}

		// Ensure we have a valid response
		if (!responseText) {
			throw new Error('Empty response from AI');
		}

		// Try to parse the subtasks
		try {
			const parsedSubtasks = parseSubtasksFromText(responseText);
			if (
				!parsedSubtasks ||
				!Array.isArray(parsedSubtasks) ||
				parsedSubtasks.length === 0
			) {
				throw new Error(
					'Failed to parse valid subtasks array from AI response'
				);
			}
			return { subtasks: parsedSubtasks };
		} catch (parseError) {
			if (mcpLog) {
				mcpLog.error(`Error parsing subtasks: ${parseError.message}`);
				mcpLog.error(`Response start: ${responseText.substring(0, 200)}...`);
			} else {
				// Log the same diagnostics in the CLI fallback as in the MCP
				// branch so parse failures are debuggable either way.
				log('error', `Error parsing subtasks: ${parseError.message}`);
				log('error', `Response start: ${responseText.substring(0, 200)}...`);
			}
			// Return error information instead of fallback subtasks
			return {
				error: parseError.message,
				taskId: null, // This will be filled in by the calling function
				suggestion:
					'Use \'task-master update-task --id=<id> --prompt="Generate subtasks for this task"\' to manually create subtasks.'
			};
		}
	} catch (error) {
		if (mcpLog) {
			mcpLog.error(`Error generating subtasks: ${error.message}`);
		} else {
			log('error', `Error generating subtasks: ${error.message}`);
		}
		// Return error information instead of fallback subtasks
		return {
			error: error.message,
			taskId: null, // This will be filled in by the calling function
			suggestion:
				'Use \'task-master update-task --id=<id> --prompt="Generate subtasks for this task"\' to manually create subtasks.'
		};
	}
}

export default getSubtasksFromAI;
|