mirror of
https://github.com/eyaltoledano/claude-task-master.git
synced 2025-11-23 05:27:09 +00:00
Implements Claude Code as a new AI provider that uses the Claude Code CLI without requiring API keys. This enables users to leverage Claude models through their local Claude Code installation. Key changes: - Add complete AI SDK v1 implementation for Claude Code provider - Custom SDK with streaming/non-streaming support - Session management for conversation continuity - JSON extraction for object generation mode - Support for advanced settings (maxTurns, allowedTools, etc.) - Integrate Claude Code into Task Master's provider system - Update ai-services-unified.js to handle keyless authentication - Add provider to supported-models.json with opus/sonnet models - Ensure correct maxTokens values are applied (opus: 32000, sonnet: 64000) - Fix maxTokens configuration issue - Add max_tokens property to getAvailableModels() output - Update setModel() to properly handle claude-code models - Create update-config-tokens.js utility for init process - Add comprehensive documentation - User guide with configuration examples - Advanced settings explanation and future integration options The implementation maintains full backward compatibility with existing providers while adding seamless Claude Code support to all Task Master commands.
53 lines
1.7 KiB
JavaScript
53 lines
1.7 KiB
JavaScript
/**
|
|
* update-config-tokens.js
|
|
* Updates config.json with correct maxTokens values from supported-models.json
|
|
*/
|
|
|
|
import fs from 'fs';
|
|
import path from 'path';
|
|
import { fileURLToPath } from 'url';
|
|
import { dirname } from 'path';
|
|
|
|
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

/**
 * Updates the config file with correct maxTokens values from supported-models.json.
 *
 * For each configured role (main/research/fallback), looks up the role's
 * provider/modelId pair in supported-models.json (located next to this module)
 * and, when the catalog entry declares a `max_tokens` value, copies it into the
 * role's `maxTokens` field. The config file is then rewritten in place.
 *
 * @param {string} configPath - Path to the config.json file to update
 * @returns {boolean} True if successful, false otherwise. Errors are logged and
 *   swallowed (never thrown) so the caller — the init process — can continue
 *   with the existing config.
 */
export function updateConfigMaxTokens(configPath) {
	try {
		// Load the catalog of supported models shipped alongside this script.
		const supportedModelsPath = path.join(__dirname, 'supported-models.json');
		const supportedModels = JSON.parse(
			fs.readFileSync(supportedModelsPath, 'utf-8')
		);

		// Load the user's config.
		const config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));

		// Update each role's maxTokens if its model exists in the catalog.
		const roles = ['main', 'research', 'fallback'];
		for (const role of roles) {
			const roleConfig = config.models?.[role];
			if (!roleConfig) continue;

			// Guard the shape: each provider entry must be an array of models.
			const providerModels = supportedModels[roleConfig.provider];
			if (!Array.isArray(providerModels)) continue;

			const modelData = providerModels.find(
				(m) => m.id === roleConfig.modelId
			);
			// Nullish check (not truthiness) so any declared numeric value,
			// including 0, is copied rather than silently skipped.
			if (modelData?.max_tokens != null) {
				roleConfig.maxTokens = modelData.max_tokens;
			}
		}

		// Write back the updated config, pretty-printed with a trailing newline.
		fs.writeFileSync(configPath, JSON.stringify(config, null, 2) + '\n');
		return true;
	} catch (error) {
		// Best-effort: report failure to the caller instead of aborting init.
		console.error('Error updating config maxTokens:', error.message);
		return false;
	}
}