"""
Copyright 2024, Zep Software, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import json
import logging
import typing

import anthropic
from anthropic import AsyncAnthropic
from pydantic import BaseModel

from ..prompts.models import Message
from .client import LLMClient
from .config import LLMConfig
from .errors import RateLimitError

logger = logging.getLogger(__name__)

DEFAULT_MODEL = 'claude-3-5-sonnet-20240620'
DEFAULT_MAX_TOKENS = 8192


class AnthropicClient(LLMClient):
    def __init__(self, config: LLMConfig | None = None, cache: bool = False):
        if config is None:
            config = LLMConfig(max_tokens=DEFAULT_MAX_TOKENS)
        elif config.max_tokens is None:
            config.max_tokens = DEFAULT_MAX_TOKENS
        super().__init__(config, cache)

        self.client = AsyncAnthropic(
            api_key=config.api_key,
            # we'll use tenacity to retry
            max_retries=1,
        )

    async def _generate_response(
        self, messages: list[Message], response_model: type[BaseModel] | None = None
    ) -> dict[str, typing.Any]:
        # response_model is accepted for interface parity with LLMClient;
        # structured output here is enforced via the JSON-only system prompt.
        system_message = messages[0]
        # Prefill the assistant turn with '{' so the model continues a JSON
        # object instead of emitting any preamble text.
        user_messages = [{'role': m.role, 'content': m.content} for m in messages[1:]] + [
            {'role': 'assistant', 'content': '{'}
        ]

        try:
            result = await self.client.messages.create(
                system='Only include JSON in the response. Do not include any additional text or explanation of the content.\n'
                + system_message.content,
                max_tokens=self.max_tokens,
                temperature=self.temperature,
                messages=user_messages,  # type: ignore
                model=self.model or DEFAULT_MODEL,
            )

            # Re-attach the prefilled '{' before parsing the completion.
            return json.loads('{' + result.content[0].text)  # type: ignore
        except anthropic.RateLimitError as e:
            raise RateLimitError from e
        except Exception as e:
            logger.error(f'Error in generating LLM response: {e}')
            raise
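
# Usage sketch (illustrative, not part of the original module). A minimal
# way to drive AnthropicClient end to end, assuming a valid Anthropic API
# key and that this module is imported from within its package so the
# relative imports above resolve. It calls _generate_response directly to
# keep the sketch self-contained; real callers would typically go through
# the public wrapper on LLMClient.
#
#   import asyncio
#
#   async def main() -> None:
#       client = AnthropicClient(LLMConfig(api_key='sk-ant-...'))
#       messages = [
#           Message(role='system', content='Extract facts as JSON.'),
#           Message(role='user', content='Alice met Bob in Paris in 2019.'),
#       ]
#       response = await client._generate_response(messages)
#       print(response)  # a parsed dict built from the model's JSON output
#
#   asyncio.run(main())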