mirror of
https://github.com/getzep/graphiti.git
synced 2025-06-27 02:00:02 +00:00

* Makefile and format * fix podcast stuff * refactor: update import statement for transcript_parser in podcast_runner.py * format and linting * chore: Update import statements and remove unused code in maintenance module
30 lines
910 B
Python
30 lines
910 B
Python
import json
import logging
from typing import Any

from openai import AsyncOpenAI

from .client import LLMClient
from .config import LLMConfig
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class OpenAIClient(LLMClient):
    """LLM client backed by the OpenAI chat-completions API.

    Sends chat messages to a configured model and returns the model's
    reply parsed as a JSON object.
    """

    def __init__(self, config: LLMConfig):
        """Build the async OpenAI client from the given configuration.

        Args:
            config: Carries the API key, optional base URL, and model name.
        """
        self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
        self.model = config.model

    async def generate_response(self, messages: list[dict[str, str]]) -> dict[str, Any]:
        """Request a completion for *messages* and return the parsed JSON reply.

        Args:
            messages: Chat messages in OpenAI format (``role``/``content`` dicts).

        Returns:
            The model's response body decoded from JSON.

        Raises:
            Exception: Re-raises any API error, and ``json.JSONDecodeError``
                if the response content is not valid JSON.
        """
        try:
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=0.1,
                max_tokens=3000,
                # Force the model to emit a JSON object so json.loads below succeeds.
                response_format={"type": "json_object"},
            )
            return json.loads(response.choices[0].message.content)
        except Exception as e:
            # logger.exception records the traceback (logger.error did not);
            # the error is re-raised so callers can handle it.
            logger.exception("Error in generating LLM response: %s", e)
            raise