graphiti/core/llm_client/openai_client.py
Pavlo Paliychuk f1c2224c0e
Refactor maintenance structure, add prompt library (#4)
* chore: Initial draft of stubs

* chore: Add comments and mock implementation of the add_episode method

* chore: Add success and error callbacks

* chore: Add success and error callbacks

* refactor: Fix conflicts with the latest merge
2024-08-15 12:03:41 -04:00

25 lines
848 B
Python

import json
import logging
from typing import Any

from openai import AsyncOpenAI

from .client import LLMClient
from .config import LLMConfig
logger = logging.getLogger(__name__)


class OpenAIClient(LLMClient):
    """LLM client backed by the OpenAI async chat-completions API.

    Sends chat messages to an OpenAI-compatible endpoint and parses the
    single JSON object the model is instructed to return.
    """

    def __init__(self, config: LLMConfig):
        """Build the async OpenAI client from *config*.

        Args:
            config: Carries the API key, optional base URL (for
                OpenAI-compatible endpoints), and model name.
        """
        self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
        self.model = config.model

    async def generate_response(self, messages: list[dict[str, str]]) -> dict[str, Any]:
        """Send *messages* to the model and return its parsed JSON reply.

        Args:
            messages: Chat messages in OpenAI format, i.e. dicts with
                "role" and "content" keys.

        Returns:
            The model's reply parsed from its JSON-object response.

        Raises:
            Exception: Any API or JSON-decoding error is logged (with
                traceback) and re-raised for the caller to handle.
        """
        try:
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=0.1,
                max_tokens=3000,
                # Force the model to emit a single JSON object so the
                # json.loads call below is well-defined.
                response_format={"type": "json_object"},
            )
            return json.loads(response.choices[0].message.content)
        except Exception as e:
            # Log with traceback instead of print(); callers decide recovery.
            logger.exception("Error in generating LLM response: %s", e)
            raise