Mirror of https://github.com/HKUDS/LightRAG.git (synced 2025-11-09 22:33:57 +00:00)
Fix linting
parent 3d418d95c5
commit 3b9c28fae9
@@ -122,7 +122,9 @@ async def llama_index_complete_if_cache(
     # Add current prompt
     formatted_messages.append(ChatMessage(role=MessageRole.USER, content=prompt))
 
-    response: ChatResponse = await model.achat(messages=formatted_messages, **chat_kwargs)
+    response: ChatResponse = await model.achat(
+        messages=formatted_messages, **chat_kwargs
+    )
 
     # In newer versions, the response is in message.content
     content = response.message.content
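The change is purely cosmetic: the long single-line model.achat(...) call is wrapped across multiple lines to satisfy the linter, with no behavioral change. For context, below is a minimal sketch of how that call sits inside a function like llama_index_complete_if_cache, assuming LlamaIndex's ChatMessage / MessageRole / achat chat API; the signature and history handling here are illustrative assumptions, not the repository's actual code.

    # Minimal sketch only: shows where the reformatted achat call lives,
    # assuming LlamaIndex's chat API. The function signature and history
    # handling are hypothetical, not the repository's implementation.
    from llama_index.core.llms import LLM, ChatMessage, ChatResponse, MessageRole


    async def complete_chat_sketch(
        model: LLM,
        prompt: str,
        history_messages: list[dict] | None = None,
        **chat_kwargs,
    ) -> str:
        formatted_messages: list[ChatMessage] = []

        # Convert prior turns (assumed shape: {"role": ..., "content": ...})
        # into ChatMessage objects.
        for msg in history_messages or []:
            formatted_messages.append(
                ChatMessage(role=MessageRole(msg["role"]), content=msg["content"])
            )

        # Add current prompt
        formatted_messages.append(ChatMessage(role=MessageRole.USER, content=prompt))

        # The linting fix only wraps this call onto multiple lines.
        response: ChatResponse = await model.achat(
            messages=formatted_messages, **chat_kwargs
        )

        # In newer LlamaIndex versions, the generated text is in message.content.
        return response.message.content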