Mirror of https://github.com/microsoft/graphrag.git, synced 2025-12-12 15:31:24 +00:00.
Fix: Add await to LLM execution for async handling (#1206)
Co-authored-by: Alonso Guevara <alonsog@microsoft.com>
This commit is contained in:
parent
cd4f1fa9ba
commit
ce8749bd19
@ -39,5 +39,5 @@ class OpenAICompletionLLM(BaseLLM[CompletionInput, CompletionOutput]):
|
||||
args = get_completion_llm_args(
|
||||
kwargs.get("model_parameters"), self.configuration
|
||||
)
|
||||
completion = self.client.completions.create(prompt=input, **args)
|
||||
completion = await self.client.completions.create(prompt=input, **args)
|
||||
return completion.choices[0].text
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user