Fix: Add await to LLM execution for async handling (#1206)

Co-authored-by: Alonso Guevara <alonsog@microsoft.com>
Author: 9prodhi, 2024-10-09 18:26:28 -05:00 (committed by GitHub)
parent cd4f1fa9ba
commit ce8749bd19


@@ -39,5 +39,5 @@ class OpenAICompletionLLM(BaseLLM[CompletionInput, CompletionOutput]):
         args = get_completion_llm_args(
             kwargs.get("model_parameters"), self.configuration
         )
-        completion = self.client.completions.create(prompt=input, **args)
+        completion = await self.client.completions.create(prompt=input, **args)
         return completion.choices[0].text
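
Note on the change: `_execute_llm` is an async method and `self.client` here is presumably the async OpenAI client, so `completions.create(...)` returns a coroutine. Without `await`, the coroutine is never executed and `completion.choices` fails at runtime. Below is a minimal sketch of that behavior using a hypothetical stand-in client; `FakeAsyncCompletions` and `execute_llm` are illustrative names only, not part of the repository.

import asyncio
from types import SimpleNamespace


class FakeAsyncCompletions:
    """Hypothetical stand-in for an AsyncOpenAI-style completions client."""

    async def create(self, prompt: str, **kwargs):
        await asyncio.sleep(0)  # simulate the network round trip
        return SimpleNamespace(choices=[SimpleNamespace(text=prompt.upper())])


async def execute_llm(client: FakeAsyncCompletions, prompt: str) -> str:
    # Without `await`, create(...) would return an unawaited coroutine and
    # `completion.choices` would raise AttributeError; awaiting yields the response.
    completion = await client.create(prompt=prompt)
    return completion.choices[0].text


print(asyncio.run(execute_llm(FakeAsyncCompletions(), "hello world")))  # HELLO WORLD

Dropping the `await` inside `execute_llm` leaves `completion` as a coroutine object, so accessing `.choices` raises `AttributeError`, which is the failure mode this commit addresses.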