diff --git a/notebook/autogen_chatgpt.ipynb b/notebook/autogen_chatgpt.ipynb
index e0016922d..672038b9c 100644
--- a/notebook/autogen_chatgpt.ipynb
+++ b/notebook/autogen_chatgpt.ipynb
@@ -23,7 +23,7 @@
     "\n",
     "FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the [openai,blendsearch] option:\n",
     "```bash\n",
-    "pip install flaml[openai,blendsearch]==1.2.1\n",
+    "pip install flaml[openai,blendsearch]==1.2.2\n",
     "```"
    ]
   },
@@ -40,7 +40,7 @@
    },
    "outputs": [],
    "source": [
-    "# %pip install flaml[openai,blendsearch]==1.2.1 datasets"
+    "# %pip install flaml[openai,blendsearch]==1.2.2 datasets"
    ]
   },
   {
diff --git a/notebook/autogen_openai.ipynb b/notebook/autogen_openai.ipynb
index 58e87f470..68d77935c 100644
--- a/notebook/autogen_openai.ipynb
+++ b/notebook/autogen_openai.ipynb
@@ -23,7 +23,7 @@
     "\n",
     "FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the [autogen,blendsearch] option:\n",
     "```bash\n",
-    "pip install flaml[autogen,blendsearch]==1.2.1\n",
+    "pip install flaml[autogen,blendsearch]==1.2.2\n",
     "```"
    ]
   },
@@ -40,7 +40,7 @@
    },
    "outputs": [],
    "source": [
-    "# %pip install flaml[autogen,blendsearch]==1.2.1 datasets"
+    "# %pip install flaml[autogen,blendsearch]==1.2.2 datasets"
    ]
   },
   {
diff --git a/notebook/research/autogen_code.ipynb b/notebook/research/autogen_code.ipynb
index 29b3f3ae4..25f288ef5 100644
--- a/notebook/research/autogen_code.ipynb
+++ b/notebook/research/autogen_code.ipynb
@@ -21,7 +21,7 @@
     "\n",
     "FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the [autogen] option:\n",
     "```bash\n",
-    "pip install flaml[autogen]==1.2.1\n",
+    "pip install flaml[autogen]==1.2.2\n",
     "```"
    ]
   },
@@ -38,7 +38,7 @@
    },
    "outputs": [],
    "source": [
-    "# %pip install flaml[autogen]==1.2.1 datasets"
+    "# %pip install flaml[autogen]==1.2.2 datasets"
    ]
   },
   {
diff --git a/notebook/research/math_level5counting.ipynb b/notebook/research/math_level5counting.ipynb
index e7e7e0433..aa712cf41 100644
--- a/notebook/research/math_level5counting.ipynb
+++ b/notebook/research/math_level5counting.ipynb
@@ -21,7 +21,7 @@
     "\n",
     "FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the [openai] option:\n",
     "```bash\n",
-    "pip install flaml[openai]==1.2.1\n",
+    "pip install flaml[openai]==1.2.2\n",
     "```"
    ]
   },
@@ -38,7 +38,7 @@
    },
    "outputs": [],
    "source": [
-    "# %pip install flaml[openai]==1.2.1 datasets"
+    "# %pip install flaml[openai]==1.2.2 datasets"
    ]
   },
   {
diff --git a/website/docs/Examples/AutoGen-OpenAI.md b/website/docs/Examples/AutoGen-OpenAI.md
index 4f83cd99f..18759306b 100644
--- a/website/docs/Examples/AutoGen-OpenAI.md
+++ b/website/docs/Examples/AutoGen-OpenAI.md
@@ -5,9 +5,9 @@ In this example, we will tune several hyperparameters for the OpenAI's completio
 
 ### Prerequisites
 
-Install the [autogen,blendsearch] option. The OpenAI integration is in preview.
+Install the [autogen,blendsearch] option.
 ```bash
-pip install "flaml[autogen,blendsearch]==1.2.1 datasets"
+pip install "flaml[autogen,blendsearch]==1.2.2 datasets"
 ```
 
 Setup your OpenAI key:
@@ -64,7 +64,9 @@ Before starting tuning, you need to define the metric for the optimization. For
 from functools import partial
 from flaml.autogen.code_utils import eval_function_completions, generate_assertions
 
-eval_with_generated_assertions = partial(eval_function_completions, assertions=generate_assertions)
+eval_with_generated_assertions = partial(
+    eval_function_completions, assertions=generate_assertions,
+)
 ```
 
 This function will first generate assertion statements for each problem. Then, it uses the assertions to select the generated responses.
diff --git a/website/docs/Use-Cases/Auto-Generation.md b/website/docs/Use-Cases/Auto-Generation.md
index 8f51a1fa3..541141044 100644
--- a/website/docs/Use-Cases/Auto-Generation.md
+++ b/website/docs/Use-Cases/Auto-Generation.md
@@ -1,6 +1,6 @@
 # Auto Generation
 
-`flaml.autogen` is a subpackage for automating generation tasks. It uses [`flaml.tune`](../reference/tune/tune) to find good hyperparameter configurations under budget constraints.
+`flaml.autogen` is a package for automating generation tasks (in preview). It uses [`flaml.tune`](../reference/tune/tune) to find good hyperparameter configurations under budget constraints.
 Such optimization has several benefits:
 * Maximize the utility out of using expensive foundation models.
 * Reduce the inference cost by using cheaper models or configurations which achieve equal or better performance.
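
Reviewer note: besides the version bumps to 1.2.2, the only code change above is the reformatting of the `partial(...)` call in `website/docs/Examples/AutoGen-OpenAI.md`. The sketch below is a minimal check, not part of the patched docs, that the new multi-line form is behaviorally identical to the removed one-liner; only the two imports and the `partial(...)` call are taken from the patch hunk, the rest (variable names `before`/`after`, the asserts, the comments) is illustrative.

```python
# Minimal sketch: the multi-line partial() added in AutoGen-OpenAI.md binds the
# same function and keyword as the one-liner it replaces.
# Assumes flaml[autogen]==1.2.2 is installed so the imports resolve.
from functools import partial

from flaml.autogen.code_utils import eval_function_completions, generate_assertions

# One-liner removed by the patch.
before = partial(eval_function_completions, assertions=generate_assertions)

# Multi-line form added by the patch (wrapped arguments, trailing comma).
after = partial(
    eval_function_completions, assertions=generate_assertions,
)

# Both bind the same evaluation function and the same assertion generator;
# only the source formatting differs.
assert before.func is after.func is eval_function_completions
assert before.keywords == after.keywords == {"assertions": generate_assertions}
print("partial() reformat is behavior-preserving")
```

The wrapped form with a trailing comma keeps the line within common length limits and is the style automatic formatters such as black tend to produce, which is presumably the motivation for touching this line alongside the version bumps.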