Merge branch 'main' into first_contribution

This commit is contained in:
Shaokun 2023-01-29 22:56:45 -05:00 committed by GitHub
commit df5efa5c2d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 124 additions and 38 deletions

View File

@ -125,8 +125,7 @@ class BlendSearch(Searcher):
objectives in the metric list. If not provided, we use "min" as the default mode for all the objectives.
- "targets" (optional): a dictionary to specify the optimization targets on the objectives. The keys are the
metric names (provided in "metric"), and the values are the numerical target values.
- "tolerances" (optional): a dictionary to specify the optimality tolerances on objectives. The keys are the
metric names (provided in "metrics"), and the values are the numerical tolerances values.
- "tolerances" (optional): a dictionary to specify the optimality tolerances on objectives. The keys are the metric names (provided in "metrics"), and the values are the absolute or percentage tolerances, given as a number or a string (e.g., "5%").
E.g.,
```python
lexico_objectives = {
@ -136,6 +135,16 @@ class BlendSearch(Searcher):
"targets": {"error_rate": 0.0},
}
```
We also support percentage tolerance.
E.g.,
```python
lexico_objectives = {
"metrics": ["error_rate", "pred_time"],
"modes": ["min", "min"],
"tolerances": {"error_rate": "5%", "pred_time": "0%"},
"targets": {"error_rate": 0.0},
}
```
experimental: A bool of whether to use experimental features.
"""
self._eps = SEARCH_THREAD_EPS

View File

@ -80,8 +80,7 @@ class FLOW2(Searcher):
objectives in the metric list. If not provided, we use "min" as the default mode for all the objectives
- "targets" (optional): a dictionary to specify the optimization targets on the objectives. The keys are the
metric names (provided in "metric"), and the values are the numerical target values.
- "tolerances" (optional): a dictionary to specify the optimality tolerances on objectives. The keys are the
metric names (provided in "metrics"), and the values are the numerical tolerances values.
- "tolerances" (optional): a dictionary to specify the optimality tolerances on objectives. The keys are the metric names (provided in "metrics"), and the values are the absolute or percentage tolerances, given as a number or a string (e.g., "5%").
E.g.,
```python
lexico_objectives = {
@ -91,6 +90,16 @@ class FLOW2(Searcher):
"targets": {"error_rate": 0.0},
}
```
We also support percentage tolerance.
E.g.,
```python
lexico_objectives = {
"metrics": ["error_rate", "pred_time"],
"modes": ["min", "min"],
"tolerances": {"error_rate": "5%", "pred_time": "0%"},
"targets": {"error_rate": 0.0},
}
```
"""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
@ -364,14 +373,27 @@ class FLOW2(Searcher):
k_values = np.array(self._histories[k_metric])
feasible_value = k_values.take(feasible_index)
self._f_best[k_metric] = np.min(feasible_value)
if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
tolerance_bound = (
self._f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric]
)
else:
assert (
self.lexico_objectives["tolerances"][k_metric][-1] == "%"
), "String tolerance of {} should use %% as the suffix".format(k_metric)
tolerance_bound = self._f_best[k_metric] * (
1
+ 0.01
* float(
self.lexico_objectives["tolerances"][k_metric].replace("%", "")
)
)
feasible_index_filter = np.where(
feasible_value
<= max(
[
self._f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric],
self.lexico_objectives["targets"][k_metric],
]
tolerance_bound,
self.lexico_objectives["targets"][k_metric],
)
)[0]
feasible_index = feasible_index.take(feasible_index_filter)
@ -395,23 +417,31 @@ class FLOW2(Searcher):
if k_mode == "min"
else -self.lexico_objectives["targets"][k_metric]
)
if (
result[k_metric]
< max(
[
self._f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric],
k_target,
]
if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
tolerance_bound = (
self._f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric]
)
) and (
else:
assert (
self.lexico_objectives["tolerances"][k_metric][-1] == "%"
), "String tolerance of {} should use %% as the suffix".format(
k_metric
)
tolerance_bound = self._f_best[k_metric] * (
1
+ 0.01
* float(
self.lexico_objectives["tolerances"][k_metric].replace(
"%", ""
)
)
)
if (result[k_metric] < max(tolerance_bound, k_target)) and (
self.best_obj[k_metric]
< max(
[
self._f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric],
k_target,
]
tolerance_bound,
k_target,
)
):
continue

View File

@ -95,14 +95,25 @@ class ExperimentAnalysis(EA):
)
feasible_value = k_values.take(feasible_index)
f_best[k_metric] = np.min(feasible_value)
feasible_index_filter = np.where(
feasible_value
<= max(
[
f_best[k_metric]
+ self.lexico_objectives["tolerances"][k_metric],
k_target,
]
f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric]
if not isinstance(
self.lexico_objectives["tolerances"][k_metric], str
)
else f_best[k_metric]
* (
1
+ 0.01
* float(
self.lexico_objectives["tolerances"][k_metric].replace(
"%", ""
)
)
),
k_target,
)
)[0]
feasible_index = feasible_index.take(feasible_index_filter)
@ -405,8 +416,7 @@ def run(
objectives in the metric list. If not provided, we use "min" as the default mode for all the objectives.
- "targets" (optional): a dictionary to specify the optimization targets on the objectives. The keys are the
metric names (provided in "metric"), and the values are the numerical target values.
- "tolerances" (optional): a dictionary to specify the optimality tolerances on objectives. The keys are the
metric names (provided in "metrics"), and the values are the numerical tolerances values.
- "tolerances" (optional): a dictionary to specify the optimality tolerances on objectives. The keys are the metric names (provided in "metrics"), and the values are the absolute or percentage tolerances, given as a number or a string (e.g., "5%").
E.g.,
```python
lexico_objectives = {
@ -415,6 +425,16 @@ def run(
"tolerances": {"error_rate": 0.01, "pred_time": 0.0},
"targets": {"error_rate": 0.0},
}
```
We also support percentage tolerance.
E.g.,
```python
lexico_objectives = {
"metrics": ["error_rate", "pred_time"],
"modes": ["min", "min"],
"tolerances": {"error_rate": "5%", "pred_time": "0%"},
"targets": {"error_rate": 0.0},
}
```
**ray_args: keyword arguments to pass to ray.tune.run().
Only valid when use_ray=True.

View File

@ -105,9 +105,6 @@ def test_lexiflow():
lexico_objectives = {}
lexico_objectives["metrics"] = ["error_rate", "flops"]
lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0}
lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0}
lexico_objectives["modes"] = ["min", "min"]
search_space = {
"n_layers": tune.randint(lower=1, upper=3),
@ -129,7 +126,27 @@ def test_lexiflow():
"n_epoch": 1,
}
# Non lexico tune
analysis = tune.run(
evaluate_function,
metric="error_rate",
mode="min",
num_samples=5,
config=search_space,
use_ray=False,
lexico_objectives=None,
low_cost_partial_config=low_cost_partial_config,
)
print(analysis.best_trial)
print(analysis.best_config)
print(analysis.best_result)
# lexico tune
lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0}
lexico_objectives["modes"] = ["min", "min"]
# 1. lexico tune: absolute tolerance
lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0}
analysis = tune.run(
evaluate_function,
num_samples=5,
@ -142,15 +159,14 @@ def test_lexiflow():
print(analysis.best_config)
print(analysis.best_result)
# Non lexico tune
# 2. lexico tune: percentage tolerance
lexico_objectives["tolerances"] = {"error_rate": "10%", "flops": "0%"}
analysis = tune.run(
evaluate_function,
metric="error_rate",
mode="min",
num_samples=5,
config=search_space,
use_ray=False,
lexico_objectives=None,
lexico_objectives=lexico_objectives,
low_cost_partial_config=low_cost_partial_config,
)
print(analysis.best_trial)

View File

@ -162,5 +162,10 @@ analysis = tune.run(
)
```
We also support providing percentage tolerance as shown below.
```python
lexico_objectives["tolerances"] = {"error_rate": "5%", "flops": "0%"}
```
[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/tune_lexicographic.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/tune_lexicographic.ipynb)

View File

@ -539,7 +539,7 @@ We support tuning multiple objectives with lexicographic preference by providing
`lexico_objectives` is a dictionary that contains the following fields of key-value pairs:
- `metrics`: a list of optimization objectives with the orders reflecting the priorities/preferences of the objectives.
- `modes`: (optional) a list of optimization modes (each mode either "min" or "max") corresponding to the objectives in the metric list. If not provided, we use "min" as the default mode for all the objectives.
- `tolerances`: (optional) a dictionary to specify the optimality tolerances on objectives. The keys are the metric names (provided in "metrics"), and the values are the numerical tolerances values.
- `tolerances`: (optional) a dictionary to specify the optimality tolerances on objectives. The keys are the metric names (provided in "metrics"), and the values are the absolute or percentage tolerances, given as a number or a string (e.g., "5%").
- `targets`: (optional) a dictionary to specify the optimization targets on the objectives. The keys are the metric names (provided in "metric"), and the values are the numerical target values.
In the following example, we want to minimize `val_loss` and `pred_time` of the model where `val_loss` has high priority. The tolerances for `val_loss` and `pred_time` are 0.02 and 0 respectively. We do not set targets for these two objectives, so we set them to -inf for both objectives.
@ -554,6 +554,12 @@ lexico_objectives["targets"] = {"val_loss": -float('inf'), "pred_time": -float('
# provide the lexico_objectives to tune.run
tune.run(..., search_alg=None, lexico_objectives=lexico_objectives)
```
We also support providing percentage tolerance as shown below.
```python
lexico_objectives["tolerances"] = {"val_loss": "10%", "pred_time": "0%"}
```
NOTE:
1. When lexico_objectives is not None, the arguments metric, mode, will be invalid, and flaml's tune uses CFO as the `search_alg`, which makes the input (if provided) `search_alg` invalid.