From 3a68da87742c040bbd1321bf435c57d4ccd3be43 Mon Sep 17 00:00:00 2001
From: skzhang1
Date: Tue, 17 Jan 2023 06:49:59 -0800
Subject: [PATCH] update

---
 flaml/tune/searcher/flow2.py | 90 +++++++++++++++---------------------
 test/tune/test_lexiflow.py   | 38 +++++++++------
 2 files changed, 63 insertions(+), 65 deletions(-)

diff --git a/flaml/tune/searcher/flow2.py b/flaml/tune/searcher/flow2.py
index e709ebae7..e656c317a 100644
--- a/flaml/tune/searcher/flow2.py
+++ b/flaml/tune/searcher/flow2.py
@@ -373,25 +373,27 @@ class FLOW2(Searcher):
             k_values = np.array(self._histories[k_metric])
             feasible_value = k_values.take(feasible_index)
             self._f_best[k_metric] = np.min(feasible_value)
+            if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
+                tolerance_bound = (
+                    self._f_best[k_metric]
+                    + self.lexico_objectives["tolerances"][k_metric]
+                )
+            else:
+                assert (
+                    self.lexico_objectives["tolerances"][k_metric][-1] == "%"
+                ), "String tolerance of {} should use %% as the suffix".format(k_metric)
+                tolerance_bound = self._f_best[k_metric] * (
+                    1
+                    + 0.01
+                    * float(
+                        self.lexico_objectives["tolerances"][k_metric].replace("%", "")
+                    )
+                )
             feasible_index_filter = np.where(
                 feasible_value
                 <= max(
                     [
-                        self._f_best[k_metric]
-                        + self.lexico_objectives["tolerances"][k_metric]
-                        if not isinstance(
-                            self.lexico_objectives["tolerances"][k_metric], str
-                        )
-                        else self._f_best[k_metric]
-                        * (
-                            1
-                            + 0.01
-                            * float(
-                                self.lexico_objectives["tolerances"][k_metric].replace(
-                                    "%", ""
-                                )
-                            )
-                        ),
+                        tolerance_bound,
                         self.lexico_objectives["targets"][k_metric],
                     ]
                 )
@@ -417,47 +419,31 @@ class FLOW2(Searcher):
                     if k_mode == "min"
                     else -self.lexico_objectives["targets"][k_metric]
                 )
-                if (
-                    result[k_metric]
-                    < max(
-                        [
-                            self._f_best[k_metric]
-                            + self.lexico_objectives["tolerances"][k_metric]
-                            if not isinstance(
-                                self.lexico_objectives["tolerances"][k_metric], str
-                            )
-                            else self._f_best[k_metric]
-                            * (
-                                1
-                                + 0.01
-                                * float(
-                                    self.lexico_objectives["tolerances"][
-                                        k_metric
-                                    ].replace("%", "")
-                                )
-                            ),
-                            k_target,
-                        ]
-                    )
-                ) and (
+                if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
+                    tolerance_bound = (
+                        self._f_best[k_metric]
+                        + self.lexico_objectives["tolerances"][k_metric]
+                    )
+                else:
+                    assert (
+                        self.lexico_objectives["tolerances"][k_metric][-1] == "%"
+                    ), "String tolerance of {} should use %% as the suffix".format(
+                        k_metric
+                    )
+                    tolerance_bound = self._f_best[k_metric] * (
+                        1
+                        + 0.01
+                        * float(
+                            self.lexico_objectives["tolerances"][k_metric].replace(
+                                "%", ""
+                            )
+                        )
+                    )
+                if (result[k_metric] < max([tolerance_bound, k_target])) and (
                     self.best_obj[k_metric]
                     < max(
                         [
-                            self._f_best[k_metric]
-                            + self.lexico_objectives["tolerances"][k_metric]
-                            if not isinstance(
-                                self.lexico_objectives["tolerances"][k_metric], str
-                            )
-                            else self._f_best[k_metric]
-                            * (
-                                1
-                                + 0.01
-                                * float(
-                                    self.lexico_objectives["tolerances"][
-                                        k_metric
-                                    ].replace("%", "")
-                                )
-                            ),
+                            tolerance_bound,
                             k_target,
                         ]
                     )
diff --git a/test/tune/test_lexiflow.py b/test/tune/test_lexiflow.py
index 57fc73daf..c366421ee 100644
--- a/test/tune/test_lexiflow.py
+++ b/test/tune/test_lexiflow.py
@@ -32,7 +32,7 @@ def _BraninCurrin(config):
     return {"brain": brain_result, "currin": currin_result}
 
 
-def test_lexiflow(mode="absolute"):
+def test_lexiflow():
     train_dataset = torchvision.datasets.FashionMNIST(
         "test/data",
         train=True,
@@ -105,12 +105,6 @@ def test_lexiflow(mode="absolute"):
 
     lexico_objectives = {}
     lexico_objectives["metrics"] = ["error_rate", "flops"]
-    if mode == "absolute":
lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0} - else: - lexico_objectives["tolerances"] = {"error_rate": "10%", "flops": "0%"} - lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0} - lexico_objectives["modes"] = ["min", "min"] search_space = { "n_layers": tune.randint(lower=1, upper=3), @@ -132,7 +126,27 @@ def test_lexiflow(mode="absolute"): "n_epoch": 1, } + # Non lexico tune + analysis = tune.run( + evaluate_function, + metric="error_rate", + mode="min", + num_samples=5, + config=search_space, + use_ray=False, + lexico_objectives=None, + low_cost_partial_config=low_cost_partial_config, + ) + print(analysis.best_trial) + print(analysis.best_config) + print(analysis.best_result) + # lexico tune + lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0} + lexico_objectives["modes"] = ["min", "min"] + + # 1. lexico tune: absoute tune + lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0} analysis = tune.run( evaluate_function, num_samples=5, @@ -145,15 +159,14 @@ def test_lexiflow(mode="absolute"): print(analysis.best_config) print(analysis.best_result) - # Non lexico tune + # 2. lexico tune: percentage tolerance + lexico_objectives["tolerances"] = {"error_rate": "10%", "flops": "0%"} analysis = tune.run( evaluate_function, - metric="error_rate", - mode="min", num_samples=5, config=search_space, use_ray=False, - lexico_objectives=None, + lexico_objectives=lexico_objectives, low_cost_partial_config=low_cost_partial_config, ) print(analysis.best_trial) @@ -191,6 +204,5 @@ def test_lexiflow_performance(): if __name__ == "__main__": - test_lexiflow(mode="absolute") - test_lexiflow(mode="percentage") + test_lexiflow() test_lexiflow_performance()