Mirror of https://github.com/microsoft/autogen.git (synced 2025-11-01 18:29:49 +00:00)

commit 3a68da8774
parent b7c0c24269

update
@@ -373,25 +373,27 @@ class FLOW2(Searcher):
             k_values = np.array(self._histories[k_metric])
             feasible_value = k_values.take(feasible_index)
             self._f_best[k_metric] = np.min(feasible_value)
+            if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
+                tolerance_bound = (
+                    self._f_best[k_metric]
+                    + self.lexico_objectives["tolerances"][k_metric]
+                )
+            else:
+                assert (
+                    self.lexico_objectives["tolerances"][k_metric][-1] == "%"
+                ), "String tolerance of {} should use %% as the suffix".format(k_metric)
+                tolerance_bound = self._f_best[k_metric] * (
+                    1
+                    + 0.01
+                    * float(
+                        self.lexico_objectives["tolerances"][k_metric].replace("%", "")
+                    )
+                )
             feasible_index_filter = np.where(
                 feasible_value
                 <= max(
                     [
-                        self._f_best[k_metric]
-                        + self.lexico_objectives["tolerances"][k_metric]
-                        if not isinstance(
-                            self.lexico_objectives["tolerances"][k_metric], str
-                        )
-                        else self._f_best[k_metric]
-                        * (
-                            1
-                            + 0.01
-                            * float(
-                                self.lexico_objectives["tolerances"][k_metric].replace(
-                                    "%", ""
-                                )
-                            )
-                        ),
+                        tolerance_bound,
                         self.lexico_objectives["targets"][k_metric],
                     ]
                 )
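The hunk above pulls the per-metric slack out of the max(...) call into an explicit tolerance_bound. A minimal standalone sketch of that arithmetic (the helper name compute_tolerance_bound is hypothetical, not part of FLAML): a numeric tolerance is an absolute slack added to the best value observed so far, while a string such as "10%" is a relative slack.

def compute_tolerance_bound(f_best: float, tolerance) -> float:
    # Numeric tolerance: absolute slack on top of the best value seen so far.
    if not isinstance(tolerance, str):
        return f_best + tolerance
    # String tolerance: must end with "%" and is interpreted as a relative slack.
    assert tolerance.endswith("%"), "String tolerance should use % as the suffix"
    return f_best * (1 + 0.01 * float(tolerance.replace("%", "")))

# With a best error_rate of 0.20, an absolute tolerance of 0.02 and a relative
# tolerance of "10%" both give a feasibility bound of 0.22.
assert abs(compute_tolerance_bound(0.20, 0.02) - 0.22) < 1e-9
assert abs(compute_tolerance_bound(0.20, "10%") - 0.22) < 1e-9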
@@ -417,47 +419,31 @@ class FLOW2(Searcher):
                     if k_mode == "min"
                     else -self.lexico_objectives["targets"][k_metric]
                 )
-                if (
-                    result[k_metric]
-                    < max(
-                        [
-                            self._f_best[k_metric]
-                            + self.lexico_objectives["tolerances"][k_metric]
-                            if not isinstance(
-                                self.lexico_objectives["tolerances"][k_metric], str
-                            )
-                            else self._f_best[k_metric]
-                            * (
-                                1
-                                + 0.01
-                                * float(
-                                    self.lexico_objectives["tolerances"][
-                                        k_metric
-                                    ].replace("%", "")
-                                )
-                            ),
-                            k_target,
-                        ]
+                if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
+                    tolerance_bound = (
+                        self._f_best[k_metric]
+                        + self.lexico_objectives["tolerances"][k_metric]
+                    )
+                else:
+                    assert (
+                        self.lexico_objectives["tolerances"][k_metric][-1] == "%"
+                    ), "String tolerance of {} should use %% as the suffix".format(
+                        k_metric
+                    )
+                    tolerance_bound = self._f_best[k_metric] * (
+                        1
+                        + 0.01
+                        * float(
+                            self.lexico_objectives["tolerances"][k_metric].replace(
+                                "%", ""
+                            )
+                        )
                     )
-                ) and (
+                if (result[k_metric] < max([tolerance_bound, k_target])) and (
                     self.best_obj[k_metric]
                     < max(
                         [
-                            self._f_best[k_metric]
-                            + self.lexico_objectives["tolerances"][k_metric]
-                            if not isinstance(
-                                self.lexico_objectives["tolerances"][k_metric], str
-                            )
-                            else self._f_best[k_metric]
-                            * (
-                                1
-                                + 0.01
-                                * float(
-                                    self.lexico_objectives["tolerances"][
-                                        k_metric
-                                    ].replace("%", "")
-                                )
-                            ),
+                            tolerance_bound,
                             k_target,
                         ]
                     )
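For readability, here is an illustrative sketch of the acceptance check this hunk now expresses through tolerance_bound (the function name within_lexico_slack is made up for illustration; the real code operates on self.best_obj and self._f_best): on each metric, both the incoming result and the incumbent best are compared against max(tolerance_bound, target), so the user-specified target acts as a floor for the slack.

def within_lexico_slack(result_value: float, best_value: float,
                        tolerance_bound: float, target: float) -> bool:
    # Both values must stay under the same threshold for the metric to count
    # as effectively tied, letting the comparison move on to the next
    # objective in the lexicographic order.
    threshold = max(tolerance_bound, target)
    return result_value < threshold and best_value < threshold

# f_best = 0.20 with tolerance "10%" gives tolerance_bound = 0.22; a trial with
# error_rate 0.215 therefore still counts as within slack on this metric.
print(within_lexico_slack(0.215, 0.20, 0.22, 0.0))  # True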
@@ -32,7 +32,7 @@ def _BraninCurrin(config):
     return {"brain": brain_result, "currin": currin_result}
 
 
-def test_lexiflow(mode="absolute"):
+def test_lexiflow():
     train_dataset = torchvision.datasets.FashionMNIST(
         "test/data",
         train=True,
@@ -105,12 +105,6 @@ def test_lexiflow(mode="absolute"):
 
     lexico_objectives = {}
     lexico_objectives["metrics"] = ["error_rate", "flops"]
-    if mode == "absolute":
-        lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0}
-    else:
-        lexico_objectives["tolerances"] = {"error_rate": "10%", "flops": "0%"}
-    lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0}
-    lexico_objectives["modes"] = ["min", "min"]
 
     search_space = {
         "n_layers": tune.randint(lower=1, upper=3),
@@ -132,7 +126,27 @@ def test_lexiflow(mode="absolute"):
         "n_epoch": 1,
     }
 
+    # Non lexico tune
+    analysis = tune.run(
+        evaluate_function,
+        metric="error_rate",
+        mode="min",
+        num_samples=5,
+        config=search_space,
+        use_ray=False,
+        lexico_objectives=None,
+        low_cost_partial_config=low_cost_partial_config,
+    )
+    print(analysis.best_trial)
+    print(analysis.best_config)
+    print(analysis.best_result)
+
     # lexico tune
+    lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0}
+    lexico_objectives["modes"] = ["min", "min"]
+
+    # 1. lexico tune: absolute tolerance
+    lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0}
     analysis = tune.run(
         evaluate_function,
         num_samples=5,
@@ -145,15 +159,14 @@ def test_lexiflow(mode="absolute"):
     print(analysis.best_config)
     print(analysis.best_result)
 
-    # Non lexico tune
+    # 2. lexico tune: percentage tolerance
+    lexico_objectives["tolerances"] = {"error_rate": "10%", "flops": "0%"}
     analysis = tune.run(
         evaluate_function,
-        metric="error_rate",
-        mode="min",
         num_samples=5,
         config=search_space,
         use_ray=False,
-        lexico_objectives=None,
+        lexico_objectives=lexico_objectives,
         low_cost_partial_config=low_cost_partial_config,
    )
    print(analysis.best_trial)
@@ -191,6 +204,5 @@ def test_lexiflow_performance():
 
 
 if __name__ == "__main__":
-    test_lexiflow(mode="absolute")
-    test_lexiflow(mode="percentage")
+    test_lexiflow()
     test_lexiflow_performance()