commit 3a68da8774 (parent b7c0c24269)

    update

@@ -373,25 +373,27 @@ class FLOW2(Searcher):
             k_values = np.array(self._histories[k_metric])
             feasible_value = k_values.take(feasible_index)
             self._f_best[k_metric] = np.min(feasible_value)
+            if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
+                tolerance_bound = (
+                    self._f_best[k_metric]
+                    + self.lexico_objectives["tolerances"][k_metric]
+                )
+            else:
+                assert (
+                    self.lexico_objectives["tolerances"][k_metric][-1] == "%"
+                ), "String tolerance of {} should use %% as the suffix".format(k_metric)
+                tolerance_bound = self._f_best[k_metric] * (
+                    1
+                    + 0.01
+                    * float(
+                        self.lexico_objectives["tolerances"][k_metric].replace("%", "")
+                    )
+                )
             feasible_index_filter = np.where(
                 feasible_value
                 <= max(
                     [
-                        self._f_best[k_metric]
-                        + self.lexico_objectives["tolerances"][k_metric]
-                        if not isinstance(
-                            self.lexico_objectives["tolerances"][k_metric], str
-                        )
-                        else self._f_best[k_metric]
-                        * (
-                            1
-                            + 0.01
-                            * float(
-                                self.lexico_objectives["tolerances"][k_metric].replace(
-                                    "%", ""
-                                )
-                            )
-                        ),
+                        tolerance_bound,
                         self.lexico_objectives["targets"][k_metric],
                     ]
                 )
@@ -417,47 +419,31 @@ class FLOW2(Searcher):
                     if k_mode == "min"
                     else -self.lexico_objectives["targets"][k_metric]
                 )
-                if (
-                    result[k_metric]
-                    < max(
-                        [
-                            self._f_best[k_metric]
-                            + self.lexico_objectives["tolerances"][k_metric]
-                            if not isinstance(
-                                self.lexico_objectives["tolerances"][k_metric], str
-                            )
-                            else self._f_best[k_metric]
-                            * (
-                                1
-                                + 0.01
-                                * float(
-                                    self.lexico_objectives["tolerances"][
-                                        k_metric
-                                    ].replace("%", "")
-                                )
-                            ),
-                            k_target,
-                        ]
+                if not isinstance(self.lexico_objectives["tolerances"][k_metric], str):
+                    tolerance_bound = (
+                        self._f_best[k_metric]
+                        + self.lexico_objectives["tolerances"][k_metric]
                     )
-                ) and (
+                else:
+                    assert (
+                        self.lexico_objectives["tolerances"][k_metric][-1] == "%"
+                    ), "String tolerance of {} should use %% as the suffix".format(
+                        k_metric
+                    )
+                    tolerance_bound = self._f_best[k_metric] * (
+                        1
+                        + 0.01
+                        * float(
+                            self.lexico_objectives["tolerances"][k_metric].replace(
+                                "%", ""
+                            )
+                        )
+                    )
+                if (result[k_metric] < max([tolerance_bound, k_target])) and (
                     self.best_obj[k_metric]
                     < max(
                         [
-                            self._f_best[k_metric]
-                            + self.lexico_objectives["tolerances"][k_metric]
-                            if not isinstance(
-                                self.lexico_objectives["tolerances"][k_metric], str
-                            )
-                            else self._f_best[k_metric]
-                            * (
-                                1
-                                + 0.01
-                                * float(
-                                    self.lexico_objectives["tolerances"][
-                                        k_metric
-                                    ].replace("%", "")
-                                )
-                            ),
+                            tolerance_bound,
                             k_target,
                         ]
                     )
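
Note (illustration, not part of the diff): the change factors the inline conditional expression into an explicit tolerance_bound that also accepts a percentage string. A minimal standalone sketch of that rule, with an illustrative helper name and simplified inputs:

def tolerance_bound(f_best, tolerance):
    # Numeric tolerance -> absolute bound: f_best + tolerance.
    if not isinstance(tolerance, str):
        return f_best + tolerance
    # String tolerance must end in "%" -> relative bound: f_best * (1 + p / 100).
    assert tolerance[-1] == "%", "String tolerance should use % as the suffix"
    return f_best * (1 + 0.01 * float(tolerance.replace("%", "")))


print(tolerance_bound(0.20, 0.02))   # ~0.22 (absolute: 0.20 + 0.02)
print(tolerance_bound(0.20, "10%"))  # ~0.22 (relative: 0.20 * 1.10)

With a best observed value of 0.20, an absolute tolerance of 0.02 and a relative tolerance of "10%" happen to give the same bound of roughly 0.22.
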
@@ -32,7 +32,7 @@ def _BraninCurrin(config):
     return {"brain": brain_result, "currin": currin_result}
 
 
-def test_lexiflow(mode="absolute"):
+def test_lexiflow():
     train_dataset = torchvision.datasets.FashionMNIST(
         "test/data",
         train=True,
@@ -105,12 +105,6 @@ def test_lexiflow(mode="absolute"):
 
     lexico_objectives = {}
     lexico_objectives["metrics"] = ["error_rate", "flops"]
-    if mode == "absolute":
-        lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0}
-    else:
-        lexico_objectives["tolerances"] = {"error_rate": "10%", "flops": "0%"}
-    lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0}
-    lexico_objectives["modes"] = ["min", "min"]
 
     search_space = {
         "n_layers": tune.randint(lower=1, upper=3),
@@ -132,7 +126,27 @@ def test_lexiflow(mode="absolute"):
         "n_epoch": 1,
     }
 
+    # Non lexico tune
+    analysis = tune.run(
+        evaluate_function,
+        metric="error_rate",
+        mode="min",
+        num_samples=5,
+        config=search_space,
+        use_ray=False,
+        lexico_objectives=None,
+        low_cost_partial_config=low_cost_partial_config,
+    )
+    print(analysis.best_trial)
+    print(analysis.best_config)
+    print(analysis.best_result)
+
     # lexico tune
+    lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0}
+    lexico_objectives["modes"] = ["min", "min"]
+
+    # 1. lexico tune: absoute tune
+    lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0}
     analysis = tune.run(
         evaluate_function,
         num_samples=5,
@@ -145,15 +159,14 @@ def test_lexiflow(mode="absolute"):
     print(analysis.best_config)
     print(analysis.best_result)
 
-    # Non lexico tune
+    # 2. lexico tune: percentage tolerance
+    lexico_objectives["tolerances"] = {"error_rate": "10%", "flops": "0%"}
     analysis = tune.run(
         evaluate_function,
-        metric="error_rate",
-        mode="min",
         num_samples=5,
         config=search_space,
         use_ray=False,
-        lexico_objectives=None,
+        lexico_objectives=lexico_objectives,
         low_cost_partial_config=low_cost_partial_config,
     )
     print(analysis.best_trial)
@@ -191,6 +204,5 @@ def test_lexiflow_performance():
 
 
 if __name__ == "__main__":
-    test_lexiflow(mode="absolute")
-    test_lexiflow(mode="percentage")
+    test_lexiflow()
     test_lexiflow_performance()
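
Usage note (a hedged sketch, not taken from the commit): after this change, each tolerance passed to flaml.tune.run via lexico_objectives can be either a number or a percentage string. evaluate_function and the search space below are placeholders standing in for the test's real training function:

from flaml import tune


def evaluate_function(config):
    # Placeholder objective returning the two metrics named in lexico_objectives.
    return {"error_rate": 0.1 * config["x"], "flops": 2.0 * config["x"]}


lexico_objectives = {
    "metrics": ["error_rate", "flops"],
    "modes": ["min", "min"],
    "targets": {"error_rate": 0.0, "flops": 0.0},
    # Absolute tolerances ...
    "tolerances": {"error_rate": 0.02, "flops": 0.0},
    # ... or, after this commit, percentage strings:
    # "tolerances": {"error_rate": "10%", "flops": "0%"},
}

analysis = tune.run(
    evaluate_function,
    config={"x": tune.randint(lower=1, upper=10)},
    num_samples=5,
    use_ray=False,
    lexico_objectives=lexico_objectives,
)
print(analysis.best_result)
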