Add performance test for LexiFlow (#812)
* add test
* fix
* change test name
parent cb3402052f
commit 5eb9927642
@@ -358,14 +358,14 @@ class FLOW2(Searcher):
     def update_fbest(
         self,
     ):
-        # TODO: Improve the efficiency
         obj_initial = self.lexico_objectives["metrics"][0]
-        feasible_index = [*range(len(self._histories[obj_initial]))]
+        feasible_index = np.array([*range(len(self._histories[obj_initial]))])
         for k_metric in self.lexico_objectives["metrics"]:
             k_values = np.array(self._histories[k_metric])
-            self._f_best[k_metric] = np.min(k_values.take(feasible_index))
-            feasible_index_prior = np.where(
-                k_values
+            feasible_value = k_values.take(feasible_index)
+            self._f_best[k_metric] = np.min(feasible_value)
+            feasible_index_filter = np.where(
+                feasible_value
                 <= max(
                     [
                         self._f_best[k_metric]
@@ -373,10 +373,8 @@ class FLOW2(Searcher):
                         self.lexico_objectives["targets"][k_metric],
                     ]
                 )
-            )[0].tolist()
-            feasible_index = [
-                val for val in feasible_index if val in feasible_index_prior
-            ]
+            )[0]
+            feasible_index = feasible_index.take(feasible_index_filter)

     def lexico_compare(self, result) -> bool:
         if self._histories is None:
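The two hunks above are the efficiency fix the commit message refers to: the old code kept `feasible_index` as a Python list and intersected it with `feasible_index_prior` using an O(n²) membership test inside a comprehension, while the new code keeps the indices in a NumPy array and narrows them with `np.where` plus `ndarray.take`. A minimal standalone sketch of the two patterns, using a made-up metric history and threshold rather than FLOW2's internal state:

import numpy as np

# Hypothetical metric history and feasibility threshold, for illustration only.
k_values = np.array([3.0, 7.5, 2.1, 9.9, 4.4])
threshold = 5.0

# Old pattern: plain lists plus a membership test inside a comprehension (quadratic).
feasible_index_old = [*range(len(k_values))]
feasible_index_prior = np.where(k_values <= threshold)[0].tolist()
feasible_index_old = [v for v in feasible_index_old if v in feasible_index_prior]

# New pattern: keep the indices as an ndarray and filter with where/take (vectorized).
feasible_index_new = np.array([*range(len(k_values))])
feasible_value = k_values.take(feasible_index_new)
feasible_index_filter = np.where(feasible_value <= threshold)[0]
feasible_index_new = feasible_index_new.take(feasible_index_filter)

assert feasible_index_old == feasible_index_new.tolist()  # same result, less Python overhead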
@@ -74,10 +74,10 @@ class ExperimentAnalysis(EA):
                histories[objective].append(
                    results[keys[time_index]][objective]
                    if mode == "min"
-                    else -trials[keys[time_index]][objective]
+                    else -results[keys[time_index]][objective]
                )
        obj_initial = self.lexico_objectives["metrics"][0]
-        feasible_index = [*range(len(histories[obj_initial]))]
+        feasible_index = np.array([*range(len(histories[obj_initial]))])
        for k_metric, k_mode in zip(
            self.lexico_objectives["metrics"], self.lexico_objectives["modes"]
        ):
@@ -87,9 +87,10 @@ class ExperimentAnalysis(EA):
                if k_mode == "max"
                else self.lexico_objectives["targets"][k_metric]
            )
-            f_best[k_metric] = np.min(k_values.take(feasible_index))
-            feasible_index_prior = np.where(
-                k_values
+            feasible_value = k_values.take(feasible_index)
+            f_best[k_metric] = np.min(feasible_value)
+            feasible_index_filter = np.where(
+                feasible_value
                <= max(
                    [
                        f_best[k_metric]
@@ -97,10 +98,8 @@ class ExperimentAnalysis(EA):
                        k_target,
                    ]
                )
-            )[0].tolist()
-            feasible_index = [
-                val for val in feasible_index if val in feasible_index_prior
-            ]
+            )[0]
+            feasible_index = feasible_index.take(feasible_index_filter)
        best_trial = trials[feasible_index[-1]]
        return best_trial

@@ -1,9 +1,11 @@
 import torch
 import thop
 import torch.nn as nn
-from flaml import tune
 import torch.nn.functional as F
 import torchvision
+from flaml import tune
+from collections import defaultdict
+import math
 import numpy as np

 DEVICE = torch.device("cpu")
@@ -12,6 +14,24 @@ N_TRAIN_EXAMPLES = BATCHSIZE * 30
 N_VALID_EXAMPLES = BATCHSIZE * 10


+def _BraninCurrin(config):
+    # Rescale brain
+    x_1 = 15 * config["x1"] - 5
+    x_2 = 15 * config["x2"]
+    # Brain function
+    t1 = x_2 - 5.1 / (4 * math.pi**2) * x_1**2 + 5 / math.pi * x_1 - 6
+    t2 = 10 * (1 - 1 / (8 * math.pi)) * math.cos(x_1)
+    brain_result = t1**2 + t2 + 10
+    # Currin function
+    xc_1 = config["x1"]
+    xc_2 = config["x2"]
+    factor1 = 1 - math.exp(-1 / (2 * xc_2))
+    numer = 2300 * pow(xc_1, 3) + 1900 * pow(xc_1, 2) + 2092 * xc_1 + 60
+    denom = 100 * pow(xc_1, 3) + 500 * pow(xc_1, 2) + 4 * xc_1 + 20
+    currin_result = factor1 * numer / denom
+    return {"brain": brain_result, "currin": currin_result}
+
+
 def test_lexiflow():
     train_dataset = torchvision.datasets.FashionMNIST(
         "test/data",
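For reference, the `_BraninCurrin` helper added above evaluates the two standard multi-objective benchmark functions (the code's "brain" spelling refers to the Branin function). Assuming inputs on the unit square, as in the test's search space, the formulas it implements are:

$$f_{\mathrm{Branin}}(x_1, x_2) = \left(\bar{x}_2 - \tfrac{5.1}{4\pi^2}\,\bar{x}_1^2 + \tfrac{5}{\pi}\,\bar{x}_1 - 6\right)^2 + 10\left(1 - \tfrac{1}{8\pi}\right)\cos(\bar{x}_1) + 10, \qquad \bar{x}_1 = 15x_1 - 5,\ \bar{x}_2 = 15x_2$$

$$f_{\mathrm{Currin}}(x_1, x_2) = \left(1 - e^{-1/(2x_2)}\right)\frac{2300x_1^3 + 1900x_1^2 + 2092x_1 + 60}{100x_1^3 + 500x_1^2 + 4x_1 + 20}$$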
@@ -138,5 +158,35 @@ def test_lexiflow():
     print(analysis.best_result)


+def test_lexiflow_performance():
+    lexico_objectives = {}
+    lexico_objectives["metrics"] = ["brain", "currin"]
+    lexico_objectives["tolerances"] = {"brain": 10.0, "currin": 0.0}
+    lexico_objectives["targets"] = {"brain": 0.0, "currin": 0.0}
+    lexico_objectives["modes"] = ["min", "min"]
+
+    search_space = {
+        "x1": tune.uniform(lower=0.000001, upper=1.0),
+        "x2": tune.uniform(lower=0.000001, upper=1.0),
+    }
+
+    analysis = tune.run(
+        _BraninCurrin,
+        num_samples=1000,
+        config=search_space,
+        use_ray=False,
+        lexico_objectives=lexico_objectives,
+    )
+
+    print(analysis.best_trial)
+    print(analysis.best_config)
+    print(analysis.best_result)
+
+    assert (
+        analysis.best_result["currin"] <= 2.2
+    ), "the value of currin function should be less than 2.2"
+
+
 if __name__ == "__main__":
     test_lexiflow()
+    test_lexiflow_performance()
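As a rough illustration of how the lexicographic objectives in the new test are interpreted, "brain" (Branin) is the higher-priority metric: configurations whose Branin value falls within the 10.0 tolerance of the best value seen so far (or below the 0.0 target) are treated as tied on it, and only then is "currin" compared. A simplified sketch of that comparison, assuming minimization for all metrics; this is an approximation for intuition, not FLAML's actual `lexico_compare` implementation shown in the first hunk:

def lexico_better(candidate, incumbent, metrics, tolerances, targets, f_best):
    # Compare two result dicts metric by metric, in priority order.
    # All metrics are assumed to be minimized, as in the test above.
    for m in metrics:
        bound = max(f_best[m] + tolerances[m], targets[m])
        cand_ok, inc_ok = candidate[m] <= bound, incumbent[m] <= bound
        if cand_ok and inc_ok:
            continue  # both feasible on this metric, move to the next priority
        return candidate[m] < incumbent[m]  # otherwise decide on this metric
    return candidate[metrics[-1]] < incumbent[metrics[-1]]

# Example: both points are within the 10.0 "brain" tolerance of f_best,
# so the decision falls through to "currin", where the lower value wins.
print(lexico_better(
    {"brain": 8.0, "currin": 1.5}, {"brain": 4.0, "currin": 3.0},
    ["brain", "currin"], {"brain": 10.0, "currin": 0.0},
    {"brain": 0.0, "currin": 0.0}, {"brain": 2.0, "currin": 1.0},
))  # True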