autogen/test/automl/test_xgboost2d.py
Chi Wang 72caa2172d
model_history, ITER_HP, settings in AutoML(), checkpoint bug fix (#283)
if save_best_model_per_estimator is False and retrain_final is True, unfit the model after evaluation in HPO.
retrain if using ray.
update ITER_HP in config after a trial is finished.
change prophet logging level.
example and notebook update.
allow settings to be passed to the AutoML constructor (#192, #277): the automl settings can be passed to the constructor instead of requiring a derived class; see the sketch after the commit notes.
remove model_history.
checkpoint bug fix.

* model_history meaning save_best_model_per_estimator

* ITER_HP

* example update

* prophet logging level

* comment update in forecast notebook

* print format improvement

* allow settings to be passed to AutoML constructor

* checkpoint bug fix

* time limit for autohf regression test

* skip slow test on macos

* cleanup before del
2021-11-18 09:39:45 -08:00
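
The constructor-settings change above (#192, #277) is the user-facing part of this commit. A minimal sketch of that usage, assuming the keyword names accepted by fit() (task, time_budget, n_jobs) are the ones the constructor forwards:

from flaml.automl import AutoML
from sklearn.datasets import load_wine

# Hypothetical usage sketch: settings passed once to the constructor instead of
# being baked into a derived class or repeated in every fit() call.
X, y = load_wine(return_X_y=True)
automl = AutoML(task="classification", time_budget=1, n_jobs=1)
automl.fit(X_train=X, y_train=y)  # reuses the settings given to the constructor
print(automl.best_estimator)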


import unittest
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from flaml.automl import AutoML
from flaml.model import XGBoostSklearnEstimator
from flaml import tune

# OpenML dataset used by the test; a local dataset is used as a fallback below.
dataset = "credit-g"


class XGBoost2D(XGBoostSklearnEstimator):
    """XGBoost estimator restricted to a two-dimensional search space."""

    @classmethod
    def search_space(cls, data_size, task):
        # Upper bound for both hyperparameters: the data size, capped at 32768.
        upper = min(32768, int(data_size))
        return {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "low_cost_init_value": 4,
            },
            "max_leaves": {
                "domain": tune.lograndint(lower=4, upper=upper),
                "low_cost_init_value": 4,
            },
        }


def test_simple(method=None):
    automl = AutoML()
    # Register the custom estimator so it can be selected via estimator_list.
    automl.add_learner(learner_name="XGBoost2D", learner_class=XGBoost2D)

    automl_settings = {
        "estimator_list": ["XGBoost2D"],
        "task": "classification",
        "log_file_name": f"test/xgboost2d_{dataset}_{method}.log",
        "n_jobs": 1,
        "hpo_method": method,
        "log_type": "all",
        "retrain_full": "budget",
        "keep_search_state": True,  # keep the search state so tune.run below can reuse it
        "time_budget": 1,
    }
    # ArffException is the error type the OpenML ARFF parser can raise.
    from sklearn.externals._arff import ArffException

    try:
        X, y = fetch_openml(name=dataset, return_X_y=True)
    except (ArffException, ValueError):
        # Fall back to a bundled dataset when the OpenML download fails.
        from sklearn.datasets import load_wine

        X, y = load_wine(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42
    )
    automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
    print(automl.estimator_list)
    print(automl.search_space)
    print(automl.points_to_evaluate)
    # Re-evaluate the best configuration through the trainable used by AutoML.
    config = automl.best_config.copy()
    config["learner"] = automl.best_estimator
    automl.trainable(config)
    from flaml import tune
    from flaml.automl import size
    from functools import partial

    # Continue the search with flaml.tune, reusing the state kept by AutoML.
    analysis = tune.run(
        automl.trainable,
        automl.search_space,
        metric="val_loss",
        mode="min",
        low_cost_partial_config=automl.low_cost_partial_config,
        points_to_evaluate=automl.points_to_evaluate,
        cat_hp_cost=automl.cat_hp_cost,
        prune_attr=automl.prune_attr,
        min_resource=automl.min_resource,
        max_resource=automl.max_resource,
        time_budget_s=automl._state.time_budget,
        config_constraints=[(partial(size, automl._state), "<=", automl._mem_thres)],
        metric_constraints=automl.metric_constraints,
        num_samples=5,
    )
    print(analysis.trials[-1])


def test_optuna():
    test_simple(method="optuna")


def test_random():
    test_simple(method="random")


def test_grid():
    test_simple(method="grid")


if __name__ == "__main__":
    unittest.main()