Merge branch 'microsoft:main' into main
This commit is contained in commit cc3e9ae968

flaml/automl.py (121 changed lines)
@@ -237,7 +237,7 @@ class SearchState:
if self.base_eci is None:
self.base_eci = time_used
if (obj is not None) and (self.best_loss is None or obj < self.best_loss):
if (obj is not None) and (obj < self.best_loss):
self.best_loss_old = self.best_loss if self.best_loss < np.inf else 2 * obj
self.best_loss = obj
self.best_result = result

@@ -286,7 +286,7 @@ class AutoMLState:
sampled_y_train = self.y_train[:sample_size]
weight = self.fit_kwargs.get(
"sample_weight"
) # NOTE: _prepare_sample_train_data is before
) # NOTE: _prepare_sample_train_data is before kwargs is updated to fit_kwargs_by_estimator
if weight is not None:
sampled_weight = weight[:sample_size]
if self.groups is not None:

@@ -296,7 +296,7 @@ class AutoMLState:
sampled_y_train = self.y_train_all
if (
"sample_weight" in self.fit_kwargs
): # NOTE: _prepare_sample_train_data is before
): # NOTE: _prepare_sample_train_data is before kwargs is updated to fit_kwargs_by_estimator
sampled_weight = self.sample_weight_all
if self.groups is not None:
groups = self.groups_all

@@ -311,7 +311,7 @@ class AutoMLState:
this_estimator_kwargs = state.fit_kwargs_by_estimator.get(
estimator
).copy() # NOTE: _compute_with_config_base is after
).copy() # NOTE: _compute_with_config_base is after kwargs is updated to fit_kwargs_by_estimator
(
sampled_X_train,
sampled_y_train,

@@ -398,7 +398,7 @@ class AutoMLState:
this_estimator_kwargs = self.fit_kwargs_by_estimator.get(
estimator
).copy() # NOTE: _train_with_config is after
).copy() # NOTE: _train_with_config is after kwargs is updated to fit_kwargs_by_estimator
(
sampled_X_train,
sampled_y_train,

@@ -408,14 +408,14 @@ class AutoMLState:
if sampled_weight is not None:
weight = this_estimator_kwargs[
"sample_weight"
] # NOTE: _train_with_config is after
] # NOTE: _train_with_config is after kwargs is updated to fit_kwargs_by_estimator
this_estimator_kwargs[
"sample_weight"
] = sampled_weight # NOTE: _train_with_config is after
] = sampled_weight # NOTE: _train_with_config is after kwargs is updated to fit_kwargs_by_estimator
if groups is not None:
this_estimator_kwargs[
"groups"
] = groups # NOTE: _train_with_config is after
] = groups # NOTE: _train_with_config is after kwargs is updated to fit_kwargs_by_estimator

budget = (
None

@@ -432,14 +432,14 @@ class AutoMLState:
n_jobs=self.n_jobs,
estimator_class=self.learner_classes.get(estimator),
budget=budget,
fit_kwargs=this_estimator_kwargs, # NOTE: _train_with_config is after
fit_kwargs=this_estimator_kwargs, # NOTE: _train_with_config is after kwargs is updated to fit_kwargs_by_estimator
eval_metric=self.metric if hasattr(self, "metric") else "train_time",
)

if sampled_weight is not None:
this_estimator_kwargs[
"sample_weight"
] = weight # NOTE: _train_with_config is after
] = weight # NOTE: _train_with_config is after kwargs is updated to fit_kwargs_by_estimator

return estimator, train_time
@@ -626,10 +626,12 @@ class AutoML(BaseEstimator):
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean, default=False | Whether to use ray to run the training
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time. Only use it if
you run into OOM failures.
datasets, but will incur more overhead in time.
If dict: the dict contains the keywords arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the
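To make the new boolean-or-dict contract concrete, here is a minimal sketch of how the two forms could be passed to `AutoML.fit()`. The toy dataset, the budgets, and the specific `ray.tune.run` keyword values are illustrative assumptions, not part of this diff, and both forms require ray to be installed:

```python
from flaml import AutoML
from sklearn.datasets import load_iris

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()

# boolean form: run each trial in a separate ray process
automl.fit(X_train, y_train, task="classification", time_budget=30, use_ray=True)

# dict form: the dict is forwarded to ray.tune.run, e.g. to redirect trial logs
automl.fit(
    X_train,
    y_train,
    task="classification",
    time_budget=30,
    use_ray={"local_dir": "logs/ray", "verbose": 1},
)
```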
@@ -1103,7 +1105,7 @@ class AutoML(BaseEstimator):
self._sample_weight_full = self._state.fit_kwargs.get(
"sample_weight"
) # NOTE: _validate_data is before,
) # NOTE: _validate_data is before kwargs is updated to fit_kwargs_by_estimator
if X_val is not None and y_val is not None:
assert (
isinstance(X_val, np.ndarray)

@@ -1164,7 +1166,7 @@ class AutoML(BaseEstimator):
self._state.task in CLASSIFICATION
and self._auto_augment
and self._state.fit_kwargs.get("sample_weight")
is None # NOTE: _prepare_data is before
is None # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
and self._split_type in ["stratified", "uniform"]
and self._state.task != TOKENCLASSIFICATION
):

@@ -1208,7 +1210,9 @@ class AutoML(BaseEstimator):
)
self._state.fit_kwargs[
"sample_weight"
] = self._state.sample_weight_all # NOTE: _prepare_data is before
] = (
self._state.sample_weight_all
) # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
else:
X_train_all, y_train_all = shuffle(
X_train_all, y_train_all, random_state=RANDOM_SEED

@@ -1227,7 +1231,7 @@ class AutoML(BaseEstimator):
num_samples = X_train_all.shape[0]
period = self._state.fit_kwargs[
"period"
] # NOTE: _prepare_data is before
] # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
assert (
period < num_samples
), f"period={period}>#examples={num_samples}"

@@ -1239,7 +1243,7 @@ class AutoML(BaseEstimator):
else:
if (
"sample_weight" in self._state.fit_kwargs
): # NOTE: _prepare_data is before
): # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
(
X_train,
X_val,

@@ -1247,14 +1251,14 @@ class AutoML(BaseEstimator):
y_val,
self._state.fit_kwargs[
"sample_weight"
], # NOTE: _prepare_data is before
], # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
self._state.weight_val,
) = train_test_split(
X_train_all,
y_train_all,
self._state.fit_kwargs[
"sample_weight"
], # NOTE: _prepare_data is before
], # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
test_size=split_ratio,
shuffle=False,
)

@@ -1297,7 +1301,7 @@ class AutoML(BaseEstimator):
stratify = y_rest if self._split_type == "stratified" else None
if (
"sample_weight" in self._state.fit_kwargs
): # NOTE: _prepare_data is before
): # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
(
X_train,
X_val,

@@ -1310,17 +1314,17 @@ class AutoML(BaseEstimator):
y_rest,
self._state.fit_kwargs["sample_weight"][
rest
], # NOTE: _prepare_data is before
], # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
test_size=split_ratio,
random_state=RANDOM_SEED,
)
weight1 = self._state.fit_kwargs["sample_weight"][
first
] # NOTE: _prepare_data is before
] # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
self._state.weight_val = concat(weight1, weight_val)
self._state.fit_kwargs[
"sample_weight"
] = concat( # NOTE: _prepare_data is before
] = concat( # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
weight1, weight_train
)
else:

@@ -1346,7 +1350,7 @@ class AutoML(BaseEstimator):
elif self._state.task in REGRESSION:
if (
"sample_weight" in self._state.fit_kwargs
): # NOTE: _prepare_data is before
): # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
(
X_train,
X_val,

@@ -1354,14 +1358,14 @@ class AutoML(BaseEstimator):
y_val,
self._state.fit_kwargs[
"sample_weight"
], # NOTE: _prepare_data is before
], # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
self._state.weight_val,
) = train_test_split(
X_train_all,
y_train_all,
self._state.fit_kwargs[
"sample_weight"
], # NOTE: _prepare_data is before
], # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
test_size=split_ratio,
random_state=RANDOM_SEED,
)

@@ -1409,7 +1413,7 @@ class AutoML(BaseEstimator):
if self._state.task in TS_FORECAST:
period = self._state.fit_kwargs[
"period"
] # NOTE: _prepare_data is before
] # NOTE: _prepare_data is before kwargs is updated to fit_kwargs_by_estimator
if period * (n_splits + 1) > y_train_all.size:
n_splits = int(y_train_all.size / period - 1)
assert n_splits >= 2, (

@@ -1716,7 +1720,7 @@ class AutoML(BaseEstimator):
assert isinstance(
self._state.fit_kwargs.get("period"),
int, # NOTE: _decide_split_type is before
int, # NOTE: _decide_split_type is before kwargs is updated to fit_kwargs_by_estimator
), f"missing a required integer 'period' for '{TS_FORECAST}' task."
elif self._state.task == "rank":
assert (
@@ -1897,32 +1901,14 @@ class AutoML(BaseEstimator):
@property
def trainable(self) -> Callable[[dict], Optional[float]]:
"""Training function.

Returns:
A function that evaluates each config and returns the loss.
"""
self._state.time_from_start = 0
for estimator in self.estimator_list:
search_state = self._search_states[estimator]
if not hasattr(search_state, "training_function"):
if self._use_ray is not False:
from ray.tune import with_parameters

search_state.training_function = with_parameters(
AutoMLState._compute_with_config_base,
state=self._state,
estimator=estimator,
)
else:
search_state.training_function = partial(
AutoMLState._compute_with_config_base,
state=self._state,
estimator=estimator,
)
states = self._search_states
mem_res = self._mem_thres

def train(config: dict):
def train(config: dict, state):

sample_size = config.get("FLAML_sample_size")
config = config.get("ml", config).copy()

@@ -1932,18 +1918,33 @@ class AutoML(BaseEstimator):
# check memory constraints before training
if states[estimator].learner_class.size(config) <= mem_res:
del config["learner"]
result = states[estimator].training_function(config)
return result
result = AutoMLState._compute_with_config_base(
config, state=state, estimator=estimator
)
else:
return {
# If search algorithm is not in flaml, it does not handle the config constraint, should also tune.report before return
result = {
"pred_time": 0,
"wall_clock_time": None,
"metric_for_logging": np.inf,
"val_loss": np.inf,
"trained_estimator": None,
}
tune.report(**result)
return result

return train
if self._use_ray is not False:
from ray.tune import with_parameters

return with_parameters(
train,
state=self._state,
)
else:
return partial(
train,
state=self._state,
)

@property
def metric_constraints(self) -> list:
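The refactor above collapses the per-estimator `training_function` attributes into a single `train(config, state)` function that is wrapped once at the end, either with `ray.tune.with_parameters` or with `functools.partial`. A minimal sketch of that dispatch pattern, using a made-up state and loss purely for illustration:

```python
from functools import partial

def train(config: dict, state):
    # the per-trial function now receives the shared state explicitly
    return {"val_loss": state["offset"] + config["x"] ** 2}

state = {"offset": 1.0}
use_ray = False  # flip to True when ray is installed and desired

if use_ray:
    from ray.tune import with_parameters
    # with_parameters stores `state` in the ray object store once and injects it into every trial
    trainable = with_parameters(train, state=state)
else:
    # without ray, a plain functools.partial closes over the same state in-process
    trainable = partial(train, state=state)

print(trainable({"x": 2.0}))  # {'val_loss': 5.0}
```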
@@ -2180,10 +2181,12 @@ class AutoML(BaseEstimator):
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean or dict
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time.
If dict: the dict contains the keywords arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the

@@ -2565,7 +2568,7 @@ class AutoML(BaseEstimator):
this_estimator_kwargs = this_estimator_kwargs.copy()
this_estimator_kwargs.update(
self._state.fit_kwargs
) # update the shallow copy
) # update the shallow copy of fit_kwargs to fit_kwargs_by_estimator
self._state.fit_kwargs_by_estimator[
estimator_name
] = this_estimator_kwargs # set self._state.fit_kwargs_by_estimator[estimator_name] to the update, so only self._state.fit_kwargs_by_estimator will be updated

@@ -2579,7 +2582,9 @@ class AutoML(BaseEstimator):
data_size=self._state.data_size,
task=self._state.task,
starting_point=starting_points.get(estimator_name),
period=self._state.fit_kwargs.get("period"), # NOTE: this is after
period=self._state.fit_kwargs.get(
"period"
), # NOTE: this is after kwargs is updated to fit_kwargs_by_estimator
custom_hp=custom_hp and custom_hp.get(estimator_name),
max_iter=max_iter,
)

@@ -2643,7 +2648,7 @@ class AutoML(BaseEstimator):
self._sample_weight_full,
self._state.fit_kwargs_by_estimator,
self._state.fit_kwargs,
) # NOTE: this is after
) # NOTE: this is after kwargs is updated to fit_kwargs_by_estimator
del self._state.groups, self._state.groups_all, self._state.groups_val
logger.setLevel(old_level)

@@ -3257,7 +3262,7 @@ class AutoML(BaseEstimator):
stacker.fit(
self._X_train_all,
self._y_train_all,
**sample_weight_dict, # NOTE: _search is after
**sample_weight_dict, # NOTE: _search is after kwargs is updated to fit_kwargs_by_estimator
)
logger.info(f"ensemble: {stacker}")
self._trained_estimator = stacker

@@ -3276,7 +3281,7 @@ class AutoML(BaseEstimator):
stacker.fit(
self._X_train_all,
self._y_train_all,
**sample_weight_dict, # NOTE: _search is after
**sample_weight_dict, # NOTE: _search is after kwargs is updated to fit_kwargs_by_estimator
)
logger.info(f"ensemble: {stacker}")
self._trained_estimator = stacker
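A recurring detail in these hunks (see the one at -2565) is that the per-estimator kwargs are shallow-copied and then updated with the shared `fit_kwargs`, so shared keys are layered on top of the per-estimator ones. A toy illustration of that merge order, with made-up values:

```python
# made-up values, only to show the copy-then-update semantics used in the hunk
fit_kwargs = {"sample_weight": [1, 2, 3]}                  # shared across estimators
fit_kwargs_by_estimator = {"catboost": {"verbose": True}}  # per-estimator extras

this_estimator_kwargs = fit_kwargs_by_estimator["catboost"].copy()  # shallow copy keeps the original intact
this_estimator_kwargs.update(fit_kwargs)                            # shared kwargs are merged on top
fit_kwargs_by_estimator["catboost"] = this_estimator_kwargs

print(fit_kwargs_by_estimator)
# {'catboost': {'verbose': True, 'sample_weight': [1, 2, 3]}}
```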
@@ -420,7 +420,6 @@ def get_val_loss(
# fit_kwargs['groups_val'] = groups_val
# fit_kwargs['X_val'] = X_val
# fit_kwargs['y_val'] = y_val

estimator.fit(X_train, y_train, budget, **fit_kwargs)
val_loss, metric_for_logging, pred_time, _ = _eval_estimator(
config,

@@ -83,6 +83,8 @@ def report(_metric=None, **kwargs):
Raises:
StopIteration (when not using ray, i.e., _use_ray=False):
A StopIteration exception is raised if the trial has been signaled to stop.
SystemExit (when using ray):
A SystemExit exception is raised if the trial has been signaled to stop by ray.
"""
global _use_ray
global _verbose

@@ -239,9 +241,11 @@ def run(
respectively. You can also provide a self-defined scheduler instance
of the TrialScheduler class. When 'asha' or self-defined scheduler is
used, you usually need to report intermediate results in the evaluation
function via 'tune.report()'. In addition, when 'use_ray' is not enabled,
you also need to stop the evaluation function by explicitly catching the
`StopIteration` exception, as shown in the following example.
function via 'tune.report()'.
If you would like to do some cleanup operation when the trial is stopped
by the scheduler, you can catch the `StopIteration` (when not using ray)
or `SystemExit` (when using ray) exception explicitly,
as shown in the following example.
Please find more examples using different types of schedulers
and how to set up the corresponding evaluation functions in
test/tune/test_scheduler.py, and test/tune/example_scheduler.py.

@@ -252,7 +256,8 @@ def run(
intermediate_score = evaluation_fn(step, width, height)
try:
    tune.report(iterations=step, mean_loss=intermediate_score)
except StopIteration:
except (StopIteration, SystemExit):
    # do cleanup operation here
    return
```
search_alg: An instance of BlendSearch as the search algorithm

@@ -1 +1 @@
__version__ = "1.0.3"
__version__ = "1.0.5"
notebook/zeroshot_lightgbm.ipynb (new file, 553 lines)
@@ -0,0 +1,553 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"Copyright (c) FLAML authors. All rights reserved. \n",
"\n",
"Licensed under the MIT License.\n",
"\n",
"# Zero-shot AutoML with FLAML\n",
"\n",
"\n",
"## Introduction\n",
"\n",
"In this notebook, we demonstrate a basic use case of zero-shot AutoML with FLAML.\n",
"\n",
"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml and openml:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"%pip install -U flaml openml;"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"## What is zero-shot AutoML?\n",
"\n",
"Zero-shot automl means automl systems without expensive tuning. But it does adapt to data.\n",
"A zero-shot automl system will recommend a data-dependent default configuration for a given dataset.\n",
"\n",
"Think about what happens when you use a `LGBMRegressor`. When you initialize a `LGBMRegressor` without any argument, it will set all the hyperparameters to the default values preset by the lightgbm library.\n",
"There is no doubt that these default values have been carefully chosen by the library developers.\n",
"But they are static. They are not adaptive to different datasets.\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'boosting_type': 'gbdt', 'class_weight': None, 'colsample_bytree': 1.0, 'importance_type': 'split', 'learning_rate': 0.1, 'max_depth': -1, 'min_child_samples': 20, 'min_child_weight': 0.001, 'min_split_gain': 0.0, 'n_estimators': 100, 'n_jobs': -1, 'num_leaves': 31, 'objective': None, 'random_state': None, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'silent': 'warn', 'subsample': 1.0, 'subsample_for_bin': 200000, 'subsample_freq': 0}\n"
]
}
],
"source": [
"from lightgbm import LGBMRegressor\n",
"estimator = LGBMRegressor()\n",
"print(estimator.get_params())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It is unlikely that 100 trees with 31 leaves each is the best hyperparameter setting for every dataset.\n",
"\n",
"So, we propose to recommend data-dependent default configurations at runtime. \n",
"All you need to do is to import the `LGBMRegressor` from flaml.default instead of from lightgbm.\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from flaml.default import LGBMRegressor"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Other parts of code remain the same. The new `LGBMRegressor` will automatically choose a configuration according to the training data.\n",
"For different training data the configuration could be different.\n",
"The recommended configuration can be either the same as the static default configuration from the library, or different.\n",
"It is expected to be no worse than the static default configuration in most cases.\n",
"\n",
"For example, let's download [houses dataset](https://www.openml.org/d/537) from OpenML. The task is to predict median price of the house in the region based on demographic composition and a state of housing market in the region."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"slideshow": {
"slide_type": "subslide"
},
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"load dataset from ./openml_ds537.pkl\n",
"Dataset name: houses\n",
"X_train.shape: (15480, 8), y_train.shape: (15480,);\n",
"X_test.shape: (5160, 8), y_test.shape: (5160,)\n"
]
}
],
"source": [
"from flaml.data import load_openml_dataset\n",
"X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=537, data_dir='./')"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" median_income housing_median_age ... latitude longitude\n",
"19226 7.3003 19.0 ... 38.46 -122.68\n",
"14549 5.9547 18.0 ... 32.95 -117.24\n",
"9093 3.2125 19.0 ... 34.68 -118.27\n",
"12213 6.9930 13.0 ... 33.51 -117.18\n",
"12765 2.5162 21.0 ... 38.62 -121.41\n",
"... ... ... ... ... ...\n",
"13123 4.4125 20.0 ... 38.27 -121.26\n",
"19648 2.9135 27.0 ... 37.48 -120.89\n",
"9845 3.1977 31.0 ... 36.58 -121.90\n",
"10799 5.6315 34.0 ... 33.62 -117.93\n",
"2732 1.3882 15.0 ... 32.80 -115.56\n",
"\n",
"[15480 rows x 8 columns]\n"
]
}
],
"source": [
"print(X_train)"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"We fit the `flaml.default.LGBMRegressor` on this dataset."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"slideshow": {
"slide_type": "slide"
},
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'boosting_type': 'gbdt', 'class_weight': None, 'colsample_bytree': 0.7019911744574896, 'importance_type': 'split', 'learning_rate': 0.022635758411078528, 'max_depth': -1, 'min_child_samples': 2, 'min_child_weight': 0.001, 'min_split_gain': 0.0, 'n_estimators': 4797, 'n_jobs': -1, 'num_leaves': 122, 'objective': None, 'random_state': None, 'reg_alpha': 0.004252223402511765, 'reg_lambda': 0.11288241427227624, 'silent': 'warn', 'subsample': 1.0, 'subsample_for_bin': 200000, 'subsample_freq': 0, 'max_bin': 511, 'verbose': -1}\n"
]
}
],
"source": [
"estimator = LGBMRegressor() # imported from flaml.default\n",
"estimator.fit(X_train, y_train)\n",
"print(estimator.get_params())"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"The configuration is adapted as shown here. \n",
"The number of trees is 4797, the number of leaves is 122.\n",
"Does it work better than the static default configuration?\n",
"Let’s compare.\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"slideshow": {
"slide_type": "slide"
},
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"0.8537444671194614"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"estimator.score(X_test, y_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The data-dependent configuration has a $r^2$ metric 0.8537 on the test data. What about static default configuration from lightgbm?"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"outputs": [
{
"data": {
"text/plain": [
"0.8296179648694404"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from lightgbm import LGBMRegressor\n",
"estimator = LGBMRegressor()\n",
"estimator.fit(X_train, y_train)\n",
"estimator.score(X_test, y_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The static default configuration gets $r^2=0.8296$, much lower than 0.8537 by the data-dependent configuration using `flaml.default`.\n",
"Again, the only difference in the code is from where you import the `LGBMRegressor`.\n",
"The adaptation to the training dataset is under the hood.\n",
"\n",
"You might wonder, how is it possible to find the data-dependent configuration without tuning?\n",
"The answer is that,\n",
"flaml can recommend good data-dependent default configurations at runtime without tuning only because it mines the hyperparameter configurations across different datasets offline as a preparation step.\n",
"So basically, zero-shot automl shifts the tuning cost from online to offline.\n",
"In the offline preparation stage, we applied `flaml.AutoML`.\n",
"\n",
"### Benefit of zero-shot AutoML\n",
"Now, what is the benefit of zero-shot automl? Or what is the benefit of shifting tuning from online to offline?\n",
"The first benefit is the online computational cost. That is the cost paid by the final consumers of automl. They only need to train one model.\n",
"They get the hyperparameter configuration right away. There is no overhead to worry about.\n",
"Another big benefit is that your code doesn’t need to change. So if you currently have a workflow without the setup for tuning, you can use zero-shot automl without breaking that workflow.\n",
"Compared to tuning-based automl, zero-shot automl requires less input. For example, it doesn’t need a tuning budget, resampling strategy, validation dataset etc.\n",
"A related benefit is that you don’t need to worry about holding a subset of the training data for validation, which the tuning process might overfit.\n",
"As there is no tuning, you can use all the training data to train your model.\n",
"Finally, you can customize the offline preparation for a domain, and leverage the past tuning experience for better adaptation to similar tasks.\n",
"\n",
"## How to use at runtime\n",
"The easiest way to leverage this technique is to import a \"flamlized\" learner of your favorite choice and use it just as how you use the learner before. \n",
"The automation is done behind the scene.\n",
"The current list of “flamlized” learners are:\n",
"* LGBMClassifier, LGBMRegressor (inheriting LGBMClassifier, LGBMRegressor from lightgbm)\n",
"* XGBClassifier, XGBRegressor (inheriting LGBMClassifier, LGBMRegressor from xgboost)\n",
|
||||
"* RandomForestClassifier, RandomForestRegressor (inheriting from scikit-learn)\n",
|
||||
"* ExtraTreesClassifier, ExtraTreesRegressor (inheriting from scikit-learn)\n",
|
||||
"They work for classification or regression tasks.\n",
|
||||
"\n",
|
||||
"### What's the magic behind the scene?\n",
|
||||
"`flaml.default.LGBMRegressor` inherits `lightgbm.LGBMRegressor`, so all the methods and attributes in `lightgbm.LGBMRegressor` are still valid in `flaml.default.LGBMRegressor`.\n",
|
||||
"The difference is, `flaml.default.LGBMRegressor` decides the hyperparameter configurations based on the training data. It would use a different configuration if it is predicted to outperform the original data-independent default. If you inspect the params of the fitted estimator, you can find what configuration is used. If the original default configuration is used, then it is equivalent to the original estimator.\n",
|
||||
"The recommendation of which configuration should be used is based on offline AutoML run results. Information about the training dataset, such as the size of the dataset will be used to recommend a data-dependent configuration. The recommendation is done instantly in negligible time. The training can be faster or slower than using the original default configuration depending on the recommended configuration. \n",
|
||||
"\n",
|
||||
"### Can I check the configuration before training?\n",
|
||||
"Yes. You can use `suggest_hyperparams()` method to find the suggested configuration.\n",
|
||||
"For example, when you run the following code with the houses dataset, it will return the hyperparameter configuration instantly, without training the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'n_estimators': 4797, 'num_leaves': 122, 'min_child_samples': 2, 'learning_rate': 0.022635758411078528, 'colsample_bytree': 0.7019911744574896, 'reg_alpha': 0.004252223402511765, 'reg_lambda': 0.11288241427227624, 'max_bin': 511, 'verbose': -1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from flaml.default import LGBMRegressor\n",
|
||||
"\n",
|
||||
"estimator = LGBMRegressor()\n",
|
||||
"hyperparams, _, _, _ = estimator.suggest_hyperparams(X_train, y_train)\n",
|
||||
"print(hyperparams)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can print the configuration as a dictionary, in case you want to check it before you use it for training.\n",
|
||||
"\n",
|
||||
"This brings up an equivalent, open-box way for zero-shot AutoML if you would like more control over the training. \n",
|
||||
"Import the function `preprocess_and_suggest_hyperparams` from `flaml.default`.\n",
|
||||
"This function takes the task name, the training dataset, and the estimator name as input:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from flaml.default import preprocess_and_suggest_hyperparams\n",
|
||||
"(\n",
|
||||
" hyperparams,\n",
|
||||
" estimator_class,\n",
|
||||
" X_transformed,\n",
|
||||
" y_transformed,\n",
|
||||
" feature_transformer,\n",
|
||||
" label_transformer,\n",
|
||||
") = preprocess_and_suggest_hyperparams(\"regression\", X_train, y_train, \"lgbm\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It outputs the hyperparameter configurations, estimator class, transformed data, feature transformer and label transformer.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {
|
||||
"slideshow": {
|
||||
"slide_type": "slide"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'lightgbm.sklearn.LGBMRegressor'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(estimator_class)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this case, the estimator name is “lgbm”. The corresponding estimator class is `lightgbm.LGBMRegressor`.\n",
|
||||
"This line initializes a LGBMClassifier with the recommended hyperparameter configuration:"
|
||||
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"slideshow": {
"slide_type": "slide"
},
"tags": []
},
"outputs": [],
"source": [
"model = estimator_class(**hyperparams)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then we can fit the model on the transformed data."
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"slideshow": {
"slide_type": "slide"
},
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"LGBMRegressor(colsample_bytree=0.7019911744574896,\n",
" learning_rate=0.022635758411078528, max_bin=511,\n",
" min_child_samples=2, n_estimators=4797, num_leaves=122,\n",
" reg_alpha=0.004252223402511765, reg_lambda=0.11288241427227624,\n",
" verbose=-1)"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.fit(X_transformed, y_train)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The feature transformer needs to be applied to the test data before prediction."
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"X_test_transformed = feature_transformer.transform(X_test)\n",
"y_pred = model.predict(X_test_transformed)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"These are automated when you use the \"flamlized\" learner. So you don’t need to know these details when you don’t need to open the box.\n",
"We demonstrate them here to help you understand what’s going on. And in case you need to modify some steps, you know what to do.\n",
"\n",
"(Note that some classifiers like XGBClassifier require the labels to be integers, while others do not. So you can decide whether to use the transformed labels y_transformed and the label transformer label_transformer. Also, each estimator may require specific preprocessing of the data.)\n",
"\n",
"## Combine Zero-shot AutoML and HPO\n",
"\n",
"Zero Shot AutoML is fast and simple to use. It is very useful if speed and simplicity are the primary concerns. \n",
"If you are not satisfied with the accuracy of the zero shot model, you may want to spend extra time to tune the model.\n",
"You can use `flaml.AutoML` to do that. Everything is the same as your normal `AutoML.fit()`, except to set `starting_points=\"data\"`.\n",
"This tells AutoML to start the tuning from the data-dependent default configurations. You can set the tuning budget in the same way as before.\n",
"Note that if you set `max_iter=0` and `time_budget=None`, you are effectively using zero-shot AutoML. \n",
"When `estimator_list` is omitted, the most promising estimator together with its hyperparameter configuration will be tried first, which are both decided by zero-shot automl."
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[flaml.automl: 05-31 22:54:25] {2373} INFO - task = regression\n",
"[flaml.automl: 05-31 22:54:25] {2375} INFO - Data split method: uniform\n",
"[flaml.automl: 05-31 22:54:25] {2379} INFO - Evaluation method: cv\n",
"[flaml.automl: 05-31 22:54:25] {2448} INFO - Minimizing error metric: 1-r2\n",
"[flaml.automl: 05-31 22:54:25] {2586} INFO - List of ML learners in AutoML Run: ['lgbm']\n",
"[flaml.automl: 05-31 22:54:25] {2878} INFO - iteration 0, current learner lgbm\n",
"[flaml.automl: 05-31 22:56:54] {3008} INFO - Estimated sufficient time budget=1490299s. Estimated necessary time budget=1490s.\n",
"[flaml.automl: 05-31 22:56:54] {3055} INFO - at 149.1s,\testimator lgbm's best error=0.1513,\tbest estimator lgbm's best error=0.1513\n",
"[flaml.automl: 05-31 22:56:54] {2878} INFO - iteration 1, current learner lgbm\n",
"[flaml.automl: 05-31 22:59:24] {3055} INFO - at 299.0s,\testimator lgbm's best error=0.1513,\tbest estimator lgbm's best error=0.1513\n",
"[flaml.automl: 05-31 22:59:24] {2878} INFO - iteration 2, current learner lgbm\n",
"[flaml.automl: 05-31 23:01:34] {3055} INFO - at 429.1s,\testimator lgbm's best error=0.1513,\tbest estimator lgbm's best error=0.1513\n",
"[flaml.automl: 05-31 23:01:34] {2878} INFO - iteration 3, current learner lgbm\n",
"[flaml.automl: 05-31 23:04:43] {3055} INFO - at 618.2s,\testimator lgbm's best error=0.1513,\tbest estimator lgbm's best error=0.1513\n",
"[flaml.automl: 05-31 23:05:14] {3315} INFO - retrain lgbm for 31.0s\n",
"[flaml.automl: 05-31 23:05:14] {3322} INFO - retrained model: LGBMRegressor(colsample_bytree=0.7019911744574896,\n",
" learning_rate=0.02263575841107852, max_bin=511,\n",
" min_child_samples=2, n_estimators=4797, num_leaves=122,\n",
" reg_alpha=0.004252223402511765, reg_lambda=0.11288241427227633,\n",
" verbose=-1)\n",
"[flaml.automl: 05-31 23:05:14] {2617} INFO - fit succeeded\n",
"[flaml.automl: 05-31 23:05:14] {2618} INFO - Time taken to find the best model: 149.06516432762146\n"
]
}
],
"source": [
"from flaml import AutoML\n",
"\n",
"automl = AutoML()\n",
"settings = {\n",
" \"task\": \"regression\",\n",
" \"starting_points\": \"data\",\n",
" \"estimator_list\": [\"lgbm\"],\n",
" \"time_budget\": 600,\n",
"}\n",
"automl.fit(X_train, y_train, **settings)"
]
}
],
"metadata": {
"interpreter": {
"hash": "949777d72b0d2535278d3dc13498b2535136f6dfe0678499012e853ee9abcab1"
},
"kernelspec": {
"display_name": "Python 3.9.9 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
setup.py (13 changed lines)
@@ -33,6 +33,10 @@ setuptools.setup(
long_description_content_type="text/markdown",
url="https://github.com/microsoft/FLAML",
packages=setuptools.find_packages(include=["flaml*"]),
package_data={
"flaml.default": ["*/*.json"],
},
include_package_data=True,
install_requires=install_requires,
extras_require={
"notebook": [

@@ -68,6 +72,7 @@ setuptools.setup(
"blendsearch": ["optuna==2.8.0"],
"ray": [
"ray[tune]~=1.10",
"protobuf<4", # to prevent TypeError in ray
],
"azureml": [
"azureml-mlflow",

@@ -87,11 +92,17 @@ setuptools.setup(
"rouge_score",
],
"ts_forecast": [
"holidays<0.14", # to prevent installation error for prophet
"prophet>=1.0.1",
"statsmodels>=0.12.2",
"hcrystalball==0.1.10",
],
"forecast": [
"holidays<0.14", # to prevent installation error for prophet
"prophet>=1.0.1",
"statsmodels>=0.12.2",
"hcrystalball==0.1.10",
],
"forecast": ["prophet>=1.0.1", "statsmodels>=0.12.2", "hcrystalball==0.1.10"],
"benchmark": ["catboost>=0.26", "psutil==5.8.0", "xgboost==1.3.3"],
},
classifiers=[
@@ -99,7 +99,6 @@ class TestClassification(unittest.TestCase):
"ensemble": True,
}
automl.fit(X, y, **automl_settings)
assert automl.model is not None

automl = AutoML()
try:

@@ -21,7 +21,7 @@ def test_metric_constraints():
"log_type": "all",
"retrain_full": "budget",
"keep_search_state": True,
"time_budget": 1,
"time_budget": 2,
"pred_time_limit": 5.1e-05,
}
from sklearn.externals._arff import ArffException

@@ -58,6 +58,8 @@ X_test.shape: (5160, 8), y_test.shape: (5160,)
{'n_estimators': 4797, 'num_leaves': 122, 'min_child_samples': 2, 'learning_rate': 0.022635758411078528, 'colsample_bytree': 0.7019911744574896, 'reg_alpha': 0.004252223402511765, 'reg_lambda': 0.11288241427227624, 'max_bin': 511, 'verbose': -1}
```

[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/zeroshot_lightgbm.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/zeroshot_lightgbm.ipynb)

## Flamlized XGBClassifier

### Zero-shot AutoML
@@ -5,7 +5,7 @@
- Definition and purpose: The `low_cost_partial_config` is a dictionary of subset of the hyperparameter coordinates whose value corresponds to a configuration with known low-cost (i.e., low computation cost for training the corresponding model). The concept of low/high-cost is meaningful in the case where a subset of the hyperparameters to tune directly affects the computation cost for training the model. For example, `n_estimators` and `max_leaves` are known to affect the training cost of tree-based learners. We call this subset of hyperparameters, *cost-related hyperparameters*. In such scenarios, if you are aware of low-cost configurations for the cost-related hyperparameters, you are recommended to set them as the `low_cost_partial_config`. Using the tree-based method example again, since we know that small `n_estimators` and `max_leaves` generally correspond to simpler models and thus lower cost, we set `{'n_estimators': 4, 'max_leaves': 4}` as the `low_cost_partial_config` by default (note that `4` is the lower bound of search space for these two hyperparameters), e.g., in [LGBM](https://github.com/microsoft/FLAML/blob/main/flaml/model.py#L215). Configuring `low_cost_partial_config` helps the search algorithms make more cost-efficient choices.
In AutoML, the `low_cost_init_value` in `search_space()` function for each estimator serves the same role.

- Usage in practice: It is recommended to configure it if there are cost-related hyperparameters in your tuning task and you happen to know the low-cost values for them, but it is not required( It is fine to leave it the default value, i.e., `None`).
- Usage in practice: It is recommended to configure it if there are cost-related hyperparameters in your tuning task and you happen to know the low-cost values for them, but it is not required (It is fine to leave it the default value, i.e., `None`).

- How does it work: `low_cost_partial_config` if configured, will be used as an initial point of the search. It also affects the search trajectory. For more details about how does it play a role in the search algorithms, please refer to the papers about the search algorithms used: Section 2 of [Frugal Optimization for Cost-related Hyperparameters (CFO)](https://arxiv.org/pdf/2005.01571.pdf) and Section 3 of [Economical Hyperparameter Optimization with Blended Search Strategy (BlendSearch)](https://openreview.net/pdf?id=VbLH04pRA3).
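As a rough illustration of the recommendation above, here is a hedged sketch of passing `low_cost_partial_config` to `flaml.tune.run`; the search space, loss function, and sample count are made up for demonstration:

```python
from flaml import tune

def evaluate(config):
    # toy loss; in practice this would train a model whose cost grows with the two hyperparameters
    loss = (config["n_estimators"] - 100) ** 2 + (config["max_leaves"] - 20) ** 2
    return {"loss": loss}

analysis = tune.run(
    evaluate,
    config={
        "n_estimators": tune.randint(4, 1000),
        "max_leaves": tune.randint(4, 100),
    },
    low_cost_partial_config={"n_estimators": 4, "max_leaves": 4},  # known cheap corner of the space
    metric="loss",
    mode="min",
    num_samples=20,
)
print(analysis.best_config)
```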
@@ -18,6 +18,37 @@ Currently FLAML does several things for imbalanced data.
2. We use stratified sampling when doing holdout and kf.
3. We make sure no class is empty in both training and holdout data.
4. We allow users to pass `sample_weight` to `AutoML.fit()`.
5. User can customize the weight of each class by setting the `custom_hp` or `fit_kwargs_by_estimator` arguments. For example, the following code sets the weight for pos vs. neg as 2:1 for the RandomForest estimator:

```python
from flaml import AutoML
from sklearn.datasets import load_iris

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()
automl_settings = {
    "time_budget": 2,
    "task": "classification",
    "log_file_name": "test/iris.log",
    "estimator_list": ["rf", "xgboost"],
}

automl_settings["custom_hp"] = {
    "xgboost": {
        "scale_pos_weight": {
            "domain": 0.5,
            "init_value": 0.5,
        }
    },
    "rf": {
        "class_weight": {
            "domain": "balanced",
            "init_value": "balanced"
        }
    }
}
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)  # fit before inspecting the model
print(automl.model)
```


### How to interpret model performance? Is it possible for me to visualize feature importance, SHAP values, optimization history?
@@ -421,7 +421,29 @@ with mlflow.start_run():

### Extra fit arguments

Extra fit arguments that are needed by the estimators can be passed to `AutoML.fit()`. For example, if there is a weight associated with each training example, they can be passed via `sample_weight`. For another example, `period` can be passed for time series forecaster. For any extra keyword argument passed to `AutoML.fit()` which has not been explicitly listed in the function signature, it will be passed to the underlying estimators' `fit()` as is.
Extra fit arguments that are needed by the estimators can be passed to `AutoML.fit()`. For example, if there is a weight associated with each training example, they can be passed via `sample_weight`. For another example, `period` can be passed for time series forecaster. For any extra keyword argument passed to `AutoML.fit()` which has not been explicitly listed in the function signature, it will be passed to the underlying estimators' `fit()` as is. For another example, you can set the number of gpus used by each trial with the `gpu_per_trial` argument, which is only used by TransformersEstimator and XGBoostSklearnEstimator.

In addition, you can specify the different arguments needed by different estimators using the `fit_kwargs_by_estimator` argument. For example, you can set the custom arguments for a Transformers model:

```python
from flaml.data import load_openml_dataset
from flaml import AutoML

X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=1169, data_dir="./")

automl = AutoML()
automl_settings = {
    "task": "classification",
    "time_budget": 10,
    "estimator_list": ["catboost", "rf"],
    "fit_kwargs_by_estimator": {
        "catboost": {
            "verbose": True,  # setting the verbosity of catboost to True
        }
    },
}
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
```

## Retrieve and analyze the outcomes of AutoML.fit()
@@ -353,7 +353,7 @@ tune.run(.., scheduler=my_scheduler, ...)

- Different from the case when the `flaml` scheduler is used, the amount of resources to use at each iteration is not suggested by the search algorithm through the `resource_attr` in a configuration. You need to specify the evaluation schedule explicitly by yourself in the `evaluation_function` and **report intermediate results (using `tune.report()`) accordingly**. In the following code example, we use the ASHA scheduler by setting `scheduler="asha"`. We specify `resource_attr`, `min_resource`, `max_resource` and `reduction_factor` the same way as in the previous example (when "flaml" is used as the scheduler). We perform the evaluation in a customized schedule.

- Use ray backend or not? You can choose to use ray backend or not by specifying `use_ray=True` or `use_ray=False`. When ray backend is not used, i.e., `use_ray=False`, you also need to stop the evaluation function by explicitly catching the `StopIteration` exception, as shown in the last two lines of the evaluation function `obj_w_intermediate_report()` in the following code example.
- Use ray backend or not? You can choose to use ray backend or not by specifying `use_ray=True` or `use_ray=False`. When ray backend is not used, i.e., `use_ray=False`, you also need to stop the evaluation function by explicitly catching the `StopIteration` exception, as shown in the end of the evaluation function `obj_w_intermediate_report()` in the following code example.

```python
def obj_w_intermediate_report(resource_attr, X_train, X_test, y_train, y_test, min_resource, max_resource, config):

@@ -375,7 +375,8 @@ def obj_w_intermediate_report(resource_attr, X_train, X_test, y_train, y_test, m
    # need to report the resource attribute used and the corresponding intermediate results
    try:
        tune.report(sample_size=resource, loss=test_loss)
    except StopIteration:
    except (StopIteration, SystemExit):
        # do cleanup operation here
        return

resource_attr = "sample_size"

@@ -399,6 +400,9 @@ analysis = tune.run(
)
```

- If you would like to do some cleanup operation when the trial is stopped
by the scheduler, you can do it when you catch the `StopIteration` (when not using ray) or `SystemExit` (when using ray) exception explicitly.

### Warm start

Related arguments: