This commit is contained in:
Anonymous-submission-repo 2022-10-12 04:31:51 +00:00
parent ecca161848
commit 2d18c49cdd
12 changed files with 290 additions and 100 deletions

View File

@ -2406,16 +2406,19 @@ class AutoML(BaseEstimator):
skip_transform: boolean, default=False | Whether to pre-process data prior to modeling.
lexico_objectives: A dictionary with four elements.
It specifies the information used for multi-objective optimization with lexicographic preference.
e.g.,
```python
lexico_objectives = {"metrics":["error_rate","pred_time"], "modes":["min","min"],
"tolerances":{"error_rate":0.01,"pred_time":0.0}, "targets":{"error_rate":0.0,"pred_time":0.0}}
```
Either "metrics" or "modes" is a list of str.
It represents the optimization objectives, the objective as minimization or maximization respectively.
Both "metrics" and "modes" are ordered by priorities from high to low.
"tolerances" is a dictionary to specify the optimality tolerance of each objective.
"targets" is a dictionary to specify the optimization targets for each objective.
If providing lexico_objectives, the arguments metric, hpo_method will be invalid.
Either "metrics" or "modes" is a list of str.
It represents the optimization objectives, the objective as minimization or maximization respectively.
Both "metrics" and "modes" are ordered by priorities from high to low.
"tolerances" is a dictionary to specify the optimality tolerance of each objective.
"targets" is a dictionary to specify the optimization targets for each objective.
If providing lexico_objectives, the arguments metric, hpo_method will be invalid.
fit_kwargs_by_estimator: dict, default=None | The user specified keyword arguments, grouped by estimator name.
For TransformersEstimator, available fit_kwargs can be found from
@ -2520,6 +2523,10 @@ class AutoML(BaseEstimator):
if lexico_objectives is None:
    hpo_method = hpo_method or self._settings.get("hpo_method")
else:
    if hpo_method != "cfo":
        logger.warning(
            "If lexico_objectives is not None, hpo_method is forced to be cfo"
        )
    hpo_method = "cfo"
learner_selector = learner_selector or self._settings.get("learner_selector")
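For orientation, a minimal end-to-end sketch (not part of this diff; it mirrors the test added later in this commit) of how the new argument is consumed:

```python
from flaml import AutoML
from flaml.data import load_openml_dataset

# Load a benchmark dataset, as in the test added by this commit.
X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=179, data_dir="./data")

# "metrics"/"modes" are ordered by priority; with lexico_objectives set,
# the metric argument is ignored and hpo_method is forced to "cfo".
lexico_objectives = {
    "metrics": ["val_loss", "pred_time"],
    "modes": ["min", "min"],
    "tolerances": {"val_loss": 0.01, "pred_time": 0.0},
    "targets": {"val_loss": 0.0, "pred_time": 0.0},
}
automl = AutoML()
automl.fit(
    X_train=X_train,
    y_train=y_train,
    X_val=X_test,
    y_val=y_test,
    task="classification",
    time_budget=100,
    use_ray=False,
    lexico_objectives=lexico_objectives,
)
```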

View File

@ -17,11 +17,8 @@
# Copyright (c) Microsoft Corporation.
from typing import Dict, Optional
import numpy as np
from flaml.tune import result
from .trial import Trial
from collections import defaultdict
import logging

logger = logging.getLogger(__name__)
@ -91,42 +88,6 @@ class ExperimentAnalysis:
    raise ValueError("If set, `mode` has to be one of [min, max]")
return mode or self.default_mode
def lexico_best(self, trials):
    results = {index: trial.last_result for index, trial in enumerate(trials)}
    metrics = self.lexico_objectives["metrics"]
    modes = self.lexico_objectives["modes"]
    f_best = {}
    keys = list(results.keys())
    length = len(keys)
    histories = defaultdict(list)
    # Collect each objective's history, negating "max" objectives so that
    # everything is treated as minimization.
    for time_index in range(length):
        for objective, mode in zip(metrics, modes):
            histories[objective].append(
                results[keys[time_index]][objective]
                if mode == "min"
                else results[keys[time_index]][objective] * -1
            )
    obj_initial = self.lexico_objectives["metrics"][0]
    feasible_index = [*range(len(histories[obj_initial]))]
    # Filter trials objective by objective in priority order: a trial stays
    # feasible if it is within tolerance of the best value, or meets the target.
    for k_metric in self.lexico_objectives["metrics"]:
        k_values = np.array(histories[k_metric])
        f_best[k_metric] = np.min(k_values.take(feasible_index))
        feasible_index_prior = np.where(
            k_values
            <= max(
                f_best[k_metric] + self.lexico_objectives["tolerances"][k_metric],
                self.lexico_objectives["targets"][k_metric],
            )
        )[0].tolist()
        feasible_index = [val for val in feasible_index if val in feasible_index_prior]
    best_trial = trials[feasible_index[-1]]
    return best_trial
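For concreteness, a small self-contained walkthrough of this filtering rule (illustrative numbers, not part of the commit):

```python
import numpy as np

# Four finished trials, two objectives ordered by priority (both "min").
histories = {"error_rate": [0.30, 0.21, 0.20, 0.25], "pred_time": [0.1, 0.5, 0.4, 0.2]}
tolerances = {"error_rate": 0.02, "pred_time": 0.0}
targets = {"error_rate": 0.0, "pred_time": 0.0}

feasible = list(range(4))
for k in ["error_rate", "pred_time"]:
    values = np.array(histories[k])
    best = values.take(feasible).min()
    # A trial stays feasible if it is within tolerance of the best value,
    # or already meets the target.
    bound = max(best + tolerances[k], targets[k])
    feasible = [i for i in feasible if values[i] <= bound]
# error_rate pass: best 0.20, bound 0.22 -> trials 1 and 2 survive;
# pred_time pass: best 0.4 among survivors, bound 0.4 -> trial 2 remains.
print(feasible[-1])  # 2
```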
def get_best_trial(
    self,
    metric: Optional[str] = None,
@ -158,9 +119,6 @@ class ExperimentAnalysis:
        values are disregarded and these trials are never selected as
        the best trial.
    """
    if self.lexico_objectives is not None:
        best_trial = self.lexico_best(self.trials)
        return best_trial
    metric = self._validate_metric(metric)
    mode = self._validate_mode(mode)
    if scope not in ["all", "last", "avg", "last-5-avg", "last-10-avg"]:

View File

@ -113,6 +113,19 @@ class BlendSearch(Searcher):
Default is "auto", which means that we will automatically chose the cost attribute to use (depending Default is "auto", which means that we will automatically chose the cost attribute to use (depending
on the nature of the resource budget). When cost_attr is set to None, cost differences between different trials will be omitted on the nature of the resource budget). When cost_attr is set to None, cost differences between different trials will be omitted
in our search algorithm. in our search algorithm.
lexico_objectives: A dictionary with four elements.
It specifies the information used for multi-objective optimization with lexicographic preference.
e.g.,
```python
lexico_objectives = {"metrics":["error_rate","pred_time"], "modes":["min","min"],
"tolerances":{"error_rate":0.01,"pred_time":0.0}, "targets":{"error_rate":0.0,"pred_time":0.0}}
```
"metrics" and "modes" are lists of str: "metrics" names the optimization objectives,
and "modes" specifies whether each objective is to be minimized or maximized, respectively.
Both "metrics" and "modes" are ordered by priority from high to low.
"tolerances" is a dictionary to specify the optimality tolerance of each objective.
"targets" is a dictionary to specify the optimization targets for each objective.
If lexico_objectives is provided, the arguments metric and mode will be ignored.
experimental: A bool of whether to use experimental features.
"""
self._eps = SEARCH_THREAD_EPS

View File

@ -70,6 +70,19 @@ class FLOW2(Searcher):
resource_multiple_factor: A float of the multiplicative factor
used for increasing resource.
cost_attr: A string of the attribute used for cost.
lexico_objectives: A dictionary with four elements.
It specifies the information used for multi-objective optimization with lexicographic preference.
e.g.,
```python
lexico_objectives = {"metrics":["error_rate","pred_time"], "modes":["min","min"],
"tolerances":{"error_rate":0.01,"pred_time":0.0}, "targets":{"error_rate":0.0,"pred_time":0.0}}
```
"metrics" and "modes" are lists of str: "metrics" names the optimization objectives,
and "modes" specifies whether each objective is to be minimized or maximized, respectively.
Both "metrics" and "modes" are ordered by priority from high to low.
"tolerances" is a dictionary to specify the optimality tolerance of each objective.
"targets" is a dictionary to specify the optimization targets for each objective.
If lexico_objectives is provided, the arguments metric and mode will be ignored.
seed: An integer of the random seed.
"""
if mode:

View File

@ -141,6 +141,8 @@ class SearchThread:
    not hasattr(self._search_alg, "lexico_objectives")
    or self._search_alg.lexico_objectives is None
):
    # TODO: Improve this behavior. When lexico_objectives is provided to CFO,
    # related variables are not callable.
    obj = result[self._search_alg.metric] * self._metric_op
    if obj < self.obj_best1 or self.best_result is None:
        self.cost_best2 = self.cost_best1
@ -153,6 +155,8 @@ class SearchThread:
    not hasattr(self._search_alg, "lexico_objectives")
    or self._search_alg.lexico_objectives is None
):
    # TODO: Improve this behavior. When lexico_objectives is provided to CFO,
    # related variables are not callable.
    self._update_speed()
self.running -= 1
assert self.running >= 0

View File

@ -2,7 +2,7 @@
# * Copyright (c) FLAML authors. All rights reserved.
# * Licensed under the MIT License. See LICENSE file in the
# * project root for license information.
from typing import Optional, Union, List, Callable, Tuple, Dict
import numpy as np
import datetime
import time
@ -34,22 +34,32 @@ _training_iteration = 0
INCUMBENT_RESULT = "__incumbent_result__"


def is_nan_or_inf(value):
    return np.isnan(value) or np.isinf(value)


class ExperimentAnalysis(EA):
    """Class for storing the experiment results."""

    def __init__(self, trials, metric, mode, lexico_objectives=None):
        try:
            super().__init__(self, None, trials, metric, mode)
        except (TypeError, ValueError):
            self.trials = trials
            self.default_metric = metric or DEFAULT_METRIC
            self.default_mode = mode
            self.lexico_objectives = lexico_objectives
    @property
    def best_trial(self) -> Trial:
        if self.lexico_objectives is None:
            return super().best_trial
        else:
            return self.get_best_trial(self.default_metric, self.default_mode)

    @property
    def best_config(self) -> Dict:
        if self.lexico_objectives is None:
            return super().best_config
        else:
            return self.get_best_config(self.default_metric, self.default_mode)
    def lexico_best(self, trials):
        results = {index: trial.last_result for index, trial in enumerate(trials)}
        metrics = self.lexico_objectives["metrics"]
@ -99,6 +109,13 @@ class ExperimentAnalysis(EA):
        best_trial = super().get_best_trial(metric, mode, scope, filter_nan_and_inf)
        return best_trial
    @property
    def best_result(self) -> Dict:
        if self.lexico_objectives is None:
            return super().best_result
        else:
            return self.best_trial.last_result
def report(_metric=None, **kwargs):
@ -357,16 +374,18 @@ def run(
a trial before the tuning is terminated.
use_ray: A boolean of whether to use ray as the backend.
lexico_objectives: A dictionary with four elements.
It specifies the information used for multi-objective optimization with lexicographic preference.
e.g.,
```python
lexico_objectives = {"metrics":["error_rate","pred_time"], "modes":["min","min"],
"tolerances":{"error_rate":0.01,"pred_time":0.0}, "targets":{"error_rate":0.0,"pred_time":0.0}}
```
"metrics" and "modes" are lists of str: "metrics" names the optimization objectives,
and "modes" specifies whether each objective is to be minimized or maximized, respectively.
Both "metrics" and "modes" are ordered by priority from high to low.
"tolerances" is a dictionary to specify the optimality tolerance of each objective.
"targets" is a dictionary to specify the optimization targets for each objective.
If lexico_objectives is provided, the arguments metric, mode, and search_alg will be ignored.
log_file_name: A string of the log file name. Default to None.
When set to None:

View File

@ -243,7 +243,7 @@
"\n", "\n",
"analysis = tune.run(\n", "analysis = tune.run(\n",
" evaluate_function,\n", " evaluate_function,\n",
" num_samples=100000000,\n", " num_samples=-1,\n",
" time_budget_s=100,\n", " time_budget_s=100,\n",
" config=search_space,\n", " config=search_space,\n",
" use_ray=False,\n", " use_ray=False,\n",

View File

@ -2,7 +2,7 @@ from flaml import AutoML
from flaml.data import load_openml_dataset


def test_lexiflow():
    X_train, X_test, y_train, y_test = load_openml_dataset(
        dataset_id=179, data_dir="test/data"
@ -13,28 +13,18 @@ def _test_lexiflow():
lexico_objectives["tolerances"] = {"val_loss": 0.01, "pred_time": 0.0} lexico_objectives["tolerances"] = {"val_loss": 0.01, "pred_time": 0.0}
lexico_objectives["targets"] = {"val_loss": 0.0, "pred_time": 0.0} lexico_objectives["targets"] = {"val_loss": 0.0, "pred_time": 0.0}
lexico_objectives["modes"] = ["min", "min"] lexico_objectives["modes"] = ["min", "min"]
automl = AutoML() automl = AutoML()
settings = { settings = {
"time_budget": 100, "time_budget": 100,
"lexico_objectives": lexico_objectives, "lexico_objectives": lexico_objectives,
"estimator_list": ["xgboost"], "use_ray": False,
"use_ray": True,
"task": "classification", "task": "classification",
"max_iter": 10000000, "max_iter": -1,
"train_time_limit": 60,
"verbose": 0,
"eval_method": "holdout",
"mem_thres": 128 * (1024**3), "mem_thres": 128 * (1024**3),
"seed": 1,
} }
automl.fit(X_train=X_train, y_train=y_train, X_val=X_test, y_val=y_test, **settings) automl.fit(X_train=X_train, y_train=y_train, X_val=X_test, y_val=y_test, **settings)
print(automl.predict(X_train))
print(automl.model)
print(automl.config_history)
print(automl.best_iteration)
print(automl.best_estimator)
if __name__ == "__main__": if __name__ == "__main__":
_test_lexiflow() test_lexiflow()

View File

@ -12,7 +12,7 @@ N_TRAIN_EXAMPLES = BATCHSIZE * 30
N_VALID_EXAMPLES = BATCHSIZE * 10


def test_lexiflow():
    train_dataset = torchvision.datasets.FashionMNIST(
        "test/data",
        train=True,
@ -109,18 +109,16 @@ def _test_lexiflow():
"n_epoch": 1, "n_epoch": 1,
} }
analysis = tune.run( tune.run(
evaluate_function, evaluate_function,
num_samples=100000000, num_samples=-1,
time_budget_s=100, time_budget_s=100,
config=search_space, config=search_space,
use_ray=False, use_ray=False,
lexico_objectives=lexico_objectives, lexico_objectives=lexico_objectives,
low_cost_partial_config=low_cost_partial_config, low_cost_partial_config=low_cost_partial_config,
) )
result = analysis.best_result
print(result)
if __name__ == "__main__": if __name__ == "__main__":
_test_lexiflow() test_lexiflow()

View File

@ -0,0 +1,165 @@
# Tune - Lexicographic Objectives
## Requirements
```bash
pip install thop torchvision torch
```
## Tuning accurate and efficient neural networks with lexicographic preference
### Data
```python
import torch
import thop
import torch.nn as nn
from flaml import tune
import torch.nn.functional as F
import torchvision
import numpy as np
import os
DEVICE = torch.device("cpu")
BATCHSIZE = 128
N_TRAIN_EXAMPLES = BATCHSIZE * 30
N_VALID_EXAMPLES = BATCHSIZE * 10
data_dir = os.path.abspath("data")
train_dataset = torchvision.datasets.FashionMNIST(
    data_dir,
    train=True,
    download=True,
    transform=torchvision.transforms.ToTensor(),
)
train_loader = torch.utils.data.DataLoader(
    torch.utils.data.Subset(train_dataset, list(range(N_TRAIN_EXAMPLES))),
    batch_size=BATCHSIZE,
    shuffle=True,
)
val_dataset = torchvision.datasets.FashionMNIST(
    data_dir, train=False, transform=torchvision.transforms.ToTensor()
)
val_loader = torch.utils.data.DataLoader(
    torch.utils.data.Subset(val_dataset, list(range(N_VALID_EXAMPLES))),
    batch_size=BATCHSIZE,
    shuffle=True,
)
```
### Specify the model
```python
def define_model(configuration):
    n_layers = configuration["n_layers"]
    layers = []
    in_features = 28 * 28
    for i in range(n_layers):
        out_features = configuration["n_units_l{}".format(i)]
        layers.append(nn.Linear(in_features, out_features))
        layers.append(nn.ReLU())
        p = configuration["dropout_{}".format(i)]
        layers.append(nn.Dropout(p))
        in_features = out_features
    layers.append(nn.Linear(in_features, 10))
    layers.append(nn.LogSoftmax(dim=1))
    return nn.Sequential(*layers)
```
### Train
```python
def train_model(model, optimizer, train_loader):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.view(-1, 28 * 28).to(DEVICE), target.to(DEVICE)
        optimizer.zero_grad()
        F.nll_loss(model(data), target).backward()
        optimizer.step()
```
### Metrics
```python
def eval_model(model, valid_loader):
    model.eval()
    correct = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            data, target = data.view(-1, 28 * 28).to(DEVICE), target.to(DEVICE)
            pred = model(data).argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    accuracy = correct / N_VALID_EXAMPLES
    flops, params = thop.profile(
        model, inputs=(torch.randn(1, 28 * 28).to(DEVICE),), verbose=False
    )
    return np.log2(flops), 1 - accuracy, params
```
### Evaluation function
```python
def evaluate_function(configuration):
    model = define_model(configuration).to(DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), configuration["lr"])
    n_epoch = configuration["n_epoch"]
    for epoch in range(n_epoch):
        train_model(model, optimizer, train_loader)
    flops, error_rate, params = eval_model(model, val_loader)
    return {"error_rate": error_rate, "flops": flops, "params": params}
```
### Search space
```python
search_space = {
    "n_layers": tune.randint(lower=1, upper=3),
    "n_units_l0": tune.randint(lower=4, upper=128),
    "n_units_l1": tune.randint(lower=4, upper=128),
    "n_units_l2": tune.randint(lower=4, upper=128),
    "dropout_0": tune.uniform(lower=0.2, upper=0.5),
    "dropout_1": tune.uniform(lower=0.2, upper=0.5),
    "dropout_2": tune.uniform(lower=0.2, upper=0.5),
    "lr": tune.loguniform(lower=1e-5, upper=1e-1),
    "n_epoch": tune.randint(lower=1, upper=20),
}
```
### Launch the tuning process
```python
# Low cost initial point
low_cost_partial_config = {
    "n_layers": 1,
    "n_units_l0": 4,
    "n_units_l1": 4,
    "n_units_l2": 4,
    "n_epoch": 1,
}

# Specify lexicographic preference
lexico_objectives = {}
lexico_objectives["metrics"] = ["error_rate", "flops"]
lexico_objectives["tolerances"] = {"error_rate": 0.02, "flops": 0.0}
lexico_objectives["targets"] = {"error_rate": 0.0, "flops": 0.0}
lexico_objectives["modes"] = ["min", "min"]

# Launch the tuning process
analysis = tune.run(
    evaluate_function,
    num_samples=-1,
    time_budget_s=100,
    config=search_space,  # search space of NN
    use_ray=False,
    lexico_objectives=lexico_objectives,
    low_cost_partial_config=low_cost_partial_config,  # low cost initial point
)
```
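The returned `analysis` exposes the lexicographically best trial via the `best_result` property added in this commit; for example:

```python
result = analysis.best_result
print(result["error_rate"], result["flops"])
```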
[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/tune_lexicographic.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/tune_lexicographic.ipynb)

View File

@ -422,6 +422,33 @@ automl2.fit(X_train, y_train, time_budget=7200, starting_points=automl1.best_con
`starting_points` is a dictionary or a str to specify the starting hyperparameter config. (1) When it is a dictionary, the keys are the estimator names. If you do not need to specify starting points for an estimator, exclude its name from the dictionary. The value for each key can be either a dictionary or a list of dictionaries, corresponding to one hyperparameter configuration or multiple hyperparameter configurations, respectively. (2) When it is a str: if "data", use data-dependent defaults; if "data:path", use data-dependent defaults which are stored at path; if "static", use data-independent defaults. Please find more details about data-dependent defaults in [zero shot AutoML](Zero-Shot-AutoML#combine-zero-shot-automl-and-hyperparameter-tuning).
### Lexicographic objectives

We support AutoML for multiple objectives with lexicographic preference by providing the argument `lexico_objectives` to `automl.fit()`.
`lexico_objectives` is a dictionary with four mandatory elements:
- `metrics`: A list of optimization objectives. The objectives are ordered by their priority from high to low.
- `modes`: A list to specify whether each objective in `metrics` is to be minimized or maximized, correspondingly.
- `tolerances`: A dictionary to specify the "tolerance" for each objective. The "tolerance" is the amount of performance degradation the user is willing to accept in order to find choices with better performance on the objectives of lower priorities.
- `targets`: A dictionary to specify the "goal" for each objective. When the objective is better than or equal to its "goal", further optimization of it is no longer needed.

In the following example, we want to minimize `val_loss` and `pred_time` of the model, where `val_loss` has the higher priority. The tolerances for `val_loss` and `pred_time` are 0.02 and 0, respectively. Since we have no particular targets for these two objectives, we set the targets to -inf for both.
```python
lexico_objectives = {}
lexico_objectives["metrics"] = ["val_loss", "pred_time"]
lexico_objectives["modes"] = ["min", "min"]
lexico_objectives["tolerances"] = {"val_loss": 0.02, "pred_time": 0.0}
lexico_objectives["targets"] = {"val_loss": -float("inf"), "pred_time": -float("inf")}

# provide the lexico_objectives to automl.fit
automl.fit(..., lexico_objectives=lexico_objectives, ...)
```
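After fitting, the usual attributes of the `AutoML` object remain available, e.g. (a sketch; `X_test` is assumed to be held-out data):

```python
print(automl.best_estimator)
print(automl.best_iteration)
y_pred = automl.predict(X_test)
```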
*Please note that this is a new feature in version 1.1.0 and is subject to change in future versions.*
### Log the trials

View File

@ -516,14 +516,14 @@ analysis = tune.run(
```

### Lexicographic Objectives

We support tuning multiple objectives with lexicographic preference by providing argument `lexico_objectives` for `tune.run()`.
`lexico_objectives` is a dictionary with four mandatory elements:
- `metrics`: A list of optimization objectives. The objectives are ordered by their priority from high to low.
- `modes`: A list to specify whether each objective in `metrics` is to be minimized or maximized, correspondingly.
- `tolerances`: A dictionary to specify the "tolerance" for each objective. The "tolerance" is the amount of performance degradation the user is willing to accept in order to find choices with better performance on the objectives of lower priorities.
- `targets`: A dictionary to specify the "goal" for each objective. When the objective is better than or equal to its "goal", further optimization of it is no longer needed.

In the following example, we want to minimize `val_loss` and `pred_time` of the model, where `val_loss` has the higher priority. The tolerances for `val_loss` and `pred_time` are 0.02 and 0, respectively. Since we have no particular targets for these two objectives, we set the targets to -inf for both.

```python
lexico_objectives = {}
@ -534,12 +534,8 @@ lexico_objectives["targets"] = {"val_loss": -float('inf'), "pred_time": -float('
# provide the lexico_objectives to tune.run
tune.run(..., lexico_objectives=lexico_objectives, ...)
```