2023-03-11 02:39:08 +00:00
|
|
|
from abc import ABC, abstractmethod
|
|
|
|
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
|
|
|
|
import numpy as np
|
2023-05-24 16:55:04 -07:00
|
|
|
from flaml.automl.data import DataFrame, Series, psDataFrame, psSeries
|
2023-03-11 02:39:08 +00:00
|
|
|
|
|
|
|
if TYPE_CHECKING:
|
|
|
|
import flaml
|
|
|
|
|
|
|
|
# TODO: if your task is not specified in here, define your task as an all-capitalized word

# --- NLP classification task names ---
SEQCLASSIFICATION = "seq-classification"
MULTICHOICECLASSIFICATION = "multichoice-classification"
TOKENCLASSIFICATION = "token-classification"

# --- NLP regression task name ---
SEQREGRESSION = "seq-regression"

# Aliases accepted for time-series forecast regression.
TS_FORECASTREGRESSION = (
    "forecast",
    "ts_forecast",
    "ts_forecast_regression",
)
# All task names treated as regression (plain, NLP, and TS-forecast regression).
REGRESSION = ("regression", SEQREGRESSION, *TS_FORECASTREGRESSION)
TS_FORECASTCLASSIFICATION = "ts_forecast_classification"
TS_FORECASTPANEL = "ts_forecast_panel"
# Every time-series forecast variant (regression, classification, panel).
TS_FORECAST = (
    *TS_FORECASTREGRESSION,
    TS_FORECASTCLASSIFICATION,
    TS_FORECASTPANEL,
)
# All task names treated as classification (tabular, NLP, and TS-forecast classification).
CLASSIFICATION = (
    "binary",
    "multiclass",
    "classification",
    SEQCLASSIFICATION,
    MULTICHOICECLASSIFICATION,
    TOKENCLASSIFICATION,
    TS_FORECASTCLASSIFICATION,
)
RANK = ("rank",)
SUMMARIZATION = "summarization"
# Natural-language generation vs understanding task groupings.
NLG_TASKS = (SUMMARIZATION,)
NLU_TASKS = (
    SEQREGRESSION,
    SEQCLASSIFICATION,
    MULTICHOICECLASSIFICATION,
    TOKENCLASSIFICATION,
)
# Union of all NLP task names.
NLP_TASKS = (*NLG_TASKS, *NLU_TASKS)
|
|
|
|
|
|
|
|
|
|
|
|
def get_classification_objective(num_labels: int) -> str:
    """Return the classification objective name for a given number of labels.

    Args:
        num_labels: Number of distinct class labels in the target.

    Returns:
        "binary" when there are exactly two labels, otherwise "multiclass".
    """
    return "binary" if num_labels == 2 else "multiclass"
|
|
|
|
|
|
|
|
|
|
|
|
class Task(ABC):
    """
    Abstract base class for a machine learning task.

    Class definitions should implement abstract methods and provide a non-empty dictionary of estimator classes.
    A Task can be suitable to be used for multiple machine-learning tasks (e.g. classification or regression) or be
    implemented specifically for a single one, depending on the generality of the data validation and model evaluation
    methods implemented. The implementation of a Task may optionally use the training data and labels to determine data
    and task specific details, such as in determining if a problem is single-label or multi-label.

    FLAML evaluates at runtime how to behave exactly, relying on the task instance to provide implementations of
    operations which vary between tasks.
    """
|
|
|
|
|
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
task_name: str,
|
2023-05-24 16:55:04 -07:00
|
|
|
X_train: Optional[Union[np.ndarray, DataFrame, psDataFrame]] = None,
|
|
|
|
y_train: Optional[Union[np.ndarray, DataFrame, Series, psSeries]] = None,
|
2023-03-11 02:39:08 +00:00
|
|
|
):
|
|
|
|
"""Constructor.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
task_name: String name for this type of task. Used when the Task can be generic and implement a number of
|
|
|
|
types of sub-task.
|
|
|
|
X_train: Optional. Some Task types may use the data shape or features to determine details of their usage,
|
|
|
|
such as in binary vs multilabel classification.
|
|
|
|
y_train: Optional. Some Task types may use the data shape or features to determine details of their usage,
|
|
|
|
such as in binary vs multilabel classification.
|
|
|
|
"""
|
|
|
|
self.name = task_name
|
Factor out time series-related functionality into a time series Task object (#989)
* Refactor into automl subpackage
Moved some of the packages into an automl subpackage to tidy before the
task-based refactor. This is in response to discussions with the group
and a comment on the first task-based PR.
Only changes here are moving subpackages and modules into the new
automl, fixing imports to work with this structure and fixing some
dependencies in setup.py.
* Fix doc building post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Remove vw from test deps as this is breaking the build
* Move default back to the top-level
I'd moved this to automl as that's where it's used internally, but had
missed that this is actually part of the public interface so makes sense
to live where it was.
* Re-add top level modules with deprecation warnings
flaml.data, flaml.ml and flaml.model are re-added to the top level,
being re-exported from flaml.automl for backwards compatability. Adding
a deprecation warning so that we can have a planned removal later.
* Fix model.py line-endings
* WIP
* WIP - Notes below
Got to the point where the methods from AutoML are pulled to
GenericTask. Started removing private markers and removing the passing
of automl to these methods. Done with decide_split_type, started on
prepare_data. Need to do the others after
* Re-add generic_task
* Most of the merge done, test_forecast_automl fit succeeds, fails at predict()
* Remaining fixes - test_forecast.py passes
* Comment out holidays-related code as it's not currently used
* Further holidays cleanup
* Fix imports in a test
* tidy up validate_data in time series task
* Test fixes
* Fix tests: add Task.__str__
* Fix tests: test for ray.ObjectRef
* Hotwire TS_Sklearn wrapper to fix test fail
* Attempt at test fix
* Fix test where val_pred_y is a list
* Attempt to fix remaining tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Remove plots from automl/test_forecast
* Remove unused data size field from Task
* Fix import for CLASSIFICATION in notebook
* Monkey patch TFT to avoid plotting, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Fix circular import
* remove redundant code in task.py post-merge
* Fix test: set svd_solver="full" in PCA
* Update flaml/automl/data.py
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* Fix review comments
* Fix task -> str in custom learner constructor
* Remove unused CLASSIFICATION imports
* Hotwire TS_Sklearn wrapper to fix test fail by setting
optimizer_for_horizon == False
* Revert changes to the automl_classification and pin FLAML version
* Fix imports in reverted notebook
* Fix FLAML version in automl notebooks
* Fix ml.py line endings
* Fix CLASSIFICATION task import in automl_classification notebook
* Uncomment pip install in notebook and revert import
Not convinced this will work because of installing an older version of
the package into the environment in which we're running the tests, but
let's see.
* Revert c6a5dd1a0
* Fix get_classification_objective import in suggest.py
* Remove hcrystallball docs reference in TS_Sklearn
* Merge markharley:extract-task-class-from-automl into this
* Fix import, remove smooth.py
* Fix dependencies to fix TFT fail on Windows Python 3.8 and 3.9
* Add tensorboardX dependency to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Disable PCA reduction of lagged features for now, to fix svd convervence fail
* Merge flaml/main into time_series_task
* Attempt to fix formatting
* Attempt to fix formatting
* tentatively implement holt-winters-no covariates
* fix forecast method, clean class
* checking external regressors too
* update test forecast
* remove duplicated test file, re-add sarimax, search space cleanup
* Update flaml/automl/model.py
removed links. Most important one probably was: https://robjhyndman.com/hyndsight/ets-regressors/
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* prevent short series
* add docs
* First attempt at merging Holt-Winters
* Linter fix
* Add holt-winters to TimeSeriesTask.estimators
* Fix spark test fail
* Attempt to fix another spark test fail
* Attempt to fix another spark test fail
* Change Black max line length to 127
* Change Black max line length to 120
* Add logging for ARIMA params, clean up time series models inheritance
* Add more logging for missing ARIMA params
* Remove a meaningless test causing a fail, add stricter check on ARIMA params
* Fix a bug in HoltWinters
* A pointless change to hopefully trigger the on and off KeyError in ARIMA.fit()
* Fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Add type annotations to _train_with_config() in state.py
* Add type annotations to prepare_sample_train_data() in state.py
* Add docstring for time_col argument of AutoML.fit()
* Address @sonichi's comments on PR
* Fix formatting
* Fix formatting
* Reduce test time budget
* Reduce test time budget
* Increase time budget for the test to pass
* Remove redundant imports
* Remove more redundant imports
* Minor fixes of points raised by Qingyun
* Try to fix pandas import fail
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Formatting fixes
* More formatting fixes
* Added test that loops over TS models to ensure coverage
* Fix formatting issues
* Fix more formatting issues
* Fix random fail in check
* Put back in tests for ARIMA predict without fit
* Put back in tests for lgbm
* Update test/test_model.py
cover dedup
* Match target length to X length in missing test
---------
Co-authored-by: Mark Harley <mark.harley@transferwise.com>
Co-authored-by: Mark Harley <mharley.code@gmail.com>
Co-authored-by: Qingyun Wu <qingyun.wu@psu.edu>
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: Andrea W <a.ruggerini@ammagamma.com>
Co-authored-by: Andrea Ruggerini <nescio.adv@gmail.com>
Co-authored-by: Egor Kraev <Egor.Kraev@tw.com>
Co-authored-by: Li Jiang <bnujli@gmail.com>
2023-06-19 12:20:32 +01:00
|
|
|
self._estimators = None
|
2023-03-11 02:39:08 +00:00
|
|
|
|
|
|
|
def __str__(self) -> str:
|
|
|
|
"""Name of this task type."""
|
|
|
|
return self.name
|
|
|
|
|
|
|
|
    @abstractmethod
    def evaluate_model_CV(
        self,
        config: dict,
        estimator: "flaml.automl.ml.BaseEstimator",
        X_train_all: Union[np.ndarray, DataFrame, psDataFrame],
        y_train_all: Union[np.ndarray, DataFrame, Series, psSeries],
        budget: int,
        kf,  # cross-validation index generator (e.g. a scikit-learn splitter)
        eval_metric: str,
        best_val_loss: float,
        log_training_metric: bool = False,
        fit_kwargs: Optional[dict] = {},  # NOTE(review): mutable default — implementations must not mutate it
    ) -> Tuple[float, float, float, float]:
        """Evaluate the model using cross-validation.

        Args:
            config: configuration used in the evaluation of the metric.
            estimator: Estimator class of the model.
            X_train_all: Complete training feature data.
            y_train_all: Complete training target data.
            budget: Training time budget.
            kf: Cross-validation index generator.
            eval_metric: Metric name to be used for evaluation.
            best_val_loss: Best current validation-set loss.
            log_training_metric: Bool defaults False. Enables logging of the training metric.
            fit_kwargs: Additional kwargs passed to the estimator's fit method.

        Returns:
            validation loss, metric value, train time, prediction time
        """
|
|
|
|
|
|
|
|
    @abstractmethod
    def validate_data(
        self,
        automl: "flaml.automl.automl.AutoML",
        state: "flaml.automl.state.AutoMLState",
        X_train_all: Union[np.ndarray, DataFrame, psDataFrame, None],
        y_train_all: Union[np.ndarray, DataFrame, Series, psSeries, None],
        dataframe: Union[DataFrame, None],
        label: str,
        X_val: Optional[Union[np.ndarray, DataFrame, psDataFrame]] = None,
        y_val: Optional[Union[np.ndarray, DataFrame, Series, psSeries]] = None,
        groups_val: Optional[List[str]] = None,
        groups: Optional[List[str]] = None,
    ):
        """Validate that the data is suitable for this task type.

        Args:
            automl: The AutoML instance from which this task has been constructed.
            state: The AutoMLState instance for this run.
            X_train_all: The complete data set or None if dataframe is supplied.
            y_train_all: The complete target set or None if dataframe is supplied.
            dataframe: A dataframe containing the complete data set with targets.
            label: The name of the target column in dataframe.
            X_val: Optional. A data set for validation.
            y_val: Optional. A target vector corresponding to X_val for validation.
            groups_val: Group labels (with matching length to y_val) or group counts (with sum equal to length of y_val)
                for validation data. Need to be consistent with groups.
            groups: Group labels (with matching length to y_train) or group counts (with sum equal to length of y_train)
                for training data.

        Raises:
            AssertionError: The data provided is invalid for this task type and configuration.
        """
|
|
|
|
|
|
|
|
    @abstractmethod
    def prepare_data(
        self,
        state: "flaml.automl.state.AutoMLState",
        X_train_all: Union[np.ndarray, DataFrame, psDataFrame],
        y_train_all: Union[np.ndarray, DataFrame, Series, psSeries, None],
        auto_augment: bool,
        eval_method: str,
        split_type: str,
        split_ratio: float,
        n_splits: int,
        data_is_df: bool,
        sample_weight_full: Optional[List[float]] = None,
    ):
        """Prepare the data for fitting or inference.

        Args:
            state: The AutoMLState instance for this run.
            X_train_all: The complete data set. Must contain the target if y_train_all is None.
            y_train_all: The complete target set or None if supplied in X_train_all.
            auto_augment: If true, task-specific data augmentations will be applied.
            eval_method: A string of resampling strategy, one of ['auto', 'cv', 'holdout'].
            split_type: str or splitter object, default="auto" | the data split type.
                * A valid splitter object is an instance of a derived class of scikit-learn
                [KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
                and have ``split`` and ``get_n_splits`` methods with the same signatures.
                Set eval_method to "cv" to use the splitter object.
                * Valid str options depend on different tasks.
                For classification tasks, valid choices are
                ["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
                For regression tasks, valid choices are ["auto", 'uniform', 'time'].
                "auto" -> uniform.
                For time series forecast tasks, must be "auto" or 'time'.
                For ranking task, must be "auto" or 'group'.
            split_ratio: A float of the validation data percentage for holdout.
            n_splits: An integer of the number of folds for cross-validation.
            data_is_df: True if the data was provided as a DataFrame else False.
            sample_weight_full: A 1d arraylike of the sample weight.

        Raises:
            AssertionError: The configuration provided is invalid for this task type and data.
        """
|
|
|
|
|
|
|
|
    @abstractmethod
    def decide_split_type(
        self,
        split_type: str,
        y_train_all: Union[np.ndarray, DataFrame, Series, psSeries, None],
        fit_kwargs: dict,
        groups: Optional[List[str]] = None,
    ) -> str:
        """Choose an appropriate data split type for this data and task.

        If split_type is 'auto' then this is determined based on the task type and data.
        If a specific split_type is requested then the choice is validated to be appropriate.

        Args:
            split_type: Either 'auto' or a task appropriate split type.
            y_train_all: The complete set of targets.
            fit_kwargs: Additional kwargs passed to the estimator's fit method.
            groups: Optional. Group labels (with matching length to y_train) or group counts (with sum equal to length
                of y_train) for training data.

        Returns:
            The determined appropriate split type.

        Raises:
            AssertionError: The requested split_type is invalid for this task, configuration and data.
        """
|
|
|
|
|
|
|
|
    @abstractmethod
    def preprocess(
        self,
        X: Union[np.ndarray, DataFrame, psDataFrame],
        transformer: Optional["flaml.automl.data.DataTransformer"] = None,
    ) -> Union[np.ndarray, DataFrame]:
        """Preprocess the data ready for fitting or inference with this task type.

        Args:
            X: The data set to process.
            transformer: Optional. A DataTransformer instance to be used in processing.

        Returns:
            The preprocessed data set having the same type as the input.
        """
|
|
|
|
|
|
|
|
    @abstractmethod
    def default_estimator_list(
        self,
        estimator_list: Union[List[str], str] = "auto",
        is_spark_dataframe: bool = False,
    ) -> List[str]:
        """Return the list of default estimators registered for this task type.

        If 'auto' is provided then the default list is returned, else the provided list will be validated given this task
        type.

        Args:
            estimator_list: Either 'auto' or a list of estimator names to be validated.
            is_spark_dataframe: True if the data is a spark dataframe.

        Returns:
            A list of valid estimator names for this task type.
        """
|
|
|
|
|
|
|
|
    @abstractmethod
    def default_metric(self, metric: str) -> str:
        """Return the default metric for this task type.

        If 'auto' is provided then the default metric for this task will be returned. Otherwise, the provided metric
        name is validated for this task type.

        Args:
            metric: The name of a metric to be used in evaluation of models during fitting or validation.

        Returns:
            The default metric, or the provided metric if it is valid for this task type.
        """
|
|
|
|
|
|
|
|
def is_ts_forecast(self) -> bool:
|
|
|
|
return self.name in TS_FORECAST
|
|
|
|
|
|
|
|
def is_ts_forecastpanel(self) -> bool:
|
|
|
|
return self.name == TS_FORECASTPANEL
|
|
|
|
|
|
|
|
def is_ts_forecastregression(self) -> bool:
|
|
|
|
return self.name in TS_FORECASTREGRESSION
|
|
|
|
|
|
|
|
def is_nlp(self) -> bool:
|
|
|
|
return self.name in NLP_TASKS
|
|
|
|
|
|
|
|
def is_nlg(self) -> bool:
|
|
|
|
return self.name in NLG_TASKS
|
|
|
|
|
|
|
|
def is_classification(self) -> bool:
|
|
|
|
return self.name in CLASSIFICATION
|
|
|
|
|
|
|
|
def is_rank(self) -> bool:
|
|
|
|
return self.name in RANK
|
|
|
|
|
|
|
|
def is_binary(self) -> bool:
|
|
|
|
return self.name == "binary"
|
|
|
|
|
|
|
|
def is_seq_regression(self) -> bool:
|
|
|
|
return self.name == SEQREGRESSION
|
|
|
|
|
|
|
|
def is_seq_classification(self) -> bool:
|
|
|
|
return self.name == SEQCLASSIFICATION
|
|
|
|
|
|
|
|
def is_token_classification(self) -> bool:
|
|
|
|
return self.name == TOKENCLASSIFICATION
|
|
|
|
|
|
|
|
def is_summarization(self) -> bool:
|
|
|
|
return self.name == SUMMARIZATION
|
|
|
|
|
|
|
|
def is_multiclass(self) -> bool:
|
|
|
|
return "multiclass" in self.name
|
|
|
|
|
|
|
|
def is_regression(self) -> bool:
|
|
|
|
return self.name in REGRESSION
|
|
|
|
|
|
|
|
def __eq__(self, other: str) -> bool:
|
|
|
|
"""For backward compatibility with all the string comparisons to task"""
|
|
|
|
return self.name == other
|
|
|
|
|
Factor out time series-related functionality into a time series Task object (#989)
* Refactor into automl subpackage
Moved some of the packages into an automl subpackage to tidy before the
task-based refactor. This is in response to discussions with the group
and a comment on the first task-based PR.
Only changes here are moving subpackages and modules into the new
automl, fixing imports to work with this structure and fixing some
dependencies in setup.py.
* Fix doc building post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Remove vw from test deps as this is breaking the build
* Move default back to the top-level
I'd moved this to automl as that's where it's used internally, but had
missed that this is actually part of the public interface so makes sense
to live where it was.
* Re-add top level modules with deprecation warnings
flaml.data, flaml.ml and flaml.model are re-added to the top level,
being re-exported from flaml.automl for backwards compatability. Adding
a deprecation warning so that we can have a planned removal later.
* Fix model.py line-endings
* WIP
* WIP - Notes below
Got to the point where the methods from AutoML are pulled to
GenericTask. Started removing private markers and removing the passing
of automl to these methods. Done with decide_split_type, started on
prepare_data. Need to do the others after
* Re-add generic_task
* Most of the merge done, test_forecast_automl fit succeeds, fails at predict()
* Remaining fixes - test_forecast.py passes
* Comment out holidays-related code as it's not currently used
* Further holidays cleanup
* Fix imports in a test
* tidy up validate_data in time series task
* Test fixes
* Fix tests: add Task.__str__
* Fix tests: test for ray.ObjectRef
* Hotwire TS_Sklearn wrapper to fix test fail
* Attempt at test fix
* Fix test where val_pred_y is a list
* Attempt to fix remaining tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Remove plots from automl/test_forecast
* Remove unused data size field from Task
* Fix import for CLASSIFICATION in notebook
* Monkey patch TFT to avoid plotting, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Fix circular import
* remove redundant code in task.py post-merge
* Fix test: set svd_solver="full" in PCA
* Update flaml/automl/data.py
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* Fix review comments
* Fix task -> str in custom learner constructor
* Remove unused CLASSIFICATION imports
* Hotwire TS_Sklearn wrapper to fix test fail by setting
optimizer_for_horizon == False
* Revert changes to the automl_classification and pin FLAML version
* Fix imports in reverted notebook
* Fix FLAML version in automl notebooks
* Fix ml.py line endings
* Fix CLASSIFICATION task import in automl_classification notebook
* Uncomment pip install in notebook and revert import
Not convinced this will work because of installing an older version of
the package into the environment in which we're running the tests, but
let's see.
* Revert c6a5dd1a0
* Fix get_classification_objective import in suggest.py
* Remove hcrystallball docs reference in TS_Sklearn
* Merge markharley:extract-task-class-from-automl into this
* Fix import, remove smooth.py
* Fix dependencies to fix TFT fail on Windows Python 3.8 and 3.9
* Add tensorboardX dependency to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Disable PCA reduction of lagged features for now, to fix svd convervence fail
* Merge flaml/main into time_series_task
* Attempt to fix formatting
* Attempt to fix formatting
* tentatively implement holt-winters-no covariates
* fix forecast method, clean class
* checking external regressors too
* update test forecast
* remove duplicated test file, re-add sarimax, search space cleanup
* Update flaml/automl/model.py
removed links. Most important one probably was: https://robjhyndman.com/hyndsight/ets-regressors/
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* prevent short series
* add docs
* First attempt at merging Holt-Winters
* Linter fix
* Add holt-winters to TimeSeriesTask.estimators
* Fix spark test fail
* Attempt to fix another spark test fail
* Attempt to fix another spark test fail
* Change Black max line length to 127
* Change Black max line length to 120
* Add logging for ARIMA params, clean up time series models inheritance
* Add more logging for missing ARIMA params
* Remove a meaningless test causing a fail, add stricter check on ARIMA params
* Fix a bug in HoltWinters
* A pointless change to hopefully trigger the on and off KeyError in ARIMA.fit()
* Fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Add type annotations to _train_with_config() in state.py
* Add type annotations to prepare_sample_train_data() in state.py
* Add docstring for time_col argument of AutoML.fit()
* Address @sonichi's comments on PR
* Fix formatting
* Fix formatting
* Reduce test time budget
* Reduce test time budget
* Increase time budget for the test to pass
* Remove redundant imports
* Remove more redundant imports
* Minor fixes of points raised by Qingyun
* Try to fix pandas import fail
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Formatting fixes
* More formatting fixes
* Added test that loops over TS models to ensure coverage
* Fix formatting issues
* Fix more formatting issues
* Fix random fail in check
* Put back in tests for ARIMA predict without fit
* Put back in tests for lgbm
* Update test/test_model.py
cover dedup
* Match target length to X length in missing test
---------
Co-authored-by: Mark Harley <mark.harley@transferwise.com>
Co-authored-by: Mark Harley <mharley.code@gmail.com>
Co-authored-by: Qingyun Wu <qingyun.wu@psu.edu>
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: Andrea W <a.ruggerini@ammagamma.com>
Co-authored-by: Andrea Ruggerini <nescio.adv@gmail.com>
Co-authored-by: Egor Kraev <Egor.Kraev@tw.com>
Co-authored-by: Li Jiang <bnujli@gmail.com>
2023-06-19 12:20:32 +01:00
|
|
|
def estimator_class_from_str(self, estimator_name: str) -> "flaml.automl.ml.BaseEstimator":
|
2023-03-11 02:39:08 +00:00
|
|
|
"""Determine the estimator class corresponding to the provided name.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
estimator_name: Name of the desired estimator.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
The estimator class corresponding to the provided name.
|
|
|
|
|
|
|
|
Raises:
|
|
|
|
ValueError: The provided estimator_name has not been registered for this task type.
|
|
|
|
"""
|
Factor out time series-related functionality into a time series Task object (#989)
* Refactor into automl subpackage
Moved some of the packages into an automl subpackage to tidy before the
task-based refactor. This is in response to discussions with the group
and a comment on the first task-based PR.
Only changes here are moving subpackages and modules into the new
automl, fixing imports to work with this structure and fixing some
dependencies in setup.py.
* Fix doc building post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Remove vw from test deps as this is breaking the build
* Move default back to the top-level
I'd moved this to automl as that's where it's used internally, but had
missed that this is actually part of the public interface so makes sense
to live where it was.
* Re-add top level modules with deprecation warnings
flaml.data, flaml.ml and flaml.model are re-added to the top level,
being re-exported from flaml.automl for backwards compatability. Adding
a deprecation warning so that we can have a planned removal later.
* Fix model.py line-endings
* WIP
* WIP - Notes below
Got to the point where the methods from AutoML are pulled to
GenericTask. Started removing private markers and removing the passing
of automl to these methods. Done with decide_split_type, started on
prepare_data. Need to do the others after
* Re-add generic_task
* Most of the merge done, test_forecast_automl fit succeeds, fails at predict()
* Remaining fixes - test_forecast.py passes
* Comment out holidays-related code as it's not currently used
* Further holidays cleanup
* Fix imports in a test
* tidy up validate_data in time series task
* Test fixes
* Fix tests: add Task.__str__
* Fix tests: test for ray.ObjectRef
* Hotwire TS_Sklearn wrapper to fix test fail
* Attempt at test fix
* Fix test where val_pred_y is a list
* Attempt to fix remaining tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Remove plots from automl/test_forecast
* Remove unused data size field from Task
* Fix import for CLASSIFICATION in notebook
* Monkey patch TFT to avoid plotting, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Fix circular import
* remove redundant code in task.py post-merge
* Fix test: set svd_solver="full" in PCA
* Update flaml/automl/data.py
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* Fix review comments
* Fix task -> str in custom learner constructor
* Remove unused CLASSIFICATION imports
* Hotwire TS_Sklearn wrapper to fix test fail by setting
optimizer_for_horizon == False
* Revert changes to the automl_classification and pin FLAML version
* Fix imports in reverted notebook
* Fix FLAML version in automl notebooks
* Fix ml.py line endings
* Fix CLASSIFICATION task import in automl_classification notebook
* Uncomment pip install in notebook and revert import
Not convinced this will work because of installing an older version of
the package into the environment in which we're running the tests, but
let's see.
* Revert c6a5dd1a0
* Fix get_classification_objective import in suggest.py
* Remove hcrystallball docs reference in TS_Sklearn
* Merge markharley:extract-task-class-from-automl into this
* Fix import, remove smooth.py
* Fix dependencies to fix TFT fail on Windows Python 3.8 and 3.9
* Add tensorboardX dependency to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Disable PCA reduction of lagged features for now, to fix svd convervence fail
* Merge flaml/main into time_series_task
* Attempt to fix formatting
* Attempt to fix formatting
* tentatively implement holt-winters-no covariates
* fix forecast method, clean class
* checking external regressors too
* update test forecast
* remove duplicated test file, re-add sarimax, search space cleanup
* Update flaml/automl/model.py
removed links. Most important one probably was: https://robjhyndman.com/hyndsight/ets-regressors/
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* prevent short series
* add docs
* First attempt at merging Holt-Winters
* Linter fix
* Add holt-winters to TimeSeriesTask.estimators
* Fix spark test fail
* Attempt to fix another spark test fail
* Attempt to fix another spark test fail
* Change Black max line length to 127
* Change Black max line length to 120
* Add logging for ARIMA params, clean up time series models inheritance
* Add more logging for missing ARIMA params
* Remove a meaningless test causing a fail, add stricter check on ARIMA params
* Fix a bug in HoltWinters
* A pointless change to hopefully trigger the on and off KeyError in ARIMA.fit()
* Fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Add type annotations to _train_with_config() in state.py
* Add type annotations to prepare_sample_train_data() in state.py
* Add docstring for time_col argument of AutoML.fit()
* Address @sonichi's comments on PR
* Fix formatting
* Fix formatting
* Reduce test time budget
* Reduce test time budget
* Increase time budget for the test to pass
* Remove redundant imports
* Remove more redundant imports
* Minor fixes of points raised by Qingyun
* Try to fix pandas import fail
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Formatting fixes
* More formatting fixes
* Added test that loops over TS models to ensure coverage
* Fix formatting issues
* Fix more formatting issues
* Fix random fail in check
* Put back in tests for ARIMA predict without fit
* Put back in tests for lgbm
* Update test/test_model.py
cover dedup
* Match target length to X length in missing test
---------
Co-authored-by: Mark Harley <mark.harley@transferwise.com>
Co-authored-by: Mark Harley <mharley.code@gmail.com>
Co-authored-by: Qingyun Wu <qingyun.wu@psu.edu>
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: Andrea W <a.ruggerini@ammagamma.com>
Co-authored-by: Andrea Ruggerini <nescio.adv@gmail.com>
Co-authored-by: Egor Kraev <Egor.Kraev@tw.com>
Co-authored-by: Li Jiang <bnujli@gmail.com>
2023-06-19 12:20:32 +01:00
|
|
|
if estimator_name in self.estimators:
|
|
|
|
return self.estimators[estimator_name]
|
2023-03-11 02:39:08 +00:00
|
|
|
else:
|
|
|
|
raise ValueError(
|
|
|
|
f"{estimator_name} is not a built-in learner for this task type, "
|
Factor out time series-related functionality into a time series Task object (#989)
* Refactor into automl subpackage
Moved some of the packages into an automl subpackage to tidy before the
task-based refactor. This is in response to discussions with the group
and a comment on the first task-based PR.
Only changes here are moving subpackages and modules into the new
automl, fixing imports to work with this structure and fixing some
dependencies in setup.py.
* Fix doc building post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Fix broken links in website post automl subpackage refactor
* Remove vw from test deps as this is breaking the build
* Move default back to the top-level
I'd moved this to automl as that's where it's used internally, but had
missed that this is actually part of the public interface so makes sense
to live where it was.
* Re-add top level modules with deprecation warnings
flaml.data, flaml.ml and flaml.model are re-added to the top level,
being re-exported from flaml.automl for backwards compatability. Adding
a deprecation warning so that we can have a planned removal later.
* Fix model.py line-endings
* WIP
* WIP - Notes below
Got to the point where the methods from AutoML are pulled to
GenericTask. Started removing private markers and removing the passing
of automl to these methods. Done with decide_split_type, started on
prepare_data. Need to do the others after
* Re-add generic_task
* Most of the merge done, test_forecast_automl fit succeeds, fails at predict()
* Remaining fixes - test_forecast.py passes
* Comment out holidays-related code as it's not currently used
* Further holidays cleanup
* Fix imports in a test
* tidy up validate_data in time series task
* Test fixes
* Fix tests: add Task.__str__
* Fix tests: test for ray.ObjectRef
* Hotwire TS_Sklearn wrapper to fix test fail
* Attempt at test fix
* Fix test where val_pred_y is a list
* Attempt to fix remaining tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Push to retrigger tests
* Remove plots from automl/test_forecast
* Remove unused data size field from Task
* Fix import for CLASSIFICATION in notebook
* Monkey patch TFT to avoid plotting, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Monkey patch TFT to avoid plotting v2, to fix tests on MacOS
* Fix circular import
* remove redundant code in task.py post-merge
* Fix test: set svd_solver="full" in PCA
* Update flaml/automl/data.py
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* Fix review comments
* Fix task -> str in custom learner constructor
* Remove unused CLASSIFICATION imports
* Hotwire TS_Sklearn wrapper to fix test fail by setting
optimizer_for_horizon == False
* Revert changes to the automl_classification and pin FLAML version
* Fix imports in reverted notebook
* Fix FLAML version in automl notebooks
* Fix ml.py line endings
* Fix CLASSIFICATION task import in automl_classification notebook
* Uncomment pip install in notebook and revert import
Not convinced this will work because of installing an older version of
the package into the environment in which we're running the tests, but
let's see.
* Revert c6a5dd1a0
* Fix get_classification_objective import in suggest.py
* Remove hcrystallball docs reference in TS_Sklearn
* Merge markharley:extract-task-class-from-automl into this
* Fix import, remove smooth.py
* Fix dependencies to fix TFT fail on Windows Python 3.8 and 3.9
* Add tensorboardX dependency to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Set pytorch-lightning==1.9.0 to fix TFT fail on Windows Python 3.8 and 3.9
* Disable PCA reduction of lagged features for now, to fix svd convervence fail
* Merge flaml/main into time_series_task
* Attempt to fix formatting
* Attempt to fix formatting
* tentatively implement holt-winters-no covariates
* fix forecast method, clean class
* checking external regressors too
* update test forecast
* remove duplicated test file, re-add sarimax, search space cleanup
* Update flaml/automl/model.py
removed links. Most important one probably was: https://robjhyndman.com/hyndsight/ets-regressors/
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
* prevent short series
* add docs
* First attempt at merging Holt-Winters
* Linter fix
* Add holt-winters to TimeSeriesTask.estimators
* Fix spark test fail
* Attempt to fix another spark test fail
* Attempt to fix another spark test fail
* Change Black max line length to 127
* Change Black max line length to 120
* Add logging for ARIMA params, clean up time series models inheritance
* Add more logging for missing ARIMA params
* Remove a meaningless test causing a fail, add stricter check on ARIMA params
* Fix a bug in HoltWinters
* A pointless change to hopefully trigger the on and off KeyError in ARIMA.fit()
* Fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Attempt to fix formatting
* Add type annotations to _train_with_config() in state.py
* Add type annotations to prepare_sample_train_data() in state.py
* Add docstring for time_col argument of AutoML.fit()
* Address @sonichi's comments on PR
* Fix formatting
* Fix formatting
* Reduce test time budget
* Reduce test time budget
* Increase time budget for the test to pass
* Remove redundant imports
* Remove more redundant imports
* Minor fixes of points raised by Qingyun
* Try to fix pandas import fail
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Try to fix pandas import fail, again
* Formatting fixes
* More formatting fixes
* Added test that loops over TS models to ensure coverage
* Fix formatting issues
* Fix more formatting issues
* Fix random fail in check
* Put back in tests for ARIMA predict without fit
* Put back in tests for lgbm
* Update test/test_model.py
cover dedup
* Match target length to X length in missing test
---------
Co-authored-by: Mark Harley <mark.harley@transferwise.com>
Co-authored-by: Mark Harley <mharley.code@gmail.com>
Co-authored-by: Qingyun Wu <qingyun.wu@psu.edu>
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: Andrea W <a.ruggerini@ammagamma.com>
Co-authored-by: Andrea Ruggerini <nescio.adv@gmail.com>
Co-authored-by: Egor Kraev <Egor.Kraev@tw.com>
Co-authored-by: Li Jiang <bnujli@gmail.com>
2023-06-19 12:20:32 +01:00
|
|
|
f"only {list(self.estimators.keys())} are supported."
|
2023-03-11 02:39:08 +00:00
|
|
|
"Please use AutoML.add_learner() to add a customized learner."
|
|
|
|
)
|