autogen/test/test_python_log.py


from flaml import AutoML
from sklearn.datasets import load_boston
import os
import unittest
import logging
import tempfile
import io


class TestLogging(unittest.TestCase):

    def test_logging_level(self):
        from flaml import logger, logger_formatter

        with tempfile.TemporaryDirectory() as d:
            training_log = os.path.join(d, "training.log")

            # Configure logging for the FLAML logger
            # and add a handler that outputs to a buffer.
            logger.setLevel(logging.INFO)
            buf = io.StringIO()
            ch = logging.StreamHandler(buf)
            ch.setFormatter(logger_formatter)
            logger.addHandler(ch)

            # Run a simple job.
            automl_experiment = AutoML()
            automl_settings = {
                "time_budget": 1,
                "metric": 'mse',
                "task": 'regression',
                "log_file_name": training_log,
                "log_training_metric": True,
                "n_jobs": 1,
                "model_history": True
            }
            X_train, y_train = load_boston(return_X_y=True)
            n = len(y_train) >> 1
            automl_experiment.fit(X_train=X_train[:n], y_train=y_train[:n],
                                  X_val=X_train[n:], y_val=y_train[n:],
                                  **automl_settings)

            # Check if the log buffer is populated.
            self.assertTrue(len(buf.getvalue()) > 0)
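

# A minimal sketch of how this test can be run as a standalone script; the
# __main__ guard below is an assumption and is not part of the lines shown above.
if __name__ == "__main__":
    # Discovers and runs TestLogging.test_logging_level via unittest's CLI.
    unittest.main()

# Alternatively, assuming pytest is installed, the test can be invoked from the
# repository root with: python -m pytest test/test_python_log.py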