autogen/test/nlp/test_autohf.py

import sys
import pytest
import requests
from utils import get_toy_data_seqclassification, get_automl_settings

@pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")
def test_hf_data():
    from flaml import AutoML

    X_train, y_train, X_val, y_val, X_test = get_toy_data_seqclassification()

    automl = AutoML()
    automl_settings = get_automl_settings()

    # Fit AutoML on the toy sequence-classification data; skip the rest of the
    # test if the model download fails (e.g. the hub is unreachable).
    try:
        automl.fit(
            X_train=X_train,
            y_train=y_train,
            X_val=X_val,
            y_val=y_val,
            **automl_settings
        )
        automl.score(X_val, y_val, **{"metric": "accuracy"})
        automl.pickle("automl.pkl")
    except requests.exceptions.HTTPError:
        return

    # Check that each trial's logged validation loss matches the best
    # (minimum) intermediate "eval_automl_metric" recorded for that trial.
    import json

    with open("seqclass.log", "r") as fin:
        for line in fin:
            each_log = json.loads(line.strip("\n"))
            if "validation_loss" in each_log:
                val_loss = each_log["validation_loss"]
                min_inter_result = min(
                    each_dict.get("eval_automl_metric", sys.maxsize)
                    for each_dict in each_log["logged_metric"]["intermediate_results"]
                )
                if min_inter_result != sys.maxsize:
                    assert val_loss == min_inter_result

    # Retrain the best configuration from the log on the full training data,
    # then exercise predict / predict_proba on several input formats.
    automl = AutoML()
    automl_settings.pop("max_iter", None)
    automl_settings.pop("use_ray", None)
    automl_settings.pop("estimator_list", None)
    automl.retrain_from_log(
        X_train=X_train,
        y_train=y_train,
        train_full=True,
        record_id=0,
        **automl_settings
    )
    automl.predict(X_test)
    automl.predict(["test test", "test test"])
    automl.predict(
        [
            ["test test", "test test"],
            ["test test", "test test"],
            ["test test", "test test"],
        ]
    )
    automl.predict_proba(X_test)
    print(automl.classes_)


if __name__ == "__main__":
    test_hf_data()