autogen/test/nlp/test_autohf_multichoice_classification.py
Chi Wang 595af7a04f
install editable package in codespace (#826)
* install editable package in codespace

* fix test error in test_forecast

* fix test error in test_space

* openml version

* break tests; pre-commit

* skip on py10+win32

* install mlflow in test

* install mlflow in [test]

* skip test in windows

* import

* handle PermissionError

* skip test in windows

* skip test in windows

* skip test in windows

* skip test in windows

* remove ts_forecast_panel from doc
2022-11-27 14:22:54 -05:00

62 lines
1.5 KiB
Python

import sys
import pytest
from utils import get_toy_data_multiplechoiceclassification, get_automl_settings
import os
import shutil
@pytest.mark.skipif(
    sys.platform in ["darwin", "win32"], reason="do not run on mac os or windows"
)
def test_mcc():
    """Smoke-test FLAML AutoML on the toy multiple-choice classification data.

    Fits an AutoML model, prints predictions/probabilities and a manually
    computed accuracy, then removes the test output directory if present.
    Returns early (treated as a pass) when the model download hits an
    HTTP error.
    """
    from flaml import AutoML
    import requests

    (
        X_train,
        y_train,
        X_val,
        y_val,
        X_test,
        y_test,
    ) = get_toy_data_multiplechoiceclassification()

    learner = AutoML()
    settings = get_automl_settings()
    settings["task"] = "multichoice-classification"
    settings["metric"] = "accuracy"

    try:
        learner.fit(
            X_train=X_train,
            y_train=y_train,
            X_val=X_val,
            y_val=y_val,
            **settings
        )
    except requests.exceptions.HTTPError:
        # Transient network failure while fetching the pretrained model;
        # bail out rather than fail the test.
        return

    y_pred = learner.predict(X_test)
    proba = learner.predict_proba(X_test)
    print(str(len(learner.classes_)) + " classes")
    print(y_pred)
    print(y_test)
    print(proba)

    # Manually computed accuracy over the labeled test set.
    hits = sum(1 for idx, label in y_test.items() if y_pred[idx] == label)
    accuracy = round(hits / len(y_pred), 5)
    print("Accuracy: " + str(accuracy))

    # Best-effort cleanup of artifacts produced by the fit.
    if os.path.exists("test/data/output/"):
        try:
            shutil.rmtree("test/data/output/")
        except PermissionError:
            print("PermissionError when deleting test/data/output/")
# Allow running this test directly as a script, outside of pytest.
if __name__ == "__main__":
    test_mcc()