autogen/test/nlp/test_autohf_multichoice_classification.py
Susan Xueqing Liu 2ebddd67ae
Remove NLP classification head (#756)
* rm classification head in nlp

* adding test cases for switch classification head

* Update test/nlp/test_autohf_classificationhead.py

* run each test separately

* skip classification head test on windows

* disabling wandb reporting

* fix test nlp custom metric

* Update website/docs/Examples/AutoML-NLP.md

Co-authored-by: Chi Wang <wang.chi@microsoft.com>
2022-10-12 17:04:42 -07:00

import os
import shutil
import sys

import pytest
from utils import get_toy_data_multiplechoiceclassification, get_automl_settings


@pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")
def test_mcc():
    from flaml import AutoML
    import requests

    # Load the toy multiple-choice dataset and the shared AutoML settings.
    (
        X_train,
        y_train,
        X_val,
        y_val,
        X_test,
        y_test,
    ) = get_toy_data_multiplechoiceclassification()
    automl = AutoML()
    automl_settings = get_automl_settings()
    automl_settings["task"] = "multichoice-classification"
    automl_settings["metric"] = "accuracy"

    try:
        automl.fit(
            X_train=X_train,
            y_train=y_train,
            X_val=X_val,
            y_val=y_val,
            **automl_settings
        )
    except requests.exceptions.HTTPError:
        # Bail out gracefully if the pretrained model cannot be downloaded.
        return

    y_pred = automl.predict(X_test)
    proba = automl.predict_proba(X_test)
    print(str(len(automl.classes_)) + " classes")
    print(y_pred)
    print(y_test)
    print(proba)

    # Compute test accuracy by comparing each prediction with its label.
    true_count = 0
    for i, v in y_test.items():
        if y_pred[i] == v:
            true_count += 1
    accuracy = round(true_count / len(y_pred), 5)
    print("Accuracy: " + str(accuracy))

    # Remove artifacts written during training.
    if os.path.exists("test/data/output/"):
        shutil.rmtree("test/data/output/")


if __name__ == "__main__":
    test_mcc()
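
For context when the repository's utils module is not at hand, here is a minimal sketch of what get_automl_settings might return. The key names follow flaml.AutoML.fit's documented arguments, but every concrete value (budget, checkpoint, output path) is an illustrative assumption, not the test suite's actual configuration:

def get_automl_settings():
    # Hypothetical stand-in for test/nlp/utils.get_automl_settings;
    # the real helper may choose different values.
    return {
        "max_iter": 3,       # try at most 3 hyperparameter configurations
        "time_budget": -1,   # no wall-clock limit; stop on max_iter instead
        "gpu_per_trial": 0,  # CPU-only, as on CI machines
        "fit_kwargs_by_estimator": {
            "transformer": {
                # a small checkpoint keeps the test fast; any HF model id works
                "model_path": "google/electra-small-discriminator",
                # matches the directory removed in the test's cleanup step
                "output_dir": "test/data/output/",
            }
        },
    }

Under those assumptions the file runs either directly (python test/nlp/test_autohf_multichoice_classification.py) or through pytest; the skipif marker above excludes macOS only.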