adding catch for HTTP error (#432)

Xueqing Liu 2022-01-30 01:53:32 -05:00 committed by GitHub
parent 1a479e4bdb
commit 438ccaa0c9
9 changed files with 81 additions and 30 deletions
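Every test touched below applies the same guard: the call to automl.fit is wrapped in a try/except so that an HTTP error, presumably raised while fetching the pretrained checkpoint named in model_path, makes the test return early instead of failing. A minimal, self-contained sketch of that pattern follows; fit_or_skip and its arguments are placeholders standing in for the fixtures each test builds, not names from this repository.

import requests
from flaml import AutoML


def fit_or_skip(X_train, y_train, X_val, y_val, automl_settings):
    # Run AutoML.fit, but treat a failed model download (surfacing as
    # requests.exceptions.HTTPError) as an environment problem rather
    # than a test failure.
    automl = AutoML()
    try:
        automl.fit(
            X_train=X_train,
            y_train=y_train,
            X_val=X_val,
            y_val=y_val,
            **automl_settings,
        )
    except requests.exceptions.HTTPError:
        return None
    return automl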

View File

@@ -2,6 +2,7 @@ import sys
 import pytest
 import pickle
 import shutil
+import requests
 @pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")
@@ -92,9 +93,16 @@ def test_hf_data():
         "fp16": False,
     }
-    automl.fit(
-        X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
-    )
+    try:
+        automl.fit(
+            X_train=X_train,
+            y_train=y_train,
+            X_val=X_val,
+            y_val=y_val,
+            **automl_settings
+        )
+    except requests.exceptions.HTTPError:
+        return
     automl = AutoML()
     automl.retrain_from_log(
@@ -132,8 +140,8 @@ def _test_custom_data():
         train_dataset = pd.read_csv("data/input/train.tsv", delimiter="\t", quoting=3)
         dev_dataset = pd.read_csv("data/input/dev.tsv", delimiter="\t", quoting=3)
         test_dataset = pd.read_csv("data/input/test.tsv", delimiter="\t", quoting=3)
-    except requests.exceptions.ConnectionError:
-        pass
+    except requests.exceptions.HTTPError:
+        return
     custom_sent_keys = ["#1 String", "#2 String"]
     label_key = "Quality"
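In _test_custom_data above, the handler also switches from pass to return, so a failed fetch of the input .tsv files stops the test before train_dataset is used. A hedged sketch of why that matters; _download_inputs and its URL are hypothetical stand-ins for whatever step fetches data/input/*.tsv over HTTP.

import os
import pandas as pd
import requests


def _download_inputs():
    # Hypothetical download step; raise_for_status turns a bad HTTP
    # response into requests.exceptions.HTTPError.
    os.makedirs("data/input", exist_ok=True)
    resp = requests.get("https://example.com/train.tsv", timeout=30)
    resp.raise_for_status()
    with open("data/input/train.tsv", "wb") as f:
        f.write(resp.content)


def _test_custom_data_sketch():
    try:
        _download_inputs()
        train_dataset = pd.read_csv("data/input/train.tsv", delimiter="\t", quoting=3)
    except requests.exceptions.HTTPError:
        # With `pass` the test would keep running and hit a NameError on
        # train_dataset; `return` ends it cleanly when the data is missing.
        return
    print(train_dataset.shape)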

View File

@@ -1,6 +1,7 @@
 def test_classification_head():
     from flaml import AutoML
     import pandas as pd
+    import requests
     train_data = {
         "text": [
@@ -54,10 +55,17 @@ def test_classification_head():
     automl_settings["custom_hpo_args"] = {
         "model_path": "google/electra-small-discriminator",
         "output_dir": "test/data/output/",
-        "ckpt_per_epoch": 5,
+        "ckpt_per_epoch": 1,
         "fp16": False,
     }
-    automl.fit(
-        X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
-    )
+    try:
+        automl.fit(
+            X_train=X_train,
+            y_train=y_train,
+            X_val=X_val,
+            y_val=y_val,
+            **automl_settings
+        )
+    except requests.exceptions.HTTPError:
+        return

View File

@@ -43,6 +43,7 @@ def custom_metric(
 def test_custom_metric():
     from flaml import AutoML
     import pandas as pd
+    import requests
     train_data = {
         "sentence1": [
@@ -105,13 +106,20 @@ def test_custom_metric():
     automl_settings["custom_hpo_args"] = {
         "model_path": "google/electra-small-discriminator",
         "output_dir": "data/output/",
-        "ckpt_per_epoch": 5,
+        "ckpt_per_epoch": 1,
         "fp16": False,
     }
-    automl.fit(
-        X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
-    )
+    try:
+        automl.fit(
+            X_train=X_train,
+            y_train=y_train,
+            X_val=X_val,
+            y_val=y_val,
+            **automl_settings
+        )
+    except requests.exceptions.HTTPError:
+        return
     # testing calling custom metric in TransformersEstimator._compute_metrics_by_dataset_name

View File

@@ -6,6 +6,7 @@ import pytest
 def test_cv():
     from flaml import AutoML
     import pandas as pd
+    import requests
     train_data = {
         "sentence1": [
@@ -49,7 +50,10 @@ def test_cv():
         "fp16": False,
     }
-    automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
+    try:
+        automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
+    except requests.exceptions.HTTPError:
+        return
 if __name__ == "__main__":

View File

@@ -5,7 +5,7 @@ import pytest
 @pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")
 def test_mcc():
     from flaml import AutoML
+    import requests
     import pandas as pd
     train_data = {
@@ -219,13 +219,20 @@ def test_mcc():
     automl_settings["custom_hpo_args"] = {
         "model_path": "google/electra-small-discriminator",
         "output_dir": "test/data/output/",
-        "ckpt_per_epoch": 5,
+        "ckpt_per_epoch": 1,
         "fp16": False,
     }
-    automl.fit(
-        X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
-    )
+    try:
+        automl.fit(
+            X_train=X_train,
+            y_train=y_train,
+            X_val=X_val,
+            y_val=y_val,
+            **automl_settings
+        )
+    except requests.exceptions.HTTPError:
+        return
     y_pred = automl.predict(X_test)
     proba = automl.predict_proba(X_test)

View File

@@ -71,12 +71,12 @@ def test_regression():
     automl_settings["custom_hpo_args"] = {
         "model_path": "google/electra-small-discriminator",
         "output_dir": "test/data/output/",
-        "ckpt_per_epoch": 5,
+        "ckpt_per_epoch": 1,
         "fp16": False,
     }
     ray.shutdown()
     ray.init()
     automl.fit(
         X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
     )

View File

@@ -1,5 +1,6 @@
 import sys
 import pytest
+import requests
 @pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")
@@ -60,13 +61,20 @@ def test_summarization():
     automl_settings["custom_hpo_args"] = {
         "model_path": "patrickvonplaten/t5-tiny-random",
         "output_dir": "test/data/output/",
-        "ckpt_per_epoch": 5,
+        "ckpt_per_epoch": 1,
         "fp16": False,
     }
-    automl.fit(
-        X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
-    )
+    try:
+        automl.fit(
+            X_train=X_train,
+            y_train=y_train,
+            X_val=X_val,
+            y_val=y_val,
+            **automl_settings
+        )
+    except requests.exceptions.HTTPError:
+        return
     automl = AutoML()
     automl.retrain_from_log(
         X_train=X_train,

View File

@@ -1,5 +1,6 @@
 import sys
 import pytest
+import requests
 @pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")
@@ -728,13 +729,20 @@ def test_tokenclassification():
     automl_settings["custom_hpo_args"] = {
         "model_path": "bert-base-uncased",
         "output_dir": "test/data/output/",
-        "ckpt_per_epoch": 5,
+        "ckpt_per_epoch": 1,
         "fp16": False,
     }
-    automl.fit(
-        X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings
-    )
+    try:
+        automl.fit(
+            X_train=X_train,
+            y_train=y_train,
+            X_val=X_val,
+            y_val=y_val,
+            **automl_settings
+        )
+    except requests.exceptions.HTTPError:
+        return
 if __name__ == "__main__":

View File

@@ -64,7 +64,7 @@ def _test_xgboost(method="BlendSearch"):
     max_iter = 10
     for num_samples in [128]:
         time_budget_s = 60
-        for n_cpu in [4]:
+        for n_cpu in [2]:
             start_time = time.time()
             ray.shutdown()
             ray.init(num_cpus=n_cpu, num_gpus=0)
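The last hunk halves the CPU count requested for the xgboost tuning loop. A minimal sketch of the corresponding Ray setup in isolation; clamping to os.cpu_count is an added precaution for illustration, not something this commit does.

import os
import ray

n_cpu = 2
ray.shutdown()
# Give Ray at most the reduced CPU count (and no GPUs) so the tuning loop
# fits on small CI machines; never request more CPUs than the host has.
ray.init(num_cpus=min(n_cpu, os.cpu_count() or 1), num_gpus=0)
ray.shutdown()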