2021-09-09 11:54:47 +02:00
|
|
|
import logging
|
|
|
|
from pathlib import Path
|
|
|
|
|
2022-10-17 18:58:35 +02:00
|
|
|
from transformers import AutoTokenizer
|
|
|
|
|
2021-09-13 18:38:14 +02:00
|
|
|
from haystack.modeling.data_handler.processor import SquadProcessor
|
|
|
|
from haystack.modeling.utils import set_all_seeds
|
2021-09-09 11:54:47 +02:00
|
|
|
import torch
|
|
|
|
|
2022-01-26 18:12:55 +01:00
|
|
|
|
2023-04-11 10:33:43 +02:00
|
|
|
def test_processor_saving_loading(tmp_path, caplog, samples_path):
    """Round-trip a SquadProcessor through save()/load_from_dir() and verify that
    the dataset produced by the reloaded processor is identical to the original.

    :param tmp_path: pytest tmp_path fixture; directory the processor is saved to.
    :param caplog: pytest caplog fixture (may be None when invoked as a script).
    :param samples_path: base path of test sample files; expects a "qa" subdirectory
        containing "train-sample.json" and "dev-sample.json".
    """
    if caplog is not None:
        # Silence processor/tokenizer log output during the test run.
        caplog.set_level(logging.CRITICAL)

    # Fix all RNG seeds so both dataset_from_dicts passes are deterministic.
    set_all_seeds(seed=42)

    lang_model = "roberta-base"
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=lang_model, do_lower_case=False)

    processor = SquadProcessor(
        tokenizer=tokenizer,
        max_seq_len=256,
        label_list=["start_token", "end_token"],
        train_filename="train-sample.json",
        dev_filename="dev-sample.json",
        test_filename=None,
        data_dir=samples_path / "qa",
    )

    # Build a dataset with the freshly constructed processor.
    dicts = processor.file_to_dicts(file=samples_path / "qa" / "dev-sample.json")
    data, tensor_names, _ = processor.dataset_from_dicts(dicts=dicts, indices=[1])

    # Save, then reload the processor from disk.
    save_dir = tmp_path / Path("testsave/processor")
    processor.save(save_dir)
    processor = processor.load_from_dir(save_dir)

    # Rebuild the dataset with the reloaded processor from the same sample file.
    dicts = processor.file_to_dicts(file=samples_path / "qa" / "dev-sample.json")
    data_loaded, tensor_names_loaded, _ = processor.dataset_from_dicts(dicts, indices=[1])

    assert tensor_names == tensor_names_loaded
    # Guard against zip() silently truncating on a tensor-count mismatch,
    # then compare each tensor pair element-wise.
    assert len(data.tensors) == len(data_loaded.tensors)
    for original, loaded in zip(data.tensors, data_loaded.tensors):
        assert torch.all(torch.eq(original, loaded))
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Allow running this test directly as a script. The previous call passed a
    # single argument, but the function requires (tmp_path, caplog, samples_path)
    # and would raise TypeError.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        # NOTE(review): samples_path is normally supplied by a pytest fixture;
        # this assumes the sample files live in a "samples" directory next to
        # this file — TODO confirm against the fixture definition.
        test_processor_saving_loading(Path(tmp_dir), None, Path(__file__).parent / "samples")
|