haystack/test/modeling/test_tokenization.py


import logging
import pytest
import re
from transformers import (
    BertTokenizer,
    BertTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLNetTokenizer,
    XLNetTokenizerFast,
    ElectraTokenizerFast,
)
from tokenizers.pre_tokenizers import WhitespaceSplit
from haystack.modeling.model.tokenization import Tokenizer
import numpy as np

TEXTS = [
    "This is a sentence",
    "Der entscheidende Pass",
    "This is a sentence with multiple spaces",
    "力加勝北区ᴵᴺᵀᵃছজটডণত",
    "Thiso text is included tolod makelio sure Unicodeel is handled properly:",
    "This is a sentence...",
    "Let's see all on this text and. !23# neverseenwordspossible",
    """This is a sentence.
    With linebreak""",
    """Sentence with multiple
    newlines
    """,
    "and another one\n\n\nwithout space",
    "This is a sentence with tab",
    "This is a sentence with multiple tabs",
]


def test_basic_loading(caplog):
    caplog.set_level(logging.CRITICAL)

    # slow tokenizers
    tokenizer = Tokenizer.load(pretrained_model_name_or_path="bert-base-cased", do_lower_case=True, use_fast=False)
    assert type(tokenizer) == BertTokenizer
    assert tokenizer.basic_tokenizer.do_lower_case == True

    tokenizer = Tokenizer.load(pretrained_model_name_or_path="xlnet-base-cased", do_lower_case=True, use_fast=False)
    assert type(tokenizer) == XLNetTokenizer
    assert tokenizer.do_lower_case == True

    tokenizer = Tokenizer.load(pretrained_model_name_or_path="roberta-base", use_fast=False)
    assert type(tokenizer) == RobertaTokenizer

    # fast tokenizers
    tokenizer = Tokenizer.load(pretrained_model_name_or_path="bert-base-cased", do_lower_case=True)
    assert type(tokenizer) == BertTokenizerFast
    assert tokenizer.do_lower_case == True

    tokenizer = Tokenizer.load(pretrained_model_name_or_path="xlnet-base-cased", do_lower_case=True)
    assert type(tokenizer) == XLNetTokenizerFast
    assert tokenizer.do_lower_case == True

    tokenizer = Tokenizer.load(pretrained_model_name_or_path="roberta-base")
    assert type(tokenizer) == RobertaTokenizerFast


def test_bert_tokenizer_all_meta(caplog):
    caplog.set_level(logging.CRITICAL)
    lang_model = "bert-base-cased"
    tokenizer = Tokenizer.load(pretrained_model_name_or_path=lang_model, do_lower_case=False)

    basic_text = "Some Text with neverseentokens plus !215?#. and a combined-token_with/chars"
    tokenized = tokenizer.tokenize(basic_text)
    assert tokenized == [
        "Some", "Text", "with", "never", "##see", "##nto", "##ken", "##s", "plus",
        "!", "215", "?", "#", ".", "and", "a", "combined", "-", "token", "_", "with", "/", "ch", "##ars",
    ]

    encoded_batch = tokenizer.encode_plus(basic_text)
    encoded = encoded_batch.encodings[0]
    words = np.array(encoded.words)
    words[words == None] = -1
    start_of_word_single = [False] + list(np.ediff1d(words) > 0)
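    # encoded.words maps each token to the index of the word it came from (None for special tokens
    # like [CLS] and [SEP]); replacing None with -1 and checking where the word index increases
    # (np.ediff1d) flags exactly those tokens that start a new word, so e.g. "##see" stays False
    # because it continues the same word as "never".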
    assert encoded.tokens == [
        "[CLS]", "Some", "Text", "with", "never", "##see", "##nto", "##ken", "##s", "plus",
        "!", "215", "?", "#", ".", "and", "a", "combined", "-", "token", "_", "with", "/", "ch", "##ars", "[SEP]",
    ]
    assert [x[0] for x in encoded.offsets] == [
        0, 0, 5, 10, 15, 20, 23, 26, 29, 31, 36, 37, 40, 41, 42, 44,
        48, 50, 58, 59, 64, 65, 69, 70, 72, 0,
    ]
    assert start_of_word_single == [
        False, True, True, True, True, False, False, False, False,
        True, True, True, True, True, True, True, True, True, True, True, True, True, True, True,
        False, False,
    ]


def test_save_load(tmp_path, caplog):
    caplog.set_level(logging.CRITICAL)
    lang_names = ["bert-base-cased", "roberta-base", "xlnet-base-cased"]
    tokenizers = []
    for lang_name in lang_names:
        if "xlnet" in lang_name.lower():
            t = Tokenizer.load(lang_name, lower_case=False, use_fast=True, from_slow=True)
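            # from_slow=True makes transformers build the fast tokenizer by converting the slow,
            # sentencepiece-based XLNet tokenizer instead of loading a pre-built fast one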
        else:
            t = Tokenizer.load(lang_name, lower_case=False)
        t.add_tokens(new_tokens=["neverseentokens"])
        tokenizers.append(t)

    basic_text = "Some Text with neverseentokens plus !215?#. and a combined-token_with/chars"

    for tokenizer in tokenizers:
        tokenizer_type = tokenizer.__class__.__name__
        save_dir = f"{tmp_path}/testsave/{tokenizer_type}"
        tokenizer.save_pretrained(save_dir)

        tokenizer_loaded = Tokenizer.load(save_dir, tokenizer_class=tokenizer_type)

        encoded_before = tokenizer.encode_plus(basic_text).encodings[0]
        encoded_after = tokenizer_loaded.encode_plus(basic_text).encodings[0]

        data_before = {
            "tokens": encoded_before.tokens,
            "offsets": encoded_before.offsets,
            "words": encoded_before.words,
        }
        data_after = {"tokens": encoded_after.tokens, "offsets": encoded_after.offsets, "words": encoded_after.words}
        assert data_before == data_after


@pytest.mark.parametrize("model_name", ["bert-base-german-cased", "google/electra-small-discriminator"])
def test_fast_tokenizer_with_examples(caplog, model_name):
    fast_tokenizer = Tokenizer.load(model_name, lower_case=False, use_fast=True)
    tokenizer = Tokenizer.load(model_name, lower_case=False, use_fast=False)

    for text in TEXTS:
        # plain tokenize function
        tokenized = tokenizer.tokenize(text)
        fast_tokenized = fast_tokenizer.tokenize(text)

        assert tokenized == fast_tokenized


def test_all_tokenizer_on_special_cases(caplog):
    caplog.set_level(logging.CRITICAL)
    lang_names = ["bert-base-cased", "roberta-base", "xlnet-base-cased"]

    tokenizers = []
    for lang_name in lang_names:
        if "roberta" in lang_name:
            add_prefix_space = True
        else:
            add_prefix_space = False
        t = Tokenizer.load(lang_name, lower_case=False, add_prefix_space=add_prefix_space)
        tokenizers.append(t)

    texts = [
        "This is a sentence",
        "Der entscheidende Pass",
        "力加勝北区ᴵᴺᵀᵃছজটডণত",
        "Thiso text is included tolod makelio sure Unicodeel is handled properly:",
        "This is a sentence...",
        "Let's see all on this text and. !23# neverseenwordspossible" "This is a sentence with multiple spaces",
        """This is a sentence.
        With linebreak""",
        """Sentence with multiple
        newlines
        """,
        "and another one\n\n\nwithout space",
        "This is a sentence with multiple tabs",
    ]

    expected_to_fail = {(2, 1), (2, 5)}

    for i_tok, tokenizer in enumerate(tokenizers):
        for i_text, text in enumerate(texts):
            # Important: we don't assume to preserve whitespaces after tokenization.
            # This means: \t, \n " " etc will all resolve to a single " ".
            # This doesn't make a difference for BERT + XLNet but it does for roBERTa
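            # For example, RoBERTa's byte-level BPE encodes a preceding space into the token itself
            # ("is" vs. "Ġis"), so collapsing several spaces into one can change the resulting tokens,
            # which is why the whitespace handling matters for RoBERTa but not for BERT or XLNet.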
            test_passed = True

            # 1. original tokenize function from transformer repo on full sentence
            standardized_whitespace_text = " ".join(text.split())  # remove multiple whitespaces
            tokenized = tokenizer.tokenize(standardized_whitespace_text)

            # 2. Our tokenization method using a pretokenizer which can normalize multiple white spaces
            # This approach is used in NER
            pre_tokenizer = WhitespaceSplit()
            words_and_spans = pre_tokenizer.pre_tokenize_str(text)
            words = [x[0] for x in words_and_spans]
            word_spans = [x[1] for x in words_and_spans]
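            # illustrative sketch (not asserted here): for "and another one\n\n\nwithout space"
            # the pre-tokenizer returns pairs roughly like [("and", (0, 3)), ("another", (4, 11)),
            # ("one", (12, 15)), ("without", (18, 25)), ("space", (26, 31))], i.e. whitespace runs
            # are dropped while the spans still index into the original string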
            encoded = tokenizer.encode_plus(words, is_split_into_words=True, add_special_tokens=False).encodings[0]

            # verify that tokenization on full sequence is the same as the one on "whitespace tokenized words"
            if encoded.tokens != tokenized:
                test_passed = False

            # token offsets are originally relative to the beginning of the word
            # These lines convert them so they are relative to the beginning of the sentence
            token_offsets = []
            for ((start, end), w_index) in zip(encoded.offsets, encoded.words):
                word_start_ch = word_spans[w_index][0]
                token_offsets.append((start + word_start_ch, end + word_start_ch))
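            # e.g. a token with word-relative offsets (2, 5) inside a word starting at character 70
            # of the original text is re-based to sentence-relative offsets (72, 75)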
            # verify that offsets align back to original text
            if text == "力加勝北区ᴵᴺᵀᵃছজটডণত":
                # contains [UNK] that are impossible to match back to original text space
                continue
            for tok, (start, end) in zip(encoded.tokens, token_offsets):
                # subword-tokens have special chars depending on model type. In order to align with original text we need to get rid of them
                tok = re.sub(r"^(##|Ġ|▁)", "", tok)
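                # e.g. "##ars" -> "ars" (BERT WordPiece), "Ġtoken" -> "token" (RoBERTa byte-level BPE),
                # "▁token" -> "token" (XLNet sentencepiece)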
                # tok = tokenizer.decode(tokenizer.convert_tokens_to_ids(tok))
                original_tok = text[start:end]
                if tok != original_tok:
                    test_passed = False

            if (i_tok, i_text) in expected_to_fail:
                assert not test_passed, f"Behaviour of {tokenizer.__class__.__name__} has changed on text '{text}'"
            else:
                assert test_passed, f"Behaviour of {tokenizer.__class__.__name__} has changed on text '{text}'"


def test_bert_custom_vocab(caplog):
    caplog.set_level(logging.CRITICAL)
    lang_model = "bert-base-cased"
    tokenizer = Tokenizer.load(pretrained_model_name_or_path=lang_model, do_lower_case=False)

    # deprecated: tokenizer.add_custom_vocab("samples/tokenizer/custom_vocab.txt")
    tokenizer.add_tokens(new_tokens=["neverseentokens"])
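    # once "neverseentokens" is in the vocab it should come back as a single token instead of being
    # split into word pieces (compare the "never", "##see", ... split in test_bert_tokenizer_all_meta)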
    basic_text = "Some Text with neverseentokens plus !215?#. and a combined-token_with/chars"

    # original tokenizer from transformer repo
    tokenized = tokenizer.tokenize(basic_text)
    assert tokenized == [
        "Some", "Text", "with", "neverseentokens", "plus", "!", "215", "?", "#", ".",
        "and", "a", "combined", "-", "token", "_", "with", "/", "ch", "##ars",
    ]

    # ours with metadata
    encoded = tokenizer.encode_plus(basic_text, add_special_tokens=False).encodings[0]
    offsets = [x[0] for x in encoded.offsets]
    start_of_word_single = [True] + list(np.ediff1d(encoded.words) > 0)

    assert encoded.tokens == tokenized
    assert offsets == [0, 5, 10, 15, 31, 36, 37, 40, 41, 42, 44, 48, 50, 58, 59, 64, 65, 69, 70, 72]
    assert start_of_word_single == [
        True, True, True, True, True, True, True, True, True, True,
        True, True, True, True, True, True, True, True, True, False,
    ]


def test_fast_bert_custom_vocab(caplog):
    caplog.set_level(logging.CRITICAL)
    lang_model = "bert-base-cased"
    tokenizer = Tokenizer.load(pretrained_model_name_or_path=lang_model, do_lower_case=False, use_fast=True)

    # deprecated: tokenizer.add_custom_vocab("samples/tokenizer/custom_vocab.txt")
    tokenizer.add_tokens(new_tokens=["neverseentokens"])

    basic_text = "Some Text with neverseentokens plus !215?#. and a combined-token_with/chars"

    # original tokenizer from transformer repo
    tokenized = tokenizer.tokenize(basic_text)
    assert tokenized == [
        "Some", "Text", "with", "neverseentokens", "plus", "!", "215", "?", "#", ".",
        "and", "a", "combined", "-", "token", "_", "with", "/", "ch", "##ars",
    ]

    # ours with metadata
    encoded = tokenizer.encode_plus(basic_text, add_special_tokens=False).encodings[0]
    offsets = [x[0] for x in encoded.offsets]
    start_of_word_single = [True] + list(np.ediff1d(encoded.words) > 0)

    assert encoded.tokens == tokenized
    assert offsets == [0, 5, 10, 15, 31, 36, 37, 40, 41, 42, 44, 48, 50, 58, 59, 64, 65, 69, 70, 72]
    assert start_of_word_single == [
        True, True, True, True, True, True, True, True, True, True,
        True, True, True, True, True, True, True, True, True, False,
    ]


@pytest.mark.parametrize(
"model_name, tokenizer_type",
[("bert-base-german-cased", BertTokenizerFast), ("google/electra-small-discriminator", ElectraTokenizerFast)],
)
def test_fast_tokenizer_type(caplog, model_name, tokenizer_type):
caplog.set_level(logging.CRITICAL)
tokenizer = Tokenizer.load(model_name, use_fast=True)
assert type(tokenizer) is tokenizer_type
# See discussion in https://github.com/deepset-ai/FARM/pull/624 for reason to remove the test
# def test_fast_bert_tokenizer_strip_accents(caplog):
# caplog.set_level(logging.CRITICAL)
#
# tokenizer = Tokenizer.load("dbmdz/bert-base-german-uncased",
# use_fast=True,
# strip_accents=False)
# assert type(tokenizer) is BertTokenizerFast
# assert tokenizer.do_lower_case
# assert tokenizer._tokenizer._parameters['strip_accents'] is False


def test_fast_electra_tokenizer(caplog):
    caplog.set_level(logging.CRITICAL)
    tokenizer = Tokenizer.load("dbmdz/electra-base-german-europeana-cased-discriminator", use_fast=True)
    assert type(tokenizer) is ElectraTokenizerFast


@pytest.mark.parametrize("model_name", ["bert-base-cased", "distilbert-base-uncased", "deepset/electra-base-squad2"])
def test_detokenization_in_fast_tokenizers(model_name):
    tokenizer = Tokenizer.load(pretrained_model_name_or_path=model_name, use_fast=True)
    for text in TEXTS:
        encoded = tokenizer.encode_plus(text, add_special_tokens=False).encodings[0]

        detokenized = " ".join(encoded.tokens)
        detokenized = re.sub(r"(^|\s+)(##)", "", detokenized)
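        # rough sketch of the round trip: tokens like ["a", "sen", "##tence"] are joined to
        # "a sen ##tence" and the regex glues the "##" continuation back on, giving "a sentence";
        # this string is re-tokenized below and compared token by token against the original encoding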
        detokenized_ids = tokenizer(detokenized, add_special_tokens=False)["input_ids"]
        detokenized_tokens = [tokenizer.decode([tok_id]).strip() for tok_id in detokenized_ids]

        assert encoded.tokens == detokenized_tokens


if __name__ == "__main__":
    test_all_tokenizer_on_special_cases()