Commit
[T5 Tokenizer] Model has no fixed position ids - there is no hardcoded max length (huggingface#16990)

* [T5 Tokenizer] Model has no fixed position ids - there is no hardcoded max length

* [T5 Tokenizer] Model has no fixed position ids - there is no hardcoded max length

* correct t5 tokenizer

* correct t5 tokenizer

* fix test

* Apply suggestions from code review

Co-authored-by: Sylvain Gugger <[email protected]>

* finish

Co-authored-by: Sylvain Gugger <[email protected]>
patrickvonplaten and sgugger authored May 2, 2022
1 parent 1073f00 commit 31616b8
Showing 4 changed files with 70 additions and 1 deletion.
20 changes: 20 additions & 0 deletions src/transformers/models/t5/tokenization_t5.py
@@ -41,6 +41,8 @@
}
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
@@ -151,6 +153,24 @@ def __init__(
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)

@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
f"This tokenizer was incorrectly instantiated with a model max length of {deprecated_max_model_length} which will be corrected in Transformers v5.\n"
f"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n"
f"- Be aware that you SHOULD NOT rely on {pretrained_model_name_or_path} automatically truncating your input to {deprecated_max_model_length} when padding/encoding.\n"
f"- If you want to encode/pad to sequences longer than {deprecated_max_model_length} you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n"
f"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.",
FutureWarning,
)

return max_model_length

@property
def vocab_size(self):
return self.sp_model.get_piece_size() + self._extra_ids
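As a rough sketch of the behavior this hunk introduces (not part of the diff itself; it assumes a transformers build containing this commit, sentencepiece installed, and access to the t5-small checkpoint), loading a T5 tokenizer without an explicit model_max_length keeps the legacy 512 limit but now emits the FutureWarning defined above:

import warnings

from transformers import T5Tokenizer

# No explicit model_max_length: the deprecated 512 default is kept for
# backwards compatibility, but a FutureWarning is raised at load time.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")

print(tokenizer.model_max_length)                                  # 512
print(any(issubclass(w.category, FutureWarning) for w in caught))  # True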
21 changes: 21 additions & 0 deletions src/transformers/models/t5/tokenization_t5_fast.py
@@ -16,6 +16,7 @@


import os
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

@@ -50,6 +51,8 @@
},
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
@@ -142,6 +145,24 @@ def __init__(
self.can_save_slow_tokenizer = False if not self.vocab_file else True
self._extra_ids = extra_ids

@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
f"This tokenizer was incorrectly instantiated with a model max length of {deprecated_max_model_length} which will be corrected in Transformers v5.\n"
f"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n"
f"- Be aware that you SHOULD NOT rely on {pretrained_model_name_or_path} automatically truncating your input to {deprecated_max_model_length} when padding/encoding.\n"
f"- If you want to encode/pad to sequences longer than {deprecated_max_model_length} you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n"
f"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.",
FutureWarning,
)

return max_model_length

def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
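Conversely (same assumptions as the sketch above), passing model_max_length at instantiation makes _eventually_correct_t5_max_length return that value, so no warning is emitted and truncation uses the requested limit:

from transformers import T5TokenizerFast

# An explicit model_max_length overrides the deprecated 512 default and is
# what truncation=True falls back to when no per-call max_length is given.
tokenizer = T5TokenizerFast.from_pretrained("t5-small", model_max_length=1024)

batch = tokenizer(["I am a small frog " * 400], padding=True, truncation=True)
print(tokenizer.model_max_length)  # 1024
print(len(batch["input_ids"][0]))  # 1024, capped by the new limit rather than 512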
20 changes: 19 additions & 1 deletion src/transformers/tokenization_utils_base.py
@@ -1899,9 +1899,19 @@ def convert_added_tokens(obj: Union[AddedToken, Any]):
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
# wont index sequences longer than the number of positional embeddings

model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)

model_max_length = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
# TODO(PVP) - uncomment following line in Transformers v5
# init_kwargs["model_max_length"] = model_max_length
# TODO(PVP) - remove in Transformers v5
# ---
init_kwargs["model_max_length"] = cls._eventually_correct_t5_max_length(
pretrained_model_name_or_path, model_max_length, init_kwargs.get("model_max_length")
)
# ---

# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
@@ -1983,6 +1993,14 @@ def convert_added_tokens(obj: Union[AddedToken, Any]):

return tokenizer

@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
# This method should be deleted in Transformers v5
# Its only purpose is to potentially throw a warning
# that incorrectly defined max lengths of T5's tokenizer are used
# which we will correct in Transformers v5.
return max_model_length

def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
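The three branches of the hook can be exercised directly, as a hypothetical illustration only (the method is private and is normally called from from_pretrained; the checkpoint name in the last call is made up, and the same transformers build as above is assumed):

from transformers import T5Tokenizer

# An explicit init value that differs from the deprecated default wins.
print(T5Tokenizer._eventually_correct_t5_max_length("t5-small", 512, 1024))  # 1024

# No init value: the deprecated 512 is returned, alongside a FutureWarning.
print(T5Tokenizer._eventually_correct_t5_max_length("t5-small", 512, None))  # 512

# Checkpoints not listed in max_model_input_sizes fall through unchanged,
# mirroring the no-op defined on the base class.
print(T5Tokenizer._eventually_correct_t5_max_length("my/local-t5", 512, None))  # 512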
10 changes: 10 additions & 0 deletions tests/t5/test_tokenization_t5.py
@@ -223,6 +223,9 @@ def test_outputs_not_longer_than_maxlen(self):
["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
)
self.assertIsInstance(batch, BatchEncoding)
# Since T5 does NOT have a max input length,
# this test should be changed to the following in Transformers v5:
# self.assertEqual(batch.input_ids.shape, (2, 8001))
self.assertEqual(batch.input_ids.shape, (2, 512))

def test_eos_in_input(self):
@@ -361,6 +364,13 @@ def test_special_tokens_initialization_with_non_empty_additional_special_tokens(
),
)

# overwritten from `test_tokenization_common` since T5 has no max length
def test_pretrained_model_lists(self):
# We should have at least one default checkpoint for each tokenizer
# We should specify the max input length as well (used in some part to list the pretrained checkpoints)
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)

@slow
def test_tokenizer_integration(self):
# fmt: off
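The test keeps asserting the (2, 512) shape until v5. For callers who need longer sequences today, the new warning's advice can also be followed per call rather than at instantiation (a sketch under the same assumptions as above; T5 uses relative position embeddings, so the model itself has no hard input limit):

from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")  # legacy 512 default

# An explicit per-call max_length bypasses the deprecated model_max_length.
batch = tokenizer(["I am a small frog" * 1000], truncation=True, max_length=2048)
print(len(batch["input_ids"][0]))  # 2048, not the legacy 512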
