Clean up vision tests (huggingface#17024)
* Clean up tests

* Make fixup

Co-authored-by: Niels Rogge <[email protected]>
NielsRogge and Niels Rogge authored May 2, 2022
1 parent 4be8b95 commit 2de2c9e
Showing 7 changed files with 48 additions and 699 deletions.
134 changes: 17 additions & 117 deletions tests/beit/test_modeling_beit.py
@@ -96,9 +96,9 @@ def __init__(
         self.out_indices = out_indices
         self.num_labels = num_labels
 
-        # in BeiT, the expected seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
+        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
-        self.expected_seq_length = num_patches + 1
+        self.seq_length = num_patches + 1
 
     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
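
For context, the renamed seq_length follows the formula in the comment above. A quick sketch with assumed values (the 224-pixel images and 16-pixel patches of BeiT-base, not the tester's own smaller config):

    image_size = 224  # assumed, for illustration only
    patch_size = 16   # assumed, for illustration only
    num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches
    seq_length = num_patches + 1                   # 197 once the [CLS] token is prepended
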
@@ -136,16 +136,14 @@ def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
         model.to(torch_device)
         model.eval()
         result = model(pixel_values)
-        self.parent.assertEqual(
-            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_length, self.hidden_size)
-        )
+        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
 
     def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
         model = BeitForMaskedImageModeling(config=config)
         model.to(torch_device)
         model.eval()
         result = model(pixel_values)
-        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.expected_seq_length - 1, self.vocab_size))
+        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
 
     def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
         config.num_labels = self.type_sequence_label_size
@@ -155,7 +153,7 @@ def create_and_check_for_image_classification(self, config, pixel_values, labels
         result = model(pixel_values, labels=labels)
         self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
 
-    def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels):
+    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
         config.num_labels = self.num_labels
         model = BeitForSemanticSegmentation(config)
         model.to(torch_device)
@@ -200,8 +198,8 @@ def setUp(self):
     def test_config(self):
         self.config_tester.run_common_tests()
 
+    @unittest.skip(reason="BEiT does not use inputs_embeds")
     def test_inputs_embeds(self):
-        # BEiT does not use inputs_embeds
         pass
 
     def test_model_common_attributes(self):
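
Swapping the comment-plus-pass body for @unittest.skip means the runner now reports the test as skipped, together with the reason, instead of counting it as silently passed. A minimal sketch of the pattern (hypothetical test class, not part of this diff):

    import unittest

    class ExampleModelTest(unittest.TestCase):
        @unittest.skip(reason="this model does not use inputs_embeds")
        def test_inputs_embeds(self):
            pass  # never executed; the runner records the skip and its reason
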
@@ -229,9 +227,17 @@ def test_model(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)
 
-    def test_for_image_segmentation(self):
+    def test_for_masked_lm(self):
+        config_and_inputs = self.model_tester.prepare_config_and_inputs()
+        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
+
+    def test_for_image_classification(self):
+        config_and_inputs = self.model_tester.prepare_config_and_inputs()
+        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
+
+    def test_for_semantic_segmentation(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
+        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
 
     def test_training(self):
         if not self.model_tester.is_training:
@@ -267,13 +273,7 @@ def test_training_gradient_checkpointing(self):
                 or not model_class.supports_gradient_checkpointing
             ):
                 continue
-            # TODO: remove the following 3 lines once we have a MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
-            # this can then be incorporated into _prepare_for_class in test_modeling_common.py
-            elif model_class.__name__ == "BeitForSemanticSegmentation":
-                batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape
-                inputs_dict["labels"] = torch.zeros(
-                    [self.model_tester.batch_size, height, width], device=torch_device
-                ).long()
+
             model = model_class(config)
             model.gradient_checkpointing_enable()
             model.to(torch_device)
@@ -300,106 +300,6 @@ def test_initialization(self):
                         msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                     )
 
-    def test_attention_outputs(self):
-        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-        config.return_dict = True
-
-        # BEiT has a different seq_length
-        seq_len = self.model_tester.expected_seq_length
-
-        for model_class in self.all_model_classes:
-            inputs_dict["output_attentions"] = True
-            inputs_dict["output_hidden_states"] = False
-            config.return_dict = True
-            model = model_class(config)
-            model.to(torch_device)
-            model.eval()
-            with torch.no_grad():
-                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-            attentions = outputs.attentions
-            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
-
-            # check that output_attentions also work using config
-            del inputs_dict["output_attentions"]
-            config.output_attentions = True
-            model = model_class(config)
-            model.to(torch_device)
-            model.eval()
-            with torch.no_grad():
-                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-
-            attentions = outputs.attentions
-            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
-
-            self.assertListEqual(
-                list(attentions[0].shape[-3:]),
-                [self.model_tester.num_attention_heads, seq_len, seq_len],
-            )
-            out_len = len(outputs)
-
-            # Check attention is always last and order is fine
-            inputs_dict["output_attentions"] = True
-            inputs_dict["output_hidden_states"] = True
-            model = model_class(config)
-            model.to(torch_device)
-            model.eval()
-            with torch.no_grad():
-                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-
-            self.assertEqual(out_len + 1, len(outputs))
-
-            self_attentions = outputs.attentions
-
-            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
-            self.assertListEqual(
-                list(self_attentions[0].shape[-3:]),
-                [self.model_tester.num_attention_heads, seq_len, seq_len],
-            )
-
-    def test_hidden_states_output(self):
-        def check_hidden_states_output(inputs_dict, config, model_class):
-            model = model_class(config)
-            model.to(torch_device)
-            model.eval()
-
-            with torch.no_grad():
-                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-
-            hidden_states = outputs.hidden_states
-
-            expected_num_layers = getattr(
-                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
-            )
-            self.assertEqual(len(hidden_states), expected_num_layers)
-
-            # BEiT has a different seq_length
-            seq_length = self.model_tester.expected_seq_length
-
-            self.assertListEqual(
-                list(hidden_states[0].shape[-2:]),
-                [seq_length, self.model_tester.hidden_size],
-            )
-
-        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
-        for model_class in self.all_model_classes:
-            inputs_dict["output_hidden_states"] = True
-            check_hidden_states_output(inputs_dict, config, model_class)
-
-            # check that output_hidden_states also work using config
-            del inputs_dict["output_hidden_states"]
-            config.output_hidden_states = True
-
-            check_hidden_states_output(inputs_dict, config, model_class)
-
-    def test_for_masked_lm(self):
-        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
-
-    def test_for_image_classification(self):
-        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
-
     @slow
     def test_model_from_pretrained(self):
         for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
84 changes: 5 additions & 79 deletions tests/beit/test_modeling_flax_beit.py
@@ -75,9 +75,9 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
 
-        # in BeiT, the expected seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
+        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
-        self.expected_seq_length = num_patches + 1
+        self.seq_length = num_patches + 1
 
     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
@@ -108,14 +108,12 @@ def create_and_check_model(self, config, pixel_values, labels):
 
         model = FlaxBeitModel(config=config)
         result = model(pixel_values)
-        self.parent.assertEqual(
-            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_length, self.hidden_size)
-        )
+        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
 
     def create_and_check_for_masked_lm(self, config, pixel_values, labels):
         model = FlaxBeitForMaskedImageModeling(config=config)
         result = model(pixel_values)
-        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.expected_seq_length - 1, self.vocab_size))
+        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
 
     def create_and_check_for_image_classification(self, config, pixel_values, labels):
         config.num_labels = self.type_sequence_label_size
@@ -148,51 +146,7 @@ def setUp(self) -> None:
     def test_config(self):
         self.config_tester.run_common_tests()
 
-    # We need to override this test because in Beit, the seq_len equals the number of patches + 1
-    def test_attention_outputs(self):
-        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-        config.return_dict = True
-
-        seq_length = self.model_tester.expected_seq_length
-
-        for model_class in self.all_model_classes:
-            inputs_dict["output_attentions"] = True
-            inputs_dict["output_hidden_states"] = False
-            model = model_class(config)
-            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-            attentions = outputs.attentions
-            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
-
-            # check that output_attentions also work using config
-            del inputs_dict["output_attentions"]
-            config.output_attentions = True
-            model = model_class(config)
-            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-            attentions = outputs.attentions
-            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
-
-            self.assertListEqual(
-                list(attentions[0].shape[-3:]),
-                [self.model_tester.num_attention_heads, seq_length, seq_length],
-            )
-            out_len = len(outputs)
-
-            # Check attention is always last and order is fine
-            inputs_dict["output_attentions"] = True
-            inputs_dict["output_hidden_states"] = True
-            model = model_class(config)
-            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-
-            added_hidden_states = 1
-            self.assertEqual(out_len + added_hidden_states, len(outputs))
-
-            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
-            self.assertListEqual(
-                list(attentions[0].shape[-3:]),
-                [self.model_tester.num_attention_heads, seq_length, seq_length],
-            )
-
-    # We neeed to override this test because Beit's forward signature is different than text models.
+    # We need to override this test because Beit's forward signature is different than text models.
     def test_forward_signature(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
 
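The comment kept here (with its typo fixed) points at why the override exists: vision models take pixel_values rather than input_ids, so the text-oriented common test does not apply. As a sketch of what such a signature check typically asserts, using only the standard inspect module (the override's body is not shown in this hunk, so the details below are assumed):

    import inspect

    def check_forward_signature(model):
        # Collect the argument names of the model's __call__ in declaration order.
        signature = inspect.signature(model.__call__)
        arg_names = list(signature.parameters.keys())

        # A vision model is expected to take pixel_values as its first argument.
        assert arg_names[0] == "pixel_values"
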
@@ -229,34 +183,6 @@ def model_jitted(pixel_values, **kwargs):
             for jitted_output, output in zip(jitted_outputs, outputs):
                 self.assertEqual(jitted_output.shape, output.shape)
 
-    # We need to override this test because in Beit, the seq_len equals the number of patches + 1
-    def test_hidden_states_output(self):
-        def check_hidden_states_output(inputs_dict, config, model_class):
-            model = model_class(config)
-            seq_length = self.model_tester.expected_seq_length
-
-            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
-            hidden_states = outputs.hidden_states
-
-            self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
-
-            self.assertListEqual(
-                list(hidden_states[0].shape[-2:]),
-                [seq_length, self.model_tester.hidden_size],
-            )
-
-        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
-        for model_class in self.all_model_classes:
-            inputs_dict["output_hidden_states"] = True
-            check_hidden_states_output(inputs_dict, config, model_class)
-
-            # check that output_hidden_states also work using config
-            del inputs_dict["output_hidden_states"]
-            config.output_hidden_states = True
-
-            check_hidden_states_output(inputs_dict, config, model_class)
-
     def test_model(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)