Adjusting test for opset 18
Signed-off-by: Boris Fomitchev <[email protected]>
borisfom committed Aug 28, 2023
1 parent 1d01e5c commit a8d9e43
Showing 1 changed file with 5 additions and 3 deletions.
tests/L0/run_fused_layer_norm/test_fused_layer_norm.py (8 changes: 5 additions & 3 deletions)
@@ -268,9 +268,11 @@ def _verify_export(self, fused, fused_x):
         # check that export() is working
         onnx_str = torch.onnx.export_to_pretty_string(fused, (fused_x,),
                                                       input_names=['x_in'],
+                                                      opset_version=18,
         )
+        print(onnx_str)
         assert 'x_in' in onnx_str
-        assert 'ReduceMean' in onnx_str
+        assert 'ReduceMean' in onnx_str or 'LayerNormalization' in onnx_str
 
     def test_rms_export(self):
         batch_size = 16
@@ -279,7 +281,7 @@ def test_rms_export(self):
             normalized_shape=normalized_shape, elementwise_affine=True
         ).cuda()
         fused_m = MixedFusedRMSNorm(
-            normalized_shape=normalized_shape, elementwise_affine=True
+            normalized_shape=normalized_shape
         ).cuda()
         native_x, fused_x = _prep_inputs(batch_size, normalized_shape, torch.float32)
         self._verify_export(fused, fused_x)
@@ -292,7 +294,7 @@ def test_layer_norm_export(self):
             normalized_shape=normalized_shape, elementwise_affine=True
         ).cuda()
         fused_m = MixedFusedLayerNorm(
-            normalized_shape=normalized_shape, elementwise_affine=True
+            normalized_shape=normalized_shape
         ).cuda()
         native_x, fused_x = _prep_inputs(batch_size, normalized_shape, torch.float32)
         self._verify_export(fused, fused_x)
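Background on the assertion change: ONNX opset 17 introduced a native LayerNormalization operator, so when exporting with opset_version 17 or higher the PyTorch exporter may emit a single LayerNormalization node rather than the decomposed ReduceMean/Sub/Div subgraph produced under earlier opsets, and the assertion has to accept either form. Below is a minimal sketch of checking the exported ops, using a plain torch.nn.LayerNorm as a stand-in for the fused module (the model, shapes, and names are illustrative assumptions, not taken from the test):

import io

import onnx
import torch

# Plain LayerNorm as a stand-in for the fused module under test.
model = torch.nn.LayerNorm((32, 64)).eval()
x = torch.randn(16, 32, 64)

# Export to an in-memory buffer, then reload the graph for inspection.
buf = io.BytesIO()
torch.onnx.export(model, (x,), buf, input_names=['x_in'], opset_version=18)
graph = onnx.load_model_from_string(buf.getvalue()).graph

op_types = {node.op_type for node in graph.node}
# Opset >= 17 can yield a single LayerNormalization node; older opsets
# decompose the normalization into ReduceMean and elementwise ops.
assert 'LayerNormalization' in op_types or 'ReduceMean' in op_types

Accepting either branch keeps the check meaningful across exporter versions, which mirrors the relaxed assertion in the diff above.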
