Skip to content

Commit

Permalink
Port hypot method_tests() to OpInfo (pytorch#55140)
Browse files Browse the repository at this point in the history
Summary:
Related pytorch#54261

Pull Request resolved: pytorch#55140

Reviewed By: ngimel

Differential Revision: D27562164

Pulled By: mruberry

fbshipit-source-id: fc698ddc624d2abf5d540aac76baa5d398993f1f
  • Loading branch information
RockingJavaBean authored and facebook-github-bot committed Apr 9, 2021
1 parent f3367f9 commit 53f9fc1
Showing 1 changed file with 14 additions and 1 deletion.
15 changes: 14 additions & 1 deletion torch/testing/_internal/common_methods_invocations.py
Original file line number Diff line number Diff line change
Expand Up @@ -737,6 +737,14 @@ def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **

return (SampleInput(tensors),)

def sample_inputs_hypot(op_info, device, dtype, requires_grad, **kwargs):
    """Return sample inputs for torch.hypot.

    Produces a single SampleInput pairing two (S, S) tensors: the primary
    input and the `other` argument. Accepts **kwargs for consistency with
    the other sample_inputs_* functions in this file (e.g.
    sample_inputs_gather), since the OpInfo machinery may forward extra
    keyword arguments.
    """
    # Avoid shadowing the builtin `input`; name the second tensor after
    # hypot's `other` parameter rather than the generic `args`.
    first = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
    other = make_tensor((S, S), device, dtype, requires_grad=requires_grad)

    return (
        SampleInput(first, args=(other,)),
    )

def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
Expand Down Expand Up @@ -3867,6 +3875,12 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
skips=(
# hstack does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),),),
OpInfo('hypot',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=sample_inputs_hypot,
),
OpInfo('vstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
Expand Down Expand Up @@ -4347,7 +4361,6 @@ def method_tests():
('fmod', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),
('fmod', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),
('fmod', (S, S, S), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor_broadcast_rhs'),
('hypot', (S, S), ((S, S),)),
('remainder', (S, S, S), (1.5,), '', (True,)),
('remainder', (), (1.5,), 'scalar', (True,)),
('remainder', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),
Expand Down

0 comments on commit 53f9fc1

Please sign in to comment.