[static runtime] Fix up deprecated exact equality in tests (pytorch#52617)

Summary:
Pull Request resolved: pytorch#52617

Swaps `.equal` with `torch::allclose` in the static runtime tests.

The tests are currently broken.
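
For context only (not from this commit): a minimal standalone sketch, assuming a libtorch (PyTorch C++ API) build, of why exact tensor equality is brittle for floating-point outputs while `torch::allclose` with an rtol of 1e-6 tolerates tiny differences. The ones-tensor and the 1e-7 nudge below are illustrative choices, not values from the tests.

// Illustration only; not part of this commit.
#include <torch/torch.h>
#include <iostream>

int main() {
  at::Tensor a = torch::ones({2, 3});
  // Nudge every element by 1e-7: smaller than the 1e-6 rtol used in the
  // tests, but large enough that the float32 bit patterns differ.
  at::Tensor b = a + 1e-7;

  // Exact elementwise equality fails on the tiny difference.
  bool exact = a.equal(b);                   // false
  // Approximate comparison; the third positional argument is rtol
  // (atol keeps its default of 1e-8).
  bool close = torch::allclose(a, b, 1e-6);  // true

  std::cout << "equal: " << exact << ", allclose: " << close << std::endl;
  return 0;
}

This is the same swap the diff below applies to each test's output comparison.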

Test Plan: buck test caffe2/benchmarks/static_runtime:static_runtime_cpptest -- --run-disabled

Reviewed By: bertmaher, maratsubkhankulov, yinghai

Differential Revision: D26585079

fbshipit-source-id: 9bd2a7b87208301415a8925f95c69fe44accf159
bwasti authored and facebook-github-bot committed Feb 23, 2021
1 parent 7f4dff5 commit a0652c8
Showing 1 changed file with 8 additions and 10 deletions: benchmarks/static_runtime/test_static_runtime.cc
@@ -154,7 +154,7 @@ TEST(StaticRuntime, LongModel) {
torch::jit::StaticRuntime runtime(g);
at::Tensor output_2 = runtime.run(input_tensors)[0];
runtime.check_for_memory_leak();
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));
}

TEST(StaticRuntime, TrivialModel) {
@@ -173,7 +173,7 @@ TEST(StaticRuntime, TrivialModel) {
torch::jit::StaticRuntime runtime(g);
at::Tensor output_2 = runtime.run(input_tensors)[0];
runtime.check_for_memory_leak();
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));
}

TEST(StaticRuntime, LeakyReLU) {
@@ -190,7 +190,7 @@ TEST(StaticRuntime, LeakyReLU) {
torch::jit::StaticRuntime runtime(g);
at::Tensor output_2 = runtime.run(input_tensors)[0];
runtime.check_for_memory_leak();
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));
}

TEST(StaticRuntime, DeepWide) {
@@ -214,8 +214,7 @@ TEST(StaticRuntime, DeepWide) {
std::vector<at::Tensor> input_tensors({ad_emb_packed, user_emb, wide});
at::Tensor output_2 = runtime.run(input_tensors)[0];
runtime.check_for_memory_leak();
-
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));
}
}
}
@@ -242,7 +241,7 @@ TEST(StaticRuntime, KWargsAPI_1) {
runtime.check_for_memory_leak();

at::Tensor output_2 = getTensor(output_ivalue);
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));

// check for output aliasing
EXPECT_EQ(output_ivalue.use_count(), 1);
@@ -287,7 +286,7 @@ TEST(StaticRuntime, KWargsAPI_2) {
runtime.check_for_memory_leak();

at::Tensor output_2 = getTensor(output_ivalue);
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));

// check for output aliasing
EXPECT_EQ(output_ivalue.use_count(), 1);
@@ -331,8 +330,7 @@ TEST(StaticRuntime, CleanUpMemory) {
{ad_emb_packed, user_emb, wide});
at::Tensor output_2 = runtime.run(input_tensors)[0];
runtime.check_for_memory_leak();
-
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));
}
}
}
@@ -364,7 +362,7 @@ TEST(StaticRuntime, FusionPass) {
}
EXPECT_TRUE(hit);
auto output_2 = getTensor(module.forward(inputs));
- EXPECT_TRUE(output_1.equal(output_2));
+ EXPECT_TRUE(torch::allclose(output_1, output_2, 1e-6));
}
}
}
