Revert "Enable Wunused-variable on tests (pytorch#127161)"
This reverts commit 6436a64.

Reverted pytorch#127161 on behalf of https://github.com/malfet due to Broke ReduceTests on Windows (by testing more), see https://github.com/pytorch/pytorch/actions/runs/9274944325/job/25519484937 ([comment](pytorch#127161 (comment)))
pytorchmergebot committed May 29, 2024
1 parent 85172fb commit 52e448a
Showing 6 changed files with 24 additions and 4 deletions.
12 changes: 10 additions & 2 deletions aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp
@@ -731,7 +731,8 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenFallbackKernelWithout
 }
 
 TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
-  bool called = false;
+  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+  bool called;
   std::optional<Tensor> called_arg2 = c10::nullopt;
   std::optional<int64_t> called_arg3 = c10::nullopt;
   std::optional<std::string> called_arg4 = c10::nullopt;
@@ -770,7 +771,8 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
 }
 
 TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
-  bool called = false;
+  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+  bool called;
   std::optional<Tensor> called_arg2 = c10::nullopt;
   std::optional<int64_t> called_arg3 = c10::nullopt;
   std::optional<std::string> called_arg4 = c10::nullopt;
@@ -812,6 +814,12 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
 }
 
 TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
+  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+  bool called;
+  std::optional<Tensor> called_arg2 = c10::nullopt;
+  std::optional<int64_t> called_arg3 = c10::nullopt;
+  std::optional<std::string> called_arg4 = c10::nullopt;
+
   auto registrar = RegisterOperators().op(
     "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
     [] (Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
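The three hunks above restore the pre-#127161 form of these tests: an uninitialized bool called guarded by a clang-tidy suppression instead of bool called = false;. As a minimal sketch of how that suppression behaves (illustrative names, not code from this diff), NOLINTNEXTLINE silences the named check only for the declaration immediately below it:

  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  bool called;        // init-variables check suppressed for this line only
  bool another_flag;  // a later uninitialized declaration would still be flagged by clang-tidy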
1 change: 1 addition & 0 deletions
@@ -307,6 +307,7 @@ void stackBasedKernel(const OperatorHandle&, c10::Stack* stack) {
 }
 
 TEST(OperatorRegistrationTest, whenRegisteringMultipleKernelsByNameAndNoneCanInferSchema_thenFails) {
+  bool called_kernel = false;
   expectThrows<c10::Error>([&] {
     auto registrar1 = c10::RegisterOperators().op("_test::dummy", c10::RegisterOperators::options()
       .kernel<&stackBasedKernel>(c10::DispatchKey::CPU)
8 changes: 7 additions & 1 deletion aten/src/ATen/test/pow_test.cpp
@@ -10,6 +10,12 @@
 #include <vector>
 #include <type_traits>
 
+#ifdef _WIN32
+#define DISABLED_ON_WINDOWS(x) DISABLED_##x
+#else
+#define DISABLED_ON_WINDOWS(x) x
+#endif
+
 using namespace at;
 
 namespace {
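The restored DISABLED_ON_WINDOWS macro leans on GoogleTest's convention that a test whose name begins with DISABLED_ is compiled but skipped at run time. A hedged usage sketch (the suite and test names below are illustrative, not taken from this diff):

  // Expands to TEST(PowTest, DISABLED_IntTensorPow) on Windows, TEST(PowTest, IntTensorPow) elsewhere.
  // GoogleTest still compiles the body but only runs it when --gtest_also_run_disabled_tests is passed.
  TEST(PowTest, DISABLED_ON_WINDOWS(IntTensorPow)) {
    // test body as usual
  }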
@@ -198,7 +204,7 @@ void tensor_pow_tensor(const Vals vals, c10::ScalarType vals_dtype, Pows pows, c
   std::cout.precision(dbl::max_digits10);
 
   const auto vals_tensor = torch::tensor(vals, vals_dtype);
-  for ([[maybe_unused]] const auto shirt : c10::irange(pows.size())) {
+  for (const auto shift : c10::irange(pows.size())) {
     const auto pows_tensor = torch::tensor(pows, pows_dtype);
 
     const auto actual_pow = vals_tensor.pow(pows_tensor);
3 changes: 2 additions & 1 deletion aten/src/ATen/test/reduce_ops_test.cpp
@@ -9,8 +9,9 @@ TEST(ReduceOpsTest, MaxValuesAndMinValues) {
   const int W = 10;
   const int H = 10;
   if (hasCUDA()) {
+    // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
     for (const auto dtype : {kHalf, kFloat, kDouble, kShort, kInt, kLong}) {
-      auto a = at::rand({H, W}, TensorOptions(kCUDA).dtype(dtype));
+      auto a = at::rand({H, W}, TensorOptions(kCUDA).dtype(at::kHalf));
       ASSERT_FLOAT_EQ(
         a.amax(c10::IntArrayRef{0, 1}).item<double>(),
         a.max().item<double>()
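This hunk is the one the revert message points at: with the restored line, a is always allocated as at::kHalf, so the loop's dtype is never read (hence the clang-analyzer dead-store suppression) and only half precision is exercised on CUDA. #127161 had switched the allocation to dtype(dtype), which made the test cover all six dtypes in the loop and, per the message above, broke ReduceTests on Windows.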
1 change: 1 addition & 0 deletions aten/src/ATen/test/scalar_test.cpp
@@ -82,6 +82,7 @@ TEST(TestScalar, TestScalar) {
     // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
     ASSERT_NO_THROW(gen.set_current_seed(std::random_device()()));
   }
+  auto&& C = at::globalContext();
   if (at::hasCUDA()) {
     auto t2 = zeros({4, 4}, at::kCUDA);
     cout << &t2 << "\n";
3 changes: 3 additions & 0 deletions caffe2/CMakeLists.txt
@@ -1783,6 +1783,9 @@ if(BUILD_TEST)
     target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
     target_include_directories(${test_name} PRIVATE $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/include>)
     target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE})
+    if(NOT MSVC)
+      target_compile_options(${test_name} PRIVATE -Wno-unused-variable)
+    endif()
     add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
     if(INSTALL_TEST)
       install(TARGETS ${test_name} DESTINATION test)
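Re-adding -Wno-unused-variable for the non-MSVC test targets turns the warning that #127161 had enabled back off. As a rough, generic illustration of the diagnostic class involved (not code from this repository):

  void example_test_helper() {
    int leftover = 42;  // with -Wunused-variable, GCC/Clang warn: unused variable 'leftover'
  }                     // with -Wno-unused-variable, the diagnostic is suppressed again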
