[codemod][fbcode/caffe2] Apply clang-format update fixes
Test Plan: Sandcastle and visual inspection.

Reviewed By: igorsugak

Differential Revision: D25849205

fbshipit-source-id: ef664c1ad4b3ee92d5c020a5511b4ef9837a09a0
zertosh authored and facebook-github-bot committed Jan 9, 2021
1 parent d4c1684 · commit 8530c65
Showing 141 changed files with 615 additions and 448 deletions.
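Most hunks in this codemod show the same shape of change: when a braced initializer list passed to a call no longer fits on one line, the updated clang-format breaks immediately after the call's opening parenthesis and indents the whole list as a block, instead of bin-packing the elements and aligning the wrapped ones under the first element. A minimal sketch of that before/after pattern follows; makeList and its arguments are hypothetical stand-ins, not code from this commit, and clang-format only wraps like this once the statement exceeds the column limit. Sweeps of this kind are typically produced by re-running clang-format in-place (e.g. clang-format -i) over the affected files after a toolchain upgrade, though the exact tooling used here is not shown.

#include <string>
#include <vector>

// Hypothetical helper standing in for calls such as createStack() in the
// diffs below; names and values are illustrative only.
std::vector<std::string> makeList(std::vector<std::string> items) {
  return items;
}

int main() {
  // Pre-update output: the braced list opens on the call line and wrapped
  // elements are aligned under the first element, e.g.
  //
  //   auto list = makeList({std::string("alpha"),
  //                         std::string("beta"),
  //                         std::string("gamma")});
  //
  // Post-update output: break right after the call's '(' and indent the
  // whole braced list as a block, matching the reformatted hunks below.
  auto list = makeList(
      {std::string("alpha"),
       std::string("beta"),
       std::string("gamma")});
  return static_cast<int>(list.size());
}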
17 changes: 9 additions & 8 deletions test/cpp/jit/test_alias_analysis.cpp
@@ -906,14 +906,15 @@ graph():
}

TEST(WildcardsTest, Basic) {
RegisterOperators reg({Operator(
"prim::returns_wildcard(Tensor a) -> Tensor(*)",
[](Stack* stack) {},
aliasAnalysisFromSchema()),
Operator(
"prim::writes(Tensor(z!) a) -> Tensor(a)",
[](Stack* stack) {},
aliasAnalysisFromSchema())});
RegisterOperators reg(
{Operator(
"prim::returns_wildcard(Tensor a) -> Tensor(*)",
[](Stack* stack) {},
aliasAnalysisFromSchema()),
Operator(
"prim::writes(Tensor(z!) a) -> Tensor(a)",
[](Stack* stack) {},
aliasAnalysisFromSchema())});
const auto returns_wildcard =
Symbol::fromQualString("prim::returns_wildcard");
const auto writes = Symbol::fromQualString("prim::writes");
44 changes: 24 additions & 20 deletions test/cpp/jit/test_argument_spec.cpp
@@ -50,21 +50,23 @@ TEST(ArgumentSpecTest, CompleteArgumentSpec_CUDA) {
auto const GF = at::CUDA(at::kFloat);
auto const GD = at::CUDA(at::kDouble);

auto list = createStack({var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});
auto list = createStack(
{var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});

// make sure we have some non-standard strides
list[1].toTensor().transpose_(0, 1);

// same list but different backing values
auto list2 = createStack({var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});
auto list2 = createStack(
{var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});
list2[1].toTensor().transpose_(0, 1);

CompleteArgumentSpec a(true, list);
@@ -142,21 +144,23 @@ TEST(ArgumentSpecTest, Basic_CUDA) {

ArgumentSpecCreator arg_spec_creator(*graph);

auto list = createStack({var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});
auto list = createStack(
{var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});

// make sure we have some non-standard strides
list[1].toTensor().transpose_(0, 1);

// same list but different backing values
auto list2 = createStack({var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});
auto list2 = createStack(
{var(CF, {1}, true),
var(CD, {1, 2}, false),
var(GF, {}, true),
var(GD, {4, 5, 6}, false),
undef()});
list2[1].toTensor().transpose_(0, 1);

ArgumentSpec a = arg_spec_creator.create(true, list);
131 changes: 69 additions & 62 deletions test/cpp/jit/test_gpu.cpp
@@ -2681,12 +2681,13 @@ TEST(NVFuserTest, FusionBinaryOps_CUDA) {
using OpTuple = std::tuple<AtenFuncSig, BinaryOpType, std::string>;

// see [Note: explicit tuple type for uniform initialization list]
std::vector<OpTuple> logic_ops{OpTuple{at::eq, BinaryOpType::Eq, "eq"},
OpTuple{at::ge, BinaryOpType::GE, "ge"},
OpTuple{at::gt, BinaryOpType::GT, "gt"},
OpTuple{at::le, BinaryOpType::LE, "le"},
OpTuple{at::lt, BinaryOpType::LT, "lt"},
OpTuple{at::ne, BinaryOpType::NE, "ne"}};
std::vector<OpTuple> logic_ops{
OpTuple{at::eq, BinaryOpType::Eq, "eq"},
OpTuple{at::ge, BinaryOpType::GE, "ge"},
OpTuple{at::gt, BinaryOpType::GT, "gt"},
OpTuple{at::le, BinaryOpType::LE, "le"},
OpTuple{at::lt, BinaryOpType::LT, "lt"},
OpTuple{at::ne, BinaryOpType::NE, "ne"}};

std::for_each(logic_ops.begin(), logic_ops.end(), [](OpTuple& op) {
test_op(
@@ -4184,13 +4185,14 @@ TEST(NVFuserTest, FusionSoftmax1DNormalized_CUDA) {
sub_tv3->computeAt(sum_exp_rf_tv9, -1);
sub_tv3_copy->computeAt(output_tv7, -1);

TensorView* tensors_to_parallelize[] = {max_val_tv1,
bcast_max_tv2,
sum_exp_tv5,
bcast_sum_tv6,
output_tv7,
max_val_rf_tv8,
sum_exp_rf_tv9};
TensorView* tensors_to_parallelize[] = {
max_val_tv1,
bcast_max_tv2,
sum_exp_tv5,
bcast_sum_tv6,
output_tv7,
max_val_rf_tv8,
sum_exp_rf_tv9};

for (auto tv : tensors_to_parallelize) {
tv->axis(-1)->parallelize(ParallelType::TIDx);
@@ -4318,13 +4320,14 @@ TEST(NVFuserTest, FusionSoftmax3DNormalized_CUDA) {
sub_tv3->computeAt(sum_exp_rf_tv9, -1);
sub_tv3_copy->computeAt(output_tv7, -1);

TensorView* tensors_to_parallelize[] = {max_val_tv1,
bcast_max_tv2,
sum_exp_tv5,
bcast_sum_tv6,
output_tv7,
max_val_rf_tv8,
sum_exp_rf_tv9};
TensorView* tensors_to_parallelize[] = {
max_val_tv1,
bcast_max_tv2,
sum_exp_tv5,
bcast_sum_tv6,
output_tv7,
max_val_rf_tv8,
sum_exp_rf_tv9};

for (auto tv : tensors_to_parallelize) {
tv->axis(0)->parallelize(ParallelType::BIDx);
@@ -5931,15 +5934,16 @@ TEST(NVFuserTest, FusionSmemDynamicPersistentSoftmax2D_CUDA) {
cache_x->setMemoryType(MemoryType::Shared);
exp->setMemoryType(MemoryType::Shared);

std::vector<TensorView*> all_tensors({x,
cache_x,
max_val,
bcast_max,
x_max_sub,
exp,
sum_exp,
bcast_sum,
softmax});
std::vector<TensorView*> all_tensors(
{x,
cache_x,
max_val,
bcast_max,
x_max_sub,
exp,
sum_exp,
bcast_sum,
softmax});

auto tidx = new Int();
fusion.addInput(tidx);
@@ -6168,25 +6172,27 @@ TEST(NVFuserTest, FusionPersistentBatchNormLocalShared_CUDA) {
std::vector<TensorView*> common_tensors(
{x_sum, x_sum_bcast, x_mean, var_sum, var_sum_bcast, var, var_eps, rvar});

std::vector<TensorView*> static_tensors({sx,
sx_cache,
sx_sum,
sx_mean_sub,
sx_mean_sub_pow,
sx_var_sum,
sx_norm,
sx_norm_gamma,
sx_norm_gamma_beta});

std::vector<TensorView*> dynamic_tensors({dx,
dx_cache,
dx_sum,
dx_mean_sub,
dx_mean_sub_pow,
dx_var_sum,
dx_norm,
dx_norm_gamma,
dx_norm_gamma_beta});
std::vector<TensorView*> static_tensors(
{sx,
sx_cache,
sx_sum,
sx_mean_sub,
sx_mean_sub_pow,
sx_var_sum,
sx_norm,
sx_norm_gamma,
sx_norm_gamma_beta});

std::vector<TensorView*> dynamic_tensors(
{dx,
dx_cache,
dx_sum,
dx_mean_sub,
dx_mean_sub_pow,
dx_var_sum,
dx_norm,
dx_norm_gamma,
dx_norm_gamma_beta});

std::vector<TensorView*> all_tensors;
all_tensors.insert(
@@ -6309,20 +6315,21 @@ TEST(NVFuserTest, FusionSmemDynamicPersistentBatchNorm_CUDA) {
cache_x->setMemoryType(MemoryType::Shared);
x_mean_sub->setMemoryType(MemoryType::Shared);

std::vector<TensorView*> all_tensors({x_sum,
x_mean,
cache_x,
x_sum_bcast,
x_mean_sub,
x_mean_sub_pow,
var_sum,
var_sum_bcast,
var,
var_eps,
rvar,
norm,
norm_gamma,
norm_gamma_beta});
std::vector<TensorView*> all_tensors(
{x_sum,
x_mean,
cache_x,
x_sum_bcast,
x_mean_sub,
x_mean_sub_pow,
var_sum,
var_sum_bcast,
var,
var_eps,
rvar,
norm,
norm_gamma,
norm_gamma_beta});

auto tidx = new Int();
fusion.addInput(tidx);
4 changes: 2 additions & 2 deletions test/cpp/jit/test_misc.cpp
@@ -1044,7 +1044,7 @@ TEST(RecordFunctionTest, Callbacks) {
ids.clear();
{ // START: global test
addGlobalCallback(RecordFunctionCallback(
[](const RecordFunction &
[](const RecordFunction&
/* unused */) -> std::unique_ptr<at::ObserverContext> {
auto ctx = std::make_unique<TestContext>();
ctx->a = 123;
@@ -1070,7 +1070,7 @@ TEST(RecordFunctionTest, Callbacks) {
const int test_val = 234;
const std::string test_str = "test thread str";
addThreadLocalCallback(RecordFunctionCallback(
[](const RecordFunction &
[](const RecordFunction&
/* unused */) -> std::unique_ptr<at::ObserverContext> {
auto ctx = std::make_unique<TestContext>();
ctx->a = 234;
7 changes: 4 additions & 3 deletions test/cpp/tensorexpr/test_conv.cpp
@@ -75,9 +75,10 @@ TEST(Conv, Conv2D) {

at::Tensor result = at::empty_like(ref);
te::SimpleIREvaluator cg(s, {inputB, filterB, conv});
cg.call({input.data_ptr<float>(),
filter.data_ptr<float>(),
result.data_ptr<float>()});
cg.call(
{input.data_ptr<float>(),
filter.data_ptr<float>(),
result.data_ptr<float>()});

ASSERT_TRUE(at::allclose(ref, result, 1e-3, 1e-3));
}
18 changes: 9 additions & 9 deletions test/cpp/tensorexpr/test_llvm.cpp
@@ -294,15 +294,15 @@ TEST(LLVM, LetTestMultitype) {
std::vector<void*> args({v.data()});
VarHandle x("x", kByte);
VarHandle y("y", kHalf);
auto block =
Block::make({Let::make(x, 3),
Let::make(y, 6.f),
a.store(
{0},
Cast::make(
kDouble,
ExprHandle(2.f) +
(x * ExprHandle(3.f) + y * ExprHandle(4.f))))});
auto block = Block::make(
{Let::make(x, 3),
Let::make(y, 6.f),
a.store(
{0},
Cast::make(
kDouble,
ExprHandle(2.f) +
(x * ExprHandle(3.f) + y * ExprHandle(4.f))))});

LLVMCodeGen cg(block, {a});
ASSERT_EQ(cg.value<int>(args), 0);
30 changes: 16 additions & 14 deletions test/cpp/tensorexpr/test_loopnest.cpp
@@ -2606,8 +2606,9 @@ TEST(LoopNest, UnrollMultipleStatements) {
x,
0,
kTotalSize,
Block::make({Store::make(a_buf, {x}, x * 2),
Store::make(b_buf, {x}, Load::make(a_buf, {x}, 1))}));
Block::make(
{Store::make(a_buf, {x}, x * 2),
Store::make(b_buf, {x}, Load::make(a_buf, {x}, 1))}));
Block::make({f});
Stmt* unrolled = nullptr;
LoopNest::unroll(f, &unrolled);
@@ -2658,9 +2659,10 @@ TEST(LoopNest, UnrollWithLet) {
x,
0,
kTotalSize,
Block::make({Let::make(e, 7),
Store::make(a_buf, {x}, e),
Store::make(b_buf, {x}, e + 1)}));
Block::make(
{Let::make(e, 7),
Store::make(a_buf, {x}, e),
Store::make(b_buf, {x}, e + 1)}));
Block::make({f});
Stmt* unrolled = nullptr;
LoopNest::unroll(f, &unrolled);
@@ -2700,9 +2702,9 @@ TEST(LoopNest, NormalizeStartPositive) {
BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
VarHandle x("x", kInt);
auto for_body =
Block::make({Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
Store::make(b_buf, {x}, x * 2)});
auto for_body = Block::make(
{Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
Store::make(b_buf, {x}, x * 2)});
auto for_stmt = For::make(x, 50, 100, for_body);
Block::make({for_stmt});

@@ -2768,9 +2770,9 @@ TEST(LoopNest, NormalizeStartZero) {
BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
VarHandle x("x", kInt);
auto for_body =
Block::make({Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
Store::make(b_buf, {x}, x * 2)});
auto for_body = Block::make(
{Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
Store::make(b_buf, {x}, x * 2)});
auto for_stmt = For::make(x, 0, 100, for_body);
Block::make({for_stmt});

@@ -2803,9 +2805,9 @@ TEST(LoopNest, NormalizeStartVariable) {
BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
VarHandle x("x", kInt);
VarHandle y("y", kInt);
auto for_body =
Block::make({Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
Store::make(b_buf, {x}, x * 2)});
auto for_body = Block::make(
{Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x}, 1), 1),
Store::make(b_buf, {x}, x * 2)});
auto for_stmt = For::make(x, y, 100, for_body);
Block::make({for_stmt});
