add log stmts to peephole.cpp
Summary: Pull Request resolved: pytorch#23279

Differential Revision: D16519245

Pulled By: Krovatkin

fbshipit-source-id: 50c49d890c0acac8259b3c367d183a1aa7cf6859
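
Note: the GRAPH_UPDATE/GRAPH_DUMP statements added here only print when JIT logging is enabled for this file. Below is a minimal sketch of how one might surface them from Python; it assumes the PYTORCH_JIT_LOG_LEVEL environment variable read by torch/csrc/jit/jit_log.h is honored and that the ">" prefix raises verbosity to the GRAPH_UPDATE level — both details may differ by build, and the script name is illustrative.

```python
# Minimal sketch (assumptions noted above): set the logging variable before the
# process starts, e.g.
#   PYTORCH_JIT_LOG_LEVEL=">peephole" python peephole_log_demo.py
import torch

@torch.jit.script
def f(x):
    # x.t().t() and the trailing "+ 0" are exactly the patterns the peephole
    # pass rewrites, so GRAPH_UPDATE lines should appear on stderr once the
    # graph is optimized.
    return x.t().t() + 0

print(f(torch.randn(3, 4)))
```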
Krovatkin authored and facebook-github-bot committed Jul 30, 2019
1 parent 9dea86f commit 649fa8e
Showing 1 changed file with 69 additions and 2 deletions.
71 changes: 69 additions & 2 deletions torch/csrc/jit/passes/peephole.cpp
@@ -1,8 +1,8 @@
#include <torch/csrc/jit/passes/peephole.h>
#include <torch/csrc/jit/ir_views.h>
#include <torch/csrc/jit/symbolic_variable.h>

#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/symbolic_variable.h>

namespace torch {
namespace jit {
@@ -49,13 +49,21 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
->cast<CompleteTensorType>()) {
auto expanded_sizes = node->get<c10::List<int64_t>>(attr::size);
if (!expanded_sizes.has_value() || c10::impl::toVector(*expanded_sizes) == input_type->sizes()) {
GRAPH_UPDATE(
*node,
" (x.expand(x.size()) == x) is replaced with ",
node->namedInput(attr::self)->debugName());
node->output()->replaceAllUsesWith(node->namedInput(attr::self));
}
}
} else if (node->matches("aten::t(Tensor self) -> Tensor")) {
// x.t().t() == x
Node* input_node = node->input()->node();
if (input_node->matches("aten::t(Tensor self) -> Tensor")) {
GRAPH_UPDATE(
*node,
" (x.t().t() == x) is replaced with ",
input_node->input()->debugName());
node->output()->replaceAllUsesWith(input_node->input());
}
} else if (node->matches(
@@ -65,6 +73,10 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
auto other_type = ProfiledTensorType::create(node->input(1)->type());
if (mustBeEqual(self_type->scalarType(), other_type->scalarType()) &&
mustBeEqual(self_type->device(), other_type->device())) {
GRAPH_UPDATE(
*node,
" (x.type_as(y) == x) is replaced with ",
node->input(0)->debugName());
node->output()->replaceAllUsesWith(node->input(0));
}
} else if (
@@ -136,6 +148,15 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {

// Copy shape information from output node
((Value*)addmm_value)->copyMetadata(node->output());
GRAPH_UPDATE(
"Fusing ",
mm_node->input(0)->debugName(),
", ",
mm_node->input(1)->debugName(),
" and ",
node->input(1 - mm_side)->debugName(),
" into ",
addmm_value.value()->debugName());
node->output()->replaceAllUsesWith(addmm_value);
}
}
@@ -151,6 +172,10 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
/*const_inputs=*/attr::other)) {
// x * 1 == x / 1 == x
if (node->get<at::Scalar>(attr::other)->toDouble() == 1) {
GRAPH_UPDATE(
*node,
" (x * 1 == x / 1 == x) is replaced with ",
node->input(0)->debugName());
node->output()->replaceAllUsesWith(node->input(0));
}
} else if (
@@ -163,26 +188,42 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
// x + 0 == x - 0 == x
if (node->get<at::Scalar>(attr::alpha)->toDouble() == 1 &&
node->get<at::Scalar>(attr::other)->toDouble() == 0) {
GRAPH_UPDATE(
*node,
" (x + 0 == x - 0 == x) is replaced with ",
node->input(0)->debugName());
node->output()->replaceAllUsesWith(node->input(0));
}
} else if (
node->kind() == aten::Float || node->kind() == aten::Int ||
node->kind() == prim::ImplicitTensorToNum) {
Node* input_node = node->input()->node();
if (input_node->kind() == prim::NumToTensor) {
GRAPH_UPDATE(
*node,
" (x.NumToTensor().ImplicitTensorToNum() == x.NumToTensor()) is replaced with ",
node->input()->debugName());
node->output()->replaceAllUsesWith(input_node->input());
}
} else if (
node->matches(
"aten::_grad_sum_to_size(Tensor(a) self, int[]? size) -> Tensor(a)")) {
if (node->input(1)->mustBeNone()) {
GRAPH_UPDATE(
*node,
" (x._grad_sum_to_size(x, None) == x) is replaced with ",
node->input(0)->debugName());
node->output()->replaceAllUsesWith(node->input(0));
} else {
auto uses = node->output()->uses();
for (Use u : uses) {
if (u.user->matches(
"aten::_grad_sum_to_size(Tensor(a) self, int[]? size) -> Tensor(a)") &&
u.user->input(1)->type()->isSubtypeOf(ListType::ofInts())) {
GRAPH_UPDATE(
*node,
" (x._grad_sum_to_size(y)._grad_sum_to_size(z) == x._grad_sum_to_size(z)) is replaced with ",
node->inputs().at(0)->debugName());
u.user->replaceInput(0, node->inputs().at(0));
}
}
@@ -201,6 +242,11 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
// if an if node's output equals its condition replace output with
// condition
if (true_val && !false_val) {
GRAPH_UPDATE(
"Replacing ",
n.outputs().at(i)->debugName(),
" (True or False) with ",
n.cond()->debugName());
n.outputs().at(i)->replaceAllUsesWith(n.cond());
}
}
@@ -219,6 +265,7 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
WithInsertPoint guard(node);
auto output = node->owningGraph()->insertConstant(
node->kind() == aten::__isnot__);
GRAPH_UPDATE("Folding ", *node, " to ", output->debugName());
node->output()->replaceAllUsesWith(output);
}
}
@@ -228,6 +275,8 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
// we are unwrapping an input that can't be None, remove the unwrap
auto input = node->input();
if (input->mustNotBeNone()) {
GRAPH_UPDATE(
"Unwrapping ", *node, " as ", node->input(), " can't be optional");
node->output()->replaceAllUsesWith(node->input());
}
} else if (node->matches("prim::dtype(Tensor a) -> int")) {
@@ -236,13 +285,20 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
WithInsertPoint guard(node);
auto output = node->owningGraph()->insertConstant(
static_cast<int64_t>(*ptt->scalarType()));
GRAPH_UPDATE(
"Replacing ", *node, " with a type constant ", output->debugName());
node->output()->replaceAllUsesWith(output);
}
} else if (node->matches("prim::device(Tensor a) -> Device")) {
auto ptt = ProfiledTensorType::create(node->input()->type());
if (ptt->device()) {
WithInsertPoint guard(node);
auto output = node->owningGraph()->insertConstant(*ptt->device());
GRAPH_UPDATE(
"Replacing ",
*node,
" with a device constant ",
output->debugName());
node->output()->replaceAllUsesWith(output);
}
} else if (node->matches("aten::dim(Tensor self) -> int")) {
@@ -251,6 +307,11 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
WithInsertPoint guard(node);
auto output =
node->owningGraph()->insertConstant(static_cast<int64_t>(*dim));
GRAPH_UPDATE(
"Replacing ",
*node,
" with a \"dim\" constant ",
output->debugName());
node->output()->replaceAllUsesWith(output);
}
} else if (node->matches("prim::is_cuda(Tensor a) -> bool")) {
@@ -259,6 +320,11 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {
WithInsertPoint guard(node);
auto output =
node->owningGraph()->insertConstant((*ptt->device()).is_cuda());
GRAPH_UPDATE(
"Replacing ",
*node,
" with a is_cuda constant ",
output->debugName());
node->output()->replaceAllUsesWith(output);
}
}
@@ -267,6 +333,7 @@ void PeepholeOptimizeImpl(Block* block, bool addmm_fusion_enabled) {

void PeepholeOptimize(Block* block, bool addmm_fusion_enabled) {
PeepholeOptimizeImpl(block, addmm_fusion_enabled);
GRAPH_DUMP("After PeepholeOptimize: ", block->owningGraph());
// Eliminate dead code created by any peephole passes we've just done
EliminateDeadCode(block);
}