Update dilation/group settings in conv/deconv layers
Signed-off-by: Dheeraj Peri <[email protected]>
peri044 committed Dec 9, 2020
1 parent 01baec2 commit 74eb616
Showing 2 changed files with 18 additions and 14 deletions.
WORKSPACE (3 changes: 1 addition & 2 deletions)
@@ -71,18 +71,17 @@ http_archive(
    name = "cudnn",
    urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.0.5/11.0_20201106/cudnn-11.0-linux-x64-v8.0.5.39.tgz",],
    build_file = "@//third_party/cudnn/archive:BUILD",
    sha256 = "600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20",
    strip_prefix = "cuda"
)

http_archive(
    name = "tensorrt",
    urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.1/tars/TensorRT-7.2.1.6.Ubuntu-18.04.x86_64-gnu.cuda-11.0.cudnn8.0.tar.gz",],
    build_file = "@//third_party/tensorrt/archive:BUILD",
    sha256 = "c7d73b2585b18aae68b740249efa8c8ba5ae852abe9a023720595432a8eb4efd",
    strip_prefix = "TensorRT-7.2.1.6"
)


####################################################################################
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
####################################################################################
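Side note on how this pin relates to the converter change below: the TensorRT archive above ships NvInferVersion.h, whose version macros are what the #if guard in conv_deconv.cpp evaluates. A minimal standalone sketch of that check (not part of this commit; it assumes the headers from the pinned TensorRT-7.2.1.6 archive are on the include path):

#include <NvInferVersion.h>
#include <iostream>

int main() {
  // Print the version macros the converter's preprocessor guard checks;
  // the archive pinned above should report 7.2.1.
  std::cout << "TensorRT " << NV_TENSORRT_MAJOR << "." << NV_TENSORRT_MINOR << "."
            << NV_TENSORRT_PATCH << std::endl;
#if NV_TENSORRT_MAJOR > 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR >= 1)
  // Same condition the deconvolution converter uses before calling
  // setDilationNd() / setNbGroups().
  std::cout << "deconv dilation/groups supported by this build" << std::endl;
#endif
  return 0;
}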
core/conversion/converters/impl/conv_deconv.cpp (29 changes: 17 additions & 12 deletions)
@@ -12,7 +12,6 @@ namespace {
 
 bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args) {
   auto in = args[0].ITensor(); // assumes non-static input Tensor
-
   auto w = Weights(ctx, args[1].unwrapToTensor());
   auto stride = util::toDims(args[3].unwrapToIntList());
   LOG_DEBUG("stride: " << stride);
@@ -24,38 +23,44 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
   auto out_padding = util::toDims(args[7].unwrapToIntList());
   LOG_DEBUG("out_padding: " << out_padding);
   int64_t groups = args[8].unwrapToInt();
+  LOG_DEBUG("groups: " << groups);
 
   nvinfer1::ILayer* new_layer;
   if (transposed) {
-    nvinfer1::IDeconvolutionLayer* deconv;
+    Weights bias;
     if (args[2].IValue()->isTensor()) {
-      Weights b(ctx, args[2].IValue()->toTensor());
-      deconv = ctx->net->addDeconvolutionNd(*in, w.num_input_maps, w.kernel_shape, w.data, b.data);
+      bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      deconv = ctx->net->addDeconvolutionNd(*in, w.num_input_maps, w.kernel_shape, w.data, {});
+      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[1] * groups));
     }
 
+    // shape of deconvolution's weight: [in, out/groups, ...]
+    auto deconv = ctx->net->addDeconvolutionNd(
+        *in, args[1].unwrapToTensor().sizes()[1] * groups, w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(deconv, "Unable to create deconvolution layer from node: " << *n);
 
     deconv->setStrideNd(stride);
     deconv->setPaddingNd(padding);
-#if NV_TENSORRT_MAJOR > 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR == 1)
+#if NV_TENSORRT_MAJOR > 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR >= 1)
     deconv->setDilationNd(dilation);
     deconv->setNbGroups(groups);
 #else
-    LOG_WARNING("Dilation is not supported in TensorRT versions prior to 7.1");
+    TRTORCH_CHECK(groups == 1, "for deconv with groups > 1, require TensorRT version >= 7.1");
+    for (int idx = 0; idx < dilation.nbDims; idx++) {
+      TRTORCH_CHECK(dilation.d[idx] == 1, "for deconv with dilation > 1, require TensorRT version >= 7.1");
+    }
 #endif
     new_layer = deconv;
   } else {
-    nvinfer1::IConvolutionLayer* conv;
+    Weights bias;
     if (args[2].IValue()->isTensor()) {
-      Weights b(ctx, args[2].unwrapToTensor());
-      conv = ctx->net->addConvolutionNd(*in, w.num_output_maps, w.kernel_shape, w.data, b.data);
+      bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      Weights b(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[0]));
-      conv = ctx->net->addConvolutionNd(*in, w.num_output_maps, w.kernel_shape, w.data, b.data);
+      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[0]));
     }
 
+    // shape of convolution's weight: [out, in/groups, ...]
+    auto conv = ctx->net->addConvolutionNd(*in, args[1].unwrapToTensor().sizes()[0], w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(conv, "Unable to create convolution layer from node: " << *n);
 
     conv->setStrideNd(stride);
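For reference, the channel arithmetic introduced above follows PyTorch's weight layouts: a grouped ConvTranspose2d stores its weight as [in, out/groups, kH, kW], so the deconvolution's output-map count is weight.sizes()[1] * groups, while a grouped Conv2d stores [out, in/groups, kH, kW], so the convolution's output-map count is weight.sizes()[0]. A small illustrative check with libtorch (not part of the commit; the layer sizes are arbitrary):

#include <torch/torch.h>
#include <iostream>

int main() {
  const int64_t in_ch = 8, out_ch = 16, groups = 4;

  // Conv2d weight: [out, in/groups, kH, kW] -> output maps = sizes()[0]
  torch::nn::Conv2d conv(torch::nn::Conv2dOptions(in_ch, out_ch, 3).groups(groups));
  std::cout << conv->weight.sizes() << std::endl;  // [16, 2, 3, 3]

  // ConvTranspose2d weight: [in, out/groups, kH, kW] -> output maps = sizes()[1] * groups
  torch::nn::ConvTranspose2d deconv(torch::nn::ConvTranspose2dOptions(in_ch, out_ch, 3).groups(groups));
  std::cout << deconv->weight.sizes() << std::endl;              // [8, 4, 3, 3]
  std::cout << deconv->weight.sizes()[1] * groups << std::endl;  // 16
  return 0;
}

This mirrors the args[1].unwrapToTensor().sizes()[1] * groups and sizes()[0] expressions used in the diff.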
