Skip to content

Commit

Permalink
cudnn deconv implementation
Browse files Browse the repository at this point in the history
  • Loading branch information
wangyang59 committed Mar 20, 2017
1 parent 5a933b4 commit b8afb14
Show file tree
Hide file tree
Showing 15 changed files with 789 additions and 389 deletions.
4 changes: 4 additions & 0 deletions paddle/gserver/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,16 @@ filter_test(GSERVER_HEADER)
filter_test(GSERVER_SOURCES)
if(NOT WITH_GPU)
list(REMOVE_ITEM GSERVER_HEADER
layers/CudnnConvBaseLayer.h
layers/CudnnConvLayer.h
layers/CudnnConvTransLayer.h
layers/CudnnPoolLayer.h
layers/CudnnBatchNormLayer.h)

list(REMOVE_ITEM GSERVER_SOURCES
layers/CudnnConvBaseLayer.cpp
layers/CudnnConvLayer.cpp
layers/CudnnConvTransLayer.cpp
layers/CudnnPoolLayer.cpp
layers/CudnnBatchNormLayer.cpp)
compile_cu_as_cpp(layers/LstmCompute.cu)
Expand Down
204 changes: 204 additions & 0 deletions paddle/gserver/layers/ConvBaseProjection.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,204 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "ConvBaseProjection.h"
#include "paddle/utils/Stat.h"

namespace paddle {

// Per-thread, per-device cache of GPU workspace buffers shared by every
// ConvBaseProjection instance on that thread (see getSpaceBytes()).
ThreadLocalD<std::vector<MemoryHandle *>> ConvBaseProjection::convMem_;

ConvBaseProjection::ConvBaseProjection(const ProjectionConfig &config,
                                       ParameterPtr parameter,
                                       bool useGpu)
    : Projection(config, parameter, useGpu) {
  // This projection is backed by cuDNN, so a GPU is mandatory.
  CHECK(useGpu);
  getConvParams();
  initCudnn();

  // The weight matrix stores all groups' filters:
  // rows = per-group filter volume, cols = total number of filters.
  const size_t filterVolume = filterH_ * filterW_ * channels_ / groups_;
  const size_t totalFilters = numFilters_;
  weight_.reset(new Weight(filterVolume, totalFilters, parameter));
  // Number of weight elements that belong to a single group.
  weightOffset_ = filterVolume * totalFilters / groups_;
}

/// Reads padding, stride, filter and image/output geometry from the
/// projection config and derives the convolution-semantics channel counts.
void ConvBaseProjection::getConvParams() {
  const ConvConfig &conf = config_.conv_conf();
  paddingH_ = conf.padding_y();
  paddingW_ = conf.padding();

  strideH_ = conf.stride_y();
  strideW_ = conf.stride();

  filterH_ = conf.filter_size_y();
  filterW_ = conf.filter_size();

  // The y-dimension fields are optional; fall back to the square setting.
  configImgH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
  configImgW_ = conf.img_size();

  configOutH_ = conf.has_output_y() ? conf.output_y() : conf.output_x();
  configOutW_ = conf.output_x();

  configChannels_ = conf.channels();
  configNumFilters_ = config_.num_filters();

  // Any projection type other than plain "conv" is treated as a
  // transposed (deconv) convolution.
  isDeconv_ = (config_.type() != "conv");

  // In deconv mode the roles of channels and filters are swapped relative
  // to the config, so the cuDNN descriptors can always be set up in
  // forward-convolution terms.
  channels_ = isDeconv_ ? configNumFilters_ : configChannels_;
  numFilters_ = isDeconv_ ? configChannels_ : configNumFilters_;

  groups_ = conf.groups();
  CHECK_EQ(channels_ % groups_, 0);
  CHECK_EQ(numFilters_ % groups_, 0);
}

// Creates the cuDNN filter/tensor/convolution descriptors and clears the
// algorithm-selection state so the first reshape() triggers a selection.
void ConvBaseProjection::initCudnn() {
  // NOTE(review): the filter descriptor receives channels_/groups_ first and
  // numFilters_/groups_ second — confirm this matches the (input, output)
  // argument order expected by hl_create_filter_descriptor.
  hl_create_filter_descriptor(&filterDesc_,
                              channels_ / groups_,
                              numFilters_ / groups_,
                              filterH_,
                              filterW_);
  hl_create_tensor_descriptor(&imageDesc_);
  hl_create_tensor_descriptor(&outputDesc_);
  hl_create_convolution_descriptor(&convDesc_,
                                   imageDesc_,
                                   filterDesc_,
                                   paddingH_,
                                   paddingW_,
                                   strideH_,
                                   strideW_);

  // initialize all to default algorithms
  fwdAlgo_ = 0;
  bwdFilterAlgo_ = 0;
  bwdDataAlgo_ = 0;
  fwdLimitBytes_ = 0;
  bwdDataLimitBytes_ = 0;
  bwdFilterLimitBytes_ = 0;
  workSpaceInBytes_ = 0;

  // batchNum_ == 0 never matches a real batch size, so reshape() will
  // run the algorithm selection on its first call.
  batchNum_ = 0;
  isSelectAlgo_ = false;
}

// Re-shapes the input/output tensor descriptors and resets the convolution
// descriptor for the given batch size. Must be called whenever the batch
// size (and hence the tensor extents) changes.
void ConvBaseProjection::reshapeTensorDesc(int batchSize) {
  // Input tensor (NCHW). The descriptor covers only one group's channels,
  // but the batch stride (channels_ * imageH_ * imageW_) spans a full
  // sample across ALL groups.
  hl_tensor_reshape(imageDesc_,
                    batchSize,
                    channels_ / groups_,
                    imageH_,
                    imageW_,
                    channels_ * imageH_ * imageW_,
                    imageH_ * imageW_,
                    imageW_,
                    1);
  hl_reset_convolution_descriptor(convDesc_,
                                  imageDesc_,
                                  filterDesc_,
                                  paddingH_,
                                  paddingW_,
                                  strideH_,
                                  strideW_);

  // The stride between two consecutive images in ConvProjection may not be 1,
  // for example, in the case of layer ConcatenateLayer2 with two
  // ConvProjection, the stride is the output_size of layer ConcatenateLayer2.
  // So the calculation of nStride is different from CudnnConvLayer.
  // In fact, only "nStride = out_->value->getStride()" is ok.
  // size_t nStride = numFilters_ * outputH_ * outputW_;
  // if (out_->value->isContiguous()) {
  //   CHECK_EQ(nStride, out_->value->getWidth());
  // } else {
  //   nStride = out_->value->getStride();
  // }
  size_t nStride = out_->value->getStride();

  // Output tensor (NCHW) with the batch stride taken from the output
  // matrix, which may be a non-contiguous sub-matrix (see note above).
  hl_tensor_reshape(outputDesc_,
                    batchSize,
                    numFilters_ / groups_,
                    outputH_,
                    outputW_,
                    nStride,
                    outputH_ * outputW_,
                    outputW_,
                    1);
}

// Recomputes the projection geometry for `batchSize`, validates the input
// width, and (re)selects cuDNN algorithms whenever the batch size changes.
void ConvBaseProjection::reshape(int batchSize) {
  // calOutputSize() also refreshes imageH_/imageW_/outputH_/outputW_ and
  // the per-group offsets; its result must match the output matrix width.
  const size_t expectedOutWidth = calOutputSize();
  CHECK_EQ(expectedOutWidth, out_->value->getWidth());

  if (isDeconv_) {
    CHECK_EQ(static_cast<size_t>(configChannels_ * outputH_ * outputW_),
             in_->value->getWidth())
        << "Wrong input size for convolution transpose"
        << " channels=" << configChannels_ << " outputH=" << outputH_
        << " outputW=" << outputW_ << " inputSize=" << in_->value->getWidth();
  } else {
    CHECK_EQ(static_cast<size_t>(configChannels_ * imageH_ * imageW_),
             in_->value->getWidth())
        << "Wrong input size for convolution"
        << " channels=" << configChannels_ << " imageH=" << imageH_
        << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth();
  }

  // Algorithms only need re-selection when the batch size has changed.
  isSelectAlgo_ = (batchSize == batchNum_);
  batchNum_ = batchSize;

  if (!isSelectAlgo_) {
    reshapeTensorDesc(batchSize);
    // Ask cuDNN for the best forward / backward-data / backward-filter
    // algorithms along with the workspace each of them requires.
    hl_conv_workspace(imageDesc_,
                      outputDesc_,
                      filterDesc_,
                      convDesc_,
                      &fwdAlgo_,
                      &fwdLimitBytes_,
                      &bwdDataAlgo_,
                      &bwdDataLimitBytes_,
                      &bwdFilterAlgo_,
                      &bwdFilterLimitBytes_);

    // One shared workspace must be large enough for all three passes.
    workSpaceInBytes_ = std::max(
        std::max(fwdLimitBytes_, bwdDataLimitBytes_), bwdFilterLimitBytes_);

    VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_
            << " / " << bwdDataAlgo_ << " / " << bwdFilterAlgo_;
  }

  isSelectAlgo_ = true;
}

/// Returns a per-thread, per-device scratch buffer of at least `size` bytes.
/// The buffer is cached in convMem_ and only reallocated when it grows.
void *ConvBaseProjection::getSpaceBytes(size_t size) {
  std::vector<MemoryHandle *> &convMem = *convMem_;
  if (convMem.empty()) {
    // Lazily create one slot per GPU device; entries are value-initialized
    // to null and allocated on first use.
    int numDevices = hl_get_device_count();
    convMem.resize(numDevices);
  }

  int devId = hl_get_device();
  MemoryHandle **localMem = &(convMem[devId]);
  if (NULL == *localMem || size > (*localMem)->getAllocSize()) {
    // Release the previous (smaller) buffer before growing; assigning over
    // the raw pointer directly would leak the old allocation.
    // (delete on a null pointer is a no-op.)
    delete *localMem;
    *localMem = NULL;
    *localMem = new GpuMemoryHandle(size);
  }
  return (*localMem)->getBuf();
}

ConvBaseProjection::~ConvBaseProjection() {
  // Release the cuDNN descriptors created in initCudnn().
  hl_destroy_tensor_descriptor(imageDesc_);
  hl_destroy_tensor_descriptor(outputDesc_);
  hl_destroy_filter_descriptor(filterDesc_);
  hl_destroy_convolution_descriptor(convDesc_);
}

} // namespace paddle
162 changes: 162 additions & 0 deletions paddle/gserver/layers/ConvBaseProjection.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,162 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Projection.h"
#include "paddle/math/MathUtils.h"

namespace paddle {

/**
* @brief Base class for ConvProjection and ConvTransProjection.
*/
class ConvBaseProjection : public Projection {
public:
  /**
   * Constructor.
   */
  ConvBaseProjection(const ProjectionConfig& config,
                     ParameterPtr parameter,
                     bool useGpu);

  ~ConvBaseProjection();

protected:
  /// Reads padding, stride, filter and image geometry from the config.
  void getConvParams();
  /// Creates the cuDNN filter/tensor/convolution descriptors.
  void initCudnn();

  /// Re-shapes the cuDNN descriptors for a new batch size.
  void reshapeTensorDesc(int batchSize);
  /// Recomputes geometry; reselects cuDNN algorithms if batch size changed.
  void reshape(int batchSize);

  /// Derives image/output sizes and the per-group data offsets from the
  /// input argument (falling back to the config when the input carries no
  /// frame size), and returns the expected output matrix width.
  size_t calOutputSize() {
    if (isDeconv_) {
      // For a transposed convolution the projection's input has the
      // geometry of a convolution *output*, and vice versa.
      outputH_ = in_->getFrameHeight();
      outputW_ = in_->getFrameWidth();
      if (outputH_ == 0) outputH_ = configOutH_;
      if (outputW_ == 0) outputW_ = configOutW_;
      imageH_ = imageSize(outputH_,
                          filterH_,
                          paddingH_,
                          strideH_,
                          /* caffeMode */ true);

      imageW_ = imageSize(outputW_,
                          filterW_,
                          paddingW_,
                          strideW_,
                          /* caffeMode */ true);

      const_cast<Argument*>(out_)->setFrameHeight(imageH_);
      const_cast<Argument*>(out_)->setFrameWidth(imageW_);

      inputOffset_ = (configChannels_ / groups_) * outputH_ * outputW_;
      outputOffset_ = (configNumFilters_ / groups_) * imageH_ * imageW_;
      return imageH_ * imageW_ * configNumFilters_;
    } else {
      imageH_ = in_->getFrameHeight();
      imageW_ = in_->getFrameWidth();
      if (imageH_ == 0) imageH_ = configImgH_;
      if (imageW_ == 0) imageW_ = configImgW_;
      outputH_ = outputSize(imageH_,
                            filterH_,
                            paddingH_,
                            strideH_,
                            /* caffeMode */ true);
      outputW_ = outputSize(imageW_,
                            filterW_,
                            paddingW_,
                            strideW_,
                            /* caffeMode */ true);

      const_cast<Argument*>(out_)->setFrameHeight(outputH_);
      const_cast<Argument*>(out_)->setFrameWidth(outputW_);

      inputOffset_ = (configChannels_ / groups_) * imageH_ * imageW_;
      outputOffset_ = (configNumFilters_ / groups_) * outputH_ * outputW_;
      return outputH_ * outputW_ * configNumFilters_;
    }
  }

  /// Returns a per-thread, per-device workspace buffer of at least `size`
  /// bytes, shared by all projections on the thread.
  static void* getSpaceBytes(size_t size);

  /// True if it's deconv projection layer, false if it's ConvProjection layer
  bool isDeconv_;
  /// imageH_ and imageW_ / outputH_ and outputW_
  /// is calculated from the input layer.
  int imageH_, imageW_;
  int outputH_, outputW_;
  /// configImgH_ and configImgW_ / configOutH_ and configOutW_
  /// is obtained from config.
  int configImgH_, configImgW_;
  int configOutH_, configOutW_;
  /// channels_ and numFilters_ are defined in terms of convolution semantics
  int channels_, numFilters_;
  /// configChannels_ and configNumFilters_ are obtained from config.
  /// For Conv they are the same as channels_ and numFilters_.
  /// For ConvTrans they are opposite to channels_ and numFilters_.
  int configChannels_, configNumFilters_;
  int paddingH_, paddingW_;
  int strideH_, strideW_;
  int filterH_, filterW_;
  /// One group offset of input data.
  int inputOffset_;
  /// One group offset of output data.
  int outputOffset_;
  /// One group offset of weight.
  int weightOffset_;
  int groups_;

  /// Cudnn tensor descriptor for input.
  hl_tensor_descriptor imageDesc_;
  /// Cudnn tensor descriptor for output.
  hl_tensor_descriptor outputDesc_;
  /// Cudnn filter descriptor.
  hl_filter_descriptor filterDesc_;
  /// Cudnn descriptor for a convolution operation.
  hl_convolution_descriptor convDesc_;

  /// Record the algorithm for forward convolution, which is obtained by cudnn
  /// api to search the best suited algorithm.
  int fwdAlgo_;
  /// Record the algorithm for computing convolution gradient with respect to
  /// filter coefficients.
  int bwdFilterAlgo_;
  /// Record the algorithm for computing convolution gradient with respect to
  /// the output.
  int bwdDataAlgo_;
  /// Amount of GPU memory needed as workspace to be able to execute a
  /// forward convolution with the specified algo.
  size_t fwdLimitBytes_;
  /// Amount of GPU memory needed as workspace to be able to execute a
  /// backwardData with the specified algo.
  size_t bwdDataLimitBytes_;
  /// Amount of GPU memory needed as workspace to be able to execute a
  /// backwardFilter with the specified algo.
  size_t bwdFilterLimitBytes_;
  /// Size of total work space.
  size_t workSpaceInBytes_;

  /// Whether to call cuDNN api to choose conv algorithm.
  bool isSelectAlgo_;
  /// batchNum_ is used to record batch size. If the batch size is changed,
  /// the selection algorithm will be called.
  int batchNum_;
  /// NOTE(review): bias_ is never referenced in the code visible here --
  /// confirm whether derived classes use it.
  bool bias_;

  /// Filter weights for all groups, laid out group after group.
  std::unique_ptr<Weight> weight_;
  /// Per-thread, per-device cache of cuDNN workspace buffers.
  static ThreadLocalD<std::vector<MemoryHandle*>> convMem_;
};

} // namespace paddle
Loading

0 comments on commit b8afb14

Please sign in to comment.