
Commit 19c989a

fix error and pass unit test

tensor-tang committed Nov 16, 2017
1 parent 40a486d commit 19c989a

Showing 2 changed files with 19 additions and 7 deletions.

18 changes: 15 additions & 3 deletions paddle/gserver/layers/MKLDNNConcatLayer.cpp
@@ -40,7 +40,9 @@ void MKLDNNConcatLayer::reshape(
   CHECK_GT(inputLayers_.size(), 1UL);
   channels_.resize(inputLayers_.size());
   channels_[0] = ic;
-  oc = ic;
+  // need to change the output channel, so use oc_ instead
+  // TODO(TJ): change API, use &oc
+  oc_ = ic;
   for (size_t i = 1; i < inputLayers_.size(); i++) {
     int batchsize, height, width;
     reshapeInput(batchsize, height, width, i);
@@ -50,12 +52,12 @@ void MKLDNNConcatLayer::reshape(
 
     channels_[i] = inputLayers_[i]->getSize() / height / width;
     CHECK_EQ((size_t)channels_[i] * height * width, inputLayers_[i]->getSize());
-    oc += channels_[i];
+    oc_ += channels_[i];
   }
   oh = ih;
   ow = iw;
   reshapeOutput(oh, ow);
-  resizeOutput(bs, oc * oh * ow);
+  resizeOutput(bs, oc_ * oh * ow);
 }
 
 void MKLDNNConcatLayer::resetFwd(std::vector<primitive>& pipeline,
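Note on the arithmetic above: concatenation along the channel axis keeps the batch size and spatial dimensions of the first input and sums the per-input channel counts into oc_. A minimal standalone sketch of that rule, with a hypothetical helper name that is not part of the layer's API:

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical helper mirroring the reshape() logic: the output channel
// count of a channel-axis concat is the sum of the input channel counts.
size_t concatOutputChannels(const std::vector<size_t>& channels) {
  size_t oc = 0;
  for (size_t c : channels) oc += c;
  return oc;
}

int main() {
  // Inputs with 24 and 8 channels concatenate to 32 output channels, so
  // the output buffer is resized to bs * 32 * oh * ow elements.
  assert(concatOutputChannels({24, 8}) == 32);
  return 0;
}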
@@ -88,6 +90,9 @@ void MKLDNNConcatLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
   inputs.resize(inputLayers_.size());
   bool has8c = false, has16c = false, hasnc = false;
   for (size_t i = 0; i < inputs.size(); i++) {
+    // resetInValue will use ic_, so temporarily set it to this input's channels
+    // TODO(TJ): change ic_ to a vector; then channels_ can be removed
+    ic_ = channels_[i];
     resetInValue(inputs[i], nullptr, i);
     CHECK(inputs[i]);
     auto dm = inputs[i]->getDims();
@@ -109,6 +114,8 @@ void MKLDNNConcatLayer::resetFwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
       has16c = true;
     }
   }
+  // change back; ic_ always holds input 0's channel count
+  ic_ = channels_[0];
 
   format outFmt;
   if (has16c && oc_ % 16 == 0) {
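The trailing context above is only the first branch of the output-format selection; the rest is collapsed in this view. A hedged sketch of the likely decision rule, assuming the hidden branches mirror the visible nChw16c case (the 8c, nc, and nchw fallbacks are assumptions, not the file's verbatim code):

enum class Fmt { nc, nchw, nChw8c, nChw16c };

// Assumed completion of the selection: prefer the widest blocked layout
// seen among the inputs, but only while the concatenated channel count
// stays divisible by the blocking factor; otherwise fall back.
Fmt pickOutFormat(bool hasnc, bool has8c, bool has16c, int oc) {
  if (has16c && oc % 16 == 0) return Fmt::nChw16c;  // visible branch above
  if (has8c && oc % 8 == 0) return Fmt::nChw8c;     // assumed
  if (hasnc) return Fmt::nc;                        // assumed
  return Fmt::nchw;                                 // assumed default
}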
@@ -161,9 +168,14 @@ void MKLDNNConcatLayer::resetBwdBuffers(std::vector<MKLDNNMatrixPtr>& inputs,
   inputs.resize(inputLayers_.size());
   for (size_t i = 0; i < inputs.size(); i++) {
     CHECK(inVals_[i]);
+    // resetInGrad will use inVal_
+    // TODO(TJ): move inVals_ to MKLDNNLayer and remove inVal_
+    inVal_ = inVals_[i];
     resetInGrad(inputs[i], inVals_[i]->getPrimitiveDesc(), i);
     CHECK_PRIMITIVE_DESC_EQ(inputs[i], inVals_[i]->getPrimitiveDesc());
   }
+  // change back; inVal_ always holds input 0's value
+  inVal_ = inVals_[0];
 }
 
 void MKLDNNConcatLayer::resetBwdPipeline(
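Both .cpp hunks above rely on the same manual save-and-restore idiom: a shared member (ic_ or inVal_) is overwritten for one loop iteration and reset to input 0's state after the loop, as the TODOs acknowledge. Until those refactors land, a scoped guard would make the restore automatic; a minimal sketch, hypothetical and not part of this change:

#include <utility>

// Hypothetical RAII guard: set a variable to a temporary value and
// restore the saved value when the scope ends, even on early return.
template <typename T>
class ScopedValue {
 public:
  ScopedValue(T& target, T value) : target_(target), saved_(target) {
    target_ = std::move(value);
  }
  ~ScopedValue() { target_ = std::move(saved_); }

 private:
  T& target_;
  T saved_;
};

// Inside the forward loop this would read:
//   ScopedValue<int> guard(ic_, channels_[i]);
//   resetInValue(inputs[i], nullptr, i);
// and ic_ reverts to its previous value when guard leaves scope.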
8 changes: 4 additions & 4 deletions paddle/gserver/layers/MKLDNNConcatLayer.h
@@ -74,8 +74,8 @@ class MKLDNNConcatLayer : public MKLDNNLayer {
 
   void printValueFormat() override {
     for (size_t i = 0; i < inVals_.size(); ++i) {
-      VLOG(MKLDNN_FMTS) << "Input " << i << inputLayers_[i]->getName() << ": "
-                        << inVals_[i]->getFormat() << " >>>";
+      VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
+                        << ": " << inVals_[i]->getFormat() << " >>>";
     }
     if (outVal_) {
       VLOG(MKLDNN_FMTS) << outVal_->getFormat() << " >>> ";
@@ -93,8 +93,8 @@ class MKLDNNConcatLayer : public MKLDNNLayer {
       VLOG(MKLDNN_FMTS) << outGrad_->getFormat() << " <<< ";
     }
     for (size_t i = 0; i < inGrads_.size(); ++i) {
-      VLOG(MKLDNN_FMTS) << "Input " << i << inputLayers_[i]->getName() << ": "
-                        << inGrads_[i]->getFormat() << "<<<";
+      VLOG(MKLDNN_FMTS) << "Input " << i << ", " << inputLayers_[i]->getName()
+                        << ": " << inGrads_[i]->getFormat() << "<<<";
     }
   }
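The only change in this header is inserting a ", " separator between the input index and the layer name, so the two fields no longer run together in the log. With a hypothetical input layer named conv1 whose value format prints as nchw, the logged line changes from

Input 0conv1: nchw >>>

to

Input 0, conv1: nchw >>>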
