Merge pull request opencv#12837 from dkurt:dnn_fix_ie
alalek committed Oct 15, 2018
2 parents f8a27d2 + dc3406e commit 113793f
Showing 3 changed files with 47 additions and 4 deletions.
10 changes: 8 additions & 2 deletions modules/dnn/src/dnn.cpp
@@ -1511,10 +1511,10 @@ struct Net::Impl
             CV_Assert(!ieNode.empty());
             ieNode->net = net;
 
+            auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
             if ((preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD) && !fused)
             {
                 ieNode->layer->precision = InferenceEngine::Precision::FP16;
-                auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
                 if (weightableLayer)
                 {
                     if (weightableLayer->_weights)
@@ -1532,7 +1532,13 @@ struct Net::Impl
                     }
                 }
             }
-
+            if (weightableLayer)
+            {
+                if (weightableLayer->_weights)
+                    weightableLayer->blobs["weights"] = weightableLayer->_weights;
+                if (weightableLayer->_biases)
+                    weightableLayer->blobs["biases"] = weightableLayer->_biases;
+            }
             ieNode->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers);
             net->addBlobs(ld.inputBlobsWrappers);
             net->addBlobs(ld.outputBlobsWrappers);
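Note on the dnn.cpp hunks: the WeightableLayer cast is hoisted out of the FP16-only branch, and every weightable layer now also gets its _weights/_biases mirrored into its blobs map. A minimal standalone sketch of that mirroring pattern, using mock stand-ins (Blob, MockWeightableLayer, and syncBlobs are illustrative names here, not the real InferenceEngine types):

    // Standalone illustration only: mock stand-ins for InferenceEngine types.
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct Blob { std::vector<float> data; };      // stand-in for InferenceEngine::Blob
    using BlobPtr = std::shared_ptr<Blob>;

    struct MockWeightableLayer                     // stand-in for InferenceEngine::WeightableLayer
    {
        BlobPtr _weights, _biases;                 // dedicated weight/bias fields
        std::map<std::string, BlobPtr> blobs;      // generic per-layer blob map
    };

    // Mirror the dedicated fields into the generic map, as the dnn.cpp hunk does.
    static void syncBlobs(MockWeightableLayer& layer)
    {
        if (layer._weights)
            layer.blobs["weights"] = layer._weights;
        if (layer._biases)
            layer.blobs["biases"] = layer._biases;
    }

    int main()
    {
        MockWeightableLayer conv;
        conv._weights = std::make_shared<Blob>();
        syncBlobs(conv);
        std::cout << "blobs registered: " << conv.blobs.size() << "\n";  // prints 1 (no biases set)
        return 0;
    }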
30 changes: 28 additions & 2 deletions modules/dnn/src/layers/convolution_layer.cpp
@@ -449,15 +449,28 @@ class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::ConvolutionLayer> ieLayer(new InferenceEngine::ConvolutionLayer(lp));
 
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
+        ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
+        ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
+        ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
+        ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
+        ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
+        ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
+#else
         ieLayer->_kernel_x = kernel.width;
         ieLayer->_kernel_y = kernel.height;
         ieLayer->_stride_x = stride.width;
         ieLayer->_stride_y = stride.height;
-        ieLayer->_out_depth = outCn;
         ieLayer->_padding_x = pad.width;
         ieLayer->_padding_y = pad.height;
         ieLayer->_dilation_x = dilation.width;
         ieLayer->_dilation_y = dilation.height;
+#endif
+        ieLayer->_out_depth = outCn;
         ieLayer->_group = group;
 
         ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
@@ -1659,15 +1672,28 @@ class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::DeconvolutionLayer> ieLayer(new InferenceEngine::DeconvolutionLayer(lp));
 
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
+        ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
+        ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
+        ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
+        ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
+        ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
+        ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
+#else
         ieLayer->_kernel_x = kernel.width;
         ieLayer->_kernel_y = kernel.height;
         ieLayer->_stride_x = stride.width;
         ieLayer->_stride_y = stride.height;
-        ieLayer->_out_depth = numOutput;
         ieLayer->_padding_x = pad.width;
         ieLayer->_padding_y = pad.height;
         ieLayer->_dilation_x = dilation.width;
         ieLayer->_dilation_y = dilation.height;
+#endif
+        ieLayer->_out_depth = numOutput;
         ieLayer->_group = group;
 
         ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
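Note on the convolution/deconvolution hunks: for Inference Engine releases newer than 2018R3, the geometry is set through per-axis fields (_kernel, _stride, _padding, _pads_end, _dilation) filled with insert(axis, value) calls, instead of the old scalar _kernel_x/_kernel_y style. A standalone sketch of that per-axis container idea; AxisProps and MockConvLayer below are hypothetical stand-ins, not the real InferenceEngine classes:

    // Standalone illustration of the per-axis property style.
    #include <cstddef>
    #include <iostream>
    #include <map>

    enum Axis { X_AXIS, Y_AXIS };                  // mirrors InferenceEngine::X_AXIS / Y_AXIS

    class AxisProps                                // hypothetical stand-in for the IE property vector
    {
    public:
        void insert(Axis axis, std::size_t value) { values_[axis] = value; }
        std::size_t at(Axis axis) const { return values_.at(axis); }
    private:
        std::map<Axis, std::size_t> values_;
    };

    struct MockConvLayer
    {
        AxisProps _kernel, _stride, _padding, _pads_end, _dilation;  // per-axis fields
    };

    int main()
    {
        // Same kind of values the diff copies from cv::dnn's kernel/stride/pad/dilation sizes.
        MockConvLayer conv;
        conv._kernel.insert(X_AXIS, 3);   conv._kernel.insert(Y_AXIS, 3);
        conv._stride.insert(X_AXIS, 1);   conv._stride.insert(Y_AXIS, 1);
        conv._padding.insert(X_AXIS, 1);  conv._padding.insert(Y_AXIS, 1);
        conv._pads_end.insert(X_AXIS, 1); conv._pads_end.insert(Y_AXIS, 1);
        conv._dilation.insert(X_AXIS, 1); conv._dilation.insert(Y_AXIS, 1);
        std::cout << "kernel: " << conv._kernel.at(X_AXIS) << "x" << conv._kernel.at(Y_AXIS) << "\n";
        return 0;
    }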
11 changes: 11 additions & 0 deletions modules/dnn/src/layers/pooling_layer.cpp
@@ -268,6 +268,16 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer
         {
             lp.type = "Pooling";
             InferenceEngine::PoolingLayer* poolLayer = new InferenceEngine::PoolingLayer(lp);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
+            poolLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
+            poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
+            poolLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
+            poolLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
+            poolLayer->_padding.insert(InferenceEngine::X_AXIS, pad_l);
+            poolLayer->_padding.insert(InferenceEngine::Y_AXIS, pad_t);
+            poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad_r);
+            poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad_b);
+#else
             poolLayer->_kernel_x = kernel.width;
             poolLayer->_kernel_y = kernel.height;
             poolLayer->_stride_x = stride.width;
@@ -276,6 +286,7 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer
             poolLayer->_padding_y = pad_t;
             poolLayer->params["pad-r"] = format("%d", pad_r);
             poolLayer->params["pad-b"] = format("%d", pad_b);
+#endif
             poolLayer->_exclude_pad = type == AVE && padMode == "SAME";
             poolLayer->params["rounding-type"] = ceilMode ? "ceil" : "floor";
             poolLayer->_type = type == MAX ? InferenceEngine::PoolingLayer::PoolType::MAX :
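Note on the INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) guard used in all three files: it selects the API at compile time based on the Inference Engine release the module is built against. A rough standalone sketch of how such a release-comparison guard can work, assuming version constants shaped like 2018030000; the DEMO_* macros are illustrative stand-ins, not copied from OpenCV's op_inf_engine.hpp:

    // Illustrative only: hypothetical re-creation of a release-comparison guard.
    #include <cstdio>

    #define DEMO_RELEASE_2018R3 2018030000
    #define DEMO_RELEASE_2018R4 2018040000

    // Pretend we are building against 2018R4.
    #define DEMO_RELEASE DEMO_RELEASE_2018R4

    // "Greater than" on the major part, dropping the low digits.
    #define DEMO_VER_MAJOR_GT(ver) (((DEMO_RELEASE) / 10000) > ((ver) / 10000))

    int main()
    {
    #if DEMO_VER_MAJOR_GT(DEMO_RELEASE_2018R3)
        std::printf("new per-axis pooling API path\n");   // taken when DEMO_RELEASE is 2018R4
    #else
        std::printf("legacy _kernel_x/_kernel_y pooling path\n");
    #endif
        return 0;
    }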
