Skip to content

Commit

Permalink
cleaned up ConvolutionNode vs. image interpretation of TensorShape;
Browse files Browse the repository at this point in the history
TensorOp() optimization to use SGEMM disabled for 'double' in Debug builds, so we get our code path tested once in a while;
fixed ConvolutionEngineTests.cpp w.r.t. Create();
removed unused InputIsImage() methods
  • Loading branch information
frankseide committed Jan 1, 2016
1 parent bb4f72c commit 8fc4b00
Show file tree
Hide file tree
Showing 8 changed files with 146 additions and 154 deletions.
6 changes: 3 additions & 3 deletions Source/CNTK/SynchronousExecutionEngine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
size_t numImages = parameter.size() > 3 ? ((NDLNode<ElemType>*)params[3])->GetScalar() : 1; // BUGBUG: This comes through MBLayout, and should be forbidden.
ImageLayoutKind imageLayoutKind = ImageLayoutKindFrom(node->GetOptionalParameter("imageLayout", "HWC"));

nodePtr = builder.CreateInputNode(name, ImageLayout(imageWidth, imageHeight, imageChannels, imageLayoutKind), numImages);
nodePtr = builder.CreateInputNode(name, ImageDimensions::AsTensorShape(imageWidth, imageHeight, imageChannels, imageLayoutKind), numImages);
}
}
else if (cnNodeType == L"SparseImageInput")
Expand All @@ -129,7 +129,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
size_t numImages = parameter.size() > 3 ? ((NDLNode<ElemType>*)params[3])->GetScalar() : 1;
ImageLayoutKind imageLayoutKind = ImageLayoutKindFrom(node->GetOptionalParameter("imageLayout", "HWC"));

nodePtr = builder.CreateSparseInputNode(name, ImageLayout(imageWidth, imageHeight, imageChannels, imageLayoutKind), numImages);
nodePtr = builder.CreateSparseInputNode(name, ImageDimensions::AsTensorShape(imageWidth, imageHeight, imageChannels, imageLayoutKind), numImages);
}
}
else if (OperationNameOf(LearnableParameter) == cnNodeType)
Expand Down Expand Up @@ -325,7 +325,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
size_t img_channels = node->GetOptionalParameter("imageChannels", "0");

bool needGradient = node->GetOptionalParameter("needGradient", "false");
nodePtr = builder.Reshape(NULL, num_rows, ImageLayoutWHC(img_width, img_height, img_channels), name); // BUGBUG: use a tensor descriptor instead
nodePtr = builder.Reshape(NULL, num_rows, ImageDimensions::AsTensorShape(img_width, img_height, img_channels, ImageLayoutKind::HWC/*legacy*/), name); // BUGBUG: use a tensor descriptor instead
nodePtr->SetParameterUpdateRequired(needGradient);
}
}
Expand Down
55 changes: 37 additions & 18 deletions Source/Common/Include/DataTensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -249,12 +249,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
const SmallVector<ptrdiff_t> & GetStrides() const { return m_strides; }

// interpretation as an image tensor
size_t GetNumChannels() const { if (m_dims.empty()) return 0; else return m_dims.size() > 0 ? m_dims[0] : 1; }
size_t GetWidth() const { if (m_dims.empty()) return 0; else return m_dims.size() > 1 ? m_dims[1] : 1; }
size_t GetHeight() const { if (m_dims.empty()) return 0; else return m_dims.size() > 2 ? m_dims[2] : 1; }
// heuristics used for pretty-printing
// TODO: This will go away.
bool IsInputAnImage() const { return GetRank() == 3 && (GetWidth() != 1 || GetNumChannels() != 1); }
//size_t GetNumChannels() const { if (m_dims.empty()) return 0; else return m_dims.size() > 0 ? m_dims[0] : 1; }
//size_t GetWidth() const { if (m_dims.empty()) return 0; else return m_dims.size() > 1 ? m_dims[1] : 1; }
//size_t GetHeight() const { if (m_dims.empty()) return 0; else return m_dims.size() > 2 ? m_dims[2] : 1; }
// legacy helper function for RowSliceNode. Will go away.
// Returns true iff this is a rank-3 tensor whose first two dimensions are both 1,
// i.e. a plain vector that happens to be stored with image-style (3D) dimensions.
bool IsVectorStoredAsImage() const { return GetRank() == 3 && m_dims[0] == 1 && m_dims[1] == 1; }

// indexing
Expand Down Expand Up @@ -468,19 +466,40 @@ namespace Microsoft { namespace MSR { namespace CNTK {
else if (s == L"HWC" || s == L"legacy") return ImageLayoutKind::HWC;
else InvalidArgument("ImageLayoutKindFrom: Unknown ImageLayoutKind '%ls', must be 'CHW' (cudnn) or 'HWC' (CNTK legacy)", s.c_str());
}
// Build a TensorShape from (width, height, channels), ordering the dimensions
// according to the requested storage layout:
//  - CHW (cudnn):       dims = [W, H, C]
//  - HWC (CNTK legacy): dims = [C, W, H]
static inline TensorShape ImageLayout(size_t width, size_t height, size_t channels, ImageLayoutKind imageLayoutKind)
{
    switch (imageLayoutKind)
    {
    case ImageLayoutKind::CHW: return TensorShape(width, height, channels);
    case ImageLayoutKind::HWC: return TensorShape(channels, width, height);
    default:                   LogicError("ImageLayout: Invalid ImageLayoutKind");
    }
}

// When constructing an image tensor with the usual W, H, C format, use the following function instead.
// This will sort the three parameters into the correct order.
// BUGBUG: This only works for ImageLayoutKind::HWC. Also the naming is bad.
static inline TensorShape ImageLayoutWHC(size_t width, size_t height, size_t channels)
// Interpret a TensorShape as an image descriptor (width, height, numChannels),
// considering that we support two ways of storing images in a rank-3 tensor:
//  - CHW (cudnn layout):       dims = [W, H, C]
//  - HWC (CNTK legacy layout): dims = [C, W, H]
struct ImageDimensions
{
    size_t m_width, m_height, m_numChannels;
    // interpret a TensorShape as an image, according to the given layout kind
    // Throws InvalidArgument if the shape is not rank 3.
    ImageDimensions(const TensorShape & shape, ImageLayoutKind imageLayoutKind)
    {
        if (shape.GetRank() != 3)
            InvalidArgument("Convolution operation currently only supports 1D or 2D convolution on 3D tensors.");
        if (imageLayoutKind == ImageLayoutKind::CHW)
        {
            m_width       = shape[0];
            m_height      = shape[1];
            m_numChannels = shape[2];
        }
        else if (imageLayoutKind == ImageLayoutKind::HWC)
        {
            m_width       = shape[1];
            m_height      = shape[2];
            m_numChannels = shape[0];
        }
        else LogicError("ImageDimensions: Invalid ImageLayoutKind"); // was "WHC:", a stale prefix from a renamed function
    }
    // construct directly from explicit image dimensions
    ImageDimensions(size_t width, size_t height, size_t numChannels) : m_width(width), m_height(height), m_numChannels(numChannels) {}
    // interpret image dimensions as a TensorShape, ordered per the given layout kind
    static TensorShape AsTensorShape(size_t width, size_t height, size_t numChannels, ImageLayoutKind imageLayoutKind/* = ImageLayoutKind::HWC*/)
    {
        if (imageLayoutKind == ImageLayoutKind::CHW) return TensorShape(width, height, numChannels);
        else if (imageLayoutKind == ImageLayoutKind::HWC) return TensorShape(numChannels, width, height);
        else LogicError("ImageLayout: Invalid ImageLayoutKind");
    }
    // interpret this image as a TensorShape, ordered per the given layout kind
    TensorShape AsTensorShape(ImageLayoutKind imageLayoutKind) const { return AsTensorShape(m_width, m_height, m_numChannels, imageLayoutKind); } // const: reads members only
};

}}}
17 changes: 5 additions & 12 deletions Source/ComputationNetworkLib/ComputationNode.h
Original file line number Diff line number Diff line change
Expand Up @@ -503,9 +503,10 @@ namespace Microsoft { namespace MSR { namespace CNTK {
}

const char * mbSizeMark = child->m_pMBLayout ? "MBSize " : "";
if (child->m_sampleLayout.GetRank() == 3 && (child->m_sampleLayout.GetWidth() != 1 || child->m_sampleLayout.GetNumChannels() != 1)) // looks like an image: use WHC notation
if (child->m_sampleLayout.GetRank() == 3 && (child->m_sampleLayout[1] != 1 || child->m_sampleLayout[0] != 1)) // looks like an image: use WHC notation
fprintf(stderr, "%ls[%lu {W=%lu, H=%lu, C=%lu}, %s%lu]", child->NodeName().c_str(), child->GetNumRows(),
child->m_sampleLayout.GetWidth(), child->m_sampleLayout.GetHeight(), child->m_sampleLayout.GetNumChannels(), mbSizeMark, child->GetNumCols());
child->m_sampleLayout[1], child->m_sampleLayout[2], child->m_sampleLayout[0], mbSizeMark, child->GetNumCols());
//BUGBUG: This ^^ will print based on the old legacy layout, and we have no way of knowing here whether that is correct.
else if (child->m_sampleLayout.GetRank() > 1) // tensor: output the tensor dimensions --TODO: there will be no numRows in the future, only the tensor
fprintf(stderr, "%ls[%lu [%s], %s%lu]", child->NodeName().c_str(), child->GetNumRows(), string(child->m_sampleLayout).c_str(), mbSizeMark, child->GetNumCols());
else
Expand Down Expand Up @@ -540,14 +541,6 @@ namespace Microsoft { namespace MSR { namespace CNTK {
return !g_shareNodeValueMatrices || m_outputNeededDuringBackprop;
}

// TODO: Remove this.
// used from:
// - Plus/Minus/ElementTimesNode --> replace by max dim over inputs. Make this standard behavior for all binary element-wise ops.
bool IsInputAnImage(const size_t index) const
{
return m_inputs[index]->m_sampleLayout.IsInputAnImage();
}

const size_t GetNumInputs() const { return m_inputs.size(); }

virtual void SetInput(const size_t childIndex, const ComputationNodeBasePtr& node) = 0;
Expand Down Expand Up @@ -1528,7 +1521,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {
#define UsingComputationNodeMembers /*without OperationName; needed to support inconsistent pattern of InputValue--TODO: This comment it out of date. */ \
protected: \
typedef shared_ptr<ComputationNode<ElemType>> ComputationNodePtr; \
using Base::m_deviceId; using Base::SetDims; using Base::SetDims1; using Base::SetNumCols; using Base::GetNumRows; using Base::GetNumCols; using Base::UpdateFunctionValuesSize; using Base::LoadValue; \
using Base::m_deviceId; using Base::GetDeviceId; using Base::SetDims; using Base::SetDims1; using Base::SetNumCols; using Base::GetNumRows; using Base::GetNumCols; using Base::UpdateFunctionValuesSize; using Base::LoadValue; \
using Base::m_pMBLayout; using Base::GetNumTimeSteps; using Base::GetNumParallelSequences; \
using Base::MaskMissingColumnsToZero; using Base::MaskMissingValueColumnsToZero; using Base::MaskMissingGradientColumnsToZero; using Base::InvalidateMissingValueColumns; using Base::InvalidateMissingGradientColumns; \
using Base::DataFor; using Base::ValueFor; using Base::Gradient; using Base::GradientFor; \
Expand All @@ -1547,7 +1540,7 @@ protected: \
using Base::DumpNodeInfo; using Base::EnumerateNodes; \
using Base::HasMBLayout; using Base::GetMBLayout; using Base::LinkToMBLayout; \
using Base::Input; using Base::SetInput; \
using Base::IsInputAnImage; using Base::IsEqualTo; using Base::IsOutputOlderThanInputs; using Base::IsLeaf; using Base::SetParameterUpdateRequired; \
using Base::IsEqualTo; using Base::IsOutputOlderThanInputs; using Base::IsLeaf; using Base::SetParameterUpdateRequired; \
using Base::Load; \
using Base::PrintNodeValuesToFile; using Base::PrintSelfBeforeValidation; \
using Base::Save; using Base::UpdateFunctionMBSize; \
Expand Down
Loading

0 comments on commit 8fc4b00

Please sign in to comment.