Skip to content

Commit

Permalink
Added simple deconv sample.
Browse files Browse the repository at this point in the history
  • Loading branch information
Alexey Kamenev committed Jun 14, 2016
1 parent c5a6f11 commit 75dbb2c
Show file tree
Hide file tree
Showing 8 changed files with 185 additions and 12 deletions.
102 changes: 102 additions & 0 deletions Examples/Image/MNIST/Config/04_DeConv.cntk
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
# Parameters can be overwritten on the command line
# for example: cntk configFile=myConfigFile RootDir=../..
# For running from Visual Studio add
# currentDirectory=$(SolutionDir)/<path to corresponding data folder>
RootDir = ".."

ConfigDir = "$RootDir$/Config"
DataDir = "$RootDir$/Data"
OutputDir = "$RootDir$/Output"
ModelDir = "$OutputDir$/Models"

deviceId = 0
imageLayout = "cudnn"
# Override the above as follows when running on CPU:
# deviceId = -1
# Note: Compared to GPU, this runs very slow.

command = train:test

precision = "float"
modelPath = "$ModelDir$/04_DeConv"
ndlMacros = "$ConfigDir$/Macros.ndl"

# uncomment the following line to write logs to a file
# stderr = "$OutputDir$/04_DeConv_out"
traceLevel=1
numMBsToShowResult=500

prefetch=true

# If set to true, always initialize the network on CPU, making initialization consistent across CPU and GPU targets (for testing).
initOnCPUOnly=true

#######################################
# TRAINING CONFIG #
#######################################

# Trains the deconvolutional autoencoder defined in 04_DeConv.ndl.
train = [
action = "train"

NDLNetworkBuilder = [
networkDescription = "$ConfigDir$/04_DeConv.ndl"
]

SGD = [
epochSize = 60000
minibatchSize = 32
# Learning rate per minibatch: 1 for the first 5 epochs, then 0.03 afterwards.
learningRatesPerMB = 1*5:0.03
# Momentum per minibatch: 0 for the first 10 epochs, then 0.7 afterwards.
momentumPerMB = 0*10:0.7
maxEpochs = 15
]

# Note: this reader crashes if randomization is turned on.
reader = [
readerType = "UCIFastReader"
# To get the data (Train-28x28.txt) please run `python mnist_convert.py`
# from the 'AdditionalFiles' folder. See README.md for details.
file = "$DataDir$/Train-28x28.txt"

# Column 1 onwards: 28x28 = 784 raw pixel values.
features = [
dim = 784
start = 1
]

# Column 0: the digit label (0-9), mapped via labelsmap.txt.
labels = [
dim = 1
start = 0
labelDim = 10
labelMappingFile = "$DataDir$/labelsmap.txt"
]
]
]

#######################################
# TEST CONFIG #
#######################################

# Evaluates the trained model on the MNIST test set.
test = [
# Quoted for consistency with the train section ("train"); the unquoted
# form also happens to parse, but the config style elsewhere uses strings.
action = "test"
minibatchSize = 16

NDLNetworkBuilder = [
networkDescription = "$ConfigDir$/04_DeConv.ndl"
]

reader = [
readerType = "UCIFastReader"
file = "$DataDir$/Test-28x28.txt"

# Column 1 onwards: 28x28 = 784 raw pixel values.
features = [
dim = 784
start = 1
]

# Column 0: the digit label (0-9), mapped via labelsmap.txt.
labels = [
dim = 1
start = 0
labelDim = 10
labelMappingFile = "$DataDir$/labelsmap.txt"
]
]
]
60 changes: 60 additions & 0 deletions Examples/Image/MNIST/Config/04_DeConv.ndl
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# macros to include
load = ndlMnistMacros

# the actual NDL that defines the network
run = DNN

# Shared input definitions: 28x28 grayscale MNIST images and 10-class labels.
ndlMnistMacros = [
imageW = 28
imageH = 28
imageC = 1
labelDim = 10

features = ImageInput(imageW, imageH, imageC, imageLayout=$imageLayout$)
# 0.00390625 = 1/256: scales raw byte pixel values into [0, 1).
featScale = Constant(0.00390625)
featScaled = Scale(featScale, features)
labels = InputValue(labelDim)
]

# Convolutional autoencoder: conv -> maxpool -> unpool -> deconv, trained to
# reconstruct the (scaled) input image with a square-error criterion.
DNN=[
# conv1: 5x5 kernel, 16 feature maps, stride 2 in both directions
kW1 = 5
kH1 = 5
cMap1 = 16
hStride1 = 2
vStride1 = 2
wScale1 = 10
bValue1 = 1
# weight[cMap1, kW1 * kH1 * inputChannels]
# Conv2DReLULayer is defined in Macros.ndl
conv1 = Conv2DReLULayer(featScaled, cMap1, 25, kW1, kH1, hStride1, vStride1, wScale1, bValue1)

# pool1: 2x2 max pooling, stride 2
pool1W = 2
pool1H = 2
pool1hStride = 2
pool1vStride = 2
# MaxPooling is a standard NDL node.
pool1 = MaxPooling(conv1, pool1W, pool1H, pool1hStride, pool1vStride, imageLayout=$imageLayout$)

# unpool1: reverses pool1 using conv1 as the pooling input to recover argmax positions
unpool1 = MaxNDUnpooling(pool1, conv1, pool1W, pool1H, pool1hStride, pool1vStride)

# deconv1: transposed convolution mapping cMap1 feature maps back to imageC channels
# lpad1/upad1: explicit lower/upper padding for the transposed convolution
lpad1 = 2
upad1 = 1
# weight[cMap2, kW2 * kH2 * cMap1]
# DeconvReLULayer is defined in Macros.ndl
deconv1 = DeconvReLULayer(unpool1, kW1, kH1, imageC, 25, cMap1, hStride1, vStride1, lpad1, upad1, wScale1, bValue1)

# Reconstruction loss against the scaled input; labels are unused in this autoencoder.
mse = SquareError(featScaled, deconv1)
#err = ErrorPrediction(labels, ol)

# Special Nodes
FeatureNodes = (features)
#LabelNodes = (labels)
CriterionNodes = (mse)
#EvalNodes = (err)
OutputNodes = (deconv1)
]

16 changes: 12 additions & 4 deletions Examples/Image/MNIST/Config/Macros.ndl
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,8 @@ ConvND(w, inp, kW, kH, inMap, outMap, hStride, vStride) = [
c = Convolution(w, inp, {kW, kH, inMap}, mapCount=outMap, stride={hStride, vStride, inMap}, sharing={true, true, true}, autoPadding={true, true, false}, lowerPad=0, upperPad=0, imageLayout=$imageLayout$)
]

DeConv(w, inp, k, inMap, outMap, hStride, vStride, lpad, upad) = [
c = Convolution(w, inp, {k, k, inMap}, mapCount=outMap, stride={hStride, vStride, inMap}, sharing={true, true, true}, autoPadding=false, lowerPad={lpad, lpad, 0}, upperPad={upad, upad, 0}, transpose=1, imageLayout=$imageLayout$)
# Transposed ("de-")convolution: a Convolution node with transpose=1 and
# explicit lower/upper padding instead of auto-padding.
DeConv(w, inp, kW, kH, inMap, outMap, hStride, vStride, lpad, upad) = [
c = Convolution(w, inp, {kW, kH, inMap}, mapCount=outMap, stride={hStride, vStride, inMap}, sharing={true, true, true}, autoPadding=false, lowerPad={lpad, lpad, 0}, upperPad={upad, upad, 0}, transpose=1, imageLayout=$imageLayout$)
]

Conv2DReLULayer(inp, outMap, inWCount, kW, kH, hStride, vStride, wScale, bValue) = [
Expand Down Expand Up @@ -88,11 +88,19 @@ ConvBNReLULayer(inp, outMap, inWCount, kW, kH, hStride, vStride, wScale, bValue,
y = RectifiedLinear(c)
]

# Transposed convolution followed by bias add and ReLU.
# weight: [outMap, inWCount]; bias: one value per output plane (inMap).
DeconvReLULayer(inp, kW, kH, inMap, inWCount, outMap, hStride, vStride, lpad, upad, wScale, bValue) = [
w = ConvW(outMap, inWCount, wScale)
b = ConvB(inMap, bValue)
dc = DeConv(w, inp, kW, kH, inMap, outMap, hStride, vStride, lpad, upad)
cpb = Plus(dc, b);
# Bug fix: apply ReLU to the biased sum (cpb), not the raw deconvolution (dc);
# previously the bias term was computed but never used.
out = RectifiedLinear(cpb);
]

# ND max pooling. Also emits the argmax mask alongside the pooled output;
# NOTE(review): 'mask' is not consumed by 'p' here — presumably callers can
# reference it via member access, otherwise it is dead. Verify against usage.
MaxNDPooling(inp, kW, kH, hStride, vStride) = [
mask = MaxPoolingMask(inp, {kW, kH, 1}, stride={hStride, vStride, 1}, autoPadding={true, true, false}, lowerPad=0, upperPad=0, imageLayout=$imageLayout$)
p = Pooling(inp, "max", {kW, kH, 1}, stride={hStride, vStride, 1}, autoPadding={true, true, false}, lowerPad=0, upperPad=0, imageLayout=$imageLayout$)
]

MaxNDUnpooling(inp, mask, kW, kH, hStride, vStride) = [
# ND max unpooling: recomputes the argmax mask from the original pooling
# input (poolInp), then scatters each value of 'inp' back to that position.
MaxNDUnpooling(inp, poolInp, kW, kH, hStride, vStride) = [
mask = MaxPoolingMask(poolInp, {kW, kH, 1}, stride={hStride, vStride, 1}, autoPadding={true, true, false}, lowerPad=0, upperPad=0, imageLayout=$imageLayout$)
up = MaxUnpooling(inp, mask, {kW, kH, 1}, stride={hStride, vStride, 1}, autoPadding={false, false, false}, lowerPad=0, upperPad=0, imageLayout=$imageLayout$)
]
5 changes: 4 additions & 1 deletion Source/ComputationNetworkLib/ConvolutionalNodes.h
Original file line number Diff line number Diff line change
Expand Up @@ -637,7 +637,10 @@ class MaxUnpoolingNode : public ConvolutionNodeBase<ElemType>, public NumInputs<

// Forward pass of max unpooling: scatters each value of input 0 back to the
// position recorded in the pooling mask (input 1) via the convolution engine.
void ForwardProp(const FrameRange& fr) override
{
    // Removed the stale UNUSED(fr); marker — 'fr' is used on every line below.
    const Matrix<ElemType>& input = Input(0)->ValueFor(fr);
    const Matrix<ElemType>& mask = Input(1)->ValueFor(fr);
    Matrix<ElemType> sliceOutputValue = ValueFor(fr);
    m_convEng->MaxUnpooling(input, mask, sliceOutputValue);
}

void Validate(bool isFinalValidationPass) override
Expand Down
2 changes: 1 addition & 1 deletion Source/Math/CPUMatrix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4358,7 +4358,7 @@ void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPU
const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& mask,
CPUMatrix<ElemType>& input) const
{
//#pragma omp parallel for
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
Expand Down
6 changes: 3 additions & 3 deletions Source/Math/ConvolutionEngine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ void ConvolutionEngine<ElemType>::MaxPoolingMask(const Mat& in, Mat& mask)
}

template <class ElemType>
void ConvolutionEngine<ElemType>::MaxUnpooling(const Mat& out, Mat& mask, Mat& in)
void ConvolutionEngine<ElemType>::MaxUnpooling(const Mat& out, const Mat& mask, Mat& in)
{
const auto& g = *m_geometry;
assert(g.InputShape().GetNumElements() == in.GetNumRows());
Expand Down Expand Up @@ -253,7 +253,7 @@ class ReferenceConvolutionEngine : public ConvolutionEngine<ElemType>
in.MaxPoolingMask(m_mpRowCol, *m_mpRowIndices, *m_indices, mask);
}

void MaxUnpoolingCore(const Mat& out, Mat& mask, Mat& in) override
// Reference-engine unpooling: delegates to the matrix-side MaxUnpooling with
// the precomputed row/column geometry tables; 'mask' is read-only here.
void MaxUnpoolingCore(const Mat& out, const Mat& mask, Mat& in) override
{
out.MaxUnpooling(m_mpRowCol, *m_mpRowIndices, *m_indices, mask, in);
}
Expand Down Expand Up @@ -556,7 +556,7 @@ class LegacyConvolutionEngine : public ConvolutionEngine<ElemType>
LogicError("MaxPoolingMask is not implemented for legacy engine.");
}

void MaxUnpoolingCore(const Mat& out, Mat& mask, Mat& in) override
void MaxUnpoolingCore(const Mat& out, const Mat& mask, Mat& in) override
{
UNUSED(out);
UNUSED(mask);
Expand Down
4 changes: 2 additions & 2 deletions Source/Math/ConvolutionEngine.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class MATH_API ConvolutionEngine

void MaxPoolingMask(const Mat& in, Mat& mask);

void MaxUnpooling(const Mat& out, Mat& mask, Mat& in);
void MaxUnpooling(const Mat& out, const Mat& mask, Mat& in);

std::shared_ptr<const ConvolveGeometry> Geometry() const { return m_geometry; }

Expand Down Expand Up @@ -99,7 +99,7 @@ class MATH_API ConvolutionEngine

virtual void MaxPoolingMaskCore(const Mat& in, Mat& mask) = 0;

virtual void MaxUnpoolingCore(const Mat& out, Mat& mask, Mat& in) = 0;
virtual void MaxUnpoolingCore(const Mat& out, const Mat& mask, Mat& in) = 0;

protected:
ConvolveGeometryPtr m_geometry;
Expand Down
2 changes: 1 addition & 1 deletion Source/Math/CuDnnConvolutionEngine.cu
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ protected:
LogicError("MaxPoolingMask is not implemented for cuDNN engine.");
}

void MaxUnpoolingCore(const Mat& out, Mat& mask, Mat& in) override
void MaxUnpoolingCore(const Mat& out, const Mat& mask, Mat& in) override
{
UNUSED(out);
UNUSED(mask);
Expand Down

0 comments on commit 75dbb2c

Please sign in to comment.