Skip to content

Commit

Permalink
Image/QuickE2E now implements both legacy and cudnn layouts, selected…
Browse files Browse the repository at this point in the history
… by a command-line overridable parameter 'useCuDnn' that defaults to cudnn;

added printfs to cuDNN tests
  • Loading branch information
frankseide committed Jan 1, 2016
1 parent b007ad0 commit ac02cb6
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 4 deletions.
8 changes: 5 additions & 3 deletions Tests/EndToEndTests/Image/QuickE2E/cntk.config
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ precision = "float"
command = train:test
deviceId = $DeviceId$

useCuDnn = true # can be overridden by the command line

ndlMacros = "$ConfigDir$/Macros.ndl"

parallelTrain = false
Expand All @@ -20,7 +22,7 @@ train = [

BrainScriptNetworkBuilder = [

useCuDnn = true
useCuDnn = $useCuDnn$

// HACK to enforce the same evaluation order of LearnableParameters as for NDL, so as to get the same randomization
// Nodes are evaluated in sorting order.
Expand All @@ -31,8 +33,8 @@ train = [
convW = Parameter(outMap, inWCount, init="uniform", initValueScale=wScale, initOnCPUOnly=false)
conv = Convolution(convW, inp, kW, kH, outMap, hStride, vStride, zeroPadding=false, imageLayout=if useCuDnn then "cudnn" else "legacy")
convB = if useCuDnn
then Parameter(outMap, 1, init="fixedValue", value=bValue)
else ParameterTensor((1 : 1 : outMap : 1/*col dim*/), init="fixedValue", value=bValue)
then ParameterTensor((1 : 1 : outMap : 1/*col dim*/), init="fixedValue", value=bValue)
else Parameter(outMap, 1, init="fixedValue", value=bValue)
convPlusB = Plus(conv, convB);
out = RectifiedLinear(convPlusB);
]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ COMMAND: currentDirectory=$(SolutionDir)ExampleSetups\Image\MNIST configFil

--- Image/QuickE2E:

COMMAND: configFile=$(SolutionDir)Tests\EndToEndTests\Image\QuickE2E\cntk.config RunDir=$(SolutionDir)Tests\EndToEndTests\Image\_run DataDir=$(SolutionDir)Tests\EndToEndTests\Image\Data ConfigDir=$(SolutionDir)Tests\EndToEndTests\Image\QuickE2E stderr=$(SolutionDir)Tests\EndToEndTests\RunDir\Image\QuickE2E\models\cntkImage.dnn.log DeviceId=0 makeMode=false
COMMAND: configFile=$(SolutionDir)Tests\EndToEndTests\Image\QuickE2E\cntk.config RunDir=$(SolutionDir)Tests\EndToEndTests\Image\_run DataDir=$(SolutionDir)Tests\EndToEndTests\Image\Data ConfigDir=$(SolutionDir)Tests\EndToEndTests\Image\QuickE2E stderr=$(SolutionDir)Tests\EndToEndTests\RunDir\Image\QuickE2E\models\cntkImage.dnn.log DeviceId=0 useCuDnn=false makeMode=false

Simple test
-----------
Expand Down
14 changes: 14 additions & 0 deletions Tests/UnitTests/MathTests/ConvolutionEngineTests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,18 @@ namespace Microsoft { namespace MSR { namespace CNTK { namespace Test

// Returns true when a cuDNN-based convolution engine can be created on GPU device 0.
// ConvFact::Create throws std::runtime_error when cuDNN is not usable (e.g. the build
// lacks cuDNN or no suitable device is present); that is translated into 'false'.
// The stderr printfs are temporary diagnostics to trace test progress.
static bool IsCuDnnSupported()
{
    fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
    try
    {
        // TODO: Will this ever return nullptr?
        return ConvFact::Create(0, ConvFact::EngineType::CuDnn, ImageLayoutKind::CHW) != nullptr;
    }
    catch (const std::runtime_error&) // catch by const reference, not by value (avoids a copy and potential slicing)
    {
        fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
        return false;
    }
    // NOTE(review): a trailing fprintf here was unreachable (both paths above return) and has been removed.
}

BOOST_AUTO_TEST_SUITE(ConvolutionSuite)
Expand All @@ -56,6 +60,7 @@ namespace Microsoft { namespace MSR { namespace CNTK { namespace Test
for (int deviceId : { 0 })
{
// BUGBUG: These will fail depending on whether we built with cuDNN or not. Without cuDNN we should use HWC
fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
auto fact = ConvFact::Create(deviceId, ConvFact::EngineType::Auto, ImageLayoutKind::CHW);
auto tt = typeid(fact).name();
UNUSED(tt);
Expand All @@ -66,12 +71,14 @@ namespace Microsoft { namespace MSR { namespace CNTK { namespace Test
auto convT = fact->CreateConvDescriptor(*inT, *filtT, sW, sH, false);
auto biasT = fact->CreateTensor(1, 1, cmapOut, 1);

fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
vec buf(inW * inH * cmapIn * n);
int seed = 0;
// Create input, cmapIn feature maps, inW x inH each (NCHW format).
std::generate(buf.begin(), buf.end(), [=, &seed]{ return seed++ % (inW * inH * cmapIn); });
SingleMatrix in(inW * inH * cmapIn, n, buf.data(), matrixFlagNormal, deviceId);

fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
seed = 0;
buf.resize(kW * kH * cmapIn * cmapOut);
// Create cmapOut filters, each kW x kH x cmapIn (NCHW format).
Expand All @@ -81,7 +88,9 @@ namespace Microsoft { namespace MSR { namespace CNTK { namespace Test
SingleMatrix out(outW * outH * cmapOut, n, deviceId);
SingleMatrix temp(deviceId);

fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
eng->Forward(*inT, in, *filtT, filt, *convT, *outT, out, temp);
fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);

// Output is in NCHW format.
std::array<float, 4 * 2 * 2> expBuf = {
Expand All @@ -91,19 +100,24 @@ namespace Microsoft { namespace MSR { namespace CNTK { namespace Test
15219.0f, 15921.0f, 18729.0f, 19431.0f
};
SingleMatrix exp(outW * outH * cmapOut, n, expBuf.data(), matrixFlagNormal, deviceId);
fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
BOOST_CHECK_MESSAGE(out.IsEqualTo(exp), "Unexpected convolution output.");
fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);

float b[] = { 1.0f, 2.0f };
SingleMatrix bias(cmapOut, 1, b, matrixFlagNormal, deviceId);

SingleMatrix plusB(outW * outH * cmapOut, n, expBuf.data(), matrixFlagNormal, deviceId);
fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
eng->AddBias(*outT, out, *biasT, bias, plusB);
fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);

// Bias is per-channel.
seed = 0;
std::transform(expBuf.begin(), expBuf.end(), expBuf.begin(),
[=, &seed, &b](const float& a) { return a + b[(seed++ % (outW * outH * cmapOut)) / (outW * outH)]; });
SingleMatrix expPlusB(outW * outH * cmapOut, n, expBuf.data(), matrixFlagNormal, deviceId);
fprintf(stderr, "ConvolutionEngineTests.cpp %d\n", __LINE__);
BOOST_CHECK_MESSAGE(plusB.IsEqualTo(expPlusB), "Unexpected (convolution + bias) output.");
}
}
Expand Down

0 comments on commit ac02cb6

Please sign in to comment.