Extend checks and tests for extended eval interface
Removed m_numberOfSamples, which is redundant
Added more checks
Fixed and extended test cases
Clemens Marschner committed May 25, 2016
1 parent feb7ac0 commit 35986ae
Showing 4 changed files with 128 additions and 82 deletions.
6 changes: 4 additions & 2 deletions Source/Common/Include/Eval.h
@@ -176,10 +176,12 @@ class Eval : public IEvaluateModel<ElemType>, protected Plugin
template<typename ElemType>
struct VariableBuffer
{
size_t m_numberOfSamples = 0;

//
// All elements of a sequence, concatenated.
// For dense inputs, the number of samples is given by the length of
// this vector divided by the product of the tensor dimensions. E.g. for a tensor of dimension
// [2,2] and 12 elements in the buffer, the number of samples is 3.
// For sparse inputs, the number of samples is indicated by the m_colIndices field.
//
std::vector<ElemType> m_buffer;

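As a sketch (not part of this commit), the dense convention described above lets a caller recover the sample count directly from the buffer size and the input's sample layout:

// Illustration only; 'layout' and 'buffer' stand in for an input's sample layout and VariableBuffer<float>.
size_t numRows = layout.GetNumElements();              // e.g. 2 * 2 = 4 for a [2,2] tensor
size_t numSamples = buffer.m_buffer.size() / numRows;  // e.g. 12 / 4 = 3 samples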
34 changes: 31 additions & 3 deletions Source/EvalDll/CNTKEval.cpp
@@ -304,11 +304,39 @@ void CNTKEvalExtended<ElemType>::ForwardPass(const Variables<ElemType>& inputs,
for (auto& input : m_inputMatrices)
{
VariableBuffer<ElemType> buffer = inputs[i];
int numRows = input.second.sampleLayout.GetNumElements();
int numCols = buffer.m_numberOfSamples;
shared_ptr<Matrix<ElemType>> matrix = dynamic_pointer_cast<Matrix<ElemType>>(input.second.matrix);
auto type = matrix->GetMatrixType();
int numRows = input.second.sampleLayout.GetNumElements();

if (type == MatrixType::DENSE)
{
if (buffer.m_buffer.size() % numRows != 0)
{
RuntimeError("Input %ls: Expected input data to be a multiple of %ld, but it is %ld", m_inputNodes[i]->GetName().c_str(), numRows, buffer.m_buffer.size());
}
if (buffer.m_buffer.size() == 0)
{
RuntimeError("Input %ls: Expected at least one element.", m_inputNodes[i]->GetName().c_str());
}
}
else if (type == MatrixType::SPARSE)
{
if (buffer.m_colIndices.size() < 2)
{
RuntimeError("Input %ls: Expected at least one element.", m_inputNodes[i]->GetName().c_str());
}
if (buffer.m_colIndices[0] != 0)
{
RuntimeError("Input %ls: First element of column indices must be 0", m_inputNodes[i]->GetName().c_str());
}
if (buffer.m_colIndices[buffer.m_colIndices.size()-1] != buffer.m_indices.size())
{
RuntimeError("Input %ls: Last element of column indices must be equal to the size of indices (%ld), but was %d", m_inputNodes[i]->GetName().c_str(), buffer.m_indices.size(), buffer.m_colIndices[buffer.m_colIndices.size() - 1]);
}
}

int numCols = type == MatrixType::DENSE ? buffer.m_buffer.size() / numRows : buffer.m_colIndices.size() - 1;
assert(numCols >= 1);
input.second.pMBLayout->Init(1, numCols);
input.second.pMBLayout->AddSequence(0, 0, 0, numCols);

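For reference, a sketch (not part of the commit) of a sparse input that satisfies the checks above, assuming a 3-row SparseInput; the layout is CSC-style, so m_colIndices must start at 0 and end at m_indices.size():

VariableBuffer<float> sparse;
sparse.m_buffer = { 1, 2, 3, 5, 6 };    // non-zero values
sparse.m_indices = { 0, 2, 2, 1, 2 };   // row index of each value
sparse.m_colIndices = { 0, 2, 2, 5 };   // column j spans [m_colIndices[j], m_colIndices[j+1]); column 1 is empty
// numCols = m_colIndices.size() - 1 = 3 samples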
142 changes: 91 additions & 51 deletions Tests/UnitTests/EvalTests/EvalExtendedTests.cpp
@@ -5,6 +5,9 @@

#include "stdafx.h"
#include "EvalTestHelper.h"
#include "fileutil.h"
#include "ExceptionWithCallStack.h"
#include "ScriptableObjects.h"

using namespace Microsoft::MSR::CNTK;

@@ -87,13 +90,16 @@ BOOST_AUTO_TEST_CASE(EvalConstantPlusTest)
// Allocate the output values layer
std::vector<VariableBuffer<float>> outputBuffer(1);

// Allocate the input values layer (empty)

std::vector<VariableBuffer<float>> inputBuffer;

// We can call the evaluate method and get back the results...
eval->ForwardPass(inputBuffer, outputBuffer);

BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[0], 3 /* 1 + 2 */);
std::vector<float> expected{ 3 /* 1 + 2 */ };
auto buf = outputBuffer[0].m_buffer;
BOOST_CHECK_EQUAL_COLLECTIONS(buf.begin(), buf.end(), expected.begin(), expected.end());

eval->Destroy();
}
@@ -121,15 +127,16 @@ BOOST_AUTO_TEST_CASE(EvalScalarTimesTest)

// Allocate the input values layer
std::vector<VariableBuffer<float>> inputBuffer(1);
inputBuffer[0].m_numberOfSamples = 1;
inputBuffer[0].m_buffer.push_back(2);
inputBuffer[0].m_indices.push_back(0);
inputBuffer[0].m_colIndices.push_back(0);

// We can call the evaluate method and get back the results...
eval->ForwardPass(inputBuffer, outputBuffer);

BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[0], 6);
std::vector<float> expected{ 6 };
auto buf = outputBuffer[0].m_buffer;
BOOST_CHECK_EQUAL_COLLECTIONS(buf.begin(), buf.end(), expected.begin(), expected.end());

eval->Destroy();
}
@@ -159,7 +166,6 @@ BOOST_AUTO_TEST_CASE(EvalScalarTimesDualOutputTest)

// Allocate the input values layer
std::vector<VariableBuffer<float>> inputBuffer(1);
inputBuffer[0].m_numberOfSamples = 1;
inputBuffer[0].m_buffer.push_back(2);
inputBuffer[0].m_indices.push_back(0);
inputBuffer[0].m_colIndices.push_back(0);
@@ -168,48 +174,68 @@ BOOST_AUTO_TEST_CASE(EvalScalarTimesDualOutputTest)
// TODO: Indicate to ForwardPass that we want output o2 only
eval->ForwardPass(inputBuffer, outputBuffer);

BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[0], 6);
std::vector<float> expected{ 6 };
auto buf = outputBuffer[0].m_buffer;
BOOST_CHECK_EQUAL_COLLECTIONS(buf.begin(), buf.end(), expected.begin(), expected.end());

eval->Destroy();
}

BOOST_AUTO_TEST_CASE(EvalDenseTimesTest)
{
std::string modelDefinition =
"deviceId = -1 \n"
"precision = \"float\" \n"
"traceLevel = 1 \n"
"run=NDLNetworkBuilder \n"
"NDLNetworkBuilder=[ \n"
"i1 = Input(4) \n"
"o1 = Times(i1, Constant(2), tag=\"output\") \n"
"FeatureNodes = (i1) \n"
"] \n";

VariableSchema inputLayouts;
VariableSchema outputLayouts;
IEvaluateModelExtended<float> *eval;
eval = SetupNetworkAndGetLayouts(modelDefinition, inputLayouts, outputLayouts);

// Allocate the output values layer
std::vector<VariableBuffer<float>> outputBuffer(1);

// Allocate the input values layer
std::vector<VariableBuffer<float>> inputBuffer(1);
inputBuffer[0].m_numberOfSamples = 4;
inputBuffer[0].m_buffer = {1, 2, 3, 4};
inputBuffer[0].m_indices.push_back(0);
inputBuffer[0].m_colIndices.push_back(0);

// We can call the evaluate method and get back the results...
eval->ForwardPass(inputBuffer, outputBuffer);

BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[0], 2);
BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[1], 4);
BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[2], 6);
BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[3], 8);

eval->Destroy();
try
{
std::string modelDefinition =
"deviceId = -1 \n"
"precision = \"float\" \n"
"traceLevel = 1 \n"
"run=BrainScriptNetworkBuilder \n"
"BrainScriptNetworkBuilder=[ \n"
"i1 = Input(4) \n"
"o1 = Times(ConstantTensor(2, 1:4), i1, tag=\"output\") \n"
"FeatureNodes = (i1) \n"
"] \n";

VariableSchema inputLayouts;
VariableSchema outputLayouts;
IEvaluateModelExtended<float> *eval;
eval = SetupNetworkAndGetLayouts(modelDefinition, inputLayouts, outputLayouts);

// Allocate the output values layer
std::vector<VariableBuffer<float>> outputBuffer(1);

// Number of inputs must adhere to the schema
std::vector<VariableBuffer<float>> inputBuffer1(0);
BOOST_REQUIRE_THROW(eval->ForwardPass(inputBuffer1, outputBuffer), std::exception); // Not enough inputs

// Number of elements in the input must adhere to the schema
std::vector<VariableBuffer<float>> inputBuffer(1);
inputBuffer[0].m_buffer = { 1, 2, 3 };
BOOST_REQUIRE_THROW(eval->ForwardPass(inputBuffer, outputBuffer), std::exception); // Not enough elements in the sample

// Output values and shape must be correct.
inputBuffer[0].m_buffer = { 1, 2, 3, 4 };
eval->ForwardPass(inputBuffer, outputBuffer);

std::vector<float> expected{ 20 };
auto buf = outputBuffer[0].m_buffer;
BOOST_CHECK_EQUAL_COLLECTIONS(buf.begin(), buf.end(), expected.begin(), expected.end());

eval->Destroy();
}
catch (const ScriptableObjects::ScriptingException& err)
{
fprintf(stderr, "\n");
err.PrintError(L"EXCEPTION occurred");
throw;
}
catch (const IExceptionWithCallStackBase& err)
{
fprintf(stderr, "\n");
fprintf(stderr, "%s", err.CallStack());
fprintf(stderr, "EXCEPTION occurred: %s\n", dynamic_cast<const std::exception&>(err).what());
throw;
}
}
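For reference: ConstantTensor(2, 1:4) is a row of four 2s, so the single expected output is 2*1 + 2*2 + 2*3 + 2*4 = 20.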

BOOST_AUTO_TEST_CASE(EvalSparseTimesTest)
@@ -220,8 +246,8 @@ BOOST_AUTO_TEST_CASE(EvalSparseTimesTest)
"traceLevel = 1 \n"
"run=NDLNetworkBuilder \n"
"NDLNetworkBuilder=[ \n"
"i1 = SparseInput(9) \n"
"o1 = Times(i1, Constant(2), tag=\"output\") \n"
"i1 = SparseInput(3) \n"
"o1 = Times(Constant(2, rows=1, cols=3), i1, tag=\"output\") \n"
"FeatureNodes = (i1) \n"
"] \n";

@@ -235,16 +261,30 @@ BOOST_AUTO_TEST_CASE(EvalSparseTimesTest)

// Allocate the input values layer
std::vector<VariableBuffer<float>> inputBuffer(1);
inputBuffer[0].m_numberOfSamples = 9;
inputBuffer[0].m_buffer = {1, 2, 3, 4, 5, 6};
inputBuffer[0].m_indices = {0, 2, 3, 6};
inputBuffer[0].m_colIndices = {0, 2, 2, 0, 1, 2};
inputBuffer[0].m_buffer = {1, 2, 3, 5, 6};
inputBuffer[0].m_indices = {0, 2, 2, 1, 2};

// We can call the evaluate method and get back the results...
// TODO: Enable when SparseInput is supported
//eval->ForwardPass(inputBuffer, outputBuffer);
//BOOST_CHECK_EQUAL(outputBuffer[0].m_buffer[0], 2);
inputBuffer[0].m_colIndices = {};
BOOST_REQUIRE_THROW(eval->ForwardPass(inputBuffer, outputBuffer), std::exception); // Empty input

inputBuffer[0].m_colIndices = { 0 };
BOOST_REQUIRE_THROW(eval->ForwardPass(inputBuffer, outputBuffer), std::exception); // Empty input

inputBuffer[0].m_colIndices = { 1, 0 };
BOOST_REQUIRE_THROW(eval->ForwardPass(inputBuffer, outputBuffer), std::exception); // Illegal: First entry must be 0

inputBuffer[0].m_colIndices = { 0, 2, 2, 4 };
BOOST_REQUIRE_THROW(eval->ForwardPass(inputBuffer, outputBuffer), std::exception); // Illegal: Last entry must be indices.size()

inputBuffer[0].m_colIndices = { 0, 2, 2, 5 };

// We can call the evaluate method and get back the results...
eval->ForwardPass(inputBuffer, outputBuffer);

// [2,2,2] * [1,2,3]^T etc.
std::vector<float> expected{ 6, 0, 28 };
auto buf = outputBuffer[0].m_buffer;
BOOST_CHECK_EQUAL_COLLECTIONS(buf.begin(), buf.end(), expected.begin(), expected.end());
eval->Destroy();
}
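For reference: Constant(2, rows=1, cols=3) is the row [2, 2, 2], so each output equals twice the sum of the non-zero values in the corresponding column of the sparse input: 2*(1+2) = 6 for column 0, 0 for the empty column 1, and 2*(3+5+6) = 28 for column 2.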

28 changes: 2 additions & 26 deletions Tests/UnitTests/EvalTests/EvalTests.vcxproj
@@ -123,35 +123,11 @@
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
<DelayLoadDLLs>%(DelayLoadDLLs);nvml.dll;$(CudaRuntimeDll)</DelayLoadDLLs>
</Link>
<PostBuildEvent>
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">copy "$(OutDir)..\evaldll.dll" "$(TargetDir)"</Command>
</PostBuildEvent>
<PostBuildEvent>
<Message Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Copy EvalDLL to the UnitTest output directory</Message>
</PostBuildEvent>
<PostBuildEvent>
<Command Condition="'$(Configuration)|$(Platform)'=='Release|x64'">copy "$(OutDir)..\evaldll.dll" "$(TargetDir)"</Command>
</PostBuildEvent>
<PostBuildEvent>
<Message Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Copy EvalDLL to the UnitTest output directory</Message>
</PostBuildEvent>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="$(CpuOnlyBuild)">
<ClCompile>
<PreprocessorDefinitions>CPUONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</ClCompile>
<PostBuildEvent>
<Command Condition="'$(Configuration)|$(Platform)'=='Release_CpuOnly|x64'">copy "$(OutDir)..\evaldll.dll" "$(TargetDir)"</Command>
</PostBuildEvent>
<PostBuildEvent>
<Message Condition="'$(Configuration)|$(Platform)'=='Release_CpuOnly|x64'">Copy EvalDLL to the UnitTest output directory</Message>
</PostBuildEvent>
<PostBuildEvent>
<Command Condition="'$(Configuration)|$(Platform)'=='Debug_CpuOnly|x64'">copy "$(OutDir)..\evaldll.dll" "$(TargetDir)"</Command>
</PostBuildEvent>
<PostBuildEvent>
<Message Condition="'$(Configuration)|$(Platform)'=='Debug_CpuOnly|x64'">Copy EvalDLL to the UnitTest output directory</Message>
</PostBuildEvent>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="EvalTestHelper.h" />
@@ -177,10 +153,10 @@
<CuDnnDll Condition="$(GpuBuild) And Exists('$(OutDir)..\cudnn64_4.dll')">$(OutDir)..\cudnn64_4.dll</CuDnnDll>
</PropertyGroup>
<ItemGroup>
<UnitTestDependencies Include="$(OutDir)..\Math.dll;$(OutDir)..\libacml_mp_dll.dll;$(OutDir)..\libifcoremd.dll;$(OutDir)..\libifportmd.dll;$(OutDir)..\libiomp*.dll;$(OutDir)..\libmmd.dll;$(OutDir)..\svml_dispmd.dll;" />
<UnitTestDependencies Include="$(OutDir)..\CNTK.Core.BS;$(OutDir)..\evaldll.dll;$(OutDir)..\Math.dll;$(OutDir)..\libacml_mp_dll.dll;$(OutDir)..\libifcoremd.dll;$(OutDir)..\libifportmd.dll;$(OutDir)..\libiomp*.dll;$(OutDir)..\libmmd.dll;$(OutDir)..\svml_dispmd.dll;" />
</ItemGroup>
<ItemGroup Condition="$(GpuBuild)">
<UnitTestDependencies Include="$(OutDir)..\cuda*.dll;$(OutDir)..\svml_dispmd.dll;$(CuDnnDll);$(UnitTestDependencies)" />
<UnitTestDependencies Include="$(OutDir)..\CNTK.Core.BS;$(OutDir)..\evaldll.dll;$(OutDir)..\cuda*.dll;$(OutDir)..\svml_dispmd.dll;$(CuDnnDll);$(UnitTestDependencies)" />
</ItemGroup>
<Copy SourceFiles="@(UnitTestDependencies)" DestinationFolder="$(OutDir)" SkipUnchangedFiles="true">
<Output TaskParameter="DestinationFiles" ItemName="NewFileWrites" />
