
Commit 27641d8

normalized comment separator from /// to //
frankseide committed Jan 22, 2016
1 parent 03a4fcb commit 27641d8
Showing 29 changed files with 202 additions and 202 deletions.
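
The change is mechanical: ordinary explanatory comments written with the Doxygen-style "///" marker are rewritten to use plain "//". The commit itself does not state a rationale, but the usual convention, assumed here, is that "///" is reserved for documentation comments that tools such as Doxygen attach to the following declaration, while "//" marks comments that merely explain the code. A minimal, self-contained C++ sketch of that distinction (illustrative only, not taken from the CNTK sources):

#include <string>

/// Documentation comment: "///" is conventionally reserved for Doxygen-style
/// descriptions of the declaration that follows.
std::string GetLine()
{
    return "example";
}

int main()
{
    // Plain comment: this commit rewrites inline explanatory comments like this
    // one from "///" to "//", leaving "///" for real documentation.
    std::string str = GetLine();
    return str.empty() ? 1 : 0;
}
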
6 changes: 3 additions & 3 deletions Source/ActionsLib/OtherActions.cpp
@@ -271,7 +271,7 @@ void DoWriteWordAndClassInfo(const ConfigParameters& config)
}
std::unordered_map<string, double> v_count;

-/// get line
+// get line
string str;
vector<string> vstr;
long long prevClsIdx = -1;
@@ -422,7 +422,7 @@ void DoWriteWordAndClassInfo(const ConfigParameters& config)
long long clsIdx = nbrCls > 0 ? m_class[i] : 0;
if (nbrCls > 0 && clsIdx != prevClsIdx)
{
-cls2idx(clsIdx, 0) = (ElemType) i; /// the left boundary of clsIdx
+cls2idx(clsIdx, 0) = (ElemType) i; // the left boundary of clsIdx
prevClsIdx = m_class[i];
}
ofvocab << " " << i << "\t " << m_count[i] << "\t" << m_words[i] << "\t" << clsIdx << std::endl;
@@ -431,7 +431,7 @@ void DoWriteWordAndClassInfo(const ConfigParameters& config)
ofvocab.close();
if (nbrCls > 0)
{
-/// write the outputs
+// write the outputs
msra::files::make_intermediate_dirs(s2ws(outputWord2Cls));
ofstream ofp(outputWord2Cls.c_str());
if (!ofp)
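
The hunks above are in DoWriteWordAndClassInfo, which records, for each word class, the index of the first word that belongs to it ("the left boundary of clsIdx"). Below is a minimal, self-contained sketch of that bookkeeping, assuming a vocabulary already grouped by class; the names wordClass and classLeftBoundary are illustrative and not CNTK's.

#include <cstdio>
#include <vector>

int main()
{
    // class id of each word, in vocabulary order (words already grouped by class)
    std::vector<int> wordClass = {0, 0, 0, 1, 1, 2, 2, 2, 2};
    const int numClasses = 3;

    std::vector<int> classLeftBoundary(numClasses, 0);
    int prevClass = -1;
    for (int i = 0; i < (int) wordClass.size(); i++)
    {
        int cls = wordClass[i];
        if (cls != prevClass)
        {
            classLeftBoundary[cls] = i; // first word index belonging to class 'cls'
            prevClass = cls;
        }
    }

    for (int c = 0; c < numClasses; c++)
        printf("class %d starts at word index %d\n", c, classLeftBoundary[c]);
    return 0;
}
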
68 changes: 34 additions & 34 deletions Source/CNTK/SimpleNetworkBuilder.cpp
@@ -204,7 +204,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildSimpleRNN()
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

pastValue = builder.PastValue(NULL, m_defaultHiddenActivity, m_layerSizes[1], 1);
-/// unless there is a good algorithm to detect loops, use this explicit setup
+// unless there is a good algorithm to detect loops, use this explicit setup
output = ApplyNonlinearFunction(
builder.Plus(
builder.Times(u, input), builder.Times(w, pastValue)),
@@ -235,7 +235,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildSimpleRNN()
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

pastValue = builder.PastValue(NULL, m_defaultHiddenActivity, (size_t) m_layerSizes[i + 1], 1);
-/// unless there is a good algorithm to detect loops, use this explicit setup
+// unless there is a good algorithm to detect loops, use this explicit setup
output = ApplyNonlinearFunction(
builder.Plus(
builder.Times(u, input), builder.Times(w, pastValue)),
@@ -316,7 +316,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildClassEntropyNetwork()
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

pastValue = builder.PastValue(NULL, m_defaultHiddenActivity, m_layerSizes[1], 1);
-/// unless there is a good algorithm to detect loops, use this explicit setup
+// unless there is a good algorithm to detect loops, use this explicit setup
output = ApplyNonlinearFunction(
builder.Plus(
builder.Times(u, input), builder.Times(w, pastValue)),
@@ -346,7 +346,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildClassEntropyNetwork()
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

pastValue = builder.PastValue(NULL, m_defaultHiddenActivity, (size_t) m_layerSizes[i + 1], 1);
-/// unless there is a good algorithm to detect loops, use this explicit setup
+// unless there is a good algorithm to detect loops, use this explicit setup
output = ApplyNonlinearFunction(
builder.Plus(
builder.Times(u, input), builder.Times(w, pastValue)),
@@ -366,13 +366,13 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildClassEntropyNetwork()
}
}

-/// need to have [input_dim x output_dim] matrix
-/// e.g., [200 x 10000], where 10000 is the vocabulary size
-/// this is for speed-up issue as per word matrix can be simply obtained using column slice
+// need to have [input_dim x output_dim] matrix
+// e.g., [200 x 10000], where 10000 is the vocabulary size
+// this is for speed-up issue as per word matrix can be simply obtained using column slice
w = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"W%d", numHiddenLayers), m_layerSizes[numHiddenLayers], m_layerSizes[numHiddenLayers + 1]);
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

-/// the label is a dense matrix. each element is the word index
+// the label is a dense matrix. each element is the word index
label = builder.CreateInputNode(L"labels", 4);

clsweight = builder.CreateLearnableParameter(L"WeightForClassPostProb", m_nbrCls, m_layerSizes[numHiddenLayers]);
@@ -444,7 +444,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildConditionalLSTMNetwor
{
// output = (ComputationNodePtr)BuildLSTMNodeComponent(randomSeed, 0, m_layerSizes[offset] * (offset ? m_lookupTableOrder : 1), m_layerSizes[offset + 1], input);
output = (ComputationNodePtr) BuildLSTMComponent(randomSeed, 0, m_layerSizes[offset] * (offset ? m_lookupTableOrder : 1), m_layerSizes[offset + 1], input);
-/// previously used function. now uses LSTMNode which is correct and fast
+// previously used function. now uses LSTMNode which is correct and fast
input = output;
for (int i = 1 + offset; i < numHiddenLayers; i++)
{
@@ -458,7 +458,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildConditionalLSTMNetwor
}
}

-/// serve as a global bias term
+// serve as a global bias term
gt = builder.CreateInputNode(L"binaryFeature", m_auxFeatDim);
m_net->FeatureNodes().push_back(gt);
e = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"AuxTrans%d", 0),
@@ -468,13 +468,13 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildConditionalLSTMNetwor
output = builder.Plus(input, u, L"PlusGlobalBias");
input = output;

-/// need to have [input_dim x output_dim] matrix
-/// e.g., [200 x 10000], where 10000 is the vocabulary size
-/// this is for speed-up issue as per word matrix can be simply obtained using column slice
+// need to have [input_dim x output_dim] matrix
+// e.g., [200 x 10000], where 10000 is the vocabulary size
+// this is for speed-up issue as per word matrix can be simply obtained using column slice
w = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"W%d", numHiddenLayers), m_layerSizes[numHiddenLayers], m_layerSizes[numHiddenLayers + 1]);
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

-/// the label is a dense matrix. each element is the word index
+// the label is a dense matrix. each element is the word index
label = builder.CreateInputNode(L"labels", 4);

clsweight = builder.CreateLearnableParameter(L"WeightForClassPostProb", m_nbrCls, m_layerSizes[numHiddenLayers]);
@@ -542,7 +542,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildLogBilinearNetworkFro
}

int recur_idx = 0;
-/// unless there is a good algorithm to detect loops, use this explicit setup
+// unless there is a good algorithm to detect loops, use this explicit setup
int ik = 1;
output = input;
while (ik <= m_maOrder)
@@ -675,7 +675,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildNeuralProbNetworkFrom
Wxi = builder.CreateLearnableParameter(L"WXI", m_layerSizes[1], m_layerSizes[0]);
m_net->InitLearnableParameters(Wxi, m_uniformInit, randomSeed++, m_initValueScale);

-/// unless there is a good algorithm to detect loops, use this explicit setup
+// unless there is a good algorithm to detect loops, use this explicit setup
it = builder.Plus(
builder.Tanh(
builder.Plus(
@@ -994,7 +994,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildSeqTrnLSTMNetworkFrom
outputFromEachLayer[1] = input;
}

-/// direct connect from input node to output node
+// direct connect from input node to output node

int recur_idx = 0;
int offset = m_lookupTableOrder > 0 ? 1 : 0;
@@ -1097,7 +1097,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildCLASSLSTMNetworkFromD
{
// output = (ComputationNodePtr)BuildLSTMNodeComponent(randomSeed, 0, m_layerSizes[offset] * (offset ? m_lookupTableOrder : 1), m_layerSizes[offset + 1], input);
output = (ComputationNodePtr) BuildLSTMComponent(randomSeed, 0, m_layerSizes[offset] * (offset ? m_lookupTableOrder : 1), m_layerSizes[offset + 1], input);
-/// previously used function. now uses LSTMNode which is correct and fast
+// previously used function. now uses LSTMNode which is correct and fast
input = output;
for (int i = 1 + offset; i < numHiddenLayers; i++)
{
@@ -1111,13 +1111,13 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildCLASSLSTMNetworkFromD
}
}

-/// need to have [input_dim x output_dim] matrix
-/// e.g., [200 x 10000], where 10000 is the vocabulary size
-/// this is for speed-up issue as per word matrix can be simply obtained using column slice
+// need to have [input_dim x output_dim] matrix
+// e.g., [200 x 10000], where 10000 is the vocabulary size
+// this is for speed-up issue as per word matrix can be simply obtained using column slice
w = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"W%d", numHiddenLayers), m_layerSizes[numHiddenLayers], m_layerSizes[numHiddenLayers + 1]);
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

-/// the label is a dense matrix. each element is the word index
+// the label is a dense matrix. each element is the word index
label = builder.CreateInputNode(L"labels", 4);

clsweight = builder.CreateLearnableParameter(L"WeightForClassPostProb", m_nbrCls, m_layerSizes[numHiddenLayers]);
@@ -1158,16 +1158,16 @@ shared_ptr<ComputationNode<ElemType>> /*ComputationNodePtr*/ SimpleNetworkBuilde
size_t nDim = inputDim + outputDim + 2;
wInputGate = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"WINPUTGATE%d", iLayer), outputDim, nDim);
m_net->InitLearnableParameters(wInputGate, m_uniformInit, randomSeed++, m_initValueScale);
-wInputGate->Value().ColumnSlice(0, 1).SetValue(m_inputGateInitVal); /// init to input gate bias
+wInputGate->Value().ColumnSlice(0, 1).SetValue(m_inputGateInitVal); // init to input gate bias
wForgetGate = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"WFORGETGATE%d", iLayer), outputDim, nDim);
m_net->InitLearnableParameters(wForgetGate, m_uniformInit, randomSeed++, m_initValueScale);
-wForgetGate->Value().ColumnSlice(0, 1).SetValue(m_forgetGateInitVal); /// init to forget gate bias
+wForgetGate->Value().ColumnSlice(0, 1).SetValue(m_forgetGateInitVal); // init to forget gate bias
wOutputGate = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"WOUTPUTGATE%d", iLayer), outputDim, nDim);
m_net->InitLearnableParameters(wOutputGate, m_uniformInit, randomSeed++, m_initValueScale);
-wOutputGate->Value().ColumnSlice(0, 1).SetValue(m_outputGateInitVal); /// init to output gate bias
+wOutputGate->Value().ColumnSlice(0, 1).SetValue(m_outputGateInitVal); // init to output gate bias
wMemoryCellMatrix = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"WMEMORYCELLWEIGHT%d", iLayer), outputDim, inputDim + outputDim + 1);
m_net->InitLearnableParameters(wMemoryCellMatrix, m_uniformInit, randomSeed++, m_initValueScale);
-wMemoryCellMatrix->Value().ColumnSlice(0, 1).SetValue(0); /// init to memory cell bias
+wMemoryCellMatrix->Value().ColumnSlice(0, 1).SetValue(0); // init to memory cell bias
output = builder.LSTM(inputObs, wInputGate, wForgetGate, wOutputGate, wMemoryCellMatrix, msra::strfun::wstrprintf(L"LSTM%d", iLayer));
@@ -1241,7 +1241,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildLSTMNetworkFromDescri
outputFromEachLayer[1] = input;
}

-/// direct connect from input node to output node
+// direct connect from input node to output node

int recur_idx = 0;
int offset = m_lookupTableOrder > 0 ? 1 : 0;
@@ -1250,7 +1250,7 @@

//output = (ComputationNodePtr)BuildLSTMNodeComponent(randomSeed, 0, m_layerSizes[offset] * (offset ? m_lookupTableOrder : 1), m_layerSizes[offset + 1], input);
output = (ComputationNodePtr) BuildLSTMComponent(randomSeed, 0, m_layerSizes[offset] * (offset ? m_lookupTableOrder : 1), m_layerSizes[offset + 1], input);
-/// previously used function. now uses LSTMNode which is correct and fast
+// previously used function. now uses LSTMNode which is correct and fast
input = output;
outputFromEachLayer[offset + 1] = input;

@@ -1543,7 +1543,7 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildNCELSTMNetworkFromDes
outputFromEachLayer[1] = input;
}

-/// direct connect from input node to output node
+// direct connect from input node to output node

int recur_idx = 0;
int offset = m_lookupTableOrder > 0 ? 1 : 0;
@@ -1580,19 +1580,19 @@ ComputationNetworkPtr SimpleNetworkBuilder<ElemType>::BuildNCELSTMNetworkFromDes

for (size_t i = offset; i < m_layerSizes.size(); i++)
{
-/// add direct connect from each layers' output to the layer before the output layer
+// add direct connect from each layers' output to the layer before the output layer
output = BuildDirectConnect(randomSeed, i, (i > 1) ? m_layerSizes[i] : ((offset == 0) ? m_layerSizes[i] : m_layerSizes[i] * m_lookupTableOrder), m_layerSizes[numHiddenLayers], outputFromEachLayer[i], input);
if (output != nullptr)
input = output;
}

-/// need to have [input_dim x output_dim] matrix
-/// e.g., [200 x 10000], where 10000 is the vocabulary size
-/// this is for speed-up issue as per word matrix can be simply obtained using column slice
+// need to have [input_dim x output_dim] matrix
+// e.g., [200 x 10000], where 10000 is the vocabulary size
+// this is for speed-up issue as per word matrix can be simply obtained using column slice
w = builder.CreateLearnableParameter(msra::strfun::wstrprintf(L"W%d", numHiddenLayers), m_layerSizes[numHiddenLayers], m_layerSizes[numHiddenLayers + 1]);
m_net->InitLearnableParameters(w, m_uniformInit, randomSeed++, m_initValueScale);

-/// the label is a dense matrix. each element is the word index
+// the label is a dense matrix. each element is the word index
label = builder.CreateInputNode(L"labels", 2 * (this->nce_noises + 1));

bias = builder.CreateLearnableParameter(L"BiasVector", 1, m_layerSizes[m_layerSizes.size() - 1]);
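
Several of the comments normalized above ("need to have [input_dim x output_dim] matrix ... per word matrix can be simply obtained using column slice") describe the layout of the output weight matrix: one column per word, so the columns of one word class form a contiguous block that can be viewed without copying. The following is a minimal sketch of that idea using a plain column-major array; it is illustrative only and does not use CNTK's Matrix or ColumnSlice API.

#include <cstdio>
#include <vector>

int main()
{
    const int hiddenDim = 4;   // stands in for the 200 in the comment
    const int vocabSize = 10;  // stands in for the 10000 in the comment

    // column-major storage: element (r, c) lives at W[c * hiddenDim + r]
    std::vector<float> W(hiddenDim * vocabSize, 0.01f);
    std::vector<float> h(hiddenDim, 1.0f); // hidden activation for one time step

    // the words of one class occupy columns [classBegin, classEnd)
    const int classBegin = 3, classEnd = 7;

    // "column slice": a view on the class's columns, obtained without copying
    const float* slice = &W[classBegin * hiddenDim];
    const int sliceCols = classEnd - classBegin;

    for (int c = 0; c < sliceCols; c++) // logits only for the words in the class
    {
        float z = 0.0f;
        for (int r = 0; r < hiddenDim; r++)
            z += slice[c * hiddenDim + r] * h[r];
        printf("word %d: logit %f\n", classBegin + c, z);
    }
    return 0;
}
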
28 changes: 14 additions & 14 deletions Source/CNTK/SimpleNetworkBuilder.h
@@ -32,7 +32,7 @@ namespace Microsoft { namespace MSR { namespace CNTK {

enum RNNTYPE
{
-SIMPLENET = 0, /// no recurrent connections
+SIMPLENET = 0, // no recurrent connections
SIMPLERNN = 1,
LSTM = 2,
DEEPRNN = 4,
@@ -147,9 +147,9 @@ class SimpleNetworkBuilder

ConfigArray sSizes = config("streamSizes", "");
m_streamSizes = sSizes;
-sSizes = config("lookupTableOrderSizes", ""); /// this allows having a multiple streams of inputs with
-/// different lookuptable order sizes. the older one lookupTableOrder is still kept to have backward
-/// support.
+sSizes = config("lookupTableOrderSizes", ""); // this allows having a multiple streams of inputs with
+// different lookuptable order sizes. the older one lookupTableOrder is still kept to have backward
+// support.
m_lookupTabelOrderSizes = sSizes;

m_labelEmbeddingSize = config("labelEmbeddingSize", "10");
@@ -346,14 +346,14 @@ class SimpleNetworkBuilder
TrainingCriterion m_trainCriterion;
EvalCriterion m_evalCriterion;

-intargvector m_directConnect; /// connect those layers directly in a sequence order
-/// for example: 1:2:3 will connect 1 to 2 and then 2 to 3
+intargvector m_directConnect; // connect those layers directly in a sequence order
+// for example: 1:2:3 will connect 1 to 2 and then 2 to 3

-/// recurrent network
+// recurrent network
intargvector m_recurrentLayers;
float m_defaultHiddenActivity;
RNNTYPE m_rnnType;
-int m_maOrder; /// MA model order
+int m_maOrder; // MA model order

bool m_constForgetGateValue;
bool m_constInputGateValue;
Expand All @@ -363,18 +363,18 @@ class SimpleNetworkBuilder
ElemType m_inputGateInitVal;
ElemType m_outputGateInitVal;

-intargvector m_streamSizes; /// for multiple stream data
-intargvector m_lookupTabelOrderSizes; /// each stream has its own projection, so need to provide with the lookup table order size for each stream
+intargvector m_streamSizes; // for multiple stream data
+intargvector m_lookupTabelOrderSizes; // each stream has its own projection, so need to provide with the lookup table order size for each stream

int m_lookupTableOrder;
int m_labelEmbeddingSize;

-/// these are the file names for word 2 class mapping and class to word index mapping
-/// these are used for class-based language modeling
+// these are the file names for word 2 class mapping and class to word index mapping
+// these are used for class-based language modeling
string m_cls2index;
string m_word2class;
-int m_nbrCls; /// number of classes
-int m_vocabSize; /// vocabulary size
+int m_nbrCls; // number of classes
+int m_vocabSize; // vocabulary size
int nce_noises;

bool m_sparse_input;
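
One of the comments above documents m_directConnect as a colon-separated chain ("1:2:3 will connect 1 to 2 and then 2 to 3"). The sketch below shows how such a value reads as a list of layer pairs; the parsing is illustrative only and does not use CNTK's intargvector or config classes.

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    const std::string directConnect = "1:2:3";

    // split "1:2:3" into the layer indices 1, 2, 3
    std::vector<int> layers;
    std::stringstream ss(directConnect);
    std::string tok;
    while (std::getline(ss, tok, ':'))
        layers.push_back(std::stoi(tok));

    // consecutive entries form the direct connections: 1 -> 2, then 2 -> 3
    for (size_t i = 0; i + 1 < layers.size(); i++)
        printf("direct connect: layer %d -> layer %d\n", layers[i], layers[i + 1]);
    return 0;
}
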
2 changes: 1 addition & 1 deletion Source/Common/DataReader.cpp
@@ -51,7 +51,7 @@ void DataReader<ElemType>::InitFromConfig(const ConfigRecordType& /*config*/)
template <class ElemType>
void DataReader<ElemType>::Destroy()
{
-/// newer code that explicitly place multiple streams for inputs
+// newer code that explicitly place multiple streams for inputs
foreach_index (i, m_ioNames) // inputNames should map to node names
{
m_dataReaders[m_ioNames[i]]->Destroy();
2 changes: 1 addition & 1 deletion Source/ComputationNetworkLib/ComputationNetwork.h
@@ -935,7 +935,7 @@ class ComputationNetwork : public ScriptableObjects::Object, public ScriptableOb
std::vector<ComputationNodeBasePtr> m_finalCriteria;
std::vector<ComputationNodeBasePtr> m_evalNodes;
std::vector<ComputationNodeBasePtr> m_outputNodes;
-std::vector<ComputationNodeBasePtr> m_pairNodes; /// nodes for the children network to pair
+std::vector<ComputationNodeBasePtr> m_pairNodes; // nodes for the children network to pair
vector<std::vector<ComputationNodeBasePtr>*> GetAllNodeGroups() // get all groups to allow to iterate over all of them ...continue
{
return vector<std::vector<ComputationNodeBasePtr>*>{&m_features, &m_labels, &m_finalCriteria, &m_evalNodes, &m_outputNodes, &m_pairNodes};
2 changes: 1 addition & 1 deletion Source/ComputationNetworkLib/ComputationNode.h
@@ -1576,7 +1576,7 @@ class ComputationNode : public ComputationNodeBase // abstract class that cannot
{
}

-/// these two are used to pass gradients from future minibatch
+// these two are used to pass gradients from future minibatch
virtual void GetErrorsToPreviousMinibatch(Matrix<ElemType>&)
{
}