Commit 23e47bb: Merge remote-tracking branch 'upstream/master'

gangliao committed Sep 28, 2016
2 parents: a8df411 + d130d18

Showing 44 changed files with 1,215 additions and 444 deletions.
doc_cn/demo/quick_start/index.md (2 changes: 1 addition & 1 deletion)

@@ -4,7 +4,7 @@

 ## Installation (Install)

-First, please refer to the <a href="../../build_and_install/install/index.html">installation tutorial</a> to install PaddlePaddle.
+First, please refer to the <a href="../../build_and_install/index.html">installation tutorial</a> to install PaddlePaddle.

 ## Usage Overview (Overview)
paddle/cuda/src/hl_cuda_cublas.cc (4 changes: 2 additions & 2 deletions)

@@ -217,7 +217,7 @@ void hl_matrix_mul(real *A_d, hl_trans_op_t transa,
   } else {
     LOG(FATAL) << "parameter transa error!";
   }
-  CHECK_EQ(stat, CUBLAS_STATUS_SUCCESS);
+  CHECK_EQ(stat, CUBLAS_STATUS_SUCCESS) << hl_cublas_get_error_string(stat);
   CHECK_SYNC("hl_matrix_mul failed");
 }

@@ -266,7 +266,7 @@ void hl_matrix_mul_vector(real *A_d, hl_trans_op_t trans,
     LOG(FATAL) << "parameter transa error!";
   }

-  CHECK_EQ(stat, CUBLAS_STATUS_SUCCESS);
+  CHECK_EQ(stat, CUBLAS_STATUS_SUCCESS) << hl_cublas_get_error_string(stat);
   CHECK_SYNC("hl_matrix_mul_vector");
 }
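Both changes append the cuBLAS status string to the fatal log, so a failing call reports which error occurred rather than only that the status was not CUBLAS_STATUS_SUCCESS. hl_cublas_get_error_string is the helper this file uses for that; a minimal sketch of such a mapper, assuming only the standard status codes from cublas_v2.h (the function name below is hypothetical, not the real implementation):

    #include <cublas_v2.h>

    // Map a cublasStatus_t to a readable name for log messages (sketch only).
    const char* cublas_status_name(cublasStatus_t stat) {
      switch (stat) {
        case CUBLAS_STATUS_SUCCESS:          return "CUBLAS_STATUS_SUCCESS";
        case CUBLAS_STATUS_NOT_INITIALIZED:  return "CUBLAS_STATUS_NOT_INITIALIZED";
        case CUBLAS_STATUS_ALLOC_FAILED:     return "CUBLAS_STATUS_ALLOC_FAILED";
        case CUBLAS_STATUS_INVALID_VALUE:    return "CUBLAS_STATUS_INVALID_VALUE";
        case CUBLAS_STATUS_ARCH_MISMATCH:    return "CUBLAS_STATUS_ARCH_MISMATCH";
        case CUBLAS_STATUS_MAPPING_ERROR:    return "CUBLAS_STATUS_MAPPING_ERROR";
        case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED";
        case CUBLAS_STATUS_INTERNAL_ERROR:   return "CUBLAS_STATUS_INTERNAL_ERROR";
        default:                             return "unknown cuBLAS status";
      }
    }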
paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp (41 changes: 25 additions & 16 deletions)

@@ -497,20 +497,21 @@ void RecurrentGradientMachine::forward(const std::vector<Argument>& inArgs,
   int idSize = 0;
   // connect in_links
   for (size_t j = 0; j < inFrameLines_.size(); ++j) {
+    Info& info = info_[shareInlinkInfo ? 0 : j];
     // idSize denotes the sum number of tokens in each length i
-    idSize = info_[j].idIndex[i + 1] - info_[j].idIndex[i];
+    idSize = info.idIndex[i + 1] - info.idIndex[i];
     InFrameLine inFrameLine = inFrameLines_[j];
     auto scatterAgent =
         dynamic_cast<ScatterAgentLayer*>(inFrameLine.agents[i].get());
     scatterAgent->setRealLayerAndOutput(inFrameLine.inLayer,
-                                        inFrameLine.outArg, info_[j].allIds,
-                                        info_[j].idIndex[i], idSize);
+                                        inFrameLine.outArg, info.allIds,
+                                        info.idIndex[i], idSize);
     if (hasSubseq) {
       // size: the length of subsequence
       int size =
-          info_[j].seqStartPosIndex[i + 1] - info_[j].seqStartPosIndex[i];
-      scatterAgent->setSequenceStartPositions(info_[j].sequenceStartPositions,
-                                              info_[j].seqStartPosIndex[i],
+          info.seqStartPosIndex[i + 1] - info.seqStartPosIndex[i];
+      scatterAgent->setSequenceStartPositions(info.sequenceStartPositions,
+                                              info.seqStartPosIndex[i],
                                               size);
     }
   }
@@ -744,16 +745,24 @@ void RecurrentGradientMachine::selectRowsOneTime(LayerPtr layer,
                                                  const IVectorPtr& allIds,
                                                  Argument* arg,
                                                  PassType passType) {
-  const MatrixPtr& realV = layer->getOutputValue();
-  int height = realV->getHeight();
-  int width = realV->getWidth();
-  Matrix::resizeOrCreate(arg->value, height, width, /* trans */ false, useGpu_);
-  arg->value->zeroMem();
-  arg->value->selectRows(*realV, *allIds);
-  if (passType != PASS_TEST) {
-    Matrix::resizeOrCreate(arg->grad, height, width, /* trans */ false,
-                           useGpu_);
-    arg->grad->zeroMem();
+  Argument& src = layer->getOutput();
+  if (src.value) {
+    const MatrixPtr& realV = src.value;
+    int height = realV->getHeight();
+    int width = realV->getWidth();
+    Matrix::resizeOrCreate(
+        arg->value, height, width, /* trans */ false, useGpu_);
+    arg->value->zeroMem();
+    arg->value->selectRows(*realV, *allIds);
+    if (passType != PASS_TEST) {
+      Matrix::resizeOrCreate(arg->grad, height, width, /* trans */ false,
+                             useGpu_);
+      arg->grad->zeroMem();
+    }
+  }
+  if (src.ids) {
+    IVector::resizeOrCreate(arg->ids, src.ids->getSize(), useGpu_);
+    arg->ids->selectFrom(*src.ids, *allIds);
   }
 }
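The rewritten selectRowsOneTime forwards whichever fields the source Argument actually carries: a dense value matrix is gathered row-by-row with selectRows, and an id vector element-by-element with selectFrom, both indexed by allIds. A minimal sketch of those gather semantics on plain row-major buffers (names are hypothetical, not Paddle's Matrix/IVector API):

    #include <algorithm>
    #include <vector>

    // Row gather: out row i is src row ids[i]; mirrors arg->value->selectRows
    // above (the destination is zeroed first there, so a plain copy suffices).
    std::vector<float> gatherRows(const std::vector<float>& src, int width,
                                  const std::vector<int>& ids) {
      std::vector<float> out(ids.size() * width, 0.0f);
      for (size_t i = 0; i < ids.size(); ++i) {
        const float* row = &src[ids[i] * width];
        std::copy(row, row + width, &out[i * width]);
      }
      return out;
    }

    // Element gather: out[i] = src[ids[i]]; mirrors arg->ids->selectFrom above.
    std::vector<int> gatherIds(const std::vector<int>& src,
                               const std::vector<int>& ids) {
      std::vector<int> out(ids.size());
      for (size_t i = 0; i < ids.size(); ++i) out[i] = src[ids[i]];
      return out;
    }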
paddle/gserver/layers/AgentLayer.cpp (26 changes: 13 additions & 13 deletions)

@@ -139,15 +139,16 @@ void ScatterAgentLayer::forward(PassType passType) {
   Layer::forward(passType);
   CHECK_EQ(realLayer_->getDeviceId(), this->getDeviceId());

-  if (realLayer_->getOutput().ids) {  // ids scatter
-    IVector::resizeOrCreate(output_.ids, ids_->getSize(), useGpu_);
-    output_.ids->selectFrom(*realLayer_->getOutput().ids, *ids_);
-  } else {  // value scatter
-    int width = this->getSize();
-    if (realOutArg_.value) {
-      output_.subArgFrom(realOutArg_, /* offset */ idIndex_ * width, idSize_,
-                         width, useGpu_);
-    } else {  // used in generation
+  int width = this->getSize();
+  if (realOutArg_.value || realOutArg_.ids) {
+    output_.subArgFrom(realOutArg_, /* offset */ idIndex_, idSize_,
+                       width, useGpu_);
+  } else {  // used in generation
+    if (realLayer_->getOutput().ids) {
+      IVector::resizeOrCreate(output_.ids, ids_->getSize(), useGpu_);
+      output_.ids->selectFrom(*realLayer_->getOutput().ids, *ids_);
+    }
+    if (realLayer_->getOutput().value) {
       int height = ids_->getSize();
       resetOutput(height, width);
@@ -213,18 +214,17 @@ void SequenceGatherAgentLayer::forward(PassType passType) {
 void SequenceScatterAgentLayer::forward(PassType passType) {
   Layer::forward(passType);
   CHECK_EQ(realLayer_->getDeviceId(), this->getDeviceId());
-  CHECK(!realLayer_->getOutput().ids) << "Not supported";

   const Argument& input = realLayer_->getOutput();
-  CHECK_EQ(input.value->getWidth(), this->getSize());
+  CHECK_EQ(realLayer_->getSize(), this->getSize());
   int width = this->getSize();

   AsyncGpuBlock asyncGpuBlock;
   REGISTER_TIMER_INFO("SequenceAgentLayerForward", getName().c_str());

-  if (realOutArg_.value) {
+  if (realOutArg_.value || realOutArg_.ids) {
     CHECK(realOutArg_.sequenceStartPositions);
-    output_.subArgFrom(realOutArg_, /* offset */ idIndex_ * width, idSize_,
+    output_.subArgFrom(realOutArg_, /* offset */ idIndex_, idSize_,
                        width, useGpu_, /* trans */ false, /* seqFlag */ true,
                        /* seqStart */ seqStartPosIndex_,
                        /* seqSize */ numSequences_);
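In both scatter layers the offset passed to subArgFrom changes from idIndex_ * width (an element offset into the value matrix) to idIndex_, and the fast path now also fires when realOutArg_ carries only ids. A plausible reading, inferred from this diff rather than from the Argument::subArgFrom contract, is that the offset is now counted in positions (rows), so a single index can slice a value matrix and an id vector alike. A toy sketch of such a position-based sub-view (all names hypothetical):

    #include <cassert>

    // Toy position-based sub-view (names hypothetical). A position offset
    // addresses row `pos` of a width-W matrix as pos * W internally and
    // element `pos` of an id vector directly; an element offset (pos * W)
    // would only make sense for the matrix.
    struct SubArg {
      const float* value;  // first row of the value window, or nullptr
      const int* ids;      // first entry of the id window, or nullptr
      int size;            // number of positions in the window
    };

    SubArg subArgAt(const float* value, const int* ids, int width,
                    int pos, int size, int totalPositions) {
      assert(pos + size <= totalPositions);
      return SubArg{value ? value + pos * width : nullptr,
                    ids ? ids + pos : nullptr,
                    size};
    }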
paddle/gserver/layers/CRFLayer.h (2 changes: 1 addition & 1 deletion)

@@ -25,7 +25,7 @@ namespace paddle {
 /**
  * A layer for calculating the cost of sequential conditional random field
  * model.
- * See LinearChainCRF.h for the detail of the CRF formulation.
+ * See class LinearChainCRF for the detail of the CRF formulation.
  */
 class CRFLayer : public Layer {
 public:
paddle/gserver/layers/LinearChainCRF.h (48 changes: 24 additions & 24 deletions)

@@ -21,39 +21,39 @@ namespace paddle {

 class LinearChainCRF {
 public:
-  /*
-    The size of para and grad must be (numClasses + 2) * numClasses.
-    The first numClasses values of para are for starting weights (a).
-    The next numClasses values of para are for ending weights (b),
-    The remaining values are for transition weights (w).
-
-    The probability of a state sequence s of length L is defined as:
-    P(s) = (1/Z) exp(a_{s_1} + b_{s_L}
-                     + \sum_{l=1}^L x_{s_l}
-                     + \sum_{l=2}^L w_{s_{l-1},s_l})
-    where Z is a normalization value so that the sum of P(s) over all possible
-    sequences is 1, and x is the input feature to the CRF.
-  */
+  /**
+   * The size of para and grad must be \f$(numClasses + 2) * numClasses\f$.
+   * The first numClasses values of para are for starting weights (\f$a\f$).
+   * The next numClasses values of para are for ending weights (\f$b\f$).
+   * The remaining values are for transition weights (\f$w\f$).
+   *
+   * The probability of a state sequence s of length \f$L\f$ is defined as:
+   * \f$P(s) = (1/Z) exp(a_{s_1} + b_{s_L}
+   *                     + \sum_{l=1}^L x_{s_l}
+   *                     + \sum_{l=2}^L w_{s_{l-1},s_l})\f$
+   * where \f$Z\f$ is a normalization value so that the sum of \f$P(s)\f$ over
+   * all possible sequences is \f$1\f$, and \f$x\f$ is the input feature to
+   * the CRF.
+   */
   LinearChainCRF(int numClasses, real* para, real* grad);

-  /*
-    Calculate the negative log likelihood of s given x.
-    The size of x must be length * numClasses. Each consecutive numClasses
-    values are the features for one time step.
-  */
+  /**
+   * Calculate the negative log likelihood of s given x.
+   * The size of x must be length * numClasses. Each consecutive numClasses
+   * values are the features for one time step.
+   */
   real forward(real* x, int* s, int length);

-  /*
-    Calculate the gradient with respect to x, a, b, and w.
-    The gradient of x will be stored in dx.
-    backward() can only be called after a corresponding call to forward() with
-    the same x, s and length.
-    NOTE: The gradient is added to dx and grad (provided at constructor).
-  */
+  /**
+   * Calculate the gradient with respect to x, a, b, and w.
+   * The gradient of x will be stored in dx.
+   * backward() can only be called after a corresponding call to forward()
+   * with the same x, s and length.
+   * @note The gradient is added to dx and grad (provided at constructor).
+   */
   void backward(real* x, real* dx, int* s, int length);

-  /*
-    Find the most probable sequence given x. The result will be stored in s.
-  */
+  /**
+   * Find the most probable sequence given x. The result will be stored in s.
+   */
   void decode(real* x, int* s, int length);
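For reference, the sequence probability from the comment above, set as display LaTeX with Z written out from the stated normalization condition (the sum of P(s) over all length-L state sequences is 1):

    P(s) = \frac{1}{Z} \exp\Big( a_{s_1} + b_{s_L}
                                 + \sum_{l=1}^{L} x_{s_l}
                                 + \sum_{l=2}^{L} w_{s_{l-1},s_l} \Big),
    \qquad
    Z = \sum_{s'} \exp\Big( a_{s'_1} + b_{s'_L}
                            + \sum_{l=1}^{L} x_{s'_l}
                            + \sum_{l=2}^{L} w_{s'_{l-1},s'_l} \Big)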
paddle/gserver/tests/CMakeLists.txt (1 change: 0 additions & 1 deletion)

@@ -56,7 +56,6 @@ add_test(NAME test_RecurrentGradientMachine
     COMMAND .set_python_path.sh -d
         ${PROJ_ROOT}/python:${PROJ_ROOT}/paddle/gserver/tests
         ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine
-        --use_gpu=false
     WORKING_DIRECTORY ${PROJ_ROOT}/paddle)

 add_unittest_without_exec(test_NetworkCompare
paddle/gserver/tests/sequence_nest_rnn_multi_input.conf (new file, 77 additions)

@@ -0,0 +1,77 @@
#edit-mode: -*- python -*-
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

######################## data source ################################
define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
                        test_list=None,
                        module='rnn_data_provider',
                        obj='process_subseq')


settings(batch_size=2, learning_rate=0.01)
######################## network configuration ################################
dict_dim = 10
word_dim = 8
hidden_dim = 8
label_dim = 3

data = data_layer(name="word", size=dict_dim)

emb = embedding_layer(input=data, size=word_dim)

# This hierarchical RNN is designed to be equivalent to the simple RNN in
# sequence_rnn.conf

def outer_step(wid, x):
    outer_mem = memory(name="outer_rnn_state", size=hidden_dim)

    def inner_step(y, wid):
        z = embedding_layer(input=wid, size=word_dim)
        inner_mem = memory(name="inner_rnn_state",
                           size=hidden_dim,
                           boot_layer=outer_mem)
        out = fc_layer(input=[y, z, inner_mem],
                       size=hidden_dim,
                       act=TanhActivation(),
                       bias_attr=True,
                       name="inner_rnn_state")
        return out

    inner_rnn_output = recurrent_group(
        step=inner_step,
        name="inner",
        input=[x, wid])
    last = last_seq(input=inner_rnn_output, name="outer_rnn_state")

    # "return last" should also work, but currently RecurrentGradientMachine
    # does not handle it correctly. The current implementation requires that
    # all the out links are from sequences; however, it does not report an
    # error when an out link is not a sequence.
    return inner_rnn_output

out = recurrent_group(
    name="outer",
    step=outer_step,
    input=[SubsequenceInput(data), SubsequenceInput(emb)])

rep = last_seq(input=out)
prob = fc_layer(size=label_dim,
                input=rep,
                act=SoftmaxActivation(),
                bias_attr=True)

outputs(classification_cost(input=prob,
                            label=data_layer(name="label", size=label_dim)))
paddle/gserver/tests/sequence_rnn_multi_input.conf (new file, 58 additions)

@@ -0,0 +1,58 @@
#edit-mode: -*- python -*-
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

######################## data source ################################
define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
                        test_list=None,
                        module='rnn_data_provider',
                        obj='process_seq')


settings(batch_size=2, learning_rate=0.01)
######################## network configuration ################################
dict_dim = 10
word_dim = 8
hidden_dim = 8
label_dim = 3

data = data_layer(name="word", size=dict_dim)

emb = embedding_layer(input=data, size=word_dim)

def step(y, wid):
    z = embedding_layer(input=wid, size=word_dim)
    mem = memory(name="rnn_state", size=hidden_dim)
    out = fc_layer(input=[y, z, mem],
                   size=hidden_dim,
                   act=TanhActivation(),
                   bias_attr=True,
                   name="rnn_state")
    return out

out = recurrent_group(
    name="rnn",
    step=step,
    input=[emb, data])

rep = last_seq(input=out)
prob = fc_layer(size=label_dim,
                input=rep,
                act=SoftmaxActivation(),
                bias_attr=True)

outputs(classification_cost(input=prob,
                            label=data_layer(name="label", size=label_dim)))