Commit
fix interface bug of block_expand_layer and add unittest (PaddlePaddle#265)

* fix interface bug of block_expand_layer and add unittest

* auto compute num_channels

* default value of num_channels is None

* adjust input order of block_expand
luotao1 authored and qingqing01 committed Oct 28, 2016
1 parent ca5a5ec commit fc9ca53
Showing 3 changed files with 39 additions and 18 deletions.
34 changes: 18 additions & 16 deletions python/paddle/trainer_config_helpers/layers.py
@@ -54,7 +54,7 @@
'cross_entropy_with_selfnorm', 'cross_entropy',
'multi_binary_label_cross_entropy',
'rank_cost', 'lambda_cost', 'huber_cost',
# 'block_expand_layer', # TODO(yuyang18): this layer is not correct
'block_expand_layer',
'maxout_layer', 'out_prod_layer', 'print_layer'
]

@@ -3284,18 +3284,18 @@ def linear_comb_layer(weights, vectors, size=None, name=None,
@wrap_name_default()
@layer_support()
def block_expand_layer(input,
channel=0,
block_x=0,
block_y=0,
stride_x=0,
stride_y=0,
padding_x=0,
padding_y=0,
num_channels=None,
name=None,
layer_attr=None):
"""
Expand feature map to minibatch matrix.
- matrix width is: block_y * block_x * channel
- matrix width is: block_y * block_x * num_channels
- matrix height is: outputH * outputW
.. math::
@@ -3307,24 +3307,24 @@ def block_expand_layer(input,
The expand method is the same as in ExpandConvLayer, but it saves the transposed
value. After expanding, output.sequenceStartPositions will store the timeline.
The number of time steps is outputH * outputW and the dimension of each
time step is block_y * block_x * channel. This layer can be used after
time step is block_y * block_x * num_channels. This layer can be used after
a convolutional neural network, and before a recurrent neural network.
The simple usage is:
.. code-block:: python
block_expand = block_expand_layer(input,
channel=128,
num_channels=128,
stride_x=1,
stride_y=1,
block_x=1,
block_y=3)
:param input: The input layer.
:type input: LayerOutput
:param channel: The channel number of input layer.
:type channel: int
:param num_channels: The channel number of input layer.
:type num_channels: int|None
:param block_x: The width of sub block.
:type block_x: int
:param block_y: The height of sub block.
@@ -3344,16 +3344,18 @@ def block_expand_layer(input,
:return: LayerOutput object.
:rtype: LayerOutput
"""
if num_channels is None:
assert input.num_filters is not None
num_channels = input.num_filters
Layer(name=name,
input=Input(input.name,
block_expand=BlockExpand(channels=channel,
block_x=block_x,
block_y=block_y,
stride_x=stride_x,
stride_y=stride_y,
padding_x=padding_x,
padding_y=padding_y)
),
inputs=Input(input.name,
block_expand=BlockExpand(channels=num_channels,
block_x=block_x,
block_y=block_y,
stride_x=stride_x,
stride_y=stride_y,
padding_x=padding_x,
padding_y=padding_y)),
type=LayerType.BLOCK_EXPAND,
**ExtraLayerAttribute.to_kwargs(layer_attr)
)
@@ -12,7 +12,7 @@ a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr
8bb44e1e5072d0c261572307e7672bda test_grumemory_layer.protostr
1f3510672dce7a9ed25317fc58579ac7 test_hsigmoid.protostr
d350bd91a0dc13e854b1364c3d9339c6 test_lstmemory_layer.protostr
6fa59551808ee7012bbd24f757e782d2 test_maxout.protostr
5433ed33d4e7414eaf658f2a55946186 test_maxout.protostr
251a948ba41c1071afcd3d9cf9c233f7 test_ntm_layers.protostr
e6ff04e70aea27c7b06d808cc49c9497 test_print_layer.protostr
2a75dd33b640c49a8821c2da6e574577 test_rnn_group.protostr
@@ -25,6 +25,25 @@
stride=2,
pool_type=MaxPooling())

fc = fc_layer(input=pool, size=384, bias_attr=False)
conv2 = img_conv_layer(input=pool,
filter_size=3,
num_channels=32,
num_filters=128,
padding=1,
act=LinearActivation(),
bias_attr=True)

maxout2 = maxout_layer(input=conv2,
num_channels=128,
groups=4)

block = block_expand_layer(input=maxout2,
num_channels=32,
stride_x=1,
stride_y=1,
block_x=1,
block_y=6)

fc = fc_layer(input=block, size=384, bias_attr=False)

outputs(fc)
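For intuition, the time-step dimension produced by the block_expand_layer added to this test config follows the docstring above (block_y * block_x * num_channels); a quick, purely illustrative check:

# Values taken from the test config above.
num_channels, block_x, block_y = 32, 1, 6

# Per the layer docstring, each time step has dimension block_y * block_x * num_channels.
time_step_dim = block_y * block_x * num_channels
assert time_step_dim == 192

# The number of time steps is outputH * outputW, which depends on the size of
# the feature map coming out of the conv/pool stack and is not computed here.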
