Skip to content

Commit

Permalink
fix doc, use to_tensor
Browse files Browse the repository at this point in the history
fix doc, use to_tensor for the loss ops
  • Loading branch information
ZHUI authored Aug 31, 2020
1 parent 7ee70a4 commit 1f6df87
Show file tree
Hide file tree
Showing 3 changed files with 23 additions and 30 deletions.
20 changes: 8 additions & 12 deletions python/paddle/nn/functional/loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,6 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
label = paddle.to_tensor(label_data)
output = paddle.nn.functional.binary_cross_entropy(input, label)
print(output.numpy()) # [0.65537095]
paddle.enable_static()
"""
if reduction not in ['sum', 'mean', 'none']:
Expand All @@ -165,8 +164,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
return core.ops.reduce_sum(out, 'dim', [0], 'keep_dim', False,
"reduce_all", True)
elif reduction == 'mean':
return core.ops.reduce_mean(out, 'dim', [0], 'keep_dim', False,
"reduce_all", True)
return core.ops.mean(out)
else:
return out

Expand Down Expand Up @@ -467,14 +465,12 @@ def margin_ranking_loss(input,
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype('float32'))
other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype('float32'))
label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype('float32'))
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
print(loss.numpy()) # [0.75]
"""
Expand Down Expand Up @@ -578,8 +574,8 @@ def l1_loss(input, label, reduction='mean', name=None):
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
l1_loss = paddle.nn.functional.l1_loss(input, label)
print(l1_loss.numpy())
Expand Down Expand Up @@ -675,9 +671,9 @@ def nll_loss(input,
place = paddle.CPUPlace()
paddle.disable_static(place)
input = paddle.to_variable(input_np)
input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
label = paddle.to_variable(label_np)
label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
"""
Expand Down
10 changes: 5 additions & 5 deletions python/paddle/nn/layer/distance.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,10 +44,10 @@ class PairwiseDistance(layers.Layer):
For more information, please refer to :ref:`api_guide_Name`.
Shape:
x: :math:`(N, D)` where `D` is the dimension of vector, available dtype
x: :math:`[N, D]` where `D` is the dimension of vector, available dtype
is float32, float64.
y: :math:`(N, D)`, y have the same shape and dtype as x.
out: :math:`(N)`. If :attr:`keepdim` is ``True``, the out shape is :math:`(N, 1)`.
y: :math:`[N, D]`, y have the same shape and dtype as x.
out: :math:`[N]`. If :attr:`keepdim` is ``True``, the out shape is :math:`[N, 1]`.
The same dtype as input tensor.
Examples:
Expand All @@ -58,8 +58,8 @@ class PairwiseDistance(layers.Layer):
paddle.disable_static()
x_np = np.array([[1., 3.], [3., 5.]]).astype(np.float64)
y_np = np.array([[5., 6.], [7., 8.]]).astype(np.float64)
x = paddle.to_variable(x_np)
y = paddle.to_variable(y_np)
x = paddle.to_tensor(x_np)
y = paddle.to_tensor(y_np)
dist = paddle.nn.PairwiseDistance()
distance = dist(x, y)
print(distance.numpy()) # [5. 5.]
Expand Down
23 changes: 10 additions & 13 deletions python/paddle/nn/layer/loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -376,8 +376,8 @@ class L1Loss(fluid.dygraph.Layer):
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
l1_loss = paddle.nn.loss.L1Loss()
output = l1_loss(input, label)
Expand Down Expand Up @@ -455,7 +455,7 @@ class BCELoss(fluid.dygraph.Layer):
For more information, please refer to :ref:`api_guide_Name`.
Shape:
input (Tensor): 2-D tensor with shape: (N, *), N is batch_size, `*` means
input (Tensor): 2-D tensor with shape: [N, *], N is batch_size, `*` means
number of additional dimensions. The input ``input`` should always
be the output of sigmoid. Available dtype is float32, float64.
label (Tensor): 2-D tensor with the same shape as ``input``. The target
Expand All @@ -476,12 +476,11 @@ class BCELoss(fluid.dygraph.Layer):
label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
paddle.disable_static()
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
bce_loss = paddle.nn.loss.BCELoss()
output = bce_loss(input, label)
print(output.numpy()) # [0.65537095]
paddle.enable_static()
"""

Expand Down Expand Up @@ -584,9 +583,9 @@ class NLLLoss(fluid.dygraph.Layer):
place = paddle.CPUPlace()
paddle.disable_static(place)
input = paddle.to_variable(input_np)
input = paddle.to_tensor(input_np)
log_out = log_softmax(input)
label = paddle.to_variable(label_np)
label = paddle.to_tensor(label_np)
result = nll_loss(log_out, label)
print(result.numpy()) # [1.0720209]
Expand Down Expand Up @@ -729,14 +728,12 @@ class MarginRankingLoss(fluid.dygraph.Layer):
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input = paddle.to_variable(np.array([[1, 2], [3, 4]]).astype("float32"))
other = paddle.to_variable(np.array([[2, 1], [2, 4]]).astype("float32"))
label = paddle.to_variable(np.array([[1, -1], [-1, -1]]).astype("float32"))
input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss.numpy()) # [0.75]
Expand Down

0 comments on commit 1f6df87

Please sign in to comment.