Modify COPY-FROM No. 19 distribution (PaddlePaddle#5965)
enkilee authored Jul 5, 2023
1 parent c6e9c7e commit 2e14503
Showing 2 changed files with 14 additions and 249 deletions.
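
In both files, each inline ``.. code-block:: python`` example is replaced by a single ``COPY-FROM:`` directive naming the corresponding API (for example ``COPY-FROM: paddle.distribution.Bernoulli``), so the rendered example is sourced from that API's docstring rather than duplicated in the ``_cn.rst`` file.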
134 changes: 8 additions & 126 deletions docs/api/paddle/distribution/Bernoulli_cn.rst
@@ -27,25 +27,7 @@ Bernoulli
Code example
::::::::::::

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    # Initialize `probs` with a float
    rv = Bernoulli(probs=0.3)

    print(rv.mean)
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        0.30000001)

    print(rv.variance)
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        0.21000001)

    print(rv.entropy())
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        0.61086434)

COPY-FROM: paddle.distribution.Bernoulli

Properties
:::::::::
@@ -86,26 +68,7 @@ Tensor, the samples, with shape :math:`\text{sample shape} + \text{batch shape} +

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    # 0-D `probs` built with `paddle.full`
    rv = Bernoulli(paddle.full((), 0.3))
    print(rv.sample([100]).shape)
    # [100]

    # 0-D `probs` built with `paddle.to_tensor`
    rv = Bernoulli(paddle.to_tensor(0.3))
    print(rv.sample([100]).shape)
    # [100]

    # Batched `probs`: the batch shape is appended to the sample shape
    rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
    print(rv.sample([100]).shape)
    # [100, 2]

    rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
    print(rv.sample([100, 2]).shape)
    # [100, 2, 2]

COPY-FROM: paddle.distribution.Bernoulli.sample

rsample(shape, temperature=1.0)
'''''''''
@@ -132,46 +95,7 @@ Tensor, the samples, with shape :math:`\text{sample shape} + \text{batch shape} +

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    paddle.seed(2023)

    rv = Bernoulli(paddle.full((), 0.3))
    print(rv.rsample([100]).shape)
    # [100]

    rv = Bernoulli(0.3)
    print(rv.rsample([100]).shape)
    # [100]

    rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
    print(rv.rsample([100]).shape)
    # [100, 2]

    rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
    print(rv.rsample([100, 2]).shape)
    # [100, 2, 2]

    # `rsample` returns unbounded values; apply `sigmoid` to map them back to probabilities.
    rv = Bernoulli(0.3)
    rsample = rv.rsample([3])
    rsample_sigmoid = paddle.nn.functional.sigmoid(rsample)
    print(rsample, rsample_sigmoid)
    # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
    #        [-2.37732768, -0.61203325, -3.18344760])
    # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
    #        [0.08491799, 0.35159552, 0.03979339])

    # The smaller the `temperature`, the closer the distribution of `rsample`
    # gets to that of `sample`, whose `probs` is 0.3.
    print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=1.0)).sum())
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        361.06829834)

    print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=0.1)).sum())
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        288.66418457)

COPY-FROM: paddle.distribution.Bernoulli.rsample

cdf(value)
'''''''''
@@ -197,15 +121,7 @@ Tensor, the cumulative distribution function of ``value``.

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    rv = Bernoulli(0.3)
    print(rv.cdf(paddle.to_tensor([1.0])))
    # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        [1.])

COPY-FROM: paddle.distribution.Bernoulli.cdf

log_prob(value)
'''''''''
@@ -222,15 +138,7 @@ Tensor, the log probability density of ``value``.

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    rv = Bernoulli(0.3)
    print(rv.log_prob(paddle.to_tensor([1.0])))
    # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        [-1.20397282])

COPY-FROM: paddle.distribution.Bernoulli.log_prob
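
Assuming natural logarithms, the printed value can be reproduced by hand:

.. math::

    \log P(X = 1) = \log p = \log 0.3 \approx -1.20397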

prob(value)
'''''''''
@@ -255,15 +163,7 @@ Tensor, the probability density of ``value``.

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    rv = Bernoulli(0.3)
    print(rv.prob(paddle.to_tensor([1.0])))
    # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        [0.29999998])

COPY-FROM: paddle.distribution.Bernoulli.prob
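
Since ``value`` is 1, the printed probability is just ``probs`` itself (0.3, shown as 0.29999998 due to float32 rounding):

.. math::

    P(X = 1) = p = 0.3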

entropy()
'''''''''
@@ -282,15 +182,7 @@ Tensor, the entropy of the Bernoulli distribution.

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    rv = Bernoulli(0.3)
    print(rv.entropy())
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        0.61086434)

COPY-FROM: paddle.distribution.Bernoulli.entropy
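
Assuming entropy in natural-log units, the printed value agrees with the Bernoulli entropy formula:

.. math::

    H(p) = -p \log p - (1 - p) \log (1 - p) = -0.3 \log 0.3 - 0.7 \log 0.7 \approx 0.61086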

kl_divergence(other)
'''''''''
@@ -313,14 +205,4 @@ Tensor, the KL divergence between the two Bernoulli distributions.

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Bernoulli

    rv = Bernoulli(0.3)
    rv_other = Bernoulli(0.7)

    print(rv.kl_divergence(rv_other))
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        0.33891910)

COPY-FROM: paddle.distribution.Bernoulli.kl_divergence
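
Assuming natural logarithms, the printed value agrees with the closed-form KL divergence between two Bernoulli distributions:

.. math::

    D_{KL}(p \,\|\, q) = p \log \frac{p}{q} + (1 - p) \log \frac{1 - p}{1 - q} = 0.3 \log \frac{0.3}{0.7} + 0.7 \log \frac{0.7}{0.3} \approx 0.33892
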
129 changes: 6 additions & 123 deletions docs/api/paddle/distribution/Categorical_cn.rst
@@ -30,43 +30,7 @@ Categorical
Code example
::::::::::::

.. code-block:: python

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)  # on CPU device
    x = paddle.rand([6])
    print(x)
    # [0.5535528  0.20714243 0.01162981
    #  0.51577556 0.36369765 0.2609165 ]

    paddle.seed(200)  # on CPU device
    y = paddle.rand([6])
    print(y)
    # [0.77663314 0.90824795 0.15685187
    #  0.04279523 0.34468332 0.7955718 ]

    cat = Categorical(x)
    cat2 = Categorical(y)

    paddle.seed(1000)  # on CPU device
    cat.sample([2, 3])
    # [[0, 0, 5],
    #  [3, 4, 5]]

    cat.entropy()
    # 1.77528

    cat.kl_divergence(cat2)
    # [0.071952]

    value = paddle.to_tensor([2, 1, 3])
    cat.probs(value)
    # [0.00608027 0.108298 0.269656]

    cat.log_prob(value)
    # [-5.10271 -2.22287 -1.31061]

COPY-FROM: paddle.distribution.Categorical


Methods
@@ -87,23 +51,7 @@ sample(shape)

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)  # on CPU device
    x = paddle.rand([6])
    print(x)
    # [0.5535528  0.20714243 0.01162981
    #  0.51577556 0.36369765 0.2609165 ]

    cat = Categorical(x)

    paddle.seed(1000)  # on CPU device
    cat.sample([2, 3])
    # [[0, 0, 5],
    #  [3, 4, 5]]

COPY-FROM: paddle.distribution.Categorical.sample

kl_divergence(other)
'''''''''
@@ -120,28 +68,7 @@ kl_divergence(other)

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)  # on CPU device
    x = paddle.rand([6])
    print(x)
    # [0.5535528  0.20714243 0.01162981
    #  0.51577556 0.36369765 0.2609165 ]

    paddle.seed(200)  # on CPU device
    y = paddle.rand([6])
    print(y)
    # [0.77663314 0.90824795 0.15685187
    #  0.04279523 0.34468332 0.7955718 ]

    cat = Categorical(x)
    cat2 = Categorical(y)

    cat.kl_divergence(cat2)
    # [0.071952]

COPY-FROM: paddle.distribution.Categorical.kl_divergence

entropy()
'''''''''
@@ -154,21 +81,7 @@ entropy()

**Code example**

.. code-block:: python

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)  # on CPU device
    x = paddle.rand([6])
    print(x)
    # [0.5535528  0.20714243 0.01162981
    #  0.51577556 0.36369765 0.2609165 ]

    cat = Categorical(x)

    cat.entropy()
    # 1.77528

COPY-FROM: paddle.distribution.Categorical.entropy

probs(value)
'''''''''
@@ -186,22 +99,7 @@ probs(value)

The probabilities of the given category indices.

.. code-block:: python

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)  # on CPU device
    x = paddle.rand([6])
    print(x)
    # [0.5535528  0.20714243 0.01162981
    #  0.51577556 0.36369765 0.2609165 ]

    cat = Categorical(x)

    value = paddle.to_tensor([2, 1, 3])
    cat.probs(value)
    # [0.00608027 0.108298 0.269656]

COPY-FROM: paddle.distribution.Categorical.probs
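
The printed values are consistent with treating the constructor argument as unnormalized probabilities normalized by their sum (an assumption inferred from the example output; shown here for index 2):

.. math::

    \text{probs}(i) = \frac{x_i}{\sum_j x_j}, \qquad \frac{0.01162981}{1.91271} \approx 0.00608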

log_prob(value)
'''''''''
@@ -216,19 +114,4 @@ log_prob(value)

The log probabilities of the given category indices.

.. code-block:: python

    import paddle
    from paddle.distribution import Categorical

    paddle.seed(100)  # on CPU device
    x = paddle.rand([6])
    print(x)
    # [0.5535528  0.20714243 0.01162981
    #  0.51577556 0.36369765 0.2609165 ]

    cat = Categorical(x)

    value = paddle.to_tensor([2, 1, 3])
    cat.log_prob(value)
    # [-5.10271 -2.22287 -1.31061]

COPY-FROM: paddle.distribution.Categorical.log_prob
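
These are simply the natural logs of the corresponding ``probs`` values, e.g. :math:`\log 0.00608027 \approx -5.1027` and :math:`\log 0.269656 \approx -1.3106`.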
