
Commit b4d475b
Misc cleanups for style and consistency.
Change: 147297316
Joshua V. Dillon authored and tensorflower-gardener committed Feb 13, 2017
1 parent 59fdd2e commit b4d475b
Showing 40 changed files with 542 additions and 533 deletions.
@@ -222,9 +222,10 @@ def testCovarianceFromSampling(self):
       dist = ds.DirichletMultinomial(n, alpha)
       x = dist.sample(int(250e3), seed=1)
       sample_mean = math_ops.reduce_mean(x, 0)
-      x_centered = x - sample_mean[None, ...]
+      x_centered = x - sample_mean[array_ops.newaxis, ...]
       sample_cov = math_ops.reduce_mean(math_ops.matmul(
-          x_centered[..., None], x_centered[..., None, :]), 0)
+          x_centered[..., array_ops.newaxis],
+          x_centered[..., array_ops.newaxis, :]), 0)
       sample_var = array_ops.matrix_diag_part(sample_cov)
       sample_stddev = math_ops.sqrt(sample_var)
       [
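
Note: the recurring edit in this commit replaces bare `None` indexing with the
named `array_ops.newaxis` constant. Below is a minimal self-contained sketch of
the same sample-covariance pattern, assuming only `import tensorflow as tf`
(TF 1.x era); `tf.newaxis`, like `array_ops.newaxis`, is just an alias for
`None`, so only readability changes.

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.random.rand(1000, 3).astype(np.float32))  # [n, k]
    sample_mean = tf.reduce_mean(x, 0)                           # [k]
    x_centered = x - sample_mean[tf.newaxis, ...]                # [n, k]
    # Batched outer products: [n, k, 1] @ [n, 1, k] -> [n, k, k], then average.
    sample_cov = tf.reduce_mean(
        tf.matmul(x_centered[..., tf.newaxis],
                  x_centered[..., tf.newaxis, :]), 0)            # [k, k]
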
@@ -317,7 +318,7 @@ def testCovarianceNAlphaBroadcast(self):
       dist = ds.DirichletMultinomial(ns, alpha)
       covariance = dist.covariance()
       expected_covariance = shared_matrix * (
-          ns * (ns + alpha_0) / (1 + alpha_0))[..., None]
+          ns * (ns + alpha_0) / (1 + alpha_0))[..., array_ops.newaxis]

       self.assertEqual([4, 3, 3], covariance.get_shape())
       self.assertAllClose(expected_covariance, covariance.eval())
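
A small NumPy sketch of the broadcast in the expected-covariance line, with
shapes invented for illustration (the real test builds `shared_matrix`, `ns`,
and `alpha_0` elsewhere in the file): the appended length-1 axis lets a
per-batch scale multiply one shared matrix into a batch of matrices.

    import numpy as np

    shared_matrix = np.eye(3, dtype=np.float32)          # [3, 3], stand-in
    ns = np.array([[2.], [3.], [4.], [5.]], np.float32)  # [4, 1]
    alpha_0 = np.float32(6.)
    scale = ns * (ns + alpha_0) / (1. + alpha_0)         # [4, 1]
    # [3, 3] * [4, 1, 1] broadcasts to [4, 3, 3].
    expected_covariance = shared_matrix * scale[..., np.newaxis]
    print(expected_covariance.shape)                     # -> (4, 3, 3)
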
@@ -239,17 +239,18 @@ def testCovarianceFromSampling(self):
     # via broadcast between alpha, n.
     theta = np.array([[1., 2, 3],
                       [2.5, 4, 0.01]], dtype=np.float32)
-    theta /= np.sum(theta, 1)[..., None]
+    theta /= np.sum(theta, 1)[..., array_ops.newaxis]
     # Ideally we'd be able to test broadcasting but, the multinomial sampler
     # doesn't support different total counts.
     n = np.float32(5)
     with self.test_session() as sess:
       dist = ds.Multinomial(n, theta)  # batch_shape=[2], event_shape=[3]
       x = dist.sample(int(250e3), seed=1)
       sample_mean = math_ops.reduce_mean(x, 0)
-      x_centered = x - sample_mean[None, ...]
+      x_centered = x - sample_mean[array_ops.newaxis, ...]
       sample_cov = math_ops.reduce_mean(math_ops.matmul(
-          x_centered[..., None], x_centered[..., None, :]), 0)
+          x_centered[..., array_ops.newaxis],
+          x_centered[..., array_ops.newaxis, :]), 0)
       sample_var = array_ops.matrix_diag_part(sample_cov)
       sample_stddev = math_ops.sqrt(sample_var)
       [
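
The normalization line above works on a NumPy array only because
`array_ops.newaxis`, `np.newaxis`, and `None` are all the same object. A
pure-NumPy sketch of the row normalization:

    import numpy as np

    theta = np.array([[1., 2, 3],
                      [2.5, 4, 0.01]], dtype=np.float32)
    theta /= np.sum(theta, 1)[..., np.newaxis]  # [2, 3] / [2, 1]
    print(theta.sum(axis=1))                    # -> approximately [1. 1.]
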
@@ -176,7 +176,7 @@ def testProbScalarBaseDistributionNonScalarTransform(self):
     x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.

     expected_mst = _FakeVectorStudentT(
-        df=np.tile(df, len(scale_diag)),
+        df=np.tile(df, reps=len(scale_diag)),
         loc=loc,
         scale_tril=scale_tril)

Expand Down Expand Up @@ -207,7 +207,7 @@ def testProbScalarBaseDistributionNonScalarTransformDynamic(self):
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.

expected_mst = _FakeVectorStudentT(
df=np.tile(df, len(scale_diag)),
df=np.tile(df, reps=len(scale_diag)),
loc=loc,
scale_tril=scale_tril)

@@ -236,8 +236,9 @@ def testProbNonScalarBaseDistributionScalarTransform(self):

     expected_mst = _FakeVectorStudentT(
         df=df,
-        loc=np.tile(loc[None, :], [len(df), 1]),
-        scale_tril=np.tile(scale_tril[None, :, :], [len(df), 1, 1]))
+        loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
+        scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
+                           reps=[len(df), 1, 1]))

     with self.test_session():
       actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
@@ -261,8 +262,9 @@ def testProbNonScalarBaseDistributionScalarTransformDynamic(self):

     expected_mst = _FakeVectorStudentT(
         df=df,
-        loc=np.tile(loc[None, :], [len(df), 1]),
-        scale_tril=np.tile(scale_tril[None, :, :], [len(df), 1, 1]))
+        loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
+        scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
+                           reps=[len(df), 1, 1]))

     with self.test_session():
       df_pl = array_ops.placeholder(dtypes.float32, name="df")
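
The `np.tile` edits in this file only name the existing second positional
argument, `reps`; results are identical. A quick illustration with made-up
values:

    import numpy as np

    df = np.array([2., 3.], dtype=np.float32)
    loc = np.array([1., 2., 3.], dtype=np.float32)

    print(np.tile(df, reps=3))                       # -> [2. 3. 2. 3. 2. 3.]
    print(np.tile(loc[np.newaxis, :], reps=[2, 1]))  # shape (2, 3)
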
10 changes: 5 additions & 5 deletions tensorflow/contrib/distributions/python/ops/bernoulli.py
@@ -57,15 +57,15 @@ def __init__(self,
         Bernoulli distribution. Only one of `logits` or `probs` should be passed
         in.
       dtype: The type of the event samples. Default: `int32`.
-      validate_args: Python `Boolean`, default `False`. When `True` distribution
+      validate_args: Python `bool`, default `False`. When `True` distribution
         parameters are checked for validity despite possibly degrading runtime
         performance. When `False` invalid inputs may silently render incorrect
         outputs.
-      allow_nan_stats: Python `Boolean`, default `True`. When `True`,
+      allow_nan_stats: Python `bool`, default `True`. When `True`,
         statistics (e.g., mean, mode, variance) use the value "`NaN`" to
-        indicate the result is undefined.  When `False`, an exception is raised
+        indicate the result is undefined. When `False`, an exception is raised
         if one or more of the statistic's batch members are undefined.
-      name: `String` name prefixed to Ops created by this class.
+      name: Python `str` name prefixed to Ops created by this class.

     Raises:
       ValueError: If p and logits are passed, or if neither are passed.
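
For context, a usage sketch of the two flags documented above, assuming the
TF 1.x `tf.contrib.distributions` API this file belongs to:

    import tensorflow as tf

    dist = tf.contrib.distributions.Bernoulli(
        probs=[0.3, 0.7],
        validate_args=True,    # check parameter validity at some runtime cost
        allow_nan_stats=True)  # undefined statistics yield NaN, never raise
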
@@ -114,7 +114,7 @@ def _event_shape(self):
     return tensor_shape.scalar()

   def _sample_n(self, n, seed=None):
-    new_shape = array_ops.concat(([n], self.batch_shape_tensor()), 0)
+    new_shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
     uniform = random_ops.random_uniform(
         new_shape, seed=seed, dtype=self.probs.dtype)
     sample = math_ops.less(uniform, self.probs)
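
The `concat` edit above swaps a tuple for a list around the parts being
joined; `tf.concat` documents its first argument as a list of tensors, and the
result is unchanged. A minimal sketch:

    import tensorflow as tf

    n = tf.constant(7)
    batch_shape = tf.constant([2, 3])
    new_shape = tf.concat([[n], batch_shape], 0)  # 1-D tensor: [7, 2, 3]
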
10 changes: 5 additions & 5 deletions tensorflow/contrib/distributions/python/ops/beta.py
@@ -139,15 +139,15 @@ def __init__(self,
       concentration0: Positive floating-point `Tensor` indicating mean
         number of failures; aka "beta". Otherwise has same semantics as
         `concentration1`.
-      validate_args: Python `Boolean`, default `False`. When `True` distribution
+      validate_args: Python `bool`, default `False`. When `True` distribution
         parameters are checked for validity despite possibly degrading runtime
         performance. When `False` invalid inputs may silently render incorrect
         outputs.
-      allow_nan_stats: Python `Boolean`, default `True`. When `True`, statistics
+      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
         (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
-        result is undefined.  When `False`, an exception is raised if one or
+        result is undefined. When `False`, an exception is raised if one or
         more of the statistic's batch members are undefined.
-      name: `String` name prefixed to Ops created by this class.
+      name: Python `str` name prefixed to Ops created by this class.
     """
     parameters = locals()
     with ops.name_scope(name, values=[concentration1,
@@ -267,7 +267,7 @@ def _variance(self):
   @distribution_util.AppendDocstring(
       """Note: The mode is undefined when `concentration1 <= 1` or
       `concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
-      is used for undefined modes.  If `self.allow_nan_stats` is `False` an
+      is used for undefined modes. If `self.allow_nan_stats` is `False` an
       exception is raised when one or more modes are undefined.""")
   def _mode(self):
     mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
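
The mode expression above is the standard Beta result: for Beta(a, b) with
a, b > 1, the mode is (a - 1) / (a + b - 2), where here a = `concentration1`,
b = `concentration0`, and `total_concentration` = a + b. A quick numeric
check:

    a, b = 2., 5.                   # concentration1, concentration0
    mode = (a - 1.) / (a + b - 2.)  # -> 0.2
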
(Diffs for the remaining changed files are not shown.)

0 comments on commit b4d475b
