Skip to content

Commit

Permalink
[OP] Topk and arange + Update submodules (apache#4565)
Browse files Browse the repository at this point in the history
* initial add topk arange

fix arange

Put back ones and zeros

fix

fix default ctx

* fix lint

* Update Submodules

* Update MShadow

* fix warning

* fix pylint

* style fix
  • Loading branch information
sxjscience authored Jan 7, 2017
1 parent 3a820bf commit 9f9c135
Show file tree
Hide file tree
Showing 20 changed files with 1,113 additions and 206 deletions.
2 changes: 1 addition & 1 deletion dmlc-core
2 changes: 1 addition & 1 deletion mshadow
49 changes: 43 additions & 6 deletions python/mxnet/ndarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -1049,9 +1049,11 @@ def zeros(shape, ctx=None, dtype=mx_real_t):
out: Array
The created NDArray.
"""
arr = empty(shape, ctx, dtype)
arr[:] = 0.0
return arr
if ctx is None:
ctx = Context.default_ctx
# pylint: disable= no-member, protected-access
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype)
# pylint: enable= no-member, protected-access

def ones(shape, ctx=None, dtype=mx_real_t):
"""Create a new NDArray filled with 1, with specified shape.
Expand All @@ -1068,9 +1070,11 @@ def ones(shape, ctx=None, dtype=mx_real_t):
out: Array
The created NDArray.
"""
arr = empty(shape, ctx, dtype)
arr[:] = 1.0
return arr
if ctx is None:
ctx = Context.default_ctx
# pylint: disable= no-member, protected-access
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype)
# pylint: enable= no-member, protected-access

def full(shape, val, ctx=None, dtype=mx_real_t):
"""Create a new NDArray filled with given value, with specified shape.
Expand Down Expand Up @@ -1174,6 +1178,39 @@ def concatenate(arrays, axis=0, always_copy=True):

return ret

# pylint: disable= no-member, protected-access, too-many-arguments
def arange(start, stop=None, step=1.0, repeat=1, ctx=None, dtype=mx_real_t):
    """Return evenly spaced values within a given interval, similar to
    numpy.arange.
    See Also https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html.

    Parameters
    ----------
    start : number, optional
        Start of the interval; the interval includes this value.
        The default start value is 0.
    stop : number, optional
        End of the interval; the interval does not include this value.
    step : number, optional
        Spacing between values.
    repeat : number, optional
        Number of times each element is repeated,
        e.g. repeat=3 turns element a into a, a, a.
    ctx : Context, optional
        The context of the NDArray, defaults to the current default context.
    dtype : type, optional
        The value type of the NDArray, default to np.float32.

    Returns
    -------
    out : NDArray
        The created NDArray.
    """
    # Fall back to the process-wide default context when none was supplied.
    actual_ctx = Context.default_ctx if ctx is None else ctx
    # The generated internal operator expects the context serialized as a string.
    return _internal._arange(start=start, stop=stop, step=step,
                             repeat=repeat, dtype=dtype, ctx=str(actual_ctx))
# pylint: enable= no-member, protected-access, too-many-arguments


def load(fname):
"""Load ndarray from binary file.
Expand Down
65 changes: 65 additions & 0 deletions python/mxnet/symbol.py
Original file line number Diff line number Diff line change
Expand Up @@ -1186,3 +1186,68 @@ def hypot(left, right):
return _numpy.hypot(left, right)
else:
raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))


def zeros(shape, dtype=_numpy.float32):
    """Return a Symbol producing a tensor filled with zeros, similar to
    numpy.zeros.
    See Also https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array.
    dtype : type, optional
        The value type of the NDArray, default to np.float32.

    Returns
    -------
    out : Symbol
        The created Symbol.
    """
    # Delegate to the backend-generated internal operator.
    # pylint: disable= no-member, protected-access
    zero_sym = _internal._zeros(shape=shape, dtype=dtype)
    # pylint: enable= no-member, protected-access
    return zero_sym


def ones(shape, dtype=_numpy.float32):
    """Return a Symbol producing a tensor filled with ones, similar to
    numpy.ones.
    See Also https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array.
    dtype : type, optional
        The value type of the NDArray, default to np.float32.

    Returns
    -------
    out : Symbol
        The created Symbol.
    """
    # Delegate to the backend-generated internal operator.
    # pylint: disable= no-member, protected-access
    one_sym = _internal._ones(shape=shape, dtype=dtype)
    # pylint: enable= no-member, protected-access
    return one_sym


def arange(start, stop=None, step=1.0, repeat=1, name=None, dtype=_numpy.float32):
    """Return a Symbol producing evenly spaced values within a given
    interval, similar to numpy.arange.
    See Also https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html.

    Parameters
    ----------
    start : number
        Start of the interval; the interval includes this value.
        The default start value is 0.
    stop : number, optional
        End of the interval; the interval does not include this value.
    step : number, optional
        Spacing between values.
    repeat : int, optional
        Number of times each element is repeated,
        e.g. repeat=3 turns element a into a, a, a.
    name : str, optional
        Name of the resulting symbol.
    dtype : type, optional
        The value type of the NDArray, default to np.float32.

    Returns
    -------
    out : Symbol
        The created Symbol.
    """
    # Delegate to the backend-generated internal operator.
    # pylint: disable= no-member, protected-access
    range_sym = _internal._arange(start=start, stop=stop, step=step,
                                  repeat=repeat, name=name, dtype=dtype)
    # pylint: enable= no-member, protected-access
    return range_sym
129 changes: 0 additions & 129 deletions src/operator/block_grad-inl.h

This file was deleted.

35 changes: 0 additions & 35 deletions src/operator/block_grad.cc

This file was deleted.

22 changes: 0 additions & 22 deletions src/operator/block_grad.cu

This file was deleted.

12 changes: 10 additions & 2 deletions src/operator/tensor/broadcast_reduce_op_index.cc
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,19 @@ namespace mxnet {
namespace op {
MXNET_OPERATOR_REGISTER_REDUCE_AXIS(argmax)
.MXNET_DESCRIBE("Compute argmax")
.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::maximum>);
.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::maximum>)
.set_attr<nnvm::FGradient>("FGradient",
[](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
return MakeGradNode("_zeros", n, {}, {});
});

MXNET_OPERATOR_REGISTER_REDUCE_AXIS(argmin)
.MXNET_DESCRIBE("Compute argmin")
.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::minimum>);
.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::minimum>)
.set_attr<nnvm::FGradient>("FGradient",
[](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
return MakeGradNode("_zeros", n, {}, {});
});

// Legacy support
NNVM_REGISTER_OP(argmax_channel)
Expand Down
9 changes: 9 additions & 0 deletions src/operator/tensor/elemwise_unary_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,15 @@ NNVM_REGISTER_OP(_backward_copy)
})
.set_attr<FCompute>("FCompute<cpu>", IdentityCompute<cpu>);

// BlockGrad operator (CPU): the forward pass uses IdentityCompute, i.e. the
// output is a pass-through of the input symbol's value; the backward pass
// registers an FGradient that produces a "_zeros" grad node, so zero gradient
// is propagated past this op (gradient flow is blocked here).
MXNET_OPERATOR_REGISTER_UNARY(BlockGrad)
.MXNET_DESCRIBE("Get output from a symbol and pass 0 gradient back")
.set_attr<FCompute>("FCompute<cpu>", IdentityCompute<cpu>)
.set_attr<nnvm::FGradient>("FGradient",
[](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
// pass back zero gradient: the incoming ograds are ignored and replaced
// by a node that emits zeros
return MakeGradNode("_zeros", n, {}, {});
});

// identity output as first input, but attributes are constrainted to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_num_inputs(2)
Expand Down
3 changes: 3 additions & 0 deletions src/operator/tensor/elemwise_unary_op.cu
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@ NNVM_REGISTER_OP(_copy)
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", IdentityCompute<gpu>);

// GPU kernel registration for BlockGrad: forward compute is IdentityCompute
// (pass-through). Gradient behavior is attached in the corresponding .cc
// registration — NOTE(review): confirm FGradient attrs are device-agnostic.
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", IdentityCompute<gpu>);

// identity output as first input, but attributes are constrainted to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", IdentityCompute<gpu>);
Expand Down
Loading

0 comments on commit 9f9c135

Please sign in to comment.