
Commit

Merge pull request tensorflow#6794 from drpngx/branch_144215355
Branch 144215355
drpngx authored Jan 11, 2017
2 parents f25907e + e1d4c55 commit cb17d1b
Showing 225 changed files with 5,937 additions and 2,192 deletions.
9 changes: 7 additions & 2 deletions models.BUILD
@@ -6,8 +6,13 @@ filegroup(
name = "model_files",
srcs = glob(
[
"**/*.pb",
"**/*.txt",
"**/*",
],
exclude = [
"**/BUILD",
"**/WORKSPACE",
"**/LICENSE",
"**/*.zip",
],
),
)
1 change: 1 addition & 0 deletions tensorflow/BUILD
@@ -215,6 +215,7 @@ filegroup(
"//tensorflow/tensorboard/lib/python:all_files",
"//tensorflow/tensorboard/scripts:all_files",
"//tensorflow/tools/common:all_files",
"//tensorflow/tools/compatibility:all_files",
"//tensorflow/tools/dist_test/server:all_files",
"//tensorflow/tools/docker:all_files",
"//tensorflow/tools/docker/notebooks:all_files",
3 changes: 3 additions & 0 deletions tensorflow/c/c_api.cc
@@ -1344,6 +1344,7 @@ TF_AttrMetadata TF_OperationGetAttrMetadata(TF_Operation* oper,
metadata.total_size += s.unknown_rank() ? 0 : s.dim_size();
});
LIST_CASE(tensor, TF_ATTR_TENSOR);
LIST_CASE(func, TF_ATTR_FUNC);
#undef LIST_CASE
// All lists empty, determine the type from the OpDef.
if (metadata.list_size == 0) {
@@ -1365,6 +1366,8 @@ TF_AttrMetadata TF_OperationGetAttrMetadata(TF_Operation* oper,
metadata.type = TF_ATTR_SHAPE;
} else if (typestr == "list(tensor)") {
metadata.type = TF_ATTR_TENSOR;
} else if (typestr == "list(func)") {
metadata.type = TF_ATTR_FUNC;
} else {
status->status = InvalidArgument(
"Attribute '", attr_name,
12 changes: 6 additions & 6 deletions tensorflow/compiler/tests/binary_ops_test.py
@@ -159,17 +159,17 @@ def testNumericOps(self):
expected=np.array([[8], [9]], dtype=dtype))

self._testBinary(
math_ops.sub,
math_ops.subtract,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([-9, -18], dtype=dtype))
self._testBinary(
math_ops.sub,
math_ops.subtract,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([4, 3], dtype=dtype))
self._testBinary(
math_ops.sub,
math_ops.subtract,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[-6], [-5]], dtype=dtype))
@@ -207,17 +207,17 @@ def testNumericOps(self):
expected=np.array([[7], [2]], dtype=dtype))

self._testBinary(
math_ops.mul,
math_ops.multiply,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([10, 40], dtype=dtype))
self._testBinary(
math_ops.mul,
math_ops.multiply,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 100], dtype=dtype))
self._testBinary(
math_ops.mul,
math_ops.multiply,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[70], [14]], dtype=dtype))
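The binary_ops_test.py changes above track the TensorFlow 1.0 API cleanup in which the short arithmetic aliases were retired, so the tests call math_ops.subtract and math_ops.multiply instead of sub and mul. A minimal sketch of the equivalent public API, assuming TensorFlow 1.x (the tensor values are illustrative only):

import numpy as np
import tensorflow as tf

a = tf.constant(np.array([1, 2], dtype=np.float32))
b = tf.constant(np.array([10, 20], dtype=np.float32))

diff = tf.subtract(a, b)   # replaces the removed tf.sub / math_ops.sub
prod = tf.multiply(a, b)   # replaces the removed tf.mul / math_ops.mul

with tf.Session() as sess:
    print(sess.run([diff, prod]))  # [-9., -18.] and [10., 40.]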
36 changes: 18 additions & 18 deletions tensorflow/compiler/tests/concat_ops_test.py
@@ -38,7 +38,7 @@ def testHStack(self):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
with self.test_scope():
c = array_ops.concat_v2([p1, p2], 0)
c = array_ops.concat([p1, p2], 0)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
@@ -54,7 +54,7 @@ def testVStack(self):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
with self.test_scope():
c = array_ops.concat_v2([p1, p2], 1)
c = array_ops.concat([p1, p2], 1)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
@@ -72,7 +72,7 @@ def testInt32(self):
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
with self.test_scope():
c = array_ops.concat_v2([x1, x2], 0)
c = array_ops.concat([x1, x2], 0)
result = c.eval()
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
@@ -105,7 +105,7 @@ def _testRandom(self, dtype):
else:
concat_inputs = p
with self.test_scope():
c = array_ops.concat_v2(concat_inputs, concat_dim)
c = array_ops.concat(concat_inputs, concat_dim)
if dtype != dtype_feed:
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
@@ -144,13 +144,13 @@ def _testGradientsSimple(self):
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat_v2(inp_tensors, 1)
c = array_ops.concat(inp_tensors, 1)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat_v2(grad, 1)
concated_grad = array_ops.concat(grad, 1)
result = concated_grad.eval()
self.assertAllEqual(result, grad_inp)

@@ -171,13 +171,13 @@ def _testGradientsFirstDim(self):
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat_v2(inp_tensors, 0)
c = array_ops.concat(inp_tensors, 0)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat_v2(grad, 0)
concated_grad = array_ops.concat(grad, 0)
result = concated_grad.eval()

self.assertAllEqual(result, grad_inp)
@@ -199,13 +199,13 @@ def _testGradientsLastDim(self):
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat_v2(inp_tensors, 2)
c = array_ops.concat(inp_tensors, 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat_v2(grad, 2)
concated_grad = array_ops.concat(grad, 2)
result = concated_grad.eval()

self.assertAllEqual(result, grad_inp)
@@ -235,14 +235,14 @@ def _RunAndVerifyGradientsRandom(self):
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat_v2(inp_tensors, concat_dim)
c = array_ops.concat(inp_tensors, concat_dim)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat_v2(grad, concat_dim)
concated_grad = array_ops.concat(grad, concat_dim)
result = concated_grad.eval()

self.assertAllEqual(result, grad_inp)
@@ -267,7 +267,7 @@ def DISABLED_testZeroSize(self):
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
xs = list(map(constant_op.constant, [x0, x1]))
c = array_ops.concat_v2(xs, axis)
c = array_ops.concat(xs, axis)
self.assertAllEqual(c.eval(), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
@@ -284,7 +284,7 @@ def testTensorConcatDim0Grad(self):
with self.test_session():
with self.test_scope():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat_v2(xs, 0)
output = array_ops.concat(xs, 0)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-4)
@@ -299,7 +299,7 @@ def testTensorConcatDim1Grad(self):
with self.test_session():
with self.test_scope():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat_v2(xs, 1)
output = array_ops.concat(xs, 1)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-4)
@@ -309,8 +309,8 @@ def testConcatTuple(self):
c2 = np.random.rand(4, 4).astype(np.float32)
with self.test_session():
with self.test_scope():
concat_list_t = array_ops.concat_v2([c1, c2], 0)
concat_tuple_t = array_ops.concat_v2((c1, c2), 0)
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())

def testConcatNoScalars(self):
@@ -320,7 +320,7 @@ def testConcatNoScalars(self):
dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.pack instead\)"):
array_ops.concat_v2([scalar, scalar, scalar], dim)
array_ops.concat([scalar, scalar, scalar], dim)


class ConcatOffsetTest(XLATestCase):
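The concat_v2 to concat substitutions above reflect the TensorFlow 1.0 signature of tf.concat, which takes the list of values first and the axis second; concat_v2 was only a transitional name for that argument order. A short sketch under that assumption:

import tensorflow as tf

t1 = tf.constant([[1., 2.], [3., 4.]])
t2 = tf.constant([[5., 6.], [7., 8.]])

rows = tf.concat([t1, t2], 0)  # stack along rows, result shape [4, 2]
cols = tf.concat([t1, t2], 1)  # stack along columns, result shape [2, 4]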
2 changes: 1 addition & 1 deletion tensorflow/compiler/tests/lstm.py
@@ -61,7 +61,7 @@ def LSTMCell(weights, m_prev, c_prev, x, pad):
"""
# Apply weights to the input and previous hidden state.
# The matmul here is the "big" operation.
xm = array_ops.concat_v2([x, m_prev], 1)
xm = array_ops.concat([x, m_prev], 1)
xmw = math_ops.matmul(xm, weights)

# Element-wise ops for the standard LSTM cell, with clipped activations.
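The lstm.py hunk above keeps the cell's structure: the input x and the previous hidden state m_prev are concatenated once so that all gate projections come out of a single large matmul. A rough sketch of that pattern, with hypothetical shapes and names rather than the benchmark's actual code:

import tensorflow as tf

batch, input_dim, num_units = 32, 128, 256

x = tf.placeholder(tf.float32, [batch, input_dim])
m_prev = tf.placeholder(tf.float32, [batch, num_units])
# One weight matrix covering all four gates.
weights = tf.get_variable("lstm_w", [input_dim + num_units, 4 * num_units])

xm = tf.concat([x, m_prev], 1)    # [batch, input_dim + num_units]
xmw = tf.matmul(xm, weights)      # the single "big" matmul
gates = tf.split(xmw, 4, axis=1)  # four [batch, num_units] gate pre-activations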
4 changes: 2 additions & 2 deletions tensorflow/compiler/tests/nary_ops_test.py
@@ -58,7 +58,7 @@ def testFloat(self):

def testConcat(self):
self._testNAry(
lambda x: array_ops.concat_v2(x, 0), [
lambda x: array_ops.concat(x, 0), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
@@ -67,7 +67,7 @@ def testConcat(self):
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32))

self._testNAry(
lambda x: array_ops.concat_v2(x, 1), [
lambda x: array_ops.concat(x, 1), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
4 changes: 2 additions & 2 deletions tensorflow/compiler/tests/unary_ops_test.py
@@ -216,7 +216,7 @@ def testNumericOps(self):
expected=np.array([[2, 1]], dtype=dtype))

self._testUnary(
math_ops.neg,
math_ops.negative,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[1, -1]], dtype=dtype))

@@ -324,7 +324,7 @@ def testSize(self):

def testUnpack(self):
self._testUnary(
array_ops.unpack,
array_ops.unstack,
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 2.], dtype=np.float32),
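As with the binary ops, unary_ops_test.py now uses the TensorFlow 1.0 names: math_ops.negative replaces neg and array_ops.unstack replaces unpack. A brief sketch, again assuming the 1.x API:

import tensorflow as tf

t = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
negated = tf.negative(t)      # was tf.neg
rows = tf.unstack(t, axis=0)  # was tf.unpack; yields three tensors of shape [2]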
63 changes: 35 additions & 28 deletions tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
@@ -69,7 +69,7 @@ IrArray::Index::Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_EQ(shape.dimensions_size(), multidim.size());
CHECK(LayoutUtil::HasLayout(shape));
linear_ = Linearize(shape, ir_builder);
linear_ = Linearize(AsInt64Slice(shape.dimensions()), ir_builder);
}

IrArray::IrArray(llvm::Value* base_ptr, const Shape& shape)
@@ -109,35 +109,41 @@ IrArray::Index IrArray::Index::SourceIndexOfReshape(
llvm::IRBuilder<>* builder) const {
const auto& target_index = *this;
CHECK_EQ(target_index.size(), ShapeUtil::Rank(output_shape));
llvm::Value* logical_linear_index = Linearize(output_shape, builder);
// Delinearizes logical_linear_index for the source array in row-major
// collapsed order. The first rank-1 indices are the remainder of the
// linear index by each dimension size.
std::vector<std::pair<int64, int64>> unmodified_dims =
ShapeUtil::DimensionsUnmodifiedByReshape(input_shape, output_shape);
std::vector<llvm::Value*> source_multidim_index(ShapeUtil::Rank(input_shape));
for (int64 i = ShapeUtil::Rank(input_shape) - 1; i >= 0; --i) {
auto divisor = builder->getInt64(input_shape.dimensions(i));
if (input_shape.dimensions(i) <= 1) {
source_multidim_index[i] = builder->getInt64(0);
} else {
// Search unmodified_dims for a pair whose first element is exactly "i".
//
// Because unmodified_dims are sorted by both "first" and "second", and
// "i" is monotonically decreasing, we avoid redundant searching by
// popping the back of unmodified_dims until the rear pair's first element
// <= i. If we stop precisely at "i", we find a match.
while (!unmodified_dims.empty() && unmodified_dims.back().first > i) {
unmodified_dims.pop_back();
}
if (!unmodified_dims.empty() && unmodified_dims.back().first == i) {
source_multidim_index[i] = target_index[unmodified_dims.back().second];
std::vector<std::pair<int64, int64>> common_factors =
CommonFactors(AsInt64Slice(input_shape.dimensions()),
AsInt64Slice(output_shape.dimensions()));
std::vector<llvm::Value*> source_multidim_index(
ShapeUtil::Rank(input_shape),
llvm::UndefValue::get(builder->getInt64Ty()));
// We compute the source indices in each common factor from only the target
// indices in the same common factor.
for (ssize_t k = common_factors.size() - 2; k >= 0; --k) {
llvm::Value* logical_linear_index =
Index(tensorflow::gtl::ArraySlice<llvm::Value*>(
multidim_, common_factors[k].second,
common_factors[k + 1].second - common_factors[k].second))
.Linearize(
tensorflow::gtl::ArraySlice<int64>(
AsInt64Slice(output_shape.dimensions()),
common_factors[k].second,
common_factors[k + 1].second - common_factors[k].second),
builder);
// Delinearizes logical_linear_index for the source array in row-major
// collapsed order. The first rank-1 indices are the remainder of the
// linear index by each dimension size.
for (int64 i = common_factors[k + 1].first - 1;
i >= common_factors[k].first; --i) {
llvm::Value* divisor = builder->getInt64(input_shape.dimensions(i));
if (input_shape.dimensions(i) == 1) {
source_multidim_index[i] = builder->getInt64(0);
} else if (i == common_factors[k].first) {
source_multidim_index[i] = logical_linear_index;
} else {
source_multidim_index[i] =
builder->CreateURem(logical_linear_index, divisor);
}
logical_linear_index = builder->CreateUDiv(logical_linear_index, divisor);
}
logical_linear_index = builder->CreateUDiv(logical_linear_index, divisor);
}

if (linear() != nullptr &&
@@ -160,8 +166,9 @@ IrArray::Index IrArray::Index::SourceIndexOfTranspose(
return Index(operand_multidim_index);
}

llvm::Value* IrArray::Index::Linearize(const Shape& shape,
llvm::IRBuilder<>* builder) const {
llvm::Value* IrArray::Index::Linearize(
tensorflow::gtl::ArraySlice<int64> dimensions,
llvm::IRBuilder<>* builder) const {
// Each dimension is multiplied by the product of the sizes of all
// earlier dimensions and added to the accumulator logical_linear_index.
llvm::Value* logical_linear_index = builder->getInt64(0);
@@ -172,7 +179,7 @@ llvm::Value* IrArray::Index::Linearize(const Shape& shape,
/*HasNUW=*/true, /*HasNSW=*/true);
logical_linear_index = builder->CreateAdd(logical_linear_index, addend, "",
/*HasNUW=*/true, /*HasNSW=*/true);
multiplier *= shape.dimensions(i);
multiplier *= dimensions[i];
}
return logical_linear_index;
}
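For orientation, the ir_array.cc change above replaces one whole-shape linearize/delinearize round trip with per-group round trips: Linearize now takes a plain dimensions slice, and SourceIndexOfReshape applies the round trip only inside each group of dimensions whose total element count is unchanged by the reshape (the CommonFactors groups). The underlying row-major index arithmetic is sketched below in plain Python rather than LLVM IR; the helper names are illustrative, not the XLA implementation itself:

def linearize(index, dims):
    # Row-major order: the last dimension varies fastest.
    linear, multiplier = 0, 1
    for i in reversed(range(len(dims))):
        linear += index[i] * multiplier
        multiplier *= dims[i]
    return linear

def delinearize(linear, dims):
    index = [0] * len(dims)
    for i in reversed(range(len(dims))):
        index[i] = linear % dims[i]
        linear //= dims[i]
    return index

# A reshape from [6, 4] to [2, 3, 4] maps source index [5, 2] to target index
# [1, 2, 2]; both name the 22nd element in row-major order. The new code does
# this group by group: the trailing 4 is a common factor, so its coordinate
# passes through untouched, and only the 6 <-> 2x3 group is linearized and
# delinearized.
assert delinearize(linearize([1, 2, 2], [2, 3, 4]), [6, 4]) == [5, 2]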
2 changes: 1 addition & 1 deletion tensorflow/compiler/xla/service/llvm_ir/ir_array.h
@@ -124,7 +124,7 @@ class IrArray {

// Linearizes the index into the given shape, i.e. reshapes it to rank-1 and
// returns the index into the sole dimension 0 of the new shape.
llvm::Value* Linearize(const Shape& shape,
llvm::Value* Linearize(tensorflow::gtl::ArraySlice<int64> dimensions,
llvm::IRBuilder<>* builder) const;

private:
(Diffs for the remaining changed files are not shown in this view.)
