
Merge commit for internal changes
caisq committed Oct 16, 2017
2 parents dc65f63 + 1cf9f7a commit 22d7d0f
Showing 106 changed files with 4,163 additions and 544 deletions.
16 changes: 0 additions & 16 deletions tensorflow/BUILD
@@ -141,22 +141,6 @@ config_setting(
visibility = ["//visibility:public"],
)

config_setting(
name = "linux_armhf",
values = {
"cpu": "armeabi-v7a",
},
visibility = ["//visibility:public"],
)

config_setting(
name = "linux_arm64",
values = {
"cpu": "arm64-v8a",
},
visibility = ["//visibility:public"],
)

config_setting(
name = "debug",
values = {
11 changes: 11 additions & 0 deletions tensorflow/c/eager/BUILD
@@ -101,3 +101,14 @@ tf_cc_test(
"//tensorflow/core:test_main",
],
)

cc_library(
name = "tape",
srcs = ["tape.cc"],
hdrs = ["tape.h"],
visibility = ["//tensorflow:internal"],
deps = [
"//tensorflow/core:framework",
"//tensorflow/core:lib",
],
)
102 changes: 102 additions & 0 deletions tensorflow/c/eager/tape.cc
@@ -0,0 +1,102 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/c/eager/tape.h"

namespace tensorflow {
namespace eager {

bool GradientTape::ShouldRecord(gtl::ArraySlice<int64> tensor_ids) {
for (int64 i : tensor_ids) {
if (tensor_tape_.find(i) != tensor_tape_.end()) {
return true;
}
}
return false;
}

void GradientTape::Watch(int64 tensor_id) {
tensor_tape_.emplace(tensor_id, -1);
}

void GradientTape::RecordOperation(
const string& op_type, gtl::ArraySlice<TapeTensor> output_tensors,
gtl::ArraySlice<int64> input_tensor_id, void* backward_function,
const std::function<void()>& backward_function_deleter) {
if (!ShouldRecord(input_tensor_id)) {
backward_function_deleter();
return;
}
std::vector<int64> ids;
ids.reserve(input_tensor_id.size());
for (int64 i : input_tensor_id) {
tensor_usage_[i]++;
ids.push_back(i);
}
const int64 op_id = next_op_id_++;
std::vector<TapeTensor> tensors;
tensors.reserve(output_tensors.size());
for (const TapeTensor& o : output_tensors) {
// Note: the tensor can have already been watched and hence be in the tape,
// so we cannot check that we're inserting it here.
tensor_tape_[o.id] = op_id;
tensor_usage_[o.id] = 1;
tensors.push_back(o);
}
op_tape_[op_id] = OpTapeEntry{op_type, tensors, ids, backward_function,
backward_function_deleter};
}

void GradientTape::DeleteTrace(int64 tensor_id) {
auto it = tensor_usage_.find(tensor_id);
if (it == tensor_usage_.end()) {
return;
}
it->second--;
if (it->second != 0) {
return;
}
tensor_usage_.erase(it);
auto tensor_op_it = tensor_tape_.find(tensor_id);
if (tensor_op_it == tensor_tape_.end()) {
return;
}
const int64 op_id = tensor_op_it->second;
if (op_id == -1) {
// Do not delete watched tensors.
return;
}
tensor_tape_.erase(tensor_op_it);
auto op_it = op_tape_.find(op_id);
CHECK(op_it != op_tape_.end());
for (const auto& output : op_it->second.output_tensor_info) {
if (tensor_usage_.find(output.id) != tensor_usage_.end()) {
// Found a usage for an output, so cannot delete the op.
return;
}
}
for (int64 id : op_it->second.input_tensor_id) {
DeleteTrace(id);
}
op_it->second.backward_function_deleter();
op_tape_.erase(op_it);
}

std::pair<TensorTape, OpTape> GradientTape::Export() {
return {std::move(tensor_tape_), std::move(op_tape_)};
}

} // namespace eager
} // namespace tensorflow
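
The reference counting in DeleteTrace drives the tape's eager garbage collection: an op entry survives only while at least one of its outputs is still in use, and deleting the last trace erases the op and releases its backward function. A minimal sketch of that behavior, assuming only the API declared in tape.h below; the tensor ids, op type, and instrumented deleter are made up for illustration and are not part of this commit:

// Sketch only: exercises GradientTape::DeleteTrace.
#include "tensorflow/c/eager/tape.h"

namespace tensorflow {
namespace eager {

void DeleteTraceSketch() {
  GradientTape tape;
  tape.Watch(1);  // Tensor 1 is directly watched (recorded with op id -1).

  bool backward_released = false;
  TapeTensor out{/*id=*/2, DT_FLOAT, TensorShape()};
  tape.RecordOperation("Square", {out}, /*input_tensor_id=*/{1},
                       /*backward_function=*/nullptr,
                       [&backward_released]() { backward_released = true; });

  // RecordOperation set tensor 2's usage count to 1. Deleting its trace
  // drops the count to zero, erases the op entry, recurses into input 1
  // (watched, so it is kept), and runs the backward-function deleter.
  tape.DeleteTrace(2);
  // backward_released is now true.
}

}  // namespace eager
}  // namespace tensorflow
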
96 changes: 96 additions & 0 deletions tensorflow/c/eager/tape.h
@@ -0,0 +1,96 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EAGER_TAPE_H_
#define TENSORFLOW_C_EAGER_TAPE_H_

// Language-agnostic gradient tape. Does not perform backpropagation, just
// maintains the data structures required to do so.

#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {
namespace eager {

// Information about a tensor.
struct TapeTensor {
int64 id; // Expected to be unique in the lifetime of this process.
DataType dtype;
TensorShape shape;
};

// Represents an entry in the tape.
struct OpTapeEntry {
string op_type;
std::vector<TapeTensor> output_tensor_info;
std::vector<int64> input_tensor_id;

// TODO(apassos) consider narrowing down this interface.
void* backward_function;

// Should be called before deleting the backward function. TODO(apassos) use
// unique_ptrs to ensure this happens.
std::function<void()> backward_function_deleter;
};

// Map from tensor_id to internally-defined operation-id of the operation which
// produced this tensor. A value of -1 means that the tensor was directly
// watched and not the result of any operation in the tape.
using TensorTape = std::unordered_map<int64, int64>;

// Map from operation-id to tape entry.
using OpTape = std::unordered_map<int64, OpTapeEntry>;

// Traces the execution of operations, doing eager garbage collection, and
// exporting a full trace so other code can do backpropagation. Not thread-safe.
class GradientTape {
public:
GradientTape() {}

bool ShouldRecord(gtl::ArraySlice<int64> tensor_ids);

void Watch(int64 tensor_id);

void RecordOperation(const string& op_type,
gtl::ArraySlice<TapeTensor> output_tensors,
gtl::ArraySlice<int64> input_tensor_id,
void* backward_function,
const std::function<void()>& backward_function_deleter);

void DeleteTrace(int64 tensor_id);

// Note: it is only valid to call Export once per tape, and after calling
// export the tape is no longer valid (i.e. calls to ShouldRecord, Watch,
// Record, and Delete have undefined behavior).
std::pair<TensorTape, OpTape> Export();

private:
TensorTape tensor_tape_;
OpTape op_tape_;
int64 next_op_id_{0};

// Map from tensor id to number of remaining usages (i.e. how many entries in
// the tape refer to it); to aid in tape garbage collection.
std::unordered_map<int64, int64> tensor_usage_;
};

} // namespace eager
} // namespace tensorflow

#endif // TENSORFLOW_C_EAGER_TAPE_H_
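
End to end, a tape is driven with Watch and RecordOperation while ops execute, then Export is called exactly once to hand the accumulated structures to backpropagation code. A hedged lifecycle sketch; the ids, shapes, op type, and null backward function are placeholders, not part of this commit:

// Sketch only: one recorded op, then Export. The watched inputs map to -1
// in the TensorTape; the output maps to the generated op id 0.
#include <utility>

#include "tensorflow/c/eager/tape.h"

namespace tensorflow {
namespace eager {

void TapeLifecycleSketch() {
  GradientTape tape;
  tape.Watch(1);
  tape.Watch(2);

  if (tape.ShouldRecord({1, 2})) {
    TapeTensor out{/*id=*/3, DT_FLOAT, TensorShape({2, 2})};
    tape.RecordOperation("MatMul", {out}, /*input_tensor_id=*/{1, 2},
                         /*backward_function=*/nullptr, []() {});
  }

  // Valid exactly once; afterwards the tape must not be touched.
  std::pair<TensorTape, OpTape> trace = tape.Export();
  // trace.first holds {1: -1, 2: -1, 3: 0}; trace.second holds the
  // OpTapeEntry for "MatMul" under op id 0.
}

}  // namespace eager
}  // namespace tensorflow
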
2 changes: 1 addition & 1 deletion tensorflow/cc/saved_model/BUILD
@@ -54,9 +54,9 @@ cc_library(
hdrs = ["loader.h"],
deps = [
":constants",
"//tensorflow/core:framework",
] + if_not_mobile([
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
2 changes: 0 additions & 2 deletions tensorflow/compiler/aot/tfcompile.bzl
@@ -302,8 +302,6 @@ def target_llvm_triple():
"//tensorflow:android_arm": "armv7-none-android",
"//tensorflow:android_arm64": "aarch64-none-android",
"//tensorflow:android_x86": "i686-none-android",
"//tensorflow:linux_armhf": "armv7-none-linux-gnueabihf",
"//tensorflow:linux_arm64": "aarch64-none-linux-gnu",
"//tensorflow:linux_ppc64le": "ppc64le-ibm-linux-gnu",
"//tensorflow:darwin": "x86_64-none-darwin",
"//conditions:default": "x86_64-pc-linux",
8 changes: 5 additions & 3 deletions tensorflow/compiler/tf2xla/functionalize_control_flow.cc
@@ -475,9 +475,11 @@ Status FunctionalizeLoop(Graph* graph, Frame* frame,
int dst_input = edge->dst_input();
graph->RemoveEdge(edge);

int src_output =
dst_input == Graph::kControlSlot ? Graph::kControlSlot : i;
graph->AddEdge(while_node, src_output, dst, dst_input);
if (dst_input == Graph::kControlSlot) {
graph->AddControlEdge(while_node, dst);
} else {
graph->AddEdge(while_node, i, dst, dst_input);
}
}
}
}
2 changes: 1 addition & 1 deletion tensorflow/compiler/tf2xla/graph_compiler.cc
@@ -88,7 +88,7 @@ Status GraphCompiler::Compile() {
std::vector<Node*> topo_sorted_nodes;
// XLA requires determinism, generate a stable ordering from DFS.
GetReversePostOrder(*graph_, &topo_sorted_nodes,
/*stable_comparator=*/NodeComparatorID());
/*stable_comparator=*/NodeComparatorName());

OpKernelContext::Params params;
PartiallySetupParams(&params);
30 changes: 24 additions & 6 deletions tensorflow/compiler/xla/protobuf_util.cc
@@ -51,21 +51,39 @@ StatusOr<string> ToJson(const tensorflow::protobuf::Message& message) {
return json_output;
}

Status DumpJsonToDirectory(const tensorflow::protobuf::Message& message,
const string& directory, const string& file_name) {
TF_ASSIGN_OR_RETURN(const string json_output, ToJson(message));
namespace {

tensorflow::Env* env = tensorflow::Env::Default();
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(directory));
string safe_file_name = file_name + ".json";
string SanitizeFileName(const string& file_name) {
string safe_file_name = file_name;
for (char& c : safe_file_name) {
if (c == '/' || c == '\\') {
c = '_';
}
}
return safe_file_name;
}

} // namespace

Status DumpJsonToDirectory(const tensorflow::protobuf::Message& message,
const string& directory, const string& file_name) {
TF_ASSIGN_OR_RETURN(const string json_output, ToJson(message));

tensorflow::Env* env = tensorflow::Env::Default();
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(directory));
string safe_file_name = SanitizeFileName(file_name) + ".json";
const string path = tensorflow::io::JoinPath(directory, safe_file_name);
return tensorflow::WriteStringToFile(env, path, json_output);
}

Status DumpProtoToDirectory(const tensorflow::protobuf::Message& message,
const string& directory, const string& file_name) {
tensorflow::Env* env = tensorflow::Env::Default();
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(directory));
string safe_file_name = SanitizeFileName(file_name) + ".pb";
const string path = tensorflow::io::JoinPath(directory, safe_file_name);
return tensorflow::WriteBinaryProto(env, path, message);
}

} // namespace protobuf_util
} // namespace xla
10 changes: 6 additions & 4 deletions tensorflow/compiler/xla/protobuf_util.h
@@ -35,10 +35,12 @@ extern bool ProtobufEquals(const tensorflow::protobuf::Message& m1,
// Returns 'message' as a JSON string.
StatusOr<string> ToJson(const tensorflow::protobuf::Message& message);

// Converts 'message' to JSON, and dumps it to the path formed by joining
// 'directory/file_name.json'. The 'directory' is recursively created if it
// doesn't already exist, and the 'file_name' is sanitized by replacing illegal
// characters with underscore '_'.
// Writes the given message in binary proto or JSON format to the path formed by
// joining 'directory/file_name.pb' (or file_name.json). The 'directory' is
// recursively created if it doesn't already exist, and the 'file_name' is
// sanitized by replacing illegal characters with underscore '_'.
Status DumpProtoToDirectory(const tensorflow::protobuf::Message& message,
const string& directory, const string& file_name);
Status DumpJsonToDirectory(const tensorflow::protobuf::Message& message,
const string& directory, const string& file_name);

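Both entry points above sanitize the file name before writing, so callers may pass slash-separated names freely. A hedged usage sketch; the directory, name, and TF_CHECK_OK error handling are illustrative, not part of this commit:

// Sketch only: any protobuf message can be dumped. The '/' characters in
// the name are replaced by '_', so this writes /tmp/xla_dumps/while_body_3.pb
// and /tmp/xla_dumps/while_body_3.json.
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/core/lib/core/status.h"

void DumpSketch(const tensorflow::protobuf::Message& message) {
  TF_CHECK_OK(xla::protobuf_util::DumpProtoToDirectory(
      message, "/tmp/xla_dumps", "while/body/3"));
  TF_CHECK_OK(xla::protobuf_util::DumpJsonToDirectory(
      message, "/tmp/xla_dumps", "while/body/3"));
}
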
13 changes: 13 additions & 0 deletions tensorflow/compiler/xla/service/BUILD
@@ -431,6 +431,7 @@ cc_library(
":hlo_evaluator",
":hlo_execution_profile",
":hlo_module_config",
":hlo_verifier",
":platform_util",
":session_proto",
":transfer_manager",
@@ -717,6 +718,18 @@ cc_library(
],
)

tf_cc_test(
name = "name_uniquer_test",
srcs = ["name_uniquer_test.cc"],
deps = [
":name_uniquer",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:test",
],
)

cc_library(
name = "liveness_util",
srcs = ["liveness_util.cc"],
6 changes: 3 additions & 3 deletions tensorflow/compiler/xla/service/algebraic_simplifier.cc
@@ -98,11 +98,11 @@ bool ReshapeIsBitcast(
HloComputation* CreateScalarBinaryComputation(HloModule* module,
PrimitiveType primitive_type,
HloOpcode opcode) {
HloComputation::Builder b("scalar computation");
HloComputation::Builder b("scalar_computation");
auto scalar_lhs = b.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "scalar lhs"));
0, ShapeUtil::MakeShape(F32, {}), "scalar_lhs"));
auto scalar_rhs = b.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "scalar rhs"));
1, ShapeUtil::MakeShape(F32, {}), "scalar_rhs"));
auto scalar_op = b.AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(primitive_type, {}),
opcode, scalar_lhs, scalar_rhs));