[cleanup] Replace raw pointers with unique ptr.
Roman Dzhabarov authored and rdzhabarov committed Dec 6, 2018
1 parent 2b73e3b commit 018a598
Showing 7 changed files with 83 additions and 89 deletions.
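The gist of the change: the loaders' weight map moves from raw Tensor pointers that were deleted by hand in ~ProtobufLoader() to std::unique_ptr<Tensor>, so the manual cleanup loop disappears, the destructor becomes = default, and copying a loader is explicitly deleted. A minimal, self-contained sketch of the before/after ownership pattern follows; it uses std::unordered_map, a stub Tensor, and an invented registerTensor() helper purely for illustration, not the real llvm::StringMap-based Glow classes.

#include <memory>
#include <string>
#include <unordered_map>

struct Tensor {}; // stand-in for glow::Tensor

// Before: the map stores raw owning pointers, so the destructor must walk it.
class RawLoader {
public:
  ~RawLoader() {
    for (auto &it : tensors_) {
      delete it.second; // manual cleanup; a copied loader would double-free
    }
  }

protected:
  std::unordered_map<std::string, Tensor *> tensors_;
};

// After: unique_ptr owns each tensor, the defaulted destructor is enough,
// and copies are deleted because unique_ptr is move-only.
class UniqueLoader {
public:
  UniqueLoader() = default;
  UniqueLoader(const UniqueLoader &) = delete;
  UniqueLoader &operator=(const UniqueLoader &) = delete;
  virtual ~UniqueLoader() = default;

  void registerTensor(const std::string &name, Tensor *T) {
    tensors_[name].reset(T); // same idiom as the diff: the map takes ownership
  }

  Tensor *getTensorByName(const std::string &name) {
    auto it = tensors_.find(name);
    return it == tensors_.end() ? nullptr : it->second.get(); // non-owning view
  }

protected:
  std::unordered_map<std::string, std::unique_ptr<Tensor>> tensors_;
};

int main() {
  UniqueLoader loader;
  loader.registerTensor("weight", new Tensor());
  Tensor *view = loader.getTensorByName("weight"); // still owned by the loader
  (void)view;
} // every registered tensor is freed here automatically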
7 changes: 6 additions & 1 deletion include/glow/Importer/CommonOperatorLoader.h
@@ -37,6 +37,11 @@ namespace glow {
/// from ProtobufLoader class, therefore modifying the class instance itself.
template <typename OpType, typename AttrType>
class CommonOperatorLoader : public ProtobufLoader {
public:
CommonOperatorLoader(llvm::ArrayRef<const char *> names,
llvm::ArrayRef<TypeRef> types, Function &F)
: ProtobufLoader(names, types, F) {}

protected:
using ArgumentDictionaryTy =
std::unordered_map<std::string, const AttrType *>;
@@ -95,7 +100,7 @@ class CommonOperatorLoader : public ProtobufLoader {
// This is statically known data, and so we create a Tensor for it and
// register it in tensors_.
auto *T = new Tensor(ElemKind::Int64ITy, {in.dims().size()});
tensors_[opName] = T;
tensors_[opName].reset(T);
T->template getHandle<int64_t>() =
std::vector<int64_t>(in.dims().begin(), in.dims().end());

7 changes: 5 additions & 2 deletions include/glow/Importer/ProtobufLoader.h
@@ -29,6 +29,7 @@

#include <google/protobuf/text_format.h>

#include <memory>
#include <string>
#include <vector>

@@ -92,7 +93,7 @@ class ProtobufLoader {
/// Saves network nodes by name.
llvm::StringMap<NodeValue> nodeValueByName_;
/// A list of weight tensors indexed by name.
llvm::StringMap<Tensor *> tensors_;
llvm::StringMap<std::unique_ptr<Tensor>> tensors_;
/// A map from names of the external outputs of the network to Variables.
llvm::StringMap<Placeholder *> outputVarsByName_;

@@ -137,7 +138,9 @@ class ProtobufLoader {
llvm::ArrayRef<TypeRef> types, Function &F,
llvm::Error *errPtr = nullptr);

virtual ~ProtobufLoader();
ProtobufLoader(const ProtobufLoader &other) = delete;
ProtobufLoader &operator=(const ProtobufLoader &) = delete;
virtual ~ProtobufLoader() = default;

/// \returns the single final output of the network. The function assumes that
/// there is only one output, returns Error otherwise. For image
118 changes: 60 additions & 58 deletions lib/Importer/Caffe2ModelLoader.cpp
@@ -845,46 +845,47 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) {
floats: -0.028315347
*/

auto *T = new Tensor();
for (auto &o : op.output()) {
tensors_[o] = T;
}
auto *T = new Tensor();
tensors_[o].reset(T);

auto dim = getShape(dict["shape"]);
auto dim = getShape(dict["shape"]);

size_t i = 0;
size_t i = 0;
#define LOAD_TENSOR_FILL(TYPE_NAME, NATIVE_TYPE, PROTO_TYPE_NAME) \
T->reset(ElemKind::TYPE_NAME, dim); \
auto TH = T->getHandle<NATIVE_TYPE>(); \
for (auto num : dict["values"]->PROTO_TYPE_NAME()) { \
TH.raw(i++) = num; \
}

if (dict["values"]->floats_size()) {
RETURN_ERR_IF_NOT(
typeName != "GivenTensorIntFill" &&
typeName != "GivenTensorInt64Fill",
"Typename must not be GivenTensorIntFill or GivenTensorInt64Fill");
LOAD_TENSOR_FILL(FloatTy, float, floats);
} else if (dict["values"]->ints_size()) {
if (typeName == "GivenTensorIntFill") {
LOAD_TENSOR_FILL(Int32ITy, int32_t, ints);
} else if (typeName == "GivenTensorInt64Fill" ||
typeName == "GivenTensorFill") {
LOAD_TENSOR_FILL(Int64ITy, int64_t, ints);
if (dict["values"]->floats_size()) {
RETURN_ERR_IF_NOT(
typeName != "GivenTensorIntFill" &&
typeName != "GivenTensorInt64Fill",
"Typename must not be GivenTensorIntFill or GivenTensorInt64Fill");
LOAD_TENSOR_FILL(FloatTy, float, floats);
} else if (dict["values"]->ints_size()) {
if (typeName == "GivenTensorIntFill") {
LOAD_TENSOR_FILL(Int32ITy, int32_t, ints);
} else if (typeName == "GivenTensorInt64Fill" ||
typeName == "GivenTensorFill") {
LOAD_TENSOR_FILL(Int64ITy, int64_t, ints);
} else {
RETURN_ERR(unexpectedNodeErrorMessage(
op, "Unsupported data type for " + typeName));
}
} else {
RETURN_ERR(unexpectedNodeErrorMessage(op, "Unsupported data type for " +
typeName));
}
} else {
RETURN_ERR(unexpectedNodeErrorMessage(op, "Unsupported data type for " +
typeName));
}
#undef LOAD_TENSOR_FILL

RETURN_ERR_IF_NOT(i == T->size(),
"The number of serialized values does not "
"match the size of the tensor.");
RETURN_ERR_IF_NOT(i == T->size(),
"The number of serialized values does not "
"match the size of the tensor.");
}

return llvm::Error::success();
}
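For readers unfamiliar with the LOAD_TENSOR_FILL macro used above: it expands purely textually inside this code, using the surrounding function's T, dim, dict, and i. The fragment below writes out that expansion for LOAD_TENSOR_FILL(FloatTy, float, floats) for clarity; it is not standalone code.

// Expansion of LOAD_TENSOR_FILL(FloatTy, float, floats):
T->reset(ElemKind::FloatTy, dim);           // allocate float storage of shape dim
auto TH = T->getHandle<float>();            // typed view over the tensor
for (auto num : dict["values"]->floats()) { // every serialized value
  TH.raw(i++) = num;                        // i counts the values copied
}
// ...which is why the block ends by checking that i == T->size().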

@@ -915,47 +916,48 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) {
i: 127
}
*/
auto *T = new Tensor();
for (auto &o : op.output()) {
auto *T = new Tensor();
if (tensors_.count(o)) {
continue;
}
tensors_[o] = T;
}
tensors_[o].reset(T);

auto dim = getShape(dict["shape"]);
auto dim = getShape(dict["shape"]);

RETURN_ERR_IF_NOT(dict.count("Y_zero_point"),
"missing zero point for quantized output type");
RETURN_ERR_IF_NOT(dict.count("Y_scale"),
"missing Y_scale for quantized output type");
RETURN_ERR_IF_NOT(dict.count("Y_zero_point"),
"missing zero point for quantized output type");
RETURN_ERR_IF_NOT(dict.count("Y_scale"),
"missing Y_scale for quantized output type");

float scale;
ASSIGN_VALUE_OR_RETURN_ERR(scale, loadFloat(dict["Y_scale"]));
int32_t offset;
ASSIGN_VALUE_OR_RETURN_ERR(offset, loadInt(dict["Y_zero_point"]));
size_t i = 0;
if (typeName == "Int8GivenTensorFill") {
// Although in Caffe2 quantized model, the weights is int8 quantized,
// the weights is stored in uint8_t format due to that Caffe2 requires the
// type of input and weights must be the same. Therefore, we need to
// convert it to int8 by subtracting 128.
T->reset(ElemKind::Int8QTy, dim, scale, offset - OFFSETSHIFT);
auto TH = T->getHandle<int8_t>();
std::string str = dict["values"]->s();
for (; i < str.size(); i++) {
TH.raw(i) = ((uint8_t)(str.c_str()[i]) - OFFSETSHIFT);
}
} else {
T->reset(ElemKind::Int32QTy, dim, scale, offset);
auto TH = T->getHandle<int32_t>();
for (auto num : dict["values"]->ints()) {
TH.raw(i++) = num;
float scale;
ASSIGN_VALUE_OR_RETURN_ERR(scale, loadFloat(dict["Y_scale"]));
int32_t offset;
ASSIGN_VALUE_OR_RETURN_ERR(offset, loadInt(dict["Y_zero_point"]));
size_t i = 0;
if (typeName == "Int8GivenTensorFill") {
// Although in Caffe2 quantized model, the weights is int8 quantized,
// the weights is stored in uint8_t format due to that Caffe2 requires
// the type of input and weights must be the same. Therefore, we need to
// convert it to int8 by subtracting 128.
T->reset(ElemKind::Int8QTy, dim, scale, offset - OFFSETSHIFT);
auto TH = T->getHandle<int8_t>();
std::string str = dict["values"]->s();
for (; i < str.size(); i++) {
TH.raw(i) = ((uint8_t)(str.c_str()[i]) - OFFSETSHIFT);
}
} else {
T->reset(ElemKind::Int32QTy, dim, scale, offset);
auto TH = T->getHandle<int32_t>();
for (auto num : dict["values"]->ints()) {
TH.raw(i++) = num;
}
}
RETURN_ERR_IF_NOT(i == T->size(),
"The number of serialized values does not "
"match the size of the tensor.");
}
RETURN_ERR_IF_NOT(i == T->size(),
"The number of serialized values does not "
"match the size of the tensor.");

return llvm::Error::success();
}
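The comment in this hunk explains that Int8GivenTensorFill weights arrive as uint8_t and are shifted down by 128 (OFFSETSHIFT) into int8, with the zero point shifted by the same amount so the represented real values stay unchanged. The standalone snippet below only illustrates that arithmetic; it is not Glow code.

#include <cstdint>
#include <cstdio>

int main() {
  const int32_t OFFSETSHIFT = 128; // the shift named in the comment above
  const uint8_t stored[] = {0, 1, 128, 254, 255};
  for (uint8_t u : stored) {
    // Same expression as the loader: promote, subtract the shift, store as int8.
    int8_t q = static_cast<int8_t>(static_cast<int32_t>(u) - OFFSETSHIFT);
    std::printf("stored uint8 %3u -> int8 %4d\n", static_cast<unsigned>(u),
                static_cast<int>(q));
  }
  // Because the zero point is also stored as (offset - OFFSETSHIFT),
  // scale * (q - zero_point) yields the same real value as before the shift.
  return 0;
}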

@@ -979,7 +981,7 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) {
}

auto *T = new Tensor();
tensors_[name] = T;
tensors_[name].reset(T);

// The shape is set either the shape argument, or from another input
// tensor. Shape takes priority over input.
@@ -1052,7 +1054,7 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) {
*/
const auto &name = op.output(0);
auto *T = new Tensor();
tensors_[name] = T;
tensors_[name].reset(T);
auto dim = getShape(dict["shape"]);
T->reset(ElemKind::FloatTy, dim);
auto TH = T->getHandle<>();
11 changes: 3 additions & 8 deletions lib/Importer/ONNXIFIModelLoader.cpp
@@ -116,14 +116,9 @@ static llvm::Error loadWeight(const onnxTensorDescriptorV1 &in, Tensor *T) {
llvm::Error ONNXIFIModelLoader::loadWeights(
uint32_t weightsCount, const onnxTensorDescriptorV1 *weightDescriptors) {
for (uint32_t i = 0; i < weightsCount; ++i) {
Tensor *T = new Tensor();

if (auto err = loadWeight(weightDescriptors[i], T)) {
delete T;
return err;
}

tensors_[weightDescriptors[i].name] = T;
auto *T = new Tensor();
tensors_[weightDescriptors[i].name].reset(T);
RETURN_IF_ERR(loadWeight(weightDescriptors[i], T));
}

return llvm::Error::success();
15 changes: 5 additions & 10 deletions lib/Importer/ONNXModelLoader.cpp
@@ -254,11 +254,9 @@ llvm::Error ONNXModelLoader::loadConstant(const ONNX_NAMESPACE::NodeProto &op,
"Only Tensor type constants are supported.");

auto *T = new Tensor();
if (auto err = loadTensor(dict.at("value")->t(), T)) {
delete T;
return err;
}
tensors_[name] = T;
tensors_[name].reset(T);
RETURN_IF_ERR(loadTensor(dict.at("value")->t(), T));

return llvm::Error::success();
}

@@ -826,11 +824,8 @@ llvm::Error ONNXModelLoader::loadInitializers(ONNX_NAMESPACE::GraphProto &net) {
// Load the network initializaers:
for (const auto &in : net.initializer()) {
Tensor *T = new Tensor();
if (auto err = loadTensor(in, T)) {
delete T;
return err;
}
tensors_[in.name()] = T;
tensors_[in.name()].reset(T);
RETURN_IF_ERR(loadTensor(in, T));
}
return llvm::Error::success();
}
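Both ONNX loaders above get the same reshuffle: the freshly allocated tensor is handed to the owning map before the fallible loadTensor/loadWeight call runs, so the explicit delete on the error path is no longer needed. A simplified, self-contained sketch of the resulting shape (Tensor, Error, and loadTensorData are stand-ins, not the real loader API):

#include <memory>
#include <string>
#include <system_error>
#include <unordered_map>

struct Tensor {};                             // stand-in for glow::Tensor
using Error = std::error_code;                // stand-in for llvm::Error

Error loadTensorData(Tensor &) { return {}; } // the step that may fail

std::unordered_map<std::string, std::unique_ptr<Tensor>> tensors;

Error loadInitializer(const std::string &name) {
  auto *T = new Tensor();
  tensors[name].reset(T);    // the map owns T before anything can fail...
  return loadTensorData(*T); // ...so an error return no longer leaks T
}

int main() { return loadInitializer("weight") ? 1 : 0; }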
8 changes: 1 addition & 7 deletions lib/Importer/ProtobufLoader.cpp
@@ -30,7 +30,7 @@ bool isArrayConstant(llvm::ArrayRef<size_t> a) {
llvm::Expected<Tensor *> ProtobufLoader::getTensorByName(llvm::StringRef name) {
RETURN_ERR_IF_NOT(tensors_.count(name),
"There is no tensor registered with this name.");
return tensors_[name];
return tensors_[name].get();
}

llvm::Expected<Placeholder *>
@@ -136,10 +136,4 @@ ProtobufLoader::ProtobufLoader(llvm::ArrayRef<const char *> tensorNames,
}
}

ProtobufLoader::~ProtobufLoader() {
for (auto &it : tensors_) {
delete it.second;
}
}

}; // namespace glow
6 changes: 3 additions & 3 deletions tools/loader/TextTranslator.cpp
@@ -322,9 +322,9 @@ int main(int argc, char **argv) {
&encoderInputs.getType(), &attnWeights.getType(),
&prevHyposIndices.getType(), &prevScores.getType(), &prevToken.getType()};

auto LD = Caffe2ModelLoader(loader.getCaffe2NetDescFilename(),
loader.getCaffe2NetWeightFilename(), inputNames,
inputTensors, *loader.getFunction());
Caffe2ModelLoader LD(loader.getCaffe2NetDescFilename(),
loader.getCaffe2NetWeightFilename(), inputNames,
inputTensors, *loader.getFunction());

// Allocate tensors to back all inputs and outputs.
ctx.allocate(loader.getModule()->getPlaceholders());
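This last change is presumably forced by the now-deleted copy constructor: auto LD = Caffe2ModelLoader(...) is copy-initialization, which pre-C++17 compilers reject for a type whose copy constructor is deleted and whose move constructor is suppressed, even though the temporary would be elided, so the loader is now direct-initialized instead. A tiny self-contained illustration (NonCopyable is a stand-in type, not part of Glow):

struct NonCopyable {
  explicit NonCopyable(int) {}
  NonCopyable(const NonCopyable &) = delete; // also suppresses the implicit move ctor
  NonCopyable &operator=(const NonCopyable &) = delete;
};

int main() {
  NonCopyable a(1);           // OK: direct-initialization, no copy or move needed
  // auto b = NonCopyable(2); // ill-formed before C++17: selects the deleted copy/move
  (void)a;
  return 0;
}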