Skip to content

Commit

Permalink
[Caffe2ModelLoader] Load GivenTensorIntFill using int32_t
Browse files Browse the repository at this point in the history
*Description*: GivenTensorIntFill was using `int64_t`, but should be using `int32_t`.
*Testing*: Added a test for all the different supported TensorFills
  • Loading branch information
jfix71 committed Nov 29, 2018
1 parent 79447f3 commit 4247424
Show file tree
Hide file tree
Showing 4 changed files with 141 additions and 10 deletions.
27 changes: 17 additions & 10 deletions lib/Importer/Caffe2ModelLoader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -713,23 +713,30 @@ void Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) {
auto dim = getShape(dict["shape"]);

size_t i = 0;
#define LOAD_TENSOR_FILL(TYPE_NAME, NATIVE_TYPE, PROTO_TYPE_NAME) \
T->reset(ElemKind::TYPE_NAME, dim); \
auto TH = T->getHandle<NATIVE_TYPE>(); \
for (auto num : dict["values"]->PROTO_TYPE_NAME()) { \
TH.raw(i++) = num; \
}

if (dict["values"]->floats_size()) {
assert(typeName != "GivenTensorIntFill" &&
typeName != "GivenTensorInt64Fill");
T->reset(ElemKind::FloatTy, dim);
auto TH = T->getHandle<>();
for (auto num : dict["values"]->floats()) {
TH.raw(i++) = num;
}
LOAD_TENSOR_FILL(FloatTy, float, floats);
} else if (dict["values"]->ints_size()) {
T->reset(ElemKind::Int64ITy, dim);
auto TH = T->getHandle<int64_t>();
for (auto num : dict["values"]->ints()) {
TH.raw(i++) = num;
if (typeName == "GivenTensorIntFill") {
LOAD_TENSOR_FILL(Int32ITy, int32_t, ints);
} else if (typeName == "GivenTensorInt64Fill" ||
typeName == "GivenTensorFill") {
LOAD_TENSOR_FILL(Int64ITy, int64_t, ints);
} else {
unexpectedNodeError(op, "Unsupported data type for " + typeName);
}
} else {
unexpectedNodeError(op, "Unsupported data type for GivenTensorFill.");
unexpectedNodeError(op, "Unsupported data type for " + typeName);
}
#undef LOAD_TENSOR_FILL

assert(i == T->size() && "The number of serialized values does not "
"match the size of the tensor.");
Expand Down
1 change: 1 addition & 0 deletions tests/models/caffe2Models/empty_predict_net.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
external_output: "unused_output"
65 changes: 65 additions & 0 deletions tests/models/caffe2Models/fill_test_init_net.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
name: "init"
op {
output: "tensor_fill_float"
type: "GivenTensorFill"
arg {
name: "shape"
ints: 2
ints: 2
}
arg {
name: "values"
floats: 0.0
floats: 1.0
floats: 2.0
floats: 3.0
}
}
op {
output: "tensor_fill_int"
type: "GivenTensorFill"
arg {
name: "shape"
ints: 2
ints: 2
}
arg {
name: "values"
ints: 0
ints: 1
ints: 2
ints: 3
}
}
op {
output: "tensor_int_fill"
type: "GivenTensorIntFill"
arg {
name: "shape"
ints: 2
ints: 2
}
arg {
name: "values"
ints: 0
ints: 1
ints: 2
ints: 3
}
}
op {
output: "tensor_int64_fill"
type: "GivenTensorInt64Fill"
arg {
name: "shape"
ints: 2
ints: 2
}
arg {
name: "values"
ints: 0
ints: 1
ints: 2
ints: 3
}
}
58 changes: 58 additions & 0 deletions tests/unittests/caffe2ImporterTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1465,3 +1465,61 @@ TEST(caffe2, lengthsSum) {
// Graph has two inputs and one output.
EXPECT_EQ(mod.getPlaceholders().size(), 3);
}

/// Checks that each supported GivenTensor*Fill operator is imported with the
/// element type and payload it is documented to produce.
TEST(caffe2, tensorFillsTest) {
  ExecutionEngine EE{BackendKind::Interpreter};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      "tests/models/caffe2Models/empty_predict_net.pbtxt");
  std::string NetWeightFilename(
      "tests/models/caffe2Models/fill_test_init_net.pbtxt");

  Constant *tensorFillFloat, *tensorFillInt, *tensorIntFill, *tensorInt64Fill;

  // Scope the loader so that everything checked below provably lives in the
  // graph, independent of the loader's lifetime.
  {
    // Loaded protos must have at least one external output, so load an unused
    // output and type to satisfy it. It is named unused_output in
    // empty_predict_net.pbtxt.
    Type unusedTy = Type(ElemKind::FloatTy, {1});
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"unused_output"}, {&unusedTy}, *F);
    // Helper: fetch a loaded weight by name and downcast it to a Constant.
    auto loadConst = [&caffe2LD](const char *name) {
      return llvm::dyn_cast<Constant>(
          caffe2LD.getNodeValueOrCreateConstantByName(name));
    };
    tensorFillFloat = loadConst("tensor_fill_float");
    tensorFillInt = loadConst("tensor_fill_int");
    tensorIntFill = loadConst("tensor_int_fill");
    tensorInt64Fill = loadConst("tensor_int64_fill");
  }

  // Every fill in fill_test_init_net.pbtxt uses shape {2, 2}; verify each
  // constant was created and has those dimensions.
  const std::vector<size_t> expectedDims = {2, 2};
  for (auto *fill :
       {tensorFillFloat, tensorFillInt, tensorIntFill, tensorInt64Fill}) {
    ASSERT_TRUE(fill);
    ASSERT_TRUE(fill->dims().equals(expectedDims));
  }

  auto tensorFillFloatH = tensorFillFloat->getPayload().getHandle<float>();
  auto tensorFillIntH = tensorFillInt->getPayload().getHandle<int64_t>();
  auto tensorIntFillH = tensorIntFill->getPayload().getHandle<int32_t>();
  auto tensorInt64FillH = tensorInt64Fill->getPayload().getHandle<int64_t>();

  // Each fill in fill_test_init_net.pbtxt holds the values 0 through 3.
  for (size_t i = 0; i < 4; i++) {
    EXPECT_FLOAT_EQ(tensorFillFloatH.raw(i), (float)i);
    EXPECT_EQ(tensorFillIntH.raw(i), (int64_t)i);
    EXPECT_EQ(tensorIntFillH.raw(i), (int32_t)i);
    EXPECT_EQ(tensorInt64FillH.raw(i), (int64_t)i);
  }
}

0 comments on commit 4247424

Please sign in to comment.