Skip to content

Commit

Permalink
Change API to support specifying "single", "optional" and "variadic" …
Browse files Browse the repository at this point in the history
…input (onnx#321)

* add formal parameter option to distinguish input types: single, optional, variadic.

* change python apis accordingly.

* fix gen doc issue.

* doc update

* fix comments.

* update operator document.

* remove optional comments which are not needed.
  • Loading branch information
linkerzhang authored Nov 30, 2017
1 parent ace128f commit fd54930
Show file tree
Hide file tree
Showing 12 changed files with 96 additions and 52 deletions.
24 changes: 12 additions & 12 deletions docs/Operators.md
Original file line number Diff line number Diff line change
Expand Up @@ -596,7 +596,7 @@ expect(node, inputs=[x, y], outputs=[x + y],
#### Inputs (1 - ∞)

<dl>
<dt><tt>inputs</tt> : T</dt>
<dt><tt>inputs</tt> (variadic) : T</dt>
<dd>List of tensors for concatenation</dd>
</dl>

Expand Down Expand Up @@ -699,7 +699,7 @@ expect(node, inputs=[], outputs=[values],
<dd>Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x D1 x D2 ... x Dn)</dd>
<dt><tt>W</tt> : T</dt>
<dd>The weight tensor that will be used in the convolutions; has size (M x C x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C x k1 x k2 x ... x kn), where n is the dimension of the kernel</dd>
<dt><tt>B</tt> : T</dt>
<dt><tt>B</tt> (optional) : T</dt>
<dd>Optional 1D bias to be added to the convolution, has size of M.</dd>
</dl>

Expand Down Expand Up @@ -749,7 +749,7 @@ expect(node, inputs=[], outputs=[values],
<dd>Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x D1 x D2 ... x Dn)</dd>
<dt><tt>W</tt> : T</dt>
<dd>The weight tensor that will be used in the convolutions; has size (C x M x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (C x M x k1 x k2 x ... x kn), where n is the dimension of the kernel</dd>
<dt><tt>B</tt> : T</dt>
<dt><tt>B</tt> (optional) : T</dt>
<dd>Optional 1D bias to be added to the convolution, has size of C.</dd>
</dl>

Expand Down Expand Up @@ -2084,8 +2084,8 @@ expect(node, inputs=[a, b], outputs=[c],
#### Inputs (1 - &#8734;)

<dl>
<dt><tt>data_0</tt> : T</dt>
<dd>First of the input tensors.</dd>
<dt><tt>data_0</tt> (variadic) : T</dt>
<dd>List of tensors for Max.</dd>
</dl>

#### Outputs
Expand Down Expand Up @@ -2193,8 +2193,8 @@ expect(node, inputs=[a, b], outputs=[c],
#### Inputs (1 - &#8734;)

<dl>
<dt><tt>data_0</tt> : T</dt>
<dd>First of the input tensors.</dd>
<dt><tt>data_0</tt> (variadic) : T</dt>
<dd>List of tensors for Mean.</dd>
</dl>

#### Outputs
Expand All @@ -2220,8 +2220,8 @@ expect(node, inputs=[a, b], outputs=[c],
#### Inputs (1 - &#8734;)

<dl>
<dt><tt>data_0</tt> : T</dt>
<dd>First of the input tensors.</dd>
<dt><tt>data_0</tt> (variadic) : T</dt>
<dd>List of tensors for Min</dd>
</dl>

#### Outputs
Expand Down Expand Up @@ -3882,8 +3882,8 @@ expect(node, inputs=[x], outputs=[y],
#### Inputs (1 - &#8734;)

<dl>
<dt><tt>data_0</tt> : T</dt>
<dd>First of the input tensors.</dd>
<dt><tt>data_0</tt> (variadic) : T</dt>
<dd>List of tensors for Sum.</dd>
</dl>

#### Outputs
Expand Down Expand Up @@ -4340,7 +4340,7 @@ expect(node, inputs=[x], outputs=[y],
#### Inputs (0 - 1)

<dl>
<dt><tt>shape</tt> : T</dt>
<dt><tt>shape</tt> (optional) : T</dt>
<dd>The shape of filled tensor</dd>
</dl>

Expand Down
7 changes: 6 additions & 1 deletion onnx/cpp2py_export.cc
Original file line number Diff line number Diff line change
Expand Up @@ -58,14 +58,19 @@ PYBIND11_MODULE(onnx_cpp2py_export, onnx_cpp2py_export) {
"allowed_type_strs",
&OpSchema::TypeConstraintParam::allowed_type_strs);

py::enum_<OpSchema::FormalParameterOption>(op_schema, "FormalParameterOption")
.value("Single", OpSchema::Single)
.value("Optional", OpSchema::Optional)
.value("Variadic", OpSchema::Variadic);

py::class_<OpSchema::FormalParameter>(op_schema, "FormalParameter")
.def_property_readonly("name", &OpSchema::FormalParameter::GetName)
.def_property_readonly("types", &OpSchema::FormalParameter::GetTypes)
.def_property_readonly("typeStr", &OpSchema::FormalParameter::GetTypeStr)
.def_property_readonly(
"description", &OpSchema::FormalParameter::GetDescription)
.def_property_readonly(
"optional", &OpSchema::FormalParameter::IsOptional);
"option", &OpSchema::FormalParameter::GetOption);

py::enum_<OpSchema::AttrType>(op_schema, "AttrType")
.value("FLOAT", OpSchema::AttrType::FLOAT)
Expand Down
4 changes: 2 additions & 2 deletions onnx/defs/experiments/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ NOTE: Currently, it supports data type of float, int32, int64, and bool.
"input",
"Input tensor (optional) to provide shape information.",
"T1",
true)
OpSchema::Optional)
.Output(
0,
"output",
Expand All @@ -166,7 +166,7 @@ OPERATOR_SCHEMA(GivenTensorFill)
.SetSupportLevel(SupportType::EXPERIMENTAL)
.NumInputs(0, 1)
.NumOutputs(1)
.Input(0, "shape", "The shape of filled tensor", "T")
.Input(0, "shape", "The shape of filled tensor", "T", OpSchema::Optional)
.Output(0, "X", "The filled tensor", "T")
.TypeConstraint(
"T",
Expand Down
7 changes: 6 additions & 1 deletion onnx/defs/gen_doc.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,12 @@ def main(args):
if schema.inputs:
s += '<dl>\n'
for input in schema.inputs:
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(input.name, ' (optional)' if input.optional else '', input.typeStr)
option_str = ""
if OpSchema.FormalParameterOption.Optional == input.option:
option_str = " (optional)"
elif OpSchema.FormalParameterOption.Variadic == input.option:
option_str = " (variadic)"
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(input.name, option_str, input.typeStr)
s += '<dd>{}</dd>\n'.format(input.description)
s += '</dl>\n'

Expand Down
8 changes: 4 additions & 4 deletions onnx/defs/math/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -396,7 +396,7 @@ OPERATOR_SCHEMA(Max)
Element-wise max of each of the input tensors. All inputs and outputs must
have the same shape and data type.
)DOC")
.Input(0, "data_0", "First of the input tensors.", "T")
.Input(0, "data_0", "List of tensors for Max.", "T", OpSchema::Variadic)
.Output(0, "max", "Output tensor. Same dimension as inputs.", "T")
.TypeConstraint("T", { "tensor(float16)", "tensor(float)", "tensor(double)" },
"Constrain input and output types to float tensors.");
Expand All @@ -409,7 +409,7 @@ OPERATOR_SCHEMA(Min)
Element-wise min of each of the input tensors. All inputs and outputs must
have the same shape and data type.
)DOC")
.Input(0, "data_0", "First of the input tensors.", "T")
.Input(0, "data_0", "List of tensors for Min", "T", OpSchema::Variadic)
.Output(0, "min", "Output tensor. Same dimension as inputs.", "T")
.TypeConstraint("T", { "tensor(float16)", "tensor(float)", "tensor(double)" },
"Constrain input and output types to float tensors.");
Expand All @@ -422,7 +422,7 @@ OPERATOR_SCHEMA(Sum)
Element-wise sum of each of the input tensors. All inputs and outputs must
have the same shape and data type.
)DOC")
.Input(0, "data_0", "First of the input tensors.", "T")
.Input(0, "data_0", "List of tensors for Sum.", "T", OpSchema::Variadic)
.Output(0, "sum", "Output tensor. Same dimension as inputs.", "T")
.TypeConstraint("T", { "tensor(float16)", "tensor(float)", "tensor(double)" },
"Constrain input and output types to float tensors.");
Expand All @@ -435,7 +435,7 @@ OPERATOR_SCHEMA(Mean)
Element-wise mean of each of the input tensors. All inputs and outputs must
have the same shape and data type.
)DOC")
.Input(0, "data_0", "First of the input tensors.", "T")
.Input(0, "data_0", "List of tensors for Mean.", "T", OpSchema::Variadic)
.Output(0, "mean", "Output tensor. Same dimension as inputs.", "T")
.TypeConstraint("T", { "tensor(float16)", "tensor(float)", "tensor(double)" },
"Constrain input and output types to float tensors.");
Expand Down
4 changes: 2 additions & 2 deletions onnx/defs/nn/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ computes the output.)DOC";
"where is the dimension of the kernel", "T");
schema.Input(2,
"B",
"Optional 1D bias to be added to the convolution, has size of M.", "T");
"Optional 1D bias to be added to the convolution, has size of M.", "T", OpSchema::Optional);
schema.Output(0,
"Y",
"Output data tensor that contains the result of the "
Expand Down Expand Up @@ -260,7 +260,7 @@ and computes the output.)DOC";
"where is the dimension of the kernel", "T");
schema.Input(2,
"B",
"Optional 1D bias to be added to the convolution, has size of C.", "T");
"Optional 1D bias to be added to the convolution, has size of C.", "T", OpSchema::Optional);
schema.Output(0,
"Y",
"Output data tensor that contains the result of the convolution. The "
Expand Down
14 changes: 7 additions & 7 deletions onnx/defs/rnn/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,11 +37,11 @@ std::function<void(OpSchema&)> RNNDocGenerator(const char* name) {
"Optional tensor specifying lengths of the sequences in a batch. "
"If not specified - assumed all sequences in the batch to have "
"length `seq_length`. It has shape `[batch_size]`.", "T1",
true /*optional*/);
OpSchema::Optional);
schema.Input(5, "initial_h",
"Optional initial value of the hidden. If not specified - assumed "
"to be 0. It has shape `[num_directions, batch_size, hidden_size]`.",
"T", true /*optional*/);
"T", OpSchema::Optional);
schema.Output(0, "Y",
"A tensor that concats all the intermediate output values of the hidden. "
"It has shape `[seq_length, num_directions, batch_size, hidden_size]`. "
Expand Down Expand Up @@ -142,7 +142,7 @@ Equations (Default: f=tanh):
"and `[WBbi, RBbi]` (if bidirectional). The tensor has shape "
"`[num_directions, 2*hidden_size]`. Optional: If not specified - assumed "
"to be 0.", "T",
true /*optional*/)
OpSchema::Optional)
.FillUsing(RNNDocGenerator("RNN"));


Expand Down Expand Up @@ -243,7 +243,7 @@ Equations (Default: f=sigmoid, g=tanh):
"`[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor "
"has shape `[num_directions, 6*hidden_size]`. Optional: If not specified "
"- assumed to be 0", "T",
true /*optional*/)
OpSchema::Optional)
.FillUsing(RNNDocGenerator("GRU"));


Expand Down Expand Up @@ -356,17 +356,17 @@ Equations (Default: f=sigmoid, g=tanh, h=tanh):
"and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. This "
"tensor has shape `[num_directions, 8*hidden_size]`. Optional: If not "
"specified - assumed to be 0.", "T",
true /*optional*/)
OpSchema::Optional)
.Input(6, "initial_c",
"Optional initial value of the cell. If not specified - assumed "
"to be 0. It has shape `[num_directions, batch_size, hidden_size]`.",
"T", true /*optional*/)
"T", OpSchema::Optional)
.Input(7, "P",
"The weight tensor for peepholes. Concatenation of `P[iof]` and "
"`PB[iof]` (if bidirectional) along dimension 0. It has shape "
"`[num_directions, 3*hidde_size]`. Optional: If not specified - "
"assumed to be 0.", "T",
true /*optional*/)
OpSchema::Optional)
.FillUsing(RNNDocGenerator("LSTM"));

} // namespace onnx
46 changes: 34 additions & 12 deletions onnx/defs/schema.cc
Original file line number Diff line number Diff line change
Expand Up @@ -12,22 +12,22 @@ OpSchema::FormalParameter::FormalParameter(
const DataTypeSet& allowed_type_set,
const std::string& type_str,
const std::string& description,
bool optional)
FormalParameterOption param_option)
: name_(name),
type_set_(allowed_type_set),
type_str_(type_str),
description_(description),
is_optional_(optional) {}
param_option_(param_option) {}

OpSchema::FormalParameter::FormalParameter(
const std::string& name,
const std::string& description,
const std::string& type_str,
bool optional)
FormalParameterOption param_option)
: name_(name),
type_str_(type_str),
description_(description),
is_optional_(optional) {}
param_option_(param_option) {}

const std::string& OpSchema::FormalParameter::GetName() const {
return name_;
Expand All @@ -49,8 +49,8 @@ const std::string& OpSchema::FormalParameter::GetDescription() const {
return description_;
}

bool OpSchema::FormalParameter::IsOptional() const {
return is_optional_;
OpSchema::FormalParameterOption OpSchema::FormalParameter::GetOption() const {
return param_option_;
}

void OpSchema::Verify(const NodeProto& node) const {
Expand Down Expand Up @@ -110,11 +110,27 @@ void OpSchema::Verify(const NodeProto& node) const {

// Check the values of inputs / outputs
for (int in_idx = 0; in_idx < node.input_size(); ++in_idx) {
if (node.input(in_idx).empty() && !(inputs_[in_idx].IsOptional())) {
if (in_idx >= inputs_.size()) {
if (Variadic == inputs_.back().GetOption()) {
// The last input formal parameter should be variadic.
break;
}
else {
fail_check(
"Node (",
node.name(),
") has more inputs (",
node.input_size(),
") than declared (",
inputs_.size(),
") in op definition.");
}
}
if (node.input(in_idx).empty() && (Single == inputs_[in_idx].GetOption())) {
fail_check(
"Input ",
in_idx,
" is not marked optional but has an empty string in the graph");
" is marked single but has an empty string in the graph");
}
}
for (int out_idx = 0; out_idx < node.output_size(); ++out_idx) {
Expand Down Expand Up @@ -403,11 +419,11 @@ OpSchema& OpSchema::Input(
const std::string& name,
const std::string& description,
const std::string& type_str,
bool optional) {
OpSchema::FormalParameterOption param_option) {
if (int(inputs_.size()) <= n) {
inputs_.resize(n + 1);
}
inputs_[n] = FormalParameter(name, description, type_str, optional);
inputs_[n] = FormalParameter(name, description, type_str, param_option);
return *this;
}

Expand All @@ -419,7 +435,7 @@ OpSchema& OpSchema::Output(
if (int(outputs_.size()) <= n) {
outputs_.resize(n + 1);
}
outputs_[n] = FormalParameter(name, description, type_str, false);
outputs_[n] = FormalParameter(name, description, type_str, Single);
return *this;
}

Expand Down Expand Up @@ -499,13 +515,19 @@ void OpSchema::Finalize() {
for (const auto& it : outputs_) {
ENFORCE(!(it.GetName().empty()));
}

// Only the last input could be variadic.
for (int i = 0; i < (int)(inputs_.size()) - 1; ++i) {
ENFORCE(Variadic != inputs_[i].GetOption());
}

// TODO: also cover checks for arbitrary number of inputs
// allow extra tailing inputs not be present if all inputs at the end are
// marked as optional
if (max_input_ < std::numeric_limits<int>::max()) {
int ind = max_input_;
for (auto& input : inputs_) {
if (input.IsOptional() && ind > 0) {
if (Single != input.GetOption() && ind > 0) {
--ind;
}
}
Expand Down
Loading

0 comments on commit fd54930

Please sign in to comment.