Skip to content

Commit

Permalink
Merge branch 'feature/pad' into 'master'
Browse files Browse the repository at this point in the history
@3c16a772

See merge request ai/esp-dl!21
  • Loading branch information
ESP-YHY committed Oct 25, 2021
2 parents f108a83 + 56bf6aa commit c4735f4
Show file tree
Hide file tree
Showing 25 changed files with 357 additions and 6 deletions.
9 changes: 5 additions & 4 deletions include/dl_define.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,10 @@ namespace dl

/**
 * @brief Padding modes (names follow the numpy/TensorFlow pad conventions —
 * NOTE(review): confirm REFLECT/SYMMETRIC edge handling against the kernel
 * implementations).
 */
typedef enum
{
    PADDING_EMPTY,     /*<! leave the padded area uninitialized >*/
    PADDING_CONSTANT,  /*<! fill the padded area with constant_values >*/
    PADDING_EDGE,      /*<! replicate the edge value >*/
    PADDING_REFLECT,   /*<! mirror the tensor, excluding the edge value >*/
    PADDING_SYMMETRIC, /*<! mirror the tensor, including the edge value >*/
} padding_mode_t;
} // namespace dl
2 changes: 1 addition & 1 deletion include/layer/dl_layer_max2d.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ namespace dl
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_exponent(input0.exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
Expand Down
169 changes: 169 additions & 0 deletions include/layer/dl_layer_pad.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,169 @@
#pragma once

#include "dl_nn_pad.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Pad layer: enlarges the input Tensor by padding the edges of every
 * dimension according to a padding mode.
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class Pad : public Layer
{
private:
    std::vector<int> paddings;              /*<! per-dim (front, back) padding sizes, broadcast in build() >*/
    std::vector<feature_t> constant_values; /*<! fill values, consulted only when mode == PADDING_CONSTANT >*/
    padding_mode_t mode;                    /*<! padding mode >*/
    Tensor<feature_t> *output;              /*<! output ptr of Pad >*/
    std::vector<int> output_shape;          /*<! output shape of Pad >*/

public:
    /**
     * @brief Construct a new Pad object.
     *
     * @param paddings        padding sizes; 1, 2 or 2 * dims values (broadcast in build())
     * @param constant_values fill values, used only when mode == PADDING_CONSTANT
     * @param mode            one of PADDING_EMPTY / PADDING_CONSTANT / PADDING_EDGE / PADDING_REFLECT / PADDING_SYMMETRIC
     * @param name            layer name
     */
    Pad(std::vector<int> paddings,
        std::vector<feature_t> constant_values = {0},
        padding_mode_t mode = PADDING_CONSTANT,
        const char *name = "Pad") : Layer(name),
                                    paddings(paddings),
                                    constant_values(constant_values),
                                    mode(mode)
    {
        this->output = new Tensor<feature_t>;
    }

    /**
     * @brief Destroy the Pad object.
     */
    ~Pad()
    {
        delete this->output; // deleting a null pointer is a no-op
    }

    /**
     * @brief Broadcast the padding configuration and derive the output shape.
     *
     * @param input       as an input
     * @param print_shape whether to print the output shape.
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        assert(this->paddings.size() > 0);
        const int dims = input.shape.size();
        const int pairs = dims * 2; // one (front, back) pair per dimension

        // Broadcast this->paddings to one (front, back) value per dimension.
        if (this->paddings.size() == 1)
        {
            this->paddings = std::vector<int>(pairs, this->paddings[0]);
        }
        else if (this->paddings.size() == 2)
        {
            std::vector<int> expanded(pairs, 0);
            for (int d = 0; d < dims; ++d)
            {
                expanded[2 * d] = this->paddings[0];
                expanded[2 * d + 1] = this->paddings[1];
            }
            this->paddings = expanded;
        }
        else
        {
            assert(this->paddings.size() == pairs);
        }

        // constant_values is only consulted in PADDING_CONSTANT mode.
        if (this->mode == PADDING_CONSTANT)
        {
            if (this->constant_values.size() == 1)
            {
                this->constant_values = std::vector<feature_t>(pairs, this->constant_values[0]);
            }
            else if (this->constant_values.size() == 2)
            {
                std::vector<feature_t> expanded(pairs, 0);
                for (int d = 0; d < dims; ++d)
                {
                    expanded[2 * d] = this->constant_values[0];
                    expanded[2 * d + 1] = this->constant_values[1];
                }
                this->constant_values = expanded;
            }
            else
            {
                assert(this->constant_values.size() == pairs);
            }
        }

        // Every dimension grows by its front + back padding.
        this->output_shape = input.shape;
        for (int d = 0; d < dims; ++d)
        {
            this->output_shape[d] += this->paddings[2 * d] + this->paddings[2 * d + 1];
        }

        this->output->set_shape(this->output_shape);
        this->output->set_exponent(input.exponent);
        this->output->free_element();

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& Pad result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call Pad operation
     *
     * @param input       as an input.
     * @param assign_core not effective yet
     * @return Pad result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        DL_LOG_LAYER_LATENCY_START();
        if (this->output->shape != this->output_shape)
        {
            this->output->set_shape(this->output_shape);
        }
        this->output->malloc_element();
        this->output->set_exponent(input.exponent);
        DL_LOG_LAYER_LATENCY_END(this->name, "apply");

        DL_LOG_LAYER_LATENCY_START();
        nn::pad(*this->output, input, this->paddings, this->constant_values, this->mode, assign_core);
        DL_LOG_LAYER_LATENCY_END(this->name, "pad");
        return *this->output;
    }
};
} // namespace layer
} // namespace dl
120 changes: 120 additions & 0 deletions include/nn/dl_nn_pad.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
namespace nn
{
/**
 * @brief pad(input) into the pre-allocated `output` Tensor.
 *
 * NOTE(review): only the declaration is visible here — the definition
 * presumably lives in the corresponding nn source file; verify it applies
 * the same broadcast rules as the value-returning overload below.
 *
 * @tparam feature_t
 * @param output as an output
 * @param input as an input
 * @param paddings number of values padded to the edges of each dim
 * @param constant_values used in PADDING_CONSTANT, the values to set the padded values for each dim
 * @param mode One of the following: PADDING_EMPTY, PADDING_CONSTANT, PADDING_EDGE, PADDING_REFLECT, PADDING_SYMMETRIC
 * @param assign_core not effective yet
 */
template <typename feature_t>
void pad(Tensor<feature_t> &output,
         Tensor<feature_t> &input,
         std::vector<int> paddings,
         std::vector<feature_t> constant_values,
         padding_mode_t mode,
         const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);


/**
 * @brief Allocate and return a padded copy of `input`.
 *
 * Broadcast rules for `paddings` (and, in PADDING_CONSTANT mode, for
 * `constant_values`):
 *   - size 1:        the single value pads the front and back of every dim
 *   - size 2:        value[0] pads the front, value[1] the back of every dim
 *   - size 2 * dims: per-dim (front, back) pairs, used as-is
 *
 * @tparam feature_t
 * @param input as an input
 * @param paddings number of values padded to the edges of each dim
 * @param constant_values used in PADDING_CONSTANT, the values to set the padded values for each dim
 * @param mode One of the following: PADDING_EMPTY, PADDING_CONSTANT, PADDING_EDGE, PADDING_REFLECT, PADDING_SYMMETRIC
 * @param assign_core not effective yet
 * @return Tensor<feature_t> the padded Tensor
 */
template <typename feature_t>
Tensor<feature_t> pad(Tensor<feature_t> &input,
                      const std::vector<int> &paddings,              // const&: avoids copying the caller's vector
                      const std::vector<feature_t> &constant_values, // const&: avoids copying the caller's vector
                      padding_mode_t mode,
                      const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
    DL_LOG_NN_LATENCY_INIT();

    DL_LOG_NN_LATENCY_START();

    assert(paddings.size() > 0);
    int input_dims = input.shape.size();
    int padding_dims = input_dims * 2;

    // Broadcast paddings to one (front, back) value per dimension.
    std::vector<int> _paddings(padding_dims, 0);
    if (paddings.size() == 1)
    {
        for (int i = 0; i < padding_dims; ++i)
        {
            _paddings[i] = paddings[0];
        }
    }
    else if (paddings.size() == 2)
    {
        for (int i = 0; i < input_dims; ++i)
        {
            _paddings[2 * i] = paddings[0];
            _paddings[2 * i + 1] = paddings[1];
        }
    }
    else
    {
        assert(paddings.size() == static_cast<size_t>(padding_dims));
        _paddings = paddings;
    }

    // constant_values is only consulted in PADDING_CONSTANT mode.
    std::vector<feature_t> _constant_values(padding_dims, 0);
    if (mode == PADDING_CONSTANT)
    {
        if (constant_values.size() == 1)
        {
            for (int i = 0; i < padding_dims; ++i)
            {
                _constant_values[i] = constant_values[0];
            }
        }
        else if (constant_values.size() == 2)
        {
            for (int i = 0; i < input_dims; ++i)
            {
                _constant_values[2 * i] = constant_values[0];
                _constant_values[2 * i + 1] = constant_values[1];
            }
        }
        else
        {
            assert(constant_values.size() == static_cast<size_t>(padding_dims));
            _constant_values = constant_values;
        }
    }

    // Every dimension grows by its front + back padding.
    std::vector<int> output_shape = input.shape;
    for (int i = 0; i < input_dims; ++i)
    {
        output_shape[i] += (_paddings[2 * i] + _paddings[2 * i + 1]);
    }

    Tensor<feature_t> output;
    output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
    DL_LOG_NN_LATENCY_END("apply");

    DL_LOG_NN_LATENCY_START();
    pad(output, input, _paddings, _constant_values, mode, assign_core);
    DL_LOG_NN_LATENCY_END("pad");

    return output;
}
} // namespace nn
} // namespace dl
63 changes: 62 additions & 1 deletion include/typedef/dl_variable.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ namespace dl
}

/**
* @brief
* @brief copy the element of the input Tensor.
*
* @param input an input Tensor
* @param deep one of true or false
Expand Down Expand Up @@ -258,6 +258,56 @@ namespace dl
return this->element[index];
}

/**
* @brief Set the all the element to value.
*
* @param value target value
* @return Tensor<T>& self
*/
Tensor<T> &set_value(T value);

/**
* @brief Set the the element to value
*
* @param value target value, it will be broadcast automatically.
* @return Tensor<T>& self
*/
Tensor<T> &set_value(Tensor<T> &value);

/**
* @brief Set the sliced element to value
*
* @param axis_index_range range of slices
* @param value target value
* @return Tensor<T>& self
*/
Tensor<T> &set_value(std::vector<int> axis_index_range, T value);

/**
* @brief Set the sliced element to value
*
* @param axis_index_range range of slices
* @param value target value, it will be broadcast automatically.
* @return Tensor<T>& self
*/
Tensor<T> &set_value(std::vector<int> axis_index_range, Tensor<T> &value);

/**
* @brief Extracts a slice from the Tensor.
*
* @param axis_index_range range of slices
* @return Tensor<T> output
*/
Tensor<T> slice(std::vector<int> axis_index_range);

/**
* @brief Reverses specific dims of the tensor.
*
* @param axis The dims to be reversed
* @return Tensor<T>&
*/
Tensor<T> &reverse(std::vector<int> axis);

/**
* @brief Get the size of Tensor.
*
Expand Down Expand Up @@ -491,5 +541,16 @@ namespace dl
return *this;
}
}

/**
 * @brief Build a 1-D Tensor holding the sequence 0, 1, ..., size - 1.
 *
 * @param size number of elements to generate
 * @return Tensor<T> the generated Tensor (exponent 0, shape {size})
 */
static Tensor<T> arange(int size)
{
    Tensor<T> result;
    result.set_auto_free(true).set_exponent(0).set_shape({size}).malloc_element();
    int i = 0;
    while (i < size)
    {
        // NOTE(review): the index is narrowed to T on assignment — values wrap
        // once i exceeds T's range (e.g. size > 127 for int8_t); confirm intended.
        result.element[i] = i;
        ++i;
    }
    return result;
}
};
} // namespace dl
Loading

0 comments on commit c4735f4

Please sign in to comment.