Migrate to clang-format-16 (Samsung#12442)
* Migrate to clang-format-16

This commit upgrades clang-format to 16.
- Add .FORMATDENY to onert-micro/externals

ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh <[email protected]>
hseok-oh authored Jan 11, 2024
1 parent 9a3d279 commit a3a157a
Showing 76 changed files with 177 additions and 183 deletions.
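Most of the hunks below are whitespace-only reformatting. The most common C++ change is that clang-format 16 attaches the ellipsis of a parameter pack to the pack name (Args &&...args) where clang-format 8 wrote Args &&... args. A minimal sketch of the new spelling, using a hypothetical factory function rather than code from this commit:

#include <memory>
#include <utility>

// clang-format 16 prints the pack as "Args &&...args"; clang-format 8 wrote
// "Args &&... args". The tokens are identical, only the spacing differs.
template <typename T, typename... Args> std::unique_ptr<T> make_node(Args &&...args)
{
  return std::make_unique<T>(std::forward<Args>(args)...);
}

int main() { return *make_node<int>(0); }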
1 change: 1 addition & 0 deletions .clang-format
@@ -21,6 +21,7 @@ BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: true
AfterCaseLabel: true
AfterControlStatement: true
AfterEnum: true
AfterFunction: true
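The one rule added here, BraceWrapping.AfterCaseLabel: true, is a key available only in clang-format releases newer than 8; it places the opening brace of a braced case body on its own line, matching the other After* options already set in this file. An illustrative snippet (not taken from the repository) formatted that way:

#include <cstdio>

enum class Kind
{
  A,
  B
};

static int handle(Kind k)
{
  switch (k)
  {
    // With AfterCaseLabel: true the brace below goes on its own line; with
    // false it would stay on the same line as the case label.
    case Kind::A:
    {
      std::puts("A");
      return 0;
    }
    default:
      return 1;
  }
}

int main() { return handle(Kind::A); }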
7 changes: 5 additions & 2 deletions .github/workflows/check-format.yml
@@ -28,11 +28,14 @@ jobs:
with:
python-version: '3.x'

# C format: clang-format-8
# C format: clang-format-16
# Python format: yapf==0.22.0
- name: Install packages
run: |
sudo apt-get install -y clang-format-8
sudo apt-get install -y gnupg2 software-properties-common
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
sudo add-apt-repository "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main"
sudo apt-get update && sudo apt-get install -qqy clang-format-16
python -m pip install --upgrade pip
pip install yapf==0.22.0
4 changes: 2 additions & 2 deletions compiler/circle-opselector/src/OpSelector.cpp
@@ -68,7 +68,7 @@ bool is_number(const std::vector<std::string> &vec)
{
for (const auto &s : vec)
{
if (not::is_number(s))
if (not ::is_number(s))
{
return false;
}
@@ -270,7 +270,7 @@ OpSelector::select_by<SelectType::ID>(const std::vector<std::string> &comma_toke
for (const auto &comma_token : comma_tokens)
{
auto dash_tokens = ::split_into_vector(comma_token, '-');
if (not::is_number(dash_tokens))
if (not ::is_number(dash_tokens))
{
throw std::runtime_error{
"ERROR: To select operator by id, please use these args: [0-9], '-', ','"};
2 changes: 1 addition & 1 deletion compiler/loco/include/loco/IR/NodePool.h
@@ -34,7 +34,7 @@ class NodePool final : public ObjectPool<Node>
~NodePool();

public:
template <typename Derived, typename... Args> Derived *create(Args &&... args)
template <typename Derived, typename... Args> Derived *create(Args &&...args)
{
std::unique_ptr<Derived> ptr{new Derived(std::forward<Args>(args)...)};
ptr->graph(_graph);
2 changes: 1 addition & 1 deletion compiler/loco/src/IR/MockupNode.h
@@ -46,7 +46,7 @@ class MockupNode final : public loco::Node
Node *arg(uint32_t N) const final { return _arg.node(); }
void drop(void) final { _arg.node(nullptr); }

Node *in(void)const { return _arg.node(); }
Node *in(void) const { return _arg.node(); }
void in(Node *node) { _arg.node(node); }

private:
2 changes: 1 addition & 1 deletion compiler/loco/src/Service/GraphBuilder.h
@@ -87,7 +87,7 @@ class GraphBuilder final
public:
// "Layer" is in theory a subgraph builder.
template <typename Layer, typename... Args>
auto push(Args &&... args)
auto push(Args &&...args)
-> decltype(static_cast<Layer *>(nullptr)->operator()(static_cast<Context *>(nullptr)))
{
Layer layer{std::forward<Args>(args)...};
@@ -103,7 +103,7 @@ class KernelBuilderTest : public Test

std::unique_ptr<IMemoryManager> _memory_manager;

template <typename NodeT, typename... Args> NodeT *createNode(Args &&... args)
template <typename NodeT, typename... Args> NodeT *createNode(Args &&...args)
{
auto *node = _graph.nodes()->create<NodeT>(std::forward<Args>(args)...);
// The actual type does not matter for the purpose of the tests.
4 changes: 2 additions & 2 deletions compiler/luci/lang/include/luci/IR/SparsityParam.h
@@ -202,8 +202,8 @@ class DimMetaData
}
DimMetaData(DimensionType format, int32_t dense_size, const SparseIndexVector &array_segments,
const SparseIndexVector &array_indices)
: _format{format}, _dense_size{dense_size}, _array_segments{array_segments}, _array_indices{
array_indices}
: _format{format}, _dense_size{dense_size}, _array_segments{array_segments},
_array_indices{array_indices}
{
// DO NOTHING
}
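The constructor hunks in this and several later files (ConvBackend.test.cpp, NEOneHotKernel.cpp) show the same kind of change: when a member-initializer list overflows the column limit, clang-format 16 wraps before a whole initializer rather than splitting one initializer across its braces. A hedged sketch with made-up member names, not the actual DimMetaData class:

#include <vector>

class DimMeta
{
public:
  // clang-format 16 breaks the overflowing initializer list before a complete
  // initializer ("_indices{indices}") instead of inside its braces.
  DimMeta(int format, int dense_size, const std::vector<int> &segments,
          const std::vector<int> &indices)
    : _format{format}, _dense_size{dense_size}, _segments{segments},
      _indices{indices}
  {
    // DO NOTHING
  }

private:
  int _format;
  int _dense_size;
  std::vector<int> _segments;
  std::vector<int> _indices;
};

int main()
{
  DimMeta d{0, 4, {1, 2}, {3}};
  return 0;
}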
2 changes: 1 addition & 1 deletion compiler/luci/pass/include/luci/CircleQuantizer.h
@@ -47,7 +47,7 @@ class CircleQuantizer final
public:
// some helper methods
size_t size(void) const { return items.size(); }
template <class... Args> void emplace_back(Args &&... args) { items.emplace_back(args...); }
template <class... Args> void emplace_back(Args &&...args) { items.emplace_back(args...); }
std::vector<LayerParams>::iterator begin() { return items.begin(); };
std::vector<LayerParams>::iterator end() { return items.end(); };

2 changes: 1 addition & 1 deletion compiler/mir-interpreter/src/ops/Common.h
@@ -27,7 +27,7 @@ namespace mir_interpreter
{

template <template <typename> class F, typename... Args>
void dispatch(mir::DataType dt, Args &&... args)
void dispatch(mir::DataType dt, Args &&...args)
{
switch (dt)
{
2 changes: 1 addition & 1 deletion compiler/mir/include/mir/Graph.h
@@ -38,7 +38,7 @@ class Graph

virtual ~Graph();

template <typename T, typename... Args> Operation *create(Args &&... args)
template <typename T, typename... Args> Operation *create(Args &&...args)
{
auto op = new T(std::forward<Args>(args)...);
op->setId(_last_node_id++);
4 changes: 2 additions & 2 deletions compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.h
@@ -103,11 +103,11 @@ class Caffe2OpCreator
private:
mir::Graph *_graph = nullptr;

template <typename OpType, typename... Types> mir::Operation *createOp(Types &&... args);
template <typename OpType, typename... Types> mir::Operation *createOp(Types &&...args);
};

template <typename OpType, typename... Types>
mir::Operation *Caffe2OpCreator::createOp(Types &&... args)
mir::Operation *Caffe2OpCreator::createOp(Types &&...args)
{
return _graph->create<OpType>(std::forward<Types>(args)...);
}
4 changes: 2 additions & 2 deletions compiler/mir/src/mir_caffe_importer/caffe_op_creator.h
@@ -132,11 +132,11 @@ class CaffeOpCreator

mir::TensorVariant convertBlob(const caffe::BlobProto &blob);

template <typename OpType, typename... Types> mir::Operation *createOp(Types &&... args);
template <typename OpType, typename... Types> mir::Operation *createOp(Types &&...args);
};

template <typename OpType, typename... Types>
mir::Operation *CaffeOpCreator::createOp(Types &&... args)
mir::Operation *CaffeOpCreator::createOp(Types &&...args)
{
return _graph->create<OpType>(std::forward<Types>(args)...);
}
2 changes: 1 addition & 1 deletion compiler/mir/src/mir_onnx_importer/ONNXHelpers.h
@@ -38,7 +38,7 @@ mir::TensorVariant createTensor(const onnx::TensorProto *tensor);
mir::Operation *foldConstants(mir::Graph *graph, mir::Operation *op);

template <typename OpType, typename... Types>
mir::Operation *createOp(mir::Graph *graph, Types &&... args)
mir::Operation *createOp(mir::Graph *graph, Types &&...args)
{
auto op = graph->create<OpType>(std::forward<Types>(args)...);
op = foldConstants(graph, op);
4 changes: 2 additions & 2 deletions compiler/mir/src/mir_tflite_importer/tflite_op_creator.h
@@ -149,11 +149,11 @@ class TFLiteOpCreator
mir::Operation::Output *addFusedActivation(mir::Operation::Output *input,
tflite::ActivationFunctionType activation_type);

template <typename OpType, typename... Types> mir::Operation *createOp(Types &&... args);
template <typename OpType, typename... Types> mir::Operation *createOp(Types &&...args);
};

template <typename OpType, typename... Types>
mir::Operation *TFLiteOpCreator::createOp(Types &&... args)
mir::Operation *TFLiteOpCreator::createOp(Types &&...args)
{
return _graph->create<OpType>(std::forward<Types>(args)...);
}
2 changes: 1 addition & 1 deletion compiler/mir/unittests/ShapeRange.cpp
@@ -28,7 +28,7 @@ struct ParamType
Shape shape;

template <typename... Args>
explicit ParamType(int32_t actual_len, Args &&... args)
explicit ParamType(int32_t actual_len, Args &&...args)
: actual_length(actual_len), shape({static_cast<int32_t>(args)...})
{
}
2 changes: 1 addition & 1 deletion compiler/nest/core/include/nest/Closure.h
@@ -27,7 +27,7 @@ class Closure
{
public:
template <typename... Args>
Closure(const DomainID &id, Args &&... indices) : _id{id}, _sub{std::forward<Args>(indices)...}
Closure(const DomainID &id, Args &&...indices) : _id{id}, _sub{std::forward<Args>(indices)...}
{
// DO NOTHING
}
2 changes: 1 addition & 1 deletion compiler/nest/core/include/nest/Domain.h
@@ -37,7 +37,7 @@ class Domain
Domain(const Domain &) = default;

public:
template <typename... Args> Closure operator()(Args &&... indices)
template <typename... Args> Closure operator()(Args &&...indices)
{
return Closure{_id, std::forward<Args>(indices)...};
}
3 changes: 1 addition & 2 deletions compiler/nest/core/include/nest/expr/DerefNode.h
@@ -30,8 +30,7 @@ class DerefNode final : public Node
{
public:
template <typename... Args>
DerefNode(const DomainID &id, Args &&... indicies)
: _id{id}, _sub{std::forward<Args>(indicies)...}
DerefNode(const DomainID &id, Args &&...indicies) : _id{id}, _sub{std::forward<Args>(indicies)...}
{
// DO NOTHING
}
4 changes: 2 additions & 2 deletions compiler/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -228,7 +228,7 @@ mir::TensorVariant getReferenceTensor(mir::Graph &g, mir::Operation *op)
* @brief Run selected operation, used to make code in tests more compact and fit getReferenceTensor
* format
*/
template <typename Operation, typename... Args> Tensor run(Operation op, const Args &... args)
template <typename Operation, typename... Args> Tensor run(Operation op, const Args &...args)
{
Tensor output;
op(output, args...);
@@ -308,7 +308,7 @@ void createAndRunTestGraph(
function<mir::Operation *(mir::Graph &, const std::vector<mir::Operation::Output *> &inputs)>
op_generator,
TestFunc artifactOperation, const vector<unique_ptr<mir::TensorVariant>> &input_ntensors,
Args &... input_atensors)
Args &...input_atensors)
{
mir::Graph g;
mir::Operation *actual_operation = fillGraph(g, op_generator, input_ntensors);
4 changes: 2 additions & 2 deletions compiler/nnsuite/conv/nnkit-caffe/ConvBackend.test.cpp
@@ -35,8 +35,8 @@ class TestModel : public nnsuite::conv::Model
TestModel(const std::string &ifm_name, const feature::Shape &ifm_shape,
const std::string &ofm_name, const feature::Shape &ofm_shape,
const kernel::Shape &ker_shape, const kernel::Layout &ker_layout, float *ker_data)
: _ifm_name(ifm_name), _ifm_shape(ifm_shape), _ofm_name(ofm_name),
_ofm_shape(ofm_shape), _ker{ker_shape, ker_layout, ker_data}
: _ifm_name(ifm_name), _ifm_shape(ifm_shape), _ofm_name(ofm_name), _ofm_shape(ofm_shape),
_ker{ker_shape, ker_layout, ker_data}
{
// DO NOTHING
}
4 changes: 2 additions & 2 deletions compiler/nnsuite/conv/nnkit-tflite/ConvBackend.test.cpp
@@ -38,8 +38,8 @@ class TestModel : public nnsuite::conv::Model
TestModel(const std::string &ifm_name, const feature::Shape &ifm_shape,
const std::string &ofm_name, const feature::Shape &ofm_shape,
const kernel::Shape &ker_shape, const kernel::Layout &ker_layout, float *ker_data)
: _ifm_name(ifm_name), _ifm_shape(ifm_shape), _ofm_name(ofm_name),
_ofm_shape(ofm_shape), _ker{ker_shape, ker_layout, ker_data}
: _ifm_name(ifm_name), _ifm_shape(ifm_shape), _ofm_name(ofm_name), _ofm_shape(ofm_shape),
_ker{ker_shape, ker_layout, ker_data}
{
// DO NOTHING
}
4 changes: 2 additions & 2 deletions compiler/oops/include/oops/UserExn.h
@@ -43,7 +43,7 @@ class UserExn : public std::exception
public:
UserExn() = delete;

template <typename... Info> UserExn(const std::string &msg, Info &&... args)
template <typename... Info> UserExn(const std::string &msg, Info &&...args)
{
std::stringstream out;

@@ -58,7 +58,7 @@

private:
template <typename Attr, typename Val, typename... AttsVals>
void build_info(std::stringstream &out, Attr &attr, Val &val, AttsVals &... args)
void build_info(std::stringstream &out, Attr &attr, Val &val, AttsVals &...args)
{
out << pepper::str(attr, " = ", val);
out << ", ";
6 changes: 3 additions & 3 deletions compiler/pepper-str/include/pepper/str.h
@@ -27,7 +27,7 @@ namespace pepper
namespace details
{

template <typename... Arg> void str_impl(std::ostream &os, Arg &&... args);
template <typename... Arg> void str_impl(std::ostream &os, Arg &&...args);

template <> inline void str_impl(std::ostream &)
{
@@ -41,7 +41,7 @@ template <typename Arg> inline void str_impl(std::ostream &os, Arg &&arg)
}

template <typename Arg, typename... Args>
inline void str_impl(std::ostream &os, Arg &&arg, Args &&... args)
inline void str_impl(std::ostream &os, Arg &&arg, Args &&...args)
{
str_impl(os, std::forward<Arg>(arg));
str_impl(os, std::forward<Args>(args)...);
@@ -53,7 +53,7 @@ inline void str_impl(std::ostream &os, Arg &&arg, Args &&... args)
namespace pepper
{

template <typename... Args> static inline std::string str(Args &&... args)
template <typename... Args> static inline std::string str(Args &&...args)
{
std::stringstream ss;
details::str_impl(ss, std::forward<Args>(args)...);
4 changes: 2 additions & 2 deletions compiler/pp/include/pp/Format.h
@@ -25,13 +25,13 @@ namespace pp

template <typename Arg> static inline void _fmt(std::ostream &os, const Arg &arg) { os << arg; }
template <typename Arg, typename... Args>
static inline void _fmt(std::ostream &os, const Arg &arg, const Args &... args)
static inline void _fmt(std::ostream &os, const Arg &arg, const Args &...args)
{
_fmt(os, arg);
_fmt(os, args...);
}

template <typename... Args> static inline std::string fmt(const Args &... args)
template <typename... Args> static inline std::string fmt(const Args &...args)
{
std::stringstream ss;
_fmt(ss, args...);
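For context on the helper being reshaped here: pp::fmt streams each argument, in order, into a single std::stringstream and returns the accumulated string, so a call like the hypothetical one below (assuming compiler/pp/include is on the include path) produces one concatenated message:

#include <iostream>
#include <string>

#include "pp/Format.h"

int main()
{
  // Each argument is forwarded through the recursive _fmt overloads above.
  const std::string msg = pp::fmt("rank = ", 4, ", size = ", 2.5);
  std::cout << msg << std::endl; // prints: rank = 4, size = 2.5
  return 0;
}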
5 changes: 1 addition & 4 deletions compiler/pp/include/pp/IndentedStringBuilder.h
@@ -40,10 +40,7 @@ class IndentedStringBuilder
std::string build(const std::string &content);

public:
template <typename... Args> std::string build(const Args &... args)
{
return build(fmt(args...));
}
template <typename... Args> std::string build(const Args &...args) { return build(fmt(args...)); }

private:
uint32_t _level;
2 changes: 1 addition & 1 deletion compiler/pp/include/pp/LinearDocument.h
@@ -70,7 +70,7 @@ class LinearDocument final : public MultiLineText
}
}

template <typename... Args> void append(const Args &... args) { append(fmt(args...)); }
template <typename... Args> void append(const Args &...args) { append(fmt(args...)); }

public:
void append(const LinearDocument &doc);
24 changes: 12 additions & 12 deletions compute/ARMComputeEx/src/core/CL/cl_kernels/gemmlowp.cl
@@ -2141,26 +2141,26 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
* element in the output shifts vector
*/
__kernel void
gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DECLARATION(mm_result)
gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DECLARATION(mm_result)
#if defined(A_OFFSET)
,
IMAGE_DECLARATION(sum_col)
,
IMAGE_DECLARATION(sum_col)
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
,
IMAGE_DECLARATION(sum_row)
,
IMAGE_DECLARATION(sum_row)
#endif // defined(B_OFFSET)
,
,
#if defined(ADD_BIAS)
VECTOR_DECLARATION(biases),
VECTOR_DECLARATION(biases),
#endif // defined(ADD_BIAS)
TENSOR3D_DECLARATION(dst)
TENSOR3D_DECLARATION(dst)
#if defined(PER_CHANNEL_QUANTIZATION)
,
VECTOR_DECLARATION(result_multipliers),
VECTOR_DECLARATION(result_shifts)
,
VECTOR_DECLARATION(result_multipliers),
VECTOR_DECLARATION(result_shifts)
#endif // defined(PER_CHANNEL_QUANTIZATION)
)
)
{
const int x = get_global_id(0) * 4;
const int y = get_global_id(1);
4 changes: 2 additions & 2 deletions compute/ARMComputeEx/src/core/NEON/kernels/NEOneHotKernel.cpp
@@ -105,8 +105,8 @@ bool isOnValue(U index, U depth)
} // namespace

NEOneHotKernel::NEOneHotKernel()
: _indices{nullptr}, _depth{nullptr}, _on_value{nullptr},
_off_value{nullptr}, _axis{-1}, _output{nullptr}, _func{}
: _indices{nullptr}, _depth{nullptr}, _on_value{nullptr}, _off_value{nullptr}, _axis{-1},
_output{nullptr}, _func{}
{
}
