From 2b98993b2dc1fa71f044ff61e55f0524ae8072c6 Mon Sep 17 00:00:00 2001
From: RedContritio
Date: Wed, 22 Mar 2023 14:05:56 +0800
Subject: [PATCH] support auto generate for p_norm (#51590)

* support auto generate p_norm
* fix bug in backward
---
 paddle/fluid/operators/p_norm_op.cc      | 139 -----------------------
 paddle/phi/api/yaml/backward.yaml        |  10 ++
 paddle/phi/api/yaml/legacy_backward.yaml |  10 --
 paddle/phi/api/yaml/legacy_ops.yaml      |   9 --
 paddle/phi/api/yaml/op_compat.yaml       |   7 ++
 paddle/phi/api/yaml/op_version.yaml      |   8 ++
 paddle/phi/api/yaml/ops.yaml             |   9 ++
 paddle/phi/ops/compat/p_norm_sig.cc      |  26 -----
 8 files changed, 34 insertions(+), 184 deletions(-)
 delete mode 100644 paddle/fluid/operators/p_norm_op.cc
 delete mode 100644 paddle/phi/ops/compat/p_norm_sig.cc

diff --git a/paddle/fluid/operators/p_norm_op.cc b/paddle/fluid/operators/p_norm_op.cc
deleted file mode 100644
index 766ecaee0d6c9..0000000000000
--- a/paddle/fluid/operators/p_norm_op.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/op_version_registry.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/backward.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class PnormOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "(Tensor) A tensor of rank >= axis.");
-    AddAttr<float>("porder",
-                   "(float, default 2) The porder is the p order vector norm "
-                   "to calculate. Available for porder=0, inf, -inf and any "
-                   "real number.")
-        .SetDefault(2.0f);
-    AddAttr<int>("axis",
-                 "The axis on which to apply norm operation. If axis < 0, "
-                 "the dimension to pnorm is rank(X) + axis. -1 is "
-                 "the last dimension.")
-        .SetDefault(-1);
-    AddAttr<float>("epsilon",
-                   "(float, default 1e-12) The epsilon value is used "
-                   "to avoid division by zero.")
-        .SetDefault(1.0e-12f);
-    AddAttr<bool>(
-        "keepdim",
-        "(bool, default false) Whether to keep the dimensions as the input.")
-        .SetDefault(false);
-
-    AddAttr<bool>("asvector",
-                  "(bool, default false) Compute as a vector norm when axis "
-                  "is None and the input is a matrix.")
-        .SetDefault(false);
-    AddOutput("Out", "(Tensor) Output result tensor of p-norm");
-    AddComment(R"DOC(
-Pnorm Operator.
-Given a tensor X, this operator computes the Lp-norm of X.
-
-When p = 0, defining $0^0 = 0$, the zero-norm of X is simply the number of non-zero elements of X.
-$$
-||X||_{0} = \lim_{p \rightarrow 0} \sum_i |x_i|^p
-$$
-
-When p = inf, the inf-norm of X is the maximum absolute value of the elements of X.
-$$
-||X||_\infty = \max_i |x_i|
-$$
-
-When p = -inf, the negative-inf-norm of X is the minimum absolute value of the elements of X.
-$$
-||X||_{-\infty} = \min_i |x_i|
-$$
-
-Otherwise, the p-norm of X follows the formula
-$$
-||X||_{p} = (\sum_i |x_i|^p)^{1/p}
-$$
-where $\sum_i$ is calculated along the `axis` dimension.
-
-)DOC");
-  }
-};
-
-class PnormOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-};
-
-class PnormOpGrad : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-};
-
-template <typename T>
-class PnormOpGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("p_norm_grad");
-    op->SetAttrMap(this->Attrs());
-    op->SetInput("X", this->Input("X"));
-    op->SetInput("Out", this->Output("Out"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-using CPU = phi::CPUContext;
-
-DECLARE_INFER_SHAPE_FUNCTOR(p_norm,
-                            PNormInferShapeFunctor,
-                            PD_INFER_META(phi::PNormInferMeta));
-DECLARE_INFER_SHAPE_FUNCTOR(p_norm_grad,
-                            PNormGradInferShapeFunctor,
-                            PD_INFER_META(phi::GeneralUnaryGradInferMeta));
-
-REGISTER_OPERATOR(p_norm,
-                  ops::PnormOp,
-                  ops::PnormOpMaker,
-                  ops::PnormOpGradOpMaker<paddle::framework::OpDesc>,
-                  ops::PnormOpGradOpMaker<paddle::imperative::OpBase>,
-                  PNormInferShapeFunctor);
-REGISTER_OPERATOR(p_norm_grad, ops::PnormOpGrad, PNormGradInferShapeFunctor);
-
-REGISTER_OP_VERSION(p_norm).AddCheckpoint(
-    R"ROC(
-      Upgrade p_norm, add 1 attribute [asvector].
-    )ROC",
-    paddle::framework::compatible::OpVersionDesc().NewAttr(
-        "asvector",
-        "Compute as vector when axis is None and input is matrix",
-        false));
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index 644cbd96cd936..d164484d0026e 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -1028,6 +1028,16 @@
     func : overlap_add_grad
     data_type : x
 
+- backward_op : p_norm_grad
+  forward : p_norm(Tensor x, float porder=2, int axis=-1, float epsilon=1.0e-12f, bool keepdim=false, bool asvector=false) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, float porder, int axis, float epsilon, bool keepdim, bool asvector)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GeneralUnaryGradInferMeta
+    param: [x]
+  kernel :
+    func : p_norm_grad
+
 - backward_op : pixel_shuffle_grad
   forward : pixel_shuffle (Tensor x, int upscale_factor=1, str data_format="NCHW") -> Tensor(out)
   args : (Tensor out_grad, int upscale_factor, str data_format)
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index d669f77929554..8e467746c8214 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -863,16 +863,6 @@
   kernel :
     func : norm_grad
 
-- backward_op : p_norm_grad
-  forward : p_norm(Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, float porder, int axis, float epsilon, bool keepdim, bool asvector)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  kernel :
-    func : p_norm_grad
-
 - backward_op : pad3d_double_grad
   forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(grad_x)
   args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index dbfbdec37d290..5c99d79961f64 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -1235,15 +1235,6 @@
   output : Tensor(out)
   invoke : full_like(x, 1, dtype, place)
 
-- op : p_norm
-  args : (Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false)
-  output : Tensor(out)
-  infer_meta :
-    func : PNormInferMeta
-  kernel :
-    func : p_norm
-  backward : p_norm_grad
-
 - op : pad
   args : (Tensor x, int[] paddings, Scalar pad_value)
   output : Tensor
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index aeabee6a73792..5f3d4ba7bf837 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1290,6 +1290,13 @@
   outputs :
     out : Out
 
+- op : p_norm
+  backward: p_norm_grad
+  inputs :
+    x : X
+  outputs :
+    out : Out
+
 - op : pad2d
   backward : pad2d_grad
   extra :
diff --git a/paddle/phi/api/yaml/op_version.yaml b/paddle/phi/api/yaml/op_version.yaml
index 3c77755c6c830..2851b86615b74 100644
--- a/paddle/phi/api/yaml/op_version.yaml
+++ b/paddle/phi/api/yaml/op_version.yaml
@@ -102,6 +102,14 @@
         comment : In order to force fill output variable to gpu memory.
         default : "false"
 
+- op : p_norm
+  version :
+    - checkpoint : Upgrade p_norm, add 1 attribute [asvector].
+      action :
+        - add_attr : asvector
+          comment : Compute as vector when axis is None and input is matrix.
+          default : "false"
+
 - op : pixel_shuffle
   version :
     - checkpoint : Compatible upgrade of pixel_shuffle, add a new attribute [data_format]
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index c219993038c54..24887ee6512ca 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -1078,6 +1078,15 @@
     data_type : x
   backward: overlap_add_grad
 
+- op : p_norm
+  args : (Tensor x, float porder=2, int axis=-1, float epsilon=1.0e-12f, bool keepdim=false, bool asvector=false)
+  output : Tensor(out)
+  infer_meta :
+    func : PNormInferMeta
+  kernel :
+    func : p_norm
+  backward : p_norm_grad
+
 - op : pixel_shuffle
   args : (Tensor x, int upscale_factor=1, str data_format="NCHW")
   output : Tensor
diff --git a/paddle/phi/ops/compat/p_norm_sig.cc b/paddle/phi/ops/compat/p_norm_sig.cc
deleted file mode 100644
index 82b88aa09ff2f..0000000000000
--- a/paddle/phi/ops/compat/p_norm_sig.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-KernelSignature PNormGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
-  return KernelSignature("p_norm_grad",
-                         {"X", "Out", "Out@GRAD"},
-                         {"porder", "axis", "epsilon", "keepdim", "asvector"},
-                         {"X@GRAD"});
-}
-}  // namespace phi
-
-PD_REGISTER_ARG_MAPPING_FN(p_norm_grad, phi::PNormGradOpArgumentMapping);
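
A quick numerical check of the norm definitions quoted in the deleted PnormOpMaker
doc comment: the zero-norm counts non-zero elements, the inf/-inf norms take the
max/min absolute value, and the general p-norm is (sum_i |x_i|^p)^(1/p). The sketch
below uses NumPy as a stand-in reference; NumPy and the sample vector are
illustrative assumptions, not part of the patch.

import numpy as np

x = np.array([3.0, -4.0, 0.0])

# p = 0: ||X||_0 = lim_{p -> 0} sum_i |x_i|^p, i.e. the number of non-zeros.
assert np.count_nonzero(x) == np.linalg.norm(x, ord=0) == 2

# p = inf / -inf: the maximum / minimum absolute value of the elements.
assert np.linalg.norm(x, ord=np.inf) == np.abs(x).max() == 4.0
assert np.linalg.norm(x, ord=-np.inf) == np.abs(x).min() == 0.0

# General p: ||X||_p = (sum_i |x_i|^p)^(1/p); p = 2 recovers the 3-4-5 triangle.
p = 2.0
assert np.isclose((np.abs(x) ** p).sum() ** (1.0 / p), 5.0)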
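
The p_norm_grad entry added to backward.yaml drives auto-generation of the same
kernel signature the deleted p_norm_sig.cc mapped by hand: inputs {X, Out, Out@GRAD},
attributes {porder, axis, epsilon, keepdim, asvector}, output X@GRAD. For intuition
about what that kernel computes, the analytic gradient of the p-norm is
d||x||_p/dx_i = sign(x_i) * |x_i|^(p-1) * ||x||_p^(1-p). The finite-difference check
below is pure NumPy for illustration (not the Paddle kernel itself) and ignores the
epsilon/asvector handling:

import numpy as np

def p_norm(x, p=2.0):
    return (np.abs(x) ** p).sum() ** (1.0 / p)

def p_norm_grad(x, p=2.0):
    # d||x||_p / dx_i = sign(x_i) * |x_i|^(p-1) * ||x||_p^(1 - p)
    return np.sign(x) * np.abs(x) ** (p - 1) * p_norm(x, p) ** (1.0 - p)

rng = np.random.default_rng(0)
x = rng.standard_normal(5)
eps = 1e-6
# Central differences, one coordinate at a time.
numeric = np.array([
    (p_norm(x + eps * np.eye(5)[i]) - p_norm(x - eps * np.eye(5)[i])) / (2 * eps)
    for i in range(5)
])
assert np.allclose(numeric, p_norm_grad(x), atol=1e-5)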
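
With the defaults now declared once in ops.yaml (porder=2, axis=-1,
epsilon=1.0e-12f, keepdim=false, asvector=false), the auto-generated operator
should be indistinguishable from the hand-written registration it replaces. A
minimal end-to-end sketch through the public API, assuming a Paddle build where
paddle.norm dispatches to this op:

import paddle

x = paddle.to_tensor([3.0, -4.0, 0.0])
# 2-norm along the last axis, matching the YAML defaults
# (porder=2, axis=-1, keepdim=False): sqrt(3^2 + 4^2 + 0^2) = 5.
out = paddle.norm(x, p=2, axis=-1)
print(out)  # expect a scalar tensor holding 5.0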