From 55bd773a080a051bb8d013bd88cbb8e042903adf Mon Sep 17 00:00:00 2001
From: gouzil <66515297+gouzil@users.noreply.github.com>
Date: Wed, 1 Feb 2023 14:40:43 +0800
Subject: [PATCH] [Divide by 0 Error] add norm check (#49966)

* [Divide by 0 Error] add norm check

* [Divide by 0 Error] fix x AttributeError

* [Divide by 0 Error] norm check migrate to c++
---
 paddle/phi/kernels/cpu/p_norm_kernel.cc              | 7 +++++++
 paddle/phi/kernels/gpu/p_norm_kernel.cu              | 7 +++++++
 paddle/phi/kernels/xpu/p_norm_kernel.cc              | 8 ++++++++
 python/paddle/fluid/tests/unittests/test_norm_all.py | 9 +++++++++
 4 files changed, 31 insertions(+)

diff --git a/paddle/phi/kernels/cpu/p_norm_kernel.cc b/paddle/phi/kernels/cpu/p_norm_kernel.cc
index 597939953b277..bb33b8a397e02 100644
--- a/paddle/phi/kernels/cpu/p_norm_kernel.cc
+++ b/paddle/phi/kernels/cpu/p_norm_kernel.cc
@@ -61,6 +61,13 @@ void PNormKernel(const Context& dev_ctx,
   int pre, n, post;
   GetDims(xdim, axis, &pre, &n, &post, asvector);
 
+  for (int i = 0; i < xdim.size(); i++) {
+    PADDLE_ENFORCE_LT(0,
+                      xdim[i],
+                      errors::InvalidArgument(
+                          "The dims of Input(X) should be greater than 0."));
+  }
+
   auto* place = dev_ctx.eigen_device();
 
   Eigen::DSizes<int, 3> shape(pre, n, post);
diff --git a/paddle/phi/kernels/gpu/p_norm_kernel.cu b/paddle/phi/kernels/gpu/p_norm_kernel.cu
index c7a6261ce381e..fb869a00d9c50 100644
--- a/paddle/phi/kernels/gpu/p_norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/p_norm_kernel.cu
@@ -105,6 +105,13 @@ void PNormKernel(const Context& dev_ctx,
   std::vector<int> reduce_axis =
       funcs::details::GetReduceDim(axis_dims, xdim.size(), asvector);
 
+  for (int i = 0; i < xdim.size(); i++) {
+    PADDLE_ENFORCE_LT(0,
+                      xdim[i],
+                      errors::InvalidArgument(
+                          "The dims of Input(X) should be greater than 0."));
+  }
+
   using MT = typename dtype::MPTypeTrait<T>::Type;
   if (porder == 0) {
     phi::funcs::ReduceKernel<T, T, kps::AddFunctor, NonzeroFunctor<T>>(
diff --git a/paddle/phi/kernels/xpu/p_norm_kernel.cc b/paddle/phi/kernels/xpu/p_norm_kernel.cc
index 7ef72c61ad3aa..60abc59517b78 100644
--- a/paddle/phi/kernels/xpu/p_norm_kernel.cc
+++ b/paddle/phi/kernels/xpu/p_norm_kernel.cc
@@ -55,6 +55,14 @@ void PNormKernel(const Context& dev_ctx,
   int n = 1;
   int t = 1;
   GetDims(xdim, axis, &m, &t, &n, asvector);
+
+  for (int i = 0; i < xdim.size(); i++) {
+    PADDLE_ENFORCE_LT(0,
+                      xdim[i],
+                      errors::InvalidArgument(
+                          "The dims of Input(X) should be greater than 0."));
+  }
+
   x_dim.push_back(m);
   x_dim.push_back(t);
   x_dim.push_back(n);
diff --git a/python/paddle/fluid/tests/unittests/test_norm_all.py b/python/paddle/fluid/tests/unittests/test_norm_all.py
index d70d0dd9f065d..beff458bd1b70 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_all.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_all.py
@@ -655,6 +655,15 @@ def err_dtype(p, shape_x, xdtype, out=None):
             ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1]
         )
 
+        with fluid.dygraph.guard():
+            # The size of input in Norm should not be 0.
+            def test_0_size():
+                array = np.array([], dtype=np.float32)
+                x = paddle.to_tensor(np.reshape(array, [0, 0]), dtype='float32')
+                paddle.linalg.norm(x, axis=0)
+
+            self.assertRaises(ValueError, test_0_size)
+
 
 if __name__ == '__main__':
     paddle.enable_static()
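
Note (not part of the patch): a minimal sketch of the user-facing behavior the new kernel-side check targets, mirroring the added unit test. It assumes a Paddle build that includes this change and that the PADDLE_ENFORCE_LT failure surfaces in Python as a ValueError, as the test above asserts; the zero-size tensor is built with np.zeros here instead of the test's np.array/np.reshape for brevity.

    import numpy as np
    import paddle

    # A zero-size input: shape [0, 0], so every dim fails the 0 < xdim[i] check.
    x = paddle.to_tensor(np.zeros([0, 0], dtype=np.float32))

    try:
        paddle.linalg.norm(x, axis=0)
    except ValueError as e:
        # With the check in place, the norm kernel rejects the input up front
        # instead of running into a divide-by-zero.
        print("zero-size input rejected:", e)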