diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 60c5afb99fc7b..98efac14802e5 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -20,7 +20,7 @@
 from ..base.data_feeder import check_dtype, check_type, check_variable_and_dtype
 from ..common_ops_import import Variable
-from ..framework import LayerHelper, in_dynamic_mode
+from ..framework import LayerHelper, in_dynamic_mode, in_dynamic_or_pir_mode
 from .creation import full
 from .manipulation import cast
 from .math import _get_reduce_axis
@@ -225,7 +225,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
             [10, 3, 5, 5]
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.matmul(x, y, transpose_x, transpose_y)
     else:
         attrs = {
diff --git a/test/legacy_test/test_matmul_v2_op.py b/test/legacy_test/test_matmul_v2_op.py
index f867d4c959636..0293e0414a23e 100644
--- a/test/legacy_test/test_matmul_v2_op.py
+++ b/test/legacy_test/test_matmul_v2_op.py
@@ -98,7 +98,8 @@ def setUp(self):

     def test_check_output(self):
         self.check_output(
-            check_cinn=self.check_cinn if hasattr(self, 'check_cinn') else True
+            check_cinn=self.check_cinn if hasattr(self, 'check_cinn') else True,
+            check_new_ir=True,
         )

     def test_check_grad(self):
@@ -110,6 +111,7 @@
                 check_cinn=self.check_cinn
                 if hasattr(self, 'check_cinn')
                 else True,
+                check_new_ir=True,
             )
         else:
             self.check_grad(
@@ -118,6 +120,7 @@
                 check_cinn=self.check_cinn
                 if hasattr(self, 'check_cinn')
                 else True,
+                check_new_ir=True,
             )
@@ -359,6 +362,7 @@ def test_check_output(self):
                 check_cinn=self.check_cinn
                 if hasattr(self, 'check_cinn')
                 else True,
+                check_new_ir=True,
             )

         def test_check_grad(self):
@@ -372,6 +376,7 @@
                 check_cinn=self.check_cinn
                 if hasattr(self, 'check_cinn')
                 else True,
+                check_new_ir=True,
             )

     cls_name = "{}_{}".format(parent.__name__, "Fp16")
@@ -431,6 +436,7 @@ def test_check_output(self):
                 check_cinn=self.check_cinn
                 if hasattr(self, 'check_cinn')
                 else True,
+                check_new_ir=True,
             )

         def test_check_grad_x(self):
@@ -447,6 +453,7 @@
                 check_cinn=self.check_cinn
                 if hasattr(self, 'check_cinn')
                 else True,
+                check_new_ir=True,
             )

         def test_check_grad_y(self):
@@ -463,6 +470,7 @@
                 check_cinn=self.check_cinn
                 if hasattr(self, 'check_cinn')
                 else True,
+                check_new_ir=True,
             )

         def test_check_grad(self):
@@ -499,6 +507,7 @@ def setUp(self):
             self.places.append(base.CUDAPlace(0))

     def check_static_result(self, place):
+        paddle.enable_static()
         with base.program_guard(base.Program(), base.Program()):
             input_x = paddle.static.data(
                 name="input_x", shape=[4, 3], dtype="float32"
             )
@@ -518,6 +527,7 @@ def check_static_result(self, place):
                 feed={"input_x": x_np, "input_y": y_np},
                 fetch_list=[result],
             )
+        paddle.disable_static()

     def test_static(self):
@@ -735,7 +745,7 @@ def init_input_output(self):
         self.out = np.matmul(self.x, self.y)

     def test_check_output(self):
-        self.check_output(check_cinn=False)
+        self.check_output(check_cinn=False, check_new_ir=True)


 class TestInt32MatMulOpBroadcast(OpTest):
@@ -787,7 +797,7 @@ def init_input_output(self):
         self.out = np.matmul(self.x, self.y)

     def test_check_output(self):
-        self.check_output(check_cinn=False)
+        self.check_output(check_cinn=False, check_new_ir=True)


 class TestInt64MatMulOpBroadcast(OpTest):
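
For context on the `check_static_result` change above: the fix wraps the static-graph test body in `paddle.enable_static()` / `paddle.disable_static()` so the test runs in static mode and restores dynamic mode afterwards. Below is a minimal, self-contained sketch of that pattern (not part of the patch) using Paddle's public static-graph API; the variable names and the 4x3 / 3x4 shapes are illustrative, chosen to mirror the inputs in the test.

```python
import numpy as np
import paddle

# Switch to static-graph mode, mirroring the paddle.enable_static()
# call the patch adds at the top of check_static_result.
paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="input_x", shape=[4, 3], dtype="float32")
    y = paddle.static.data(name="input_y", shape=[3, 4], dtype="float32")
    # matmul now dispatches through in_dynamic_or_pir_mode() per the patch.
    out = paddle.matmul(x, y)

exe = paddle.static.Executor(paddle.CPUPlace())
x_np = np.random.random([4, 3]).astype("float32")
y_np = np.random.random([3, 4]).astype("float32")
(result,) = exe.run(
    main_prog,
    feed={"input_x": x_np, "input_y": y_np},
    fetch_list=[out],
)

# Restore dynamic mode, mirroring the paddle.disable_static() call the
# patch adds at the end, so later tests are unaffected by leaked state.
paddle.disable_static()
```

A plausible reading of why the enable/disable pair is needed: the other tests in this file (including the new `check_new_ir=True` runs) assume dynamic mode is active, so a static-graph test that does not restore dynamic mode on exit would leak global state into subsequent tests.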