[PIR] Migrate Sparse API No.7 #67442

Merged
16 changes: 12 additions & 4 deletions python/paddle/sparse/binary.py
@@ -58,7 +58,6 @@
 }
 
 
-@dygraph_only
 def matmul(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
     """
     Note:
@@ -133,10 +132,12 @@ def matmul(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
                     [2., 2.],
                     [3., 3.]])
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
     return _C_ops.sparse_matmul(x, y)
 
 
-@dygraph_only
 def masked_matmul(
     x: Tensor, y: Tensor, mask: Tensor, name: str | None = None
 ) -> Tensor:
@@ -199,10 +200,12 @@ def masked_matmul(
                    values=[0.98986477, 0.97800624, 1.14591956, 0.68561077, 0.94714981])
 
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
     return _C_ops.sparse_masked_matmul(x, y, mask)
 
 
-@dygraph_only
 def mv(x: Tensor, vec: Tensor, name: str | None = None) -> Tensor:
     """
     Note:
@@ -257,6 +260,9 @@ def mv(x: Tensor, vec: Tensor, name: str | None = None) -> Tensor:
                    [-3.85499096, -2.42975140, -1.75087738])
 
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
     return _C_ops.sparse_mv(x, vec)
 
 
@@ -474,7 +480,6 @@ def divide(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
     )
 
 
-@dygraph_only
 def is_same_shape(x: Tensor, y: Tensor) -> bool:
     """
     Return the results of shape comparison between two Tensors, check whether x.shape equal to y.shape.
@@ -504,6 +509,9 @@ def is_same_shape(x: Tensor, y: Tensor) -> bool:
            False
 
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
    return x.is_same_shape(y)
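For reviewers who want to poke at the migrated binary ops, here is a minimal sketch (not part of this diff) of calling `paddle.sparse.matmul` in dynamic mode; the shapes and values are illustrative, and a CUDA 11+ build is assumed since the sparse matmul kernels are GPU-only:

```python
import paddle

# A 3x3 COO sparse matrix with three non-zeros (illustrative values).
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
sp_x = paddle.sparse.sparse_coo_tensor(indices, values, shape=[3, 3])
dense_y = paddle.ones([3, 2])

# With @dygraph_only removed, the call is now guarded by the runtime
# in_dynamic_or_pir_mode() assert instead of being rejected outright
# whenever the program is not in dygraph mode.
out = paddle.sparse.matmul(sp_x, dense_y)  # sparse x dense -> dense
```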
6 changes: 4 additions & 2 deletions python/paddle/sparse/multiary.py
@@ -17,15 +17,14 @@
 from typing import TYPE_CHECKING
 
 from paddle import _C_ops
-from paddle.base.framework import dygraph_only
+from paddle.base.framework import in_dynamic_or_pir_mode
 
 if TYPE_CHECKING:
     from paddle import Tensor
 
 __all__ = []
 
 
-@dygraph_only
 def addmm(
     input: Tensor,
     x: Tensor,
@@ -94,4 +93,7 @@ def addmm(
             >>> out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
 
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
     return _C_ops.sparse_addmm(input, x, y, beta, alpha)
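As a reminder of the semantics the migrated `addmm` keeps, a short sketch mirroring the docstring example (`beta` and `alpha` are passed positionally; illustrative shapes, CUDA 11+ assumed):

```python
import paddle

inp = paddle.rand([3, 2])
indices = [[0, 1, 2], [1, 2, 0]]
values = [1.0, 2.0, 3.0]
sp_x = paddle.sparse.sparse_coo_tensor(indices, values, shape=[3, 3])
y = paddle.rand([3, 2])

# out = beta * inp + alpha * (sp_x @ y), here 3.0 * inp + 2.0 * (sp_x @ y)
out = paddle.sparse.addmm(inp, sp_x, y, 3.0, 2.0)
```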
10 changes: 7 additions & 3 deletions python/paddle/sparse/nn/functional/activation.py
@@ -19,7 +19,7 @@
 __all__ = []
 
 from paddle import _C_ops
-from paddle.base.framework import dygraph_only, in_dynamic_or_pir_mode
+from paddle.base.framework import in_dynamic_or_pir_mode
 from paddle.base.layer_helper import LayerHelper
 
 if TYPE_CHECKING:
@@ -152,7 +152,6 @@ def softmax(x: Tensor, axis: int = -1, name: str | None = None) -> Tensor:
     return out
 
 
-@dygraph_only
 def relu6(x: Tensor, name: str | None = None) -> Tensor:
     """
     sparse relu6 activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
@@ -178,10 +177,12 @@ def relu6(x: Tensor, name: str | None = None) -> Tensor:
             >>> sparse_x = dense_x.to_sparse_coo(1)
             >>> out = paddle.sparse.nn.functional.relu6(sparse_x)
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
     return _C_ops.sparse_relu6(x)
 
 
-@dygraph_only
 def leaky_relu(
     x: Tensor, negative_slope: float = 0.01, name: str | None = None
 ) -> Tensor:
@@ -216,4 +217,7 @@ def leaky_relu(
             >>> sparse_x = dense_x.to_sparse_coo(1)
             >>> out = paddle.sparse.nn.functional.leaky_relu(sparse_x, 0.5)
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
     return _C_ops.sparse_leaky_relu(x, negative_slope)
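Both migrated activations act elementwise on the stored values only; a small sketch mirroring the docstring examples (illustrative values, runnable on CPU):

```python
import paddle

dense_x = paddle.to_tensor([-2.0, 0.0, 8.0])
sparse_x = dense_x.to_sparse_coo(1)

# relu6 clamps stored values to [0, 6]; leaky_relu scales negatives by the slope.
out_relu6 = paddle.sparse.nn.functional.relu6(sparse_x)
out_leaky = paddle.sparse.nn.functional.leaky_relu(sparse_x, 0.5)
```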
6 changes: 4 additions & 2 deletions python/paddle/sparse/nn/functional/transformer.py
@@ -19,13 +19,12 @@
 __all__ = []
 
 from paddle import _C_ops
-from paddle.base.framework import dygraph_only
+from paddle.base.framework import in_dynamic_or_pir_mode
 
 if TYPE_CHECKING:
     from paddle import Tensor
 
 
-@dygraph_only
 def attention(
     query: Tensor,
     key: Tensor,
@@ -98,6 +97,9 @@ def attention(
             >>> output = paddle.sparse.nn.functional.attention(query, key, value, sp_mask, kp_mask, attn_mask)
             >>> output.backward()
     """
+    assert (
+        in_dynamic_or_pir_mode()
+    ), "Currently, Sparse API only support dynamic mode or pir mode."
     return _C_ops.sparse_fused_attention(
         query, key, value, sparse_mask, key_padding_mask, attn_mask
     )
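For context, a sketch of driving the fused sparse attention path. Shapes follow the docstring convention `[batch_size, num_heads, seq_len, head_dim]`; the CSR mask construction is an assumption (a random 0/1 pattern shared across heads), and a CUDA 11.8+ build is assumed:

```python
import paddle

batch, heads, seq, head_dim = 1, 2, 64, 32
query = paddle.rand([batch, heads, seq, head_dim])
key = paddle.rand([batch, heads, seq, head_dim])
value = paddle.rand([batch, heads, seq, head_dim])

# Sparsity pattern for all heads as a batched CSR mask of
# shape [batch * heads, seq, seq].
mask = paddle.randint(0, 2, [seq, seq]).astype(query.dtype)
sp_mask = mask.expand([batch * heads, seq, seq]).to_sparse_csr()

# key_padding_mask and attn_mask stay optional, as in the docstring example.
output = paddle.sparse.nn.functional.attention(query, key, value, sp_mask)
```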
93 changes: 93 additions & 0 deletions test/legacy_test/test_sparse_addmm_op.py
@@ -19,6 +19,7 @@
 import numpy as np
 
 import paddle
+from paddle.base.framework import in_pir_mode
 
 paddle.set_default_dtype('float64')
 
@@ -105,5 +106,97 @@ def test_addmm_3d(self):
         self.check_result([8, 16, 10], [8, 16, 12], [8, 12, 10], 'csr')
 
 
+class TestAddmmStatic(unittest.TestCase):
+
+    def check_result(self, input_shape, x_shape, y_shape):
+        '''Only support sparse_coo_tensor in static graph'''
+        if len(x_shape) == 3:
+            mask = paddle.randint(0, 2, [x_shape[-2], x_shape[-1]])
+        else:
+            mask = paddle.randint(0, 2, x_shape)
+
+        origin_input = paddle.rand(input_shape)
+        origin_x = paddle.rand(x_shape) * mask.astype(
+            paddle.get_default_dtype()
+        )
+        origin_y = paddle.rand(y_shape)
+
+        dense_input = origin_input.detach()
+        dense_x = origin_x.detach()
+        dense_y = origin_y.detach()
+        dense_out = 2.0 * paddle.matmul(dense_x, dense_y) + 3.0 * dense_input
+
+        indices_data, values_data = (
+            origin_x.detach().to_sparse_coo(sparse_dim=len(x_shape)).indices(),
+            origin_x.detach().to_sparse_coo(sparse_dim=len(x_shape)).values(),
+        )
+
+        paddle.enable_static()
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            indices = paddle.static.data(
+                name='indices',
+                shape=indices_data.shape,
+                dtype=indices_data.dtype,
+            )
+            values = paddle.static.data(
+                name='values',
+                shape=values_data.shape,
+                dtype=values_data.dtype,
+            )
+            sp_x = paddle.sparse.sparse_coo_tensor(
+                indices,
+                values,
+                shape=dense_x.shape,
+                dtype=dense_x.dtype,
+            )
+            sp_y = paddle.static.data(
+                name='sp_y',
+                shape=dense_y.shape,
+                dtype=dense_y.dtype,
+            )
+            sp_input = paddle.static.data(
+                name='sp_input',
+                shape=dense_input.shape,
+                dtype=dense_input.dtype,
+            )
+            sp_out = paddle.sparse.addmm(sp_input, sp_x, sp_y, 3.0, 2.0)
+            sp_dense_out = sp_out.to_dense()
+
+            sparse_exe = paddle.static.Executor()
+            sparse_fetch = sparse_exe.run(
+                feed={
+                    'indices': indices_data.numpy(),
+                    "values": values_data.numpy(),
+                    'sp_y': origin_y.numpy(),
+                    'sp_input': origin_input.numpy(),
+                },
+                fetch_list=[sp_dense_out],
+                return_numpy=True,
+            )
+
+        np.testing.assert_allclose(
+            dense_out.numpy(), sparse_fetch[0], rtol=1e-5
+        )
+        paddle.disable_static()
+
+    @unittest.skipIf(
+        not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000,
+        "only support cuda>=11.0",
+    )
+    def test_addmm_2d(self):
+        if in_pir_mode():
+            self.check_result([16, 10], [16, 12], [12, 10])
+
+    @unittest.skipIf(
+        not paddle.is_compiled_with_cuda() or get_cuda_version() < 11080,
+        "only support cuda>=11.8",
+    )
+    def test_addmm_3d(self):
+        if in_pir_mode():
+            self.check_result([8, 16, 10], [8, 16, 12], [8, 12, 10])
+
+
 if __name__ == "__main__":
     unittest.main()
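Note that the new `TestAddmmStatic` bodies are deliberate no-ops unless the process is already in PIR mode. A condensed sketch of the same static-graph pattern on a smaller op; the `FLAGS_enable_pir_api` switch and the CPU `relu6` path are assumptions for illustration, not part of this PR:

```python
import os

# Assumed PIR switch; must be set before paddle is imported.
os.environ['FLAGS_enable_pir_api'] = '1'

import numpy as np
import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    # Feed the COO components through placeholders; only sparse_coo_tensor
    # is supported in static graph, per the test's docstring.
    indices = paddle.static.data(name='indices', shape=[1, 3], dtype='int64')
    values = paddle.static.data(name='values', shape=[3], dtype='float32')
    sp_x = paddle.sparse.sparse_coo_tensor(indices, values, shape=[5])
    out = paddle.sparse.nn.functional.relu6(sp_x).to_dense()

    exe = paddle.static.Executor()
    (res,) = exe.run(
        feed={
            'indices': np.array([[0, 2, 4]], dtype='int64'),
            'values': np.array([-1.0, 3.0, 9.0], dtype='float32'),
        },
        fetch_list=[out],
        return_numpy=True,
    )

print(res)  # relu6 clamps stored values to [0, 6]: [0. 0. 3. 0. 6.]
paddle.disable_static()
```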