[xdoctest] reformat example code with google style in No.386-389 (#58206)

* fix

* undo mkldnn

* fix

* fix sys msg

* fix another sys msg

---------

Co-authored-by: SigureMo <[email protected]>
enkilee and SigureMo authored Oct 25, 2023
1 parent abf5ecb commit ff79525
Showing 4 changed files with 30 additions and 17 deletions.
25 changes: 19 additions & 6 deletions python/paddle/nn/functional/flash_attention.py
@@ -200,10 +200,22 @@ def flash_attention(
>>> import paddle
>>> paddle.seed(1)
>>> paddle.seed(2023)
>>> q = paddle.rand((1, 128, 2, 16))
>>> output = paddle.nn.functional.flash_attention.flash_attention(q, q, q, 0.9, False, False)
>>> print(output)
(Tensor(shape=[1, 128, 2, 16], dtype=float32, place=Place(cpu), stop_gradient=True,
[[[[0.34992966, 0.34456208, 0.45826620, ..., 0.39883569,
0.42132431, 0.39157745],
[0.76687670, 0.65837246, 0.69117945, ..., 0.82817286,
0.76690865, 0.71485823]],
...,
[[0.71662450, 0.57275224, 0.57053083, ..., 0.48108247,
0.53336465, 0.54540104],
[0.59137970, 0.51350880, 0.50449550, ..., 0.38860250,
0.40526697, 0.60541755]]]]), None)
"""
head_dim = query.shape[3]
sdp_func_name = _select_sdp(head_dim)
@@ -357,11 +369,12 @@ def flash_attn_unpadded(
.. code-block:: python
>>> import paddle
>>> paddle.seed(1)
>>> q = paddle.rand((1, 128, 2, 16))
>>> paddle.seed(2023)
>>> q = paddle.rand((2, 128, 8, 16), dtype='float16')
>>> cu = paddle.arange(0, 384, 128, dtype='int32')
>>> qq = paddle.reshape(q, [256, 8, 16])
>>> output = paddle.nn.functional.flash_attention.flash_attn_unpadded(qq, qq, qq, cu, cu, 128, 128, 0.25, 0.0, False, False)
>>> output = paddle.nn.functional.flash_attention.flash_attn_unpadded(q, q, q, 0.9, False, False)
>>> print(output)
"""
if in_dynamic_mode():
(
@@ -478,7 +491,7 @@ def scaled_dot_product_attention(
Examples:
.. code-block:: python
>>> # doctest: +SKIP()
>>> # doctest: +SKIP('bfloat need V100 compile')
>>> import paddle
>>> q = paddle.rand((1, 128, 2, 16), dtype=paddle.bfloat16)
>>> output = paddle.nn.functional.scaled_dot_product_attention(q, q, q, None, 0.9, False)
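To make the new flash_attention example above easier to follow, here is a minimal sketch of the same call with the trailing positional arguments spelled out. The keyword names dropout, causal and return_softmax for the values (0.9, False, False) are an assumption based on the docstring context, not something these hunks state explicitly:

    import paddle

    paddle.seed(2023)
    # Query/key/value laid out as [batch_size, seq_len, num_heads, head_dim],
    # matching the (1, 128, 2, 16) shape used in the docstring example above.
    q = paddle.rand((1, 128, 2, 16))

    # Assumed keyword names for the three trailing positional arguments in the diff.
    out, softmax = paddle.nn.functional.flash_attention.flash_attention(
        q, q, q, dropout=0.9, causal=False, return_softmax=False
    )
    print(out.shape)  # [1, 128, 2, 16], the same layout as the inputs
    print(softmax)    # None when the softmax is not requested

The tuple printed in the diff, (Tensor(shape=[1, 128, 2, 16], ...), None), is consistent with this two-element return, and the Place(cpu) in that output indicates the example was generated on CPU.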
6 changes: 3 additions & 3 deletions python/paddle/quantization/config.py
@@ -127,7 +127,7 @@ def add_layer_config(
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> # doctest: +SKIP('random memory address')
>>> print(q_config)
Global config:
None
@@ -176,7 +176,7 @@ def add_name_config(
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_name_config([model.fc.full_name()], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> # doctest: +SKIP('random memory address')
>>> print(q_config)
Global config:
None
@@ -226,7 +226,7 @@ def add_type_config(
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_type_config([Linear], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> # doctest: +SKIP('random memory address')
>>> print(q_config)
Global config:
None
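The three config.py hunks above all build on the same setup, most of which sits in the collapsed part of the docstring. A minimal sketch of that setup follows; the Model class and the FakeQuanterWithAbsMaxObserver import path are assumptions filled in for illustration:

    import paddle
    from paddle.quantization import QuantConfig
    from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver  # assumed import path

    # Stand-in for the `model` used in the docstring: any Layer with an `fc` sublayer.
    class Model(paddle.nn.Layer):
        def __init__(self):
            super().__init__()
            self.fc = paddle.nn.Linear(16, 16)

    model = Model()
    quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)

    # Start from an empty global config and attach the quanter to a specific layer.
    # add_name_config and add_type_config follow the same pattern, keyed by
    # model.fc.full_name() and by the Linear type respectively.
    q_config = QuantConfig(activation=None, weight=None)
    q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)

    # Printing the config includes live object addresses, which is why the diff
    # adds +SKIP('random memory address') before the print calls.
    print(q_config)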
2 changes: 1 addition & 1 deletion python/paddle/quantization/factory.py
@@ -83,7 +83,7 @@ def quanter(class_name):
Examples:
.. code-block:: python
>>> # doctest: +SKIP
>>> # doctest: +SKIP('need 2 file to run example')
>>> # Given codes in ./customized_quanter.py
>>> from paddle.quantization import quanter
>>> from paddle.quantization import BaseQuanter
14 changes: 7 additions & 7 deletions python/paddle/tensor/linalg.py
@@ -1958,14 +1958,12 @@ def slogdet(x, name=None):
>>> import paddle
>>> paddle.seed(2023)
>>> x = paddle.randn([3,3,3])
>>> x = paddle.randn([3, 3, 3])
>>> A = paddle.linalg.slogdet(x)
>>> print(A)
>>> # doctest: +SKIP
Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[-1. , 1. , 1. ],
[ 0.25681755, -0.25061053, -0.10809582]])
>>> # doctest: -SKIP
"""
if in_dynamic_mode():
@@ -2801,10 +2799,12 @@ def eigh(x, UPLO='L', name=None):
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
- out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64.
The eigenvalues of eigh op.
- out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,
complex64 and complex128. The eigenvectors of eigh op.
2-element tuple containing
- out_value(Tensor): A Tensor with shape :math:`[*, N]` and data type of float32 and float64.
The eigenvalues of eigh op.
- out_vector(Tensor): A Tensor with shape :math:`[*, N, N]` and data type of float32, float64,
complex64 and complex128. The eigenvectors of eigh op.
Examples:
.. code-block:: python
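The rewritten Returns section for eigh above describes a 2-element tuple: eigenvalues of shape [*, N] and eigenvectors of shape [*, N, N]. A minimal sketch, assuming a real symmetric input built by hand (the input construction is not part of the diff):

    import paddle

    paddle.seed(2023)
    # eigh expects a Hermitian matrix; a real symmetric one is used here.
    a = paddle.randn([4, 4])
    x = (a + paddle.transpose(a, perm=[1, 0])) / 2

    # 2-element tuple: eigenvalues of shape [*, N], eigenvectors of shape [*, N, N].
    out_value, out_vector = paddle.linalg.eigh(x, UPLO='L')
    print(out_value.shape)   # [4]
    print(out_vector.shape)  # [4, 4]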
