diff --git a/python/paddle/batch.py b/python/paddle/batch.py index 958166bc1495a..788e413fa96c9 100644 --- a/python/paddle/batch.py +++ b/python/paddle/batch.py @@ -35,21 +35,20 @@ def batch(reader, batch_size, drop_last=False): Examples: .. code-block:: python - import paddle - def reader(): - for i in range(10): - yield i - batch_reader = paddle.batch(reader, batch_size=2) + >>> import paddle + >>> def reader(): + ... for i in range(10): + ... yield i + >>> batch_reader = paddle.batch(reader, batch_size=2) - for data in batch_reader(): - print(data) - - # Output is - # [0, 1] - # [2, 3] - # [4, 5] - # [6, 7] - # [8, 9] + >>> for data in batch_reader(): + ... print(data) + ... + [0, 1] + [2, 3] + [4, 5] + [6, 7] + [8, 9] """ def batch_reader(): diff --git a/python/paddle/dataset/image.py b/python/paddle/dataset/image.py index 261e12ba69d37..771fb189432f3 100755 --- a/python/paddle/dataset/image.py +++ b/python/paddle/dataset/image.py @@ -123,9 +123,9 @@ def load_image_bytes(bytes, is_color=True): .. code-block:: python - with open('cat.jpg') as f: - im = load_image_bytes(f.read()) - + >>> with open('cat.jpg') as f: + ... im = load_image_bytes(f.read()) + ... :param bytes: the input image bytes array. :type bytes: str :param is_color: If set is_color True, it will load and @@ -148,7 +148,7 @@ def load_image(file, is_color=True): .. code-block:: python - im = load_image('cat.jpg') + >>> im = load_image('cat.jpg') :param file: the input image path. :type file: string @@ -178,8 +178,8 @@ def resize_short(im, size): .. code-block:: python - im = load_image('cat.jpg') - im = resize_short(im, 256) + >>> im = load_image('cat.jpg') + >>> im = resize_short(im, 256) :param im: the input image with HWC layout. :type im: ndarray @@ -208,9 +208,9 @@ def to_chw(im, order=(2, 0, 1)): .. code-block:: python - im = load_image('cat.jpg') - im = resize_short(im, 256) - im = to_chw(im) + >>> im = load_image('cat.jpg') + >>> im = resize_short(im, 256) + >>> im = to_chw(im) :param im: the input image with HWC layout. :type im: ndarray @@ -230,7 +230,8 @@ def center_crop(im, size, is_color=True): .. code-block:: python - im = center_crop(im, 224) + >>> im = load_image('cat.jpg') + >>> im = center_crop(im, 224) :param im: the input image with HWC layout. :type im: ndarray @@ -258,7 +259,8 @@ def random_crop(im, size, is_color=True): .. code-block:: python - im = random_crop(im, 224) + >>> im = load_image('cat.jpg') + >>> im = random_crop(im, 224) :param im: the input image with HWC layout. :type im: ndarray @@ -287,7 +289,8 @@ def left_right_flip(im, is_color=True): .. code-block:: python - im = left_right_flip(im) + >>> im = load_image('cat.jpg') + >>> im = left_right_flip(im) :param im: input image with HWC layout or HW layout for gray image :type im: ndarray @@ -311,7 +314,8 @@ def simple_transform( .. code-block:: python - im = simple_transform(im, 256, 224, True) + >>> im = load_image('cat.jpg') + >>> im = simple_transform(im, 256, 224, True) :param im: The input image with HWC layout. :type im: ndarray @@ -365,7 +369,7 @@ def load_and_transform( .. code-block:: python - im = load_and_transform('cat.jpg', 256, 224, True) + >>> im = load_and_transform('cat.jpg', 256, 224, True) :param filename: The file name of input image. :type filename: string diff --git a/python/paddle/fft.py b/python/paddle/fft.py index 4cfa5e2da6662..704bc56823ae1 100644 --- a/python/paddle/fft.py +++ b/python/paddle/fft.py @@ -196,16 +196,14 @@ def fft(x, n=None, axis=-1, norm="backward", name=None): .. 
code-block:: python - import numpy as np - import paddle + >>> import numpy as np + >>> import paddle - x = np.exp(3j * np.pi * np.arange(7) / 7) - xp = paddle.to_tensor(x) - fft_xp = paddle.fft.fft(xp).numpy() - print(fft_xp) - # [1.+1.25396034e+00j 1.+4.38128627e+00j 1.-4.38128627e+00j - # 1.-1.25396034e+00j 1.-4.81574619e-01j 1.+8.88178420e-16j - # 1.+4.81574619e-01j] + >>> x = np.exp(3j * np.pi * np.arange(7) / 7) + >>> xp = paddle.to_tensor(x) + >>> fft_xp = paddle.fft.fft(xp).numpy().round(3) + >>> print(fft_xp) + [1.+1.254j 1.+4.381j 1.-4.381j 1.-1.254j 1.-0.482j 1.+0.j 1.+0.482j] """ @@ -261,17 +259,14 @@ def ifft(x, n=None, axis=-1, norm="backward", name=None): .. code-block:: python - import numpy as np - import paddle + >>> import numpy as np + >>> import paddle - x = np.exp(3j * np.pi * np.arange(7) / 7) - xp = paddle.to_tensor(x) - ifft_xp = paddle.fft.ifft(xp).numpy() - print(ifft_xp) - # [0.14285714+1.79137191e-01j 0.14285714+6.87963741e-02j - # 0.14285714+1.26882631e-16j 0.14285714-6.87963741e-02j - # 0.14285714-1.79137191e-01j 0.14285714-6.25898038e-01j - # 0.14285714+6.25898038e-01j] + >>> x = np.exp(3j * np.pi * np.arange(7) / 7) + >>> xp = paddle.to_tensor(x) + >>> ifft_xp = paddle.fft.ifft(xp).numpy().round(3) + >>> print(ifft_xp) + [0.143+0.179j 0.143+0.069j 0.143+0.j 0.143-0.069j 0.143-0.179j 0.143-0.626j 0.143+0.626j] """ if is_integer(x) or is_floating_point(x): @@ -325,12 +320,12 @@ def rfft(x, n=None, axis=-1, norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor([0.0, 1.0, 0.0, 0.0]) - print(paddle.fft.rfft(x)) - # Tensor(shape=[3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, - # [ (1+0j), -1j , (-1+0j)]) + >>> x = paddle.to_tensor([0.0, 1.0, 0.0, 0.0]) + >>> print(paddle.fft.rfft(x)) + Tensor(shape=[3], dtype=complex64, place=Place(cpu), stop_gradient=True, + [(1+0j), -1j, (-1+0j)]) """ return fft_r2c(x, n, axis, norm, forward=True, onesided=True, name=name) @@ -375,13 +370,13 @@ def irfft(x, n=None, axis=-1, norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor([1, -1j, -1]) - irfft_x = paddle.fft.irfft(x) - print(irfft_x) - # Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - # [0., 1., 0., 0.]) + >>> x = paddle.to_tensor([1, -1j, -1]) + >>> irfft_x = paddle.fft.irfft(x) + >>> print(irfft_x) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [0., 1., 0., 0.]) """ return fft_c2r(x, n, axis, norm, forward=False, name=name) @@ -417,13 +412,13 @@ def hfft(x, n=None, axis=-1, norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor([1, -1j, -1]) - hfft_x = paddle.fft.hfft(x) - print(hfft_x) - # Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - # [0., 0., 0., 4.]) + >>> x = paddle.to_tensor([1, -1j, -1]) + >>> hfft_x = paddle.fft.hfft(x) + >>> print(hfft_x) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [0., 0., 0., 4.]) """ return fft_c2r(x, n, axis, norm, forward=True, name=name) @@ -466,15 +461,19 @@ def ihfft(x, n=None, axis=-1, norm="backward", name=None): .. 
code-block:: python - import paddle + >>> import paddle - spectrum = paddle.to_tensor([10.0, -5.0, 0.0, -1.0, 0.0, -5.0]) - print(paddle.fft.ifft(spectrum)) - # Tensor(shape=[6], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, - # [(-0.1666666716337204+0j), (1-1.9868215517249155e-08j), (2.3333334922790527-1.9868215517249155e-08j), (3.5+0j), (2.3333334922790527+1.9868215517249155e-08j), (1+1.9868215517249155e-08j)]) - print(paddle.fft.ihfft(spectrum)) - # Tensor(shape = [4], dtype = complex64, place = CUDAPlace(0), stop_gradient = True, - # [(-0.1666666716337204+0j), (1-1.9868215517249155e-08j), (2.3333334922790527-1.9868215517249155e-08j), (3.5+0j)]) + >>> spectrum = paddle.to_tensor([10.0, -5.0, 0.0, -1.0, 0.0, -5.0]) + >>> print(paddle.fft.ifft(spectrum)) + Tensor(shape=[6], dtype=complex64, place=Place(cpu), stop_gradient=True, + [(-0.1666666716337204+0j), (1-0j), + (2.3333334922790527-0j), (3.5+0j), + (2.3333334922790527+0j), (1+0j)]) + + >>> print(paddle.fft.ihfft(spectrum)) + Tensor(shape=[4], dtype=complex64, place=Place(cpu), stop_gradient=True, + [(-0.1666666716337204+0j), (1-0j), + (2.3333334922790527-0j), (3.5+0j)]) """ return fft_r2c(x, n, axis, norm, forward=False, onesided=True, name=name) @@ -516,33 +515,30 @@ def fftn(x, s=None, axes=None, norm="backward", name=None): .. code-block:: python - import paddle - - arr = paddle.arange(4, dtype="float64") - x = paddle.meshgrid(arr, arr, arr)[1] - - fftn_xp = paddle.fft.fftn(x, axes=(1, 2)) - print(fftn_xp) - # Tensor(shape=[4, 4, 4], dtype=complex128, place=Place(gpu:0), stop_gradient=True, - # [[[(24+0j), 0j , 0j , -0j ], - # [(-8+8j), 0j , 0j , -0j ], - # [(-8+0j), 0j , 0j , -0j ], - # [(-8-8j), 0j , 0j , -0j ]], - - # [[(24+0j), 0j , 0j , -0j ], - # [(-8+8j), 0j , 0j , -0j ], - # [(-8+0j), 0j , 0j , -0j ], - # [(-8-8j), 0j , 0j , -0j ]], - - # [[(24+0j), 0j , 0j , -0j ], - # [(-8+8j), 0j , 0j , -0j ], - # [(-8+0j), 0j , 0j , -0j ], - # [(-8-8j), 0j , 0j , -0j ]], - - # [[(24+0j), 0j , 0j , -0j ], - # [(-8+8j), 0j , 0j , -0j ], - # [(-8+0j), 0j , 0j , -0j ], - # [(-8-8j), 0j , 0j , -0j ]]]) + >>> import paddle + + >>> arr = paddle.arange(4, dtype="float64") + >>> x = paddle.meshgrid(arr, arr, arr)[1] + + >>> fftn_xp = paddle.fft.fftn(x, axes=(1, 2)) + >>> print(fftn_xp) + Tensor(shape=[4, 4, 4], dtype=complex128, place=Place(cpu), stop_gradient=True, + [[[(24+0j), 0j, 0j, -0j], + [(-8+8j), 0j, 0j, -0j], + [(-8+0j), 0j, 0j, -0j], + [(-8-8j), 0j, 0j, -0j]], + [[(24+0j), 0j, 0j, -0j], + [(-8+8j), 0j, 0j, -0j], + [(-8+0j), 0j, 0j, -0j], + [(-8-8j), 0j, 0j, -0j]], + [[(24+0j), 0j, 0j, -0j], + [(-8+8j), 0j, 0j, -0j], + [(-8+0j), 0j, 0j, -0j], + [(-8-8j), 0j, 0j, -0j]], + [[(24+0j), 0j, 0j, -0j], + [(-8+8j), 0j, 0j, -0j], + [(-8+0j), 0j, 0j, -0j], + [(-8-8j), 0j, 0j, -0j]]]) """ if is_integer(x) or is_floating_point(x): return fftn_r2c( @@ -596,21 +592,21 @@ def ifftn(x, s=None, axes=None, norm="backward", name=None): .. 
code-block:: python - import paddle - - x = paddle.eye(3) - ifftn_x = paddle.fft.ifftn(x, axes=(1,)) - print(ifftn_x) - # Tensor(shape=[3, 3], dtype=complex64, place=Place(cpu), stop_gradient=True, - # [[ (0.3333333432674408+0j) , - # (0.3333333432674408-0j) , - # (0.3333333432674408+0j) ], - # [ (0.3333333432674408+0j) , - # (-0.1666666716337204+0.28867512941360474j), - # (-0.1666666716337204-0.28867512941360474j)], - # [ (0.3333333432674408+0j) , - # (-0.1666666716337204-0.28867512941360474j), - # (-0.1666666716337204+0.28867512941360474j)]]) + >>> import paddle + + >>> x = paddle.eye(3) + >>> ifftn_x = paddle.fft.ifftn(x, axes=(1,)) + >>> print(ifftn_x) + Tensor(shape=[3, 3], dtype=complex64, place=Place(cpu), stop_gradient=True, + [[(0.3333333432674408+0j), + (0.3333333432674408-0j), + (0.3333333432674408+0j)], + [(0.3333333432674408+0j), + (-0.1666666716337204+0.28867512941360474j), + (-0.1666666716337204-0.28867512941360474j)], + [(0.3333333432674408+0j), + (-0.1666666716337204-0.28867512941360474j), + (-0.1666666716337204+0.28867512941360474j)]]) """ if is_integer(x) or is_floating_point(x): return fftn_r2c( @@ -672,30 +668,28 @@ def rfftn(x, s=None, axes=None, norm="backward", name=None): Examples: .. code-block:: python - import paddle - - # default, all axis will be used to exec fft - x = paddle.ones((2, 3, 4)) - print(paddle.fft.rfftn(x)) - # Tensor(shape=[2, 3, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, - # [[[(24+0j), 0j , 0j ], - # [0j , 0j , 0j ], - # [0j , 0j , 0j ]], - # - # [[0j , 0j , 0j ], - # [0j , 0j , 0j ], - # [0j , 0j , 0j ]]]) - - # use axes(2, 0) - print(paddle.fft.rfftn(x, axes=(2, 0))) - # Tensor(shape=[2, 3, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, - # [[[(8+0j), 0j , 0j ], - # [(8+0j), 0j , 0j ], - # [(8+0j), 0j , 0j ]], - # - # [[0j , 0j , 0j ], - # [0j , 0j , 0j ], - # [0j , 0j , 0j ]]]) + >>> import paddle + + >>> # default, all axis will be used to exec fft + >>> x = paddle.ones((2, 3, 4)) + >>> print(paddle.fft.rfftn(x)) + Tensor(shape=[2, 3, 3], dtype=complex64, place=Place(cpu), stop_gradient=True, + [[[(24+0j), 0j, 0j], + [0j, 0j, 0j], + [0j, 0j, 0j]], + [[0j, 0j, 0j], + [0j, 0j, 0j], + [0j, 0j, 0j]]]) + + >>> # use axes(2, 0) + >>> print(paddle.fft.rfftn(x, axes=(2, 0))) + Tensor(shape=[2, 3, 4], dtype=complex64, place=Place(cpu), stop_gradient=True, + [[[(8+0j), 0j, 0j, 0j], + [(8+0j), 0j, 0j, 0j], + [(8+0j), 0j, 0j, 0j]], + [[0j, 0j, 0j, 0j], + [0j, 0j, 0j, 0j], + [0j, 0j, 0j, 0j]]]) """ return fftn_r2c(x, s, axes, norm, forward=True, onesided=True, name=name) @@ -755,17 +749,17 @@ def irfftn(x, s=None, axes=None, norm="backward", name=None): .. 
code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor([2.+2.j, 2.+2.j, 3.+3.j]).astype(paddle.complex128) - print(x) - irfftn_x = paddle.fft.irfftn(x) - print(irfftn_x) + >>> x = paddle.to_tensor([2.+2.j, 2.+2.j, 3.+3.j]).astype(paddle.complex128) + >>> print(x) + Tensor(shape=[3], dtype=complex128, place=Place(cpu), stop_gradient=True, + [(2+2j), (2+2j), (3+3j)]) - # Tensor(shape=[3], dtype=complex128, place=Place(cpu), stop_gradient=True, - # [(2+2j), (2+2j), (3+3j)]) - # Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=True, - # [ 2.25000000, -1.25000000, 0.25000000, 0.75000000]) + >>> irfftn_x = paddle.fft.irfftn(x) + >>> print(irfftn_x) + Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=True, + [2.25000000, -1.25000000, 0.25000000, 0.75000000]) """ return fftn_c2r(x, s, axes, norm, forward=False, name=name) @@ -809,13 +803,13 @@ def hfftn(x, s=None, axes=None, norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor([(2+2j), (2+2j), (3+3j)]) - hfftn_x = paddle.fft.hfftn(x) - print(hfftn_x) - # Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - # [ 9., 3., 1., -5.]) + >>> x = paddle.to_tensor([(2+2j), (2+2j), (3+3j)]) + >>> hfftn_x = paddle.fft.hfftn(x) + >>> print(hfftn_x) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [9., 3., 1., -5.]) """ return fftn_c2r(x, s, axes, norm, forward=True, name=name) @@ -853,15 +847,19 @@ def ihfftn(x, s=None, axes=None, norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle + + >>> spectrum = paddle.to_tensor([10.0, -5.0, 0.0, -1.0, 0.0, -5.0]) + >>> print(paddle.fft.ifft(spectrum)) + Tensor(shape=[6], dtype=complex64, place=Place(cpu), stop_gradient=True, + [(-0.1666666716337204+0j), (1-0j), + (2.3333334922790527-0j), (3.5+0j), + (2.3333334922790527+0j), (1+0j)]) - spectrum = paddle.to_tensor([10.0, -5.0, 0.0, -1.0, 0.0, -5.0]) - print(paddle.fft.ifft(spectrum)) - # Tensor(shape=[6], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, - # [(-0.1666666716337204+0j), (1-1.9868215517249155e-08j), (2.3333334922790527-1.9868215517249155e-08j), (3.5+0j), (2.3333334922790527+1.9868215517249155e-08j), (1+1.9868215517249155e-08j)]) - print(paddle.fft.ihfft(spectrum)) - # Tensor(shape = [4], dtype = complex64, place = CUDAPlace(0), stop_gradient = True, - # [(-0.1666666716337204+0j), (1-1.9868215517249155e-08j), (2.3333334922790527-1.9868215517249155e-08j), (3.5+0j)]) + >>> print(paddle.fft.ihfft(spectrum)) + Tensor(shape=[4], dtype=complex64, place=Place(cpu), stop_gradient=True, + [(-0.1666666716337204+0j), (1-0j), + (2.3333334922790527-0j), (3.5+0j)]) """ return fftn_r2c(x, s, axes, norm, forward=False, onesided=True, name=name) @@ -900,16 +898,16 @@ def fft2(x, s=None, axes=(-2, -1), norm="backward", name=None): .. 
code-block:: python - import paddle + >>> import paddle - arr = paddle.arange(2, dtype="float64") - x = paddle.meshgrid(arr, arr)[0] + >>> arr = paddle.arange(2, dtype="float64") + >>> x = paddle.meshgrid(arr, arr)[0] - fft2_xp = paddle.fft.fft2(x) - print(fft2_xp) - # Tensor(shape=[2, 2], dtype=complex128, place=Place(gpu:0), stop_gradient=True, - # [[ (2+0j), 0j ], - # [(-2+0j), 0j ]]) + >>> fft2_xp = paddle.fft.fft2(x) + >>> print(fft2_xp) + Tensor(shape=[2, 2], dtype=complex128, place=Place(cpu), stop_gradient=True, + [[(2+0j), 0j], + [(-2+0j), 0j]]) """ _check_at_least_ndim(x, 2) @@ -971,16 +969,16 @@ def ifft2(x, s=None, axes=(-2, -1), norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle - arr = paddle.arange(2, dtype="float64") - x = paddle.meshgrid(arr, arr)[0] + >>> arr = paddle.arange(2, dtype="float64") + >>> x = paddle.meshgrid(arr, arr)[0] - ifft2_xp = paddle.fft.ifft2(x) - print(ifft2_xp) - # Tensor(shape=[2, 2], dtype=complex128, place=Place(gpu:0), stop_gradient=True, - # [[ (0.5+0j), 0j ], - # [(-0.5+0j), 0j ]]) + >>> ifft2_xp = paddle.fft.ifft2(x) + >>> print(ifft2_xp) + Tensor(shape=[2, 2], dtype=complex128, place=Place(cpu), stop_gradient=True, + [[(0.5+0j), 0j], + [(-0.5+0j), 0j]]) """ _check_at_least_ndim(x, 2) if s is not None: @@ -1033,18 +1031,18 @@ def rfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle - arr = paddle.arange(5, dtype="float64") - x = paddle.meshgrid(arr, arr)[0] + >>> arr = paddle.arange(5, dtype="float64") + >>> x = paddle.meshgrid(arr, arr)[0] - result = paddle.fft.rfft2(x) - print(result.numpy()) - # [[ 50. +0.j 0. +0.j 0. +0.j ] - # [-12.5+17.20477401j 0. +0.j 0. +0.j ] - # [-12.5 +4.0614962j 0. +0.j 0. +0.j ] - # [-12.5 -4.0614962j 0. +0.j 0. +0.j ] - # [-12.5-17.20477401j 0. +0.j 0. +0.j ]] + >>> result = paddle.fft.rfft2(x) + >>> print(result.numpy()) + [[50. +0.j 0. +0.j 0. +0.j] + [-12.5+17.20477401j 0. +0.j 0. +0.j] + [-12.5 +4.0614962j 0. +0.j 0. +0.j] + [-12.5 -4.0614962j 0. +0.j 0. +0.j] + [-12.5-17.20477401j 0. +0.j 0. +0.j]] """ _check_at_least_ndim(x, 2) if s is not None: @@ -1093,14 +1091,14 @@ def irfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): .. code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor([[3.+3.j, 2.+2.j, 3.+3.j], [2.+2.j, 2.+2.j, 3.+3.j]]) - irfft2_x = paddle.fft.irfft2(x) - print(irfft2_x) - # Tensor(shape=[2, 4], dtype=float32, place=Place(cpu), stop_gradient=True, - # [[ 2.37500000, -1.12500000, 0.37500000, 0.87500000], - # [ 0.12500000, 0.12500000, 0.12500000, 0.12500000]]) + >>> x = paddle.to_tensor([[3.+3.j, 2.+2.j, 3.+3.j], [2.+2.j, 2.+2.j, 3.+3.j]]) + >>> irfft2_x = paddle.fft.irfft2(x) + >>> print(irfft2_x) + Tensor(shape=[2, 4], dtype=float32, place=Place(cpu), stop_gradient=True, + [[2.37500000, -1.12500000, 0.37500000, 0.87500000], + [0.12500000, 0.12500000, 0.12500000, 0.12500000]]) """ _check_at_least_ndim(x, 2) if s is not None: @@ -1142,14 +1140,14 @@ def hfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): .. 
code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor([[3.+3.j, 2.+2.j, 3.+3.j], [2.+2.j, 2.+2.j, 3.+3.j]]) - hfft2_x = paddle.fft.hfft2(x) - print(hfft2_x) - # Tensor(shape=[2, 4], dtype=float32, place=Place(cpu), stop_gradient=True, - # [[19., 7., 3., -9.], - # [ 1., 1., 1., 1.]]) + >>> x = paddle.to_tensor([[3.+3.j, 2.+2.j, 3.+3.j], [2.+2.j, 2.+2.j, 3.+3.j]]) + >>> hfft2_x = paddle.fft.hfft2(x) + >>> print(hfft2_x) + Tensor(shape=[2, 4], dtype=float32, place=Place(cpu), stop_gradient=True, + [[19., 7., 3., -9.], + [1., 1., 1., 1.]]) """ _check_at_least_ndim(x, 2) if s is not None: @@ -1194,25 +1192,25 @@ def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): .. code-block:: python - import paddle - - arr = paddle.arange(5, dtype="float64") - x = paddle.meshgrid(arr, arr)[0] - print(x) - # Tensor(shape=[5, 5], dtype=float64, place=Place(gpu:0), stop_gradient=True, - # [[0., 0., 0., 0., 0.], - # [1., 1., 1., 1., 1.], - # [2., 2., 2., 2., 2.], - # [3., 3., 3., 3., 3.], - # [4., 4., 4., 4., 4.]]) - - ihfft2_xp = paddle.fft.ihfft2(x) - print(ihfft2_xp.numpy()) - # [[ 2. +0.j 0. +0.j 0. +0.j ] - # [-0.5-0.68819096j 0. +0.j 0. +0.j ] - # [-0.5-0.16245985j 0. +0.j 0. +0.j ] - # [-0.5+0.16245985j 0. +0.j 0. +0.j ] - # [-0.5+0.68819096j 0. +0.j 0. +0.j ]] + >>> import paddle + + >>> arr = paddle.arange(5, dtype="float64") + >>> x = paddle.meshgrid(arr, arr)[0] + >>> print(x) + Tensor(shape=[5, 5], dtype=float64, place=Place(cpu), stop_gradient=True, + [[0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1.], + [2., 2., 2., 2., 2.], + [3., 3., 3., 3., 3.], + [4., 4., 4., 4., 4.]]) + + >>> ihfft2_xp = paddle.fft.ihfft2(x) + >>> print(ihfft2_xp.numpy()) + [[2. +0.j 0. -0.j 0. -0.j] + [-0.5-0.68819096j 0. +0.j 0. +0.j] + [-0.5-0.16245985j 0. +0.j 0. +0.j] + [-0.5+0.16245985j 0. +0.j 0. +0.j] + [-0.5+0.68819096j 0. +0.j 0. +0.j]] """ _check_at_least_ndim(x, 2) if s is not None: @@ -1259,13 +1257,13 @@ def fftfreq(n, d=1.0, dtype=None, name=None): .. code-block:: python - import paddle + >>> import paddle - scalar_temp = 0.5 - fftfreq_xp = paddle.fft.fftfreq(5, d=scalar_temp) - print(fftfreq_xp) - # Tensor(shape=[5], dtype=float32, place=CUDAPlace(0), stop_gradient=True, - # [ 0. , 0.40000001, 0.80000001, -0.80000001, -0.40000001]) + >>> scalar_temp = 0.5 + >>> fftfreq_xp = paddle.fft.fftfreq(5, d=scalar_temp) + >>> print(fftfreq_xp) + Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, + [0., 0.40000001, 0.80000001, -0.80000001, -0.40000001]) """ if d * n == 0: raise ValueError("d or n should not be 0.") @@ -1308,14 +1306,13 @@ def rfftfreq(n, d=1.0, dtype=None, name=None): .. code-block:: python - import paddle - - scalar_temp = 0.3 - rfftfreq_xp = paddle.fft.rfftfreq(5, d=scalar_temp) - print(rfftfreq_xp) + >>> import paddle - # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, - # [0. , 0.66666669, 1.33333337]) + >>> scalar_temp = 0.3 + >>> rfftfreq_xp = paddle.fft.rfftfreq(5, d=scalar_temp) + >>> print(rfftfreq_xp) + Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, + [0., 0.66666669, 1.33333337]) """ if d * n == 0: @@ -1349,17 +1346,17 @@ def fftshift(x, axes=None, name=None): .. code-block:: python - import paddle + >>> import paddle - fftfreq_xp = paddle.fft.fftfreq(5, d=0.3) - print(fftfreq_xp) - # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True, - # [ 0. 
, 0.66666669, 1.33333337, -1.33333337, -0.66666669]) + >>> fftfreq_xp = paddle.fft.fftfreq(5, d=0.3) + >>> print(fftfreq_xp) + Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, + [0., 0.66666669, 1.33333337, -1.33333337, -0.66666669]) - res = paddle.fft.fftshift(fftfreq_xp) - print(res) - # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True, - # [-1.33333337, -0.66666669, 0. , 0.66666669, 1.33333337]) + >>> res = paddle.fft.fftshift(fftfreq_xp) + >>> print(res) + Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, + [-1.33333337, -0.66666669, 0., 0.66666669, 1.33333337]) """ shape = paddle.shape(x) @@ -1394,17 +1391,17 @@ def ifftshift(x, axes=None, name=None): .. code-block:: python - import paddle + >>> import paddle - fftfreq_xp = paddle.fft.fftfreq(5, d=0.3) - print(fftfreq_xp) - # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True, - # [ 0. , 0.66666669, 1.33333337, -1.33333337, -0.66666669]) + >>> fftfreq_xp = paddle.fft.fftfreq(5, d=0.3) + >>> print(fftfreq_xp) + Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, + [0., 0.66666669, 1.33333337, -1.33333337, -0.66666669]) - res = paddle.fft.ifftshift(fftfreq_xp) - print(res) - # Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True, - # [ 1.33333337, -1.33333337, -0.66666669, 0. , 0.66666669]) + >>> res = paddle.fft.ifftshift(fftfreq_xp) + >>> print(res) + Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, + [1.33333337, -1.33333337, -0.66666669, 0., 0.66666669]) """ shape = paddle.shape(x) diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index f0e64bd633365..21064b9bb0e90 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -85,13 +85,13 @@ class Metric(metaclass=abc.ABCMeta): .. code-block:: python :name: code-compute-example - def compute(pred, label): - # sort prediction and slice the top-5 scores - pred = paddle.argsort(pred, descending=True)[:, :5] - # calculate whether the predictions are correct - correct = pred == label - return paddle.cast(correct, dtype='float32') - + >>> def compute(pred, label): + ... # sort prediction and slice the top-5 scores + ... pred = paddle.argsort(pred, descending=True)[:, :5] + ... # calculate whether the predictions are correct + ... correct = pred == label + ... return paddle.cast(correct, dtype='float32') + ... With the :code:`compute`, we split some calculations to OPs (which may run on GPU devices, will be faster), and only fetch 1 tensor with shape as [N, 5] instead of 2 tensors with shapes as [N, 10] and [N, 1]. @@ -100,15 +100,15 @@ def compute(pred, label): .. code-block:: python :name: code-update-example - def update(self, correct): - accs = [] - for i, k in enumerate(self.topk): - num_corrects = correct[:, :k].sum() - num_samples = len(correct) - accs.append(float(num_corrects) / num_samples) - self.total[i] += num_corrects - self.count[i] += num_samples - return accs + >>> def update(self, correct): + ... accs = [] + ... for i, k in enumerate(self.topk): + ... num_corrects = correct[:, :k].sum() + ... num_samples = len(correct) + ... accs.append(float(num_corrects) / num_samples) + ... self.total[i] += num_corrects + ... self.count[i] += num_samples + ... return accs """ def __init__(self): @@ -201,44 +201,45 @@ class Accuracy(Metric): .. 
code-block:: python :name: code-standalone-example - import numpy as np - import paddle + >>> import numpy as np + >>> import paddle - x = paddle.to_tensor(np.array([ - [0.1, 0.2, 0.3, 0.4], - [0.1, 0.4, 0.3, 0.2], - [0.1, 0.2, 0.4, 0.3], - [0.1, 0.2, 0.3, 0.4]])) - y = paddle.to_tensor(np.array([[0], [1], [2], [3]])) + >>> x = paddle.to_tensor(np.array([ + ... [0.1, 0.2, 0.3, 0.4], + ... [0.1, 0.4, 0.3, 0.2], + ... [0.1, 0.2, 0.4, 0.3], + ... [0.1, 0.2, 0.3, 0.4]])) + >>> y = paddle.to_tensor(np.array([[0], [1], [2], [3]])) - m = paddle.metric.Accuracy() - correct = m.compute(x, y) - m.update(correct) - res = m.accumulate() - print(res) # 0.75 + >>> m = paddle.metric.Accuracy() + >>> correct = m.compute(x, y) + >>> m.update(correct) + >>> res = m.accumulate() + >>> print(res) + 0.75 .. code-block:: python :name: code-model-api-example - import paddle - from paddle.static import InputSpec - import paddle.vision.transforms as T - from paddle.vision.datasets import MNIST - - input = InputSpec([None, 1, 28, 28], 'float32', 'image') - label = InputSpec([None, 1], 'int64', 'label') - transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) - train_dataset = MNIST(mode='train', transform=transform) - - model = paddle.Model(paddle.vision.models.LeNet(), input, label) - optim = paddle.optimizer.Adam( - learning_rate=0.001, parameters=model.parameters()) - model.prepare( - optim, - loss=paddle.nn.CrossEntropyLoss(), - metrics=paddle.metric.Accuracy()) - - model.fit(train_dataset, batch_size=64) + >>> import paddle + >>> from paddle.static import InputSpec + >>> import paddle.vision.transforms as T + >>> from paddle.vision.datasets import MNIST + + >>> input = InputSpec([None, 1, 28, 28], 'float32', 'image') + >>> label = InputSpec([None, 1], 'int64', 'label') + >>> transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) + >>> train_dataset = MNIST(mode='train', transform=transform) + + >>> model = paddle.Model(paddle.vision.models.LeNet(), input, label) + >>> optim = paddle.optimizer.Adam( + ... learning_rate=0.001, parameters=model.parameters()) + >>> model.prepare( + ... optim, + ... loss=paddle.nn.CrossEntropyLoss(), + ... metrics=paddle.metric.Accuracy()) + ... + >>> model.fit(train_dataset, batch_size=64) """ @@ -353,51 +354,52 @@ class Precision(Metric): .. code-block:: python :name: code-standalone-example - import numpy as np - import paddle + >>> import numpy as np + >>> import paddle - x = np.array([0.1, 0.5, 0.6, 0.7]) - y = np.array([0, 1, 1, 1]) + >>> x = np.array([0.1, 0.5, 0.6, 0.7]) + >>> y = np.array([0, 1, 1, 1]) - m = paddle.metric.Precision() - m.update(x, y) - res = m.accumulate() - print(res) # 1.0 + >>> m = paddle.metric.Precision() + >>> m.update(x, y) + >>> res = m.accumulate() + >>> print(res) + 1.0 .. 
code-block:: python :name: code-model-api-example - import numpy as np - - import paddle - import paddle.nn as nn - - class Data(paddle.io.Dataset): - def __init__(self): - super().__init__() - self.n = 1024 - self.x = np.random.randn(self.n, 10).astype('float32') - self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') - - def __getitem__(self, idx): - return self.x[idx], self.y[idx] - - def __len__(self): - return self.n - - model = paddle.Model(nn.Sequential( - nn.Linear(10, 1), - nn.Sigmoid() - )) - optim = paddle.optimizer.Adam( - learning_rate=0.001, parameters=model.parameters()) - model.prepare( - optim, - loss=nn.BCELoss(), - metrics=paddle.metric.Precision()) - - data = Data() - model.fit(data, batch_size=16) + >>> import numpy as np + + >>> import paddle + >>> import paddle.nn as nn + + >>> class Data(paddle.io.Dataset): + ... def __init__(self): + ... super().__init__() + ... self.n = 1024 + ... self.x = np.random.randn(self.n, 10).astype('float32') + ... self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') + ... + ... def __getitem__(self, idx): + ... return self.x[idx], self.y[idx] + ... + ... def __len__(self): + ... return self.n + ... + >>> model = paddle.Model(nn.Sequential( + ... nn.Linear(10, 1), + ... nn.Sigmoid() + ... )) + >>> optim = paddle.optimizer.Adam( + ... learning_rate=0.001, parameters=model.parameters()) + >>> model.prepare( + ... optim, + ... loss=nn.BCELoss(), + ... metrics=paddle.metric.Precision()) + ... + >>> data = Data() + >>> model.fit(data, batch_size=16) """ def __init__(self, name='precision', *args, **kwargs): @@ -484,51 +486,52 @@ class Recall(Metric): .. code-block:: python :name: code-standalone-example - import numpy as np - import paddle + >>> import numpy as np + >>> import paddle - x = np.array([0.1, 0.5, 0.6, 0.7]) - y = np.array([1, 0, 1, 1]) + >>> x = np.array([0.1, 0.5, 0.6, 0.7]) + >>> y = np.array([1, 0, 1, 1]) - m = paddle.metric.Recall() - m.update(x, y) - res = m.accumulate() - print(res) # 2.0 / 3.0 + >>> m = paddle.metric.Recall() + >>> m.update(x, y) + >>> res = m.accumulate() + >>> print(res) + 0.6666666666666666 .. code-block:: python :name: code-model-api-example - import numpy as np - - import paddle - import paddle.nn as nn - - class Data(paddle.io.Dataset): - def __init__(self): - super().__init__() - self.n = 1024 - self.x = np.random.randn(self.n, 10).astype('float32') - self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') - - def __getitem__(self, idx): - return self.x[idx], self.y[idx] - - def __len__(self): - return self.n - - model = paddle.Model(nn.Sequential( - nn.Linear(10, 1), - nn.Sigmoid() - )) - optim = paddle.optimizer.Adam( - learning_rate=0.001, parameters=model.parameters()) - model.prepare( - optim, - loss=nn.BCELoss(), - metrics=[paddle.metric.Precision(), paddle.metric.Recall()]) - - data = Data() - model.fit(data, batch_size=16) + >>> import numpy as np + + >>> import paddle + >>> import paddle.nn as nn + + >>> class Data(paddle.io.Dataset): + ... def __init__(self): + ... super().__init__() + ... self.n = 1024 + ... self.x = np.random.randn(self.n, 10).astype('float32') + ... self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') + ... + ... def __getitem__(self, idx): + ... return self.x[idx], self.y[idx] + ... + ... def __len__(self): + ... return self.n + ... + >>> model = paddle.Model(nn.Sequential( + ... nn.Linear(10, 1), + ... nn.Sigmoid() + ... )) + >>> optim = paddle.optimizer.Adam( + ... 
learning_rate=0.001, parameters=model.parameters()) + >>> model.prepare( + ... optim, + ... loss=nn.BCELoss(), + ... metrics=[paddle.metric.Precision(), paddle.metric.Recall()]) + ... + >>> data = Data() + >>> model.fit(data, batch_size=16) """ def __init__(self, name='recall', *args, **kwargs): @@ -624,56 +627,56 @@ class Auc(Metric): .. code-block:: python :name: code-standalone-example - import numpy as np - import paddle + >>> import numpy as np + >>> import paddle - m = paddle.metric.Auc() + >>> m = paddle.metric.Auc() - n = 8 - class0_preds = np.random.random(size = (n, 1)) - class1_preds = 1 - class0_preds + >>> n = 8 + >>> class0_preds = np.random.random(size = (n, 1)) + >>> class1_preds = 1 - class0_preds - preds = np.concatenate((class0_preds, class1_preds), axis=1) - labels = np.random.randint(2, size = (n, 1)) + >>> preds = np.concatenate((class0_preds, class1_preds), axis=1) + >>> labels = np.random.randint(2, size = (n, 1)) - m.update(preds=preds, labels=labels) - res = m.accumulate() + >>> m.update(preds=preds, labels=labels) + >>> res = m.accumulate() .. code-block:: python :name: code-model-api-example - import numpy as np - import paddle - import paddle.nn as nn - - class Data(paddle.io.Dataset): - def __init__(self): - super().__init__() - self.n = 1024 - self.x = np.random.randn(self.n, 10).astype('float32') - self.y = np.random.randint(2, size=(self.n, 1)).astype('int64') - - def __getitem__(self, idx): - return self.x[idx], self.y[idx] - - def __len__(self): - return self.n - - model = paddle.Model(nn.Sequential( - nn.Linear(10, 2), nn.Softmax()) - ) - optim = paddle.optimizer.Adam( - learning_rate=0.001, parameters=model.parameters()) - - def loss(x, y): - return nn.functional.nll_loss(paddle.log(x), y) - - model.prepare( - optim, - loss=loss, - metrics=paddle.metric.Auc()) - data = Data() - model.fit(data, batch_size=16) + >>> import numpy as np + >>> import paddle + >>> import paddle.nn as nn + + >>> class Data(paddle.io.Dataset): + ... def __init__(self): + ... super().__init__() + ... self.n = 1024 + ... self.x = np.random.randn(self.n, 10).astype('float32') + ... self.y = np.random.randint(2, size=(self.n, 1)).astype('int64') + ... + ... def __getitem__(self, idx): + ... return self.x[idx], self.y[idx] + ... + ... def __len__(self): + ... return self.n + ... + >>> model = paddle.Model(nn.Sequential( + ... nn.Linear(10, 2), nn.Softmax()) + ... ) + >>> optim = paddle.optimizer.Adam( + ... learning_rate=0.001, parameters=model.parameters()) + ... + >>> def loss(x, y): + ... return nn.functional.nll_loss(paddle.log(x), y) + ... + >>> model.prepare( + ... optim, + ... loss=loss, + ... metrics=paddle.metric.Auc()) + >>> data = Data() + >>> model.fit(data, batch_size=16) """ def __init__( @@ -789,12 +792,14 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None): Examples: .. 
code-block:: python - import paddle + >>> import paddle - predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32') - label = paddle.to_tensor([[2], [0]], dtype="int64") - result = paddle.metric.accuracy(input=predictions, label=label, k=1) - # 0.5 + >>> predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32') + >>> label = paddle.to_tensor([[2], [0]], dtype="int64") + >>> result = paddle.metric.accuracy(input=predictions, label=label, k=1) + >>> print(result) + Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 0.50000000) """ if label.dtype == paddle.int32: label = paddle.cast(label, paddle.int64) diff --git a/python/paddle/onnx/export.py b/python/paddle/onnx/export.py index 2167bfb664c1f..4ed3379d3d169 100644 --- a/python/paddle/onnx/export.py +++ b/python/paddle/onnx/export.py @@ -46,45 +46,46 @@ def export(layer, path, input_spec=None, opset_version=9, **configs): Examples: .. code-block:: python - import paddle - - class LinearNet(paddle.nn.Layer): - def __init__(self): - super().__init__() - self._linear = paddle.nn.Linear(128, 10) - - def forward(self, x): - return self._linear(x) - - # Export model with 'InputSpec' to support dynamic input shape. - def export_linear_net(): - model = LinearNet() - x_spec = paddle.static.InputSpec(shape=[None, 128], dtype='float32') - paddle.onnx.export(model, 'linear_net', input_spec=[x_spec]) - - export_linear_net() - - class Logic(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, x, y, z): - if z: - return x - else: - return y - - # Export model with 'Tensor' to support pruned model by set 'output_spec'. - def export_logic(): - model = Logic() - x = paddle.to_tensor([1]) - y = paddle.to_tensor([2]) - # Static and run model. - paddle.jit.to_static(model) - out = model(x, y, z=True) - paddle.onnx.export(model, 'pruned', input_spec=[x], output_spec=[out]) - - export_logic() + >>> import paddle + + >>> class LinearNet(paddle.nn.Layer): + ... def __init__(self): + ... super().__init__() + ... self._linear = paddle.nn.Linear(128, 10) + ... + ... def forward(self, x): + ... return self._linear(x) + ... + >>> # Export model with 'InputSpec' to support dynamic input shape. + >>> def export_linear_net(): + ... model = LinearNet() + ... x_spec = paddle.static.InputSpec(shape=[None, 128], dtype='float32') + ... paddle.onnx.export(model, 'linear_net', input_spec=[x_spec]) + ... + >>> # doctest: +SKIP('raise ImportError') + >>> export_linear_net() + + >>> class Logic(paddle.nn.Layer): + ... def __init__(self): + ... super().__init__() + ... + ... def forward(self, x, y, z): + ... if z: + ... return x + ... else: + ... return y + ... + >>> # Export model with 'Tensor' to support pruned model by set 'output_spec'. + >>> def export_logic(): + ... model = Logic() + ... x = paddle.to_tensor([1]) + ... y = paddle.to_tensor([2]) + ... # Static and run model. + ... paddle.jit.to_static(model) + ... out = model(x, y, z=True) + ... paddle.onnx.export(model, 'pruned', input_spec=[x], output_spec=[out]) + ... + >>> export_logic() """ p2o = try_import('paddle2onnx')
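# Note (illustrative, not part of the patch): the hunks above convert legacy
# "commented output" examples into doctest-style ">>>" examples, so the printed
# output can be verified automatically instead of drifting silently. Below is a
# minimal, self-contained sketch of that kind of check using only the standard
# library's doctest module; the names here (batched) are hypothetical, and the
# project's own CI sample-code checker (xdoctest-based, to my understanding)
# works differently, so treat this purely as an assumption-laden illustration.
import doctest


def batched(seq, batch_size):
    """Group items into fixed-size batches, mirroring the paddle.batch example.

    >>> list(batched(range(10), 2))
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    """
    batch = []
    for item in seq:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch


if __name__ == "__main__":
    # Reports a failure whenever the actual output no longer matches the
    # expected output written after the ">>>" lines in the docstring.
    doctest.testmod()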