diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index fc26715d7cc4e..b82434d0588ac 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -152,6 +152,12 @@ def _index_(var):
     def _ndim_(var):
         return len(var.shape)

+    def ndimension(var):
+        return len(var.shape)
+
+    def dim(var):
+        return len(var.shape)
+
     @property
     def _size_(var):
         return int(np.prod(var.shape))
@@ -174,8 +180,8 @@ def _T_(var):
         ('__len__', _len_),
         ('__index__', _index_),
         ('astype', astype),
-        ('dim', lambda x: len(x.shape)),
-        ('ndimension', lambda x: len(x.shape)),
+        ('dim', dim),
+        ('ndimension', ndimension),
         ('ndim', _ndim_),
         ('size', _size_),
         ('T', _T_),
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index a9ba6f91a1e25..823f790f41030 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1260,6 +1260,7 @@ class Variable(metaclass=VariableMetaClass):
         In Static Graph Mode:

         .. code-block:: python
+            :name: code-example-1

             import paddle.fluid as fluid
             cur_program = fluid.Program()
@@ -1271,6 +1272,7 @@ class Variable(metaclass=VariableMetaClass):
         In Dygraph Mode:

         .. code-block:: python
+            :name: code-example-2

             import paddle.fluid as fluid
             import numpy as np
@@ -5743,21 +5745,22 @@ def clone(self, for_test=False):
         use :code:`clone` after :code:`Opimizer.minimize`, but we still
         recommend you to use :code:`clone` before using :code:`Opimizer.minimize`.

-        For Example:
-          ::
+        Examples:
+            .. code-block:: python
+                :name: code-example-1

-            import paddle
-            import paddle.static as static
+                import paddle
+                import paddle.static as static

-            paddle.enable_static()
+                paddle.enable_static()

-            img = static.data(name='image', shape=[None, 784])
-            pred = static.nn.fc(x=img, size=10, actvation='relu')
-            loss = paddle.mean(pred)
-            # Here we use clone before Momentum
-            test_program = static.default_main_program().clone(for_test=True)
-            optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
-            optimizer.minimize(loss)
+                img = static.data(name='image', shape=[None, 784])
+                pred = static.nn.fc(x=img, size=10, activation='relu')
+                loss = paddle.mean(pred)
+                # Here we use clone before Momentum
+                test_program = static.default_main_program().clone(for_test=True)
+                optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
+                optimizer.minimize(loss)

         Args:
@@ -5778,6 +5781,7 @@ def clone(self, for_test=False):
         after :code:`clone`:

         .. code-block:: python
+            :name: code-example-2

             import paddle

@@ -5795,6 +5799,7 @@ def print_prog(prog):
         1. To clone a test program, the sample code is:
                 .. code-block:: python
+                    :name: code-example-3

                     import paddle
                     import paddle.static as static
@@ -5847,6 +5852,7 @@ def print_prog(prog):
         2. The clone method can be avoid if you create program for training and program for testing individually.
                 .. code-block:: python
+                    :name: code-example-4

                     import paddle
                     import paddle.static as static
@@ -7235,30 +7241,32 @@ def program_guard(main_program, startup_program=None):
             Default: None.

     Examples:
-       .. code-block:: python
+        .. code-block:: python
+            :name: code-example-1

-          import paddle
+            import paddle

-          paddle.enable_static()
-          main_program = paddle.static.Program()
-          startup_program = paddle.static.Program()
-          with paddle.static.program_guard(main_program, startup_program):
-              data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
-              hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')
+            paddle.enable_static()
+            main_program = paddle.static.Program()
+            startup_program = paddle.static.Program()
+            with paddle.static.program_guard(main_program, startup_program):
+                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+                hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')

     Notes: The temporary :code:`Program` can be used if the user does not need
         to construct either of startup program or main program.

     Examples:
-       .. code-block:: python
+        .. code-block:: python
+            :name: code-example-2

-          import paddle
+            import paddle

-          paddle.enable_static()
-          main_program = paddle.static.Program()
-          # does not care about startup program. Just pass a temporary value.
-          with paddle.static.program_guard(main_program, paddle.static.Program()):
-              data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+            paddle.enable_static()
+            main_program = paddle.static.Program()
+            # does not care about startup program. Just pass a temporary value.
+            with paddle.static.program_guard(main_program, paddle.static.Program()):
+                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
     """
     from .data_feeder import check_type
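Reviewer note: the dygraph half of this change only gives the patched lambdas real names, so the behaviour is easy to check directly. A minimal sketch (illustrative, not part of the patch, assuming any Paddle 2.x build) of what the bound methods should return:

.. code-block:: python

    import paddle

    # Dygraph mode is the default; build a tensor with three dimensions.
    x = paddle.to_tensor([[[1.0], [2.0]], [[3.0], [4.0]]])  # shape: [2, 2, 1]

    # `dim()` and `ndimension()` are the named replacements for the old
    # `lambda x: len(x.shape)` bindings; both should agree with `ndim`.
    assert x.dim() == 3
    assert x.ndimension() == 3
    assert x.ndim == 3

A side benefit of the rename: named functions carry a proper ``__name__``, which the lambdas lacked.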
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 41299a624c12b..4191b6a8142b7 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -323,6 +323,48 @@ def _ndim_(self):
         """
         return len(self.shape)

+    def ndimension(self):
+        """
+        Returns the dimension of the current Variable.
+
+        Returns:
+            the dimension
+
+        Examples:
+            .. code-block:: python
+
+                import paddle
+
+                paddle.enable_static()
+
+                # create a static Variable
+                x = paddle.static.data(name='x', shape=[3, 2, 1])
+                # print the dimension of the Variable
+                print(x.ndimension())  # 3
+        """
+        return len(self.shape)
+
+    def dim(self):
+        """
+        Returns the dimension of the current Variable.
+
+        Returns:
+            the dimension
+
+        Examples:
+            .. code-block:: python
+
+                import paddle
+
+                paddle.enable_static()
+
+                # create a static Variable
+                x = paddle.static.data(name='x', shape=[3, 2, 1])
+                # print the dimension of the Variable
+                print(x.dim())  # 3
+        """
+        return len(self.shape)
+
     def _scalar_add_(var, value):
         return _scalar_op_(var, 1.0, value)

@@ -509,8 +551,8 @@ def to_dense(var):
         ('append', append),
         ('item', _item),
         ('pop', pop),
-        ('dim', lambda x: len(x.shape)),
-        ('ndimension', lambda x: len(x.shape)),
+        ('dim', dim),
+        ('ndimension', ndimension),
         ('ndim', _ndim_),
         (
             '__add__',
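The static-graph bindings can be exercised exactly as the new docstrings describe. A minimal sketch (illustrative only, not part of the patch); note that ``dim`` and ``ndimension`` must be called, while ``ndim`` remains a property:

.. code-block:: python

    import paddle

    paddle.enable_static()

    # A static Variable with three dimensions.
    x = paddle.static.data(name='x', shape=[3, 2, 1])

    print(x.dim())         # 3
    print(x.ndimension())  # 3
    print(x.ndim)          # 3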
diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py
index 007b417a98b11..c48128d2c083f 100644
--- a/python/paddle/static/nn/control_flow.py
+++ b/python/paddle/static/nn/control_flow.py
@@ -896,16 +896,18 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
         3. If it is in static graph mode, any tensors or operations created outside
            or inside of ``true_fn`` and ``false_fn`` will be in net building
            regardless of which branch is selected at runtime. This has frequently
-           surprised users who expected a lazy semantics. For example:
+           surprised users who expected lazy semantics.

-           .. code-block:: python
+           Examples:
+               .. code-block:: python
+                   :name: code-example-1

-               import paddle
+                   import paddle

-               a = paddle.zeros((1, 1))
-               b = paddle.zeros((1, 1))
-               c = a * b
-               out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
+                   a = paddle.zeros((1, 1))
+                   b = paddle.zeros((1, 1))
+                   c = a * b
+                   out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)

         No matter whether ``a < b`` , ``c = a * b`` will be in net building and
         run. ``a + c`` and ``b * b`` will be in net building, but only one
@@ -933,6 +935,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):

     Examples:
         .. code-block:: python
+            :name: code-example-2

             import paddle
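To make the eager-building point above concrete, a variant of the docstring's example can be run end to end; both branches are compiled into the program, but only the selected one contributes to the fetched result. A minimal sketch (illustrative, assuming a CPU build; the ``Executor`` plumbing and the change of ``b`` to ones are not part of the patch):

.. code-block:: python

    import paddle

    paddle.enable_static()

    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        a = paddle.zeros((1, 1))
        b = paddle.ones((1, 1))
        c = a * b
        # Both branches are added to the network at build time...
        out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup)
    # ...but only the selected branch (a + c, since 0 < 1) runs here.
    (res,) = exe.run(main, fetch_list=[out])
    print(res)  # [[0.]]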