Commit

Modify COPY-FROM No.4 optimizer (PaddlePaddle#55238)
Signed-off-by: jjyaoao <[email protected]>
jjyaoao authored and cqulilujia committed Jul 24, 2023
1 parent 37695f5 commit ad3d4ab
Showing 2 changed files with 126 additions and 16 deletions.
2 changes: 2 additions & 0 deletions python/paddle/optimizer/adam.py
@@ -96,6 +96,7 @@ class Adam(Optimizer):
Examples:
.. code-block:: python
:name: code-example1
import paddle
@@ -110,6 +111,7 @@ class Adam(Optimizer):
adam.clear_grad()
.. code-block:: python
:name: code-example2
# Adam with beta1/beta2 as Tensor and weight_decay as float
import paddle
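
The second Adam code block is cut off after its first comment line. Not part of this diff: a minimal sketch of what such an example looks like, with hypothetical tensor shapes, assuming Adam's beta1/beta2 arguments accept Tensors while weight_decay stays a float.

    import paddle

    inp = paddle.rand([10, 10], dtype="float32")
    linear = paddle.nn.Linear(10, 10)
    out = linear(inp)
    loss = paddle.mean(out)

    # beta1/beta2 passed as Tensors; weight_decay passed as a plain float
    beta1 = paddle.to_tensor([0.9], dtype="float32")
    beta2 = paddle.to_tensor([0.99], dtype="float32")
    adam = paddle.optimizer.Adam(learning_rate=0.1,
                                 parameters=linear.parameters(),
                                 beta1=beta1,
                                 beta2=beta2,
                                 weight_decay=0.01)
    loss.backward()
    adam.step()
    adam.clear_grad()
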
140 changes: 124 additions & 16 deletions python/paddle/optimizer/lr.py
@@ -126,6 +126,18 @@ def step(self, epoch=None):
Returns:
None
Examples:
    .. code-block:: python

        import paddle

        value = paddle.arange(26, dtype='float32')
        a = paddle.reshape(value, [2, 13])
        linear = paddle.nn.Linear(13, 5)
        adadelta = paddle.optimizer.Adadelta(learning_rate=0.0003, epsilon=1e-06, rho=0.95,
                                             parameters=linear.parameters())
        out = linear(a)
        out.backward()
        adadelta.step()
        adadelta.clear_grad()
"""
if epoch is None:
self.last_epoch += 1
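
Not part of this diff: a minimal sketch of how step() is normally driven once a scheduler is handed to an optimizer as its learning_rate; StepDecay and the shapes here are hypothetical stand-ins.

    import paddle

    linear = paddle.nn.Linear(10, 10)
    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.1, step_size=2, gamma=0.5)
    sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())

    for epoch in range(5):
        x = paddle.uniform([4, 10])
        loss = paddle.mean(linear(x))
        loss.backward()
        sgd.step()
        sgd.clear_grad()
        scheduler.step()    # epoch=None: last_epoch is incremented by 1
        print(epoch, scheduler.get_lr())
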
@@ -240,7 +252,9 @@ class NoamDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -259,7 +273,12 @@ class NoamDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
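
Every static-graph block (Example2) in this diff is truncated at this point. Not part of this diff: a sketch of how such an example typically continues, assuming the usual paddle.static program_guard/Executor workflow; the fc layer, shapes, and NoamDecay arguments are illustrative only.

    import numpy as np
    import paddle

    paddle.enable_static()
    main_prog = paddle.static.Program()
    start_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, start_prog):
        x = paddle.static.data(name='x', shape=[None, 4, 5])
        y = paddle.static.data(name='y', shape=[None, 4, 5])
        z = paddle.static.nn.fc(x, 100)
        loss = paddle.mean(z)
        scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True)
        sgd = paddle.optimizer.SGD(learning_rate=scheduler)
        sgd.minimize(loss)

    exe = paddle.static.Executor()
    exe.run(start_prog)
    for epoch in range(20):
        for batch_id in range(5):
            out = exe.run(
                main_prog,
                feed={
                    'x': np.random.randn(3, 4, 5).astype('float32'),
                    'y': np.random.randn(3, 4, 5).astype('float32'),
                },
                fetch_list=loss.name)
            scheduler.step()    # If you update learning rate each step
        # scheduler.step()      # If you update learning rate each epoch
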
@@ -343,7 +362,9 @@ class PiecewiseDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -362,7 +383,12 @@ class PiecewiseDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
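
The dynamic-graph blocks (Example1) are likewise cut off right after their imports. Not part of this diff: a sketch of the training loop they typically wrap around the scheduler, shown with PiecewiseDecay and hypothetical boundaries/values; the other schedulers in this file follow the same pattern.

    import paddle

    linear = paddle.nn.Linear(10, 10)
    scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[3, 6, 9],
                                                   values=[0.1, 0.2, 0.3, 0.4],
                                                   verbose=True)
    sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
    for epoch in range(20):
        for batch_id in range(5):
            x = paddle.uniform([10, 10])
            out = linear(x)
            loss = paddle.mean(out)
            loss.backward()
            sgd.step()
            sgd.clear_grad()
            scheduler.step()    # If you update learning rate each step
        # scheduler.step()      # If you update learning rate each epoch
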
@@ -433,11 +459,11 @@ class NaturalExpDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
@@ -452,7 +478,12 @@ class NaturalExpDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -515,7 +546,9 @@ class InverseTimeDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -534,7 +567,12 @@ class InverseTimeDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -611,7 +649,9 @@ class PolynomialDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -630,7 +670,12 @@ class PolynomialDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -735,7 +780,9 @@ class LinearWarmup(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -755,7 +802,12 @@ class LinearWarmup(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -868,7 +920,9 @@ class ExponentialDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -887,7 +941,12 @@ class ExponentialDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -959,7 +1018,9 @@ class MultiStepDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -978,7 +1039,12 @@ class MultiStepDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -1066,7 +1132,9 @@ class StepDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -1085,7 +1153,12 @@ class StepDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -1163,7 +1236,9 @@ class LambdaDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -1182,7 +1257,12 @@ class LambdaDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -1264,7 +1344,9 @@ class ReduceOnPlateau(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -1283,7 +1365,12 @@ class ReduceOnPlateau(LRScheduler):
scheduler.step(loss) # If you update learning rate each step
# scheduler.step(loss) # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
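
ReduceOnPlateau is the one scheduler here whose step() takes a metric, as the scheduler.step(loss) lines above show. Not part of this diff: a minimal dynamic-graph sketch with hypothetical factor/patience values.

    import paddle

    linear = paddle.nn.Linear(10, 10)
    scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5,
                                                    patience=5, verbose=True)
    sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
    for epoch in range(20):
        for batch_id in range(5):
            x = paddle.uniform([10, 10])
            out = linear(x)
            loss = paddle.mean(out)
            loss.backward()
            sgd.step()
            sgd.clear_grad()
            scheduler.step(loss)    # the learning rate only drops when `loss` stops improving
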
@@ -1488,7 +1575,9 @@ class CosineAnnealingDecay(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -1507,7 +1596,12 @@ class CosineAnnealingDecay(LRScheduler):
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
@@ -1686,7 +1780,9 @@ class OneCycleLR(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -1704,7 +1800,12 @@ class OneCycleLR(LRScheduler):
sgd.clear_gradients()
scheduler.step() # You should update learning rate each step
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
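
OneCycleLR (like CyclicLR below) expects step() once per optimizer step rather than per epoch, matching the "You should update learning rate each step" comment. Not part of this diff: a sketch with hypothetical max_learning_rate/total_steps; total_steps should cover the planned number of step() calls.

    import paddle

    linear = paddle.nn.Linear(10, 10)
    scheduler = paddle.optimizer.lr.OneCycleLR(max_learning_rate=1.0, total_steps=100, verbose=True)
    sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
    for epoch in range(5):
        for batch_id in range(20):
            x = paddle.uniform([10, 10])
            out = linear(x)
            loss = paddle.mean(out)
            loss.backward()
            sgd.step()
            sgd.clear_grad()
            scheduler.step()    # called every step: 5 * 20 = 100 calls, matching total_steps
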
@@ -1929,7 +2030,9 @@ class CyclicLR(LRScheduler):
Examples:
.. code-block:: python
:name: code-example1
# Example1: train on default dynamic graph mode
import paddle
import numpy as np
@@ -1947,7 +2050,12 @@ class CyclicLR(LRScheduler):
sgd.clear_gradients()
scheduler.step() # You should update learning rate each step
# train on static graph mode
.. code-block:: python
:name: code-example2
# Example2: train on static graph mode
import paddle
import numpy as np
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
