RecomputeOptimizer

class paddle.fluid.optimizer.RecomputeOptimizer(optimizer) [source]
api_attr

Static Graph

Recompute Optimizer Wrapper

Normally, a training step contains three sub-steps: first, run the forward Operators to calculate the loss; second, run the backward Operators to calculate the gradients of the parameters; third, apply the optimization method to update the values of the parameters.

In the forward computation process, all variables that are needed by the backward computation process are kept in memory, which occupies a great amount of memory when the network becomes very deep.

Recompute splits the network into k segments. In each segment, the forward Operators are recomputed before the backward Operators run, which is very helpful for saving memory.

The Variables that separate the network into segments are called checkpoints, and users should set them manually. The usage is very simple:

Parameters

optimizer (Optimizer) – The optimizer that is applied to parameters.

Examples

import paddle.fluid as fluid
import numpy as np
def gen_data():
    return {"x": np.random.random(size=(32, 32)).astype('float32'),
    "y": np.random.randint(2, size=(32, 1)).astype('int64')}
def mlp(input_x, input_y, hid_dim=128, label_dim=2):
    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
    sum_cost = fluid.layers.reduce_mean(cost)
    return sum_cost, fc_1, prediction
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)

sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.RecomputeOptimizer(sgd)
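# The checkpoint Variables split the network into recompute segments.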
sgd._set_checkpoints([fc_1, pred])
sgd.minimize(cost)

print("Finished optimize")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
step = 10

for i in range(step):
    cost_val = exe.run(feed=gen_data(),
                       program=fluid.default_main_program(),
                       fetch_list=[cost.name])
    print("step=%d cost=%f" % (i, cost_val[0]))
load(state_dict)

load

api_attr

Static Graph

The load function is not supported by RecomputeOptimizer for now; calling it raises a NotImplementedError, as shown in the example below.

Parameters

state_dict (dict) – the dict loaded by the load_persistables method

Examples

import paddle.fluid as fluid
import paddle.compat as cpt

def mlp(input_x, input_y, hid_dim=128, label_dim=2):
    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
    sum_cost = fluid.layers.reduce_mean(cost)
    return sum_cost, fc_1, prediction

input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")

sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.RecomputeOptimizer(sgd)
sgd._set_checkpoints([fc_1, pred])
try:
    state_dict = {}
    sgd.load(state_dict)
except NotImplementedError as e:
    print(cpt.get_exception_message(e))
apply_gradients(params_grads)

apply_gradients

Call the apply_gradients function of the inner self._optimizer.

Parameters

params_grads (list) – list of (param, grad) pairs to do optimization.

Returns

A list of operators appended to the current program.

Return type

list

Examples

import paddle.fluid as fluid
import paddle.fluid.framework as framework

def mlp(input_x, input_y, hid_dim=128, label_dim=2):
    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
    sum_cost = fluid.layers.reduce_mean(cost)
    return sum_cost, fc_1, prediction


input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")

sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.RecomputeOptimizer(sgd)
sgd._set_checkpoints([fc_1, pred])
params_grads = sgd.backward(
    cost,
    startup_program=None,
    parameter_list=None,
    no_grad_set=None)

program = cost.block.program
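# Switch the default main program so the optimize ops are appended
# to the program that owns the loss.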
with framework.program_guard(program, None):
    optimize_ops = sgd.apply_gradients(params_grads)

print("Finished apply gradients")
backward(loss, startup_program=None, parameter_list=None, no_grad_set=None, callbacks=None)

backward

Call append_backward with the checkpoints set on this optimizer.

Parameters
  • loss (Variable) – loss variable to run optimizations.

  • startup_program (Program) – startup_program for initializing parameters in parameter_list.

  • parameter_list (list) – list of Variables or Variable names to update.

  • no_grad_set (set|None) – set of Variables or Variable names that should be ignored.

  • callbacks (list|None) – list of callables to run when appending the backward operator for one parameter.

  • checkpoints (list) – list of Variables used as checkpoints (set via _set_checkpoints).

Examples

import paddle.fluid as fluid

def mlp(input_x, input_y, hid_dim=128, label_dim=2):
    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
    sum_cost = fluid.layers.reduce_mean(cost)
    return sum_cost, fc_1, prediction


input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")

sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.RecomputeOptimizer(sgd)
sgd._set_checkpoints([fc_1, pred])
params_grads = sgd.backward(
    cost,
    startup_program=None,
    parameter_list=None,
    no_grad_set=None)
print("Finished backward")
apply_optimize(loss, startup_program, params_grads)

apply_optimize

Call the apply_optimize function of the inner self._optimizer.

Parameters
  • loss (Variable) – loss variable to run optimizations.

  • startup_program (Program) – startup_program for initializing parameters in parameter_list.

  • params_grads (list) – list of (param, grad) pairs to do optimization.

Examples
import paddle.fluid as fluid

def mlp(input_x, input_y, hid_dim=128, label_dim=2):
    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
    sum_cost = fluid.layers.reduce_mean(cost)
    return sum_cost, fc_1, prediction

input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)
print("Finished FF")

sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.RecomputeOptimizer(sgd)
sgd._set_checkpoints([fc_1, pred])
params_grads = sgd.backward(
    cost,
    startup_program=None,
    parameter_list=None,
    no_grad_set=None)

optimize_ops = sgd.apply_optimize(
    cost, startup_program=None, params_grads=params_grads)

print("Finished apply_optimize")
minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None)

minimize

Add operations to minimize loss by updating parameter_list.

Parameters
  • loss (Variable) – A Variable containing the value to minimize.

  • startup_program (Program, optional) – Program for initializing parameters in parameter_list. The default value is None, in which case default_startup_program will be used.

  • parameter_list (Iterable, optional) – Iterable of Variables or Variable names to update to minimize loss. The default value is None, in which case all parameters will be updated.

  • no_grad_set (set, optional) – Set of Variables or Variable names that don't need to be updated. The default value is None.

Returns

tuple (optimize_ops, params_grads): a list of operators appended by minimize and a list of (param, grad) Variable pairs, where param is a Parameter and grad is the gradient value corresponding to that parameter. The returned tuple can be passed to fetch_list in Executor.run() to indicate program pruning; if so, the program will be pruned by feed and fetch_list before running. See details in Executor.

Return type

tuple

Examples

Please refer to the example of the current Optimizer above. A minimal sketch is also shown below.
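
The sketch below simply mirrors the class-level example at the top of this page; the mlp network, the checkpoint Variables, and the inner Adam optimizer are the same assumptions used there, not additional official API:

import paddle.fluid as fluid

def mlp(input_x, input_y, hid_dim=128, label_dim=2):
    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
    sum_cost = fluid.layers.reduce_mean(cost)
    return sum_cost, fc_1, prediction

input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)

sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.RecomputeOptimizer(sgd)
sgd._set_checkpoints([fc_1, pred])
# minimize runs backward and apply_optimize in one call and returns
# the appended operators together with the (param, grad) pairs.
optimize_ops, params_grads = sgd.minimize(cost)
print("Finished minimize")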

append_regularization_ops(parameters_and_grads, regularization=None)

append_regularization_ops

Create and add backward regularization Operators

Creates and adds backward regularization operators in the BlockDesc. This will add gradients of the regularizer function to the gradients of the parameters and return these modified gradients. This is the same as implementing weight decay in optimizers for regularization.

Parameters
  • parameters_and_grads – A list of (parameters, gradients) pairs that need to be regularized.

  • regularization – A global regularizer. If a parameter does not have a regularizer of its own, this global regularizer will be applied to it.

Returns

list of (parameters, gradients) pairs with the regularized gradients

Return type

list[(Variable, Variable)]

Raises

Exception – Unknown regularization type
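
Examples

No example ships with this method here; the following is a hedged sketch, not official usage. It assumes the same mlp network and checkpoints as the earlier examples and uses fluid.regularizer.L2Decay as an illustrative global regularizer:

import paddle.fluid as fluid

def mlp(input_x, input_y, hid_dim=128, label_dim=2):
    fc_1 = fluid.layers.fc(input=input_x, size=hid_dim)
    prediction = fluid.layers.fc(input=[fc_1], size=label_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
    sum_cost = fluid.layers.reduce_mean(cost)
    return sum_cost, fc_1, prediction

input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
cost, fc_1, pred = mlp(input_x, input_y)

sgd = fluid.optimizer.Adam(learning_rate=0.01)
sgd = fluid.optimizer.RecomputeOptimizer(sgd)
sgd._set_checkpoints([fc_1, pred])
params_grads = sgd.backward(cost)
# Apply a global L2 regularizer (chosen here for illustration) to every
# gradient whose parameter has no per-parameter regularizer of its own.
params_grads = sgd.append_regularization_ops(
    params_grads,
    regularization=fluid.regularizer.L2Decay(regularization_coeff=1e-4))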

clear_gradients()

clear_gradients

Clear the gradients of all optimized parameters of the model.

If this is not done, the new gradients will accumulate on top of the previous gradients.

Returns

None

Examples

import paddle.fluid as fluid
import numpy as np

with fluid.dygraph.guard():
    value = np.arange(26).reshape(2, 13).astype("float32")
    a = fluid.dygraph.to_variable(value)
    linear = fluid.Linear(13, 5, dtype="float32")
    # This can be any optimizer supported by dygraph.
    adam = fluid.optimizer.Adam(learning_rate=0.01,
                                parameter_list=linear.parameters())
    out = linear(a)
    out.backward()
    adam.minimize(out)
    adam.clear_gradients()
current_step_lr()

current_step_lr

api_attr

imperative

Get the learning rate of the current step. When LearningRateDecay is not used, the return value is the same for every step; otherwise, the learning rate of the current step is returned.

Returns

The learning rate of the current step.

Return type

float

Examples

import paddle.fluid as fluid
import numpy as np

# example1: LearningRateDecay is not used, return value is all the same
with fluid.dygraph.guard():
    emb = fluid.dygraph.Embedding([10, 10])
    adam = fluid.optimizer.Adam(0.001, parameter_list = emb.parameters())
    lr = adam.current_step_lr()
    print(lr) # 0.001

# example2: PiecewiseDecay is used, return the step learning rate
with fluid.dygraph.guard():
    inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
    linear = fluid.dygraph.nn.Linear(10, 10)
    inp = fluid.dygraph.to_variable(inp)
    out = linear(inp)
    loss = fluid.layers.reduce_mean(out)

    bd = [2, 4, 6, 8]
    value = [0.2, 0.4, 0.6, 0.8, 1.0]
    adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay(bd, value, 0),
                                parameter_list=linear.parameters())

    # first step: learning rate is 0.2
    np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0) # True

    # learning rate for different steps
    ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
    for i in range(12):
        adam.minimize(loss)
        lr = adam.current_step_lr()
        np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True
set_dict(state_dict)

set_dict

Load the optimizer state dict. For the Adam optimizer, the state contains beta1, beta2, momentum, etc. If LearningRateDecay has been used, global_step will be changed as well.

Parameters

state_dict (dict) – dict that contains all the Variables needed by the optimizer

Returns

None

Examples

import paddle
import paddle.fluid as fluid

paddle.disable_static()

emb = paddle.nn.Embedding(10, 10)

state_dict = emb.state_dict()
fluid.save_dygraph(state_dict, "paddle_dy")

scheduler = paddle.optimizer.lr.NoamDecay(
    d_model=0.01, warmup_steps=100, verbose=True)
adam = paddle.optimizer.Adam(
    learning_rate=scheduler,
    parameters=emb.parameters())
state_dict = adam.state_dict()
fluid.save_dygraph(state_dict, "paddle_dy")

para_state_dict, opti_state_dict = fluid.load_dygraph("paddle_dy")
adam.set_dict(opti_state_dict)
set_lr(value)

set_lr

api_attr

imperative

Set the value of the learning rate manually in the optimizer. If the optimizer uses LearningRateDecay, this API cannot be invoked, because the two would conflict.

Parameters

value (float|Variable) – the value of the learning rate

Returns

None

Examples

import paddle.fluid as fluid

with fluid.dygraph.guard():
    linear = fluid.dygraph.nn.Linear(10, 10)

    adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())

    # set learning rate manually by python float value
    lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
    for i in range(5):
        adam.set_lr(lr_list[i])
        lr = adam.current_step_lr()
        print("current lr is {}".format(lr))
    # Print:
    #    current lr is 0.2
    #    current lr is 0.3
    #    current lr is 0.4
    #    current lr is 0.5
    #    current lr is 0.6


    # set learning rate manually by framework Variable
    lr_var = fluid.layers.create_global_var(
        shape=[1], value=0.7, dtype='float32')
    adam.set_lr(lr_var)
    lr = adam.current_step_lr()
    print("current lr is {}".format(lr))
    # Print:
    #    current lr is 0.7
set_state_dict(state_dict)

set_state_dict

Load the optimizer state dict. For the Adam optimizer, the state contains beta1, beta2, momentum, etc. If LearningRateDecay has been used, global_step will be changed as well.

Parameters

state_dict (dict) – dict that contains all the Variables needed by the optimizer

Returns

None

Examples

import paddle
import paddle.fluid as fluid

paddle.disable_static()

emb = paddle.nn.Embedding(10, 10)

state_dict = emb.state_dict()
fluid.save_dygraph(state_dict, "paddle_dy")

scheduler = paddle.optimizer.lr.NoamDecay(
    d_model=0.01, warmup_steps=100, verbose=True)
adam = paddle.optimizer.Adam(
    learning_rate=scheduler,
    parameters=emb.parameters())
state_dict = adam.state_dict()
fluid.save_dygraph(state_dict, "paddle_dy")

para_state_dict, opti_state_dict = fluid.load_dygraph("paddle_dy")
adam.set_state_dict(opti_state_dict)
state_dict()

state_dict

Get state dict information from the optimizer. It contains all the Variables used by the optimizer. For the Adam optimizer, it contains beta1, beta2, momentum, etc. If LearningRateDecay has been used, global_step will be included in the state dict. If the optimizer has never been called (i.e., minimize has not been run), the state dict is empty.

Returns

A dict that contains all the Variables used by the optimizer

Return type

dict

Examples

import paddle.fluid as fluid

with fluid.dygraph.guard():
    emb = fluid.dygraph.Embedding([10, 10])

    adam = fluid.optimizer.Adam(0.001, parameter_list=emb.parameters())
    state_dict = adam.state_dict()