
Keras--Dynamically Adjusting the Learning Rate


When training a model, we sometimes need to adjust the learning rate as training progresses.

This post introduces a few common ways to do this in Keras.
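Before writing a custom callback, note that Keras itself already ships with scheduling callbacks such as LearningRateScheduler and ReduceLROnPlateau. A minimal sketch of the former (assuming Keras 2.2+, where the schedule function also receives the current rate):

from keras.callbacks import LearningRateScheduler

def schedule(epoch, lr):
    # Halve the learning rate every 10 epochs, otherwise keep it unchanged.
    return lr * 0.5 if epoch > 0 and epoch % 10 == 0 else lr

lr_scheduler = LearningRateScheduler(schedule, verbose=1)
# model.fit(X_train, Y_train, callbacks=[lr_scheduler])

The two custom callbacks below give finer control than the built-ins.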

from keras.callbacks import Callback
from keras import backend as K


class LinearDecayLR(Callback):
    '''Option 1: decay the learning rate (lr) linearly after every batch.

    # Arguments
        min_lr: The lower bound (final value) of the learning rate.
        max_lr: The upper bound (initial value) of the learning rate.
        steps_per_epoch: Number of batches in one epoch.
        epochs: Number of epochs to run training.

    # Usage
        lr_decay = LinearDecayLR(min_lr=1e-5, max_lr=0.01,
                                 steps_per_epoch=step_size_train,
                                 epochs=20, verbose=1)
        model.fit(X_train, Y_train, callbacks=[lr_decay])
    '''
    def __init__(self, min_lr=1e-5, max_lr=1e-2, steps_per_epoch=None, epochs=None, verbose=0):
        super().__init__()
        self.min_lr = min_lr
        self.max_lr = max_lr
        # batches per epoch * total epochs = total number of batches
        self.total_iterations = steps_per_epoch * epochs
        self.iteration = 0
        self.verbose = verbose

    def linear_decay(self):
        '''Calculate the learning rate for the current iteration.'''
        r = self.iteration / self.total_iterations
        return self.max_lr - (self.max_lr - self.min_lr) * r

    def on_train_begin(self, logs=None):
        '''At the very start of training, set the learning rate to its maximum.'''
        logs = logs or {}
        K.set_value(self.model.optimizer.lr, self.max_lr)

    def on_batch_end(self, batch, logs=None):
        '''Recompute the learning rate after each batch.'''
        logs = logs or {}
        self.iteration += 1
        K.set_value(self.model.optimizer.lr, self.linear_decay())

    def on_epoch_begin(self, epoch, logs=None):
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRateScheduler setting learning '
                  'rate to %s.' % (epoch + 1, K.get_value(self.model.optimizer.lr)))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)


class StepLR(Callback):
    '''Option 2: step-wise learning rate adjustment. The steps do not have to
    decay the rate; they can also increase it.

    For example, with
        epoch_list = [100, 200, 300]
        lr_list    = [0.5, 0.1, 0.8]
    the learning rate is 0.5 for epochs 0-99, 0.1 for epochs 100-199, and
    0.8 from epoch 200 on. Note that the two lists must have the same
    length; this is a simple implementation and not especially robust.
    Also, because "lr" is written into logs, the learning rate can be
    observed in TensorBoard.
    '''
    def __init__(self, lr_list, epoch_list, verbose=1):
        super().__init__()
        # Copy so that popping does not mutate the caller's lists.
        self.lr_list = list(lr_list)
        self.epoch_list = list(epoch_list)
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        # Advance to the next learning rate once the current boundary epoch
        # is reached; keep the last rate once the schedule is exhausted.
        if self.epoch_list and epoch >= self.epoch_list[0]:
            self.lr_list.pop(0)
            self.epoch_list.pop(0)
        if self.lr_list:
            K.set_value(self.model.optimizer.lr, self.lr_list[0])
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRate '
                  'is %s.' % (epoch + 1, K.get_value(self.model.optimizer.lr)))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
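As a quick sanity check of the linear_decay formula (a standalone sketch, independent of any model): with lr = max_lr - (max_lr - min_lr) * iteration / total_iterations, the rate starts at max_lr and reaches min_lr on the final batch.

min_lr, max_lr = 1e-5, 1e-2
total = 20 * 100  # epochs * steps_per_epoch, as in __init__

for it in (0, total // 2, total):
    print(it, max_lr - (max_lr - min_lr) * it / total)
# 0    -> 0.01     (max_lr, first batch)
# 1000 -> 0.005005 (halfway)
# 2000 -> ~1e-05   (min_lr, last batch)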

Usage example (train_generator below stands in for your data generator):

epoch_list = [100, 200, 300]
lr_list    = [0.5, 0.1, 0.8]
stepLR = StepLR(lr_list, epoch_list)
model.fit_generator(train_generator, callbacks=[stepLR])
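Because both callbacks write logs['lr'] in on_epoch_end, the per-epoch learning rate is also recorded in the returned History object, in addition to being visible in TensorBoard as noted above. A sketch, assuming the model and train_generator from the example:

history = model.fit_generator(train_generator, epochs=300, callbacks=[stepLR])
# StepLR writes logs['lr'] each epoch, so the schedule is recoverable afterwards:
print(history.history['lr'][99:102])  # e.g. [0.5, 0.1, 0.1] around the first boundary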