import tensorflow as tf

# Step decay: the learning rate is multiplied by decay_rate every decay_steps optimizer steps
lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=lr_rate,  # lr_rate: base learning rate, assumed defined earlier
    decay_steps=10,
    decay_rate=0.96)
opt = tf.keras.optimizers.Adamax(learning_rate=lr_scheduler, beta_1=0.9, beta_2=0.999, epsilon=1e-09)
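# Sanity check (a minimal sketch, assuming lr_rate = 1e-3): the schedule object is
# callable with a step index and returns lr_rate * decay_rate ** (step / decay_steps)
for step in [0, 10, 50]:
    print(step, lr_scheduler(step).numpy())  # 0.001, 0.00096, ~0.000815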
# Adjust the learning rate per epoch
def scheduler(epoch):
    if epoch < 5:
        return lr_rate
    else:
        # After the first 5 epochs, decay exponentially, flooring the rate at 1e-4
        lr = tf.maximum(lr_rate * tf.math.exp(0.1 * (5 - epoch)), 1e-4)
        return lr.numpy()
reduce_lr = tf.keras.callbacks.LearningRateScheduler(scheduler)
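# Minimal sketch of the per-epoch schedule (assuming lr_rate = 1e-3): flat for the
# first five epochs, then roughly an exp(-0.1) decay per epoch until the 1e-4 floor
for epoch in range(8):
    print(epoch, scheduler(epoch))  # 0.001 for epochs 0-4, then ~9.05e-4, ~8.19e-4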
reduce_lr2 = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='val_dice_coef',
    factor=0.5,
    patience=3,
    verbose=0,
    mode='max',
    min_delta=1e-4,
    cooldown=0,
    min_lr=1e-4)
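# monitor='val_dice_coef' requires the model to have been compiled with a metric
# named dice_coef. A hypothetical sketch (the smoothing constant is an assumption):
def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    y_pred_f = tf.reshape(tf.cast(y_pred, tf.float32), [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
# e.g. s_model.compile(optimizer=opt, loss=..., metrics=[dice_coef])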
# early_stopping and history2 are callbacks assumed to be defined earlier; note that
# only reduce_lr2 (not reduce_lr) is passed, so the plateau-based schedule is in effect
s_model.fit(train_db, epochs=epochs, validation_data=test_db,
            callbacks=[reduce_lr2, early_stopping, history2])