diff --git a/src/lightning/pytorch/loops/training_epoch_loop.py b/src/lightning/pytorch/loops/training_epoch_loop.py
index 9e36ee65176c8..975070816700b 100644
--- a/src/lightning/pytorch/loops/training_epoch_loop.py
+++ b/src/lightning/pytorch/loops/training_epoch_loop.py
@@ -368,7 +368,7 @@ def _update_learning_rates(self, interval: str, update_plateau_schedulers: bool)
             if update_plateau_schedulers ^ config.reduce_on_plateau:
                 continue
 
-            current_idx = self.batch_idx if interval == "step" else trainer.current_epoch
+            current_idx = self.total_batch_idx if interval == "step" else trainer.current_epoch
             current_idx += 1  # account for both batch and epoch starts from 0
             # Take step if call to update_learning_rates matches the interval key and
             # the current step modulo the schedulers frequency is zero
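
For context on the change: `self.batch_idx` resets to 0 at the start of every epoch, while `self.total_batch_idx` increases monotonically across epochs, so a step-interval scheduler with `frequency > batches_per_epoch` could previously never fire. Below is a minimal standalone sketch (not Lightning code; `frequency`, `batches_per_epoch`, and `epochs` are illustrative values I chose) of how the `current_idx % frequency == 0` check behaves with each counter:

# Sketch of the per-epoch vs. global batch counter behavior, assuming a
# scheduler meant to step every `frequency` optimizer steps.
frequency = 5          # scheduler should step every 5 batches
batches_per_epoch = 3  # shorter than `frequency`: the problematic case
epochs = 4

steps_with_batch_idx = []        # old behavior: per-epoch index, resets each epoch
steps_with_total_batch_idx = []  # new behavior: global index across epochs

total_batch_idx = 0
for epoch in range(epochs):
    for batch_idx in range(batches_per_epoch):
        # the +1 mirrors `current_idx += 1` in the diff (indices start at 0)
        if (batch_idx + 1) % frequency == 0:
            steps_with_batch_idx.append((epoch, batch_idx))
        if (total_batch_idx + 1) % frequency == 0:
            steps_with_total_batch_idx.append((epoch, batch_idx))
        total_batch_idx += 1

print(steps_with_batch_idx)        # [] -- never steps: batch_idx never reaches 4
print(steps_with_total_batch_idx)  # [(1, 1), (3, 0)] -- steps at global batches 5 and 10

The epoch-interval branch is unaffected, since `trainer.current_epoch` already increases monotonically over the whole run.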