md: add new helpers to suspend/resume array

Advantages of the new APIs:
 - reconfig_mutex is not required;
 - the weird logic where suspending the array holds 'reconfig_mutex' so
   that md_check_recovery() can update the superblock is not needed;
 - the special handling, 'pers->prepare_suspend', for raid456 is not
   needed;
 - it's safe to call them at any time once the mddev is allocated, and they
   are designed for slow paths where the array configuration is changed;
 - the new helpers are designed to be called before mddev_lock(), hence
   they can also be interrupted by the user, as shown in the sketch below.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20231010151958.145896-5-yukuai1@huaweicloud.com
Yu Kuai, 2023-10-10 23:19:43 +08:00; committed by Song Liu
commit 714d20150e (parent 2e82248b70)
2 changed files with 103 additions and 2 deletions
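As a rough illustration of the calling pattern these helpers enable, the sketch below suspends the array before taking reconfig_mutex via mddev_lock(); the wrapper function example_reconfigure() and its error handling are hypothetical and not part of this patch.

/* Illustrative slow-path caller (not from this patch): quiesce I/O first,
 * without holding reconfig_mutex, then take the lock to reconfigure.
 */
static int example_reconfigure(struct mddev *mddev)
{
	int err;

	/* interruptible == true: a pending signal aborts the suspend */
	err = __mddev_suspend(mddev, true);
	if (err)
		return err;

	err = mddev_lock(mddev);
	if (err) {
		__mddev_resume(mddev);
		return err;
	}

	/* ... update the array configuration here ... */

	mddev_unlock(mddev);
	__mddev_resume(mddev);
	return 0;
}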

drivers/md/md.c

@@ -443,12 +443,22 @@ void mddev_suspend(struct mddev *mddev)
 			lockdep_is_held(&mddev->reconfig_mutex));
 
 	WARN_ON_ONCE(thread && current == thread->tsk);
-	if (mddev->suspended++)
+
+	/* can't concurrent with __mddev_suspend() and __mddev_resume() */
+	mutex_lock(&mddev->suspend_mutex);
+	if (mddev->suspended++) {
+		mutex_unlock(&mddev->suspend_mutex);
 		return;
+	}
+
 	wake_up(&mddev->sb_wait);
 	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
 	percpu_ref_kill(&mddev->active_io);
 
+	/*
+	 * TODO: cleanup 'pers->prepare_suspend after all callers are replaced
+	 * by __mddev_suspend().
+	 */
 	if (mddev->pers && mddev->pers->prepare_suspend)
 		mddev->pers->prepare_suspend(mddev);
 
@@ -459,14 +469,21 @@ void mddev_suspend(struct mddev *mddev)
 	del_timer_sync(&mddev->safemode_timer);
 	/* restrict memory reclaim I/O during raid array is suspend */
 	mddev->noio_flag = memalloc_noio_save();
+
+	mutex_unlock(&mddev->suspend_mutex);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
 void mddev_resume(struct mddev *mddev)
 {
 	lockdep_assert_held(&mddev->reconfig_mutex);
-	if (--mddev->suspended)
+
+	/* can't concurrent with __mddev_suspend() and __mddev_resume() */
+	mutex_lock(&mddev->suspend_mutex);
+	if (--mddev->suspended) {
+		mutex_unlock(&mddev->suspend_mutex);
 		return;
+	}
 
 	/* entred the memalloc scope from mddev_suspend() */
 	memalloc_noio_restore(mddev->noio_flag);
@@ -477,9 +494,89 @@ void mddev_resume(struct mddev *mddev)
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
+
+	mutex_unlock(&mddev->suspend_mutex);
 }
 EXPORT_SYMBOL_GPL(mddev_resume);
 
+int __mddev_suspend(struct mddev *mddev, bool interruptible)
+{
+	int err = 0;
+
+	/*
+	 * hold reconfig_mutex to wait for normal io will deadlock, because
+	 * other context can't update super_block, and normal io can rely on
+	 * updating super_block.
+	 */
+	lockdep_assert_not_held(&mddev->reconfig_mutex);
+
+	if (interruptible)
+		err = mutex_lock_interruptible(&mddev->suspend_mutex);
+	else
+		mutex_lock(&mddev->suspend_mutex);
+	if (err)
+		return err;
+
+	if (mddev->suspended) {
+		WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
+		mutex_unlock(&mddev->suspend_mutex);
+		return 0;
+	}
+
+	percpu_ref_kill(&mddev->active_io);
+	if (interruptible)
+		err = wait_event_interruptible(mddev->sb_wait,
+				percpu_ref_is_zero(&mddev->active_io));
+	else
+		wait_event(mddev->sb_wait,
+				percpu_ref_is_zero(&mddev->active_io));
+	if (err) {
+		percpu_ref_resurrect(&mddev->active_io);
+		mutex_unlock(&mddev->suspend_mutex);
+		return err;
+	}
+
+	/*
+	 * For raid456, io might be waiting for reshape to make progress,
+	 * allow new reshape to start while waiting for io to be done to
+	 * prevent deadlock.
+	 */
+	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
+
+	del_timer_sync(&mddev->safemode_timer);
+	/* restrict memory reclaim I/O during raid array is suspend */
+	mddev->noio_flag = memalloc_noio_save();
+
+	mutex_unlock(&mddev->suspend_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__mddev_suspend);
+
+void __mddev_resume(struct mddev *mddev)
+{
+	lockdep_assert_not_held(&mddev->reconfig_mutex);
+
+	mutex_lock(&mddev->suspend_mutex);
+	WRITE_ONCE(mddev->suspended, mddev->suspended - 1);
+	if (mddev->suspended) {
+		mutex_unlock(&mddev->suspend_mutex);
+		return;
+	}
+
+	/* entred the memalloc scope from __mddev_suspend() */
+	memalloc_noio_restore(mddev->noio_flag);
+
+	percpu_ref_resurrect(&mddev->active_io);
+	wake_up(&mddev->sb_wait);
+
+	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	md_wakeup_thread(mddev->thread);
+	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
+
+	mutex_unlock(&mddev->suspend_mutex);
+}
+EXPORT_SYMBOL_GPL(__mddev_resume);
+
 /*
  * Generic flush handling for md
  */
@@ -672,6 +769,7 @@ int mddev_init(struct mddev *mddev)
 	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
 	mutex_init(&mddev->sync_mutex);
+	mutex_init(&mddev->suspend_mutex);
 	mutex_init(&mddev->bitmap_info.mutex);
 	INIT_LIST_HEAD(&mddev->disks);
 	INIT_LIST_HEAD(&mddev->all_mddevs);

drivers/md/md.h

@@ -316,6 +316,7 @@ struct mddev {
 	unsigned long			sb_flags;
 
 	int				suspended;
+	struct mutex			suspend_mutex;
 	struct percpu_ref		active_io;
 	int				ro;
 	int				sysfs_active; /* set when sysfs deletes
@@ -811,6 +812,8 @@ extern void md_rdev_clear(struct md_rdev *rdev);
 
 extern void md_handle_request(struct mddev *mddev, struct bio *bio);
 extern void mddev_suspend(struct mddev *mddev);
 extern void mddev_resume(struct mddev *mddev);
+extern int __mddev_suspend(struct mddev *mddev, bool interruptible);
+extern void __mddev_resume(struct mddev *mddev);
 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
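
Because mddev->suspended acts as a depth counter, the new helpers nest: only the outermost suspend/resume pair does real work. The sketch below illustrates that behaviour; example_nested_suspend() is a hypothetical caller, not code from this patch.

/* Nesting sketch: inner calls only adjust the counter. */
static void example_nested_suspend(struct mddev *mddev)
{
	__mddev_suspend(mddev, false);	/* 0 -> 1: kills active_io and waits for it to drain */
	__mddev_suspend(mddev, false);	/* 1 -> 2: only bumps the counter */

	__mddev_resume(mddev);		/* 2 -> 1: array stays suspended */
	__mddev_resume(mddev);		/* 1 -> 0: resurrects active_io and wakes the md thread */
}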