md: raid1/raid10: drop pending_cnt
Those counters are not necessary after commit 11bb45e8aaf6 ("md: drop
queue limitation for RAID1 and RAID10"). Remove them from all code
(conf and plug structs). raid1_plug_cb and raid10_plug_cb are
identical, so move the definition of raid1_plug_cb to the common
raid1-10 definitions and use it for RAID10 too.

Signed-off-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
Signed-off-by: Song Liu <song@kernel.org>
parent a76370690c
commit daae161fd2
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -28,6 +28,11 @@ struct resync_pages {
 	struct page	*pages[RESYNC_PAGES];
 };
 
+struct raid1_plug_cb {
+	struct blk_plug_cb	cb;
+	struct bio_list		pending;
+};
+
 static void rbio_pool_free(void *rbio, void *data)
 {
 	kfree(rbio);
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -824,7 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
 	struct bio *bio;
 
 	bio = bio_list_get(&conf->pending_bio_list);
-	conf->pending_count = 0;
 	spin_unlock_irq(&conf->device_lock);
 
 	/*
@@ -1167,12 +1166,6 @@ free_pages:
 	bio_put(behind_bio);
 }
 
-struct raid1_plug_cb {
-	struct blk_plug_cb	cb;
-	struct bio_list		pending;
-	int			pending_cnt;
-};
-
 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
 	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
@@ -1184,7 +1177,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	if (from_schedule || current->bio_list) {
 		spin_lock_irq(&conf->device_lock);
 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
-		conf->pending_count += plug->pending_cnt;
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_barrier);
 		md_wakeup_thread(mddev->thread);
@@ -1588,11 +1580,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			plug = NULL;
 		if (plug) {
 			bio_list_add(&plug->pending, mbio);
-			plug->pending_cnt++;
 		} else {
 			spin_lock_irqsave(&conf->device_lock, flags);
 			bio_list_add(&conf->pending_bio_list, mbio);
-			conf->pending_count++;
 			spin_unlock_irqrestore(&conf->device_lock, flags);
 			md_wakeup_thread(mddev->thread);
 		}
@@ -3058,7 +3048,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	init_waitqueue_head(&conf->wait_barrier);
 
 	bio_list_init(&conf->pending_bio_list);
-	conf->pending_count = 0;
 	conf->recovery_disabled = mddev->recovery_disabled - 1;
 
 	err = -EIO;
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -87,7 +87,6 @@ struct r1conf {
 
 	/* queue pending writes to be submitted on unplug */
 	struct bio_list		pending_bio_list;
-	int			pending_count;
 
 	/* for use when syncing mirrors:
 	 * We don't allow both normal IO and resync/recovery IO at
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -861,7 +861,6 @@ static void flush_pending_writes(struct r10conf *conf)
 	struct bio *bio;
 
 	bio = bio_list_get(&conf->pending_bio_list);
-	conf->pending_count = 0;
 	spin_unlock_irq(&conf->device_lock);
 
 	/*
@@ -1054,16 +1053,9 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
 		return rdev->new_data_offset;
 }
 
-struct raid10_plug_cb {
-	struct blk_plug_cb	cb;
-	struct bio_list		pending;
-	int			pending_cnt;
-};
-
 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
-	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
-						   cb);
+	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
 	struct mddev *mddev = plug->cb.data;
 	struct r10conf *conf = mddev->private;
 	struct bio *bio;
@@ -1071,7 +1063,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	if (from_schedule || current->bio_list) {
 		spin_lock_irq(&conf->device_lock);
 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
-		conf->pending_count += plug->pending_cnt;
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_barrier);
 		md_wakeup_thread(mddev->thread);
@@ -1238,7 +1229,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
 	unsigned long flags;
 	struct blk_plug_cb *cb;
-	struct raid10_plug_cb *plug = NULL;
+	struct raid1_plug_cb *plug = NULL;
 	struct r10conf *conf = mddev->private;
 	struct md_rdev *rdev;
 	int devnum = r10_bio->devs[n_copy].devnum;
@@ -1280,16 +1271,14 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 
 	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
 	if (cb)
-		plug = container_of(cb, struct raid10_plug_cb, cb);
+		plug = container_of(cb, struct raid1_plug_cb, cb);
 	else
 		plug = NULL;
 	if (plug) {
 		bio_list_add(&plug->pending, mbio);
-		plug->pending_cnt++;
 	} else {
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 		md_wakeup_thread(mddev->thread);
 	}
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -75,7 +75,6 @@ struct r10conf {
 
 	/* queue pending writes and submit them on unplug */
 	struct bio_list		pending_bio_list;
-	int			pending_count;
 
 	spinlock_t		resync_lock;
 	atomic_t		nr_pending;
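
A note for readers less familiar with the block-layer plugging idiom this
patch consolidates: both drivers embed a generic callback header (struct
blk_plug_cb) inside a per-task struct and recover the outer struct with
container_of(), which is why a single raid1_plug_cb can now serve RAID10
as well, and why the bio list alone suffices to drain the batch. The
following self-contained userspace sketch models that idiom only; the
plug_cb, raid_plug_cb, and write_req types are invented stand-ins for
this example, not kernel API.

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* generic callback header, in the role of struct blk_plug_cb */
struct plug_cb {
	void (*unplug)(struct plug_cb *cb);
};

/* a queued write, in the role of a struct bio on a bio_list */
struct write_req {
	int id;
	struct write_req *next;
};

/* per-task batch state, in the role of the shared raid1_plug_cb */
struct raid_plug_cb {
	struct plug_cb cb;		/* embedded header */
	struct write_req *pending;	/* stand-in for struct bio_list */
};

static void raid_unplug(struct plug_cb *cb)
{
	/* recover the outer struct from the embedded callback header */
	struct raid_plug_cb *plug = container_of(cb, struct raid_plug_cb, cb);

	/* the list itself carries the batched work; no element counter
	 * (like the removed pending_cnt) is needed to drain it */
	for (struct write_req *r = plug->pending; r; r = r->next)
		printf("submitting queued write %d\n", r->id);
	plug->pending = NULL;
}

int main(void)
{
	struct raid_plug_cb plug = { .cb.unplug = raid_unplug };
	struct write_req w2 = { .id = 2, .next = NULL };
	struct write_req w1 = { .id = 1, .next = &w2 };

	plug.pending = &w1;		/* batch two writes on the plug */
	plug.cb.unplug(&plug.cb);	/* "unplug": flush the batch */
	return 0;
}

Because the linked list carries all of the queued work by itself, a
separate element count adds nothing, which is the heart of this cleanup.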