md: simplify md_seq_ops

Before this patch, the implementation was hacky and hard to follow (a sketch of the seq_file read loop that drives these callbacks is included after the list):

1) md_seq_start() sets pos to 1;
2) md_seq_show() sees that pos is 1 and prints "Personalities";
3) md_seq_next() sees that pos is 1 and updates pos to the first mddev;
4) md_seq_show() sees that pos is neither 1 nor 2 and shows the mddev;
5) md_seq_next() sees that pos is neither 1 nor 2 and updates pos to the next mddev;
6) steps 4-5 repeat until the last mddev, then md_seq_next() updates pos to 2;
7) md_seq_show() sees that pos is 2 and prints "unused devices";
8) md_seq_next() sees that pos is 2 and stops;
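
For reference, here is a minimal sketch of the read-side loop in the
seq_file core (a simplification of traverse() in fs/seq_file.c, which also
handles buffering, SEQ_SKIP and error returns; seq_traverse_sketch is a
made-up name used only for illustration):

#include <linux/seq_file.h>

static void seq_traverse_sketch(struct seq_file *m, loff_t *pos)
{
	void *v = m->op->start(m, pos);

	while (v) {
		if (m->op->show(m, v))	/* non-zero show() ends this sketch */
			break;
		v = m->op->next(m, v, pos);	/* ->next() advances *pos */
	}
	m->op->stop(m, v);
}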

This patch removes the magic values and uses seq_list_start()/seq_list_next()
directly, moving the printing of "Personalities" to md_seq_start() and of
"unused devices" to md_seq_stop() (simplified sketches of the seq_list
helpers follow the list):

1) md_seq_start() prints "Personalities" and sets pos to the first mddev;
2) md_seq_show() shows the mddev;
3) md_seq_next() updates pos to the next mddev;
4) steps 2-3 repeat until the last mddev;
5) md_seq_stop() prints "unused devices";
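
The helpers the new code relies on behave roughly as below (a simplified
sketch of seq_list_start()/seq_list_next() from fs/seq_file.c; the *_sketch
names are made up):

#include <linux/list.h>
#include <linux/types.h>

/* Return the pos-th entry of the list, or NULL once the end is reached. */
static void *seq_list_start_sketch(struct list_head *head, loff_t pos)
{
	struct list_head *lh;

	list_for_each(lh, head)
		if (pos-- == 0)
			return lh;

	return NULL;
}

/* Bump *ppos and return the next entry, or NULL at the end of the list. */
static void *seq_list_next_sketch(void *v, struct list_head *head, loff_t *ppos)
{
	struct list_head *lh = ((struct list_head *)v)->next;

	++*ppos;
	return lh == head ? NULL : lh;
}

Because the iterator is now the mddev's all_mddevs list node rather than a
magic cookie, md_seq_show() converts it back with list_entry(), as the diff
below shows.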

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20230927061241.1552837-3-yukuai1@huaweicloud.com
commit cf1b6d4441 (parent 3d8d32873c)
Author: Yu Kuai <yukuai3@huawei.com>
Date: 2023-09-27 14:12:41 +08:00
Committer: Song Liu <song@kernel.org>


@@ -8213,105 +8213,46 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 }
 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(&all_mddevs_lock)
 {
-	struct list_head *tmp;
-	loff_t l = *pos;
-	struct mddev *mddev;
+	struct md_personality *pers;
-	if (l == 0x10000) {
-		++*pos;
-		return (void *)2;
-	}
-	if (l > 0x10000)
-		return NULL;
-	if (!l--)
-		/* header */
-		return (void*)1;
+	seq_puts(seq, "Personalities : ");
+	spin_lock(&pers_lock);
+	list_for_each_entry(pers, &pers_list, list)
+		seq_printf(seq, "[%s] ", pers->name);
+	spin_unlock(&pers_lock);
+	seq_puts(seq, "\n");
+	seq->poll_event = atomic_read(&md_event_count);
 	spin_lock(&all_mddevs_lock);
-	list_for_each(tmp,&all_mddevs)
-		if (!l--) {
-			mddev = list_entry(tmp, struct mddev, all_mddevs);
-			if (!mddev_get(mddev))
-				continue;
-			spin_unlock(&all_mddevs_lock);
-			return mddev;
-		}
-	spin_unlock(&all_mddevs_lock);
-	if (!l--)
-		return (void*)2;/* tail */
-	return NULL;
+	return seq_list_start(&all_mddevs, *pos);
 }
 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct list_head *tmp;
-	struct mddev *next_mddev, *mddev = v;
-	struct mddev *to_put = NULL;
-	++*pos;
-	if (v == (void*)2)
-		return NULL;
-	spin_lock(&all_mddevs_lock);
-	if (v == (void*)1) {
-		tmp = all_mddevs.next;
-	} else {
-		to_put = mddev;
-		tmp = mddev->all_mddevs.next;
-	}
-	for (;;) {
-		if (tmp == &all_mddevs) {
-			next_mddev = (void*)2;
-			*pos = 0x10000;
-			break;
-		}
-		next_mddev = list_entry(tmp, struct mddev, all_mddevs);
-		if (mddev_get(next_mddev))
-			break;
-		mddev = next_mddev;
-		tmp = mddev->all_mddevs.next;
-	}
-	spin_unlock(&all_mddevs_lock);
-	if (to_put)
-		mddev_put(to_put);
-	return next_mddev;
+	return seq_list_next(v, &all_mddevs, pos);
 }
 static void md_seq_stop(struct seq_file *seq, void *v)
+	__releases(&all_mddevs_lock)
 {
-	struct mddev *mddev = v;
-	if (mddev && v != (void*)1 && v != (void*)2)
-		mddev_put(mddev);
+	status_unused(seq);
+	spin_unlock(&all_mddevs_lock);
 }
 static int md_seq_show(struct seq_file *seq, void *v)
 {
-	struct mddev *mddev = v;
+	struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
 	sector_t sectors;
 	struct md_rdev *rdev;
-	if (v == (void*)1) {
-		struct md_personality *pers;
-		seq_printf(seq, "Personalities : ");
-		spin_lock(&pers_lock);
-		list_for_each_entry(pers, &pers_list, list)
-			seq_printf(seq, "[%s] ", pers->name);
-		spin_unlock(&pers_lock);
-		seq_printf(seq, "\n");
-		seq->poll_event = atomic_read(&md_event_count);
+	if (!mddev_get(mddev))
 		return 0;
-	}
-	if (v == (void*)2) {
-		status_unused(seq);
-		return 0;
-	}
+	spin_unlock(&all_mddevs_lock);
 	spin_lock(&mddev->lock);
 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
 		seq_printf(seq, "%s : %sactive", mdname(mddev),
@@ -8382,6 +8323,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "\n");
 	}
 	spin_unlock(&mddev->lock);
+	spin_lock(&all_mddevs_lock);
+	if (atomic_dec_and_test(&mddev->active))
+		__mddev_put(mddev);
 	return 0;
 }