Merge tag 'for-5.19/dm-fixes-5' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 "Three fixes for invalid memory accesses discovered by using KASAN
  while running the lvm2 testsuite's dm-raid tests. Includes changes to
  MD's raid5.c given the dependency dm-raid has on the MD code"

* tag 'for-5.19/dm-fixes-5' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm raid: fix KASAN warning in raid5_add_disks
  dm raid: fix KASAN warning in raid5_remove_disk
  dm raid: fix accesses beyond end of raid member array
commit 8300d38030
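All three fixes close reads past the end of a heap-allocated array, the pattern KASAN reports as slab-out-of-bounds. As a rough userspace analogue (hypothetical code, not from the commit; AddressSanitizer plays the role KASAN plays in-kernel):

/* Hypothetical userspace analogue of the bug class fixed below:
 * the array holds `allocated` members, but a stale, larger count
 * left over from a reshape drives the loop.
 * Build with: gcc -fsanitize=address demo.c && ./a.out
 */
#include <stdio.h>
#include <stdlib.h>

struct member { int in_sync; };

int main(void)
{
	unsigned int allocated = 4;   /* like rs->raid_disks (array size) */
	unsigned int logical = 6;     /* like rs->md.raid_disks mid-reshape */
	struct member *dev = calloc(allocated, sizeof(*dev));
	unsigned int i, rebuild_cnt = 0;

	/* Buggy: the loop bound exceeds the allocation; ASan reports a
	 * heap-buffer-overflow on dev[4], just as KASAN flagged the
	 * equivalent reads of rs->dev[] in dm-raid.c. */
	for (i = 0; i < logical; i++)
		if (!dev[i].in_sync)
			rebuild_cnt++;

	printf("rebuild_cnt = %u\n", rebuild_cnt);
	free(dev);
	return 0;
}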
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 static int validate_raid_redundancy(struct raid_set *rs)
 {
 	unsigned int i, rebuild_cnt = 0;
-	unsigned int rebuilds_per_group = 0, copies;
+	unsigned int rebuilds_per_group = 0, copies, raid_disks;
 	unsigned int group_size, last_group_start;
 
-	for (i = 0; i < rs->md.raid_disks; i++)
-		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
-		    !rs->dev[i].rdev.sb_page)
+	for (i = 0; i < rs->raid_disks; i++)
+		if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
+		    ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+		      !rs->dev[i].rdev.sb_page)))
 			rebuild_cnt++;
 
 	switch (rs->md.level) {
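The rework above changes two things: the loop now runs to rs->raid_disks, the count the rs->dev[] array was actually allocated for (rs->md.raid_disks can be larger during a reshape), and freshly added devices (FirstUse) no longer count toward rebuild_cnt. A minimal sketch of the new predicate, with the kernel's bitops replaced by plain bools (the struct and names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-device state read in the hunk. */
struct dev_state {
	bool first_use;  /* test_bit(FirstUse, ...) */
	bool in_sync;    /* test_bit(In_sync, ...) */
	bool has_sb;     /* rdev.sb_page != NULL */
};

/* Mirrors the new condition: a device contributes to rebuild_cnt only
 * if it is not freshly added AND (out of sync OR missing a superblock). */
static bool counts_as_rebuild(const struct dev_state *d)
{
	return !d->first_use && (!d->in_sync || !d->has_sb);
}

int main(void)
{
	struct dev_state fresh    = { .first_use = true,  .in_sync = false, .has_sb = false };
	struct dev_state degraded = { .first_use = false, .in_sync = false, .has_sb = true };

	printf("fresh: %d, degraded: %d\n",
	       counts_as_rebuild(&fresh), counts_as_rebuild(&degraded));
	return 0;  /* prints "fresh: 0, degraded: 1" */
}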
@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
 			 * A    A    B    B    C
 			 * C    D    D    E    E
 			 */
+			raid_disks = min(rs->raid_disks, rs->md.raid_disks);
 			if (__is_raid10_near(rs->md.new_layout)) {
-				for (i = 0; i < rs->md.raid_disks; i++) {
+				for (i = 0; i < raid_disks; i++) {
 					if (!(i % copies))
 						rebuilds_per_group = 0;
 					if ((!rs->dev[i].rdev.sb_page ||
@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
 			 * results in the need to treat the last (potentially larger)
 			 * set differently.
 			 */
-			group_size = (rs->md.raid_disks / copies);
-			last_group_start = (rs->md.raid_disks / group_size) - 1;
+			group_size = (raid_disks / copies);
+			last_group_start = (raid_disks / group_size) - 1;
 			last_group_start *= group_size;
-			for (i = 0; i < rs->md.raid_disks; i++) {
+			for (i = 0; i < raid_disks; i++) {
 				if (!(i % copies) && !(i > last_group_start))
 					rebuilds_per_group = 0;
 				if ((!rs->dev[i].rdev.sb_page ||
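The far/offset group arithmetic is easiest to verify with concrete numbers. A standalone sketch using assumed sample values (5 members, 2 copies; not taken from the commit), computing the same quantities that the raid_disks clamp from the previous hunk feeds in:

#include <stdio.h>

int main(void)
{
	/* Sample values, assumed for illustration only. */
	unsigned int raid_disks = 5, copies = 2;
	unsigned int group_size = raid_disks / copies;                 /* 5/2 = 2 */
	unsigned int last_group_start = (raid_disks / group_size) - 1; /* 5/2 - 1 = 1 */

	last_group_start *= group_size;                                /* 1*2 = 2 */

	/* The last set absorbs the remainder: indices 2..4 form a group
	 * of 3, which is why the loop resets rebuilds_per_group only
	 * while i <= last_group_start. */
	printf("group_size=%u last_group_start=%u\n", group_size, last_group_start);
	return 0;
}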
@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
 {
 	int i;
 
-	for (i = 0; i < rs->md.raid_disks; i++) {
+	for (i = 0; i < rs->raid_disks; i++) {
 		struct md_rdev *rdev = &rs->dev[i].rdev;
 
 		if (!test_bit(Journal, &rdev->flags) &&
@@ -3766,13 +3768,13 @@ static int raid_iterate_devices(struct dm_target *ti,
 	unsigned int i;
 	int r = 0;
 
-	for (i = 0; !r && i < rs->md.raid_disks; i++)
-		if (rs->dev[i].data_dev)
-			r = fn(ti,
-			       rs->dev[i].data_dev,
-			       0, /* No offset on data devs */
-			       rs->md.dev_sectors,
-			       data);
+	for (i = 0; !r && i < rs->raid_disks; i++) {
+		if (rs->dev[i].data_dev) {
+			r = fn(ti, rs->dev[i].data_dev,
+			       0, /* No offset on data devs */
+			       rs->md.dev_sectors, data);
+		}
+	}
 
 	return r;
 }
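raid_iterate_devices hands each present data device to the caller-supplied callback fn and stops at the first nonzero return, which the !r guard in the loop condition implements. A userspace analogue of that contract (all names invented; dm's real callback also receives the target, a start sector, and a length):

#include <stdio.h>

typedef int (*dev_fn)(int dev_index, void *data);

/* Iterate like the hunk above: visit each present device, stop as soon
 * as the callback reports an error (nonzero), and propagate that value. */
static int iterate_devices(const int *present, int n, dev_fn fn, void *data)
{
	int i, r = 0;

	for (i = 0; !r && i < n; i++) {
		if (present[i])
			r = fn(i, data);
	}
	return r;
}

static int count_cb(int dev_index, void *data)
{
	(void)dev_index;
	(*(int *)data)++;
	return 0;  /* 0 = keep iterating, matching the callout convention */
}

int main(void)
{
	int present[] = { 1, 0, 1, 1 };
	int count = 0;

	iterate_devices(present, 4, count_cb, &count);
	printf("visited %d devices\n", count);  /* visited 3 devices */
	return 0;
}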
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7933,7 +7933,7 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 	int err = 0;
 	int number = rdev->raid_disk;
 	struct md_rdev __rcu **rdevp;
-	struct disk_info *p = conf->disks + number;
+	struct disk_info *p;
 	struct md_rdev *tmp;
 
 	print_raid5_conf(conf);
@@ -7952,6 +7952,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 		log_exit(conf);
 		return 0;
 	}
+	if (unlikely(number >= conf->pool_size))
+		return 0;
+	p = conf->disks + number;
 	if (rdev == rcu_access_pointer(p->rdev))
 		rdevp = &p->rdev;
 	else if (rdev == rcu_access_pointer(p->replacement))
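The two raid5_remove_disk hunks make one ordering change: p = conf->disks + number is now formed only after number has been checked against conf->pool_size, the length conf->disks was actually allocated with, so a stale raid_disk index from dm-raid can no longer index past the pool. The same check-then-point pattern in a userspace sketch (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct disk_info { int rdev; };

/* Hypothetical lookup mirroring the fixed ordering: validate the index
 * against the allocated pool size first, form the pointer second. */
static struct disk_info *disk_lookup(struct disk_info *disks,
				     unsigned int pool_size, int number)
{
	if (number < 0 || (unsigned int)number >= pool_size)
		return NULL;  /* out of range: no pointer is ever formed */
	return disks + number;
}

int main(void)
{
	unsigned int pool_size = 4;
	struct disk_info *disks = calloc(pool_size, sizeof(*disks));

	printf("in range:    %p\n", (void *)disk_lookup(disks, pool_size, 2));
	printf("beyond pool: %p\n", (void *)disk_lookup(disks, pool_size, 7));
	free(disks);
	return 0;
}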
@@ -8062,6 +8065,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	 */
 	if (rdev->saved_raid_disk >= 0 &&
 	    rdev->saved_raid_disk >= first &&
+	    rdev->saved_raid_disk <= last &&
 	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
 		first = rdev->saved_raid_disk;
 
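The raid5_add_disk fix completes a range check: saved_raid_disk is used as an index into conf->disks[], so it must fall inside the inclusive [first, last] slot window before it is trusted; previously only the lower bound was tested. A one-function sketch of the corrected predicate (illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* A previously used slot may only be reused if it lies inside the
 * inclusive [first, last] window of valid slots, mirroring the
 * condition completed by the hunk above. */
static bool can_reuse_slot(int saved, int first, int last, bool slot_empty)
{
	return saved >= 0 && saved >= first && saved <= last && slot_empty;
}

int main(void)
{
	/* Sample window 0..3 (assumed values): slot 2 is fine; slot 7 is
	 * the out-of-bounds index KASAN caught before the <= last test. */
	printf("slot 2: %d\n", can_reuse_slot(2, 0, 3, true));  /* 1 */
	printf("slot 7: %d\n", can_reuse_slot(7, 0, 3, true));  /* 0 */
	return 0;
}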