From 852b91efc183e9b87ac50f15fb4df86f26f73532 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen <[email protected]> Date: Mon, 27 Jun 2022 23:54:24 +0200 Subject: [PATCH] dm-raid: fix array out of bounds (OOB)
Supersedes "[PATCH] dm-raid: fix out of memory accesses in dm-raid" On RAID mapping construction, dm-raid allocates an array rs->dev[rs->raid_disks] for the raid device members. rs->raid_disks is defined by the number of raid metadata and image tuples passed into the target's constructor. That number can be different from the current number of members for existing raid sets as defined in their superblocks in case of layout changes requested: - raid1 legs being added/removed - raid4/5/6/10 number of stripes changed (stripe reshaping) - takeover to higher raid level (e.g. raid5 -> raid6) Accessing array members, rs->raid_disks has to be used in loops instead of potentially larger rs->md.raid_disks causing OOB access on the devices array. Patch changes instances prone to OOB. Also ensures added devices are validated in validate_raid_redundancy(). Initially discovered by KASAN. Passes all LVM2 RAID tests (KASAN enabled). Signed-off-by: Heinz Mauelshagen <[email protected]> Tested-by: Heinz Mauelshagen <[email protected]> --- drivers/md/dm-raid.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2b26435a6946..bcec0e1a049d 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) static int validate_raid_redundancy(struct raid_set *rs) { unsigned int i, rebuild_cnt = 0; - unsigned int rebuilds_per_group = 0, copies; + unsigned int rebuilds_per_group = 0, copies, raid_disks; unsigned int group_size, last_group_start; - for (i = 0; i < rs->md.raid_disks; i++) - if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || - !rs->dev[i].rdev.sb_page) + for (i = 0; i < rs->raid_disks; i++) + if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) && + ((!test_bit(In_sync, &rs->dev[i].rdev.flags) || + !rs->dev[i].rdev.sb_page))) rebuild_cnt++; switch (rs->md.level) { @@ -1046,8 +1047,9 @@ static int 
validate_raid_redundancy(struct raid_set *rs) * A A B B C * C D D E E */ + raid_disks = min(rs->raid_disks, rs->md.raid_disks); if (__is_raid10_near(rs->md.new_layout)) { - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs) * results in the need to treat the last (potentially larger) * set differently. */ - group_size = (rs->md.raid_disks / copies); - last_group_start = (rs->md.raid_disks / group_size) - 1; + group_size = (raid_disks / copies); + last_group_start = (raid_disks / group_size) - 1; last_group_start *= group_size; - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies) && !(i > last_group_start)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs) { int i; - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < rs->raid_disks; i++) { struct md_rdev *rdev = &rs->dev[i].rdev; if (!test_bit(Journal, &rdev->flags) && @@ -3771,7 +3773,7 @@ static int raid_iterate_devices(struct dm_target *ti, unsigned int i; int r = 0; - for (i = 0; !r && i < rs->md.raid_disks; i++) + for (i = 0; !r && i < rs->raid_disks; i++) if (rs->dev[i].data_dev) r = fn(ti, rs->dev[i].data_dev, -- 2.36.1 On Mon, Jun 27, 2022 at 3:56 PM Mikulas Patocka <[email protected]> wrote: > dm-raid allocates the array of devices with rs->raid_disks entries and > then accesses it in a loop for rs->md.raid_disks. During reshaping, > rs->md.raid_disks may be greater than rs->raid_disks, so it accesses > entries beyond the end of the array. > > We fix this bug by limiting the iteration to rs->raid_disks. > > The bug is triggered when running lvm test shell/lvconvert-raid.sh and the > kernel is compiled with kasan. 
> > Signed-off-by: Mikulas Patocka <[email protected]> > Cc: [email protected] > > --- > drivers/md/dm-raid.c | 12 ++++++------ > 1 file changed, 6 insertions(+), 6 deletions(-) > > Index: linux-2.6/drivers/md/dm-raid.c > =================================================================== > --- linux-2.6.orig/drivers/md/dm-raid.c 2022-06-27 15:44:12.000000000 +0200 > +++ linux-2.6/drivers/md/dm-raid.c 2022-06-27 15:44:12.000000000 +0200 > @@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru > unsigned int rebuilds_per_group = 0, copies; > unsigned int group_size, last_group_start; > > - for (i = 0; i < rs->md.raid_disks; i++) > + for (i = 0; i < rs->raid_disks; i++) > if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || > !rs->dev[i].rdev.sb_page) > rebuild_cnt++; > @@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru > * C D D E E > */ > if (__is_raid10_near(rs->md.new_layout)) { > - for (i = 0; i < rs->md.raid_disks; i++) { > + for (i = 0; i < rs->raid_disks; i++) { > if (!(i % copies)) > rebuilds_per_group = 0; > if ((!rs->dev[i].rdev.sb_page || > @@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru > group_size = (rs->md.raid_disks / copies); > last_group_start = (rs->md.raid_disks / group_size) - 1; > last_group_start *= group_size; > - for (i = 0; i < rs->md.raid_disks; i++) { > + for (i = 0; i < rs->raid_disks; i++) { > if (!(i % copies) && !(i > last_group_start)) > rebuilds_per_group = 0; > if ((!rs->dev[i].rdev.sb_page || > @@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra > { > int i; > > - for (i = 0; i < rs->md.raid_disks; i++) { > + for (i = 0; i < rs->raid_disks; i++) { > struct md_rdev *rdev = &rs->dev[i].rdev; > > if (!test_bit(Journal, &rdev->flags) && > @@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d > unsigned int i; > int r = 0; > > - for (i = 0; !r && i < rs->md.raid_disks; i++) > + for (i = 0; !r && i < rs->raid_disks; i++) > if (rs->dev[i].data_dev) > r = fn(ti, > rs->dev[i].data_dev, > 
@@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de > > memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices)); > > - for (i = 0; i < mddev->raid_disks; i++) { > + for (i = 0; i < rs->raid_disks; i++) { > r = &rs->dev[i].rdev; > /* HM FIXME: enhance journal device recovery processing */ > if (test_bit(Journal, &r->flags)) > >
-- dm-devel mailing list [email protected] https://listman.redhat.com/mailman/listinfo/dm-devel
