dm-raid allocates the array of devices with rs->raid_disks entries and
then accesses it in loops bounded by rs->md.raid_disks. During reshaping,
rs->md.raid_disks may be greater than rs->raid_disks, so the loops access
entries beyond the end of the array.

Fix this bug by limiting the iteration to rs->raid_disks, the actual
size of the allocated array.

The bug is triggered when running the LVM test shell/lvconvert-raid.sh on
a kernel compiled with KASAN.

Signed-off-by: Mikulas Patocka <[email protected]>
Cc: [email protected]

---
 drivers/md/dm-raid.c |   12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

Index: linux-2.6/drivers/md/dm-raid.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-raid.c 2022-06-27 15:44:12.000000000 +0200
+++ linux-2.6/drivers/md/dm-raid.c      2022-06-27 15:44:12.000000000 +0200
@@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru
        unsigned int rebuilds_per_group = 0, copies;
        unsigned int group_size, last_group_start;
 
-       for (i = 0; i < rs->md.raid_disks; i++)
+       for (i = 0; i < rs->raid_disks; i++)
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
                    !rs->dev[i].rdev.sb_page)
                        rebuild_cnt++;
@@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru
                 *          C    D    D    E    E
                 */
                if (__is_raid10_near(rs->md.new_layout)) {
-                       for (i = 0; i < rs->md.raid_disks; i++) {
+                       for (i = 0; i < rs->raid_disks; i++) {
                                if (!(i % copies))
                                        rebuilds_per_group = 0;
                                if ((!rs->dev[i].rdev.sb_page ||
@@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru
                group_size = (rs->md.raid_disks / copies);
                last_group_start = (rs->md.raid_disks / group_size) - 1;
                last_group_start *= group_size;
-               for (i = 0; i < rs->md.raid_disks; i++) {
+               for (i = 0; i < rs->raid_disks; i++) {
                        if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
                        if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra
 {
        int i;
 
-       for (i = 0; i < rs->md.raid_disks; i++) {
+       for (i = 0; i < rs->raid_disks; i++) {
                struct md_rdev *rdev = &rs->dev[i].rdev;
 
                if (!test_bit(Journal, &rdev->flags) &&
@@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d
        unsigned int i;
        int r = 0;
 
-       for (i = 0; !r && i < rs->md.raid_disks; i++)
+       for (i = 0; !r && i < rs->raid_disks; i++)
                if (rs->dev[i].data_dev)
                        r = fn(ti,
                                 rs->dev[i].data_dev,
@@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de
 
        memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
 
-       for (i = 0; i < mddev->raid_disks; i++) {
+       for (i = 0; i < rs->raid_disks; i++) {
                r = &rs->dev[i].rdev;
                /* HM FIXME: enhance journal device recovery processing */
                if (test_bit(Journal, &r->flags))

--
dm-devel mailing list
[email protected]
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to