Please reply to Mikulas's updated patch with your Reviewed-by: and possibly Tested-by:
Thanks,
Mike

On Tue, Sep 5, 2023, 10:16 PM Li Lingfeng <lilingfe...@huawei.com> wrote:
> Hi
>
> Thanks to Mikulas for the patch. I have tested the patch and it fixes
> the problem.
> Can this patch be applied to mainline?
>
> Thanks.
>
> On 2023/8/9 18:44, Mikulas Patocka wrote:
> >
> > On Tue, 8 Aug 2023, Mikulas Patocka wrote:
> >
> >> On Wed, 2 Aug 2023, Mike Snitzer wrote:
> >>
> >>> [Cc'ing Mikulas so he can take a look at this too.]
> >>
> >> Hi
> >>
> >> I suggest this patch (but it's only compile-tested, so please run some
> >> testsuite on it).
> >>
> >> Mikulas
> >
> > I self-nack that patch - it doesn't work if there are multiple targets
> > calling dm_table_set_devices_mutex in the same table. This is not an issue
> > for multipath, but it will be a problem if other targets use this
> > functionality.
> >
> > Here I'm sending a better patch that doesn't need any modification to the
> > targets at all.
> >
> > Mikulas
> >
> >
> >
> > From: Mikulas Patocka <mpatocka at redhat.com>
> > Subject: [PATCH] dm: fix a race condition in retrieve_deps
> >
> > There's a race condition in the multipath target when retrieve_deps races
> > with multipath_message calling dm_get_device and dm_put_device.
> > retrieve_deps walks the list of open devices without holding any lock, but
> > multipath may add devices to or remove devices from the list while it is
> > running. The end result may be memory corruption or use-after-free memory
> > access.
> >
> > Fix this bug by introducing a new rw semaphore "devices_lock". We grab
> > devices_lock for read in retrieve_deps and we grab it for write in
> > dm_get_device and dm_put_device.
> >
> > Signed-off-by: Mikulas Patocka <mpato...@redhat.com>
> > Cc: sta...@vger.kernel.org
> >
> > ---
> >  drivers/md/dm-core.h  |  1 +
> >  drivers/md/dm-ioctl.c |  7 ++++++-
> >  drivers/md/dm-table.c | 32 ++++++++++++++++++++++++--------
> >  3 files changed, 31 insertions(+), 9 deletions(-)
> >
> > Index: linux-2.6/drivers/md/dm-core.h
> > ===================================================================
> > --- linux-2.6.orig/drivers/md/dm-core.h
> > +++ linux-2.6/drivers/md/dm-core.h
> > @@ -214,6 +214,7 @@ struct dm_table {
> >
> >  	/* a list of devices used by this table */
> >  	struct list_head devices;
> > +	struct rw_semaphore devices_lock;
> >
> >  	/* events get handed up using this callback */
> >  	void (*event_fn)(void *data);
> >
> > Index: linux-2.6/drivers/md/dm-ioctl.c
> > ===================================================================
> > --- linux-2.6.orig/drivers/md/dm-ioctl.c
> > +++ linux-2.6/drivers/md/dm-ioctl.c
> > @@ -1630,6 +1630,8 @@ static void retrieve_deps(struct dm_tabl
> >  	struct dm_dev_internal *dd;
> >  	struct dm_target_deps *deps;
> >
> > +	down_read(&table->devices_lock);
> > +
> >  	deps = get_result_buffer(param, param_size, &len);
> >
> >  	/*
> > @@ -1644,7 +1646,7 @@ static void retrieve_deps(struct dm_tabl
> >  	needed = struct_size(deps, dev, count);
> >  	if (len < needed) {
> >  		param->flags |= DM_BUFFER_FULL_FLAG;
> > -		return;
> > +		goto out;
> >  	}
> >
> >  	/*
> > @@ -1656,6 +1658,9 @@ static void retrieve_deps(struct dm_tabl
> >  		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
> >
> >  	param->data_size = param->data_start + needed;
> > +
> > +out:
> > +	up_read(&table->devices_lock);
> >  }
> >
> >  static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
> >
> > Index: linux-2.6/drivers/md/dm-table.c
> > ===================================================================
> > --- linux-2.6.orig/drivers/md/dm-table.c
> > +++ linux-2.6/drivers/md/dm-table.c
> > @@ -135,6 +135,7 @@ int dm_table_create(struct dm_table **re
> >  		return -ENOMEM;
> >
> >  	INIT_LIST_HEAD(&t->devices);
> > +	init_rwsem(&t->devices_lock);
> >
> >  	if (!num_targets)
> >  		num_targets = KEYS_PER_NODE;
> > @@ -359,16 +360,20 @@ int __ref dm_get_device(struct dm_target
> >  	if (dev == disk_devt(t->md->disk))
> >  		return -EINVAL;
> >
> > +	down_write(&t->devices_lock);
> > +
> >  	dd = find_device(&t->devices, dev);
> >  	if (!dd) {
> >  		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
> > -		if (!dd)
> > -			return -ENOMEM;
> > +		if (!dd) {
> > +			r = -ENOMEM;
> > +			goto unlock_ret_r;
> > +		}
> >
> >  		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
> >  		if (r) {
> >  			kfree(dd);
> > -			return r;
> > +			goto unlock_ret_r;
> >  		}
> >
> >  		refcount_set(&dd->count, 1);
> > @@ -378,12 +383,17 @@ int __ref dm_get_device(struct dm_target
> >  	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
> >  		r = upgrade_mode(dd, mode, t->md);
> >  		if (r)
> > -			return r;
> > +			goto unlock_ret_r;
> >  	}
> >  	refcount_inc(&dd->count);
> >  out:
> > +	up_write(&t->devices_lock);
> >  	*result = dd->dm_dev;
> >  	return 0;
> > +
> > +unlock_ret_r:
> > +	up_write(&t->devices_lock);
> > +	return r;
> >  }
> >  EXPORT_SYMBOL(dm_get_device);
> >
> > @@ -419,9 +429,12 @@ static int dm_set_device_limits(struct d
> >  void dm_put_device(struct dm_target *ti, struct dm_dev *d)
> >  {
> >  	int found = 0;
> > -	struct list_head *devices = &ti->table->devices;
> > +	struct dm_table *t = ti->table;
> > +	struct list_head *devices = &t->devices;
> >  	struct dm_dev_internal *dd;
> >
> > +	down_write(&t->devices_lock);
> > +
> >  	list_for_each_entry(dd, devices, list) {
> >  		if (dd->dm_dev == d) {
> >  			found = 1;
> > @@ -430,14 +443,17 @@ void dm_put_device(struct dm_target *ti,
> >  	}
> >  	if (!found) {
> >  		DMERR("%s: device %s not in table devices list",
> > -		      dm_device_name(ti->table->md), d->name);
> > -		return;
> > +		      dm_device_name(t->md), d->name);
> > +		goto unlock_ret;
> >  	}
> >  	if (refcount_dec_and_test(&dd->count)) {
> > -		dm_put_table_device(ti->table->md, d);
> > +		dm_put_table_device(t->md, d);
> >  		list_del(&dd->list);
> >  		kfree(dd);
> >  	}
> > +
> > +unlock_ret:
> > +	up_write(&t->devices_lock);
> >  }
> >  EXPORT_SYMBOL(dm_put_device);
> >
>
> --
> dm-devel mailing list
> dm-devel@redhat.com
> https://listman.redhat.com/mailman/listinfo/dm-devel
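For anyone who wants to see the locking pattern from the patch above in isolation, here is a minimal, self-contained userspace sketch. It is illustrative only: it uses POSIX pthread_rwlock_t as a stand-in for the kernel's rw_semaphore, and none of the names below come from the patch. Readers walk the list under the shared lock (as retrieve_deps does under down_read), writers add entries under the exclusive lock (as dm_get_device/dm_put_device do under down_write), so a reader can never observe the list mid-update.

/*
 * Illustrative userspace analogue of the devices_lock scheme.
 * pthread_rwlock_t stands in for the kernel rw_semaphore; the list
 * and helper names are made up for this sketch.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_entry {
        int dev;
        struct dev_entry *next;
};

static struct dev_entry *devices;       /* list head */
static pthread_rwlock_t devices_lock = PTHREAD_RWLOCK_INITIALIZER;

/* read side, like retrieve_deps(): walk the list under the shared lock */
static void list_devices(void)
{
        pthread_rwlock_rdlock(&devices_lock);
        for (struct dev_entry *e = devices; e; e = e->next)
                printf("dev %d\n", e->dev);
        pthread_rwlock_unlock(&devices_lock);
}

/* write side, like dm_get_device(): modify the list under the exclusive lock */
static int add_device(int dev)
{
        struct dev_entry *e = malloc(sizeof(*e));

        if (!e)
                return -1;
        e->dev = dev;
        pthread_rwlock_wrlock(&devices_lock);
        e->next = devices;
        devices = e;
        pthread_rwlock_unlock(&devices_lock);
        return 0;
}

int main(void)
{
        add_device(1);
        add_device(2);
        list_devices();
        return 0;
}

(Compile with "gcc -pthread"; this only demonstrates the read-vs-write split, not DM's actual data structures.)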
--
dm-devel mailing list
dm-devel@redhat.com
https://listman.redhat.com/mailman/listinfo/dm-devel