We should check pmd->fail_io before using pmd->data_sm, since pmd->data_sm
may already have been destroyed by another process.

       P1(kworker)                             P2(message)
do_worker
 process_prepared
  process_prepared_discard_passdown_pt2
   dm_pool_dec_data_range
                                    pool_message
                                     commit
                                      dm_pool_commit_metadata
                                        ↓
                                       // commit failed
                                      metadata_operation_failed
                                       abort_transaction
                                        dm_pool_abort_metadata
                                         dm_block_manager_create
                                           ↓
                                          // create failed
                                         __destroy_persistent_data_objects
                                          dm_sm_destroy(pmd->data_sm)
                                            ↓
                                           // free data_sm
    dm_sm_dec_blocks
      ↓
     // try to access pmd->data_sm --> UAF

As shown above, if dm_pool_commit_metadata() and dm_block_manager_create()
fail in the pool_message path, the kworker may trigger a use-after-free (UAF)
on pmd->data_sm.
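
For illustration only, here is a minimal user-space sketch of the guard
pattern the patch below applies: take the metadata lock, test a fail_io
flag, and only then dereference the space-map pointer that the abort path
may have freed. This is not the kernel code itself; the struct, the
fake_sm_get_count() helper and the pthread rwlock are stand-ins for
pmd->root_lock, dm_sm_get_count() and the real space map. Build with
-pthread.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_metadata {
	pthread_rwlock_t root_lock;
	bool fail_io;		/* set once a metadata operation has failed */
	int *data_sm;		/* stand-in for the data space map object */
};

/* Stand-in for dm_sm_get_count(): report the block's reference count. */
static int fake_sm_get_count(int *sm, unsigned int *ref_count)
{
	*ref_count = (unsigned int)*sm;
	return 0;
}

/* Mirrors dm_pool_block_is_shared() after the patch: data_sm is only
 * dereferenced while fail_io is clear and the lock is held.
 */
static int block_is_shared(struct fake_metadata *md, bool *result)
{
	int r = -EINVAL;
	unsigned int ref_count;

	pthread_rwlock_rdlock(&md->root_lock);
	if (!md->fail_io) {
		r = fake_sm_get_count(md->data_sm, &ref_count);
		if (!r)
			*result = (ref_count > 1);
	}
	pthread_rwlock_unlock(&md->root_lock);

	return r;
}

int main(void)
{
	int refs = 2;
	struct fake_metadata md = {
		.root_lock = PTHREAD_RWLOCK_INITIALIZER,
		.fail_io = false,
		.data_sm = &refs,
	};
	bool shared = false;
	int r;

	r = block_is_shared(&md, &shared);
	printf("ok path:   r=%d shared=%d\n", r, shared);

	/* Simulate abort_transaction tearing down the space map. */
	md.fail_io = true;
	md.data_sm = NULL;
	r = block_is_shared(&md, &shared);
	printf("fail path: r=%d (data_sm never touched)\n", r);

	return 0;
}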

Signed-off-by: Li Lingfeng <lilingfe...@huawei.com>
---
 drivers/md/dm-thin-metadata.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 9f5cb52c5763..b9461faa9f0d 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1756,13 +1756,15 @@ int dm_thin_remove_range(struct dm_thin_device *td,
 
 int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
-       int r;
+       int r = -EINVAL;
        uint32_t ref_count;
 
        down_read(&pmd->root_lock);
-       r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
-       if (!r)
-               *result = (ref_count > 1);
+       if (!pmd->fail_io) {
+               r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+               if (!r)
+                       *result = (ref_count > 1);
+       }
        up_read(&pmd->root_lock);
 
        return r;
@@ -1770,10 +1772,11 @@ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *re
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
 {
-       int r = 0;
+       int r = -EINVAL;
 
        pmd_write_lock(pmd);
-       r = dm_sm_inc_blocks(pmd->data_sm, b, e);
+       if (!pmd->fail_io)
+               r = dm_sm_inc_blocks(pmd->data_sm, b, e);
        pmd_write_unlock(pmd);
 
        return r;
@@ -1781,10 +1784,11 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
 
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
 {
-       int r = 0;
+       int r = -EINVAL;
 
        pmd_write_lock(pmd);
-       r = dm_sm_dec_blocks(pmd->data_sm, b, e);
+       if (!pmd->fail_io)
+               r = dm_sm_dec_blocks(pmd->data_sm, b, e);
        pmd_write_unlock(pmd);
 
        return r;
-- 
2.31.1
