On 4/17/19 5:44 AM, Ming Lei wrote:
In the normal queue cleanup path, hctx is released after the request
queue is freed; see blk_mq_release().

However, in __blk_mq_update_nr_hw_queues(), an hctx may be freed when
the number of hw queues shrinks. This easily leads to use-after-free,
because one implicit rule is that it is safe to call almost all block
layer APIs while the request queue is alive; an hctx may be retrieved
by one such API, then freed by blk_mq_update_nr_hw_queues(), and
finally the use-after-free triggers when the stale hctx is dereferenced.
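To make the window concrete, a sketch of the race (the left-hand side
is a hypothetical caller of any block layer API that looks up an hctx
while the queue is alive; the right-hand call chain is the update path
touched below):

  CPU0: API caller                    CPU1: nr_hw_queues update
  ----------------                    -------------------------
  hctx = q->queue_hw_ctx[i];
                                      blk_mq_realloc_hw_ctxs()
                                        blk_mq_exit_hctx(q, set, hctx, i)
                                        kobject_put(&hctx->kobj)
                                          /* last ref: hctx is freed */
  use hctx->...                       /* use-after-free */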

Fix this issue by always freeing hctx after the request queue is
released. If some hctxs are removed in blk_mq_update_nr_hw_queues(),
introduce a per-queue list to hold them, then try to reuse these
hctxs when a later allocation's NUMA node matches.
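The reuse scheme itself, sketched as a self-contained userspace model
(struct names here are made up for illustration; the real kernel code
is in the diff below):

#include <stdlib.h>

/* minimal stand-ins for blk_mq_hw_ctx and request_queue */
struct hctx {
        int numa_node;
        struct hctx *next;
};

struct queue {
        struct hctx *dead_list;         /* models q->dead_hctx_list */
};

static struct hctx *alloc_hctx(struct queue *q, int node)
{
        struct hctx **pp;
        struct hctx *h;

        /* reuse a dead hctx first, if its NUMA node matches */
        for (pp = &q->dead_list; *pp; pp = &(*pp)->next) {
                if ((*pp)->numa_node == node) {
                        h = *pp;
                        *pp = h->next;  /* unlink from the dead list */
                        return h;
                }
        }

        /* otherwise fall back to a fresh allocation */
        h = malloc(sizeof(*h));
        if (h)
                h->numa_node = node;
        return h;
}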

Cc: Dongli Zhang <dongli.zh...@oracle.com>
Cc: James Smart <james.sm...@broadcom.com>
Cc: Bart Van Assche <bart.vanass...@wdc.com>
Cc: linux-scsi@vger.kernel.org
Cc: Martin K. Petersen <martin.peter...@oracle.com>
Cc: Christoph Hellwig <h...@lst.de>
Cc: James E. J. Bottomley <j...@linux.vnet.ibm.com>
Cc: jianchao wang <jianchao.w.w...@oracle.com>
Signed-off-by: Ming Lei <ming....@redhat.com>
---
  block/blk-mq.c         | 40 +++++++++++++++++++++++++++-------------
  include/linux/blk-mq.h |  2 ++
  include/linux/blkdev.h |  7 +++++++
  3 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index eeebba6ec0f7..2ca4395f794d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2274,6 +2274,10 @@ static void blk_mq_exit_hctx(struct request_queue *q,
                set->ops->exit_hctx(hctx, hctx_idx);

        blk_mq_remove_cpuhp(hctx);
+
+       spin_lock(&q->dead_hctx_lock);
+       list_add(&hctx->hctx_list, &q->dead_hctx_list);
+       spin_unlock(&q->dead_hctx_lock);
  }

static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -2675,15 +2679,13 @@ static int blk_mq_alloc_ctxs(struct request_queue *q)
   */
  void blk_mq_release(struct request_queue *q)
  {
-       struct blk_mq_hw_ctx *hctx;
-       unsigned int i;
+       struct blk_mq_hw_ctx *hctx, *next;
        cancel_delayed_work_sync(&q->requeue_work);

-       /* hctx kobj stays in hctx */
-       queue_for_each_hw_ctx(q, hctx, i) {
-               if (!hctx)
-                       continue;
+       /* all hctx are in .dead_hctx_list now */
+       list_for_each_entry_safe(hctx, next, &q->dead_hctx_list, hctx_list) {
+               list_del_init(&hctx->hctx_list);
                kobject_put(&hctx->kobj);
        }
@@ -2750,9 +2752,22 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
                struct blk_mq_tag_set *set, struct request_queue *q,
                int hctx_idx, int node)
  {
-       struct blk_mq_hw_ctx *hctx;
+       struct blk_mq_hw_ctx *hctx = NULL, *tmp;
+
+       /* reuse dead hctx first */
+       spin_lock(&q->dead_hctx_lock);
+       list_for_each_entry(tmp, &q->dead_hctx_list, hctx_list) {
+               if (tmp->numa_node == node) {
+                       hctx = tmp;
+                       break;
+               }
+       }
+       if (hctx)
+               list_del_init(&hctx->hctx_list);
+       spin_unlock(&q->dead_hctx_lock);

-       hctx = blk_mq_alloc_hctx(q, set, hctx_idx, node);
+       if (!hctx)
+               hctx = blk_mq_alloc_hctx(q, set, hctx_idx, node);
        if (!hctx)
                goto fail;
@@ -2790,10 +2805,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
                if (hctx) {
-                       if (hctxs[i]) {
+                       if (hctxs[i])
                                blk_mq_exit_hctx(q, set, hctxs[i], i);
-                               kobject_put(&hctxs[i]->kobj);
-                       }
                        hctxs[i] = hctx;
                } else {
                        if (hctxs[i])
@@ -2824,9 +2837,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                        if (hctx->tags)
                                blk_mq_free_map_and_requests(set, j);
                        blk_mq_exit_hctx(q, set, hctx, j);
-                       kobject_put(&hctx->kobj);
                        hctxs[j] = NULL;
-
                }
        }
        mutex_unlock(&q->sysfs_lock);
@@ -2869,6 +2880,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (!q->queue_hw_ctx)
                goto err_sys_init;

+       INIT_LIST_HEAD(&q->dead_hctx_list);
+       spin_lock_init(&q->dead_hctx_lock);
+
        blk_mq_realloc_hw_ctxs(set, q);
        if (!q->nr_hw_queues)
                goto err_hctxs;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index db29928de467..15d1aa53d96c 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -70,6 +70,8 @@ struct blk_mq_hw_ctx {
        struct dentry           *sched_debugfs_dir;
  #endif

+       struct list_head        hctx_list;
+
        /* Must be the last member - see also blk_mq_hw_ctx_size(). */
        struct srcu_struct      srcu[0];
  };
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4b85dc066264..1325f941f0be 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -535,6 +535,13 @@ struct request_queue {
        struct mutex            sysfs_lock;

+       /*
+        * for reusing dead hctx instance in case of updating
+        * nr_hw_queues
+        */
+       struct list_head        dead_hctx_list;
+       spinlock_t              dead_hctx_lock;
+
        atomic_t                mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)

Hmm.
I don't particularly like this approach.
The much saner approach would be to avoid having I/O in flight in the first place by setting the queue to something other than live, no?
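Something along these lines, say (just the shape of the idea, built on
the existing freeze/quiesce helpers; untested, with the actual hw queue
reallocation elided):

        blk_mq_freeze_queue(q);         /* drain: wait out in-flight requests */
        blk_mq_quiesce_queue(q);        /* block new dispatches into any hctx */

        /* ... shrink/grow hw queues, free unused hctxs ... */

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);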

Cheers,

Hannes
--
Dr. Hannes Reinecke            Teamlead Storage & Networking
h...@suse.de                              +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Mary Higgins, Sri Rasiah
HRB 21284 (AG Nürnberg)
