From: Jason Gunthorpe <j...@mellanox.com>

commit e8dc4e885c459343970b25acd9320fe9ee5492e7 upstream.

xa_alloc_cyclic() is an SMP release, paired with a later acquire in the
xa_load() done by cm_acquire_id().

As such, xa_alloc_cyclic() must be done after the cm_id is fully
initialized; in particular, it must come after the refcount_set(),
otherwise the refcount_inc() in cm_acquire_id() may not see the
initialized count.

Since there are several cases where a reader can use id.local_id after
cm_acquire_id() while the cm_id is still in the IB_CM_IDLE state, the
allocation unfortunately has to be split into a NULL allocation followed
by a finalizing xa_store().
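
The intended pairing looks roughly like the following sketch (simplified;
the cm_acquire_id() side is paraphrased for illustration and is not part
of this patch):

	/* ib_create_cm_id(): initialize the object first ... */
	atomic_set(&cm_id_priv->refcount, 1);

	/* ... reserve an ID without publishing the pointer yet ... */
	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
				  &cm.local_id_next, GFP_KERNEL);

	/* ... and only then publish it; this store is the SMP release. */
	xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		     cm_id_priv, GFP_KERNEL);

	/* cm_acquire_id(): xa_load() is the paired acquire, so the
	 * refcount initialization above is guaranteed to be visible
	 * before the increment.
	 */
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (cm_id_priv)
		atomic_inc(&cm_id_priv->refcount);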

Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation")
Link: https://lore.kernel.org/r/20200310092545.251365-2-l...@kernel.org
Signed-off-by: Leon Romanovsky <leo...@mellanox.com>
Signed-off-by: Jason Gunthorpe <j...@mellanox.com>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>

---
 drivers/infiniband/core/cm.c |   27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -597,18 +597,6 @@ static int cm_init_av_by_path(struct sa_
        return 0;
 }
 
-static int cm_alloc_id(struct cm_id_private *cm_id_priv)
-{
-       int err;
-       u32 id;
-
-       err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
-                       xa_limit_32b, &cm.local_id_next, GFP_KERNEL);
-
-       cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
-       return err;
-}
-
 static u32 cm_local_id(__be32 local_id)
 {
        return (__force u32) (local_id ^ cm.random_id_operand);
@@ -862,6 +850,7 @@ struct ib_cm_id *ib_create_cm_id(struct
                                 void *context)
 {
        struct cm_id_private *cm_id_priv;
+       u32 id;
        int ret;
 
        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
@@ -873,9 +862,6 @@ struct ib_cm_id *ib_create_cm_id(struct
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
-       ret = cm_alloc_id(cm_id_priv);
-       if (ret)
-               goto error;
 
        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
@@ -884,11 +870,20 @@ struct ib_cm_id *ib_create_cm_id(struct
        INIT_LIST_HEAD(&cm_id_priv->altr_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
+
+       ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
+                                 &cm.local_id_next, GFP_KERNEL);
+       if (ret)
+               goto error;
+       cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
+       xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+                    cm_id_priv, GFP_KERNEL);
+
        return &cm_id_priv->id;
 
 error:
        kfree(cm_id_priv);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(ib_create_cm_id);
 