On Tue, Feb 24, 2026 at 10:30:37PM +0000, Long Li wrote:
> > diff --git a/drivers/infiniband/hw/mana/cq.c 
> > b/drivers/infiniband/hw/mana/cq.c
> > index 2dce1b677115..605122ecf9f9 100644
> > --- a/drivers/infiniband/hw/mana/cq.c
> > +++ b/drivers/infiniband/hw/mana/cq.c
> > @@ -5,8 +5,8 @@
> > 
> >  #include "mana_ib.h"
> > 
> > -int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr 
> > *attr,
> > -                 struct uverbs_attr_bundle *attrs)
> > +int mana_ib_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr
> > *attr,
> > +                      struct uverbs_attr_bundle *attrs)
> >  {
> >     struct ib_udata *udata = &attrs->driver_udata;
> >     struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
> > @@ -17,7 +17,6 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct
> > ib_cq_init_attr *attr,
> >     struct mana_ib_dev *mdev;
> >     bool is_rnic_cq;
> >     u32 doorbell;
> > -   u32 buf_size;
> >     int err;
> > 
> >     mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); @@ -26,44
> > +25,100 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct
> > ib_cq_init_attr *attr,
> >     cq->cq_handle = INVALID_MANA_HANDLE;
> >     is_rnic_cq = mana_ib_is_rnic(mdev);
> > 
> > -   if (udata) {
> > -           if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
> > -                   return -EINVAL;
> > +   if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
> > +           return -EINVAL;
> > 
> > -           err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd),
> > udata->inlen));
> > -           if (err) {
> > -                   ibdev_dbg(ibdev, "Failed to copy from udata for create
> > cq, %d\n", err);
> > -                   return err;
> > -           }
> > +   err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata-
> > >inlen));
> > +   if (err) {
> > +           ibdev_dbg(ibdev, "Failed to copy from udata for create
> > cq, %d\n", err);
> > +           return err;
> > +   }
> > 
> > -           if ((!is_rnic_cq && attr->cqe > mdev-
> > >adapter_caps.max_qp_wr) ||
> > -               attr->cqe > U32_MAX / COMP_ENTRY_SIZE) {
> > -                   ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr-
> > >cqe);
> > -                   return -EINVAL;
> > -           }
> > +   if ((!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) ||
> > +       attr->cqe > U32_MAX / COMP_ENTRY_SIZE) {
> > +           ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
> > +           return -EINVAL;
> > +   }
> > +
> > +   cq->cqe = attr->cqe;
> > +   if (!ibcq->umem)
> > +           ibcq->umem = ib_umem_get(ibdev, ucmd.buf_addr,
> > +                                cq->cqe * COMP_ENTRY_SIZE,
> > +                                IB_ACCESS_LOCAL_WRITE);
> > +   if (IS_ERR(ibcq->umem))
> > +           return PTR_ERR(ibcq->umem);
> > +   cq->queue.umem = ibcq->umem;
> > +
> > +   err = mana_ib_create_queue(mdev, &cq->queue);
> > +   if (err)
> > +           return err;
> 
> Should we call ib_umem_release() on this error path?

<...>

> >  err_destroy_queue:
> >     mana_ib_destroy_queue(mdev, &qp->raw_sq);
> > +   return err;
> 
> We should remove this "return err"; the error-handling code should fall through.

The main idea of this series is to allocate/release umem in the core logic.
See patch #5 
https://lore.kernel.org/linux-rdma/[email protected]/

> 
> > +
> > +err_release_umem:
> > +   ib_umem_release(qp->raw_sq.umem);
> > 
> >  err_free_vport:
> >     mana_ib_uncfg_vport(mdev, pd, port);
> > @@ -553,13 +566,25 @@ static int mana_ib_create_rc_qp(struct ib_qp *ibqp,
> > struct ib_pd *ibpd,
> >             if (i == MANA_RC_SEND_QUEUE_FMR) {
> >                     qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
> >                     qp->rc_qp.queues[i].gdma_region =
> > GDMA_INVALID_DMA_REGION;
> > +                   qp->rc_qp.queues[i].umem = NULL;
> >                     continue;
> >             }
> > -           err = mana_ib_create_queue(mdev, ucmd.queue_buf[j],
> > ucmd.queue_size[j],
> > -                                      &qp->rc_qp.queues[i]);
> > +           qp->rc_qp.queues[i].umem = ib_umem_get(&mdev->ib_dev,
> > +                                                  ucmd.queue_buf[j],
> > +                                                  ucmd.queue_size[j],
> > +
> > IB_ACCESS_LOCAL_WRITE);
> > +           if (IS_ERR(qp->rc_qp.queues[i].umem)) {
> > +                   err = PTR_ERR(qp->rc_qp.queues[i].umem);
> > +                   ibdev_err(&mdev->ib_dev, "Failed to get umem for
> > queue %d, err %d\n",
> > +                             i, err);
> > +                   goto release_umems;
> 
> mana_ib_create_queue() may already have created some queues; we need to clean
> them up, or we have a leak.
> 
> Maybe use destroy_queues: to call ib_umem_release()?

We should remove the mana_ib_create_rc_qp() hunk; it came from my future
work, where I removed umem from QPs as well.

Thanks

Reply via email to