From: Leon Romanovsky <[email protected]>

The .invalidate_mappings() callback is documented as optional, yet it
effectively became mandatory whenever importer_ops were provided. This
led to cases where RDMA non-ODP code had to supply an empty stub.
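As a rough sketch of the resulting semantics (the struct name below is
illustrative, not taken from this patch; the real one in the diff is
ib_umem_dmabuf_attach_pinned_ops): a pinned importer can register
dynamic attach ops with .invalidate_mappings left NULL, and the core
simply skips such attachments when broadcasting invalidations:

  /* importer side: no stub needed once the core check is relaxed */
  static struct dma_buf_attach_ops example_pinned_attach_ops = {
          .allow_peer2peer = true,
          /* .invalidate_mappings deliberately NULL: memory stays pinned */
  };

  /* core side, mirroring the dma_buf_invalidate_mappings() hunk below */
  list_for_each_entry(attach, &dmabuf->attachments, node)
          if (attach->importer_ops &&
              attach->importer_ops->invalidate_mappings)
                  attach->importer_ops->invalidate_mappings(attach);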
Relax the checks in the dma-buf core so the callback can be omitted,
allowing RDMA code to drop the unnecessary function. Removing the stub
lets the next patch detect that RDMA does not support
.invalidate_mappings() by checking for a NULL op.

Signed-off-by: Leon Romanovsky <[email protected]>
---
 drivers/dma-buf/dma-buf.c             |  6 ++----
 drivers/infiniband/core/umem_dmabuf.c | 13 -------------
 2 files changed, 2 insertions(+), 17 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cd68c1c0bfd7..1629312d364a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -947,9 +947,6 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	if (WARN_ON(!dmabuf || !dev))
 		return ERR_PTR(-EINVAL);
 
-	if (WARN_ON(importer_ops && !importer_ops->invalidate_mappings))
-		return ERR_PTR(-EINVAL);
-
 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 	if (!attach)
 		return ERR_PTR(-ENOMEM);
@@ -1260,7 +1257,8 @@ void dma_buf_invalidate_mappings(struct dma_buf *dmabuf)
 	dma_resv_assert_held(dmabuf->resv);
 
 	list_for_each_entry(attach, &dmabuf->attachments, node)
-		if (attach->importer_ops)
+		if (attach->importer_ops &&
+		    attach->importer_ops->invalidate_mappings)
 			attach->importer_ops->invalidate_mappings(attach);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_invalidate_mappings, "DMA_BUF");
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index d77a739cfe7a..256e34c15e6b 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -129,9 +129,6 @@ ib_umem_dmabuf_get_with_dma_device(struct ib_device *device,
 	if (check_add_overflow(offset, (unsigned long)size, &end))
 		return ret;
 
-	if (unlikely(!ops || !ops->invalidate_mappings))
-		return ret;
-
 	dmabuf = dma_buf_get(fd);
 	if (IS_ERR(dmabuf))
 		return ERR_CAST(dmabuf);
@@ -184,18 +181,8 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_umem_dmabuf_get);
 
-static void
-ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
-{
-	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
-
-	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
-			       "Invalidate callback should not be called when memory is pinned\n");
-}
-
 static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
 	.allow_peer2peer = true,
-	.invalidate_mappings = ib_umem_dmabuf_unsupported_move_notify,
 };
 
 struct ib_umem_dmabuf *
-- 
2.52.0
