To support driver-allocated vIOMMU objects, an IOMMU driver is required to
call the provided iommufd_viommu_alloc helper to embed the core struct in
its own object. However, there is no guarantee that every driver will call
it and allocate objects properly.
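
For reference, the driver side is expected to embed the core struct and go
through the helper roughly as in the sketch below; the structure, field
names, and the helper's argument list here are illustrative assumptions,
not taken from an existing driver:

	/* Illustrative driver object embedding the iommufd core struct */
	struct my_viommu {
		struct iommufd_viommu core;	/* embedded core struct */
		u32 vmid;			/* driver-private state */
	};

	struct my_viommu *my_viommu;

	/* Hypothetical call site; the real macro arguments may differ */
	my_viommu = iommufd_viommu_alloc(ictx, parent_domain,
					 struct my_viommu, core);
	if (IS_ERR(my_viommu))
		return PTR_ERR(my_viommu);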
Make the iommufd_object_finalize/abort functions more robust by verifying
that the xarray slot indexed by the input obj->id holds an XA_ZERO_ENTRY,
which is the reserved value stored by xa_alloc via iommufd_object_alloc.
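
The value being checked comes from the reservation done at allocation time.
A minimal sketch of that reserve-then-publish pattern is below; the GFP
flags and id limit shown are assumptions, not a quote of the existing code:

	struct iommufd_object *obj;
	int rc;

	/*
	 * Reserve an ID but do not publish the object pointer yet: the slot
	 * keeps XA_ZERO_ENTRY until iommufd_object_finalize() stores obj, or
	 * iommufd_object_abort() clears the slot and frees obj.
	 */
	rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY,
		      xa_limit_31b, GFP_KERNEL);
	if (rc)
		return rc;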

Suggested-by: Jason Gunthorpe <j...@nvidia.com>
Signed-off-by: Nicolin Chen <nicol...@nvidia.com>
---
 drivers/iommu/iommufd/main.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 3c32b440471b..30e6c2af3b45 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -41,20 +41,26 @@ static struct miscdevice vfio_misc_dev;
 void iommufd_object_finalize(struct iommufd_ctx *ictx,
                             struct iommufd_object *obj)
 {
+       XA_STATE(xas, &ictx->objects, obj->id);
        void *old;
 
-       old = xa_store(&ictx->objects, obj->id, obj, GFP_KERNEL);
-       /* obj->id was returned from xa_alloc() so the xa_store() cannot fail */
-       WARN_ON(old);
+       xa_lock(&ictx->objects);
+       old = xas_store(&xas, obj);
+       xa_unlock(&ictx->objects);
+       /* obj->id was returned from xa_alloc() so the xas_store() cannot fail */
+       WARN_ON(old != XA_ZERO_ENTRY);
 }
 
 /* Undo _iommufd_object_alloc() if iommufd_object_finalize() was not called */
 void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
 {
+       XA_STATE(xas, &ictx->objects, obj->id);
        void *old;
 
-       old = xa_erase(&ictx->objects, obj->id);
-       WARN_ON(old);
+       xa_lock(&ictx->objects);
+       old = xas_store(&xas, NULL);
+       xa_unlock(&ictx->objects);
+       WARN_ON(old != XA_ZERO_ENTRY);
        kfree(obj);
 }
 
-- 
2.43.0

