From: Dave Airlie <airl...@redhat.com>

This isn't needed currently, but to reuse the sync file for Vulkan
permanent shared semaphore semantics, we need to be able to swap
the fence backing a sync file. This patch adds a mutex to the
sync file and uses it to protect accesses to the fence and cb members.

Signed-off-by: Dave Airlie <airl...@redhat.com>
---
 drivers/dma-buf/sync_file.c | 23 +++++++++++++++++++----
 include/linux/sync_file.h   |  3 +++
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 2321035..105f48c 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -47,6 +47,7 @@ static struct sync_file *sync_file_alloc(void)
 
        INIT_LIST_HEAD(&sync_file->cb.node);
 
+       mutex_init(&sync_file->lock);
        return sync_file;
 
 err:
@@ -204,10 +205,13 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
        if (!sync_file)
                return NULL;
 
+       mutex_lock(&a->lock);
+       mutex_lock(&b->lock);
        a_fences = get_fences(a, &a_num_fences);
        b_fences = get_fences(b, &b_num_fences);
-       if (a_num_fences > INT_MAX - b_num_fences)
-               return NULL;
+       if (a_num_fences > INT_MAX - b_num_fences) {
+               goto unlock;
+       }
 
        num_fences = a_num_fences + b_num_fences;
 
@@ -268,11 +272,17 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
                goto err;
        }
 
+       mutex_unlock(&b->lock);
+       mutex_unlock(&a->lock);
+
        strlcpy(sync_file->name, name, sizeof(sync_file->name));
        return sync_file;
 
 err:
        fput(sync_file->file);
+unlock:
+       mutex_unlock(&b->lock);
+       mutex_unlock(&a->lock);
        return NULL;
 
 }
@@ -299,16 +309,20 @@ static int sync_file_release(struct inode *inode, struct file *file)
 static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 {
        struct sync_file *sync_file = file->private_data;
+       unsigned int ret_val;
 
        poll_wait(file, &sync_file->wq, wait);
 
+       mutex_lock(&sync_file->lock);
        if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
                if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
                                           fence_check_cb_func) < 0)
                        wake_up_all(&sync_file->wq);
        }
+       ret_val = dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+       mutex_unlock(&sync_file->lock);
 
-       return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+       return ret_val;
 }
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -393,6 +407,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (info.flags || info.pad)
                return -EINVAL;
 
+       mutex_lock(&sync_file->lock);
        fences = get_fences(sync_file, &num_fences);
 
        /*
@@ -433,7 +448,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 out:
        kfree(fence_info);
-
+       mutex_unlock(&sync_file->lock);
        return ret;
 }
 
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 3e3ab84..5aef17f 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -30,6 +30,7 @@
  * @wq:                        wait queue for fence signaling
  * @fence:             fence with the fences in the sync_file
  * @cb:                        fence callback information
+ * @lock:               mutex to protect fence/cb - used for semaphores
  */
 struct sync_file {
        struct file             *file;
@@ -43,6 +44,8 @@ struct sync_file {
 
        struct dma_fence        *fence;
        struct dma_fence_cb cb;
+       /* protects the fence pointer and cb */
+       struct mutex lock;
 };
 
 #define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to