When using Mesa and LibVA in the same process, one would like to be
able to bind buffers from the output of the decoder to a GL texture
through an EGLImage.

LibVA can reuse buffers allocated by GBM through a file descriptor. It
will then wrap the buffer in a drm_intel_bo with
drm_intel_bo_gem_create_from_prime().

Given that both libraries use libdrm to allocate and use buffer
objects, the buffer objects need to be properly refcounted. That is
possible if both APIs use the same drm_intel_bo objects, but that also
requires that both APIs use the same drm_intel_bufmgr object.

This patch modifies drm_intel_bufmgr_gem_init() so given a file
descriptor, it will look for an already existing drm_intel_bufmgr
using the same file descriptor and return that object.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
---
 intel/intel_bufmgr_gem.c | 100 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 88 insertions(+), 12 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 0e1cb0d..125c81c 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket {
 typedef struct _drm_intel_bufmgr_gem {
        drm_intel_bufmgr bufmgr;

+       atomic_t refcount;
+
        int fd;

        int max_relocs;
@@ -3186,6 +3188,85 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo 
*bo,
        bo_gem->aub_annotation_count = count;
 }

+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drm_intel_bufmgr_gem **bufmgr_list = NULL;
+static unsigned bufmgr_list_size = 0, bufmgr_list_nb = 0;
+
+/**
+ * Looks up an existing bufmgr for @fd or allocates a new one.
+ *
+ * On return, *found is 1 when an existing bufmgr was returned (its
+ * refcount has been incremented), and 0 when a freshly allocated,
+ * partially initialized bufmgr is returned with its lock held; the
+ * caller must then finish initialization and release the lock.
+ * Returns NULL on allocation failure.
+ */
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find_or_create_for_fd(int fd, int *found)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = NULL;
+	unsigned i;
+
+	/* Do not wrap the lock call in assert(): the side effect would be
+	 * compiled out under NDEBUG, leaving the mutex unlocked. */
+	pthread_mutex_lock(&bufmgr_list_mutex);
+
+	for (i = 0; i < bufmgr_list_nb; i++) {
+		if (bufmgr_list[i]->fd == fd) {
+			bufmgr_gem = bufmgr_list[i];
+			atomic_inc(&bufmgr_gem->refcount);
+			*found = 1;
+			goto exit;
+		}
+	}
+
+	/* Grow the list before allocating the new bufmgr so that a
+	 * realloc failure cannot leak it.  Note the size passed to
+	 * realloc() must be in bytes, not in elements. */
+	if (bufmgr_list_nb >= bufmgr_list_size) {
+		unsigned new_size = bufmgr_list_size ? 2 * bufmgr_list_size : 2;
+		drm_intel_bufmgr_gem **new_list =
+			realloc(bufmgr_list, new_size * sizeof(*bufmgr_list));
+		if (new_list == NULL)
+			goto exit;
+		bufmgr_list = new_list;
+		bufmgr_list_size = new_size;
+	}
+
+	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+	if (bufmgr_gem == NULL)
+		goto exit;
+
+	bufmgr_gem->fd = fd;
+	atomic_set(&bufmgr_gem->refcount, 1);
+
+	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+		free(bufmgr_gem);
+		bufmgr_gem = NULL;
+		goto exit;
+	}
+
+	bufmgr_list[bufmgr_list_nb++] = bufmgr_gem;
+
+	/* Hand the new bufmgr back with its lock held so the caller can
+	 * finish initialization before releasing it. */
+	pthread_mutex_lock(&bufmgr_gem->lock);
+
+	*found = 0;
+
+exit:
+	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	return bufmgr_gem;
+}
+
+/**
+ * Drops a reference on @bufmgr; once the last reference is gone the
+ * bufmgr is removed from the global list and destroyed.
+ *
+ * The list mutex is taken before decrementing the refcount so that a
+ * concurrent drm_intel_bufmgr_gem_find_or_create_for_fd() cannot pick
+ * up a bufmgr whose refcount has already reached zero.
+ */
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	pthread_mutex_lock(&bufmgr_list_mutex);
+
+	if (!atomic_dec_and_test(&bufmgr_gem->refcount)) {
+		pthread_mutex_unlock(&bufmgr_list_mutex);
+		return;
+	}
+
+	/* Remove the bufmgr from the list, compacting the tail. */
+	{
+		unsigned i;
+
+		for (i = 0; i < bufmgr_list_nb; i++) {
+			if (bufmgr_list[i] == bufmgr_gem) {
+				bufmgr_list_nb--;
+				for (; i < bufmgr_list_nb; i++)
+					bufmgr_list[i] = bufmgr_list[i + 1];
+				break;
+			}
+		}
+	}
+
+	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	drm_intel_bufmgr_gem_destroy(bufmgr);
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
@@ -3201,16 +3282,9 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
        int ret, tmp;
        bool exec2 = false;

-       bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
-       if (bufmgr_gem == NULL)
-               return NULL;
-
-       bufmgr_gem->fd = fd;
-
-       if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
-               free(bufmgr_gem);
-               return NULL;
-       }
+	bufmgr_gem = drm_intel_bufmgr_gem_find_or_create_for_fd(fd, &ret);
+	if (bufmgr_gem == NULL)
+		return NULL;
+	if (ret)
+		return &bufmgr_gem->bufmgr;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_GET_APERTURE,
@@ -3245,7 +3319,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
        else if (IS_GEN8(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 8;
        else {
-               free(bufmgr_gem);
+               drm_intel_bufmgr_gem_unref(&bufmgr_gem->bufmgr);
                return NULL;
        }

@@ -3357,7 +3431,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
                bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
        bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
        bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
-       bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+       bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
        bufmgr_gem->bufmgr.debug = 0;
        bufmgr_gem->bufmgr.check_aperture_space =
            drm_intel_gem_check_aperture_space;
@@ -3373,5 +3447,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
        DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
        bufmgr_gem->vma_max = -1; /* unlimited by default */

+        pthread_mutex_unlock(&bufmgr_gem->lock);
+
        return &bufmgr_gem->bufmgr;
 }
-- 
2.0.1

Reply via email to