This adds a new mode to map gem objects in a non-blocking way. It needs
to be enabled on a per-object basis with
drm_intel_gem_bo_enable_nonblocking_map.

The new kernel interface required to query the caching level/coherency
is not yet wired up. All the code to transparently choose between gtt
mappings and (if coherent) cpu mappings is already in place, though.

Cc: Eric Anholt <e...@anholt.net>
Cc: Ben Widawsky <b...@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vet...@ffwll.ch>
---
Hi Eric,

Not really tested, but can you please take a quick look and see whether this
is suitable for mesa and ack the general approach?
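
In case it helps review, the intended usage from mesa would look roughly
like the sketch below (untested; the function, the buffer name and the
data/size parameters are just placeholders):

#include <string.h>
#include "intel_bufmgr.h"

/* Untested usage sketch of the new non-blocking map mode. */
static void
upload_nonblocking(drm_intel_bufmgr *bufmgr, const void *data,
                   unsigned long size)
{
        drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "upload", size, 4096);

        if (bo == NULL)
                return;

        /* One-time setup per object: moves it to the gtt domain and
         * disables the other (blocking) map paths for it. */
        if (drm_intel_gem_bo_enable_nonblocking_map(bo) != 0)
                goto out;

        /* Transparently picks the cpu mmap when coherent, the gtt mmap
         * otherwise, without stalling on the gpu. */
        if (drm_intel_gem_bo_map_nonblocking(bo) == 0) {
                memcpy(bo->virtual, data, size);
                drm_intel_gem_bo_unmap_nonblocking(bo);
        }

out:
        drm_intel_bo_unreference(bo);
}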

Thanks, Daniel

 intel/intel_bufmgr.h     |    4 +
 intel/intel_bufmgr_gem.c |  165 +++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 152 insertions(+), 17 deletions(-)
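
For the TODO about llc caching: once the kernel side lands, the coherency
query could look roughly like this. DRM_IOCTL_I915_GEM_GET_CACHING, struct
drm_i915_gem_caching and I915_CACHING_CACHED are assumptions here, that
interface doesn't exist yet:

static int
cpu_mmap_is_gpu_coherent(drm_intel_bufmgr_gem *bufmgr_gem,
                         drm_intel_bo_gem *bo_gem)
{
        struct drm_i915_gem_caching arg;        /* assumed ioctl payload */

        memset(&arg, 0, sizeof(arg));
        arg.handle = bo_gem->gem_handle;

        /* Hypothetical get-caching ioctl; be conservative and fall back
         * to the gtt mmap if the kernel doesn't support it. */
        if (drmIoctl(bufmgr_gem->fd,
                     DRM_IOCTL_I915_GEM_GET_CACHING, &arg) != 0)
                return 0;

        /* llc-cached objects keep cpu mmaps coherent with the gpu. */
        return arg.caching == I915_CACHING_CACHED;
}

drm_intel_gem_bo_enable_nonblocking_map() would then set
gpu_coherent_cpu_mmap from this instead of hardcoding it to 0.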

diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index 889ef46..95216dd 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -149,6 +149,10 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
 void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
 
+int drm_intel_gem_bo_enable_nonblocking_map(drm_intel_bo *bo);
+int drm_intel_gem_bo_map_nonblocking(drm_intel_bo *bo);
+int drm_intel_gem_bo_unmap_nonblocking(drm_intel_bo *bo);
+
 int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
 
 int drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total);
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 4f4de92..124d372 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -141,6 +141,9 @@ struct _drm_intel_bo_gem {
        uint32_t swizzle_mode;
        unsigned long stride;
 
+       unsigned nonblocking_mmap : 1;          /* non-blocking mmap enabled */
+       unsigned gpu_coherent_cpu_mmap : 1;     /* cpu mmap coherent with gpu */
+
        time_t free_time;
 
        /** Array passed to the DRM containing relocation information. */
@@ -937,6 +940,7 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
        }
        bo_gem->reloc_count = 0;
        bo_gem->used_as_reloc_target = 0;
+       bo_gem->nonblocking_mmap = 0;
 
        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);
@@ -998,15 +1002,11 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
        }
 }
 
-static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
+static int do_mmap_cpu(drm_intel_bufmgr_gem *bufmgr_gem,
+                      drm_intel_bo_gem *bo_gem)
 {
-       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-       struct drm_i915_gem_set_domain set_domain;
        int ret;
 
-       pthread_mutex_lock(&bufmgr_gem->lock);
-
        /* Allow recursive mapping. Mesa may recursively map buffers with
         * nested display loops.
         */
@@ -1018,7 +1018,7 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
                memset(&mmap_arg, 0, sizeof(mmap_arg));
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.offset = 0;
-               mmap_arg.size = bo->size;
+               mmap_arg.size = bo_gem->bo.size;
                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_MMAP,
                               &mmap_arg);
@@ -1027,11 +1027,28 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__, bo_gem->gem_handle,
                            bo_gem->name, strerror(errno));
-                       pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }
                bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
        }
+       return 0;
+}
+
+static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_domain set_domain;
+       int ret;
+
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       assert(!bo_gem->nonblocking_mmap);
+
+       ret = do_mmap_cpu(bufmgr_gem, bo_gem);
+       if (ret != 0) {
+               pthread_mutex_unlock(&bufmgr_gem->lock);
+               return ret;
+       }
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->mem_virtual);
        bo->virtual = bo_gem->mem_virtual;
@@ -1056,15 +1073,11 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
        return 0;
 }
 
-int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+static int do_mmap_gtt(drm_intel_bufmgr_gem *bufmgr_gem,
+                      drm_intel_bo_gem *bo_gem)
 {
-       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-       struct drm_i915_gem_set_domain set_domain;
        int ret;
 
-       pthread_mutex_lock(&bufmgr_gem->lock);
-
        /* Get a mapping of the buffer if we haven't before. */
        if (bo_gem->gtt_virtual == NULL) {
                struct drm_i915_gem_mmap_gtt mmap_arg;
@@ -1085,12 +1098,11 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
                            __FILE__, __LINE__,
                            bo_gem->gem_handle, bo_gem->name,
                            strerror(errno));
-                       pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }
 
                /* and mmap it */
-               bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+               bo_gem->gtt_virtual = mmap(0, bo_gem->bo.size, PROT_READ | PROT_WRITE,
                                           MAP_SHARED, bufmgr_gem->fd,
                                           mmap_arg.offset);
                if (bo_gem->gtt_virtual == MAP_FAILED) {
@@ -1100,11 +1112,29 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
                            __FILE__, __LINE__,
                            bo_gem->gem_handle, bo_gem->name,
                            strerror(errno));
-                       pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }
        }
 
+       return 0;
+}
+
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_domain set_domain;
+       int ret;
+
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       assert(!bo_gem->nonblocking_mmap);
+
+       ret = do_mmap_gtt(bufmgr_gem, bo_gem);
+       if (ret != 0) {
+               pthread_mutex_unlock(&bufmgr_gem->lock);
+               return ret;
+       }
+
        bo->virtual = bo_gem->gtt_virtual;
 
        DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
@@ -1284,6 +1314,105 @@ drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
        }
 }
 
+/**
+ * Enable non-blocking mmap on this object.
+ *
+ * The object must not be tiled and cannot be shared via flink. The other mmap
+ * functions are disabled for such objects.
+ */
+int
+drm_intel_gem_bo_enable_nonblocking_map(drm_intel_bo *bo)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_domain set_domain;
+       int ret;
+
+       assert(bo_gem->tiling_mode == I915_TILING_NONE);
+       assert(bo_gem->global_name == 0);
+
+       /* Move object to the gtt domain _once_. That's the right thing even when
+        * using cpu mmaps, because we'll be using them only when they're fully
+        * coherent with the gtt mappings. */
+       set_domain.handle = bo_gem->gem_handle;
+       set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+       set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                      &set_domain);
+       if (ret != 0) {
+               DBG("%s:%d: Error setting domain %d: %s\n",
+                   __FILE__, __LINE__, bo_gem->gem_handle,
+                   strerror(errno));
+               return ret;
+       }
+
+       bo_gem->nonblocking_mmap = 1;
+       /* TODO: ask kernel about llc caching */
+       bo_gem->gpu_coherent_cpu_mmap = 0;
+
+       return 0;
+}
+
+/**
+ * Map an object in non-blocking mode.
+ *
+ * This automagically chooses either the gtt mapping or, if coherent and hence
+ * faster, the cpu mapping.
+ */
+int drm_intel_gem_bo_map_nonblocking(drm_intel_bo *bo)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int ret;
+
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       assert(bo_gem->nonblocking_mmap);
+
+       if (bo_gem->gpu_coherent_cpu_mmap) {
+               ret = do_mmap_cpu(bufmgr_gem, bo_gem);
+               if (ret != 0) {
+                       pthread_mutex_unlock(&bufmgr_gem->lock);
+                       return ret;
+               }
+
+               bo->virtual = bo_gem->mem_virtual;
+       } else {
+               ret = do_mmap_gtt(bufmgr_gem, bo_gem);
+               if (ret != 0) {
+                       pthread_mutex_unlock(&bufmgr_gem->lock);
+                       return ret;
+               }
+
+               bo->virtual = bo_gem->gtt_virtual;
+       }
+
+       DBG("bo_map_nonblocking: %d (%s) -> %p\n", bo_gem->gem_handle, 
bo_gem->name,
+           bo_gem->gtt_virtual);
+
+       pthread_mutex_unlock(&bufmgr_gem->lock);
+
+       return 0;
+}
+
+/**
+ * Unmap an object mapped in non-blocking mode.
+ */
+int drm_intel_gem_bo_unmap_nonblocking(drm_intel_bo *bo)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem;
+
+       if (bo == NULL)
+               return 0;
+       bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       bo->virtual = NULL;
+       pthread_mutex_unlock(&bufmgr_gem->lock);
+
+       return 0;
+}
+
 static void
 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
 {
@@ -1790,6 +1919,8 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
        struct drm_gem_flink flink;
        int ret;
 
+       assert(!bo_gem->nonblocking_mmap);
+
        if (!bo_gem->global_name) {
                memset(&flink, 0, sizeof(flink));
                flink.handle = bo_gem->gem_handle;
-- 
1.7.6.2
