Assume that pages may be pinned and populated by a background task, and
use a completion event to synchronise with callers that must access the
pages immediately: the existing inline helper becomes
i915_gem_object_pin_pages_async(), while a new synchronous
i915_gem_object_pin_pages() waits on obj->mm.completion (signalled by
__i915_gem_object_set_pages()) before reporting success.
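
A minimal caller-side sketch of the intended pattern (illustration
only; example_pin_and_wait() is a hypothetical helper that mirrors the
new synchronous wrapper added below):

	static int example_pin_and_wait(struct drm_i915_gem_object *obj)
	{
		int err;

		/* Take a pin; the backing store may be populated later. */
		err = i915_gem_object_pin_pages_async(obj);
		if (err)
			return err;

		/* Wait for the backing store to be set; __i915_gem_object_set_pages()
		 * signals obj->mm.completion.
		 */
		err = wait_for_completion_interruptible(&obj->mm.completion);
		if (err)
			goto err_unpin;

		/* The pin may have completed with an error pointer; propagate it. */
		if (IS_ERR(obj->mm.pages)) {
			err = PTR_ERR(obj->mm.pages);
			goto err_unpin;
		}

		return 0;

	err_unpin:
		__i915_gem_object_unpin_pages(obj);
		return err;
	}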

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.a...@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  1 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  7 +--
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  3 ++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     | 53 +++++++++++++++----
 4 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 5f75f69687e8..ee69cd7948c0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -65,6 +65,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);
+       init_completion(&obj->mm.completion);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 67d70d144bd9..7ea8013d108f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -234,7 +234,7 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+i915_gem_object_pin_pages_async(struct drm_i915_gem_object *obj)
 {
        might_lock(&obj->mm.lock);
 
@@ -244,6 +244,9 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
        return __i915_gem_object_get_pages(obj);
 }
 
+int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj);
+
 static inline bool
 i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
 {
@@ -267,9 +270,7 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
-       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
        atomic_dec(&obj->mm.pages_pin_count);
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e87fca4d8194..8f61d7a93078 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -7,6 +7,7 @@
 #ifndef __I915_GEM_OBJECT_TYPES_H__
 #define __I915_GEM_OBJECT_TYPES_H__
 
+#include <linux/completion.h>
 #include <linux/reservation.h>
 
 #include <drm/drm_gem.h>
@@ -210,6 +211,8 @@ struct drm_i915_gem_object {
                 */
                struct list_head link;
 
+               struct completion completion;
+
                /**
                 * Advice: are the backing pages purgeable?
                 */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b36ad269f4ea..6bec301cee79 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -73,21 +73,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
+
+       complete_all(&obj->mm.completion);
 }
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       int err;
-
        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                DRM_DEBUG("Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }
 
-       err = obj->ops->get_pages(obj);
-       GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
-
-       return err;
+       return obj->ops->get_pages(obj);
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -105,7 +102,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (err)
                return err;
 
-       if (unlikely(!i915_gem_object_has_pages(obj))) {
+       if (!obj->mm.pages) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
                err = ____i915_gem_object_get_pages(obj);
@@ -121,6 +118,32 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return err;
 }
 
+int i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       err = i915_gem_object_pin_pages_async(obj);
+       if (err)
+               return err;
+
+       err = wait_for_completion_interruptible(&obj->mm.completion);
+       if (err)
+               goto err_unpin;
+
+       if (IS_ERR(obj->mm.pages)) {
+               err = PTR_ERR(obj->mm.pages);
+               goto err_unpin;
+       }
+
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+       return 0;
+
+err_unpin:
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       atomic_dec(&obj->mm.pages_pin_count);
+       return err;
+}
+
 /* Immediately discard the backing storage */
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
@@ -201,6 +224,9 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
        GEM_BUG_ON(atomic_read(&obj->bind_count));
 
+       if (obj->mm.pages == ERR_PTR(-EAGAIN))
+               wait_for_completion(&obj->mm.completion);
+
        /* May be called by shrinker from within get_pages() (on another bo) */
        mutex_lock_nested(&obj->mm.lock, subclass);
        if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
@@ -227,6 +253,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);
 
+       reinit_completion(&obj->mm.completion);
        err = 0;
 unlock:
        mutex_unlock(&obj->mm.lock);
@@ -304,7 +331,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        type &= ~I915_MAP_OVERRIDE;
 
        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-               if (unlikely(!i915_gem_object_has_pages(obj))) {
+               if (!obj->mm.pages) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
                        err = ____i915_gem_object_get_pages(obj);
@@ -316,7 +343,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
-       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
@@ -334,6 +360,15 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        }
 
        if (!ptr) {
+               err = wait_for_completion_interruptible(&obj->mm.completion);
+               if (err)
+                       goto err_unpin;
+
+               if (IS_ERR(obj->mm.pages)) {
+                       err = PTR_ERR(obj->mm.pages);
+                       goto err_unpin;
+               }
+
                ptr = i915_gem_object_map(obj, type);
                if (!ptr) {
                        err = -ENOMEM;
-- 
2.20.1
