From: John Harrison <john.c.harri...@intel.com>

Updated __wait_seqno() to take a request structure instead of a ring/seqno pair, and renamed it __wait_request() to match. The request is queried internally for its ring and seqno as required, so callers no longer need to extract and pass those values themselves.

For: VIZ-4377
Signed-off-by: john.c.harri...@intel.com
---
 drivers/gpu/drm/i915/i915_gem.c |   57 +++++++++++++++++----------------------
 1 file changed, 24 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d4657e7..f1e64d8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1133,10 +1133,9 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 }
 
 /**
- * __wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @reset_counter: reset sequence associated with the given seqno
+ * __wait_request - wait until execution of request has finished
+ * @req: the request to wait upon
+ * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1147,15 +1146,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
  * inserted.
  *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * Returns 0 if the request was found within the allotted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
-                       unsigned reset_counter,
-                       bool interruptible,
-                       s64 *timeout,
-                       struct drm_i915_file_private *file_priv)
+static int __wait_request(struct drm_i915_gem_request *req,
+                         unsigned reset_counter,
+                         bool interruptible,
+                         s64 *timeout,
+                         struct drm_i915_file_private *file_priv)
 {
+       struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
@@ -1167,7 +1167,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-       if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+       if (i915_gem_request_completed(req, true))
                return 0;
 
        timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
@@ -1184,7 +1184,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                return -ENODEV;
 
        /* Record current time in case interrupted by signal, or wedged */
-       trace_i915_gem_request_wait_begin(ring, seqno);
+       trace_i915_gem_request_wait_begin(i915_gem_request_get_ring(req), i915_gem_request_get_seqno(req));
        before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;
@@ -1203,7 +1203,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        break;
                }
 
-               if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+               if (i915_gem_request_completed(req, false)) {
                        ret = 0;
                        break;
                }
@@ -1235,7 +1235,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                }
        }
        now = ktime_get_raw_ns();
-       trace_i915_gem_request_wait_end(ring, seqno);
+       trace_i915_gem_request_wait_end(i915_gem_request_get_ring(req), i915_gem_request_get_seqno(req));
 
        if (!irq_test_in_progress)
                ring->irq_put(ring);
@@ -1279,9 +1279,9 @@ i915_wait_request(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       return __wait_seqno(req->ring, i915_gem_request_get_seqno(req),
-                           atomic_read(&dev_priv->gpu_error.reset_counter),
-                           interruptible, NULL, NULL);
+       return __wait_request(req,
+                             atomic_read(&dev_priv->gpu_error.reset_counter),
+                             interruptible, NULL, NULL);
 }
 
 static int
@@ -1335,7 +1335,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        struct drm_i915_gem_request *req;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = obj->ring;
        unsigned reset_counter;
        int ret;
 
@@ -1357,7 +1356,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
-       ret = __wait_seqno(ring, i915_gem_request_get_seqno(req), reset_counter, true, NULL, file_priv);
+       ret = __wait_request(req, reset_counter, true, NULL, file_priv);
        mutex_lock(&dev->struct_mutex);
        i915_gem_request_unreference(req);
        if (ret)
@@ -2816,9 +2815,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
        struct drm_i915_gem_request *req;
-       struct intel_engine_cs *ring = NULL;
        unsigned reset_counter;
-       u32 seqno = 0;
        int ret = 0;
 
        ret = i915_mutex_lock_interruptible(dev);
@@ -2840,9 +2837,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto out;
 
        req = obj->last_read_req;
-       seqno = i915_gem_request_get_seqno(req);
-       BUG_ON(seqno == 0);
-       ring = obj->ring;
 
        /* Do this after OLR check to make sure we make forward progress polling
         * on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2857,8 +2851,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
-                          file->driver_priv);
+       ret = __wait_request(req, reset_counter, true, &args->timeout_ns,
+                            file->driver_priv);
        mutex_lock(&dev->struct_mutex);
        i915_gem_request_unreference(req);
        mutex_unlock(&dev->struct_mutex);
@@ -4053,10 +4047,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
-       struct drm_i915_gem_request *request;
-       struct intel_engine_cs *ring = NULL;
+       struct drm_i915_gem_request *request, *target = NULL;
        unsigned reset_counter;
-       u32 seqno = 0;
        int ret;
 
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -4072,16 +4064,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
-               ring = request->ring;
-               seqno = request->seqno;
+               target = request;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        spin_unlock(&file_priv->mm.lock);
 
-       if (seqno == 0)
+       if (target == NULL)
                return 0;
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+       ret = __wait_request(target, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
-- 
1.7.9.5
