[  196.988204] clocksource: timekeeping watchdog: Marking clocksource 'tsc' as unstable because the skew is too large:
[  196.988512] clocksource:                       'refined-jiffies' wd_now: ffff9b48 wd_last: ffff9acb mask: ffffffff
[  196.988559] clocksource:                       'tsc' cs_now: 4fcfa84354 cs_last: 4f95425e98 mask: ffffffffffffffff
[  196.992115] clocksource: Switched to clocksource refined-jiffies

Followed by a hard lockup.

The likely culprit is the amount of work the execlists interrupt handler
does in hard-irq context: draining the context-status buffer while
holding forcewake and the uncore spinlock keeps interrupts off long
enough for the clocksource watchdog to trip (and, eventually, for the
machine to lock up). Move that processing out of the interrupt handler
and into a dedicated real-time kernel thread; the irq handler now only
wakes the thread.
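
The new submission path is the standard kthread wait loop; a minimal
sketch of the pattern (helper names below are illustrative, not the
i915 ones):

	static int worker_fn(void *arg)
	{
		struct engine *e = arg;	/* hypothetical per-engine state */

		do {
			/* Declare our sleep intent before checking for
			 * work, so a concurrent wake_up_process() from
			 * the irq handler cannot be missed. */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!have_work(e)) {	/* hypothetical check */
				if (kthread_should_stop()) {
					__set_current_state(TASK_RUNNING);
					return 0;
				}
				schedule();	/* sleep until woken */
				continue;
			}
			__set_current_state(TASK_RUNNING);

			drain_work(e);	/* hypothetical CSB-style drain */
		} while (1);
	}

The thread is created with kthread_run() and torn down with
kthread_stop(), which wakes the thread and waits for it to exit.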

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |   5 +-
 drivers/gpu/drm/i915/i915_gem.c         |  15 +--
 drivers/gpu/drm/i915/i915_irq.c         |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c        | 164 +++++++++++++++++---------------
 drivers/gpu/drm/i915/intel_lrc.h        |   3 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h |   1 +
 6 files changed, 98 insertions(+), 92 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 378bc73296aa..15a6fddfb79b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2094,7 +2094,6 @@ static int i915_execlists(struct seq_file *m, void *data)
        for_each_ring(ring, dev_priv, ring_id) {
                struct drm_i915_gem_request *head_req = NULL;
                int count = 0;
-               unsigned long flags;
 
                seq_printf(m, "%s\n", ring->name);
 
@@ -2121,12 +2120,12 @@ static int i915_execlists(struct seq_file *m, void *data)
                                   i, status, ctx_id);
                }
 
-               spin_lock_irqsave(&ring->execlist_lock, flags);
+               spin_lock(&ring->execlist_lock);
                list_for_each(cursor, &ring->execlist_queue)
                        count++;
                head_req = list_first_entry_or_null(&ring->execlist_queue,
                                struct drm_i915_gem_request, execlist_link);
-               spin_unlock_irqrestore(&ring->execlist_lock, flags);
+               spin_unlock(&ring->execlist_lock);
 
                seq_printf(m, "\t%d requests in queue\n", count);
                if (head_req) {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 391f840d29b7..eb875ecd7907 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2192,13 +2192,13 @@ static void i915_gem_reset_ring_cleanup(struct intel_engine_cs *engine)
         */
 
        if (i915.enable_execlists) {
-               spin_lock_irq(&engine->execlist_lock);
+               spin_lock(&engine->execlist_lock);
 
                /* list_splice_tail_init checks for empty lists */
                list_splice_tail_init(&engine->execlist_queue,
                                      &engine->execlist_retired_req_list);
 
-               spin_unlock_irq(&engine->execlist_lock);
+               spin_unlock(&engine->execlist_lock);
                intel_execlists_retire_requests(engine);
        }
 
@@ -2290,15 +2290,8 @@ i915_gem_retire_requests(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i) {
                i915_gem_retire_requests_ring(ring);
                idle &= list_empty(&ring->request_list);
-               if (i915.enable_execlists) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&ring->execlist_lock, flags);
-                       idle &= list_empty(&ring->execlist_queue);
-                       spin_unlock_irqrestore(&ring->execlist_lock, flags);
-
-                       intel_execlists_retire_requests(ring);
-               }
+               if (i915.enable_execlists)
+                       idle &= intel_execlists_retire_requests(ring);
        }
 
        if (idle)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ce047ac84f5f..b2ef2d0c211b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1316,7 +1316,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
                notify_ring(ring);
        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-               intel_lrc_irq_handler(ring);
+               wake_up_process(ring->execlists_submit);
 }
 
 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b5f62b5f4913..de5889e95d6d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -132,6 +132,8 @@
  *
  */
 
+#include <linux/kthread.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -341,7 +343,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
        rq0->elsp_submitted++;
 
        /* You must always write both descriptors in the order below. */
-       spin_lock(&dev_priv->uncore.lock);
+       spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
        I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
        I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
@@ -353,7 +355,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
        /* ELSP is a wo register, use another nearby reg for posting */
        POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
-       spin_unlock(&dev_priv->uncore.lock);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 static int execlists_update_context(struct drm_i915_gem_request *rq)
@@ -492,89 +494,84 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
        return false;
 }
 
-static void get_context_status(struct intel_engine_cs *ring,
-                              u8 read_pointer,
-                              u32 *status, u32 *context_id)
+static void set_rtpriority(void)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-       if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
-               return;
-
-       *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
-       *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+       struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2-1 };
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
 }
 
-/**
- * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @ring: Engine Command Streamer to handle.
- *
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+static int intel_execlists_submit(void *arg)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       u32 status_pointer;
-       u8 read_pointer;
-       u8 write_pointer;
-       u32 status = 0;
-       u32 status_id;
-       u32 submit_contexts = 0;
+       struct intel_engine_cs *ring = arg;
+       struct drm_i915_private *dev_priv = ring->i915;
 
-       status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+       set_rtpriority();
 
-       read_pointer = ring->next_context_status_buffer;
-       write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
-       if (read_pointer > write_pointer)
-               write_pointer += GEN8_CSB_ENTRIES;
+       do {
+               u32 status;
+               u32 status_id;
+               u32 submit_contexts;
+               u8 head, tail;
 
-       spin_lock(&ring->execlist_lock);
+               set_current_state(TASK_INTERRUPTIBLE);
+               head = ring->next_context_status_buffer;
+               tail = I915_READ(RING_CONTEXT_STATUS_PTR(ring)) & GEN8_CSB_PTR_MASK;
+               if (head == tail) {
+                       if (kthread_should_stop())
+                               return 0;
 
-       while (read_pointer < write_pointer) {
+                       schedule();
+                       continue;
+               }
+               __set_current_state(TASK_RUNNING);
 
-               get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
-                                  &status, &status_id);
+               if (head > tail)
+                       tail += GEN8_CSB_ENTRIES;
 
-               if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
-                       continue;
+               status = 0;
+               submit_contexts = 0;
 
-               if (status & GEN8_CTX_STATUS_PREEMPTED) {
-                       if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
-                       if (execlists_check_remove_request(ring, status_id))
-                               WARN(1, "Lite Restored request removed from queue\n");
-                       } else
-                               WARN(1, "Preemption without Lite Restore\n");
-               }
+               spin_lock(&ring->execlist_lock);
 
-               if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
-                   (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
-                       if (execlists_check_remove_request(ring, status_id))
-                               submit_contexts++;
-               }
-       }
+               while (head++ < tail) {
+                       status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, head % GEN8_CSB_ENTRIES));
+                       status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, head % GEN8_CSB_ENTRIES));
 
-       if (disable_lite_restore_wa(ring)) {
-               /* Prevent a ctx to preempt itself */
-               if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
-                   (submit_contexts != 0))
-                       execlists_context_unqueue(ring);
-       } else if (submit_contexts != 0) {
-               execlists_context_unqueue(ring);
-       }
+                       if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+                               continue;
 
-       spin_unlock(&ring->execlist_lock);
+                       if (status & GEN8_CTX_STATUS_PREEMPTED) {
+                               if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
+                                       if (execlists_check_remove_request(ring, status_id))
+                                               WARN(1, "Lite Restored request removed from queue\n");
+                               } else
+                                       WARN(1, "Preemption without Lite Restore\n");
+                       }
 
-       if (unlikely(submit_contexts > 2))
-               DRM_ERROR("More than two context complete events?\n");
+                       if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
+                           (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
+                               if (execlists_check_remove_request(ring, status_id))
+                                       submit_contexts++;
+                       }
+               }
+
+               if (disable_lite_restore_wa(ring)) {
+                       /* Prevent a ctx from preempting itself */
+                       if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
+                                       (submit_contexts != 0))
+                               execlists_context_unqueue(ring);
+               } else if (submit_contexts != 0) {
+                       execlists_context_unqueue(ring);
+               }
 
-       ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
+               spin_unlock(&ring->execlist_lock);
 
-       /* Update the read pointer to the old write pointer. Manual ringbuffer
-        * management ftw </sarcasm> */
-       I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-                  _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-                                ring->next_context_status_buffer << 8));
+               WARN(submit_contexts > 2, "More than two context complete events?\n");
+               ring->next_context_status_buffer = tail % GEN8_CSB_ENTRIES;
+               I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
+                          _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+                                        ring->next_context_status_buffer << 8));
+       } while (1);
 }
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -585,7 +582,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 
        i915_gem_request_get(request);
 
-       spin_lock_irq(&engine->execlist_lock);
+       spin_lock(&engine->execlist_lock);
 
        list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
                if (++num_elements > 2)
@@ -611,7 +608,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
        if (num_elements == 0)
                execlists_context_unqueue(engine);
 
-       spin_unlock_irq(&engine->execlist_lock);
+       spin_unlock(&engine->execlist_lock);
 
        return 0;
 }
@@ -667,19 +664,19 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
                execlists_context_queue(request);
 }
 
-void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+bool intel_execlists_retire_requests(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_request *req, *tmp;
        struct list_head retired_list;
 
        WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
        if (list_empty(&ring->execlist_retired_req_list))
-               return;
+               goto out;
 
        INIT_LIST_HEAD(&retired_list);
-       spin_lock_irq(&ring->execlist_lock);
+       spin_lock(&ring->execlist_lock);
        list_replace_init(&ring->execlist_retired_req_list, &retired_list);
-       spin_unlock_irq(&ring->execlist_lock);
+       spin_unlock(&ring->execlist_lock);
 
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
                struct intel_context *ctx = req->ctx;
@@ -691,6 +688,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
                list_del(&req->execlist_link);
                i915_gem_request_put(req);
        }
+
+out:
+       return list_empty(&ring->execlist_queue);
 }
 
 void intel_logical_ring_stop(struct intel_engine_cs *ring)
@@ -1525,6 +1525,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
        if (!intel_engine_initialized(ring))
                return;
 
+       if (ring->execlists_submit)
+               kthread_stop(ring->execlists_submit);
+
        if (ring->buffer) {
                struct drm_i915_private *dev_priv = ring->i915;
                intel_logical_ring_stop(ring);
@@ -1550,13 +1553,15 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct task_struct *task;
        int ret;
 
        /* Intentionally left blank. */
        ring->buffer = NULL;
 
        ring->dev = dev;
-       ring->i915 = to_i915(dev);
+       ring->i915 = dev_priv;
        ring->fence_context = fence_context_alloc(1);
        INIT_LIST_HEAD(&ring->request_list);
        i915_gem_batch_pool_init(dev, &ring->batch_pool);
@@ -1587,6 +1592,15 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
                goto error;
        }
 
+       ring->next_context_status_buffer =
+                       I915_READ(RING_CONTEXT_STATUS_PTR(ring)) & GEN8_CSB_PTR_MASK;
+       task = kthread_run(intel_execlists_submit, ring,
+                          "irq/i915:%de", ring->id);
+       if (IS_ERR(task))
+               goto error;
+
+       ring->execlists_submit = task;
+
        return 0;
 
 error:
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 87bc9acc4224..33f82a84065a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -81,7 +81,6 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
-void intel_lrc_irq_handler(struct intel_engine_cs *ring);
-void intel_execlists_retire_requests(struct intel_engine_cs *ring);
+bool intel_execlists_retire_requests(struct intel_engine_cs *ring);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bb92d831a100..edaf07b2292e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -291,6 +291,7 @@ struct intel_engine_cs {
        } semaphore;
 
        /* Execlists */
+       struct task_struct *execlists_submit;
        spinlock_t execlist_lock;
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
-- 
2.7.0.rc3
