On 02/11/2016 17:50, Chris Wilson wrote:
The execlist_lock is now completely subsumed by the engine->timeline->lock,
and so we can remove the redundant layer of locking.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c     | 4 ++--
 drivers/gpu/drm/i915/i915_gem.c         | 4 ++--
 drivers/gpu/drm/i915/intel_engine_cs.c  | 1 -
 drivers/gpu/drm/i915/intel_lrc.c        | 7 +------
 drivers/gpu/drm/i915/intel_ringbuffer.h | 1 -
 5 files changed, 5 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c9465fbff2df..3cb96d260dfb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3256,11 +3256,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                                seq_printf(m, "\t\tELSP[1] idle\n");
                        rcu_read_unlock();

-                       spin_lock_irq(&engine->execlist_lock);

Hm, why did we have this as _irq and not _bh already?
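(For reference, a minimal sketch of the difference between the variants, outside this patch; the lock and function names here are illustrative, not i915 code. _bh only disables bottom halves, which suffices when the only asynchronous user of the lock is a tasklet, as the removed comment in intel_ringbuffer.h suggested; _irq masks hard interrupts outright; _irqsave additionally preserves the caller's interrupt state:)

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock); /* illustrative, not a real i915 lock */

static void demo_tasklet_user(void)
{
	/* Contended only against a tasklet (softirq context):
	 * disabling bottom halves is sufficient and cheaper
	 * than masking hard interrupts.
	 */
	spin_lock_bh(&demo_lock);
	/* ... touch tasklet-shared state ... */
	spin_unlock_bh(&demo_lock);
}

static void demo_irq_user(void)
{
	/* Lock also taken from hard-IRQ context, and we know
	 * interrupts are currently enabled: mask them outright.
	 */
	spin_lock_irq(&demo_lock);
	/* ... */
	spin_unlock_irq(&demo_lock);
}

static void demo_any_context_user(void)
{
	unsigned long flags;

	/* Caller's interrupt state unknown: save and restore it. */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}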

+                       spin_lock_irq(&engine->timeline->lock);
                        list_for_each_entry(rq, &engine->execlist_queue, execlist_link) {
                                print_request(m, rq, "\t\tQ ");
                        }
-                       spin_unlock_irq(&engine->execlist_lock);
+                       spin_unlock_irq(&engine->timeline->lock);
                } else if (INTEL_GEN(dev_priv) > 6) {
                        seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
                                   I915_READ(RING_PP_DIR_BASE(engine)));
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5839bebba64a..cf28666021f8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2686,12 +2686,12 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
         */

        if (i915.enable_execlists) {
-               spin_lock(&engine->execlist_lock);

Likewise, this should have been _bh here; never mind now. :)

+               spin_lock_irq(&engine->timeline->lock);
                INIT_LIST_HEAD(&engine->execlist_queue);
                i915_gem_request_put(engine->execlist_port[0].request);
                i915_gem_request_put(engine->execlist_port[1].request);
                memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
-               spin_unlock(&engine->execlist_lock);
+               spin_unlock_irq(&engine->timeline->lock);
        }
 }

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 841f8d1e1410..298f0f95dd3f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -237,7 +237,6 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
        INIT_LIST_HEAD(&engine->execlist_queue);
-       spin_lock_init(&engine->execlist_lock);

        intel_engine_init_timeline(engine);
        intel_engine_init_hangcheck(engine);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b9aba16851ac..186c3ccb2c34 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -471,7 +471,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         */

        spin_lock_irqsave(&engine->timeline->lock, flags);
-       spin_lock(&engine->execlist_lock);
        list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
                /* Can we combine this request with the current port? It has to
                 * be the same context/ringbuffer and not have any exceptions
@@ -515,7 +514,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)

                i915_gem_request_assign(&port->request, last);
        }
-       spin_unlock(&engine->execlist_lock);
        spin_unlock_irqrestore(&engine->timeline->lock, flags);

        if (submit)
@@ -600,9 +598,8 @@ static void intel_lrc_irq_handler(unsigned long data)
 static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-       unsigned long flags;

-       spin_lock_irqsave(&engine->execlist_lock, flags);

Again, I am confused why this wasn't just _bh.
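(Aside: with the change below, the submit path documents the locking contract instead of taking the lock itself — the caller must already hold engine->timeline->lock. A minimal sketch of that caller-holds-lock pattern, with illustrative names rather than the actual i915 structures:)

#include <linux/spinlock.h>

struct demo_timeline {
	spinlock_t lock;
};

/* Backend: called with tl->lock already held by the caller. */
static void demo_backend_submit(struct demo_timeline *tl)
{
	assert_spin_locked(&tl->lock);
	/* ... manipulate the queue protected by tl->lock ... */
}

/* Frontend: owns the lock across the backend call. */
static void demo_frontend_submit(struct demo_timeline *tl)
{
	unsigned long flags;

	spin_lock_irqsave(&tl->lock, flags);
	demo_backend_submit(tl);
	spin_unlock_irqrestore(&tl->lock, flags);
}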

+       assert_spin_locked(&engine->timeline->lock);

        /* We keep the previous context alive until we retire the following
         * request. This ensures that the context object is still pinned
@@ -616,8 +613,6 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
        list_add_tail(&request->execlist_link, &engine->execlist_queue);
        if (execlists_elsp_idle(engine))
                tasklet_hi_schedule(&engine->irq_tasklet);
-
-       spin_unlock_irqrestore(&engine->execlist_lock, flags);
 }

 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 642b54692d0d..062bc8e1872a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -335,7 +335,6 @@ struct intel_engine_cs {

        /* Execlists */
        struct tasklet_struct irq_tasklet;
-       spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
        struct execlist_port {
                struct drm_i915_gem_request *request;
                unsigned int count;


Anyway, looks correct.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>

Regards,

Tvrtko