From: Sourab Gupta <sourab.gu...@intel.com>

This patch adds support for opening multiple concurrent perf streams for
different gpu engines, while restricting each engine to at most one open
stream at a time. This enables a userspace client to open multiple
streams, one per engine, at any time to capture sample data for multiple
gpu engines.

Since the poll check hrtimer callback runs in atomic context, where the
streams_lock mutex protecting the streams list cannot be taken, the
wakeup of individual streams is moved out to an asynchronously scheduled
function.

Signed-off-by: Sourab Gupta <sourab.gu...@intel.com>
---
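A minimal userspace sketch of the intended usage, opening one stream per
engine. This is illustrative only: DRM_I915_PERF_PROP_ENGINE is an
assumed property name, and the OA metric-set properties a real open
would also need are omitted; the rest follows the existing i915 perf
open ioctl.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int open_engine_stream(int drm_fd, uint64_t engine)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_ENGINE, engine,	/* assumed name */
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		/* number of (key, value) pairs */
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	/* With this patch, a second open on the same engine returns
	 * -EBUSY, while streams on other engines can coexist.
	 */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}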
 drivers/gpu/drm/i915/i915_drv.h  |  2 +-
 drivers/gpu/drm/i915/i915_perf.c | 41 ++++++++++++++++++++++------------------
 2 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 40ac362..c005ffa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2547,7 +2547,7 @@ struct drm_i915_private {
                spinlock_t hook_lock;
 
                struct hrtimer poll_check_timer;
-               struct i915_perf_stream *exclusive_stream;
+               struct i915_perf_stream *engine_stream[I915_NUM_ENGINES];
                wait_queue_head_t poll_wq[I915_NUM_ENGINES];
                bool pollin[I915_NUM_ENGINES];
 
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index d3d934e..5437d08 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1382,7 +1382,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                 * switches since it's not-uncommon for periodic samples to
                 * identify a switch before any 'context switch' report.
                 */
-               if (!dev_priv->perf.exclusive_stream->ctx ||
+               if (!stream->ctx ||
                    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
                    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
                     dev_priv->perf.oa.specific_ctx_id) ||
@@ -1391,7 +1391,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                        /* While filtering for a single context we avoid
                         * leaking the IDs of other contexts.
                         */
-                       if (dev_priv->perf.exclusive_stream->ctx &&
+                       if (stream->ctx &&
                            dev_priv->perf.oa.specific_ctx_id != ctx_id) {
                                report32[2] = INVALID_CTX_ID;
                        }
@@ -2190,7 +2190,7 @@ static void i915_engine_stream_destroy(struct i915_perf_stream *stream)
 {
        struct drm_i915_private *dev_priv = stream->dev_priv;
 
-       if (WARN_ON(stream != dev_priv->perf.exclusive_stream))
+       if (WARN_ON(stream != dev_priv->perf.engine_stream[stream->engine]))
                return;
 
        if (stream->using_oa) {
@@ -2208,7 +2208,7 @@ static void i915_engine_stream_destroy(struct i915_perf_stream *stream)
        if (stream->cs_mode)
                free_command_stream_buf(dev_priv, stream->engine);
 
-       dev_priv->perf.exclusive_stream = NULL;
+       dev_priv->perf.engine_stream[stream->engine] = NULL;
 }
 
 static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
@@ -2755,10 +2755,10 @@ static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
 {
        assert_spin_locked(&dev_priv->perf.hook_lock);
 
-       if (dev_priv->perf.exclusive_stream->state !=
+       if (dev_priv->perf.engine_stream[RCS]->state !=
                                        I915_PERF_STREAM_DISABLED) {
                struct i915_gem_context *ctx =
-                       dev_priv->perf.exclusive_stream->ctx;
+                       dev_priv->perf.engine_stream[RCS]->ctx;
                u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
 
                bool periodic = dev_priv->perf.oa.periodic;
@@ -2931,8 +2931,9 @@ static int i915_engine_stream_init(struct i915_perf_stream *stream,
         * counter reports and marshal to the appropriate client
         * we currently only allow exclusive access
         */
-       if (dev_priv->perf.exclusive_stream) {
-               DRM_DEBUG("I915 perf stream already in use\n");
+       if (dev_priv->perf.engine_stream[props->engine]) {
+               DRM_DEBUG("I915 perf stream for engine %d already in use\n",
+                         props->engine);
                return -EBUSY;
        }
 
@@ -3120,7 +3121,7 @@ static int i915_engine_stream_init(struct i915_perf_stream *stream,
        }
 
        stream->ops = &i915_engine_stream_ops;
-       dev_priv->perf.exclusive_stream = stream;
+       dev_priv->perf.engine_stream[stream->engine] = stream;
 
        return 0;
 
@@ -3343,24 +3344,28 @@ static ssize_t i915_perf_read(struct file *file,
        return ret;
 }
 
-static enum hrtimer_restart poll_check_timer_cb(struct hrtimer *hrtimer)
+static void wake_up_perf_streams(void *data, async_cookie_t cookie)
 {
+       struct drm_i915_private *dev_priv = data;
        struct i915_perf_stream *stream;
-       struct drm_i915_private *dev_priv =
-               container_of(hrtimer, typeof(*dev_priv),
-                            perf.poll_check_timer);
 
-       /* No need to protect the streams list here, since the hrtimer is
-        * disabled before the stream is removed from list, and currently a
-        * single exclusive_stream is supported.
-        * XXX: revisit this when multiple concurrent streams are supported.
-        */
+       mutex_lock(&dev_priv->perf.streams_lock);
        list_for_each_entry(stream, &dev_priv->perf.streams, link) {
                if (stream_have_data_unlocked(stream)) {
                        dev_priv->perf.pollin[stream->engine] = true;
                        wake_up(&dev_priv->perf.poll_wq[stream->engine]);
                }
        }
+       mutex_unlock(&dev_priv->perf.streams_lock);
+}
+
+static enum hrtimer_restart poll_check_timer_cb(struct hrtimer *hrtimer)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(hrtimer, typeof(*dev_priv),
+                            perf.poll_check_timer);
+
+       async_schedule(wake_up_perf_streams, dev_priv);
 
        hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
 
-- 
1.9.1
