The testcase added here exercises the scenario where a high priority
request is submitted while another process keeps submitting requests
and filling its ringbuffer.

Changes from the RFC (suggested by Chris):
        - Use two fds, one for each priority of submission.
        - Move the test from gem_ringfill to gem_exec_schedule.
        - Bind both processes to the same CPU and wake with signals.

Cc: Chris Wilson <ch...@chris-wilson.co.uk>

Signed-off-by: Antonio Argenziano <antonio.argenzi...@intel.com>
---
 tests/gem_exec_schedule.c | 113 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 112 insertions(+), 1 deletion(-)

diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index f2847863..476626e6 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -21,7 +21,12 @@
  * IN THE SOFTWARE.
  */
 
+#define _GNU_SOURCE
+#include <sched.h>
+#include <signal.h>
+
 #include <sys/poll.h>
+#include <sys/ioctl.h>
 
 #include "igt.h"
 #include "igt_vgem.h"
@@ -39,6 +44,15 @@
 
 IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
 
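+/*
+ * Empty SIGALRM handler; its only purpose is to interrupt a blocking
+ * execbuf ioctl so that __execbuf() returns instead of waiting
+ * indefinitely for ring space.
+ */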
+static void alarm_handler(int sig)
+{
+}
+
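+/*
+ * Raw execbuf ioctl wrapper. Unlike gem_execbuf() it does not assert on
+ * failure, so the caller can detect when a submission blocks on a full
+ * ring and is interrupted by SIGALRM.
+ */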
+static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
+{
+       return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
+}
+
 static int __ctx_set_priority(int fd, uint32_t ctx, int prio)
 {
        struct local_i915_gem_context_param param;
@@ -659,6 +673,95 @@ static bool has_scheduler(int fd)
        return has > 0;
 }
 
+static void bind_to_cpu(int cpu)
+{
+       const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+       cpu_set_t allowed;
+
+       CPU_ZERO(&allowed);
+       CPU_SET(cpu % ncpus, &allowed);
+       igt_assert(sched_setaffinity(getpid(), sizeof(cpu_set_t), &allowed) == 0);
+}
+
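+/*
+ * Arm SIGALRM to fire after 'timeout' ms and then every 100us, so that a
+ * blocking execbuf ioctl issued afterwards keeps getting interrupted.
+ */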
+static void setup_timer(int timeout)
+{
+       struct itimerval itv;
+       struct sigaction sa = { .sa_handler = alarm_handler };
+
+       sigaction(SIGALRM, &sa, NULL);
+       itv.it_interval.tv_sec = 0;
+       itv.it_interval.tv_usec = 100;
+       itv.it_value.tv_sec = 0;
+       itv.it_value.tv_usec = timeout * 1000;
+       setitimer(ITIMER_REAL, &itv, NULL);
+}
+
+/*
+ * This test checks that it is possible for a high priority request to trigger
+ * preemption when another context has filled its ringbuffer.
+ * The aim of the test is to make sure that high priority requests cannot be
+ * stalled by low priority tasks.
+ */
+static void preempt_while_ringbuffer_full(int fd, uint32_t engine)
+{
+       int hp_fd;
+       uint32_t hp_ctx, lp_ctx;
+       struct drm_i915_gem_exec_object2 obj, hp_obj;
+       struct drm_i915_gem_execbuffer2 execbuf;
+
+       const unsigned timeout = 10; /* ms */
+       const uint32_t bbe = MI_BATCH_BUFFER_END;
+
+       hp_fd = drm_open_driver(DRIVER_INTEL);
+
+       memset(&execbuf, 0, sizeof(execbuf));
+       memset(&obj, 0, sizeof(obj));
+       memset(&hp_obj, 0, sizeof(hp_obj));
+
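+       /* Per the RFC feedback, each priority gets its own fd: the low
+        * priority context lives on the main fd, the high priority context
+        * on hp_fd.
+        */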
+       lp_ctx = gem_context_create(fd);
+       ctx_set_priority(fd, lp_ctx, -MAX_PRIO);
+
+       hp_ctx = gem_context_create(hp_fd);
+       ctx_set_priority(hp_fd, hp_ctx, MAX_PRIO);
+
+       /* The high priority batch is created on hp_fd, since that is the fd
+        * the child will submit it on (buffer handles are per-fd).
+        */
+       hp_obj.handle = gem_create(hp_fd, 4096);
+       gem_write(hp_fd, hp_obj.handle, 0, &bbe, sizeof(bbe));
+
+       obj.handle = gem_create(fd, 4096);
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+       execbuf.buffers_ptr = to_user_pointer(&obj);
+       execbuf.flags = engine;
+       execbuf.buffer_count = 1;
+       execbuf.rsvd1 = lp_ctx;
+
+       /* Stall execution and fill ring */
+       igt_spin_batch_new(fd, lp_ctx, engine, 0);
+
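+       /* Queue nop batches on the stalled low priority context until the
+        * execbuf ioctl stops returning 0, i.e. the ring is full and the
+        * call blocked until SIGALRM interrupted it.
+        */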
+       setup_timer(timeout);
+       while (__execbuf(fd, &execbuf) == 0)
+               ;
+
+       /* Bind to a single CPU so that the forked child below can only run
+        * while this process is blocked.
+        */
+       bind_to_cpu(0);
+       igt_fork(child, 1) {
+               /* The child shares the parent's CPU, so it only runs once
+                * the parent blocks below.
+                */
+               execbuf.buffers_ptr = to_user_pointer(&hp_obj);
+               execbuf.rsvd1 = hp_ctx;
+               setup_timer(timeout);
+               if (__execbuf(hp_fd, &execbuf)) {
+                       igt_terminate_spin_batches();
+                       igt_assert_f(0, "High priority submission failed.\n");
+               }
+       }
+
+       /* The parent now blocks waiting for ring space, releasing the child */
+       setup_timer(2 * timeout);
+       __execbuf(fd, &execbuf);
+       igt_terminate_spin_batches();
+       igt_waitchildren();
+
+       gem_context_destroy(fd, lp_ctx);
+       gem_close(fd, obj.handle);
+       close(hp_fd);
+}
+
 igt_main
 {
        const struct intel_execution_engine *e;
@@ -669,7 +772,7 @@ igt_main
        igt_fixture {
                fd = drm_open_driver_master(DRIVER_INTEL);
                igt_require_gem(fd);
-               gem_require_mmap_wc(fd);
+               //gem_require_mmap_wc(fd);
                igt_fork_hang_detector(fd);
        }
 
@@ -731,6 +834,14 @@ igt_main
                }
        }
 
+       igt_subtest_group {
+               for (e = intel_execution_engines; e->name; e++) {
+                       igt_subtest_f("preempt-while-full-%s", e->name) {
+                               preempt_while_ringbuffer_full(fd, e->exec_id | e->flags);
+                       }
+               }
+       }
+
        igt_fixture {
                igt_stop_hang_detector();
                close(fd);
-- 
2.14.1
