Since drm_i915_gem_request already contains a backpointer to
drm_i915_private, this is a fairly trivial operation. However, using a
consistent interface does lend convenience when we need to query
device properties, for example.
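
For context, a stand-alone sketch of the type-dispatch pattern the new
to_i915() branch relies on (assumes GCC/Clang extensions: statement
expressions and __builtin_types_compatible_p; the structs below are trimmed
stand-ins rather than the kernel definitions, and only the request branch is
shown):

    /* Simplified illustration, not the kernel code itself. */
    #include <stdio.h>

    struct drm_i915_private { int gen; };
    struct drm_i915_gem_request { struct drm_i915_private *i915; };

    static inline struct drm_i915_private *
    __request_to_i915(const struct drm_i915_gem_request *request)
    {
            return request->i915;
    }

    /* Resolve a pointer to its drm_i915_private based on its static type. */
    #define to_i915(p) ({ \
            struct drm_i915_private *__p = NULL; \
            if (__builtin_types_compatible_p(typeof(*(p)), \
                                             struct drm_i915_gem_request)) \
                    __p = __request_to_i915( \
                            (const struct drm_i915_gem_request *)(p)); \
            __p; \
    })

    int main(void)
    {
            struct drm_i915_private dev_priv = { .gen = 8 };
            struct drm_i915_gem_request req = { .i915 = &dev_priv };

            /* INTEL_INFO(req)-style queries reduce to to_i915(&req)->... */
            printf("gen %d\n", to_i915(&req)->gen);
            return 0;
    }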

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |  8 +++++++
 drivers/gpu/drm/i915/i915_gem.c            | 18 ++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c    | 21 ++++++++---------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  3 +--
 drivers/gpu/drm/i915/intel_lrc.c           | 19 +++++++--------
 drivers/gpu/drm/i915/intel_mocs.c          |  2 +-
 drivers/gpu/drm/i915/intel_pm.c            |  3 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 38 +++++++++++++-----------------
 9 files changed, 54 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 76eeb47c9c15..c6ead62bc411 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2338,6 +2338,12 @@ struct drm_i915_gem_request {
 
 };
 
+static inline struct drm_i915_private *
+__request_to_i915(const struct drm_i915_gem_request *request)
+{
+       return request->i915;
+}
+
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct intel_context *ctx);
@@ -2509,6 +2515,8 @@ struct drm_i915_cmd_table {
                __p = __obj_to_i915((struct drm_i915_gem_object *)p); \
        else if (__builtin_types_compatible_p(typeof(*p), struct intel_guc)) \
                __p = __guc_to_i915((struct intel_guc *)p); \
+       else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_request)) \
+               __p = __request_to_i915((struct drm_i915_gem_request *)(p)); \
        else \
                BUILD_BUG(); \
        __p; \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bbd7bfadbaef..237d5e884610 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1442,18 +1442,16 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-       struct drm_device *dev;
        struct drm_i915_private *dev_priv;
        bool interruptible;
        int ret;
 
        BUG_ON(req == NULL);
 
-       dev = req->engine->dev;
-       dev_priv = dev->dev_private;
+       dev_priv = to_i915(req);
        interruptible = dev_priv->mm.interruptible;
 
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
        ret = __i915_wait_request(req, interruptible, NULL, NULL);
        if (ret)
@@ -2589,7 +2587,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                return;
 
        engine = request->engine;
-       dev_priv = request->i915;
+       dev_priv = to_i915(request);
        ringbuf = request->ringbuf;
 
        /*
@@ -2721,13 +2719,13 @@ void i915_gem_request_free(struct kref *req_ref)
                i915_gem_request_remove_from_client(req);
 
        if (ctx) {
-               if (i915.enable_execlists && ctx != req->i915->kernel_context)
+               if (i915.enable_execlists && ctx != to_i915(req)->kernel_context)
                        intel_lr_context_unpin(ctx, req->engine);
 
                i915_gem_context_unreference(ctx);
        }
 
-       kmem_cache_free(req->i915->requests, req);
+       kmem_cache_free(to_i915(req)->requests, req);
 }
 
 static inline int
@@ -4729,12 +4727,10 @@ err:
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+       u32 *remap_info = to_i915(req)->l3_parity.remap_info[slice];
        int i, ret;
 
-       if (!HAS_L3_DPF(dev) || !remap_info)
+       if (!HAS_L3_DPF(req) || !remap_info)
                return 0;
 
        ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b8439971c9bb..e53889498696 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -515,7 +515,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
                i915_semaphore_is_enabled(engine->dev) ?
-               hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+               hweight32(INTEL_INFO(req)->ring_mask) - 1 :
                0;
        int len, ret;
 
@@ -524,21 +524,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
         * explicitly, so we rely on the value at ring init, stored in
         * itlb_before_ctx_switch.
         */
-       if (IS_GEN6(engine->dev)) {
+       if (IS_GEN6(req)) {
                ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }
 
        /* These flags are for resource streamer on HSW+ */
-       if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+       if (IS_HASWELL(req) || INTEL_INFO(req)->gen >= 8)
                flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-       else if (INTEL_INFO(engine->dev)->gen < 8)
+       else if (INTEL_INFO(req)->gen < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
        len = 4;
-       if (INTEL_INFO(engine->dev)->gen >= 7)
+       if (INTEL_INFO(req)->gen >= 7)
                len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
        ret = intel_ring_begin(req, len);
@@ -546,14 +546,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
                return ret;
 
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-       if (INTEL_INFO(engine->dev)->gen >= 7) {
+       if (INTEL_INFO(req)->gen >= 7) {
                intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
                if (num_rings) {
                        struct intel_engine_cs *signaller;
 
                        intel_ring_emit(engine,
                                        MI_LOAD_REGISTER_IMM(num_rings));
-                       for_each_engine(signaller, engine->dev) {
+                       for_each_engine(signaller, req) {
                                if (signaller == engine)
                                        continue;
 
@@ -576,14 +576,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
         */
        intel_ring_emit(engine, MI_NOOP);
 
-       if (INTEL_INFO(engine->dev)->gen >= 7) {
+       if (INTEL_INFO(req)->gen >= 7) {
                if (num_rings) {
                        struct intel_engine_cs *signaller;
                        i915_reg_t last_reg = {}; /* keep gcc quiet */
 
                        intel_ring_emit(engine,
                                        MI_LOAD_REGISTER_IMM(num_rings));
-                       for_each_engine(signaller, engine->dev) {
+                       for_each_engine(signaller, req) {
                                if (signaller == engine)
                                        continue;
 
@@ -817,10 +817,9 @@ unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = req->i915;
 
        WARN_ON(i915.enable_execlists);
-       WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&to_i915(req)->dev->struct_mutex));
 
        if (engine->id != RCS ||
            req->ctx->legacy_hw_ctx.rcs_state == NULL) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 86911dcafb6a..9c582c815af2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1095,7 +1095,6 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, vmas, exec_list) {
@@ -1122,7 +1121,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                        i915_gem_request_assign(&obj->last_fenced_req, req);
                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-                               struct drm_i915_private *dev_priv = to_i915(engine->dev);
+                               struct drm_i915_private *dev_priv = to_i915(req);
                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
                                               &dev_priv->mm.fence_list);
                        }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 627a4accc6e3..780e3ad3ca10 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2182,8 +2182,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 
 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 {
-       struct drm_i915_private *dev_priv = req->i915;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+       struct i915_hw_ppgtt *ppgtt = to_i915(req)->mm.aliasing_ppgtt;
 
        if (i915.enable_execlists)
                return 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7141a2a3f481..1926bef2e612 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -612,7 +612,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
-       if (request->ctx != request->i915->kernel_context)
+       if (request->ctx != to_i915(request)->kernel_context)
                intel_lr_context_pin(request->ctx, engine);
 
        i915_gem_request_reference(request);
@@ -708,14 +708,14 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
                 * going any further, as the i915_add_request() call
                 * later on mustn't fail ...
                 */
-               struct intel_guc *guc = &request->i915->guc;
+               struct intel_guc *guc = &to_i915(request)->guc;
 
                ret = i915_guc_wq_check_space(guc->execbuf_client);
                if (ret)
                        return ret;
        }
 
-       if (request->ctx != request->i915->kernel_context)
+       if (request->ctx != to_i915(request)->kernel_context)
                ret = intel_lr_context_pin(request->ctx, request->engine);
 
        return ret;
@@ -776,7 +776,7 @@ static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct drm_i915_private *dev_priv = request->i915;
+       struct drm_i915_private *dev_priv = to_i915(request);
        struct intel_engine_cs *engine = request->engine;
 
        intel_logical_ring_advance(ringbuf);
@@ -798,7 +798,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
        if (engine->last_context != request->ctx) {
                if (engine->last_context)
                        intel_lr_context_unpin(engine->last_context, engine);
-               if (request->ctx != request->i915->kernel_context) {
+               if (request->ctx != to_i915(request)->kernel_context) {
                        intel_lr_context_pin(request->ctx, engine);
                        engine->last_context = request->ctx;
                } else {
@@ -1030,7 +1030,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
                struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[engine->id].state;
 
-               if (ctx_obj && (ctx != req->i915->kernel_context))
+               if (ctx_obj && (ctx != to_i915(req)->kernel_context))
                        intel_lr_context_unpin(ctx, engine);
 
                list_del(&req->execlist_link);
@@ -1168,8 +1168,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
        int ret, i;
        struct intel_engine_cs *engine = req->engine;
        struct intel_ringbuffer *ringbuf = req->ringbuf;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(req);
        struct i915_workarounds *w = &dev_priv->workarounds;
 
        if (w->count == 0)
@@ -1708,8 +1707,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
         * not needed in 48-bit.*/
        if (req->ctx->ppgtt &&
            (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
-               if (!USES_FULL_48BIT_PPGTT(req->i915) &&
-                   !intel_vgpu_active(req->i915->dev)) {
+               if (!USES_FULL_48BIT_PPGTT(req) &&
+                   !intel_vgpu_active(to_i915(req)->dev)) {
                        ret = intel_logical_ring_emit_pdps(req);
                        if (ret)
                                return ret;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 23b8545ad6b0..b7b5fefa0cec 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -405,7 +405,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
        struct drm_i915_mocs_table t;
        int ret;
 
-       if (get_mocs_settings(req->i915, &t)) {
+       if (get_mocs_settings(to_i915(req), &t)) {
                /* Program the RCS control registers */
                ret = emit_mocs_control_table(req, &t);
                if (ret)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b7c218602c6e..f7bb0084a557 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7363,8 +7363,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
        struct drm_i915_gem_request *req = boost->req;
 
        if (!i915_gem_request_completed(req, true))
-               gen6_rps_boost(to_i915(req->engine->dev), NULL,
-                              req->emitted_jiffies);
+               gen6_rps_boost(to_i915(req), NULL, req->emitted_jiffies);
 
        i915_gem_request_unreference__unlocked(req);
        kfree(boost);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 89811fbe723e..0c8c2d40987f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -107,7 +107,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      flush_domains)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
        u32 cmd;
        int ret;
 
@@ -146,7 +145,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                cmd |= MI_EXE_FLUSH;
 
        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-           (IS_G4X(dev) || IS_GEN5(dev)))
+           (IS_G4X(req) || IS_GEN5(req)))
                cmd |= MI_INVALIDATE_ISP;
 
        ret = intel_ring_begin(req, 2);
@@ -710,8 +709,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(req);
        struct i915_workarounds *w = &dev_priv->workarounds;
 
        if (w->count == 0)
@@ -1278,13 +1276,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(signaller_req);
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1320,13 +1317,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(signaller_req);
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1359,14 +1355,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                       unsigned int num_dwords)
 {
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(signaller_req);
        struct intel_engine_cs *useless;
        enum intel_engine_id id;
        int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1446,7 +1441,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
               u32 seqno)
 {
        struct intel_engine_cs *waiter = waiter_req->engine;
-       struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(waiter_req);
        int ret;
 
        ret = intel_ring_begin(waiter_req, 4);
@@ -2367,7 +2362,7 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
        /* Make sure we do not trigger any retires */
        return __i915_wait_request(req,
-                                  req->i915->mm.interruptible,
+                                  to_i915(req)->mm.interruptible,
                                   NULL, NULL);
 }
 
@@ -2495,7 +2490,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 
        WARN_ON(req == NULL);
        engine = req->engine;
-       dev_priv = req->i915;
+       dev_priv = to_i915(req);
 
        ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
        if (ret)
@@ -2608,7 +2603,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(engine->dev)->gen >= 8)
+       if (INTEL_INFO(req)->gen >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2630,7 +2625,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
        intel_ring_emit(engine, cmd);
        intel_ring_emit(engine,
                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(engine->dev)->gen >= 8) {
+       if (INTEL_INFO(req)->gen >= 8) {
                intel_ring_emit(engine, 0); /* upper addr */
                intel_ring_emit(engine, 0); /* value */
        } else  {
@@ -2647,7 +2642,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                              unsigned dispatch_flags)
 {
        struct intel_engine_cs *engine = req->engine;
-       bool ppgtt = USES_PPGTT(engine->dev) &&
+       bool ppgtt = USES_PPGTT(req) &&
                        !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
@@ -2721,7 +2716,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                           u32 invalidate, u32 flush)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
        uint32_t cmd;
        int ret;
 
@@ -2730,7 +2724,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(dev)->gen >= 8)
+       if (INTEL_INFO(req)->gen >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2751,7 +2745,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
        intel_ring_emit(engine, cmd);
        intel_ring_emit(engine,
                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_INFO(req)->gen >= 8) {
                intel_ring_emit(engine, 0); /* upper addr */
                intel_ring_emit(engine, 0); /* value */
        } else  {
-- 
2.8.0.rc3
