 Makefile.am            |    2 
 benchmarks/.gitignore  |    2 
 benchmarks/Makefile.am |   14 +
 benchmarks/dri2-swap.c |  588 ++++++++++++++++++++++++++++++++++++++++++++++++
 benchmarks/dri3-swap.c |  595 +++++++++++++++++++++++++++++++++++++++++++++++++
 configure.ac           |    2 
 man/intel.man          |   10 
 src/compat-api.h       |    4 
 src/intel_device.c     |   27 +-
 src/intel_options.c    |    1 
 src/intel_options.h    |    1 
 src/sna/fb/fb.h        |    4 
 src/sna/fb/fbpict.h    |    4 
 src/sna/gen3_render.c  |   73 +++++-
 src/sna/gen4_render.c  |   19 +
 src/sna/gen5_render.c  |   14 +
 src/sna/kgem.c         |  346 +++++++++++++++++-----------
 src/sna/sna.h          |   17 +
 src/sna/sna_accel.c    |   45 +++
 src/sna/sna_display.c  |  252 +++++++++++++++++---
 src/sna/sna_dri2.c     |  272 +++++++++++++++-------
 src/sna/sna_driver.c   |   59 ++--
 src/sna/sna_io.c       |    9 
 src/sna/sna_present.c  |   25 +-
 src/sna/xassert.h      |    6 
 test/.gitignore        |    1 
 test/Makefile.am       |    1 
 test/basic-copyplane.c |   99 ++++++++
 tools/cursor.c         |    3 
 29 files changed, 2177 insertions(+), 318 deletions(-)
New commits:
commit 4cea8037984c3d5e171be22710384f66660ca4c6
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Sat Aug 8 22:10:20 2015 +0100

    sna/dri2: Restore caching of fullscreen triple buffers

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_dri2.c b/src/sna/sna_dri2.c
index 7a2d9d0..bae9314 100644
--- a/src/sna/sna_dri2.c
+++ b/src/sna/sna_dri2.c
@@ -360,7 +360,7 @@ sna_dri2_get_back(struct sna *sna,
 	}
 
 	assert(bo->active_scanout == 0);
-	if (reuse && get_private(back)->bo->refcnt == 1) {
+	if (reuse && get_private(back)->bo->refcnt == 1 + get_private(back)->bo->active_scanout) {
 		if (&c->link == &priv->cache)
 			c = malloc(sizeof(*c));
 		if (c != NULL) {

commit fd5f44f4d27e7a28221ea9755664a6da6dd14cd8
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Sat Aug 8 22:09:53 2015 +0100

    sna: A couple more asserts for valid flip data and handling

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 606c3bd..5b975c1 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -6244,6 +6244,7 @@ error:
 
 	if (data) {
 		assert(crtc->flip_bo == NULL);
+		assert(handler);
 		crtc->flip_handler = handler;
 		crtc->flip_data = data;
 		crtc->flip_bo = kgem_bo_reference(bo);
@@ -8564,6 +8565,7 @@ again:
 			crtc->swap.tv_usec = vbl->tv_usec;
 			crtc->swap.msc = msc;
 		}
+		assert(crtc->flip_pending);
 		crtc->flip_pending = false;
 
 		assert(crtc->flip_bo);

commit cadea260de76a398f9141b18ae91d1646e9a305e
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Sat Aug 8 15:39:05 2015 +0100

    sna/dri2: Add the old buffer from a chain swap to the swap cache

    Rather than just discarding the old buffer, we want to add it to the
    swap cache and so hand it back to the client in the near future.

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_dri2.c b/src/sna/sna_dri2.c
index 1872d0c..7a2d9d0 100644
--- a/src/sna/sna_dri2.c
+++ b/src/sna/sna_dri2.c
@@ -214,6 +214,8 @@ sna_dri2_cache_bo(struct sna *sna,
 {
 	struct dri_bo *c;
 
+	DBG(("%s(handle=%d, name=%d)\n", __FUNCTION__, bo->handle, name));
+
 	if (draw == NULL) {
 		DBG(("%s: no draw, releasing handle=%d\n",
 		     __FUNCTION__, bo->handle));
@@ -2405,10 +2407,13 @@ static void chain_swap(struct sna_dri2_event *chain)
 		chain->back->flags = tmp.flags;
 		chain->back->pitch = tmp.bo->pitch;
 
-		tmp.bo = get_private(chain->back)->copy.bo;
-	}
-
-	kgem_bo_destroy(&chain->sna->kgem, tmp.bo);
+		sna_dri2_cache_bo(chain->sna, chain->draw,
+				  get_private(chain->back)->copy.bo,
+				  get_private(chain->back)->copy.name,
+				  get_private(chain->back)->copy.size,
+				  get_private(chain->back)->copy.flags);
+	} else
+		kgem_bo_destroy(&chain->sna->kgem, tmp.bo);
 
 	get_private(chain->back)->copy.bo = ref(get_private(chain->back)->bo);
 	get_private(chain->back)->copy.name = chain->back->name;
@@ -2668,6 +2673,8 @@ sna_dri2_immediate_blit(struct sna *sna,
 			assert(chain->bo == NULL);
 			assert(chain->queued);
 
+			DBG(("%s: stealing placeholder\n", __FUNCTION__));
+
 			_sna_dri2_destroy_buffer(chain->sna, chain->draw, chain->front);
 			_sna_dri2_destroy_buffer(chain->sna, chain->draw, chain->back);
@@ -2711,6 +2718,8 @@ sna_dri2_immediate_blit(struct sna *sna,
 		    chain->chain->type == SWAP_THROTTLE) {
 			struct sna_dri2_event *tmp = chain->chain;
 
+			DBG(("%s: replacing next swap\n", __FUNCTION__));
+
 			assert(!tmp->queued);
 			assert(info->chain == NULL);
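
The reuse test in the triple-buffer commit above is easy to misread: the
swap cache holds one reference on the buffer, and a buffer still on the
scanout pins one more, so "sole ownership" is refcnt == 1 + active_scanout.
A minimal standalone sketch of that rule, with hypothetical types rather
than the driver's real structures:

	#include <stdbool.h>

	struct swap_bo {
		int refcnt;		/* references held by cache and clients */
		int active_scanout;	/* nonzero while displayed on the scanout */
	};

	/* The cache owns one reference; an active scanout pins one more.
	 * Only when nobody else shares the buffer may it be recycled. */
	static bool swap_bo_is_reusable(const struct swap_bo *bo)
	{
		return bo->refcnt == 1 + bo->active_scanout;
	}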
commit 611ec7d7d476c47eefc35e2857bc6b87af819e43
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 17:19:25 2015 +0100

    sna: Remove incorrect assertion

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 34893d7..33c48fd 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3997,7 +3997,6 @@ void _kgem_submit(struct kgem *kgem)
 	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
 	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
 	assert(kgem->nfence <= kgem->fence_max);
-	assert(kgem->ring < ARRAY_SIZE(kgem->requests));
 
 	kgem_finish_buffers(kgem);

commit f324506f7da369222b6c5dd65dfddaea65b7cb41
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 16:44:19 2015 +0100

    sna: Prefer direct writes if the target is LLC

    If we can use WB CPU writes into the target, prefer to do so.

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index f6418cd..d32bd58 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -794,6 +794,9 @@ static bool __upload_inplace(struct kgem *kgem,
 	if (bo->flush)
 		return true;
 
+	if (kgem_bo_can_map__cpu(kgem, bo, true))
+		return true;
+
 	/* If we are writing through the GTT, check first if we might be
 	 * able to almagamate a series of small writes into a single
 	 * operation.

commit 095528e6c3ca6aba2b141f451d0e1f14f3e57a59
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 16:29:54 2015 +0100

    sna: Tweak the semantics for small uploads into Damage tracked pixmaps

    Based on further study of behaviour under a compositing manager,
    always prefer to upload directly into the flushed bo, with a couple
    of exceptions.

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 6bf87ee..f6418cd 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -788,6 +788,12 @@ static bool __upload_inplace(struct kgem *kgem,
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
+	if (bo->exec)
+		return false;
+
+	if (bo->flush)
+		return true;
+
 	/* If we are writing through the GTT, check first if we might be
 	 * able to almagamate a series of small writes into a single
 	 * operation.
@@ -797,7 +803,7 @@ static bool __upload_inplace(struct kgem *kgem,
 		bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
 		box++;
 	}
-	if (!bo->flush && __kgem_bo_is_busy(kgem, bo))
+	if (__kgem_bo_is_busy(kgem, bo))
 		return bytes * bpp >> 12 >= kgem->half_cpu_cache_pages;
 	else
 		return bytes * bpp >> 12;

commit 07bcd3f0c715a37ac24a630173e0e28056567421
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 15:26:30 2015 +0100

    sna: Use direct uploads into a Damage tracked for v.small operations

    If the batch is being frequently flushed due to Damage+Composite
    tracking, we cannot amalgamate the small copies into the destination
    and so end up sending thousands and thousands of tiny batches with
    tiny render copies hogging the GPU.

    References: https://bugs.freedesktop.org/show_bug.cgi?id=91577
    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index db317ee..6bf87ee 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -797,7 +797,7 @@ static bool __upload_inplace(struct kgem *kgem,
 		bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
 		box++;
 	}
-	if (__kgem_bo_is_busy(kgem, bo))
+	if (!bo->flush && __kgem_bo_is_busy(kgem, bo))
 		return bytes * bpp >> 12 >= kgem->half_cpu_cache_pages;
 	else
 		return bytes * bpp >> 12;
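
Taken together, the three sna_io.c commits above leave __upload_inplace()
with a tiered policy: never write in place while the bo is queued in the
current batch (bo->exec), always write in place into a flushed
(compositor-visible) or CPU-mappable bo, and otherwise weigh the upload
size against the CPU cache. A simplified sketch of the final size
heuristic, using hypothetical standalone names but mirroring the shifts
in the diff:

	#include <stdbool.h>
	#include <stdint.h>

	struct damage_box { int x1, y1, x2, y2; };

	static bool upload_inplace(const struct damage_box *box, int nbox,
				   int bpp, bool bo_is_busy,
				   unsigned half_cpu_cache_pages)
	{
		uint64_t pixels = 0;

		while (nbox--) {
			pixels += (box->x2 - box->x1) * (box->y2 - box->y1);
			box++;
		}

		/* "bytes * bpp >> 12" in the diff: scale the pixel count by
		 * the depth and divide by the 4096-byte page size. */
		if (bo_is_busy)	/* stalling is costly: only large uploads */
			return pixels * bpp >> 12 >= half_cpu_cache_pages;
		return pixels * bpp >> 12;	/* idle: any page-sized write */
	}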
commit 3f128867d957e30690218404337b00bb327e647b
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 15:19:17 2015 +0100

    sna: Skip a no-op copy

    If the source has no contents, the destination is equally undefined.

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a816b77..c624d9e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6445,6 +6445,15 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	assert(region_num_rects(region));
 
+	if (src_priv &&
+	    src_priv->gpu_damage == NULL &&
+	    src_priv->cpu_damage == NULL) {
+		/* Rare but still happens, nothing to copy */
+		DBG(("%s: src pixmap=%ld is empty\n",
+		     __FUNCTION__, src_pixmap->drawable.serialNumber));
+		return;
+	}
+
 	if (src_pixmap == dst_pixmap)
 		return sna_self_copy_boxes(src, dst, gc,
 					   region, dx, dy,

commit a3ac461a302498335fc7b03ec3a59e9a2fd61a75
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 15:18:49 2015 +0100

    sna: Add a handle=%d to a DBG for consistency

    Helps with grepping if the DBG are consistent.

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 86356af..34893d7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4339,7 +4339,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 			count++;
 			size += bytes(bo);
 			kgem_bo_free(kgem, bo);
-			DBG(("%s: expiring %d\n",
+			DBG(("%s: expiring handle=%d\n",
 			     __FUNCTION__, bo->handle));
 		}
 	}

commit 5ef9d68043dac2628695b145846e724c2fa98abc
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 14:04:31 2015 +0100

    sna: More simple DBG tracing around cleanup/retire handling

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 064853c..86356af 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -661,10 +661,11 @@ static void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	struct local_i915_gem_mmap arg;
 	int err;
 
-retry:
 	VG_CLEAR(arg);
-	arg.handle = bo->handle;
 	arg.offset = 0;
+
+retry:
+	arg.handle = bo->handle;
 	arg.size = bytes(bo);
 	if ((err = do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_MMAP, &arg))) {
 		DBG(("%s: failed %d, throttling/cleaning caches\n",
@@ -2518,6 +2519,8 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 
 	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
 		if (bo->map__gtt) {
+			DBG(("%s: relinquishing large GTT mapping for handle=%d\n",
+			     __FUNCTION__, bo->handle));
 			munmap(bo->map__gtt, bytes(bo));
 			bo->map__gtt = NULL;
 		}
@@ -3381,8 +3384,10 @@ static void kgem_close_inactive(struct kgem *kgem)
 {
 	unsigned int i;
 
-	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
+	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
 		kgem_close_list(kgem, &kgem->inactive[i]);
+		assert(list_is_empty(&kgem->inactive[i]));
+	}
 }
 
 static void kgem_finish_buffers(struct kgem *kgem)
commit fd0236bb8e2e15665d72b8eb7f5ff15571a5c60b
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 13:48:38 2015 +0100

    sna: Release the reference on the rq->bo during forced cleanup

    Since the switch to preallocating the batch buffer, the request owns
    its reference to the rq->bo. However, dropping that reference was
    missed during the cleanup.

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 33f9f7e..064853c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3594,6 +3594,9 @@ static void kgem_cleanup(struct kgem *kgem)
 				kgem_bo_free(kgem, bo);
 		}
 
+		if (--rq->bo->refcnt == 0)
+			kgem_bo_free(kgem, rq->bo);
+
 		__kgem_request_free(rq);
 	}
 }

commit 6bab82b91d64af2e31d9f5fe7705c68caf09004b
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 13:25:14 2015 +0100

    sna: Always prefer a fresh allocation for the batch if !llc

    An unwanted subtle change in preference from

    commit 8c465d0fbf84b1d29c54d620f09063d2b7ccfeb8
    Author: Chris Wilson <ch...@chris-wilson.co.uk>
    Date:   Fri Aug 7 10:15:42 2015 +0100

        sna: Fallback after a bo allocation failure for the batch

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 9925628..33f9f7e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3858,7 +3858,7 @@ out_16384:
 		size = kgem->nbatch * sizeof(uint32_t);
 #endif
 
-	if (!kgem->batch_bo) {
+	if (!kgem->batch_bo || !kgem->has_llc) {
 		bo = kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
 		if (bo) {
 write:

commit a00fdce1fdf21379887511e7d4247ca401dc3a77
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 13:24:13 2015 +0100

    sna: Add a little more DBG information to kgem_retire()

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 30058dc..9925628 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3012,7 +3012,7 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 		int count = 0;
 		list_for_each_entry(bo, &kgem->flushing, request)
 			count++;
-		DBG(("%s: %d bo on flushing list\n", __FUNCTION__, count));
+		DBG(("%s: %d bo on flushing list, retired? %d\n", __FUNCTION__, count, retired));
 	}
 #endif
 
@@ -3111,6 +3111,8 @@ static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
 	while (!list_is_empty(&kgem->requests[ring])) {
 		struct kgem_request *rq;
 
+		DBG(("%s: retiring ring %d\n", __FUNCTION__, ring));
+
 		rq = list_first_entry(&kgem->requests[ring],
 				      struct kgem_request,
 				      list);
@@ -3135,8 +3137,8 @@ static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
 				      struct kgem_request,
 				      list)->bo;
 
-		DBG(("%s: ring=%d, %d outstanding requests, oldest=%d\n",
-		     __FUNCTION__, ring, count, bo ? bo->handle : 0));
+		DBG(("%s: ring=%d, %d outstanding requests, oldest=%d, retired? %d\n",
+		     __FUNCTION__, ring, count, bo ? bo->handle : 0, retired));
 	}
 #endif
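
The rq->bo release fix above is a plain ownership rule: since batch
buffers are now preallocated, each request holds its own reference on its
batch bo, and a forced cleanup must drop that reference like any other
owner. In sketch form, with hypothetical reduced types:

	struct batch_bo { int refcnt; };
	struct request { struct batch_bo *bo; };

	static void batch_bo_free(struct batch_bo *bo)
	{
		/* return the pages to the allocator */
	}

	static void request_cleanup(struct request *rq)
	{
		/* drop the request's own reference; free on last owner */
		if (--rq->bo->refcnt == 0)
			batch_bo_free(rq->bo);
	}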
commit d78685787a595ca68aec08074744fa23ed4f3d93
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 12:59:57 2015 +0100

    sna: Fix sync'ing to the most recent request on forced cache cleanup

    We picked the oldest, not most recent and so were not recovering as
    much memory as desired.

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index be0480c..30058dc 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4369,13 +4369,14 @@ bool kgem_cleanup_cache(struct kgem *kgem)
 		if (!list_is_empty(&kgem->requests[n])) {
 			struct kgem_request *rq;
 
-			rq = list_first_entry(&kgem->requests[n],
-					      struct kgem_request,
-					      list);
+			rq = list_last_entry(&kgem->requests[n],
+					     struct kgem_request,
+					     list);
 
 			DBG(("%s: sync on cleanup\n", __FUNCTION__));
 			kgem_bo_wait(kgem, rq->bo);
 		}
+		assert(list_is_empty(&kgem->requests[n]));
 	}
 
 	kgem_retire(kgem);

commit 862c0d3cc9634923f4c6db987c658164e6de50da
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 12:57:31 2015 +0100

    sna/dri2: Make event chaining DBG clearer

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/sna_dri2.c b/src/sna/sna_dri2.c
index e64c8ac..1872d0c 100644
--- a/src/sna/sna_dri2.c
+++ b/src/sna/sna_dri2.c
@@ -2325,6 +2325,9 @@ static void chain_swap(struct sna_dri2_event *chain)
 	union drm_wait_vblank vbl;
 	struct copy tmp;
 
+	DBG(("%s: draw=%ld, queued?=%d, type=%d\n",
+	     __FUNCTION__, (long)chain->draw->id, chain->queued, chain->type));
+
 	if (chain->draw == NULL) {
 		sna_dri2_event_free(chain);
 		return;
@@ -2333,8 +2336,6 @@ static void chain_swap(struct sna_dri2_event *chain)
 	if (chain->queued) /* too early! */
 		return;
 
-	DBG(("%s: chaining draw=%ld, type=%d\n",
-	     __FUNCTION__, (long)chain->draw->id, chain->type));
 	assert(chain == dri2_chain(chain->draw));
 	chain->queued = true;
 
@@ -2583,6 +2584,7 @@ void sna_dri2_vblank_handler(struct drm_event_vblank *event)
 	}
 
 	if (info->chain) {
+		DBG(("%s: continuing chain\n", __FUNCTION__));
 		assert(info->chain != info);
 		assert(info->draw == draw);
 		sna_dri2_remove_event((WindowPtr)draw, info);

commit f1b1baa7ffe55a78537160d8b679ce0225f06d63
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 12:45:26 2015 +0100

    sna: Add a DBG for when we discard an invalid old mmapping

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bc393ff..be0480c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2528,6 +2528,8 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	assert(list_is_empty(&bo->vma));
 	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
 	if (bo->map__gtt && !kgem_bo_can_map(kgem, bo)) {
+		DBG(("%s: relinquishing old GTT mapping for handle=%d\n",
+		     __FUNCTION__, bo->handle));
 		munmap(bo->map__gtt, bytes(bo));
 		bo->map__gtt = NULL;
 	}

commit 672436efd5b69fb9007cb80804a351b1e1572b60
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 12:03:34 2015 +0100

    sna: Treat being wedged as unlikely during rendering with HW

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6873a18..bc393ff 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1518,7 +1518,7 @@ static bool kgem_init_pinned_batches(struct kgem *kgem)
 	int ret = 0;
 	int n, i;
 
-	if (kgem->wedged)
+	if (unlikely(kgem->wedged))
 		return true;
 
 	for (n = 0; n < ARRAY_SIZE(count); n++) {
@@ -4127,7 +4127,7 @@ void _kgem_submit(struct kgem *kgem)
 		kgem_commit(kgem);
 	}
 
-	if (kgem->wedged)
+	if (unlikely(kgem->wedged))
 		kgem_cleanup(kgem);
 
 	kgem_reset(kgem);
@@ -4137,7 +4137,7 @@ void _kgem_submit(struct kgem *kgem)
 
 void kgem_throttle(struct kgem *kgem)
 {
-	if (kgem->wedged)
+	if (unlikely(kgem->wedged))
 		return;
 
 	if (__kgem_throttle(kgem, true)) {
@@ -4274,7 +4274,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 #endif
 
 	kgem_retire(kgem);
-	if (kgem->wedged)
+	if (unlikely(kgem->wedged))
 		kgem_cleanup(kgem);
 
 	kgem->expire(kgem);
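
The unlikely() annotation sprinkled through the wedged paths above is the
usual GCC/Clang branch-prediction hint; a definition along the
conventional lines (the driver's own macro in compiler.h may differ in
detail):

	/* Conventional GCC/Clang branch hints: tell the compiler which arm
	 * of the branch to lay out as the fall-through path. */
	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

Since a wedged GPU means acceleration is effectively disabled anyway,
those branches are cold on every healthy system, and hinting them keeps
the hot submit path tight.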
commit ccc553ff034534233f08ce306d4c4911059337c6
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 12:01:38 2015 +0100

    sna: Stop allocating requests once wedged

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6ebca96..6873a18 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1005,13 +1005,17 @@ static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
 {
 	struct kgem_request *rq;
 
-	rq = __kgem_freed_request;
-	if (rq) {
-		__kgem_freed_request = *(struct kgem_request **)rq;
+	if (unlikely(kgem->wedged)) {
+		rq = &kgem->static_request;
 	} else {
-		rq = malloc(sizeof(*rq));
-		if (rq == NULL)
-			rq = &kgem->static_request;
+		rq = __kgem_freed_request;
+		if (rq) {
+			__kgem_freed_request = *(struct kgem_request **)rq;
+		} else {
+			rq = malloc(sizeof(*rq));
+			if (rq == NULL)
+				rq = &kgem->static_request;
+		}
 	}
 
 	list_init(&rq->buffers);

commit b0f125495caced05548442bc2fe64a4b1b46339c
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 11:57:27 2015 +0100

    sna: Tweak DBG traces for cache cleanup

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 014a31b..6ebca96 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1704,6 +1704,7 @@ static int kgem_bo_wait(struct kgem *kgem, struct kgem_bo *bo)
 	struct drm_i915_gem_wait wait;
 	int ret;
 
+	DBG(("%s: waiting for handle=%d\n", __FUNCTION__, bo->handle));
 	if (bo->rq == NULL)
 		return 0;
 
@@ -1756,14 +1757,12 @@ restart:
 	if (kgem->batch_bo)
 		kgem->batch = kgem_bo_map__cpu(kgem, kgem->batch_bo);
 	if (kgem->batch == NULL) {
-		DBG(("%s: unable to map batch bo, mallocing(size=%d)\n",
-		     __FUNCTION__,
-		     sizeof(uint32_t)*kgem->batch_size));
 		if (kgem->batch_bo) {
 			kgem_bo_destroy(kgem, kgem->batch_bo);
 			kgem->batch_bo = NULL;
 		}
 
+		assert(kgem->ring < ARRAY_SIZE(kgem->requests));
 		if (!list_is_empty(&kgem->requests[kgem->ring])) {
 			struct kgem_request *rq;
 
@@ -1773,6 +1772,8 @@ restart:
 			goto restart;
 		}
 
+		DBG(("%s: unable to map batch bo, mallocing(size=%d)\n",
+		     __FUNCTION__, sizeof(uint32_t)*kgem->batch_size));
 		if (posix_memalign((void **)&kgem->batch, PAGE_SIZE,
 				   ALIGN(sizeof(uint32_t) * kgem->batch_size, PAGE_SIZE))) {
 			ERR(("%s: batch allocation failed, disabling acceleration\n", __FUNCTION__));
@@ -2666,7 +2667,6 @@ static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
 		list_move(&bo->list, &kgem->scanout);
 
 	kgem->need_expire = true;
-
 }
 
 static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
@@ -3101,6 +3101,7 @@ static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
 {
 	bool retired = false;
 
+	assert(ring < ARRAY_SIZE(kgem->requests));
 	while (!list_is_empty(&kgem->requests[ring])) {
 		struct kgem_request *rq;
 
@@ -3980,6 +3981,7 @@ void _kgem_submit(struct kgem *kgem)
 	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
 	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
 	assert(kgem->nfence <= kgem->fence_max);
+	assert(kgem->ring < ARRAY_SIZE(kgem->requests));
 
 	kgem_finish_buffers(kgem);
 
@@ -6001,6 +6003,7 @@ inline static bool nearly_idle(struct kgem *kgem)
 {
 	int ring = kgem->ring == KGEM_BLT;
 
+	assert(ring < ARRAY_SIZE(kgem->requests));
 	if (list_is_singular(&kgem->requests[ring]))
 		return true;
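
The request-allocation change above leans on a pattern worth spelling
out: once wedged, no request will ever be submitted, so instead of
growing the freelist the allocator hands back a single embedded
static_request; the same object already served as the out-of-memory
fallback. A reduced sketch with hypothetical types (the freed-request
freelist is omitted):

	#include <stdbool.h>
	#include <stdlib.h>

	struct request { /* buffers, bo, ... */ int placeholder; };

	struct kgem_sketch {
		bool wedged;
		struct request static_request;	/* never freed, reused forever */
	};

	static struct request *request_alloc(struct kgem_sketch *kgem)
	{
		struct request *rq;

		if (kgem->wedged)		/* nothing will be submitted */
			return &kgem->static_request;

		rq = malloc(sizeof(*rq));
		if (rq == NULL)			/* out of memory: degrade, don't fail */
			rq = &kgem->static_request;
		return rq;
	}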
commit 90b6cba0669b296ff21ff7b4410a7a0515ee2489
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 11:07:09 2015 +0100

    sna: Add DBG trace when doing a forced cache cleanup

    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 44083b4..014a31b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -574,6 +574,8 @@ static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 retry_gtt:
 	gtt.handle = bo->handle;
 	if ((err = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt))) {
+		DBG(("%s: failed %d, throttling/cleaning caches\n",
+		     __FUNCTION__, err));
 		assert(err != EINVAL);
 
 		(void)__kgem_throttle_retire(kgem, 0);
@@ -593,6 +595,8 @@ retry_mmap:
 		   kgem->fd, gtt.offset);
 	if (ptr == MAP_FAILED) {
 		err = errno;
+		DBG(("%s: failed %d, throttling/cleaning caches\n",
+		     __FUNCTION__, err));
 		assert(err != EINVAL);
 
 		if (__kgem_throttle_retire(kgem, 0))
@@ -631,6 +635,8 @@ retry_wc:
 	wc.size = bytes(bo);
 	wc.flags = I915_MMAP_WC;
 	if ((err = do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_MMAP_v2, &wc))) {
+		DBG(("%s: failed %d, throttling/cleaning caches\n",
+		     __FUNCTION__, err));
 		assert(err != EINVAL);
 
 		if (__kgem_throttle_retire(kgem, 0))
@@ -661,6 +667,8 @@ retry:
 	arg.offset = 0;
 	arg.size = bytes(bo);
 	if ((err = do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_MMAP, &arg))) {
+		DBG(("%s: failed %d, throttling/cleaning caches\n",
+		     __FUNCTION__, err));
 		assert(err != -EINVAL || bo->prime);
 
 		if (__kgem_throttle_retire(kgem, 0))
@@ -820,6 +828,8 @@ retry:
 	}
 
 	if ((err = gem_write(kgem->fd, bo->handle, 0, length, data))) {
+		DBG(("%s: failed %d, throttling/cleaning caches\n",
+		     __FUNCTION__, err));
 		assert(err != EINVAL);
 
 		(void)__kgem_throttle_retire(kgem, 0);
@@ -4344,31 +4354,26 @@ bool kgem_cleanup_cache(struct kgem *kgem)
 	unsigned int i;
 	int n;
 
+	DBG(("%s\n", __FUNCTION__));
+
 	/* sync to the most recent request */
 	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
 		if (!list_is_empty(&kgem->requests[n])) {
 			struct kgem_request *rq;
-			struct drm_i915_gem_set_domain set_domain;
 
 			rq = list_first_entry(&kgem->requests[n],
 					      struct kgem_request,
 					      list);
 
 			DBG(("%s: sync on cleanup\n", __FUNCTION__));
-
-			VG_CLEAR(set_domain);
-			set_domain.handle = rq->bo->handle;
-			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-			(void)do_ioctl(kgem->fd,
-				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
-				       &set_domain);
+			kgem_bo_wait(kgem, rq->bo);
 		}
 	}
 
 	kgem_retire(kgem);
 	kgem_cleanup(kgem);
 
+	DBG(("%s: need_expire?=%d\n", __FUNCTION__, kgem->need_expire));
 	if (!kgem->need_expire)
 		return false;
 
@@ -4395,6 +4400,8 @@ bool kgem_cleanup_cache(struct kgem *kgem)
 
 	kgem->need_purge = false;
 	kgem->need_expire = false;
+
+	DBG(("%s: complete\n", __FUNCTION__));
 	return true;
 }
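
The mmap/pwrite paths gaining a DBG above all share one recovery shape:
when the kernel refuses the call (typically under memory or address-space
pressure), throttle and retire cached buffers to give resources back,
then retry. A generic sketch of that shape; the two declared helpers
stand in for the driver's do_ioctl() and __kgem_throttle_retire():

	#include <stdbool.h>

	/* Stand-ins for the driver's wrappers; declarations only. */
	int do_ioctl(int fd, unsigned long request, void *arg);
	bool throttle_and_retire_caches(void);	/* true if anything was freed */

	static int ioctl_with_cache_pressure_retry(int fd, unsigned long request,
						   void *arg)
	{
		int err;

	retry:
		err = do_ioctl(fd, request, arg);
		if (err == 0)
			return 0;

		/* Reclaim idle buffer caches and retry while progress is made. */
		if (throttle_and_retire_caches())
			goto retry;

		return err;
	}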
commit ca71199679cac9cc161c84cb09d12f133abf8a64
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 10:40:30 2015 +0100

    sna: Harden batch allocation against resource starvation

    If the batch allocation fails, retire the oldest request and try again.

    References: https://bugs.freedesktop.org/show_bug.cgi?id=91577
    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 251c9cc..44083b4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1689,6 +1689,36 @@ static void kgem_fixup_relocs(struct kgem *kgem, struct kgem_bo *bo, int shrink)
 	}
 }
 
+static int kgem_bo_wait(struct kgem *kgem, struct kgem_bo *bo)
+{
+	struct drm_i915_gem_wait wait;
+	int ret;
+
+	if (bo->rq == NULL)
+		return 0;
+
+	VG_CLEAR(wait);
+	wait.bo_handle = bo->handle;
+	wait.timeout_ns = -1;
+	ret = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+	if (ret) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+		ret = do_ioctl(kgem->fd,
+			       DRM_IOCTL_I915_GEM_SET_DOMAIN,
+			       &set_domain);
+	}
+
+	if (ret == 0)
+		__kgem_retire_requests_upto(kgem, bo);
+
+	return ret;
+}
+
 static struct kgem_bo *kgem_new_batch(struct kgem *kgem)
 {
 	struct kgem_bo *last;
@@ -1709,6 +1739,7 @@ static struct kgem_bo *kgem_new_batch(struct kgem *kgem)
 	if (!kgem->has_llc)
 		flags |= CREATE_UNCACHED;
 
+restart:
 	kgem->batch_bo = kgem_create_linear(kgem,
 					    sizeof(uint32_t)*kgem->batch_size,
 					    flags);
@@ -1723,6 +1754,15 @@ static struct kgem_bo *kgem_new_batch(struct kgem *kgem)
 		kgem->batch_bo = NULL;
 	}
 
+	if (!list_is_empty(&kgem->requests[kgem->ring])) {
+		struct kgem_request *rq;
+
+		rq = list_first_entry(&kgem->requests[kgem->ring],
+				      struct kgem_request, list);
+		if (kgem_bo_wait(kgem, rq->bo) == 0)
+			goto restart;
+	}
+
 	if (posix_memalign((void **)&kgem->batch, PAGE_SIZE,
 			   ALIGN(sizeof(uint32_t) * kgem->batch_size, PAGE_SIZE))) {
 		ERR(("%s: batch allocation failed, disabling acceleration\n", __FUNCTION__));
@@ -3777,24 +3817,14 @@ out_16384:
 	}
 
 	if (size < 16384) {
-		struct drm_i915_gem_set_domain set_domain;
-
 		bo = list_first_entry(&kgem->pinned_batches[size > 4096],
 				      struct kgem_bo,
 				      list);
 		list_move_tail(&bo->list,
 			       &kgem->pinned_batches[size > 4096]);
 
 		DBG(("%s: syncing due to busy batches\n", __FUNCTION__));
-
-		VG_CLEAR(set_domain);
-		set_domain.handle = bo->handle;
-		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-		if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
-			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
-			kgem_throttle(kgem);
+		if (kgem_bo_wait(kgem, bo))
 			return NULL;
-		}
 
 		kgem_retire(kgem);
 		assert(bo->rq == NULL);
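
The new kgem_bo_wait() above prefers the kernel's dedicated wait ioctl
and only falls back to a set-domain stall when that fails. For reference,
a minimal standalone use of the i915 wait interface, assuming an open
i915 DRM fd and a valid GEM handle (error handling elided):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Block until all rendering against the object has completed. */
	static int gem_wait_idle(int fd, uint32_t handle)
	{
		struct drm_i915_gem_wait wait;

		memset(&wait, 0, sizeof(wait));
		wait.bo_handle = handle;
		wait.timeout_ns = -1;	/* negative: wait indefinitely */

		return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	}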
commit 8c465d0fbf84b1d29c54d620f09063d2b7ccfeb8
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 10:15:42 2015 +0100

    sna: Fallback after a bo allocation failure for the batch

    If we fail to allocate the next bo to use for the next_request, we
    can just fallback to the delayed allocation used by !llc.

    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91577
    Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a8b96a4..251c9cc 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3716,12 +3716,10 @@ static int compact_batch_surface(struct kgem *kgem, int *shrink)
 static struct kgem_bo *
 kgem_create_batch(struct kgem *kgem)
 {
-#if !DBG_NO_SHRINK_BATCHES
-	struct drm_i915_gem_set_domain set_domain;
 	struct kgem_bo *bo;
-	int shrink = 0;
-	int size;
+	int size, shrink = 0;
 
+#if !DBG_NO_SHRINK_BATCHES
 	if (kgem->surface != kgem->batch_size)
 		size = compact_batch_surface(kgem, &shrink);
 	else
@@ -3779,6 +3777,8 @@ out_16384:
 	}
 
 	if (size < 16384) {
+		struct drm_i915_gem_set_domain set_domain;
+
 		bo = list_first_entry(&kgem->pinned_batches[size > 4096],
 				      struct kgem_bo,
 				      list);
@@ -3802,8 +3802,14 @@ out_16384:
 			goto write;
 		}
 	}
+#else
+	if (kgem->surface != kgem->batch_size)
+		size = kgem->batch_size * sizeof(uint32_t);
+	else
+		size = kgem->nbatch * sizeof(uint32_t);
+#endif
 
-	if (!kgem->has_llc) {
+	if (!kgem->batch_bo) {
 		bo = kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
 		if (bo) {
 write:
@@ -3815,7 +3821,7 @@ write:
 			return bo;
 		}
 	}
-#endif
+
 	return kgem_new_batch(kgem);
 }

commit 69d8edc11173df021aa2e158b2530257113141fd
Author: Chris Wilson <ch...@chris-wilson.co.uk>
Date:   Fri Aug 7 10:08:17 2015 +0100

    sna: Handle batch allocation failure

    Whilst we currently do not try and submit a failed batch buffer
    allocation, we still treat it as a valid request. This explodes much
    later when we inspect the NULL rq->bo.
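
The diff for this last commit is truncated from the digest. Purely as an
illustration of the failure mode it describes, and not the actual fix: a
request whose batch bo allocation failed must not be carried forward as
if it owned a buffer, since later commit/retire paths dereference rq->bo.
A hypothetical guard in sketch form:

	#include <stdbool.h>
	#include <stddef.h>

	struct bo;					/* opaque */
	struct request_sketch { struct bo *bo; };	/* hypothetical reduction */

	static bool request_is_submittable(const struct request_sketch *rq)
	{
		/* A NULL rq->bo means the batch allocation failed; committing
		 * or retiring such a request would dereference NULL later. */
		return rq != NULL && rq->bo != NULL;
	}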