There have been quite a few reports of "BUG: sleeping function called
from invalid context at mm/page_alloc.c".

After io_mapping_map_atomic_wc()/kmap_atomic() the code runs in atomic
context, but compress_page() cannot be called there: it calls
pool_alloc() with the GFP_KERNEL flag, which can sleep. That is what
triggers the reported BUG.
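
For illustration, a minimal sketch of the failing pattern; bad_compress()
is a hypothetical stand-in for the compress_page() call chain, not code
from the driver:

	#include <linux/highmem.h>
	#include <linux/slab.h>

	/* hypothetical helper mirroring the broken call chain */
	static int bad_compress(struct page *page)
	{
		void *s, *buf;

		s = kmap_atomic(page);	/* enters atomic context */
		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* may sleep: fires the BUG */
		kunmap_atomic(s);
		kfree(buf);

		return buf ? 0 : -ENOMEM;
	}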

So switch to the non-atomic variants instead.
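
With the non-atomic variants the same hypothetical helper becomes legal:
kmap() may itself sleep and does not disable preemption, so the
GFP_KERNEL allocation is allowed while the mapping is held:

	/* same hypothetical helper, now using the non-atomic mapping */
	static int good_compress(struct page *page)
	{
		void *s, *buf;

		s = kmap(page);				/* may sleep; non-atomic */
		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* legal here */
		kunmap(page);				/* kunmap() takes the page */
		kfree(buf);

		return buf ? 0 : -ENOMEM;
	}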

Signed-off-by: Bruce Chang <yu.bruce.ch...@intel.com>
---
 drivers/gpu/drm/i915/i915_gpu_error.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 1f2f266f26af..7118ecb7f144 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1007,67 +1007,67 @@ i915_error_object_create(struct drm_i915_private *i915,
        compress->wc = i915_gem_object_is_lmem(vma->obj) ||
                       drm_mm_node_allocated(&ggtt->error_capture);
 
        ret = -EINVAL;
        if (drm_mm_node_allocated(&ggtt->error_capture)) {
                void __iomem *s;
                dma_addr_t dma;
 
                for_each_sgt_daddr(dma, iter, vma->pages) {
                        ggtt->vm.insert_page(&ggtt->vm, dma, slot,
                                             I915_CACHE_NONE, 0);
 
                        s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
                        ret = compress_page(compress, (void __force *)s, dst);
                        io_mapping_unmap(s);
                        if (ret)
                                break;
                }
        } else if (i915_gem_object_is_lmem(vma->obj)) {
                struct intel_memory_region *mem = vma->obj->mm.region;
                dma_addr_t dma;
 
                for_each_sgt_daddr(dma, iter, vma->pages) {
                        void __iomem *s;
 
-                       s = io_mapping_map_atomic_wc(&mem->iomap, dma);
+                       s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
                        ret = compress_page(compress, (void __force *)s, dst);
-                       io_mapping_unmap_atomic(s);
+                       io_mapping_unmap(s);
                        if (ret)
                                break;
                }
        } else {
                struct page *page;
 
                for_each_sgt_page(page, iter, vma->pages) {
                        void *s;
 
                        drm_clflush_pages(&page, 1);
 
-                       s = kmap_atomic(page);
+                       s = kmap(page);
                        ret = compress_page(compress, s, dst);
-                       kunmap_atomic(s);
+                       kunmap(page);
 
                        drm_clflush_pages(&page, 1);
 
                        if (ret)
                                break;
                }
        }
 
        if (ret || compress_flush(compress, dst)) {
                while (dst->page_count--)
                        pool_free(&compress->pool, dst->pages[dst->page_count]);
                kfree(dst);
                dst = NULL;
        }
        compress_finish(compress);
 
        return dst;
 }
 
 /*
 * Generate a semi-unique error code. The code is not meant to have meaning, The
  * code's only purpose is to try to prevent false duplicated bug reports by
  * grossly estimating a GPU error state.
  *
  * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
-- 
2.24.0
