When converting to folios, the cleanup path of shmem_get_pages() was
missed. When a DMA remap fails and the max segment size is greater than
PAGE_SIZE, the remap is retried with a PAGE_SIZE segment size, but the
cleanup code on that path isn't using the folio APIs and as a result
doesn't handle compound pages correctly.

Release the pages through shmem_sg_free_table() instead, which releases
each folio only once. Since the retry must keep the mapping
unevictable, mapping_clear_unevictable() is moved out of
shmem_sg_free_table() and into its callers.
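
For reference, the folio-aware release done by shmem_sg_free_table()
boils down to the following simplified sketch (the real function also
batches the releases through a folio_batch and handles the dirty/backup
flags):

	struct sgt_iter sgt_iter;
	struct folio *last = NULL;
	struct page *page;

	for_each_sgt_page(page, sgt_iter, st) {
		struct folio *folio = page_folio(page);

		/* Visit each folio once, not once per tail page. */
		if (folio == last)
			continue;
		last = folio;
		/* Drop one reference per folio, not one per page. */
		folio_put(folio);
	}
	sg_free_table(st);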

Link: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13487
Fixes: 0b62af28f249 ("i915: convert shmem_sg_free_table() to use a folio_batch")
Signed-off-by: Brian Geffon <bgef...@google.com>
Suggested-by: Tomasz Figa <tf...@google.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index fe69f2c8527d..02ddab5bf5c0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -37,8 +37,6 @@ void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
        struct folio *last = NULL;
        struct page *page;
 
-       mapping_clear_unevictable(mapping);
-
        folio_batch_init(&fbatch);
        for_each_sgt_page(page, sgt_iter, st) {
                struct folio *folio = page_folio(page);
@@ -180,10 +178,10 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
        return 0;
 err_sg:
        sg_mark_end(sg);
+       mapping_clear_unevictable(mapping);
        if (sg != st->sgl) {
                shmem_sg_free_table(st, mapping, false, false);
        } else {
-               mapping_clear_unevictable(mapping);
                sg_free_table(st);
        }
 
@@ -209,8 +207,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
        struct address_space *mapping = obj->base.filp->f_mapping;
        unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
        struct sg_table *st;
-       struct sgt_iter sgt_iter;
-       struct page *page;
        int ret;
 
        /*
@@ -239,9 +235,8 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
                 * for PAGE_SIZE chunks instead may be helpful.
                 */
                if (max_segment > PAGE_SIZE) {
-                       for_each_sgt_page(page, sgt_iter, st)
-                               put_page(page);
-                       sg_free_table(st);
+                       /* Leave the mapping unevictable while we retry */
+                       shmem_sg_free_table(st, mapping, false, false);
                        kfree(st);
 
                        max_segment = PAGE_SIZE;
@@ -265,6 +260,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 
 err_pages:
+       mapping_clear_unevictable(mapping);
        shmem_sg_free_table(st, mapping, false, false);
        /*
         * shmemfs first checks if there is enough memory to allocate the page
@@ -402,6 +398,7 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj, pages);
 
+       mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
        shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
                            obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
        kfree(pages);
-- 
2.48.0.rc2.279.g1de40edade-goog
