On 08/07/2025 07:54, Arunpravin Paneer Selvam wrote:
- Added a handler in DRM buddy manager to reset the cleared
   flag for the blocks in the freelist.

- This is necessary because, upon resuming, the VRAM becomes
   cluttered with BIOS data, yet the VRAM backend manager
   believes that everything has been cleared.

v2:
   - Add lock before accessing drm_buddy_clear_reset_blocks()(Matthew Auld)
   - Force merge the two dirty blocks.(Matthew Auld)
   - Add a new unit test case for this issue.(Matthew Auld)
   - Having this function being able to flip the state either way would be
     good. (Matthew Brost)

v3(Matthew Auld):
   - Do merge step first to avoid the use of extra reset flag.

Signed-off-by: Arunpravin Paneer Selvam <arunpravin.paneerselvam@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Cc: stable@vger.kernel.org
Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality")
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3812

Reviewed-by: Matthew Auld <matthew.auld@intel.com>

---
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c   |  2 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      |  1 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17 ++++++++
  drivers/gpu/drm/drm_buddy.c                  | 43 ++++++++++++++++++++
  include/drm/drm_buddy.h                      |  2 +
  5 files changed, 65 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a59f194e3360..b89e46f29b51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5193,6 +5193,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
                dev->dev->power.disable_depth--;
  #endif
        }
+
+       amdgpu_vram_mgr_clear_reset_blocks(adev);
        adev->in_suspend = false;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 208b7d1d8a27..450e4bf093b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                                  uint64_t start, uint64_t size);
  int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                      uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
                            struct ttm_resource *res);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index abdc52b0895a..07c936e90d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -782,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
        return atomic64_read(&mgr->vis_usage);
  }
+/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+       struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+       struct drm_buddy *mm = &mgr->mm;
+
+       mutex_lock(&mgr->lock);
+       drm_buddy_reset_clear(mm, false);
+       mutex_unlock(&mgr->lock);
+}
+
  /**
   * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
   *
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index a1e652b7631d..a94061f373de 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -405,6 +405,49 @@ drm_get_buddy(struct drm_buddy_block *block)
  }
  EXPORT_SYMBOL(drm_get_buddy);
+/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+ * in the freelist.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+       u64 root_size, size, start;
+       unsigned int order;
+       int i;
+
+       size = mm->size;
+       for (i = 0; i < mm->n_roots; ++i) {
+               order = ilog2(size) - ilog2(mm->chunk_size);
+               start = drm_buddy_block_offset(mm->roots[i]);
+               __force_merge(mm, start, start + size, order);
+
+               root_size = mm->chunk_size << order;
+               size -= root_size;
+       }
+
+       for (i = 0; i <= mm->max_order; ++i) {
+               struct drm_buddy_block *block;
+
+               list_for_each_entry_reverse(block, &mm->free_list[i], link) {
+                       if (is_clear != drm_buddy_block_is_clear(block)) {
+                               if (is_clear) {
+                                       mark_cleared(block);
+                                       mm->clear_avail += drm_buddy_block_size(mm, block);
+                               } else {
+                                       clear_reset(block);
+                                       mm->clear_avail -= drm_buddy_block_size(mm, block);
+                               }
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
+
  /**
   * drm_buddy_free_block - free a block
   *
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 9689a7c5dd36..513837632b7d 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 new_size,
                         struct list_head *blocks);
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
  void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
void drm_buddy_free_list(struct drm_buddy *mm,

Reply via email to