On Tue, Aug 05, 2025 at 10:20:43PM -0400, Zi Yan wrote:
>Current behavior is to move to the next PAGE_SIZE and split, but that makes
>it hard to check after-split folio orders. This is a preparation patch to
>allow a more precise split_huge_page_test check in an upcoming commit.
>
>The split_folio_to_order() part is not changed, since the split_pte_mapped_thp
>test relies on its current behavior.
>
>Signed-off-by: Zi Yan <z...@nvidia.com>
>---
> mm/huge_memory.c | 18 +++++++++++-------
> 1 file changed, 11 insertions(+), 7 deletions(-)
>
>diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>index 8a11c2d402d4..b2ce8ac0c5a9 100644
>--- a/mm/huge_memory.c
>+++ b/mm/huge_memory.c
>@@ -4341,6 +4341,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
>               struct folio *folio;
>               struct address_space *mapping;
>               unsigned int target_order = new_order;
>+              long nr_pages;
> 
>               if (!vma)
>                       break;
>@@ -4358,6 +4359,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
>               if (!is_transparent_hugepage(folio))
>                       goto next;
> 
>+              nr_pages = folio_nr_pages(folio);
>+

Could be folio_large_nr_pages()?
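
Something along these lines, as a rough sketch (assuming the current helper
split, where folio_nr_pages() only adds the small-folio check before calling
folio_large_nr_pages(); the folio is already known to be large here since
is_transparent_hugepage(folio) was checked just above):

		/*
		 * The folio is a THP at this point, so the large-folio
		 * helper can be used directly.
		 */
		nr_pages = folio_large_nr_pages(folio);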

>               if (!folio_test_anon(folio)) {
>                       mapping = folio->mapping;
>                       target_order = max(new_order,
>@@ -4385,15 +4388,16 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
>               if (!folio_test_anon(folio) && folio->mapping != mapping)
>                       goto unlock;
> 
>-              if (in_folio_offset < 0 ||
>-                  in_folio_offset >= folio_nr_pages(folio)) {
>+              if (in_folio_offset < 0 || in_folio_offset >= nr_pages) {
>                       if (!split_folio_to_order(folio, target_order))
>                               split++;
>               } else {
>-                      struct page *split_at = folio_page(folio,
>-                                                         in_folio_offset);
>-                      if (!folio_split(folio, target_order, split_at, NULL))
>+                      struct page *split_at =
>+                              folio_page(folio, in_folio_offset);
>+                      if (!folio_split(folio, target_order, split_at, NULL)) {
>                               split++;
>+                              addr += PAGE_SIZE * nr_pages;
>+                      }
>               }
> 
> unlock:
>@@ -4438,8 +4442,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
>       if (IS_ERR(candidate))
>               goto out;
> 
>-      pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
>-               file_path, off_start, off_end);
>+      pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx], new_order %u in_folio_offset %ld\n",
>+               file_path, off_start, off_end, new_order, in_folio_offset);
> 

How about moving this part into patch 1?

>       mapping = candidate->f_mapping;
>       min_order = mapping_min_folio_order(mapping);
>-- 
>2.47.2
>

-- 
Wei Yang
Help you, Help me
