Support splitting pages during THP zone device migration as needed.
The common case is that, after setup, the destination is unable to
allocate MIGRATE_PFN_COMPOUND (PMD-sized) pages during the migrate
phase, so the source THP has to be split and migrated as individual
order-0 pages.
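
As a rough illustration of the driver side (not part of this patch;
dmirror_migrate_alloc_dst(), dmirror_alloc_huge_page() and
dmirror_alloc_page() are placeholder names loosely modelled on
lib/test_hmm.c), the destination allocation step is expected to fall
back like this when no compound page can be allocated:

  /* Hypothetical driver callback filling migrate->dst[] before migrate_vma_pages(). */
  static void dmirror_migrate_alloc_dst(struct dmirror *dmirror,
                                        struct migrate_vma *migrate)
  {
          unsigned long i;

          for (i = 0; i < migrate->npages; ) {
                  struct page *dpage;

                  if (migrate->src[i] & MIGRATE_PFN_COMPOUND) {
                          /* Try a PMD-sized destination page first. */
                          dpage = dmirror_alloc_huge_page(dmirror);
                          if (dpage) {
                                  migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)) |
                                                    MIGRATE_PFN_COMPOUND;
                                  i += HPAGE_PMD_NR;
                                  continue;
                          }
                          /*
                           * No huge page available: leave MIGRATE_PFN_COMPOUND
                           * clear in dst[i]; __migrate_device_pages() then splits
                           * the source folio via migrate_vma_split_pages() and
                           * migrates it as individual order-0 pages.
                           */
                  }
                  dpage = dmirror_alloc_page(dmirror);  /* order-0 fallback */
                  migrate->dst[i] = dpage ? migrate_pfn(page_to_pfn(dpage)) : 0;
                  i++;
          }
  }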

Add a new routine, migrate_vma_split_pages(), to support splitting of
already isolated pages. The pages being migrated are already unmapped
and marked for migration during setup (via unmap). __folio_split() and
__split_huge_page_to_list_to_order() take an additional "unmapped"
argument, to avoid unmapping and remapping these pages and
unlocking/putting the folio.
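
For example (assuming HPAGE_PMD_NR == 512 and a source head pfn P), a
single compound source entry

  src[idx]     = migrate_pfn(P) | MIGRATE_PFN_MIGRATE | MIGRATE_PFN_COMPOUND

is fanned out by migrate_vma_split_pages() after the split into

  src[idx + n] = migrate_pfn(P + n) | MIGRATE_PFN_MIGRATE,  n = 0 .. 511

i.e. MIGRATE_PFN_COMPOUND is cleared and each order-0 page gets its own
entry carrying the original flag bits.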

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: David Hildenbrand <da...@redhat.com>
Cc: Zi Yan <z...@nvidia.com>
Cc: Joshua Hahn <joshua.hah...@gmail.com>
Cc: Rakie Kim <rakie....@sk.com>
Cc: Byungchul Park <byungc...@sk.com>
Cc: Gregory Price <gou...@gourry.net>
Cc: Ying Huang <ying.hu...@linux.alibaba.com>
Cc: Alistair Popple <apop...@nvidia.com>
Cc: Oscar Salvador <osalva...@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>
Cc: Baolin Wang <baolin.w...@linux.alibaba.com>
Cc: "Liam R. Howlett" <liam.howl...@oracle.com>
Cc: Nico Pache <npa...@redhat.com>
Cc: Ryan Roberts <ryan.robe...@arm.com>
Cc: Dev Jain <dev.j...@arm.com>
Cc: Barry Song <bao...@kernel.org>
Cc: Lyude Paul <ly...@redhat.com>
Cc: Danilo Krummrich <d...@kernel.org>
Cc: David Airlie <airl...@gmail.com>
Cc: Simona Vetter <sim...@ffwll.ch>
Cc: Ralph Campbell <rcampb...@nvidia.com>
Cc: Mika Penttilä <mpent...@redhat.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Francois Dugast <francois.dug...@intel.com>

Signed-off-by: Balbir Singh <balb...@nvidia.com>
---
 include/linux/huge_mm.h | 11 +++++--
 lib/test_hmm.c          |  9 ++++++
 mm/huge_memory.c        | 45 ++++++++++++++------------
 mm/migrate_device.c     | 71 ++++++++++++++++++++++++++++++++++-------
 4 files changed, 101 insertions(+), 35 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a4880fe98e46..52d8b435950b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -343,8 +343,8 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
                vm_flags_t vm_flags);
 
 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
-int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
-               unsigned int new_order);
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+               unsigned int new_order, bool unmapped);
 int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
 bool uniform_split_supported(struct folio *folio, unsigned int new_order,
@@ -353,6 +353,13 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
                bool warns);
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
                struct list_head *list);
+
+static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+               unsigned int new_order)
+{
+       return __split_huge_page_to_list_to_order(page, list, new_order, false);
+}
+
 /*
  * try_folio_split - try to split a @folio at @page using non uniform split.
  * @folio: folio to be split
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 14dbce719896..dda87c34b440 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1611,6 +1611,15 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
        order = folio_order(page_folio(vmf->page));
        nr = 1 << order;
 
+       /*
+        * When folios are partially mapped, we can't rely on the folio
+        * order of vmf->page as the folio might not be fully split yet
+        */
+       if (vmf->pte) {
+               order = 0;
+               nr = 1;
+       }
+
        /*
         * Consider a per-cpu cache of src and dst pfns, but with
         * large number of cpus that might not scale well.
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index dc58081b661c..863393dec1f1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3474,15 +3474,6 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
                new_folio->mapping = folio->mapping;
                new_folio->index = folio->index + i;
 
-               /*
-                * page->private should not be set in tail pages. Fix up and warn once
-                * if private is unexpectedly set.
-                */
-               if (unlikely(new_folio->private)) {
-                       VM_WARN_ON_ONCE_PAGE(true, new_head);
-                       new_folio->private = NULL;
-               }
-
                if (folio_test_swapcache(folio))
                        new_folio->swap.val = folio->swap.val + i;
 
@@ -3711,6 +3702,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
  * @lock_at: a page within @folio to be left locked to caller
  * @list: after-split folios will be put on it if non NULL
  * @uniform_split: perform uniform split or not (non-uniform split)
+ * @unmapped: The pages are already unmapped; they are migration entries.
  *
  * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
  * It is in charge of checking whether the split is supported or not and
@@ -3726,7 +3718,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
  */
 static int __folio_split(struct folio *folio, unsigned int new_order,
                struct page *split_at, struct page *lock_at,
-               struct list_head *list, bool uniform_split)
+               struct list_head *list, bool uniform_split, bool unmapped)
 {
        struct deferred_split *ds_queue = get_deferred_split_queue(folio);
        XA_STATE(xas, &folio->mapping->i_pages, folio->index);
@@ -3776,13 +3768,15 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                 * is taken to serialise against parallel split or collapse
                 * operations.
                 */
-               anon_vma = folio_get_anon_vma(folio);
-               if (!anon_vma) {
-                       ret = -EBUSY;
-                       goto out;
+               if (!unmapped) {
+                       anon_vma = folio_get_anon_vma(folio);
+                       if (!anon_vma) {
+                               ret = -EBUSY;
+                               goto out;
+                       }
+                       anon_vma_lock_write(anon_vma);
                }
                mapping = NULL;
-               anon_vma_lock_write(anon_vma);
        } else {
                unsigned int min_order;
                gfp_t gfp;
@@ -3849,7 +3843,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                goto out_unlock;
        }
 
-       unmap_folio(folio);
+       if (!unmapped)
+               unmap_folio(folio);
 
        /* block interrupt reentry in xa_lock and spinlock */
        local_irq_disable();
@@ -3936,10 +3931,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
                        next = folio_next(new_folio);
 
+                       zone_device_private_split_cb(folio, new_folio);
+
                        expected_refs = folio_expected_ref_count(new_folio) + 1;
                        folio_ref_unfreeze(new_folio, expected_refs);
 
-                       lru_add_split_folio(folio, new_folio, lruvec, list);
+                       if (!unmapped)
+                               lru_add_split_folio(folio, new_folio, lruvec, list);
 
                        /*
                         * Anonymous folio with swap cache.
@@ -3973,6 +3971,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                        folio_put_refs(new_folio, nr_pages);
                }
 
+               zone_device_private_split_cb(folio, NULL);
                /*
                 * Unfreeze @folio only after all page cache entries, which
                 * used to point to it, have been updated with new folios.
@@ -3996,6 +3995,9 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
        local_irq_enable();
 
+       if (unmapped)
+               return ret;
+
        if (nr_shmem_dropped)
                shmem_uncharge(mapping->host, nr_shmem_dropped);
 
@@ -4086,12 +4088,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
  * Returns -EINVAL when trying to split to an order that is incompatible
  * with the folio. Splitting to order 0 is compatible with all folios.
  */
-int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
-                                    unsigned int new_order)
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+                                    unsigned int new_order, bool unmapped)
 {
        struct folio *folio = page_folio(page);
 
-       return __folio_split(folio, new_order, &folio->page, page, list, true);
+       return __folio_split(folio, new_order, &folio->page, page, list, true,
+                               unmapped);
 }
 
 /*
@@ -4120,7 +4123,7 @@ int folio_split(struct folio *folio, unsigned int new_order,
                struct page *split_at, struct list_head *list)
 {
        return __folio_split(folio, new_order, split_at, &folio->page, list,
-                       false);
+                       false, false);
 }
 
 int min_order_for_split(struct folio *folio)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6621bba62710..9206a3d5c0d1 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -864,6 +864,29 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
                src[i] &= ~MIGRATE_PFN_MIGRATE;
        return 0;
 }
+
+static int migrate_vma_split_pages(struct migrate_vma *migrate,
+                                       unsigned long idx, unsigned long addr,
+                                       struct folio *folio)
+{
+       unsigned long i;
+       unsigned long pfn;
+       unsigned long flags;
+       int ret = 0;
+
+       folio_get(folio);
+       split_huge_pmd_address(migrate->vma, addr, true);
+       ret = __split_huge_page_to_list_to_order(folio_page(folio, 0), NULL,
+                                                       0, true);
+       if (ret)
+               return ret;
+       migrate->src[idx] &= ~MIGRATE_PFN_COMPOUND;
+       flags = migrate->src[idx] & ((1UL << MIGRATE_PFN_SHIFT) - 1);
+       pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT;
+       for (i = 1; i < HPAGE_PMD_NR; i++)
+               migrate->src[i+idx] = migrate_pfn(pfn + i) | flags;
+       return ret;
+}
 #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
 static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
                                         unsigned long addr,
@@ -873,6 +896,13 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
 {
        return 0;
 }
+
+static int migrate_vma_split_pages(struct migrate_vma *migrate,
+                                       unsigned long idx, unsigned long addr,
+                                       struct folio *folio)
+{
+       return 0;
+}
 #endif
 
 /*
@@ -1022,8 +1052,9 @@ static void __migrate_device_pages(unsigned long *src_pfns,
                                struct migrate_vma *migrate)
 {
        struct mmu_notifier_range range;
-       unsigned long i;
+       unsigned long i, j;
        bool notified = false;
+       unsigned long addr;
 
        for (i = 0; i < npages; ) {
                struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
@@ -1065,12 +1096,16 @@ static void __migrate_device_pages(unsigned long *src_pfns,
                                (!(dst_pfns[i] & MIGRATE_PFN_COMPOUND))) {
                                nr = HPAGE_PMD_NR;
                                src_pfns[i] &= ~MIGRATE_PFN_COMPOUND;
-                               src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
-                               goto next;
+                       } else {
+                               nr = 1;
                        }
 
-                       migrate_vma_insert_page(migrate, addr, &dst_pfns[i],
-                                               &src_pfns[i]);
+                       for (j = 0; j < nr && i + j < npages; j++) {
+                               src_pfns[i+j] |= MIGRATE_PFN_MIGRATE;
+                               migrate_vma_insert_page(migrate,
+                                       addr + j * PAGE_SIZE,
+                                       &dst_pfns[i+j], &src_pfns[i+j]);
+                       }
                        goto next;
                }
 
@@ -1092,7 +1127,14 @@ static void __migrate_device_pages(unsigned long *src_pfns,
                                                         MIGRATE_PFN_COMPOUND);
                                        goto next;
                                }
-                               src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
+                               nr = 1 << folio_order(folio);
+                               addr = migrate->start + i * PAGE_SIZE;
+                               if (migrate_vma_split_pages(migrate, i, addr,
+                                                               folio)) {
+                                       src_pfns[i] &= ~(MIGRATE_PFN_MIGRATE |
+                                                        MIGRATE_PFN_COMPOUND);
+                                       goto next;
+                               }
                        } else if ((src_pfns[i] & MIGRATE_PFN_MIGRATE) &&
                                (dst_pfns[i] & MIGRATE_PFN_COMPOUND) &&
                                !(src_pfns[i] & MIGRATE_PFN_COMPOUND)) {
@@ -1127,12 +1169,17 @@ static void __migrate_device_pages(unsigned long *src_pfns,
                BUG_ON(folio_test_writeback(folio));
 
                if (migrate && migrate->fault_page == page)
-                       extra_cnt = 1;
-               r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
-               if (r != MIGRATEPAGE_SUCCESS)
-                       src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
-               else
-                       folio_migrate_flags(newfolio, folio);
+                       extra_cnt++;
+               for (j = 0; j < nr && i + j < npages; j++) {
+                       folio = page_folio(migrate_pfn_to_page(src_pfns[i+j]));
+                       newfolio = page_folio(migrate_pfn_to_page(dst_pfns[i+j]));
+
+                       r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
+                       if (r != MIGRATEPAGE_SUCCESS)
+                               src_pfns[i+j] &= ~MIGRATE_PFN_MIGRATE;
+                       else
+                               folio_migrate_flags(newfolio, folio);
+               }
 next:
                i += nr;
        }
-- 
2.50.1
