When the CPU touches a zone device private THP entry, the data needs to be migrated back to the CPU. Call migrate_to_ram() on these pages via the do_huge_pmd_device_private() fault-handling helper.
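For context only (not part of this patch): the callback that ends up being invoked from the new helper is the dev_pagemap_ops->migrate_to_ram() hook that a device driver registers for its device-private pages. A minimal driver-side sketch, with hypothetical names, might look like:

	/* Illustration only; example_* names are hypothetical. */
	#include <linux/memremap.h>
	#include <linux/mm.h>

	static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
	{
		/*
		 * Migrate the faulting device-private folio back to
		 * system memory; return 0 on success or e.g.
		 * VM_FAULT_SIGBUS on failure.
		 */
		return 0;
	}

	static const struct dev_pagemap_ops example_pgmap_ops = {
		.migrate_to_ram	= example_migrate_to_ram,
		/* .page_free etc. omitted */
	};

do_huge_pmd_device_private() below takes the PMD lock, revalidates the entry, locks and pins the page, drops the lock and then calls this hook.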
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: David Hildenbrand <da...@redhat.com>
Cc: Zi Yan <z...@nvidia.com>
Cc: Joshua Hahn <joshua.hah...@gmail.com>
Cc: Rakie Kim <rakie....@sk.com>
Cc: Byungchul Park <byungc...@sk.com>
Cc: Gregory Price <gou...@gourry.net>
Cc: Ying Huang <ying.hu...@linux.alibaba.com>
Cc: Alistair Popple <apop...@nvidia.com>
Cc: Oscar Salvador <osalva...@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>
Cc: Baolin Wang <baolin.w...@linux.alibaba.com>
Cc: "Liam R. Howlett" <liam.howl...@oracle.com>
Cc: Nico Pache <npa...@redhat.com>
Cc: Ryan Roberts <ryan.robe...@arm.com>
Cc: Dev Jain <dev.j...@arm.com>
Cc: Barry Song <bao...@kernel.org>
Cc: Lyude Paul <ly...@redhat.com>
Cc: Danilo Krummrich <d...@kernel.org>
Cc: David Airlie <airl...@gmail.com>
Cc: Simona Vetter <sim...@ffwll.ch>
Cc: Ralph Campbell <rcampb...@nvidia.com>
Cc: Mika Penttilä <mpent...@redhat.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Francois Dugast <francois.dug...@intel.com>
Signed-off-by: Balbir Singh <balb...@nvidia.com>
---
 include/linux/huge_mm.h |  7 +++++++
 mm/huge_memory.c        | 36 ++++++++++++++++++++++++++++++++++++
 mm/memory.c             |  6 ++++--
 3 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7748489fde1b..a4880fe98e46 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -474,6 +474,8 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
 
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
 
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf);
+
 extern struct folio *huge_zero_folio;
 extern unsigned long huge_zero_pfn;
 
@@ -632,6 +634,11 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	return 0;
 }
 
+static inline vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
+{
+	return 0;
+}
+
 static inline bool is_huge_zero_folio(const struct folio *folio)
 {
 	return false;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2495e3fdbfae..8888140e57a3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1267,6 +1267,42 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 
 }
 
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	vm_fault_t ret = 0;
+	spinlock_t *ptl;
+	swp_entry_t swp_entry;
+	struct page *page;
+
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
+	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) {
+		spin_unlock(ptl);
+		return 0;
+	}
+
+	swp_entry = pmd_to_swp_entry(vmf->orig_pmd);
+	page = pfn_swap_entry_to_page(swp_entry);
+	vmf->page = page;
+	vmf->pte = NULL;
+	if (trylock_page(vmf->page)) {
+		get_page(page);
+		spin_unlock(ptl);
+		ret = page_pgmap(page)->ops->migrate_to_ram(vmf);
+		unlock_page(vmf->page);
+		put_page(page);
+	} else {
+		spin_unlock(ptl);
+	}
+
+	return ret;
+}
+
 /*
  * always: directly stall for all thp allocations
  * defer: wake kswapd and fail if not immediately available
diff --git a/mm/memory.c b/mm/memory.c
index 92fd18a5d8d1..6c87f043eea1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6152,8 +6152,10 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
 
 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
-			VM_BUG_ON(thp_migration_supported() &&
-					  !is_pmd_migration_entry(vmf.orig_pmd));
+			if (is_device_private_entry(
+					pmd_to_swp_entry(vmf.orig_pmd)))
+				return do_huge_pmd_device_private(&vmf);
+
 			if (is_pmd_migration_entry(vmf.orig_pmd))
 				pmd_migration_entry_wait(mm, vmf.pmd);
 			return 0;
-- 
2.50.1