Convert alloc_anon_folio() to pass __GFP_ZERO instead of zeroing at the
callsite.  Use vma_alloc_folio_user_addr() to pass the folio-aligned
address for NUMA policy and the raw vmf->address for cache-friendly
zeroing.

alloc_swap_folio() is likewise converted to vma_alloc_folio_user_addr()
(without __GFP_ZERO, since swapped-in folio contents are read back from
swap rather than zeroed).

Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 mm/memory.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 07778814b4a8..83ec73791fae 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4662,7 +4662,8 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
        gfp = vma_thp_gfp_mask(vma);
        while (orders) {
                addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-               folio = vma_alloc_folio(gfp, order, vma, addr);
+               folio = vma_alloc_folio_user_addr(gfp, order, vma, addr,
+                                         vmf->address);
                if (folio) {
                        if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
                                                            gfp, entry))
@@ -5176,10 +5177,11 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
                goto fallback;
 
        /* Try allocating the highest of the remaining orders. */
-       gfp = vma_thp_gfp_mask(vma);
+       gfp = vma_thp_gfp_mask(vma) | __GFP_ZERO;
        while (orders) {
                addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-               folio = vma_alloc_folio(gfp, order, vma, addr);
+               folio = vma_alloc_folio_user_addr(gfp, order, vma, addr,
+                                         vmf->address);
                if (folio) {
                        if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                                count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
@@ -5187,15 +5189,6 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
                                goto next;
                        }
                        folio_throttle_swaprate(folio, gfp);
-                       /*
-                        * When a folio is not zeroed during allocation
-                        * (__GFP_ZERO not used) or user folios require special
-                        * handling, folio_zero_user() is used to make sure
-                        * that the page corresponding to the faulting address
-                        * will be hot in the cache after zeroing.
-                        */
-                       if (user_alloc_needs_zeroing())
-                               folio_zero_user(folio, vmf->address);
                        return folio;
                }
 next:
-- 
MST


Reply via email to