In preparation for const-ifying the 'info' field of 'struct vm_fault',
rework __collapse_huge_page_swapin() to avoid continuously updating
vmf.info.address and instead populate a new 'struct vm_fault' on the
stack for each page being processed.
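
For illustration, the reworked loop builds a complete fault descriptor for
each PTE instead of mutating a single long-lived one. A minimal sketch of
the resulting shape (taken from the diff below, with 'vma', 'address' and
'pgoff' living under 'info' as this series assumes):

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.info = {
				.vma     = vma,
				.address = address,
				.pgoff   = linear_page_index(vma, haddr),
			},
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd   = pmd,
		};
		/* map the PTE, then hand the fault to do_swap_page() */
	}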

Cc: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Signed-off-by: Will Deacon <w...@kernel.org>
---
 mm/khugepaged.c | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4494c90075fb..86c51a5d92d2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -991,40 +991,43 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
-                                       unsigned long address, pmd_t *pmd,
+                                       unsigned long haddr, pmd_t *pmd,
                                        int referenced)
 {
        int swapped_in = 0;
        vm_fault_t ret = 0;
-       struct vm_fault vmf = {
-               .info = {
-                       .vma = vma,
-                       .address = address,
-                       .pgoff = linear_page_index(vma, address),
-               },
-               .flags = FAULT_FLAG_ALLOW_RETRY,
-               .pmd = pmd,
-       };
-
-       vmf.pte = pte_offset_map(pmd, address);
-       for (; vmf.info.address < address + HPAGE_PMD_NR*PAGE_SIZE;
-                       vmf.pte++, vmf.info.address += PAGE_SIZE) {
+       unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+
+       for (address = haddr; address < end; address += PAGE_SIZE) {
+               struct vm_fault vmf = {
+                       .info = {
+                               .vma = vma,
+                               .address = address,
+                               .pgoff = linear_page_index(vma, haddr),
+                       },
+                       .flags = FAULT_FLAG_ALLOW_RETRY,
+                       .pmd = pmd,
+               };
+
+               vmf.pte = pte_offset_map(pmd, address);
                vmf.orig_pte = *vmf.pte;
-               if (!is_swap_pte(vmf.orig_pte))
+               if (!is_swap_pte(vmf.orig_pte)) {
+                       pte_unmap(vmf.pte);
                        continue;
+               }
                swapped_in++;
                ret = do_swap_page(&vmf);
 
                /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
                if (ret & VM_FAULT_RETRY) {
                        mmap_read_lock(mm);
-                       if (hugepage_vma_revalidate(mm, address, &vmf.info.vma)) {
+                       if (hugepage_vma_revalidate(mm, haddr, &vma)) {
                                /* vma is no longer available, don't continue to swapin */
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
                        }
                        /* check if the pmd is still valid */
-                       if (mm_find_pmd(mm, address) != pmd) {
+                       if (mm_find_pmd(mm, haddr) != pmd) {
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
                        }
@@ -1033,11 +1036,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                        return false;
                }
-               /* pte is unmapped now, we need to map it */
-               vmf.pte = pte_offset_map(pmd, vmf.info.address);
        }
-       vmf.pte--;
-       pte_unmap(vmf.pte);
 
        /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
        if (swapped_in)
-- 
2.30.0.284.gd98b1dd5eaa7-goog
