__free_huge_page_pmd_vmemmap() and __remap_huge_page_pmd_vmemmap() contain
almost identical code; they differ only in the per-PTE operation applied
inside the loop. Introduce a remap_huge_page_pmd_vmemmap() helper that
takes that operation as a remap_pte_fn callback, and implement both paths
on top of it.

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 mm/hugetlb_vmemmap.c | 108 +++++++++++++++++++++------------------------------
 1 file changed, 45 insertions(+), 63 deletions(-)
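
Note for reviewers: below is a minimal, standalone sketch of the
callback-based deduplication this patch applies. It is illustrative
only; every identifier in it (walk, elem_fn, free_elem, remap_elem) is
hypothetical and none of it is kernel code.

/*
 * Illustration only: two walkers that differed solely in their
 * per-element action are collapsed into one walker that takes the
 * action as a function pointer, mirroring how remap_pte_fn lets
 * remap_huge_page_pmd_vmemmap() serve both the free and remap paths.
 */
#include <stdio.h>

typedef void (*elem_fn)(int elem);	/* analogous to remap_pte_fn */

static void free_elem(int elem)  { printf("free  %d\n", elem); }
static void remap_elem(int elem) { printf("remap %d\n", elem); }

/* Shared walker, analogous to remap_huge_page_pmd_vmemmap(). */
static void walk(const int *elems, int n, elem_fn fn)
{
	int i;

	for (i = 0; i < n; i++)
		fn(elems[i]);
}

int main(void)
{
	int elems[] = { 1, 2, 3 };

	walk(elems, 3, free_elem);	/* replaces the "free" walker */
	walk(elems, 3, remap_elem);	/* replaces the "remap" walker */
	return 0;
}

The design choice is the same in both cases: the loop structure (and in
the patch, the cache/TLB flushing around it) lives in one place, and the
callers only supply the per-entry action.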

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 361c4174e222..06e2b8a7b7c8 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -252,6 +252,47 @@ static inline int freed_vmemmap_hpage_dec(struct page *page)
        return atomic_dec_return_relaxed(&page->_mapcount) + 1;
 }
 
+static inline void free_vmemmap_page_list(struct list_head *list)
+{
+       struct page *page, *next;
+
+       list_for_each_entry_safe(page, next, list, lru) {
+               list_del(&page->lru);
+               free_vmemmap_page(page);
+       }
+}
+
+typedef void (*remap_pte_fn)(struct page *reuse, pte_t *ptep,
+                            unsigned long start, unsigned long end,
+                            struct list_head *pages);
+
+static void remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+                                       unsigned long addr,
+                                       struct list_head *pages,
+                                       remap_pte_fn remap_fn)
+{
+       unsigned long next;
+       unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+       unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
+       struct page *reuse = NULL;
+
+       flush_cache_vunmap(start, end);
+
+       addr = start;
+       do {
+               pte_t *ptep;
+
+               ptep = pte_offset_kernel(pmd, addr);
+               if (!reuse)
+                       reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+               next = vmemmap_hpage_addr_end(addr, end);
+               remap_fn(reuse, ptep, addr, next, pages);
+       } while (pmd++, addr = next, addr != end);
+
+       flush_tlb_kernel_range(start, end);
+}
+
 static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
                                          unsigned long start,
                                          unsigned long end,
@@ -286,31 +327,6 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
        }
 }
 
-static void __remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
-                                         unsigned long addr,
-                                         struct list_head *remap_pages)
-{
-       unsigned long next;
-       unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
-       unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
-       struct page *reuse = NULL;
-
-       addr = start;
-       do {
-               pte_t *ptep;
-
-               ptep = pte_offset_kernel(pmd, addr);
-               if (!reuse)
-                       reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-               next = vmemmap_hpage_addr_end(addr, end);
-               __remap_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-                                             remap_pages);
-       } while (pmd++, addr = next, addr != end);
-
-       flush_tlb_kernel_range(start, end);
-}
-
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
        int i;
@@ -339,8 +355,8 @@ void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
        BUG_ON(!pmd);
 
        ptl = vmemmap_pmd_lock(pmd);
-       __remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head,
-                                     &remap_pages);
+       remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &remap_pages,
+                                   __remap_huge_page_pte_vmemmap);
        if (!freed_vmemmap_hpage_dec(pmd_page(*pmd))) {
                /*
                 * Todo:
@@ -350,16 +366,6 @@ void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
        spin_unlock(ptl);
 }
 
-static inline void free_vmemmap_page_list(struct list_head *list)
-{
-       struct page *page, *next;
-
-       list_for_each_entry_safe(page, next, list, lru) {
-               list_del(&page->lru);
-               free_vmemmap_page(page);
-       }
-}
-
 static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
                                         unsigned long start,
                                         unsigned long end,
@@ -382,31 +388,6 @@ static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
        }
 }
 
-static void __free_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
-                                        unsigned long addr,
-                                        struct list_head *free_pages)
-{
-       unsigned long next;
-       unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
-       unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
-       struct page *reuse = NULL;
-
-       addr = start;
-       do {
-               pte_t *ptep;
-
-               ptep = pte_offset_kernel(pmd, addr);
-               if (!reuse)
-                       reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-               next = vmemmap_hpage_addr_end(addr, end);
-               __free_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-                                            free_pages);
-       } while (pmd++, addr = next, addr != end);
-
-       flush_tlb_kernel_range(start, end);
-}
-
 static void split_vmemmap_pmd(pmd_t *pmd, pte_t *pte_p, unsigned long addr)
 {
        int i;
@@ -465,7 +446,8 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
        if (vmemmap_pmd_huge(pmd))
                split_vmemmap_huge_page(head, pmd);
 
-       __free_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages);
+       remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages,
+                                   __free_huge_page_pte_vmemmap);
        freed_vmemmap_hpage_inc(pmd_page(*pmd));
        spin_unlock(ptl);
 
-- 
2.11.0