ppc64 supports hugetlb entries at the pgd level. Add code to handle hugetlb pgd
entries in follow_page_mask() so that ppc64 can switch over to this generic
path for such entries.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
---
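The follow_huge_pgd() helper added in mm/hugetlb.c below is declared __weak,
so an architecture can supply its own version when it needs different
semantics. A minimal, purely illustrative sketch of such an override (not
part of this patch, assuming the pgd entry maps a single huge page covering
PGDIR_SIZE, as the generic version does):

	struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
				     pgd_t *pgd, int flags)
	{
		/* As in the generic helper, refuse FOLL_GET: taking a page
		 * reference here safely would need arch-specific locking.
		 */
		if (flags & FOLL_GET)
			return NULL;

		/* Treat the pgd entry as a huge pte and index into the
		 * compound page by the offset within the PGDIR_SIZE region.
		 */
		return pte_page(*(pte_t *)pgd) +
			((address & ~PGDIR_MASK) >> PAGE_SHIFT);
	}
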
 include/linux/hugetlb.h | 3 +++
 mm/gup.c                | 7 +++++++
 mm/hugetlb.c            | 9 +++++++++
 3 files changed, 19 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d3a4be0022d8..04b73a9c8b4b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -124,6 +124,9 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
+struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+                            pgd_t *pgd, int flags);
+
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pud);
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
diff --git a/mm/gup.c b/mm/gup.c
index 0e18fd5f65b4..74a25e33dddb 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -382,6 +382,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);
 
+       if (pgd_huge(*pgd)) {
+               page = follow_huge_pgd(mm, address, pgd, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+
        if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pgd_val(*pgd)), flags,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b02faa1079bd..eb39a7496de7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4702,6 +4702,15 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
        return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
 }
 
+struct page * __weak
+follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
+{
+       if (flags & FOLL_GET)
+               return NULL;
+
+       return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+}
+
 #ifdef CONFIG_MEMORY_FAILURE
 
 /*
-- 
2.7.4
