Kernel page table pages allocated for PTE fragments never go through the standard init sequence, which can make the utilization statistics reported through interfaces such as /proc and /sys inaccurate. The allocated page also misses out on page table lock and page flag initialization. Fix this by making sure every page allocated for a PTE fragment, whether for a user process or for the kernel, goes through the same initialization.
Signed-off-by: Anshuman Khandual <khand...@linux.vnet.ibm.com>
---
Changes in V2:
- Call the destructor function during free for all cases

 arch/powerpc/mm/pgtable-book3s64.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index c1f4ca4..a820ee6 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -335,23 +335,21 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 
 static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 {
+	gfp_t gfp_mask = PGALLOC_GFP;
 	void *ret = NULL;
 	struct page *page;
 
-	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
-			return NULL;
-		if (!pgtable_page_ctor(page)) {
-			__free_page(page);
-			return NULL;
-		}
-	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
-			return NULL;
-	}
+	if (!kernel)
+		gfp_mask |= __GFP_ACCOUNT;
+	page = alloc_page(gfp_mask);
+	if (!page)
+		return NULL;
+
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 
 	ret = page_address(page);
 	/*
@@ -391,8 +389,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
 	struct page *page = virt_to_page(table);
 
 	if (put_page_testzero(page)) {
-		if (!kernel)
-			pgtable_page_dtor(page);
+		pgtable_page_dtor(page);
 		free_unref_page(page);
 	}
 }
-- 
1.8.3.1
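
For reviewers, a minimal sketch of how the two touched paths read with this patch applied. This is not a standalone program: it assumes the existing pgtable-book3s64.c context (PGALLOC_GFP, the PTE fragment bookkeeping after page_address(), which is unchanged and elided here), and the trailing return is shown only to close the function.

/* Sketch of the patched paths in arch/powerpc/mm/pgtable-book3s64.c;
 * relies on the surrounding kernel context, shown here only to make
 * the unified allocation/free flow easier to read than the diff.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	gfp_t gfp_mask = PGALLOC_GFP;
	void *ret = NULL;
	struct page *page;

	/* User page table pages are charged to the memcg, kernel ones are not */
	if (!kernel)
		gfp_mask |= __GFP_ACCOUNT;
	page = alloc_page(gfp_mask);
	if (!page)
		return NULL;

	/* Both kernel and user PTE fragment pages now get the constructor:
	 * page table lock init and the page table utilization accounting
	 * mentioned in the commit message.
	 */
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	/* ... existing PTE fragment bookkeeping, unchanged by this patch ... */
	return (pte_t *)ret;
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		/* Destructor now runs for kernel pages too, matching the ctor */
		pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

Folding the __GFP_ACCOUNT decision into gfp_mask keeps a single allocation and ctor path, so kernel PTE fragment pages take the same lock, flag and statistics initialization as user ones, and the dtor is correspondingly called unconditionally on free.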