From: "Kirill A. Shutemov" <kirill.shute...@linux.intel.com>
All code paths seem covered. Now we can map the huge zero page on a
read page fault.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
 mm/huge_memory.c |   10 ++++++++++
 1 files changed, 10 insertions(+), 0 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 995894f..c788445 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -750,6 +750,16 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma)))
 		return VM_FAULT_OOM;
+	if (!(flags & FAULT_FLAG_WRITE)) {
+		pgtable_t pgtable;
+		pgtable = pte_alloc_one(mm, haddr);
+		if (unlikely(!pgtable))
+			goto out;
+		spin_lock(&mm->page_table_lock);
+		set_huge_zero_page(pgtable, mm, vma, haddr, pmd);
+		spin_unlock(&mm->page_table_lock);
+		return 0;
+	}
 	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 				  vma, haddr, numa_node_id(), 0);
 	if (unlikely(!page)) {
--
1.7.7.6
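For context: set_huge_zero_page() is introduced by an earlier patch in
this series and is not shown here. A minimal sketch of what it is
assumed to do, inferred from the call site above; the helper body,
huge_zero_pfn, and the use of pgtable_trans_huge_deposit() are
assumptions drawn from the surrounding series, not from this patch:

	/*
	 * Sketch only, not part of this patch. Called with
	 * mm->page_table_lock held (see the caller above): installs a
	 * write-protected huge PMD pointing at the shared zero page,
	 * and deposits the preallocated page table so a later split
	 * of the PMD cannot fail on allocation.
	 */
	static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long haddr,
			pmd_t *pmd)
	{
		pmd_t entry;

		/* huge_zero_pfn: assumed name for the zero huge page's pfn */
		entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
		/* read-only: a later write fault must COW into a real page */
		entry = pmd_wrprotect(entry);
		entry = pmd_mkhuge(entry);
		set_pmd_at(mm, haddr, pmd, entry);
		/* stash pgtable for a future split_huge_page() */
		pgtable_trans_huge_deposit(mm, pgtable);
		mm->nr_ptes++;
	}

Because the mapping is installed write-protected, only read faults take
this path (note the !(flags & FAULT_FLAG_WRITE) check); a subsequent
write still goes through the normal huge-page allocation path.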