The zero page is not replicated between nodes and is often shared between
processes. The data is read-only and, if heavily accessed, likely to sit
in local CPU caches, so the cost of remote accesses to it is less of a
concern. This patch prevents NUMA hinting faults from being trapped on the
zero page. For tasks using the zero page this reduces the number of PTE
updates, TLB flushes and hinting faults.
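
For reference, the zero page sharing is easy to observe from userspace.
The sketch below is illustrative only; it is not part of this patch and
the pfn_of helper is invented for the example. Two private anonymous
mappings that have only ever been read report the same PFN in
/proc/self/pagemap, i.e. both are backed by the single zero page. Note
that recent kernels zero the PFN field for readers without
CAP_SYS_ADMIN, so run it as root.

/*
 * Illustrative only: dump the PFNs backing two read-only anonymous
 * mappings. Both are expected to be the shared zero page.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static uint64_t pfn_of(int pagemap_fd, void *addr, long pagesize)
{
	uint64_t entry = 0;
	off_t offset = (off_t)((uintptr_t)addr / pagesize) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return 0;
	return entry & ((1ULL << 55) - 1);	/* bits 0-54 hold the PFN */
}

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	char *a = mmap(NULL, pagesize, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *b = mmap(NULL, pagesize, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	volatile char sink;

	if (fd < 0 || a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	/* Read faults on untouched anonymous memory map in the zero page */
	sink = a[0];
	sink = b[0];
	(void)sink;

	printf("pfn(a)=%llx pfn(b)=%llx\n",
	       (unsigned long long)pfn_of(fd, a, pagesize),
	       (unsigned long long)pfn_of(fd, b, pagesize));
	return 0;
}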

[pet...@infradead.org: Correct use of is_huge_zero_page]
Signed-off-by: Mel Gorman <mgor...@suse.de>
---
 mm/huge_memory.c |  7 ++++++-
 mm/memory.c      |  1 +
 mm/mprotect.c    | 10 +++++++++-
 3 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 94d0739..40f75a6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1303,6 +1303,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_unlock;
 
        page = pmd_page(pmd);
+       BUG_ON(is_huge_zero_page(page));
        page_nid = page_to_nid(page);
        count_vm_numa_event(NUMA_HINT_FAULTS);
        if (page_nid == this_nid)
@@ -1488,8 +1489,12 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                } else {
                        struct page *page = pmd_page(*pmd);
 
-                       /* only check non-shared pages */
+                       /*
+                        * Only check non-shared pages. See change_pte_range
+                        * for comment on why the zero page is not modified
+                        */
                        if (page_mapcount(page) == 1 &&
+                           !is_huge_zero_page(page) &&
                            !pmd_numa(*pmd)) {
                                entry = pmd_mknuma(entry);
                        }
diff --git a/mm/memory.c b/mm/memory.c
index c20f872..86c3caf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3575,6 +3575,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                pte_unmap_unlock(ptep, ptl);
                return 0;
        }
+       BUG_ON(is_zero_pfn(page_to_pfn(page)));
 
        page_nid = page_to_nid(page);
        target_nid = numa_migrate_prep(page, vma, addr, page_nid);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2bbb648..faa499e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -62,7 +62,15 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                struct page *page;
 
                                page = vm_normal_page(vma, addr, oldpte);
-                               if (page) {
+
+                               /*
+                                * Do not trap faults against the zero page.
+                                 * The read-only data is likely to be
+                                 * cached in the local CPU's cache and it
+                                 * is less useful to know about local vs
+                                 * remote hits on the zero page.
+                                 */
+                               if (page && !is_zero_pfn(page_to_pfn(page))) {
                                        int this_nid = page_to_nid(page);
                                        if (last_nid == -1)
                                                last_nid = this_nid;
-- 
1.8.1.4
