Commit-ID:  d63326928611600ad65baff54a70f53b02b3cdfe
Gitweb:     https://git.kernel.org/tip/d63326928611600ad65baff54a70f53b02b3cdfe
Author:     Rick Edgecombe <rick.p.edgeco...@intel.com>
AuthorDate: Thu, 25 Apr 2019 17:11:35 -0700
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Tue, 30 Apr 2019 12:37:57 +0200

mm/hibernation: Make hibernation handle unmapped pages

Make hibernate handle unmapped pages on the direct map when
CONFIG_ARCH_HAS_SET_DIRECT_MAP=y. The set_direct_map_*() functions can
leave pages unmapped (not present) in the direct map, so hibernate must
now check whether each page it is about to save has a valid mapping and
handle it if it does not.
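
For illustration, the save path checks kernel_page_present() and
temporarily re-maps a page before copying it. A minimal sketch along the
lines of safe_copy_page() in kernel/power/snapshot.c (simplified here;
the exact code in the tree may differ):

    static void safe_copy_page(void *dst, struct page *s_page)
    {
            if (kernel_page_present(s_page)) {
                    do_copy_page(dst, page_address(s_page));
            } else {
                    /* Map the page, copy it, then return it to unmapped. */
                    kernel_map_pages(s_page, 1, 1);
                    do_copy_page(dst, page_address(s_page));
                    kernel_map_pages(s_page, 1, 0);
            }
    }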

Previously this check was only done when CONFIG_DEBUG_PAGEALLOC=y was
configured. It does not appear to have a significant impact on hibernation
performance: the save operation was measured at 819.02 MB/s before this
change and at 813.32 MB/s after.

Before:
[    4.670938] PM: Wrote 171996 kbytes in 0.21 seconds (819.02 MB/s)

After:
[    4.504714] PM: Wrote 178932 kbytes in 0.22 seconds (813.32 MB/s)
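
(For reference, the printed rate appears to be kbytes divided by seconds,
treating 1 MB as 1000 kbytes: 171996 / 0.21 ≈ 819,028 kbytes/s, i.e.
819.02 MB/s, and 178932 / 0.22 ≈ 813,327 kbytes/s, i.e. 813.32 MB/s.)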

Signed-off-by: Rick Edgecombe <rick.p.edgeco...@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Acked-by: Pavel Machek <pa...@ucw.cz>
Cc: <a...@linux-foundation.org>
Cc: <ard.biesheu...@linaro.org>
Cc: <deneen.t.d...@intel.com>
Cc: <kernel-harden...@lists.openwall.com>
Cc: <kris...@linux.intel.com>
Cc: <linux_...@icloud.com>
Cc: <will.dea...@arm.com>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Nadav Amit <nadav.a...@gmail.com>
Cc: Rafael J. Wysocki <r...@rjwysocki.net>
Cc: Rik van Riel <r...@surriel.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: https://lkml.kernel.org/r/20190426001143.4983-16-na...@vmware.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/mm/pageattr.c  |  4 ----
 include/linux/mm.h      | 18 ++++++------------
 kernel/power/snapshot.c |  5 +++--
 mm/page_alloc.c         |  7 +++++--
 4 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3574550192c6..daf4d645e537 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2257,7 +2257,6 @@ int set_direct_map_default_noflush(struct page *page)
        return __set_pages_p(page, 1);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
        if (PageHighMem(page))
@@ -2302,11 +2301,8 @@ bool kernel_page_present(struct page *page)
        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
 }
-
 #endif /* CONFIG_HIBERNATION */
 
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
 int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                   unsigned numpages, unsigned long page_flags)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b10c21630f5..083d7b4863ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2610,37 +2610,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
                                        int enable) { }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 extern bool _debug_pagealloc_enabled;
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 
 static inline bool debug_pagealloc_enabled(void)
 {
-       return _debug_pagealloc_enabled;
+       return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
 }
 
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
-       if (!debug_pagealloc_enabled())
-               return;
-
        __kernel_map_pages(page, numpages, enable);
 }
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
-#else  /* CONFIG_DEBUG_PAGEALLOC */
+#else  /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
-static inline bool debug_pagealloc_enabled(void)
-{
-       return false;
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f08a1e4ee1d4..bc9558ab1e5b 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1342,8 +1342,9 @@ static inline void do_copy_page(long *dst, long *src)
  * safe_copy_page - Copy a page in a safe way.
  *
  * Check if the page we are going to copy is marked as present in the kernel
- * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
- * and in that case kernel_page_present() always returns 'true').
+ * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
+ * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
+ * always returns 'true'.
  */
 static void safe_copy_page(void *dst, struct page *s_page)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c02cff1ed56e..59661106da16 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1144,7 +1144,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
        }
        arch_free_page(page, order);
        kernel_poison_pages(page, 1 << order, 0);
-       kernel_map_pages(page, 1 << order, 0);
+       if (debug_pagealloc_enabled())
+               kernel_map_pages(page, 1 << order, 0);
+
        kasan_free_nondeferred_pages(page, order);
 
        return true;
@@ -2014,7 +2016,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        set_page_refcounted(page);
 
        arch_alloc_page(page, order);
-       kernel_map_pages(page, 1 << order, 1);
+       if (debug_pagealloc_enabled())
+               kernel_map_pages(page, 1 << order, 1);
        kasan_alloc_pages(page, order);
        kernel_poison_pages(page, 1 << order, 1);
        set_page_owner(page, order, gfp_flags);
