When trying to soft-offline a free page, we need to first take it off the
buddy allocator.
Once we know it is out of the allocator's reach, we can safely flag it as
poisoned.
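
In code terms, the flow boils down to (a trimmed sketch of the new
soft_offline_free_page below; the real function also handles the -EBUSY
error path):

    if (!dissolve_free_huge_page(page) && take_page_off_buddy(page))
            page_handle_poison(page);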

take_page_off_buddy will be used to take a page meant to be poisoned off
the buddy allocator. take_page_off_buddy calls break_down_buddy_pages,
which splits a higher-order page in case our target page belongs to one,
handing the sub-pages we do not care about back to the free lists.
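
As an illustration (made-up pfns): if the target is page 5 inside a free
order-3 block spanning pages [0..7], the split walks down one order at a
time, always freeing the half that does not contain the target:

    order 2: free [0..3], keep [4..7]
    order 1: free [6..7], keep [4..5]
    order 0: free [4],    target [5] stays off the free lists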

Once the page is under our control, we call page_handle_poison to mark it
as poisoned and grab a refcount on it, so nobody can re-use the page
afterwards.
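
For reference, the helper is tiny; annotated here (the comments are mine,
not part of the patch):

    SetPageHWPoison(page);          /* flag the page as poisoned */
    page_ref_inc(page);             /* pin it so it cannot be reallocated */
    num_poisoned_pages_inc();       /* accounting */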

Signed-off-by: Oscar Salvador <osalva...@suse.de>
---
 include/linux/page-flags.h |  1 +
 mm/memory-failure.c        | 18 ++++++----
 mm/page_alloc.c            | 68 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 81 insertions(+), 6 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 7182103583d2..d0fdb59794d8 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -429,6 +429,7 @@ PAGEFLAG(HWPoison, hwpoison, PF_ANY)
 TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
 #define __PG_HWPOISON (1UL << PG_hwpoison)
 extern bool set_hwpoison_free_buddy_page(struct page *page);
+extern bool take_page_off_buddy(struct page *page);
 #else
 PAGEFLAG_FALSE(HWPoison)
 static inline bool set_hwpoison_free_buddy_page(struct page *page)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9d80beb841a3..de274356f8c7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -65,6 +65,13 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
+static void page_handle_poison(struct page *page)
+{
+       SetPageHWPoison(page);
+       page_ref_inc(page);
+       num_poisoned_pages_inc();
+}
+
 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
 
 u32 hwpoison_filter_enable = 0;
@@ -1884,14 +1891,13 @@ static int soft_offline_in_use_page(struct page *page, int flags)
 
 static int soft_offline_free_page(struct page *page)
 {
-       int rc = dissolve_free_huge_page(page);
+       int rc = -EBUSY;
 
-       if (!rc) {
-               if (set_hwpoison_free_buddy_page(page))
-                       num_poisoned_pages_inc();
-               else
-                       rc = -EBUSY;
+       if (!dissolve_free_huge_page(page) && take_page_off_buddy(page)) {
+               page_handle_poison(page);
+               rc = 0;
        }
+
        return rc;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c30be2e8e76..e02da891d8a9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8758,6 +8758,74 @@ bool is_free_buddy_page(struct page *page)
 }
 
 #ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Break down a higher-order page in sub-pages, and keep our target out of
+ * buddy allocator.
+ */
+static void break_down_buddy_pages(struct zone *zone, struct page *page,
+                                  struct page *target, int low, int high,
+                                  int migratetype)
+{
+       unsigned long size = 1 << high;
+       struct page *current_buddy, *next_page;
+
+       while (high > low) {
+               high--;
+               size >>= 1;
+
+               if (target >= &page[size]) {
+                       next_page = page + size;
+                       current_buddy = page;
+               } else {
+                       next_page = page;
+                       current_buddy = page + size;
+               }
+
+               if (set_page_guard(zone, current_buddy, high, migratetype))
+                       continue;
+
+               if (current_buddy != target) {
+                       add_to_free_list(current_buddy, zone, high,
+                                        migratetype);
+                       set_page_order(current_buddy, high);
+                       page = next_page;
+               }
+       }
+}
+
+/*
+ * Take a page that will be marked as poisoned off the buddy allocator.
+ */
+bool take_page_off_buddy(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+       unsigned long pfn = page_to_pfn(page);
+       unsigned long flags;
+       unsigned int order;
+       bool ret = false;
+
+       spin_lock_irqsave(&zone->lock, flags);
+       for (order = 0; order < MAX_ORDER; order++) {
+               struct page *page_head = page - (pfn & ((1 << order) - 1));
+               int buddy_order = page_order(page_head);
+
+               if (PageBuddy(page_head) && buddy_order >= order) {
+                       unsigned long pfn_head = page_to_pfn(page_head);
+                       int migratetype = get_pfnblock_migratetype(page_head,
+                                                                  pfn_head);
+
+                       del_page_from_free_list(page_head, zone, buddy_order);
+                       break_down_buddy_pages(zone, page_head, page, 0,
+                                               buddy_order, migratetype);
+                       ret = true;
+                       break;
+               }
+               if (page_count(page_head) > 0)
+                       break;
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+       return ret;
+}
+
 /*
  * Set PG_hwpoison flag if a given page is confirmed to be a free page.  This
  * test is performed under the zone lock to prevent a race against page
-- 
2.26.2
