Let's drop these checks; the core migration code must make sure these
conditions hold either way, so there is no need to double-check them here.
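
For illustration only (not part of the patch): a minimal sketch of the
invariant the migration core maintains around the movable_operations
callbacks. The helper name migrate_movable_page_sketch() and the exact
call site are hypothetical, not actual mm/migrate.c code, but they show
why re-asserting PageIsolated() inside the zsmalloc callbacks is
redundant.

/*
 * Illustrative sketch only -- not the real mm/migrate.c code. The
 * migration core isolates a movable page (marking it isolated) before it
 * ever invokes the driver's ->migrate_page()/->putback_page() callbacks,
 * so the callbacks can rely on that state instead of re-checking it.
 */
static int migrate_movable_page_sketch(struct page *newpage, struct page *page)
{
	const struct movable_operations *mops = page_movable_ops(page);

	/* Guaranteed by the earlier, successful isolate_movable_page(). */
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	return mops->migrate_page(newpage, page, MIGRATE_SYNC);
}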

Acked-by: Zi Yan <z...@nvidia.com>
Reviewed-by: Sergey Senozhatsky <senozhat...@chromium.org>
Acked-by: Harry Yoo <harry....@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/zpdesc.h   | 5 -----
 mm/zsmalloc.c | 5 -----
 2 files changed, 10 deletions(-)

diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index d3df316e5bb7b..5cb7e3de43952 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -168,11 +168,6 @@ static inline void __zpdesc_clear_zsmalloc(struct zpdesc *zpdesc)
        __ClearPageZsmalloc(zpdesc_page(zpdesc));
 }
 
-static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
-{
-       return PageIsolated(zpdesc_page(zpdesc));
-}
-
 static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
 {
        return page_zone(zpdesc_page(zpdesc));
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 999b513c7fdff..7f1431f2be98f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1719,8 +1719,6 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
         * Page is locked so zspage couldn't be destroyed. For detail, look at
         * lock_zspage in free_zspage.
         */
-       VM_BUG_ON_PAGE(PageIsolated(page), page);
-
        return true;
 }
 
@@ -1739,8 +1737,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
        unsigned long old_obj, new_obj;
        unsigned int obj_idx;
 
-       VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
-
        /* The page is locked, so this pointer must remain valid */
        zspage = get_zspage(zpdesc);
        pool = zspage->pool;
@@ -1811,7 +1807,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 
 static void zs_page_putback(struct page *page)
 {
-       VM_BUG_ON_PAGE(!PageIsolated(page), page);
 }
 
 static const struct movable_operations zsmalloc_mops = {
-- 
2.49.0
