The rbtree implementation now provides a post-order iterator, so use
rbtree_postorder_for_each_entry_safe() when destroying the zswap rbtree
instead of repeatedly removing the leftmost node with rb_first()/rb_erase(),
which pointlessly rebalances the tree after every removal when the whole
tree is being freed. This also resolves the TODO comment in
zswap_frontswap_invalidate_area().

Signed-off-by: Cody P Schafer <c...@linux.vnet.ibm.com>
Reviewed-by: Seth Jennings <sjenn...@linux.vnet.ibm.com>
---
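A note on the pattern, for reviewers: rbtree_postorder_for_each_entry_safe()
visits each node only after both of its children have been visited, so every
entry can be freed in place without rb_erase()'s rebalancing work. A minimal
sketch of the idiom follows; struct item and destroy_all() are illustrative
names, not part of this patch:

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct item {
		struct rb_node rbnode;
		/* payload fields ... */
	};

	static void destroy_all(struct rb_root *root)
	{
		struct item *pos, *n;

		/*
		 * Post-order: children are freed before their parent,
		 * so no per-node unlinking or rebalancing is needed.
		 */
		rbtree_postorder_for_each_entry_safe(pos, n, root, rbnode)
			kfree(pos);
		*root = RB_ROOT;
	}

Freeing each entry inside the loop body is safe because the macro computes
the next post-order node ("n") before the body runs, so it never touches the
links of an already-freed node.
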
 mm/zswap.c | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index deda2b6..5c853b2 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -790,26 +790,14 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
 static void zswap_frontswap_invalidate_area(unsigned type)
 {
        struct zswap_tree *tree = zswap_trees[type];
-       struct rb_node *node;
-       struct zswap_entry *entry;
+       struct zswap_entry *entry, *n;
 
        if (!tree)
                return;
 
        /* walk the tree and free everything */
        spin_lock(&tree->lock);
-       /*
-        * TODO: Even though this code should not be executed because
-        * the try_to_unuse() in swapoff should have emptied the tree,
-        * it is very wasteful to rebalance the tree after every
-        * removal when we are freeing the whole tree.
-        *
-        * If post-order traversal code is ever added to the rbtree
-        * implementation, it should be used here.
-        */
-       while ((node = rb_first(&tree->rbroot))) {
-               entry = rb_entry(node, struct zswap_entry, rbnode);
-               rb_erase(&entry->rbnode, &tree->rbroot);
+       rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
                zbud_free(tree->pool, entry->handle);
                zswap_entry_cache_free(entry);
                atomic_dec(&zswap_stored_pages);
-- 
1.8.3.4
