From 0bd42136f7ae4ea1375da34c32838fb35eee8c59 Mon Sep 17 00:00:00 2001
From: Cody P Schafer
Date: Wed, 11 Sep 2013 14:25:33 -0700
Subject: [PATCH] mm/zswap: use postorder iteration when destroying rbtree

Signed-off-by: Cody P Schafer
Reviewed-by: Seth Jennings
Cc: David Woodhouse
Cc: Rik van Riel
Cc: Michel Lespinasse
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/zswap.c | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index efed4c8b7f5b..841e35f1db22 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -790,26 +790,14 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
 static void zswap_frontswap_invalidate_area(unsigned type)
 {
 	struct zswap_tree *tree = zswap_trees[type];
-	struct rb_node *node;
-	struct zswap_entry *entry;
+	struct zswap_entry *entry, *n;
 
 	if (!tree)
 		return;
 
 	/* walk the tree and free everything */
 	spin_lock(&tree->lock);
-	/*
-	 * TODO: Even though this code should not be executed because
-	 * the try_to_unuse() in swapoff should have emptied the tree,
-	 * it is very wasteful to rebalance the tree after every
-	 * removal when we are freeing the whole tree.
-	 *
-	 * If post-order traversal code is ever added to the rbtree
-	 * implementation, it should be used here.
-	 */
-	while ((node = rb_first(&tree->rbroot))) {
-		entry = rb_entry(node, struct zswap_entry, rbnode);
-		rb_erase(&entry->rbnode, &tree->rbroot);
+	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
 		zbud_free(tree->pool, entry->handle);
 		zswap_entry_cache_free(entry);
 		atomic_dec(&zswap_stored_pages);
-- 
2.30.2
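
Note: the new loop relies on rbtree_postorder_for_each_entry_safe(),
which the rbtree implementation now provides. Because a postorder walk
visits both children before their parent, and the next node in
postorder is never a descendant of the current one, each entry can be
freed as it is visited with no rb_erase() and none of the rebalancing
that the old rb_first()/rb_erase() loop paid for on every removal. The
standalone sketch below illustrates that traversal on a plain binary
tree with parent pointers; it is not the kernel API, and the struct
layout and helper names (first_postorder, next_postorder, destroy_tree)
are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right, *parent;
};

/* Descend to the leftmost-deepest node: the first node in postorder. */
static struct node *first_postorder(struct node *n)
{
	if (!n)
		return NULL;
	for (;;) {
		if (n->left)
			n = n->left;
		else if (n->right)
			n = n->right;
		else
			return n;
	}
}

/*
 * The postorder successor of n is either its parent (when n is a right
 * or only child) or the first postorder node of its right sibling's
 * subtree. Crucially, it is never a descendant of n, so n can be freed
 * before the successor is visited.
 */
static struct node *next_postorder(struct node *n)
{
	struct node *parent = n->parent;

	if (parent && n == parent->left && parent->right)
		return first_postorder(parent->right);
	return parent;
}

/* Free the whole tree without ever detaching or rebalancing nodes. */
static void destroy_tree(struct node *root)
{
	struct node *pos = first_postorder(root);

	while (pos) {
		/* Fetch the successor before freeing the current node. */
		struct node *n = next_postorder(pos);

		printf("freeing %d\n", pos->key);
		free(pos);
		pos = n;
	}
}

int main(void)
{
	/*
	 * Build:    2
	 *          / \
	 *         1   3    (freed in postorder: 1, 3, 2)
	 */
	struct node *r = calloc(1, sizeof(*r));
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));

	r->key = 2; a->key = 1; b->key = 3;
	r->left = a; r->right = b;
	a->parent = r; b->parent = r;

	destroy_tree(r);
	return 0;
}

The kernel macro follows the same pattern: it keeps two cursors (entry
and n in the zswap hunk above), computing the next postorder position
into n before the loop body runs, so the body may free entry safely.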