si->swap_map[] of the swap entries in the cluster needs to be cleared
during freeing.  Previously, this was done in the caller of
swap_free_cluster().  This may cause code duplication (there is one
user now, and more will be added later) and locks/unlocks the cluster
unnecessarily.  In this patch, the clearing code is moved into
swap_free_cluster() to avoid these downsides.

Signed-off-by: "Huang, Ying" <ying.hu...@intel.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Michal Hocko <mho...@suse.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Shaohua Li <s...@kernel.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: Minchan Kim <minc...@kernel.org>
---
 mm/swapfile.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index ef974bbd7715..97a1bd1a7c9a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -933,6 +933,7 @@ static void swap_free_cluster(struct swap_info_struct *si, 
unsigned long idx)
        struct swap_cluster_info *ci;
 
        ci = lock_cluster(si, offset);
+       memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
        cluster_set_count_flag(ci, 0, 0);
        free_cluster(si, idx);
        unlock_cluster(ci);
@@ -1309,9 +1310,6 @@ void put_swap_page(struct page *page, swp_entry_t entry)
                if (free_entries == SWAPFILE_CLUSTER) {
                        unlock_cluster_or_swap_info(si, ci);
                        spin_lock(&si->lock);
-                       ci = lock_cluster(si, offset);
-                       memset(map, 0, SWAPFILE_CLUSTER);
-                       unlock_cluster(ci);
                        mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
                        swap_free_cluster(si, idx);
                        spin_unlock(&si->lock);
-- 
2.16.4

Reply via email to