In f2fs_try_to_free_nids(), the .nid_list_lock spinlock critical section
grows with the number of entries we are asked to shrink. To avoid keeping
other CPUs spinning on the lock for a long time, release and regrab the
lock on each iteration, as the extent cache and nat entry shrinkers do.

Signed-off-by: Chao Yu <yuch...@huawei.com>
---
v2:
- fix unlocking the wrong spinlock.
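
For reference, below is a minimal userspace sketch of the locking pattern
this patch switches to (pthread-based; shrink_list, entry, low_watermark
and friends are illustrative names, not f2fs code). The point is that the
spinlock is held for at most one entry at a time, and the expensive work
runs with the lock dropped:

/* Minimal userspace analogue of the per-entry lock-yield pattern.
 * All names are hypothetical; list_lock must be initialized with
 * pthread_spin_init() before first use. */
#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
};

static struct entry *head;
static int count;
static pthread_spinlock_t list_lock;

static int shrink_list(int nr_shrink, int low_watermark)
{
	int nr = nr_shrink;

	pthread_spin_lock(&list_lock);
	while (nr_shrink) {
		struct entry *e;

		if (count <= low_watermark)
			break;

		/* Detach under the lock so the list stays consistent. */
		e = head;
		head = e->next;
		count--;
		pthread_spin_unlock(&list_lock);

		/* Do the expensive part (freeing) with the lock dropped,
		 * so waiters only ever spin for one iteration. */
		free(e);
		nr_shrink--;

		pthread_spin_lock(&list_lock);
	}
	pthread_spin_unlock(&list_lock);

	return nr - nr_shrink;
}
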
 fs/f2fs/node.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 4da0d8713df5..ad0b14f4dab8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2488,7 +2488,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
-       struct free_nid *i, *next;
        int nr = nr_shrink;
 
        if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
@@ -2498,14 +2497,21 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
                return 0;
 
        spin_lock(&nm_i->nid_list_lock);
-       list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
-               if (nr_shrink <= 0 ||
-                               nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
+       while (nr_shrink) {
+               struct free_nid *i;
+
+               if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
                        break;
 
+               i = list_first_entry(&nm_i->free_nid_list,
+                                       struct free_nid, list);
                __remove_free_nid(sbi, i, FREE_NID);
+               spin_unlock(&nm_i->nid_list_lock);
+
                kmem_cache_free(free_nid_slab, i);
                nr_shrink--;
+
+               spin_lock(&nm_i->nid_list_lock);
        }
        spin_unlock(&nm_i->nid_list_lock);
        mutex_unlock(&nm_i->build_lock);
-- 
2.18.0.rc1
