This deadlock is hitting Android users (Pixel 3/3a/4) with Magisk, due
to frequent umount/mount operations that trigger quota_sync, hitting
the race. See https://github.com/topjohnwu/Magisk/issues/3171 for
additional impact discussion.

In commit db6ec53b7e03, we added a semaphore to protect quota flags.
As part of this commit, we changed f2fs_quota_sync to call
f2fs_lock_op, in an attempt to prevent an AB/BA type deadlock with
quota_sem locking in block_operation.  However, rwsem in Linux is not
recursive. Therefore, the following deadlock can occur:

f2fs_quota_sync
down_read(cp_rwsem) // f2fs_lock_op
filemap_fdatawrite
f2fs_write_data_pages
...
                                   block_operation
                                   down_write(cp_rwsem) - marks rwsem as
                                                          "writer pending"
down_read_trylock(cp_rwsem) - fails as there is
                              a writer pending.
                              Code keeps on trying,
                              live-locking the filesystem.

We solve this by creating a new rwsem, used specifically to
synchronize this case, instead of attempting to reuse an existing
lock.

Signed-off-by: Shachar Raindel <shach...@gmail.com>

Fixes: db6ec53b7e03 ("f2fs: add a rw_sem to cover quota flag changes")
---
 fs/f2fs/f2fs.h  |  3 +++
 fs/f2fs/super.c | 13 +++++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index cb700d797296..b3e55137be7f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1448,6 +1448,7 @@ struct f2fs_sb_info {
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
        struct rw_semaphore cp_rwsem;           /* blocking FS operations */
+       struct rw_semaphore cp_quota_rwsem;     /* blocking quota sync operations */
        struct rw_semaphore node_write;         /* locking node writes */
        struct rw_semaphore node_change;        /* locking node change */
        wait_queue_head_t cp_wait;
@@ -1961,12 +1962,14 @@ static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 
 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
+       down_write(&sbi->cp_quota_rwsem);
        down_write(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
        up_write(&sbi->cp_rwsem);
+       up_write(&sbi->cp_quota_rwsem);
 }
 
 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 00eff2f51807..5ce61147d7e5 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2209,8 +2209,16 @@ int f2fs_quota_sync(struct super_block *sb, int type)
         *  f2fs_dquot_commit
         *                            block_operation
         *                            down_read(quota_sem)
+        *
+        * However, we cannot use the cp_rwsem to prevent this
+        * deadlock, as the cp_rwsem is taken for read inside the
+        * f2fs_dquot_commit code, and rwsem is not recursive.
+        *
+        * We therefore use a special lock to synchronize
+        * f2fs_quota_sync with block_operations, as this is the only
+        * place where such recursion occurs.
         */
-       f2fs_lock_op(sbi);
+       down_read(&sbi->cp_quota_rwsem);
 
        down_read(&sbi->quota_sem);
        ret = dquot_writeback_dquots(sb, type);
@@ -2251,7 +2259,7 @@ int f2fs_quota_sync(struct super_block *sb, int type)
        if (ret)
                set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
        up_read(&sbi->quota_sem);
-       f2fs_unlock_op(sbi);
+       up_read(&sbi->cp_quota_rwsem);
        return ret;
 }
 
@@ -3599,6 +3607,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 
        init_rwsem(&sbi->cp_rwsem);
        init_rwsem(&sbi->quota_sem);
+       init_rwsem(&sbi->cp_quota_rwsem);
        init_waitqueue_head(&sbi->cp_wait);
        init_sb_info(sbi);
 
-- 
2.29.2

Reply via email to