On 22/02/17 15:09, Andreas Gruenbacher wrote:
From: Andrew Price <anpr...@redhat.com>
We must hold the RCU read lock across looking up a glock and trying to
bump its refcount, to prevent the glock from being freed in between.
Signed-off-by: Andreas Gruenbacher <agrue...@redhat.com>
Signed-off-by: Andrew Price <anpr...@redhat.com>
Thanks,
Andy
---
fs/gfs2/glock.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 94f50ca..1d60f5f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -658,9 +658,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	struct kmem_cache *cachep;
 	int ret, tries = 0;
 
+	rcu_read_lock();
 	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
 		gl = NULL;
+	rcu_read_unlock();
 
 	*glp = gl;
 	if (gl)
@@ -728,15 +730,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 
 	if (ret == -EEXIST) {
 		ret = 0;
+		rcu_read_lock();
 		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
 			if (++tries < 100) {
+				rcu_read_unlock();
 				cond_resched();
 				goto again;
 			}
 			tmp = NULL;
 			ret = -ENOMEM;
 		}
+		rcu_read_unlock();
 	} else {
 		WARN_ON_ONCE(ret);
 	}
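
(Illustration only, not part of the patch.) Here is a minimal sketch of the
pattern the change enforces; struct myobj, myobj_table, myobj_params and
myobj_lookup_get() below are hypothetical stand-ins for the glock,
gl_hash_table, ht_parms and gfs2_glock_get(). The point is that the
rhashtable lookup and the lockref_get_not_dead() call both sit inside a
single RCU read-side critical section, so the object cannot be reclaimed
between the two:

/* Minimal sketch, not gfs2 code: myobj, myobj_table and myobj_params
 * are hypothetical stand-ins for the glock, gl_hash_table and ht_parms.
 */
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/lockref.h>

struct myobj {
	struct rhash_head node;	/* hashed by 'key' */
	struct lockref ref;	/* marked dead when the object is dying */
	u64 key;
};

extern struct rhashtable myobj_table;			/* hypothetical */
extern const struct rhashtable_params myobj_params;	/* hypothetical */

/* Look up an object and take a reference, or return NULL. */
static struct myobj *myobj_lookup_get(u64 key)
{
	struct myobj *obj;

	rcu_read_lock();
	obj = rhashtable_lookup_fast(&myobj_table, &key, myobj_params);
	/*
	 * The object may be freed (after an RCU grace period) once its
	 * lockref is marked dead.  Taking the reference while still inside
	 * the RCU read-side critical section guarantees the memory is not
	 * reclaimed between the lookup and the refcount bump.
	 */
	if (obj && !lockref_get_not_dead(&obj->ref))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}

Note that an RCU read-side critical section must not be held across a
reschedule, which is presumably why the patch drops the read lock before the
cond_resched()/goto again in the -EEXIST retry path and reacquires it on the
next attempt.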