[Cluster-devel] cluster/gfs-kernel/src/gfs glock.c

2008-01-29 Thread bmarzins
CVSROOT:/cvs/cluster
Module name:cluster
Branch: RHEL4
Changes by: [EMAIL PROTECTED]   2008-01-29 22:21:45

Modified files:
gfs-kernel/src/gfs: glock.c 

Log message:
Fix for bz #419391.  gfs_glock_dq was traversing the gl_holders list 
without
holding the gl_spin spinlock, this was causing a problem when the list 
item
it was currently looking at got removed from the list.  The solution is 
to
not traverse the list, because it is unnecessary. Unfortunately, there 
is also
a bug in this section of code, where you can't guarantee that you will 
not
cache a glock held with GL_NOCACHE.  Fixing this issue requires 
significantly
more work.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.20.2.6&r2=1.20.2.7

--- cluster/gfs-kernel/src/gfs/glock.c  2007/06/26 20:34:10 1.20.2.6
+++ cluster/gfs-kernel/src/gfs/glock.c  2008/01/29 22:21:45 1.20.2.7
@@ -1608,8 +1608,6 @@
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
struct list_head *pos;
-   struct gfs_holder *tmp_gh = NULL;
-   int count = 0;
 
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
 
@@ -1620,14 +1618,13 @@
set_bit(GLF_SYNC, &gl->gl_flags);
 
/* Don't cache glock; request demote to unlock at inter-node scope */
-   if (gh->gh_flags & GL_NOCACHE) {
-   list_for_each(pos, &gl->gl_holders) {
-   tmp_gh = list_entry(pos, struct gfs_holder, gh_list);
-   ++count;
-   }
-   if (tmp_gh == gh && count == 1)
+   if (gh->gh_flags & GL_NOCACHE && gl->gl_holders.next == &gh->gh_list &&
+   gl->gl_holders.prev == &gh->gh_list)
+   /* There's a race here.  If there are two holders, and both
+* are dq'ed at almost the same time, you can't guarantee that
+* you will call handle_callback. Fixing this will require
+* some refactoring */
handle_callback(gl, LM_ST_UNLOCKED);
-   }
 
lock_on_glock(gl);
 



[Cluster-devel] cluster/gfs-kernel/src/gfs glock.c

2008-01-27 Thread fabbione
CVSROOT:/cvs/cluster
Module name:cluster
Changes by: [EMAIL PROTECTED]   2008-01-28 06:40:25

Modified files:
gfs-kernel/src/gfs: glock.c 

Log message:
Remove unused variable

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&r1=1.36&r2=1.37

--- cluster/gfs-kernel/src/gfs/glock.c  2008/01/24 20:42:00 1.36
+++ cluster/gfs-kernel/src/gfs/glock.c  2008/01/28 06:40:25 1.37
@@ -1617,7 +1617,6 @@
struct gfs_glock *gl = gh-gh_gl;
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
-   struct list_head *pos;
 
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
 



[Cluster-devel] cluster/gfs-kernel/src/gfs glock.c

2008-01-24 Thread bmarzins
CVSROOT:/cvs/cluster
Module name:cluster
Changes by: [EMAIL PROTECTED]   2008-01-24 20:42:01

Modified files:
gfs-kernel/src/gfs: glock.c 

Log message:
Fix for bz #426291.  gfs_glock_dq was traversing the gl_holders list 
without
holding the gl_spin spinlock, this was causing a problem when the list 
item
it was currently looking at got removed from the list.  The solution is 
to
not traverse the list, because it is unnecessary. Unfortunately, there 
is also
a bug in this section of code, where you can't guarantee that you will 
not
cache a glock held with GL_NOCACHE.  Fixing this issue requires 
significantly
more work.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&r1=1.35&r2=1.36

--- cluster/gfs-kernel/src/gfs/glock.c  2007/06/26 19:43:59 1.35
+++ cluster/gfs-kernel/src/gfs/glock.c  2008/01/24 20:42:00 1.36
@@ -1618,8 +1618,6 @@
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
struct list_head *pos;
-   struct gfs_holder *tmp_gh = NULL;
-   int count = 0;
 
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
 
@@ -1630,14 +1628,13 @@
set_bit(GLF_SYNC, &gl->gl_flags);
 
/* Don't cache glock; request demote to unlock at inter-node scope */
-   if (gh->gh_flags & GL_NOCACHE) {
-   list_for_each(pos, &gl->gl_holders) {
-   tmp_gh = list_entry(pos, struct gfs_holder, gh_list);
-   ++count;
-   }
-   if (tmp_gh == gh && count == 1)
-   handle_callback(gl, LM_ST_UNLOCKED);
-   }
+   if (gh->gh_flags & GL_NOCACHE && gl->gl_holders.next == &gh->gh_list &&
+   gl->gl_holders.prev == &gh->gh_list)
+   /* There's a race here.  If there are two holders, and both
+* are dq'ed at almost the same time, you can't guarantee that
+* you will call handle_callback. Fixing this will require
+* some refactoring */
+   handle_callback(gl, LM_ST_UNLOCKED);
 
lock_on_glock(gl);