* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-01-25 22:38 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-01-25 22:38 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL4
Changes by: wcheng at sourceware.org 2007-01-25 22:38:39
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 214239 - trimming glock
Forgot to increase gl_count in the previous check-in.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.20.2.4&r2=1.20.2.5
--- cluster/gfs-kernel/src/gfs/glock.c 2007/01/22 07:43:52 1.20.2.4
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/01/25 22:38:39 1.20.2.5
@@ -2550,17 +2550,19 @@
/*
* If the associated inode glock has been in unlocked
- * state for a while, try to purge it.
+ * state, try to purge it.
*/
if (trylock_on_glock(i_gl)) {
if (i_gl->gl_state == LM_ST_UNLOCKED) {
*p_count = *p_count - 1;
unlock_on_glock(i_gl);
+ atomic_inc(&gl->gl_count);
gfs_iopen_go_callback(gl, LM_ST_UNLOCKED);
handle_callback(gl, LM_ST_UNLOCKED);
spin_lock(&gl->gl_spin);
run_queue(gl);
spin_unlock(&gl->gl_spin);
+ glock_put(gl);
} else
unlock_on_glock(i_gl);
}
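As an aside for readers following the fix: the hunk above pins the glock with
atomic_inc(&gl->gl_count) before the callback/demote sequence and drops the pin
with glock_put(gl) afterwards. Below is a small, standalone C model of that
reference-counting rule; it is an illustration only, not the GFS code, and
obj_get/obj_put/demote are invented stand-ins for the real helpers.

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;                   /* the GFS analogue is gl->gl_count */
};

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                printf("last reference dropped, freeing object\n");
                free(o);
        }
}

static void demote(struct obj *o)
{
        /* Stand-in for the callback/demote sequence: other holders may drop
         * their references while this runs, so the caller must hold its own
         * reference across the call. */
        printf("demoting, refcount is %d\n", o->refcount);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));
        o->refcount = 1;                /* reference owned by the scanner */

        obj_get(o);                     /* analogue of atomic_inc(&gl->gl_count) */
        demote(o);
        obj_put(o);                     /* analogue of glock_put(gl) */

        obj_put(o);                     /* scanner drops its own reference */
        return 0;
}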
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 17:38 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 17:38 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: wcheng at sourceware.org 2007-06-26 17:38:07
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 239729:
The previous CVS check-in made a last-minute change to the way the purge count
was calculated in the glock trimming patch. The intention was to trim glocks
evenly across all the hash buckets, but the size of the hash array was
overlooked, so the trimming count works out to zero most of the time. This
effectively makes the glock trimming patch a no-op.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.29.2.2&r2=1.29.2.3
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/17 05:16:52 1.29.2.2
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 17:38:07 1.29.2.3
@@ -2496,13 +2496,12 @@
static int
examine_bucket(glock_examiner examiner,
struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket,
- unsigned int purge_nr)
+ unsigned int *purge_nr)
{
struct glock_plug plug;
struct list_head *tmp;
struct gfs_glock *gl;
int entries;
- unsigned int p_cnt=purge_nr;
/* Add "plug" to end of bucket list, work back up list from there */
memset(&plug.gl_flags, 0, sizeof(unsigned long));
@@ -2543,7 +2542,7 @@
write_unlock(&bucket->hb_lock);
- examiner(gl, &p_cnt);
+ examiner(gl, &purge_nr);
}
}
@@ -2655,11 +2654,11 @@
purge_nr = 0;
else
purge_nr = (atomic_read(&sdp->sd_glock_count) -
- atomic_read(&sdp->sd_glock_held_count)) *
- sdp->sd_tune.gt_glock_purge / 100 / GFS_GL_HASH_SIZE;
+ atomic_read(&sdp->sd_glock_count)) *
+ sdp->sd_tune.gt_glock_purge / 100;
for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
- examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], purge_nr);
+ examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], &purge_nr);
cond_resched();
}
}
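To make the log message concrete, the sketch below (standalone, with
illustrative numbers; the GFS_GL_HASH_SIZE value is assumed for the example,
not taken from the source) shows why the old per-bucket formula usually
truncated the purge target to zero under integer division, and what a
whole-filesystem target looks like instead. Note that the hunk above still
subtracts sd_glock_count from itself; that slip is corrected by the 18:30
check-in later in this thread.

#include <stdio.h>

#define GFS_GL_HASH_SIZE 8192                  /* assumed for illustration */

int main(void)
{
        unsigned int glock_count = 30000;       /* hypothetical cached glocks */
        unsigned int glock_held_count = 25000;  /* hypothetical held glocks */
        unsigned int glock_purge = 50;          /* gt_glock_purge, percent */

        /* Old formula: per-bucket target, truncated by integer division. */
        unsigned int per_bucket = (glock_count - glock_held_count) *
                glock_purge / 100 / GFS_GL_HASH_SIZE;

        /* Intended formula: one target for the whole filesystem. */
        unsigned int whole_fs = glock_count * glock_purge / 100;

        printf("old per-bucket purge target: %u\n", per_bucket);   /* 0 */
        printf("new whole-fs purge target:   %u\n", whole_fs);     /* 15000 */
        return 0;
}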
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 17:50 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 17:50 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2007-06-26 17:50:06
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
RedHat bugzilla 239727:
The previous CVS check-in made a last-minute change to the way the purge count
was calculated. The intention was to trim glocks evenly across all the hash
buckets, but the size of the hash array was overlooked, so the trimming count
works out to zero most of the time. This effectively makes the glock trimming
patch a no-op.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&r1=1.32&r2=1.33
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/17 05:34:53 1.32
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 17:50:06 1.33
@@ -2496,13 +2496,12 @@
static int
examine_bucket(glock_examiner examiner,
struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket,
- unsigned int purge_nr)
+ unsigned int *purge_nr)
{
struct glock_plug plug;
struct list_head *tmp;
struct gfs_glock *gl;
int entries;
- unsigned int p_cnt=purge_nr;
/* Add "plug" to end of bucket list, work back up list from there */
memset(&plug.gl_flags, 0, sizeof(unsigned long));
@@ -2543,7 +2542,7 @@
write_unlock(&bucket->hb_lock);
- examiner(gl, &p_cnt);
+ examiner(gl, &purge_nr);
}
}
@@ -2655,11 +2654,11 @@
purge_nr = 0;
else
purge_nr = (atomic_read(&sdp->sd_glock_count) -
- atomic_read(&sdp->sd_glock_held_count)) *
- sdp->sd_tune.gt_glock_purge / 100 / GFS_GL_HASH_SIZE;
+ atomic_read(&sdp->sd_glock_count)) *
+ sdp->sd_tune.gt_glock_purge / 100;
for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
- examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], purge_nr);
+ examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], &purge_nr);
cond_resched();
}
}
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 18:30 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 18:30 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: wcheng at sourceware.org 2007-06-26 18:30:01
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 239729:
Hit the mouse too soon before commit ... Hopefully this time it is all
correct.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.29.2.3&r2=1.29.2.4
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 17:38:07 1.29.2.3
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 18:30:01 1.29.2.4
@@ -2653,8 +2653,7 @@
if (!sdp->sd_tune.gt_glock_purge)
purge_nr = 0;
else
- purge_nr = (atomic_read(&sdp->sd_glock_count) -
- atomic_read(&sdp->sd_glock_count)) *
+ purge_nr = atomic_read(&sdp->sd_glock_count) *
sdp->sd_tune.gt_glock_purge / 100;
for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 18:38 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 18:38 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2007-06-26 18:38:01
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 239729:
Accidentally moved the wrong patch - fix previous check-in.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&r1=1.33&r2=1.34
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 17:50:06 1.33
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 18:38:01 1.34
@@ -2653,8 +2653,7 @@
if (!sdp->sd_tune.gt_glock_purge)
purge_nr = 0;
else
- purge_nr = (atomic_read(&sdp->sd_glock_count) -
- atomic_read(&sdp->sd_glock_count)) *
+ purge_nr = atomic_read(&sdp->sd_glock_count) *
sdp->sd_tune.gt_glock_purge / 100;
for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 19:42 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 19:42 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: wcheng at sourceware.org 2007-06-26 19:42:32
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 239729
The purge_nr is already a pointer - another mistake in today's check-in.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.29.2.4&r2=1.29.2.5
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 18:30:01 1.29.2.4
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 19:42:31 1.29.2.5
@@ -2542,7 +2542,7 @@
write_unlock(&bucket->hb_lock);
- examiner(gl, &purge_nr);
+ examiner(gl, purge_nr);
}
}
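For background on why purge_nr became a pointer in the first place: the purge
target is meant to be one running budget that every bucket scan decrements, so
the remaining count has to be shared across the examine_bucket() calls rather
than copied per bucket, and the pointer itself must then be passed through
unchanged. A minimal standalone sketch of that pattern follows (invented names
and numbers, not the GFS code).

#include <stdio.h>

#define NR_BUCKETS 4

/* Stand-in for the per-bucket scan: trims entries while the shared budget
 * lasts. */
static void scan_bucket(int bucket, unsigned int *budget)
{
        unsigned int trimmed = 0;

        while (*budget > 0 && trimmed < 3) {    /* pretend 3 candidates/bucket */
                (*budget)--;
                trimmed++;
        }
        printf("bucket %d trimmed %u, budget left %u\n",
               bucket, trimmed, *budget);
}

int main(void)
{
        unsigned int purge_nr = 7;      /* whole-filesystem purge target */
        int x;

        for (x = 0; x < NR_BUCKETS; x++)
                scan_bucket(x, &purge_nr);      /* take the address once here; */
        /* inside, hand the pointer on as-is, not its address. */
        return 0;
}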
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 19:43 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 19:43 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2007-06-26 19:43:59
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 239729:
The purge_nr in glock_scan is already a pointer. Fix error in today's check-in.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&r1=1.34&r2=1.35
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 18:38:01 1.34
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 19:43:59 1.35
@@ -2542,7 +2542,7 @@
write_unlock(&bucket->hb_lock);
- examiner(gl, &purge_nr);
+ examiner(gl, purge_nr);
}
}
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 20:34 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 20:34 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL4
Changes by: wcheng at sourceware.org 2007-06-26 20:34:16
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 245776:
The previous CVS check-in made a last-minute change to the way the purge count
was calculated. The intention was to trim glocks evenly across all the hash
buckets, but the size of the hash array was overlooked, so the trimming count
works out to zero most of the time. Fix this oversight.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.20.2.5&r2=1.20.2.6
--- cluster/gfs-kernel/src/gfs/glock.c 2007/01/25 22:38:39 1.20.2.5
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 20:34:10 1.20.2.6
@@ -2484,13 +2484,12 @@
static int
examine_bucket(glock_examiner examiner,
struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket,
- unsigned int purge_nr)
+ unsigned int *purge_nr)
{
struct glock_plug plug;
struct list_head *tmp;
struct gfs_glock *gl;
int entries;
- unsigned int p_cnt=purge_nr;
/* Add "plug" to end of bucket list, work back up list from there */
memset(&plug.gl_flags, 0, sizeof(unsigned long));
@@ -2531,7 +2530,7 @@
write_unlock(&bucket->hb_lock);
- examiner(gl, &p_cnt);
+ examiner(gl, purge_nr);
}
}
@@ -2641,12 +2640,11 @@
if (!sdp->sd_tune.gt_glock_purge)
purge_nr = 0;
else
- purge_nr = (atomic_read(&sdp->sd_glock_count) -
- atomic_read(&sdp->sd_glock_held_count)) *
- sdp->sd_tune.gt_glock_purge / 100 / GFS_GL_HASH_SIZE;
+ purge_nr = atomic_read(&sdp->sd_glock_count) *
+ sdp->sd_tune.gt_glock_purge / 100;
for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
- examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], purge_nr);
+ examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], &purge_nr);
cond_resched();
}
}
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2007-06-26 20:39 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-26 20:39 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL45
Changes by: wcheng at sourceware.org 2007-06-26 20:39:25
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Bugzilla 245776:
The previous CVS check-in made a last-minute change to the way the purge count
was calculated. The intention was to trim glocks evenly across all the hash
buckets, but the size of the hash array was overlooked, so the trimming count
works out to zero most of the time. Fix this oversight.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL45&r1=1.20.2.5&r2=1.20.2.5.2.1
--- cluster/gfs-kernel/src/gfs/glock.c 2007/01/25 22:38:39 1.20.2.5
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 20:39:24 1.20.2.5.2.1
@@ -2484,13 +2484,12 @@
static int
examine_bucket(glock_examiner examiner,
struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket,
- unsigned int purge_nr)
+ unsigned int *purge_nr)
{
struct glock_plug plug;
struct list_head *tmp;
struct gfs_glock *gl;
int entries;
- unsigned int p_cnt=purge_nr;
/* Add "plug" to end of bucket list, work back up list from there */
memset(&plug.gl_flags, 0, sizeof(unsigned long));
@@ -2531,7 +2530,7 @@
write_unlock(&bucket->hb_lock);
- examiner(gl, &p_cnt);
+ examiner(gl, purge_nr);
}
}
@@ -2641,12 +2640,11 @@
if (!sdp->sd_tune.gt_glock_purge)
purge_nr = 0;
else
- purge_nr = (atomic_read(&sdp->sd_glock_count) -
- atomic_read(&sdp->sd_glock_held_count)) *
- sdp->sd_tune.gt_glock_purge / 100 / GFS_GL_HASH_SIZE;
+ purge_nr = atomic_read(&sdp->sd_glock_count) *
+ sdp->sd_tune.gt_glock_purge / 100;
for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
- examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], purge_nr);
+ examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], &purge_nr);
cond_resched();
}
}
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2008-01-24 20:08 bmarzins
0 siblings, 0 replies; 15+ messages in thread
From: bmarzins @ 2008-01-24 20:08 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL51
Changes by: bmarzins at sourceware.org 2008-01-24 20:08:43
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Fix for bz #426291. gfs_glock_dq was traversing the gl_holders list without
holding the gl_spin spinlock, which caused a problem when the list item it was
currently looking at got removed from the list. The solution is to not traverse
the list at all, because the traversal is unnecessary. Unfortunately, there is
also a bug in this section of code: you can't guarantee that a glock held with
GL_NOCACHE will not be cached. Fixing that issue requires significantly more
work.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL51&r1=1.29.2.5&r2=1.29.2.5.2.1
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 19:42:31 1.29.2.5
+++ cluster/gfs-kernel/src/gfs/glock.c 2008/01/24 20:08:43 1.29.2.5.2.1
@@ -1618,8 +1618,6 @@
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
struct list_head *pos;
- struct gfs_holder *tmp_gh = NULL;
- int count = 0;
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
@@ -1630,14 +1628,13 @@
set_bit(GLF_SYNC, &gl->gl_flags);
/* Don't cache glock; request demote to unlock at inter-node scope */
- if (gh->gh_flags & GL_NOCACHE) {
- list_for_each(pos, &gl->gl_holders) {
- tmp_gh = list_entry(pos, struct gfs_holder, gh_list);
- ++count;
- }
- if (tmp_gh == gh && count == 1)
- handle_callback(gl, LM_ST_UNLOCKED);
- }
+ if (gh->gh_flags & GL_NOCACHE && gl->gl_holders.next == &gh->gh_list &&
+ gl->gl_holders.prev == &gh->gh_list)
+ /* There's a race here. If there are two holders, and both
+ * are dq'ed at almost the same time, you can't guarantee that
+ * you will call handle_callback. Fixing this will require
+ * some refactoring */
+ handle_callback(gl, LM_ST_UNLOCKED);
lock_on_glock(gl);
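The replacement test relies on a property of the kernel's circular lists: if
the list head's next and prev both point at the same entry, that entry is the
only element, so "gh is the sole holder" can be decided without walking
gl_holders. Below is a standalone sketch of that check using an invented
minimal list type, not the kernel list API.

#include <stdio.h>

struct node {
        struct node *next, *prev;
};

static void list_init(struct node *head)
{
        head->next = head;
        head->prev = head;
}

static void list_add_tail(struct node *entry, struct node *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

/* The shape of the new test in gfs_glock_dq: entry is the only element
 * exactly when the head points at it from both directions. */
static int is_sole_entry(const struct node *head, const struct node *entry)
{
        return head->next == entry && head->prev == entry;
}

int main(void)
{
        struct node holders, gh1, gh2;

        list_init(&holders);
        list_add_tail(&gh1, &holders);
        printf("one holder:  sole? %d\n", is_sole_entry(&holders, &gh1));  /* 1 */

        list_add_tail(&gh2, &holders);
        printf("two holders: sole? %d\n", is_sole_entry(&holders, &gh1));  /* 0 */
        return 0;
}

As the new comment in the hunk notes, the check is still racy when two holders
are dequeued at almost the same time; that is left for later refactoring.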
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2008-01-24 20:25 bmarzins
0 siblings, 0 replies; 15+ messages in thread
From: bmarzins @ 2008-01-24 20:25 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: bmarzins at sourceware.org 2008-01-24 20:25:10
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Fix for bz #426291. gfs_glock_dq was traversing the gl_holders list without
holding the gl_spin spinlock, which caused a problem when the list item it was
currently looking at got removed from the list. The solution is to not traverse
the list at all, because the traversal is unnecessary. Unfortunately, there is
also a bug in this section of code: you can't guarantee that a glock held with
GL_NOCACHE will not be cached. Fixing that issue requires significantly more
work.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.29.2.5&r2=1.29.2.6
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 19:42:31 1.29.2.5
+++ cluster/gfs-kernel/src/gfs/glock.c 2008/01/24 20:25:10 1.29.2.6
@@ -1618,8 +1618,6 @@
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
struct list_head *pos;
- struct gfs_holder *tmp_gh = NULL;
- int count = 0;
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
@@ -1630,14 +1628,13 @@
set_bit(GLF_SYNC, &gl->gl_flags);
/* Don't cache glock; request demote to unlock at inter-node scope */
- if (gh->gh_flags & GL_NOCACHE) {
- list_for_each(pos, &gl->gl_holders) {
- tmp_gh = list_entry(pos, struct gfs_holder, gh_list);
- ++count;
- }
- if (tmp_gh == gh && count == 1)
- handle_callback(gl, LM_ST_UNLOCKED);
- }
+ if (gh->gh_flags & GL_NOCACHE && gl->gl_holders.next == &gh->gh_list &&
+ gl->gl_holders.prev == &gh->gh_list)
+ /* There's a race here. If there are two holders, and both
+ * are dq'ed at almost the same time, you can't guarantee that
+ * you will call handle_callback. Fixing this will require
+ * some refactoring */
+ handle_callback(gl, LM_ST_UNLOCKED);
lock_on_glock(gl);
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2008-01-24 20:42 bmarzins
0 siblings, 0 replies; 15+ messages in thread
From: bmarzins @ 2008-01-24 20:42 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: bmarzins at sourceware.org 2008-01-24 20:42:01
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Fix for bz #426291. gfs_glock_dq was traversing the gl_holders list without
holding the gl_spin spinlock, which caused a problem when the list item it was
currently looking at got removed from the list. The solution is to not traverse
the list at all, because the traversal is unnecessary. Unfortunately, there is
also a bug in this section of code: you can't guarantee that a glock held with
GL_NOCACHE will not be cached. Fixing that issue requires significantly more
work.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&r1=1.35&r2=1.36
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 19:43:59 1.35
+++ cluster/gfs-kernel/src/gfs/glock.c 2008/01/24 20:42:00 1.36
@@ -1618,8 +1618,6 @@
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
struct list_head *pos;
- struct gfs_holder *tmp_gh = NULL;
- int count = 0;
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
@@ -1630,14 +1628,13 @@
set_bit(GLF_SYNC, &gl->gl_flags);
/* Don't cache glock; request demote to unlock at inter-node scope */
- if (gh->gh_flags & GL_NOCACHE) {
- list_for_each(pos, &gl->gl_holders) {
- tmp_gh = list_entry(pos, struct gfs_holder, gh_list);
- ++count;
- }
- if (tmp_gh == gh && count == 1)
- handle_callback(gl, LM_ST_UNLOCKED);
- }
+ if (gh->gh_flags & GL_NOCACHE && gl->gl_holders.next == &gh->gh_list &&
+ gl->gl_holders.prev == &gh->gh_list)
+ /* There's a race here. If there are two holders, and both
+ * are dq'ed at almost the same time, you can't guarantee that
+ * you will call handle_callback. Fixing this will require
+ * some refactoring */
+ handle_callback(gl, LM_ST_UNLOCKED);
lock_on_glock(gl);
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2008-01-24 22:23 bmarzins
0 siblings, 0 replies; 15+ messages in thread
From: bmarzins @ 2008-01-24 22:23 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL51
Changes by: bmarzins at sourceware.org 2008-01-24 22:23:47
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Oops. Reverted the fix for bz #426291; it isn't going into RHEL 5.1.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL51&r1=1.29.2.5.2.1&r2=1.29.2.5.2.2
--- cluster/gfs-kernel/src/gfs/glock.c 2008/01/24 20:08:43 1.29.2.5.2.1
+++ cluster/gfs-kernel/src/gfs/glock.c 2008/01/24 22:23:47 1.29.2.5.2.2
@@ -1618,6 +1618,8 @@
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
struct list_head *pos;
+ struct gfs_holder *tmp_gh = NULL;
+ int count = 0;
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
@@ -1628,13 +1630,14 @@
set_bit(GLF_SYNC, &gl->gl_flags);
/* Don't cache glock; request demote to unlock at inter-node scope */
- if (gh->gh_flags & GL_NOCACHE && gl->gl_holders.next == &gh->gh_list &&
- gl->gl_holders.prev == &gh->gh_list)
- /* There's a race here. If there are two holders, and both
- * are dq'ed at almost the same time, you can't guarantee that
- * you will call handle_callback. Fixing this will require
- * some refactoring */
- handle_callback(gl, LM_ST_UNLOCKED);
+ if (gh->gh_flags & GL_NOCACHE) {
+ list_for_each(pos, &gl->gl_holders) {
+ tmp_gh = list_entry(pos, struct gfs_holder, gh_list);
+ ++count;
+ }
+ if (tmp_gh == gh && count == 1)
+ handle_callback(gl, LM_ST_UNLOCKED);
+ }
lock_on_glock(gl);
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2008-01-28 6:40 fabbione
0 siblings, 0 replies; 15+ messages in thread
From: fabbione @ 2008-01-28 6:40 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: fabbione at sourceware.org 2008-01-28 06:40:25
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Remove unused variable
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&r1=1.36&r2=1.37
--- cluster/gfs-kernel/src/gfs/glock.c 2008/01/24 20:42:00 1.36
+++ cluster/gfs-kernel/src/gfs/glock.c 2008/01/28 06:40:25 1.37
@@ -1617,7 +1617,6 @@
struct gfs_glock *gl = gh->gh_gl;
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
- struct list_head *pos;
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
* [Cluster-devel] cluster/gfs-kernel/src/gfs glock.c
@ 2008-01-29 22:21 bmarzins
0 siblings, 0 replies; 15+ messages in thread
From: bmarzins @ 2008-01-29 22:21 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL4
Changes by: bmarzins at sourceware.org 2008-01-29 22:21:45
Modified files:
gfs-kernel/src/gfs: glock.c
Log message:
Fix for bz #419391. gfs_glock_dq was traversing the gl_holders list without
holding the gl_spin spinlock, which caused a problem when the list item it was
currently looking at got removed from the list. The solution is to not traverse
the list at all, because the traversal is unnecessary. Unfortunately, there is
also a bug in this section of code: you can't guarantee that a glock held with
GL_NOCACHE will not be cached. Fixing that issue requires significantly more
work.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.20.2.6&r2=1.20.2.7
--- cluster/gfs-kernel/src/gfs/glock.c 2007/06/26 20:34:10 1.20.2.6
+++ cluster/gfs-kernel/src/gfs/glock.c 2008/01/29 22:21:45 1.20.2.7
@@ -1608,8 +1608,6 @@
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_glock_operations *glops = gl->gl_ops;
struct list_head *pos;
- struct gfs_holder *tmp_gh = NULL;
- int count = 0;
atomic_inc(&gl->gl_sbd->sd_glock_dq_calls);
@@ -1620,14 +1618,13 @@
set_bit(GLF_SYNC, &gl->gl_flags);
/* Don't cache glock; request demote to unlock at inter-node scope */
- if (gh->gh_flags & GL_NOCACHE) {
- list_for_each(pos, &gl->gl_holders) {
- tmp_gh = list_entry(pos, struct gfs_holder, gh_list);
- ++count;
- }
- if (tmp_gh == gh && count == 1)
+ if (gh->gh_flags & GL_NOCACHE && gl->gl_holders.next == &gh->gh_list &&
+ gl->gl_holders.prev == &gh->gh_list)
+ /* There's a race here. If there are two holders, and both
+ * are dq'ed at almost the same time, you can't guarantee that
+ * you will call handle_callback. Fixing this will require
+ * some refactoring */
handle_callback(gl, LM_ST_UNLOCKED);
- }
lock_on_glock(gl);