* [Cluster-devel] [GFS2 PATCH] Eliminate gl_req_bh
From: Bob Peterson @ 2008-01-29 19:56 UTC
To: cluster-devel.redhat.com

Hi,

This patch further reduces the memory needs of GFS2 by
eliminating the gl_req_bh variable from struct gfs2_glock.
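
The core of the change is that xmote_bh now looks at the state returned by
the lock module and calls drop_bh itself when an unlock has completed, so
there is no longer any need to remember a per-glock completion callback.
The stand-alone toy program below only sketches that dispatch pattern; its
types and constants are simplified stand-ins, not the real GFS2 definitions.

#include <stdio.h>

/* Simplified stand-in values, not the real lm_interface.h constants. */
#define LM_OUT_ST_MASK  0x000000ff
#define LM_ST_UNLOCKED  0
#define LM_ST_EXCLUSIVE 1

struct toy_glock {
        unsigned int state;
        /* Before the patch, a callback pointer lived here:
         *   void (*req_bh)(struct toy_glock *gl, unsigned int ret);
         * and had to be set to xmote_bh or drop_bh before each request. */
};

/* Completion of an unlock request. */
static void drop_bh(struct toy_glock *gl, unsigned int ret)
{
        (void)ret;
        gl->state = LM_ST_UNLOCKED;
        printf("unlock complete\n");
}

/* Single completion entry point: dispatch on the returned state,
 * so no stored callback pointer is needed. */
static void xmote_bh(struct toy_glock *gl, unsigned int ret)
{
        if ((ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
                drop_bh(gl, ret);
                return;
        }
        gl->state = ret & LM_OUT_ST_MASK;
        printf("lock acquired, state %u\n", gl->state);
}

int main(void)
{
        struct toy_glock gl = { .state = LM_ST_UNLOCKED };

        xmote_bh(&gl, LM_ST_EXCLUSIVE); /* grant completed */
        xmote_bh(&gl, LM_ST_UNLOCKED);  /* unlock completed */
        return 0;
}

With one entry point for both cases, gfs2_glock_cb can call xmote_bh
directly instead of jumping through gl->gl_req_bh.
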
Regards,

Bob Peterson
Red Hat GFS

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
--
fs/gfs2/glock.c | 107 ++++++++++++++++++++++++++----------------------------
fs/gfs2/incore.h | 1 -
2 files changed, 51 insertions(+), 57 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 23d040a..78cd1cd 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -338,7 +338,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_ip = 0;
gl->gl_ops = glops;
gl->gl_req_gh = NULL;
- gl->gl_req_bh = NULL;
gl->gl_vn = 0;
gl->gl_stamp = jiffies;
gl->gl_tchange = jiffies;
@@ -738,6 +737,50 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
}

/**
+ * drop_bh - Called after a lock module unlock completes
+ * @gl: the glock
+ * @ret: the return status
+ *
+ * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
+ * Doesn't drop the reference on the glock the top half took out
+ *
+ */
+
+static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
+{
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ const struct gfs2_glock_operations *glops = gl->gl_ops;
+ struct gfs2_holder *gh = gl->gl_req_gh;
+
+ gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
+ gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
+ gfs2_assert_warn(sdp, !ret);
+
+ state_change(gl, LM_ST_UNLOCKED);
+
+ if (glops->go_inval)
+ glops->go_inval(gl, DIO_METADATA);
+
+ if (gh) {
+ spin_lock(&gl->gl_spin);
+ list_del_init(&gh->gh_list);
+ gh->gh_error = 0;
+ spin_unlock(&gl->gl_spin);
+ }
+
+ spin_lock(&gl->gl_spin);
+ gfs2_demote_wake(gl);
+ gl->gl_req_gh = NULL;
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ spin_unlock(&gl->gl_spin);
+
+ gfs2_glock_put(gl);
+
+ if (gh)
+ gfs2_holder_wake(gh);
+}
+
+/**
* xmote_bh - Called after the lock module is done acquiring a lock
* @gl: The glock in question
* @ret: the int returned from the lock module
@@ -752,6 +795,11 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
int prev_state = gl->gl_state;
int op_done = 1;

+ if ((ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
+ drop_bh(gl, ret);
+ return;
+ }
+
gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
@@ -777,7 +825,6 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
} else {
spin_lock(&gl->gl_spin);
if (gl->gl_state != gl->gl_demote_state) {
- gl->gl_req_bh = NULL;
spin_unlock(&gl->gl_spin);
gfs2_glock_drop_th(gl);
gfs2_glock_put(gl);
@@ -819,7 +866,6 @@ out:
if (op_done) {
spin_lock(&gl->gl_spin);
gl->gl_req_gh = NULL;
- gl->gl_req_bh = NULL;
clear_bit(GLF_LOCK, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
}
@@ -858,7 +904,6 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
gfs2_assert_warn(sdp, state != gl->gl_state);

gfs2_glock_hold(gl);
- gl->gl_req_bh = xmote_bh;

lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

@@ -872,51 +917,6 @@ static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
}

/**
- * drop_bh - Called after a lock module unlock completes
- * @gl: the glock
- * @ret: the return status
- *
- * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
- * Doesn't drop the reference on the glock the top half took out
- *
- */
-
-static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
-{
- struct gfs2_sbd *sdp = gl->gl_sbd;
- const struct gfs2_glock_operations *glops = gl->gl_ops;
- struct gfs2_holder *gh = gl->gl_req_gh;
-
- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
- gfs2_assert_warn(sdp, !ret);
-
- state_change(gl, LM_ST_UNLOCKED);
-
- if (glops->go_inval)
- glops->go_inval(gl, DIO_METADATA);
-
- if (gh) {
- spin_lock(&gl->gl_spin);
- list_del_init(&gh->gh_list);
- gh->gh_error = 0;
- spin_unlock(&gl->gl_spin);
- }
-
- spin_lock(&gl->gl_spin);
- gfs2_demote_wake(gl);
- gl->gl_req_gh = NULL;
- gl->gl_req_bh = NULL;
- clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
-
- gfs2_glock_put(gl);
-
- if (gh)
- gfs2_holder_wake(gh);
-}
-
-/**
* gfs2_glock_drop_th - call into the lock module to unlock a lock
* @gl: the glock
*
@@ -936,7 +936,6 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

gfs2_glock_hold(gl);
- gl->gl_req_bh = drop_bh;

ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

@@ -965,8 +964,7 @@ static void do_cancels(struct gfs2_holder *gh)
while (gl->gl_req_gh != gh &&
!test_bit(HIF_HOLDER, &gh->gh_iflags) &&
!list_empty(&gh->gh_list)) {
- if (gl->gl_req_bh && !(gl->gl_req_gh &&
- (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
+ if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
spin_unlock(&gl->gl_spin);
gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
msleep(100);
@@ -1036,7 +1034,6 @@ static int glock_wait_internal(struct gfs2_holder *gh)

spin_lock(&gl->gl_spin);
gl->gl_req_gh = NULL;
- gl->gl_req_bh = NULL;
clear_bit(GLF_LOCK, &gl->gl_flags);
run_queue(gl);
spin_unlock(&gl->gl_spin);
@@ -1527,8 +1524,7 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
gl = gfs2_glock_find(sdp, &async->lc_name);
if (gfs2_assert_warn(sdp, gl))
return;
- if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
- gl->gl_req_bh(gl, async->lc_ret);
+ xmote_bh(gl, async->lc_ret);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
up_read(&gfs2_umount_flush_sem);
@@ -1889,7 +1885,6 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
print_dbg(gi, " gl_owner = -1\n");
print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
- print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
print_dbg(gi, " reclaim = %s\n",
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 217ecb0..599fb9c 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -191,7 +191,6 @@ struct gfs2_glock {
const struct gfs2_glock_operations *gl_ops;

struct gfs2_holder *gl_req_gh;
- gfs2_glop_bh_t gl_req_bh;

void *gl_lock;
char *gl_lvb;
* [Cluster-devel] [GFS2 PATCH] Eliminate gl_req_bh
From: Steven Whitehouse @ 2008-01-30 13:58 UTC
To: cluster-devel.redhat.com

Hi,

Now in the -nmw git tree. Thanks,

Steve.

On Tue, 2008-01-29 at 13:56 -0600, Bob Peterson wrote:
> Hi,
>
> This patch further reduces the memory needs of GFS2 by
> eliminating the gl_req_bh variable from struct gfs2_glock.
>
> Regards,
>
> Bob Peterson
> Red Hat GFS
>
> Signed-off-by: Bob Peterson <rpeterso@redhat.com>