cluster-devel.redhat.com archive mirror
* [Cluster-devel] [PATCH 1/2] gfs2: only use lvb on glocks that need it
@ 2012-11-14 18:46 David Teigland
  2012-11-15 11:24 ` Steven Whitehouse
  0 siblings, 1 reply; 2+ messages in thread
From: David Teigland @ 2012-11-14 18:46 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Save the effort of allocating, reading and writing the lvb for the
majority of glocks, which do not use it.

Signed-off-by: David Teigland <teigland@redhat.com>
---
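
In rough outline (a minimal userspace sketch of the approach, not the
kernel code: the scaffolding, constants and helper names below are
invented for illustration and only mirror the identifiers in the
diff): each glock type that needs a lock value block sets GLOF_LVB in
its glock operations, the buffer is allocated only for those types,
and a VALBLK request flag is passed to the lock manager only when a
buffer actually exists.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define GLOF_ASPACE 1
#define GLOF_LVB    2
#define LVB_SIZE    32          /* stands in for GFS2_MIN_LVB_SIZE */
#define LKF_VALBLK  0x00000010  /* stands in for DLM_LKF_VALBLK    */

struct glock_ops { unsigned long go_flags; };

struct glock {
	const struct glock_ops *gl_ops;
	char *gl_lvb;           /* NULL unless the type asked for one */
};

/* Allocate a glock; only GLOF_LVB types pay for an lvb buffer. */
static struct glock *glock_get(const struct glock_ops *ops)
{
	struct glock *gl = calloc(1, sizeof(*gl));

	if (!gl)
		return NULL;
	gl->gl_ops = ops;
	if (ops->go_flags & GLOF_LVB) {
		gl->gl_lvb = calloc(1, LVB_SIZE);
		if (!gl->gl_lvb) {
			free(gl);
			return NULL;
		}
	}
	return gl;
}

/* Build lock-request flags: only ask the lock manager to carry a
   value block when this glock actually has one. */
static uint32_t make_flags(const struct glock *gl)
{
	uint32_t lkf = 0;

	if (gl->gl_lvb)
		lkf |= LKF_VALBLK;
	return lkf;
}

static void glock_free(struct glock *gl)
{
	free(gl->gl_lvb);
	free(gl);
}

int main(void)
{
	const struct glock_ops rgrp_ops  = { .go_flags = GLOF_ASPACE | GLOF_LVB };
	const struct glock_ops inode_ops = { .go_flags = GLOF_ASPACE };
	struct glock *rgrp  = glock_get(&rgrp_ops);
	struct glock *inode = glock_get(&inode_ops);

	if (!rgrp || !inode)
		return 1;
	printf("rgrp:  lkf=0x%x lvb=%s\n", (unsigned)make_flags(rgrp),
	       rgrp->gl_lvb ? "allocated" : "absent");
	printf("inode: lkf=0x%x lvb=%s\n", (unsigned)make_flags(inode),
	       inode->gl_lvb ? "allocated" : "absent");
	glock_free(rgrp);
	glock_free(inode);
	return 0;
}

The diff below applies the same idea inside gfs2: rgrp and quota
glocks set GLOF_LVB; everything else skips the allocation and the
DLM_LKF_VALBLK traffic.
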
 fs/gfs2/glock.c    |   27 +++++++++++++++++++++------
 fs/gfs2/glops.c    |    3 ++-
 fs/gfs2/incore.h   |    3 ++-
 fs/gfs2/lock_dlm.c |   12 +++++++-----
 4 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f3a5edb..dbe6e71 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -107,10 +107,12 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 {
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
-	if (gl->gl_ops->go_flags & GLOF_ASPACE)
+	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-	else
+	} else {
+		kfree(gl->gl_lvb);
 		kmem_cache_free(gfs2_glock_cachep, gl);
+	}
 }
 
 void gfs2_glock_free(struct gfs2_glock *gl)
@@ -547,7 +549,10 @@ __acquires(&gl->gl_spin)
 	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
 		/* lock_dlm */
 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
-		GLOCK_BUG_ON(gl, ret);
+		if (ret) {
+			printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
+			GLOCK_BUG_ON(gl, 1);
+		}
 	} else { /* lock_nolock */
 		finish_xmote(gl, target);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -736,6 +741,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (!gl)
 		return -ENOMEM;
 
+	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+	gl->gl_lvb = NULL;
+
+	if (glops->go_flags & GLOF_LVB) {
+		gl->gl_lvb = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+		if (!gl->gl_lvb) {
+			kmem_cache_free(cachep, gl);
+			return -ENOMEM;
+		}
+		gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
+	}
+
 	atomic_inc(&sdp->sd_glock_disposal);
 	gl->gl_sbd = sdp;
 	gl->gl_flags = 0;
@@ -753,9 +770,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	preempt_enable();
 	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
 	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
-	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
-	memset(gl->gl_lvb, 0, 32 * sizeof(char));
-	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
@@ -777,6 +791,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	tmp = search_bucket(hash, sdp, &name);
 	if (tmp) {
 		spin_unlock_bucket(hash);
+		kfree(gl->gl_lvb);
 		kmem_cache_free(cachep, gl);
 		atomic_dec(&sdp->sd_glock_disposal);
 		gl = tmp;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 32cc4fd..635bd03 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -552,7 +552,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_unlock = gfs2_rgrp_go_unlock,
 	.go_dump = gfs2_rgrp_dump,
 	.go_type = LM_TYPE_RGRP,
-	.go_flags = GLOF_ASPACE,
+	.go_flags = GLOF_ASPACE | GLOF_LVB,
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
@@ -577,6 +577,7 @@ const struct gfs2_glock_operations gfs2_nondisk_glops = {
 
 const struct gfs2_glock_operations gfs2_quota_glops = {
 	.go_type = LM_TYPE_QUOTA,
+	.go_flags = GLOF_LVB,
 };
 
 const struct gfs2_glock_operations gfs2_journal_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 67a39cf..a37baa5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -216,6 +216,7 @@ struct gfs2_glock_operations {
 	const int go_type;
 	const unsigned long go_flags;
 #define GLOF_ASPACE 1
+#define GLOF_LVB    2
 };
 
 enum {
@@ -321,7 +322,7 @@ struct gfs2_glock {
 	ktime_t gl_dstamp;
 	struct gfs2_lkstats gl_stats;
 	struct dlm_lksb gl_lksb;
-	char gl_lvb[32];
+	char *gl_lvb;
 	unsigned long gl_tchange;
 	void *gl_object;
 
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index f6504d3..d28ae37 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -120,7 +120,7 @@ static void gdlm_ast(void *arg)
 	gfs2_update_reply_times(gl);
 	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
 
-	if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID)
+	if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID && gl->gl_lvb)
 		memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);
 
 	switch (gl->gl_lksb.sb_status) {
@@ -203,8 +203,10 @@ static int make_mode(const unsigned int lmstate)
 static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
 		      const int req)
 {
-	u32 lkf = DLM_LKF_VALBLK;
-	u32 lkid = gl->gl_lksb.sb_lkid;
+	u32 lkf = 0;
+
+	if (gl->gl_lvb)
+		lkf |= DLM_LKF_VALBLK;
 
 	if (gfs_flags & LM_FLAG_TRY)
 		lkf |= DLM_LKF_NOQUEUE;
@@ -228,7 +230,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
 			BUG();
 	}
 
-	if (lkid != 0) {
+	if (gl->gl_lksb.sb_lkid != 0) {
 		lkf |= DLM_LKF_CONVERT;
 		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
 			lkf |= DLM_LKF_QUECVT;
@@ -292,7 +294,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 
 	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    gl->gl_state != LM_ST_EXCLUSIVE) {
+	    gl->gl_lvb && gl->gl_state != LM_ST_EXCLUSIVE) {
 		gfs2_glock_free(gl);
 		return;
 	}
-- 
1.7.10.1.362.g242cab3




* [Cluster-devel] [PATCH 1/2] gfs2: only use lvb on glocks that need it
  2012-11-14 18:46 [Cluster-devel] [PATCH 1/2] gfs2: only use lvb on glocks that need it David Teigland
@ 2012-11-15 11:24 ` Steven Whitehouse
  0 siblings, 0 replies; 2+ messages in thread
From: Steven Whitehouse @ 2012-11-15 11:24 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

Both patches pushed to the -nmw tree. Thanks,

Steve.

On Wed, 2012-11-14 at 13:46 -0500, David Teigland wrote:
> Save the effort of allocating, reading and writing the lvb for the
> majority of glocks, which do not use it.
> 
> Signed-off-by: David Teigland <teigland@redhat.com>
> [...]



