From: Steven Whitehouse <swhiteho@redhat.com>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] [GFS2 PATCH] gfs2: cleanup: call gfs2_rgrp_ondisk2lvb from gfs2_rgrp_out
Date: Fri, 27 Jul 2018 11:28:50 +0100
Message-ID: <f2e7dc6f-7617-4ad8-79c9-84b331081dbe@redhat.com>
In-Reply-To: <580731341.54601358.1532628908738.JavaMail.zimbra@redhat.com>

Hi,

On 26/07/18 19:15, Bob Peterson wrote:
> Hi,
>
> Before this patch gfs2_rgrp_ondisk2lvb was called after every call
> to gfs2_rgrp_out. This patch just calls it directly from within
> gfs2_rgrp_out, and moves the function to be before it so we don't
> need a function prototype.
Yes, this looks like a very good plan. Could we also tidy up some of the
other calls relating to block allocation? We have to adjust the quotas on
each alloc/dealloc, and we have to adjust statfs too, so it would be good
to collect all of those updates together so that none of them can be
forgotten or get out of sync. A useful future project, perhaps?
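
Something along these lines, perhaps? Only an untested sketch: the helper
name gfs2_rgrp_alloc_adjust is invented here, and it simply gathers up the
calls that gfs2_alloc_blocks() already makes in fs/gfs2/rgrp.c:

static void gfs2_rgrp_alloc_adjust(struct gfs2_inode *ip,
				   struct gfs2_rgrpd *rgd,
				   s64 blocks, int dinodes)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct buffer_head *bh = rgd->rd_bits[0].bi_bh;

	/* Keep everything an allocation must update in one place:
	 * the rgrp (and, with this patch, its LVB), statfs and quota. */
	gfs2_trans_add_meta(rgd->rd_gl, bh);
	gfs2_rgrp_out(rgd, bh->b_data);
	gfs2_statfs_change(sdp, 0, -blocks, dinodes);
	gfs2_quota_change(ip, blocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

The deallocation paths could then get a matching helper with the signs
flipped, so the quota and statfs accounting can never drift apart from
the bitmap updates.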

Steve.

> Signed-off-by: Bob Peterson <rpeterso@redhat.com>
> ---
>   fs/gfs2/rgrp.c | 30 +++++++++++++-----------------
>   1 file changed, 13 insertions(+), 17 deletions(-)
>
> diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
> index 1651721fb2b2..5593c7956d07 100644
> --- a/fs/gfs2/rgrp.c
> +++ b/fs/gfs2/rgrp.c
> @@ -1063,6 +1063,18 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
>   	/* rd_data0, rd_data and rd_bitbytes already set from rindex */
>   }
>   
> +static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
> +{
> +	const struct gfs2_rgrp *str = buf;
> +
> +	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
> +	rgl->rl_flags = str->rg_flags;
> +	rgl->rl_free = str->rg_free;
> +	rgl->rl_dinodes = str->rg_dinodes;
> +	rgl->rl_igeneration = str->rg_igeneration;
> +	rgl->__pad = 0UL;
> +}
> +
>   static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
>   {
>   	struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
> @@ -1085,6 +1097,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
>   	str->rg_crc = cpu_to_be32(crc);
>   
>   	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
> +	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
>   }
>   
>   static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
> @@ -1099,18 +1112,6 @@ static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
>   	return 1;
>   }
>   
> -static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
> -{
> -	const struct gfs2_rgrp *str = buf;
> -
> -	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
> -	rgl->rl_flags = str->rg_flags;
> -	rgl->rl_free = str->rg_free;
> -	rgl->rl_dinodes = str->rg_dinodes;
> -	rgl->rl_igeneration = str->rg_igeneration;
> -	rgl->__pad = 0UL;
> -}
> -
>   static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
>   {
>   	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
> @@ -1436,7 +1437,6 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
>   				rgd->rd_flags |= GFS2_RGF_TRIMMED;
>   				gfs2_trans_add_meta(rgd->rd_gl, bh);
>   				gfs2_rgrp_out(rgd, bh->b_data);
> -				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
>   				gfs2_trans_end(sdp);
>   			}
>   		}
> @@ -2414,7 +2414,6 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
>   
>   	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
>   	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
> -	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
>   
>   	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
>   	if (dinode)
> @@ -2455,7 +2454,6 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
>   	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
>   	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
>   	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
> -	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
>   
>   	/* Directories keep their data in the metadata address space */
>   	if (meta || ip->i_depth)
> @@ -2492,7 +2490,6 @@ void gfs2_unlink_di(struct inode *inode)
>   	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
>   	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
>   	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
> -	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
>   	update_rgrp_lvb_unlinked(rgd, 1);
>   }
>   
> @@ -2513,7 +2510,6 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
>   
>   	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
>   	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
> -	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
>   	update_rgrp_lvb_unlinked(rgd, -1);
>   
>   	gfs2_statfs_change(sdp, 0, +1, -1);
>


