From mboxrd@z Thu Jan 1 00:00:00 1970
From: Abhi Das
Date: Wed, 18 Jul 2018 23:04:15 -0500
Subject: [Cluster-devel] [PATCH v3 1/2] gfs2: Pass write offset to gfs2_write_calc_reserv
Message-ID: <1531973055-40114-1-git-send-email-adas@redhat.com>
List-Id:
To: cluster-devel.redhat.com
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

Pass the offset of the write to gfs2_write_calc_reserv so that we can
then compute a better upper bound of the number of indirect blocks
required.

Fixed comments in quota.c:do_sync() to better explain block reservation
calculation for quotas.

Signed-off-by: Andreas Gruenbacher
Acked-by: Abhi Das
---
 fs/gfs2/bmap.c  |  4 ++--
 fs/gfs2/bmap.h  |  2 ++
 fs/gfs2/file.c  | 12 ++++++------
 fs/gfs2/quota.c | 46 ++++++++++++++++++++++++----------------------
 4 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 89f1f7d..7d3bb32 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1003,8 +1003,8 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 	alloc_required = unstuff || iomap->type == IOMAP_HOLE;
 
 	if (alloc_required || gfs2_is_jdata(ip))
-		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
-				       &ind_blocks);
+		gfs2_write_calc_reserv(ip, iomap->offset, iomap->length,
+				       &data_blocks, &ind_blocks);
 
 	if (alloc_required) {
 		struct gfs2_alloc_parms ap = {
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 6b18fb3..6497053 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -22,6 +22,7 @@ struct page;
 /**
  * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
  * @ip: the file
+ * @pos: file offset of the write
  * @len: the number of bytes to be written to the file
  * @data_blocks: returns the number of data blocks required
  * @ind_blocks: returns the number of indirect blocks required
@@ -29,6 +30,7 @@ struct page;
  *
  */
 static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
+					  u64 pos,
 					  unsigned int len,
 					  unsigned int *data_blocks,
 					  unsigned int *ind_blocks)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 08369c6..93f59f9 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -435,7 +435,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 	if (ret)
 		goto out_unlock;
 
-	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
+	gfs2_write_calc_reserv(ip, pos, PAGE_SIZE, &data_blocks, &ind_blocks);
 	ap.target = data_blocks + ind_blocks;
 	ret = gfs2_quota_lock_check(ip, &ap);
 	if (ret)
@@ -918,7 +918,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
  *
  * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
  */
-static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t pos, loff_t *len,
 			    unsigned int *data_blocks, unsigned int *ind_blocks,
 			    unsigned int max_blocks)
 {
@@ -936,7 +936,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
 	if (*len > max) {
 		*len = max;
-		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
+		gfs2_write_calc_reserv(ip, pos, max, data_blocks, ind_blocks);
 	}
 }
 
@@ -969,7 +969,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 
 	gfs2_size_hint(file, offset, len);
 
-	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
+	gfs2_write_calc_reserv(ip, offset, PAGE_SIZE, &data_blocks, &ind_blocks);
 	ap.min_target = data_blocks + ind_blocks;
 
 	while (len > 0) {
@@ -991,7 +991,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 		 * calculate a more realistic 'bytes' to serve as a good
 		 * starting point for the number of bytes we may be able
 		 * to write */
-		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+		gfs2_write_calc_reserv(ip, offset, bytes, &data_blocks, &ind_blocks);
 		ap.target = data_blocks + ind_blocks;
 
 		error = gfs2_quota_lock_check(ip, &ap);
@@ -1014,7 +1014,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 		/* Almost done. Calculate bytes that can be written using
 		 * max_blks. We also recompute max_bytes, data_blocks and
 		 * ind_blocks */
-		calc_max_reserv(ip, &max_bytes, &data_blocks,
+		calc_max_reserv(ip, offset, &max_bytes, &data_blocks,
 				&ind_blocks, max_blks);
 
 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 0efae7a..3591ba1 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -870,22 +870,18 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
-	unsigned int data_blocks, ind_blocks;
 	struct gfs2_holder *ghs, i_gh;
 	unsigned int qx, x;
 	struct gfs2_quota_data *qd;
 	unsigned reserved;
 	loff_t offset;
-	unsigned int nalloc = 0, blocks;
+	unsigned int blocks;
 	int error;
 
 	error = gfs2_rsqa_alloc(ip);
 	if (error)
 		return error;
 
-	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
-			       &data_blocks, &ind_blocks);
-
 	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 	if (!ghs)
 		return -ENOMEM;
@@ -903,32 +899,38 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	if (error)
 		goto out;
 
+	/* We add 3 blocks to account for:
+	 * a) 1 blk for unstuffing inode if stuffed
+	 * b) 1 blk for inode size update
+	 * c) 1 blk in case quota straddles page boundary and two blks need to
+	 *    be updated instead of 1
+	 * These blocks are added to the reservation unconditionally, but they
+	 * will be released to the rgrp if they're not allocated during the
+	 * transaction
+	 */
+	blocks = RES_DINODE + num_qd + 3;
+	reserved = 1;
 	for (x = 0; x < num_qd; x++) {
+		unsigned int data_blocks, ind_blocks;
+
 		offset = qd2offset(qda[x]);
+		gfs2_write_calc_reserv(ip, offset, sizeof(struct gfs2_quota),
+				       &data_blocks, &ind_blocks);
+		blocks += data_blocks;
 		if (gfs2_write_alloc_required(ip, offset,
-					      sizeof(struct gfs2_quota)))
-			nalloc++;
+					      sizeof(struct gfs2_quota))) {
+			blocks += ind_blocks;
+			reserved += data_blocks + ind_blocks;
+		}
 	}
 
-	/*
-	 * 1 blk for unstuffing inode if stuffed. We add this extra
-	 * block to the reservation unconditionally. If the inode
-	 * doesn't need unstuffing, the block will be released to the
-	 * rgrp since it won't be allocated during the transaction
-	 */
-	/* +3 in the end for unstuffing block, inode size update block
-	 * and another block in case quota straddles page boundary and
-	 * two blocks need to be updated instead of 1 */
-	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
-
-	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 	ap.target = reserved;
 	error = gfs2_inplace_reserve(ip, &ap);
 	if (error)
 		goto out_alloc;
 
-	if (nalloc)
-		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
+	if (reserved > 1)
+		blocks += gfs2_rg_blocks(ip, reserved) + RES_STATFS;
 
 	error = gfs2_trans_begin(sdp, blocks, 0);
 	if (error)
@@ -1716,7 +1718,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 		alloc_required = 1;
 	if (alloc_required) {
 		struct gfs2_alloc_parms ap = { .aflags = 0, };
-		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
+		gfs2_write_calc_reserv(ip, offset, sizeof(struct gfs2_quota),
 				       &data_blocks, &ind_blocks);
 		blocks = 1 + data_blocks + ind_blocks;
 		ap.target = blocks;
-- 
2.4.11
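
As a rough illustration of the rationale above (not part of the patch, and not
the actual gfs2_write_calc_reserv() logic): when only the length of a write is
known, the block count has to assume worst-case alignment, but once the
starting offset is known the exact set of file blocks the write touches can be
counted. The standalone sketch below uses a made-up 4096-byte block size and
hypothetical helper names to show the difference for the data-block estimate;
the same idea is what allows a tighter indirect-block bound.

/*
 * Illustrative only: compares a length-only, worst-case data block
 * estimate with an offset-aware one.  The 4096-byte block size and the
 * helper names are hypothetical; this is not gfs2_write_calc_reserv().
 */
#include <stdio.h>

#define BLK_SIZE 4096ULL

/*
 * Length only: the write could start just short of a block boundary,
 * so the estimate has to assume one extra block for misalignment.
 */
static unsigned int data_blocks_len_only(unsigned long long len)
{
	return (unsigned int)((len + BLK_SIZE - 1) / BLK_SIZE) + 1;
}

/*
 * Offset known: count exactly the blocks the byte range [pos, pos+len)
 * touches.
 */
static unsigned int data_blocks_with_pos(unsigned long long pos,
					 unsigned long long len)
{
	unsigned long long first = pos / BLK_SIZE;
	unsigned long long last = (pos + len - 1) / BLK_SIZE;

	return (unsigned int)(last - first + 1);
}

int main(void)
{
	unsigned long long pos = 8192;	/* block-aligned start */
	unsigned long long len = 4096;	/* exactly one block   */

	printf("length-only estimate: %u block(s)\n",
	       data_blocks_len_only(len));
	printf("offset-aware count:   %u block(s)\n",
	       data_blocks_with_pos(pos, len));
	return 0;
}

For a block-aligned 4k write the offset-aware count is one block where the
length-only estimate has to assume two; the in-tree helper applies its own
worst-case padding on top, which this sketch does not attempt to model.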