From: Abhi Das <adas@redhat.com>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] [GFS2 2/3] gfs2: allow quota_check and inplace_reserve to return available blocks
Date: Tue, 24 Feb 2015 23:17:15 -0600 [thread overview]
Message-ID: <1424841436-64093-3-git-send-email-adas@redhat.com> (raw)
In-Reply-To: <1424841436-64093-1-git-send-email-adas@redhat.com>
struct gfs2_alloc_parms is passed to gfs2_quota_check() and
gfs2_inplace_reserve() with ap->target containing the number of
blocks being requested for allocation in the current operation.
We add a new field to struct gfs2_alloc_parms called 'allowed'.
gfs2_quota_check() and gfs2_inplace_reserve() return the max
blocks allowed by quota and rgrps respectively in 'allowed'.
A new flag GFS2_AF_NO_ERR, when set, tells gfs2_quota_check() to
not return -EDQUOT when there are only 'x' blocks available to
allocate, where 0 < x < ap->target. The assumption is that the
caller is ok with just 'x' blocks and will likely proceed with
allocating them. When there is no quota violation, 'allowed' is
set to the maximum number of blocks quotas will allow.
If gfs2_inplace_reserve() is successful in finding an rgrp with
more than the requested number of free blocks, 'allowed' is set
to the total number of free blocks in that rgrp. If not, -ENOSPC
is returned and 'allowed' is set to the maximum number of free
blocks that were found in any rgrp.
Signed-off-by: Abhi Das <adas@redhat.com>
---
fs/gfs2/incore.h | 1 +
fs/gfs2/quota.c | 34 +++++++++++++++++++++++++++-------
fs/gfs2/rgrp.c | 12 ++++++++++--
fs/gfs2/rgrp.h | 6 ++++--
4 files changed, 42 insertions(+), 11 deletions(-)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 3a4ea50..bff2d7f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -303,6 +303,7 @@ struct gfs2_blkreserv {
struct gfs2_alloc_parms {
u64 target;
u32 aflags;
+ u64 allowed;
};
enum {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 964a769..91e77ae 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1094,15 +1094,32 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
return 0;
}
+/**
+ * gfs2_quota_check - check if allocating new blocks will exceed quota
+ * @ip: The inode for which this check is being performed
+ * @uid: The uid to check against
+ * @gid: The gid to check against
+ * @ap: The allocation parameters. ap->target contains the requested
+ * blocks.
+ *
+ * Returns: 0 on success, ap->allowed is set to the number of blocks
+ * available
+ * -EDQUOT on quota violation, ap->allowed is set to number of
+ * blocks available. Note: If GFS2_AF_NO_ERR is set,
+ * -EDQUOT is not returned if at least 1 block can be
+ * allocated w/o exceeding quota, regardless of what
+ * was requested in ap->target.
+ */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
struct gfs2_alloc_parms *ap)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qd;
- s64 value;
+ s64 value, warn, limit, avail = 0;
unsigned int x;
int error = 0;
+ ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
return 0;
@@ -1116,28 +1133,31 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
qid_eq(qd->qd_id, make_kqid_gid(gid))))
continue;
+ warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
+ limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
spin_lock(&qd_lock);
value += qd->qd_change + ap->target;
spin_unlock(&qd_lock);
- if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
+ avail = limit - value - (s64)ap->target;
+ if (limit && limit < value) {
print_message(qd, "exceeded");
quota_send_warning(qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
- error = -EDQUOT;
+ if (!(ap->aflags & GFS2_AF_NO_ERR) || avail <= 0)
+ error = -EDQUOT;
break;
- } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
- (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
+ } else if (warn && warn < value &&
time_after_eq(jiffies, qd->qd_last_warn +
- gfs2_tune_get(sdp,
- gt_quota_warn_period) * HZ)) {
+ gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
quota_send_warning(qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
error = print_message(qd, "warning");
qd->qd_last_warn = jiffies;
}
}
+ ap->allowed = avail >= 0 ? avail : 0;
return error;
}
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 9150207..8975522 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1947,9 +1947,13 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
* @ap: the allocation parameters
*
* Returns: errno
+ * If error is -ENOSPC, ap->allowed is set to the maximum number
+ * of blocks available for allocation.
+ * On success, ap->allowed is set to the blocks available for
+ * allocation in the selected rgrp
*/
-int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
@@ -1957,7 +1961,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
int error = 0, rg_locked, flags = 0;
u64 last_unlinked = NO_BLOCK;
int loops = 0;
- u32 skip = 0;
+ u32 skip = 0, avail = 0;
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
@@ -2029,7 +2033,10 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
/* If rgrp has enough free space, use it */
if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
ip->i_rgd = rs->rs_rbm.rgd;
+ ap->allowed = ip->i_rgd->rd_free_clone;
return 0;
+ } else if (rs->rs_rbm.rgd->rd_free_clone > avail) {
+ avail = rs->rs_rbm.rgd->rd_free_clone;
}
check_rgrp:
@@ -2068,6 +2075,7 @@ next_rgrp:
gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
+ ap->allowed = avail;
return -ENOSPC;
}
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index b104f4a..42714c9 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -40,8 +40,10 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
-#define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
+#define GFS2_AF_ORLOV 1
+#define GFS2_AF_NO_ERR 2
+
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
--
1.8.1.4
next prev parent reply other threads:[~2015-02-25 5:17 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-02-25 5:17 [Cluster-devel] [GFS2 0/3] fallocate and quota fixes Abhi Das
2015-02-25 5:17 ` [Cluster-devel] [GFS2 1/3] gfs2: perform quota checks against allocation parameters Abhi Das
2015-02-25 5:17 ` Abhi Das [this message]
2015-02-25 5:17 ` [Cluster-devel] [GFS2 3/3] gfs2: allow fallocate to max out quotas/fs efficiently Abhi Das
2015-02-25 9:19 ` [Cluster-devel] [GFS2 0/3] fallocate and quota fixes Steven Whitehouse
2015-02-25 14:06 ` Bob Peterson
-- strict thread matches above, loose matches on Subject: below --
2015-03-18 7:36 Abhi Das
2015-03-18 7:36 ` [Cluster-devel] [GFS2 2/3] gfs2: allow quota_check and inplace_reserve to return available blocks Abhi Das
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1424841436-64093-3-git-send-email-adas@redhat.com \
--to=adas@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).