From: Christoph Hellwig <hch@lst.de>
To: Carlos Maiolino <cem@kernel.org>
Cc: linux-xfs@vger.kernel.org, "Darrick J. Wong" <djwong@kernel.org>
Subject: [PATCH 04/18] xfs: remove xfs_dqunlock and friends
Date: Mon, 10 Nov 2025 14:22:56 +0100
Message-ID: <20251110132335.409466-5-hch@lst.de>
In-Reply-To: <20251110132335.409466-1-hch@lst.de>
There's really no point in wrapping the basic mutex operations. Remove
the wrappers to ease lock analysis annotations and make the code a little
easier to read.
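
For reference, the helpers removed here are one-line pass-throughs to the
generic mutex API, so every caller can operate on dqp->q_qlock directly.
A minimal before/after illustration, based on the xfs_qm_dqhold() hunk
below:

	/* before: inline wrappers in xfs_dquot.h hide the mutex */
	xfs_dqlock(dqp);
	dqp->q_nrefs++;
	xfs_dqunlock(dqp);

	/*
	 * after: take the dquot mutex directly, so lock analysis
	 * annotations can be attached to the plain mutex calls
	 */
	mutex_lock(&dqp->q_qlock);
	dqp->q_nrefs++;
	mutex_unlock(&dqp->q_qlock);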
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
---
fs/xfs/scrub/quota.c | 4 ++--
fs/xfs/scrub/quota_repair.c | 6 +++---
fs/xfs/scrub/quotacheck_repair.c | 8 ++++----
fs/xfs/xfs_dquot.c | 14 +++++++-------
fs/xfs/xfs_dquot.h | 19 ++-----------------
fs/xfs/xfs_dquot_item.c | 6 +++---
fs/xfs/xfs_qm.c | 30 +++++++++++++++---------------
fs/xfs/xfs_qm_syscalls.c | 4 ++--
fs/xfs/xfs_trans_dquot.c | 18 +++++++++---------
9 files changed, 47 insertions(+), 62 deletions(-)
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 58d6d4ed2853..c78cf9f96cf6 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -158,9 +158,9 @@ xchk_quota_item(
* However, dqiterate gave us a locked dquot, so drop the dquot lock to
* get the ILOCK.
*/
- xfs_dqunlock(dq);
+ mutex_unlock(&dq->q_qlock);
xchk_ilock(sc, XFS_ILOCK_SHARED);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
/*
* Except for the root dquot, the actual dquot we got must either have
diff --git a/fs/xfs/scrub/quota_repair.c b/fs/xfs/scrub/quota_repair.c
index 8f4c8d41f308..8c89c6cc2950 100644
--- a/fs/xfs/scrub/quota_repair.c
+++ b/fs/xfs/scrub/quota_repair.c
@@ -187,9 +187,9 @@ xrep_quota_item(
* dqiterate gave us a locked dquot, so drop the dquot lock to get the
* ILOCK_EXCL.
*/
- xfs_dqunlock(dq);
+ mutex_unlock(&dq->q_qlock);
xchk_ilock(sc, XFS_ILOCK_EXCL);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
error = xrep_quota_item_bmap(sc, dq, &dirty);
xchk_iunlock(sc, XFS_ILOCK_EXCL);
@@ -258,7 +258,7 @@ xrep_quota_item(
}
xfs_trans_log_dquot(sc->tp, dq);
error = xfs_trans_roll(&sc->tp);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
return error;
}
diff --git a/fs/xfs/scrub/quotacheck_repair.c b/fs/xfs/scrub/quotacheck_repair.c
index dd8554c755b5..415314911499 100644
--- a/fs/xfs/scrub/quotacheck_repair.c
+++ b/fs/xfs/scrub/quotacheck_repair.c
@@ -53,9 +53,9 @@ xqcheck_commit_dquot(
int error = 0;
/* Unlock the dquot just long enough to allocate a transaction. */
- xfs_dqunlock(dq);
+ mutex_unlock(&dq->q_qlock);
error = xchk_trans_alloc(xqc->sc, 0);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
if (error)
return error;
@@ -122,7 +122,7 @@ xqcheck_commit_dquot(
* dquot).
*/
error = xrep_trans_commit(xqc->sc);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
return error;
out_unlock:
@@ -131,7 +131,7 @@ xqcheck_commit_dquot(
xchk_trans_cancel(xqc->sc);
/* Re-lock the dquot so the caller can put the reference. */
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
return error;
}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 79e14ee1d7a0..c2326cee7fae 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -31,7 +31,7 @@
*
* ip->i_lock
* qi->qi_tree_lock
- * dquot->q_qlock (xfs_dqlock() and friends)
+ * dquot->q_qlock
* dquot->q_flush (xfs_dqflock() and friends)
* qi->qi_lru_lock
*
@@ -816,9 +816,9 @@ xfs_qm_dqget_cache_lookup(
return NULL;
}
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
if (dqp->q_flags & XFS_DQFLAG_FREEING) {
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_freeing(dqp);
delay(1);
@@ -865,7 +865,7 @@ xfs_qm_dqget_cache_insert(
}
/* Return a locked dquot to the caller, with a reference taken. */
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
dqp->q_nrefs = 1;
qi->qi_dquots++;
@@ -1051,7 +1051,7 @@ xfs_qm_dqget_inode(
if (dqp1) {
xfs_qm_dqdestroy(dqp);
dqp = dqp1;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
goto dqret;
}
} else {
@@ -1136,7 +1136,7 @@ xfs_qm_dqput(
if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
}
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
}
/*
@@ -1152,7 +1152,7 @@ xfs_qm_dqrele(
trace_xfs_dqrele(dqp);
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
/*
* We don't care to flush it if the dquot is dirty here.
* That will create stutters that we want to avoid.
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 61217adf5ba5..10c39b8cdd03 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -121,21 +121,6 @@ static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
complete(&dqp->q_flush);
}
-static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
-{
- return mutex_trylock(&dqp->q_qlock);
-}
-
-static inline void xfs_dqlock(struct xfs_dquot *dqp)
-{
- mutex_lock(&dqp->q_qlock);
-}
-
-static inline void xfs_dqunlock(struct xfs_dquot *dqp)
-{
- mutex_unlock(&dqp->q_qlock);
-}
-
static inline int
xfs_dquot_type(const struct xfs_dquot *dqp)
{
@@ -246,9 +231,9 @@ void xfs_dquot_detach_buf(struct xfs_dquot *dqp);
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
dqp->q_nrefs++;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return dqp;
}
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 271b195ebb93..b374cd9f1900 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -132,7 +132,7 @@ xfs_qm_dquot_logitem_push(
if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED;
- if (!xfs_dqlock_nowait(dqp))
+ if (!mutex_trylock(&dqp->q_qlock))
return XFS_ITEM_LOCKED;
/*
@@ -177,7 +177,7 @@ xfs_qm_dquot_logitem_push(
out_relock_ail:
spin_lock(&lip->li_ailp->ail_lock);
out_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return rval;
}
@@ -195,7 +195,7 @@ xfs_qm_dquot_logitem_release(
* transaction layer, within trans_commit. Hence, no LI_HOLD flag
* for the logitem.
*/
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
}
STATIC void
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 18a19947bbdb..3e88bea9a465 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -128,7 +128,7 @@ xfs_qm_dqpurge(
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
int error = -EAGAIN;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
goto out_unlock;
@@ -177,7 +177,7 @@ xfs_qm_dqpurge(
!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
qi->qi_dquots--;
@@ -194,7 +194,7 @@ xfs_qm_dqpurge(
return 0;
out_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return error;
}
@@ -329,7 +329,7 @@ xfs_qm_dqattach_one(
* that the dquot returned is the one that should go in the inode.
*/
*IO_idqpp = dqp;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return 0;
}
@@ -468,7 +468,7 @@ xfs_qm_dquot_isolate(
struct xfs_qm_isolate *isol = arg;
enum lru_status ret = LRU_SKIP;
- if (!xfs_dqlock_nowait(dqp))
+ if (!mutex_trylock(&dqp->q_qlock))
goto out_miss_busy;
/*
@@ -494,7 +494,7 @@ xfs_qm_dquot_isolate(
* the freelist and try again.
*/
if (dqp->q_nrefs) {
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
trace_xfs_dqreclaim_want(dqp);
@@ -519,7 +519,7 @@ xfs_qm_dquot_isolate(
* Prevent lookups now that we are past the point of no return.
*/
dqp->q_flags |= XFS_DQFLAG_FREEING;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
ASSERT(dqp->q_nrefs == 0);
list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
@@ -529,7 +529,7 @@ xfs_qm_dquot_isolate(
return LRU_REMOVED;
out_miss_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
out_miss_busy:
trace_xfs_dqreclaim_busy(dqp);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
@@ -1467,7 +1467,7 @@ xfs_qm_flush_one(
struct xfs_buf *bp = NULL;
int error = 0;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
if (dqp->q_flags & XFS_DQFLAG_FREEING)
goto out_unlock;
if (!XFS_DQ_IS_DIRTY(dqp))
@@ -1489,7 +1489,7 @@ xfs_qm_flush_one(
xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
out_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return error;
}
@@ -1952,7 +1952,7 @@ xfs_qm_vop_dqalloc(
/*
* Get the ilock in the right order.
*/
- xfs_dqunlock(uq);
+ mutex_unlock(&uq->q_qlock);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
@@ -1974,7 +1974,7 @@ xfs_qm_vop_dqalloc(
ASSERT(error != -ENOENT);
goto error_rele;
}
- xfs_dqunlock(gq);
+ mutex_unlock(&gq->q_qlock);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
@@ -1992,7 +1992,7 @@ xfs_qm_vop_dqalloc(
ASSERT(error != -ENOENT);
goto error_rele;
}
- xfs_dqunlock(pq);
+ mutex_unlock(&pq->q_qlock);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
@@ -2079,7 +2079,7 @@ xfs_qm_vop_chown(
* back now.
*/
tp->t_flags |= XFS_TRANS_DIRTY;
- xfs_dqlock(prevdq);
+ mutex_lock(&prevdq->q_qlock);
if (isrt) {
ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
prevdq->q_rtb.reserved -= ip->i_delayed_blks;
@@ -2087,7 +2087,7 @@ xfs_qm_vop_chown(
ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
prevdq->q_blk.reserved -= ip->i_delayed_blks;
}
- xfs_dqunlock(prevdq);
+ mutex_unlock(&prevdq->q_qlock);
/*
* Take an extra reference, because the inode is going to keep
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 0c78f30fa4a3..59ef382900fe 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -303,13 +303,13 @@ xfs_qm_scall_setqlim(
}
defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
if (error)
goto out_rele;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
xfs_trans_dqjoin(tp, dqp);
/*
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 765456bf3428..c842ce06acd6 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -393,7 +393,7 @@ xfs_trans_dqlockedjoin(
unsigned int i;
ASSERT(q[0].qt_dquot != NULL);
if (q[1].qt_dquot == NULL) {
- xfs_dqlock(q[0].qt_dquot);
+ mutex_lock(&q[0].qt_dquot->q_qlock);
xfs_trans_dqjoin(tp, q[0].qt_dquot);
} else if (q[2].qt_dquot == NULL) {
xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
@@ -693,7 +693,7 @@ xfs_trans_unreserve_and_mod_dquots(
locked = already_locked;
if (qtrx->qt_blk_res) {
if (!locked) {
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
locked = true;
}
dqp->q_blk.reserved -=
@@ -701,7 +701,7 @@ xfs_trans_unreserve_and_mod_dquots(
}
if (qtrx->qt_ino_res) {
if (!locked) {
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
locked = true;
}
dqp->q_ino.reserved -=
@@ -710,14 +710,14 @@ xfs_trans_unreserve_and_mod_dquots(
if (qtrx->qt_rtblk_res) {
if (!locked) {
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
locked = true;
}
dqp->q_rtb.reserved -=
(xfs_qcnt_t)qtrx->qt_rtblk_res;
}
if (locked && !already_locked)
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
}
}
@@ -820,7 +820,7 @@ xfs_trans_dqresv(
struct xfs_dquot_res *blkres;
struct xfs_quota_limits *qlim;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
@@ -887,16 +887,16 @@ xfs_trans_dqresv(
XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
goto error_corrupt;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return 0;
error_return:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
return -ENOSPC;
return -EDQUOT;
error_corrupt:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
return -EFSCORRUPTED;
--
2.47.3