The Linux Kernel Mailing List
 help / color / mirror / Atom feed
* [PATCH] xfs: snapshot current CIL sequence under xc_push_lock
@ 2026-05-06  1:58 Cen Zhang
  2026-05-07  3:41 ` Dave Chinner
  0 siblings, 1 reply; 4+ messages in thread
From: Cen Zhang @ 2026-05-06  1:58 UTC (permalink / raw)
  To: cem; +Cc: linux-xfs, linux-kernel, baijiaju1990, Cen Zhang

xlog_cil_force() and xlog_cil_flush() both use the current CIL
checkpoint sequence to request an immediate push.  They currently read
xc_current_sequence before xlog_cil_push_now() takes xc_push_lock.

The CIL push worker advances xc_current_sequence under xc_push_lock while
switching to a new checkpoint context.  If a current-checkpoint force reads
an old sequence and then publishes it as xc_push_seq after the worker has
moved to the next context, xlog_cil_push_work() can treat the request as a
previously pushed sequence and skip it via the push_seq < ctx->sequence
check.  That can leave the current dirty checkpoint unqueued by a
force/flush request.

Make xlog_cil_push_now() resolve push_seq == 0 to the current sequence
while holding xc_push_lock, and return the resolved sequence for tracing
and waiting.  Route xlog_cil_force() and xlog_cil_flush() through that path
so the current-sequence snapshot and xc_push_seq publication are serialized
with the context switch.

Signed-off-by: Cen Zhang <zzzccc427@gmail.com>
---
 fs/xfs/xfs_log_cil.c  | 34 +++++++++++++++++-----------------
 fs/xfs/xfs_log_priv.h |  2 +-
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index edc368938f30..6a1ced3c9314 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -1707,24 +1707,25 @@ xlog_cil_push_background(
  * mechanism. Hence in this case we need to pass a flag to the push work to
  * indicate it needs to flush the commit record itself.
  */
-static void
+static xfs_csn_t
 xlog_cil_push_now(
 	struct xlog	*log,
-	xfs_lsn_t	push_seq,
+	xfs_csn_t	push_seq,
 	bool		async)
 {
 	struct xfs_cil	*cil = log->l_cilp;
 
 	if (!cil)
-		return;
-
-	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
+		return 0;
 
 	/* start on any pending background push to minimise wait time on it */
 	if (!async)
 		flush_workqueue(cil->xc_push_wq);
 
 	spin_lock(&cil->xc_push_lock);
+	if (!push_seq)
+		push_seq = cil->xc_current_sequence;
+	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
 
 	/*
 	 * If this is an async flush request, we always need to set the
@@ -1742,12 +1743,13 @@ xlog_cil_push_now(
 	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
 	    push_seq <= cil->xc_push_seq) {
 		spin_unlock(&cil->xc_push_lock);
-		return;
+		return push_seq;
 	}
 
 	cil->xc_push_seq = push_seq;
 	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
 	spin_unlock(&cil->xc_push_lock);
+	return push_seq;
 }
 
 bool
@@ -1880,16 +1882,16 @@ void
 xlog_cil_flush(
 	struct xlog	*log)
 {
-	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;
+	struct xfs_cil	*cil = log->l_cilp;
+	xfs_csn_t	seq = xlog_cil_push_now(log, 0, true);
 
 	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
-	xlog_cil_push_now(log, seq, true);
 
 	/*
 	 * If the CIL is empty, make sure that any previous checkpoint that may
 	 * still be in an active iclog is pushed to stable storage.
 	 */
-	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
+	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
 		xfs_log_force(log->l_mp, 0);
 }
 
@@ -1911,12 +1913,7 @@ xlog_cil_force_seq(
 	struct xfs_cil		*cil = log->l_cilp;
 	struct xfs_cil_ctx	*ctx;
 	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;
-
-	ASSERT(sequence <= cil->xc_current_sequence);
-
-	if (!sequence)
-		sequence = cil->xc_current_sequence;
-	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
+	bool			traced = false;
 
 	/*
 	 * check to see if we need to force out the current context.
@@ -1924,7 +1921,11 @@ xlog_cil_force_seq(
 	 * so no need to deal with it here.
 	 */
 restart:
-	xlog_cil_push_now(log, sequence, false);
+	sequence = xlog_cil_push_now(log, sequence, false);
+	if (!traced) {
+		trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
+		traced = true;
+	}
 
 	/*
 	 * See if we can find a previous sequence still committing.
@@ -2066,4 +2067,3 @@ xlog_cil_destroy(
 	destroy_workqueue(cil->xc_push_wq);
 	kfree(cil);
 }
-
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index cf1e4ce61a8c..4ae98d3800ea 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -582,7 +582,7 @@ xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
 static inline void
 xlog_cil_force(struct xlog *log)
 {
-	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
+	xlog_cil_force_seq(log, 0);
 }
 
 /*

^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-05-12  5:34 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-06  1:58 [PATCH] xfs: snapshot current CIL sequence under xc_push_lock Cen Zhang
2026-05-07  3:41 ` Dave Chinner
2026-05-07  4:34   ` Cen Zhang
2026-05-12  5:34     ` Dave Chinner

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox