public inbox for linux-xfs@vger.kernel.org
 help / color / mirror / Atom feed
From: Dave Chinner <david@fromorbit.com>
To: xfs@oss.sgi.com
Subject: [PATCH 3/3] xfs: wait on IO completion inside an IO context
Date: Fri, 23 Jul 2010 20:41:18 +1000	[thread overview]
Message-ID: <1279881678-1660-4-git-send-email-david@fromorbit.com> (raw)
In-Reply-To: <1279881678-1660-1-git-send-email-david@fromorbit.com>

From: Dave Chinner <dchinner@redhat.com>

To wait for IOs to drain from the inode while we are inside an ioend context, we
have to wait until the inode io count drops to 1 - the reference we hold - rather
than zero. Add functionality to the ioend wait subsystem to do this.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/linux-2.6/xfs_aops.c |   50 +++++++++++++++++++++++++++++++++++++++++-
 fs/xfs/xfs_inode.h          |   13 ++++++-----
 2 files changed, 55 insertions(+), 8 deletions(-)

diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 5682490..ec499f2 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -64,6 +64,9 @@ xfs_ioend_init(void)
 		init_waitqueue_head(&xfs_ioend_wq[i]);
 }
 
+/*
+ * wait for all IO to drain from the inode
+ */
 void
 xfs_ioend_wait(
 	xfs_inode_t	*ip)
@@ -73,12 +76,55 @@ xfs_ioend_wait(
 	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
 }
 
+/*
+ * If we have an active ioend in the caller context (e.g.
+ * xfs_get_blocks_direct) we need to wait for the count to drop to one before
+ * we are woken.
+ *
+ * For this to work in the context of concurrent callers (concurrent direct IO),
+ * this function must be called with the iolock held exclusively to prevent
+ * other IOs blocking here and preventing the count from ever dropping to 1.
+ */
+STATIC void
+xfs_ioend_wait_excl(
+	xfs_inode_t	*ip)
+{
+	wait_queue_head_t *wq = to_ioend_wq(ip);
+
+	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+	xfs_iflags_set(ip, XFS_IIOEND_WAIT_EXCL);
+	wait_event(*wq, (atomic_read(&ip->i_iocount) == 1));
+	xfs_iflags_clear(ip, XFS_IIOEND_WAIT_EXCL);
+}
+
 STATIC void
 xfs_ioend_wake(
 	xfs_inode_t	*ip)
 {
-	if (atomic_dec_and_test(&ip->i_iocount))
+	if (atomic_dec_and_test(&ip->i_iocount)) {
+		wake_up(to_ioend_wq(ip));
+		return;
+	}
+
+	/*
+	 * do an unlocked check for an exclusive wait before trying to get
+	 * spinlocks to avoid hurting the normal path too much. We can do this
+	 * check unlocked because if the flag is not set here and this is the
+	 * last IO remaining (i.e. iocount == 1 after the above decrement),
+	 * then any code that enters xfs_ioend_wait_excl() will now see that
+	 * i_iocount == 1 and return immediately. Hence we don't need to issue
+	 * a wakeup in this case, and it keeps the common case overhead as low
+	 * as possible.
+	 */
+	smp_rmb();
+	if (!__xfs_iflags_test(ip, XFS_IIOEND_WAIT_EXCL))
+		return;
+
+	spin_lock(&ip->i_flags_lock);
+	if (atomic_read(&ip->i_iocount) == 1 &&
+	    __xfs_iflags_test(ip, XFS_IIOEND_WAIT_EXCL))
 		wake_up(to_ioend_wq(ip));
+	spin_unlock(&ip->i_flags_lock);
 }
 
 void
@@ -1334,7 +1380,7 @@ remap:
 	    (flags & GET_BLOCKS_UNALIGNED) && !iolock_changed) {
 		xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
 		xfs_ilock(XFS_I(inode), XFS_IOLOCK_EXCL);
-		xfs_ioend_wait(XFS_I(inode));
+		xfs_ioend_wait_excl(XFS_I(inode));
 		iolock_changed = 1;
 		goto remap;
 	}
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0898c54..40b5203 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -357,12 +357,13 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
 /*
  * In-core inode flags.
  */
-#define XFS_IRECLAIM    0x0001  /* we have started reclaiming this inode    */
-#define XFS_ISTALE	0x0002	/* inode has been staled */
-#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */
-#define XFS_INEW	0x0008	/* inode has just been allocated */
-#define XFS_IFILESTREAM	0x0010	/* inode is in a filestream directory */
-#define XFS_ITRUNCATED	0x0020	/* truncated down so flush-on-close */
+#define XFS_IRECLAIM		0x0001  /* have started reclaiming this inode */
+#define XFS_ISTALE		0x0002	/* inode has been staled */
+#define XFS_IRECLAIMABLE	0x0004	/* inode can be reclaimed */
+#define XFS_INEW		0x0008	/* inode has just been allocated */
+#define XFS_IFILESTREAM		0x0010	/* inode is in a filestream directory */
+#define XFS_ITRUNCATED		0x0020	/* truncated down so flush-on-close */
+#define XFS_IIOEND_WAIT_EXCL	0x0040	/* io completion waiter in io context */
 
 /*
  * Flags for inode locking.
-- 
1.7.1

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

  parent reply	other threads:[~2010-07-23 10:38 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-07-23 10:41 [RFC, PATCH 0/3] serialise concurrent direct IO sub-block zeroing Dave Chinner
2010-07-23 10:41 ` [PATCH 1/3] fs: get_blocks needs an unaligned mapping flag Dave Chinner
2010-07-23 15:29   ` Alex Elder
2010-07-23 10:41 ` [PATCH 2/3] xfs: serialise unaligned direct IO into unwritten extents Dave Chinner
2010-07-23 15:30   ` Alex Elder
2010-07-23 10:41 ` Dave Chinner [this message]
2010-07-23 15:30   ` [PATCH 3/3] xfs: wait on IO completion inside an IO context Alex Elder
2010-07-23 19:20 ` [RFC, PATCH 0/3] serialise concurrent direct IO sub-block zeroing Eric Sandeen
2010-07-24  0:09   ` Dave Chinner
2010-07-24 11:12     ` Alex Elder
2010-07-25 11:37       ` Dave Chinner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1279881678-1660-4-git-send-email-david@fromorbit.com \
    --to=david@fromorbit.com \
    --cc=xfs@oss.sgi.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox