linux-fsdevel.vger.kernel.org archive mirror
* [PATCH v2] writeback: Do not sync data dirtied after sync start
@ 2013-09-26 19:23 Jan Kara
  2013-09-27  0:55 ` Dave Chinner
  2013-09-28  0:31 ` Fengguang Wu
  0 siblings, 2 replies; 7+ messages in thread
From: Jan Kara @ 2013-09-26 19:23 UTC (permalink / raw)
  To: Al Viro; +Cc: Wu Fengguang, linux-fsdevel, Jan Kara

When there are processes heavily creating small files while sync(2) is
running, it can easily happen that quite a few new files are created
between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2). That can
happen especially if there are several busy filesystems (remember that
sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
phase on one fs before starting it on another fs). Because the
WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and
cache flush for each inode in ext3), the resulting sync(2) times are
rather large.

The following script reproduces the problem:

function run_writers
{
  for (( i = 0; i < 10; i++ )); do
    mkdir $1/dir$i
    for (( j = 0; j < 40000; j++ )); do
      dd if=/dev/zero of=$1/dir$i/$j bs=4k count=4 &>/dev/null
    done &
  done
}

for dir in "$@"; do
  run_writers $dir
done

sleep 40
time sync
======
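
For concreteness, the script takes the directories to fill as its
arguments; a hypothetical invocation against two busy filesystems (the
script name and mount points here are made up) would be:

  bash sync-repro.sh /mnt/test1 /mnt/test2

where sync-repro.sh holds the script above. As the discussion below
shows, running against two or more filesystems matters.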

Fix the problem by disregarding inodes dirtied after sync(2) was called
in the WB_SYNC_ALL pass. To allow for this, sync_inodes_sb() now takes
a timestamp of the moment sync started, which is used when setting up
the work items for the flusher threads.

To give some numbers: when the above script is run on two ext4
filesystems on a simple SATA drive, the average sync time over 10 runs
is 267.549 seconds with a standard deviation of 104.799426. With the
patched kernel, the average sync time over 10 runs is 2.995 seconds
with a standard deviation of 0.096.

Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/fs-writeback.c         | 17 ++++++++---------
 fs/sync.c                 | 15 +++++++++------
 fs/xfs/xfs_super.c        |  2 +-
 include/linux/writeback.h |  2 +-
 4 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 9f4935b..70837da 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -39,7 +39,7 @@
 struct wb_writeback_work {
 	long nr_pages;
 	struct super_block *sb;
-	unsigned long *older_than_this;
+	unsigned long older_than_this;
 	enum writeback_sync_modes sync_mode;
 	unsigned int tagged_writepages:1;
 	unsigned int for_kupdate:1;
@@ -248,8 +248,7 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (work->older_than_this &&
-		    inode_dirtied_after(inode, *work->older_than_this))
+		if (inode_dirtied_after(inode, work->older_than_this))
 			break;
 		list_move(&inode->i_wb_list, &tmp);
 		moved++;
@@ -791,12 +790,11 @@ static long wb_writeback(struct bdi_writeback *wb,
 {
 	unsigned long wb_start = jiffies;
 	long nr_pages = work->nr_pages;
-	unsigned long oldest_jif;
 	struct inode *inode;
 	long progress;
 
-	oldest_jif = jiffies;
-	work->older_than_this = &oldest_jif;
+	if (!work->older_than_this)
+		work->older_than_this = jiffies;
 
 	spin_lock(&wb->list_lock);
 	for (;;) {
@@ -830,10 +828,10 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * safe.
 		 */
 		if (work->for_kupdate) {
-			oldest_jif = jiffies -
+			work->older_than_this = jiffies -
 				msecs_to_jiffies(dirty_expire_interval * 10);
 		} else if (work->for_background)
-			oldest_jif = jiffies;
+			work->older_than_this = jiffies;
 
 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
@@ -1350,13 +1348,14 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
  * This function writes and waits on any dirty inode belonging to this
  * super_block.
  */
-void sync_inodes_sb(struct super_block *sb)
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
 		.sb		= sb,
 		.sync_mode	= WB_SYNC_ALL,
 		.nr_pages	= LONG_MAX,
+		.older_than_this = older_than_this,
 		.range_cyclic	= 0,
 		.done		= &done,
 		.reason		= WB_REASON_SYNC,
diff --git a/fs/sync.c b/fs/sync.c
index 905f3f6..ff96f99 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -27,10 +27,11 @@
  * wait == 1 case since in that case write_inode() functions do
  * sync_dirty_buffer() and thus effectively write one block at a time.
  */
-static int __sync_filesystem(struct super_block *sb, int wait)
+static int __sync_filesystem(struct super_block *sb, int wait,
+			     unsigned long start)
 {
 	if (wait)
-		sync_inodes_sb(sb);
+		sync_inodes_sb(sb, start);
 	else
 		writeback_inodes_sb(sb, WB_REASON_SYNC);
 
@@ -47,6 +48,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
 int sync_filesystem(struct super_block *sb)
 {
 	int ret;
+	unsigned long start = jiffies;
 
 	/*
 	 * We need to be protected against the filesystem going from
@@ -60,17 +62,17 @@ int sync_filesystem(struct super_block *sb)
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
-	ret = __sync_filesystem(sb, 0);
+	ret = __sync_filesystem(sb, 0, start);
 	if (ret < 0)
 		return ret;
-	return __sync_filesystem(sb, 1);
+	return __sync_filesystem(sb, 1, start);
 }
 EXPORT_SYMBOL_GPL(sync_filesystem);
 
 static void sync_inodes_one_sb(struct super_block *sb, void *arg)
 {
 	if (!(sb->s_flags & MS_RDONLY))
-		sync_inodes_sb(sb);
+		sync_inodes_sb(sb, *((unsigned long *)arg));
 }
 
 static void sync_fs_one_sb(struct super_block *sb, void *arg)
@@ -102,9 +104,10 @@ static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
 SYSCALL_DEFINE0(sync)
 {
 	int nowait = 0, wait = 1;
+	unsigned long start = jiffies;
 
 	wakeup_flusher_threads(0, WB_REASON_SYNC);
-	iterate_supers(sync_inodes_one_sb, NULL);
+	iterate_supers(sync_inodes_one_sb, &start);
 	iterate_supers(sync_fs_one_sb, &nowait);
 	iterate_supers(sync_fs_one_sb, &wait);
 	iterate_bdevs(fdatawrite_one_bdev, NULL);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 15188cc..8968f50 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -918,7 +918,7 @@ xfs_flush_inodes(
 	struct super_block	*sb = mp->m_super;
 
 	if (down_read_trylock(&sb->s_umount)) {
-		sync_inodes_sb(sb);
+		sync_inodes_sb(sb, jiffies);
 		up_read(&sb->s_umount);
 	}
 }
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 021b8a3..fc0e432 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -97,7 +97,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 				  enum wb_reason reason);
-void sync_inodes_sb(struct super_block *);
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);
 
-- 
1.8.1.4



* Re: [PATCH v2] writeback: Do not sync data dirtied after sync start
  2013-09-26 19:23 [PATCH v2] writeback: Do not sync data dirtied after sync start Jan Kara
@ 2013-09-27  0:55 ` Dave Chinner
  2013-09-27  9:37   ` Jan Kara
  2013-09-28  0:31 ` Fengguang Wu
  1 sibling, 1 reply; 7+ messages in thread
From: Dave Chinner @ 2013-09-27  0:55 UTC (permalink / raw)
  To: Jan Kara; +Cc: Al Viro, Wu Fengguang, linux-fsdevel

On Thu, Sep 26, 2013 at 09:23:58PM +0200, Jan Kara wrote:
> When there are processes heavily creating small files while sync(2) is
> running, it can easily happen that quite a few new files are created
> between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2). That can
> happen especially if there are several busy filesystems (remember that
> sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
> phase on one fs before starting it on another fs). Because the
> WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and
> cache flush for each inode in ext3), the resulting sync(2) times are
> rather large.

Yup, that can be a problem.

Build warning from the patch:

In file included from include/trace/ftrace.h:575:0,
                 from include/trace/define_trace.h:90,
                 from include/trace/events/writeback.h:603,
                 from fs/fs-writeback.c:89:
include/trace/events/writeback.h: In function 'ftrace_raw_event_writeback_queue_io':
include/trace/events/writeback.h:277:1: warning: initialization makes pointer from integer without a cast [enabled by default]
In file included from include/trace/ftrace.h:711:0,
                 from include/trace/define_trace.h:90,
                 from include/trace/events/writeback.h:603,
                 from fs/fs-writeback.c:89:
include/trace/events/writeback.h: In function 'perf_trace_writeback_queue_io':
include/trace/events/writeback.h:277:1: warning: initialization makes pointer from integer without a cast [enabled by default]

> The following script reproduces the problem:
> 
> function run_writers
> {
>   for (( i = 0; i < 10; i++ )); do
>     mkdir $1/dir$i
>     for (( j = 0; j < 40000; j++ )); do
>       dd if=/dev/zero of=$1/dir$i/$j bs=4k count=4 &>/dev/null
>     done &
>   done
> }
> 
> for dir in "$@"; do
>   run_writers $dir
> done
> 
> sleep 40
> time sync
> ======
> 
> Fix the problem by disregarding inodes dirtied after sync(2) was
> called in the WB_SYNC_ALL pass. To allow for this, sync_inodes_sb()
> now takes a timestamp of the moment sync started, which is used when
> setting up the work items for the flusher threads.
> 
> To give some numbers: when the above script is run on two ext4
> filesystems on a simple SATA drive, the average sync time over 10
> runs is 267.549 seconds with a standard deviation of 104.799426.
> With the patched kernel, the average sync time over 10 runs is 2.995
> seconds with a standard deviation of 0.096.

Hmmmm. 2.8 seconds on my XFS perf VM without the patch. Ok, try a
smaller VM backed by a single spindle of spinning rust rather than
SSDs. Over 10 runs I see:

kernel		min	max	av
vanilla		0.18s	4.46s	1.63s
patched		0.14s	0.45s	0.28s

Definitely an improvement, but nowhere near the numbers you are
seeing for ext4 - maybe XFS isn't as susceptible to this problem
as ext4? Nope, ext4 on an unpatched kernel gives 1.66/6.81/3.12s
(which is less than your patched kernel results :), so it must be
something else that is configuration/hardware related.

Anyway, the change looks good; it just needs the above warning fixed...

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com


* Re: [PATCH v2] writeback: Do not sync data dirtied after sync start
  2013-09-27  0:55 ` Dave Chinner
@ 2013-09-27  9:37   ` Jan Kara
  2013-09-27 23:22     ` Dave Chinner
  0 siblings, 1 reply; 7+ messages in thread
From: Jan Kara @ 2013-09-27  9:37 UTC (permalink / raw)
  To: Dave Chinner; +Cc: Jan Kara, Al Viro, Wu Fengguang, linux-fsdevel

On Fri 27-09-13 10:55:53, Dave Chinner wrote:
> On Thu, Sep 26, 2013 at 09:23:58PM +0200, Jan Kara wrote:
> > When there are processes heavily creating small files while sync(2) is
> > running, it can easily happen that quite a few new files are created
> > between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2). That can
> > happen especially if there are several busy filesystems (remember that
> > sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
> > phase on one fs before starting it on another fs). Because the
> > WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and
> > cache flush for each inode in ext3), the resulting sync(2) times are
> > rather large.
> 
> Yup, that can be a problem.
> 
> Build warning from the patch:
> 
> In file included from include/trace/ftrace.h:575:0,
>                  from include/trace/define_trace.h:90,
>                  from include/trace/events/writeback.h:603,
>                  from fs/fs-writeback.c:89:
> include/trace/events/writeback.h: In function 'ftrace_raw_event_writeback_queue_io':
> include/trace/events/writeback.h:277:1: warning: initialization makes pointer from integer without a cast [enabled by default]
> In file included from include/trace/ftrace.h:711:0,
>                  from include/trace/define_trace.h:90,
>                  from include/trace/events/writeback.h:603,
>                  from fs/fs-writeback.c:89:
> include/trace/events/writeback.h: In function 'perf_trace_writeback_queue_io':
> include/trace/events/writeback.h:277:1: warning: initialization makes pointer from integer without a cast [enabled by default]
  Thanks for catching this. I'll send v3 in a minute.

> > The following script reproduces the problem:
> > 
> > function run_writers
> > {
> >   for (( i = 0; i < 10; i++ )); do
> >     mkdir $1/dir$i
> >     for (( j = 0; j < 40000; j++ )); do
> >       dd if=/dev/zero of=$1/dir$i/$j bs=4k count=4 &>/dev/null
> >     done &
> >   done
> > }
> > 
> > for dir in "$@"; do
> >   run_writers $dir
> > done
> > 
> > sleep 40
> > time sync
> > ======
> > 
> > Fix the problem by disregarding inodes dirtied after sync(2) was
> > called in the WB_SYNC_ALL pass. To allow for this, sync_inodes_sb()
> > now takes a timestamp of the moment sync started, which is used when
> > setting up the work items for the flusher threads.
> > 
> > To give some numbers: when the above script is run on two ext4
> > filesystems on a simple SATA drive, the average sync time over 10
> > runs is 267.549 seconds with a standard deviation of 104.799426.
> > With the patched kernel, the average sync time over 10 runs is 2.995
> > seconds with a standard deviation of 0.096.
> 
> Hmmmm. 2.8 seconds on my XFS perf VM without the patch. Ok, try a
> smaller VM backed by a single spindle of spinning rust rather than
> SSDs. Over 10 runs I see:
> 
> kernel		min	max	av
> vanilla		0.18s	4.46s	1.63s
> patched		0.14s	0.45s	0.28s
> 
> Definitely an improvement, but nowhere near the numbers you are
> seeing for ext4 - maybe XFS isn't as susceptible to this problem
> as ext4? Nope, ext4 on an unpatched kernel gives 1.66/6.81/3.12s
> (which is less than your patched kernel results :), so it must be
> something else that is configuration/hardware related.
  Have you really used *two* (or more) busy filesystems? That makes the
problem an order of magnitude worse for me. The numbers I've posted are
for such a situation...

								Honza
-- 
Jan Kara <jack@suse.cz>
SUSE Labs, CR


* Re: [PATCH v2] writeback: Do not sync data dirtied after sync start
  2013-09-27  9:37   ` Jan Kara
@ 2013-09-27 23:22     ` Dave Chinner
  0 siblings, 0 replies; 7+ messages in thread
From: Dave Chinner @ 2013-09-27 23:22 UTC (permalink / raw)
  To: Jan Kara; +Cc: Al Viro, Wu Fengguang, linux-fsdevel

On Fri, Sep 27, 2013 at 11:37:45AM +0200, Jan Kara wrote:
> On Fri 27-09-13 10:55:53, Dave Chinner wrote:
> > On Thu, Sep 26, 2013 at 09:23:58PM +0200, Jan Kara wrote:
> > > When there are processes heavily creating small files while sync(2) is
> > > running, it can easily happen that quite a few new files are created
> > > between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2). That can
> > > happen especially if there are several busy filesystems (remember that
> > > sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
> > > phase on one fs before starting it on another fs). Because the
> > > WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and
> > > cache flush for each inode in ext3), the resulting sync(2) times are
> > > rather large.
....
> > > To give some numbers: when the above script is run on two ext4
> > > filesystems on a simple SATA drive, the average sync time over 10
> > > runs is 267.549 seconds with a standard deviation of 104.799426.
> > > With the patched kernel, the average sync time over 10 runs is 2.995
> > > seconds with a standard deviation of 0.096.
> > 
> > Hmmmm. 2.8 seconds on my XFS perf VM without the patch. Ok, try a
> > smaller VM backed by a single spindle of spinning rust rather than
> > SSDs. Over 10 runs I see:
> > 
> > kernel		min	max	av
> > vanilla		0.18s	4.46s	1.63s
> > patched		0.14s	0.45s	0.28s
> > 
> > Definitely an improvement, but nowhere near the numbers you are
> > seeing for ext4 - maybe XFS isn't as susceptible to this problem
> > as ext4? Nope, ext4 on an unpatched kernel gives 1.66/6.81/3.12s
> > (which is less than your patched kernel results :), so it must be
> > something else that is configuration/hardware related.
>   Have you really used *two* (or more) busy filesystems? That makes
> the problem an order of magnitude worse for me. The numbers I've
> posted are for such a situation...

I had to bump it up to 5 active filesystems before it fell off the
order-of-magnitude cliff. Still not the ~260s times you were seeing
- only about ~30s per sync - but you are right that there definitely
is a load point where things go really bad. Now that I've found that
point, I can confirm that the patch fixes it :)

Cheers,

Dave.
-- 
Dave Chinner
david@fromorbit.com


* Re: [PATCH v2] writeback: Do not sync data dirtied after sync start
  2013-09-26 19:23 [PATCH v2] writeback: Do not sync data dirtied after sync start Jan Kara
  2013-09-27  0:55 ` Dave Chinner
@ 2013-09-28  0:31 ` Fengguang Wu
  2013-09-30  9:31   ` Jan Kara
  1 sibling, 1 reply; 7+ messages in thread
From: Fengguang Wu @ 2013-09-28  0:31 UTC (permalink / raw)
  To: Jan Kara; +Cc: Al Viro, linux-fsdevel

Hi Jan,

On Thu, Sep 26, 2013 at 09:23:58PM +0200, Jan Kara wrote:
> When there are processes heavily creating small files while sync(2) is
> running, it can easily happen that quite a few new files are created
> between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2). That can
> happen especially if there are several busy filesystems (remember that
> sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
> phase on one fs before starting it on another fs). Because the
> WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and
> cache flush for each inode in ext3), the resulting sync(2) times are
> rather large.

This is a very good change. An old problem that may be worth noting
here is that inode_dirtied_after() has a workaround for inodes whose
->dirtied_when is never updated because they are constantly redirtied.
That workaround still leaves a small time window in which sync() may
skip an inode that should be synced. Since the problem existed before
this patch, I'm fine with this change.

Reviewed-by: Fengguang Wu <fengguang.wu@intel.com>

Thanks!


* Re: [PATCH v2] writeback: Do not sync data dirtied after sync start
  2013-09-28  0:31 ` Fengguang Wu
@ 2013-09-30  9:31   ` Jan Kara
  2013-10-03 13:20     ` Fengguang Wu
  0 siblings, 1 reply; 7+ messages in thread
From: Jan Kara @ 2013-09-30  9:31 UTC (permalink / raw)
  To: Fengguang Wu; +Cc: Jan Kara, Al Viro, linux-fsdevel

On Sat 28-09-13 08:31:19, Wu Fengguang wrote:
> Hi Jan,
> 
> On Thu, Sep 26, 2013 at 09:23:58PM +0200, Jan Kara wrote:
> > When there are processes heavily creating small files while sync(2) is
> > running, it can easily happen that quite a few new files are created
> > between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2). That can
> > happen especially if there are several busy filesystems (remember that
> > sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
> > phase on one fs before starting it on another fs). Because the
> > WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and
> > cache flush for each inode in ext3), the resulting sync(2) times are
> > rather large.
> 
> > This is a very good change. An old problem that may be worth noting
> > here is that inode_dirtied_after() has a workaround for inodes whose
> > ->dirtied_when is never updated because they are constantly
> > redirtied. That workaround still leaves a small time window in which
> > sync() may skip an inode that should be synced. Since the problem
> > existed before this patch, I'm fine with this change.
  Thanks for the review. Do you mean the situation where the jiffies
value in inode->dirtied_when essentially wraps around on 32-bit systems
wrt the current time? Yes, that is still a problem for which I don't
know a better fix than the current workaround.
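
(For the curious: the wrap-safe comparison underneath is the standard
jiffies macro from include/linux/jiffies.h, roughly:

#define time_after(a,b)		\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
	 ((long)((b) - (a)) < 0))

The signed subtraction gives the right answer only while the two values
are less than half the counter range apart; a dirtied_when that has
been stuck for longer than that - possible on 32-bit, where jiffies
wraps in about 49.7 days at HZ=1000 - is exactly the case the current
workaround has to paper over.)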

								Honza
-- 
Jan Kara <jack@suse.cz>
SUSE Labs, CR


* Re: [PATCH v2] writeback: Do not sync data dirtied after sync start
  2013-09-30  9:31   ` Jan Kara
@ 2013-10-03 13:20     ` Fengguang Wu
  0 siblings, 0 replies; 7+ messages in thread
From: Fengguang Wu @ 2013-10-03 13:20 UTC (permalink / raw)
  To: Jan Kara; +Cc: Al Viro, linux-fsdevel

On Mon, Sep 30, 2013 at 11:31:49AM +0200, Jan Kara wrote:
> On Sat 28-09-13 08:31:19, Wu Fengguang wrote:
> > Hi Jan,
> > 
> > On Thu, Sep 26, 2013 at 09:23:58PM +0200, Jan Kara wrote:
> > > When there are processes heavily creating small files while sync(2) is
> > > running, it can easily happen that quite a few new files are created
> > > between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2). That can
> > > happen especially if there are several busy filesystems (remember that
> > > sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
> > > phase on one fs before starting it on another fs). Because the
> > > WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and
> > > cache flush for each inode in ext3), the resulting sync(2) times are
> > > rather large.
> > 
> > This is a very good change. An old problem that may be worth noting
> > here is that inode_dirtied_after() has a workaround for inodes whose
> > ->dirtied_when is never updated because they are constantly
> > redirtied. That workaround still leaves a small time window in which
> > sync() may skip an inode that should be synced. Since the problem
> > existed before this patch, I'm fine with this change.
>   Thanks for the review. Do you mean the situation where the jiffies
> value in inode->dirtied_when essentially wraps around on 32-bit
> systems wrt the current time?

Yes.

> Yes, that is still a problem for which I don't know a better fix than
> the current workaround.

OK, that's fine. I don't have any ideas on this, either.

Thanks,
Fengguang

