* [PATCH 1/3] writeback: simplify the write back thread queue
@ 2010-06-19 21:07 Christoph Hellwig
2010-06-20 8:40 ` Artem Bityutskiy
2010-06-21 7:47 ` Jens Axboe
0 siblings, 2 replies; 7+ messages in thread
From: Christoph Hellwig @ 2010-06-19 21:07 UTC (permalink / raw)
To: axboe; +Cc: linux-fsdevel
First remove items from work_list as soon as we start working on them. This
means we don't have to track any pending or visited state and can get
rid of all the RCU magic freeing the work items - we can simply free
them once the operation has finished. Second use a real completion for
tracking synchronous requests - if the caller sets the completion pointer
we complete it, otherwise use it as a boolean indicator that we can free
the work item directly. Third unify struct wb_writeback_args and struct
bdi_work into a single data structure, wb_writeback_work. Previously we
set all parameters into a struct wb_writeback_args, copied it into
struct bdi_work, copied it again on the stack to use it there. Instead,
just allocate one structure dynamically or on the stack and use it
all the way through the stack.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Index: linux-2.6/fs/fs-writeback.c
===================================================================
--- linux-2.6.orig/fs/fs-writeback.c 2010-06-19 19:08:19.589011809 +0200
+++ linux-2.6/fs/fs-writeback.c 2010-06-19 19:39:59.184265823 +0200
@@ -38,43 +38,18 @@ int nr_pdflush_threads;
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
-struct wb_writeback_args {
+struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
enum writeback_sync_modes sync_mode;
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
-};
-/*
- * Work items for the bdi_writeback threads
- */
-struct bdi_work {
struct list_head list; /* pending work list */
- struct rcu_head rcu_head; /* for RCU free/clear of work */
-
- unsigned long seen; /* threads that have seen this work */
- atomic_t pending; /* number of threads still to do work */
-
- struct wb_writeback_args args; /* writeback arguments */
-
- unsigned long state; /* flag bits, see WS_* */
-};
-
-enum {
- WS_INPROGRESS = 0,
- WS_ONSTACK,
+ struct completion *done; /* set if the caller waits */
};
-static inline void bdi_work_init(struct bdi_work *work,
- struct wb_writeback_args *args)
-{
- INIT_RCU_HEAD(&work->rcu_head);
- work->args = *args;
- __set_bit(WS_INPROGRESS, &work->state);
-}
-
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
@@ -87,49 +62,11 @@ int writeback_in_progress(struct backing
return !list_empty(&bdi->work_list);
}
-static void bdi_work_free(struct rcu_head *head)
+static void bdi_queue_work(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
{
- struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
-
- clear_bit(WS_INPROGRESS, &work->state);
- smp_mb__after_clear_bit();
- wake_up_bit(&work->state, WS_INPROGRESS);
-
- if (!test_bit(WS_ONSTACK, &work->state))
- kfree(work);
-}
-
-static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
-{
- /*
- * The caller has retrieved the work arguments from this work,
- * drop our reference. If this is the last ref, delete and free it
- */
- if (atomic_dec_and_test(&work->pending)) {
- struct backing_dev_info *bdi = wb->bdi;
-
- spin_lock(&bdi->wb_lock);
- list_del_rcu(&work->list);
- spin_unlock(&bdi->wb_lock);
-
- call_rcu(&work->rcu_head, bdi_work_free);
- }
-}
-
-static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
-{
- work->seen = bdi->wb_mask;
- BUG_ON(!work->seen);
- atomic_set(&work->pending, bdi->wb_cnt);
- BUG_ON(!bdi->wb_cnt);
-
- /*
- * list_add_tail_rcu() contains the necessary barriers to
- * make sure the above stores are seen before the item is
- * noticed on the list
- */
spin_lock(&bdi->wb_lock);
- list_add_tail_rcu(&work->list, &bdi->work_list);
+ list_add_tail(&work->list, &bdi->work_list);
spin_unlock(&bdi->wb_lock);
/*
@@ -146,55 +83,29 @@ static void bdi_queue_work(struct backin
}
}
-/*
- * Used for on-stack allocated work items. The caller needs to wait until
- * the wb threads have acked the work before it's safe to continue.
- */
-static void bdi_wait_on_work_done(struct bdi_work *work)
-{
- wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
- TASK_UNINTERRUPTIBLE);
-}
-
-static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
- struct wb_writeback_args *args)
+static void
+__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+ bool range_cyclic, bool for_background)
{
- struct bdi_work *work;
+ struct wb_writeback_work *work;
/*
* This is WB_SYNC_NONE writeback, so if allocation fails just
* wakeup the thread for old dirty data writeback
*/
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- bdi_work_init(work, args);
- bdi_queue_work(bdi, work);
- } else {
- struct bdi_writeback *wb = &bdi->wb;
-
- if (wb->task)
- wake_up_process(wb->task);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ if (bdi->wb.task)
+ wake_up_process(bdi->wb.task);
+ return;
}
-}
-
-/**
- * bdi_queue_work_onstack - start and wait for writeback
- * @sb: write inodes from this super_block
- *
- * Description:
- * This function initiates writeback and waits for the operation to
- * complete. Callers must hold the sb s_umount semaphore for
- * reading, to avoid having the super disappear before we are done.
- */
-static void bdi_queue_work_onstack(struct wb_writeback_args *args)
-{
- struct bdi_work work;
- bdi_work_init(&work, args);
- __set_bit(WS_ONSTACK, &work.state);
+ work->sync_mode = WB_SYNC_NONE;
+ work->nr_pages = nr_pages;
+ work->range_cyclic = range_cyclic;
+ work->for_background = for_background;
- bdi_queue_work(args->sb->s_bdi, &work);
- bdi_wait_on_work_done(&work);
+ bdi_queue_work(bdi, work);
}
/**
@@ -210,13 +121,7 @@ static void bdi_queue_work_onstack(struc
*/
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
- struct wb_writeback_args args = {
- .sync_mode = WB_SYNC_NONE,
- .nr_pages = nr_pages,
- .range_cyclic = 1,
- };
-
- bdi_alloc_queue_work(bdi, &args);
+ __bdi_start_writeback(bdi, nr_pages, true, false);
}
/**
@@ -230,13 +135,7 @@ void bdi_start_writeback(struct backing_
*/
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
- struct wb_writeback_args args = {
- .sync_mode = WB_SYNC_NONE,
- .nr_pages = LONG_MAX,
- .for_background = 1,
- .range_cyclic = 1,
- };
- bdi_alloc_queue_work(bdi, &args);
+ __bdi_start_writeback(bdi, LONG_MAX, true, true);
}
/*
@@ -694,14 +593,14 @@ static inline bool over_bground_thresh(v
* all dirty pages if they are all attached to "old" mappings.
*/
static long wb_writeback(struct bdi_writeback *wb,
- struct wb_writeback_args *args)
+ struct wb_writeback_work *work)
{
struct writeback_control wbc = {
- .sync_mode = args->sync_mode,
+ .sync_mode = work->sync_mode,
.older_than_this = NULL,
- .for_kupdate = args->for_kupdate,
- .for_background = args->for_background,
- .range_cyclic = args->range_cyclic,
+ .for_kupdate = work->for_kupdate,
+ .for_background = work->for_background,
+ .range_cyclic = work->range_cyclic,
};
unsigned long oldest_jif;
long wrote = 0;
@@ -721,24 +620,24 @@ static long wb_writeback(struct bdi_writ
/*
* Stop writeback when nr_pages has been consumed
*/
- if (args->nr_pages <= 0)
+ if (work->nr_pages <= 0)
break;
/*
* For background writeout, stop when we are below the
* background dirty threshold
*/
- if (args->for_background && !over_bground_thresh())
+ if (work->for_background && !over_bground_thresh())
break;
wbc.more_io = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
wbc.pages_skipped = 0;
- if (args->sb)
- __writeback_inodes_sb(args->sb, wb, &wbc);
+ if (work->sb)
+ __writeback_inodes_sb(work->sb, wb, &wbc);
else
writeback_inodes_wb(wb, &wbc);
- args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+ work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
/*
@@ -774,31 +673,21 @@ static long wb_writeback(struct bdi_writ
}
/*
- * Return the next bdi_work struct that hasn't been processed by this
- * wb thread yet. ->seen is initially set for each thread that exists
- * for this device, when a thread first notices a piece of work it
- * clears its bit. Depending on writeback type, the thread will notify
- * completion on either receiving the work (WB_SYNC_NONE) or after
- * it is done (WB_SYNC_ALL).
+ * Return the next wb_writeback_work struct that hasn't been processed yet.
*/
-static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
- struct bdi_writeback *wb)
+static struct wb_writeback_work *
+get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb)
{
- struct bdi_work *work, *ret = NULL;
-
- rcu_read_lock();
+ struct wb_writeback_work *work = NULL;
- list_for_each_entry_rcu(work, &bdi->work_list, list) {
- if (!test_bit(wb->nr, &work->seen))
- continue;
- clear_bit(wb->nr, &work->seen);
-
- ret = work;
- break;
+ spin_lock(&bdi->wb_lock);
+ if (!list_empty(&bdi->work_list)) {
+ work = list_entry(bdi->work_list.next,
+ struct wb_writeback_work, list);
+ list_del_init(&work->list);
}
-
- rcu_read_unlock();
- return ret;
+ spin_unlock(&bdi->wb_lock);
+ return work;
}
static long wb_check_old_data_flush(struct bdi_writeback *wb)
@@ -823,14 +712,14 @@ static long wb_check_old_data_flush(stru
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
if (nr_pages) {
- struct wb_writeback_args args = {
+ struct wb_writeback_work work = {
.nr_pages = nr_pages,
.sync_mode = WB_SYNC_NONE,
.for_kupdate = 1,
.range_cyclic = 1,
};
- return wb_writeback(wb, &args);
+ return wb_writeback(wb, &work);
}
return 0;
@@ -842,33 +731,27 @@ static long wb_check_old_data_flush(stru
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
struct backing_dev_info *bdi = wb->bdi;
- struct bdi_work *work;
+ struct wb_writeback_work *work;
long wrote = 0;
while ((work = get_next_work_item(bdi, wb)) != NULL) {
- struct wb_writeback_args args = work->args;
-
/*
* Override sync mode, in case we must wait for completion
+ * because this thread is exiting now.
*/
if (force_wait)
- work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
+ work->sync_mode = WB_SYNC_ALL;
- /*
- * If this isn't a data integrity operation, just notify
- * that we have seen this work and we are now starting it.
- */
- if (!test_bit(WS_ONSTACK, &work->state))
- wb_clear_pending(wb, work);
-
- wrote += wb_writeback(wb, &args);
+ wrote += wb_writeback(wb, work);
/*
- * This is a data integrity writeback, so only do the
- * notification when we have completed the work.
+ * Notify the caller of completion if this is a synchronous
+ * work item, otherwise just free it.
*/
- if (test_bit(WS_ONSTACK, &work->state))
- wb_clear_pending(wb, work);
+ if (work->done)
+ complete(work->done);
+ else
+ kfree(work);
}
/*
@@ -931,14 +814,9 @@ int bdi_writeback_task(struct bdi_writeb
void wakeup_flusher_threads(long nr_pages)
{
struct backing_dev_info *bdi;
- struct wb_writeback_args args = {
- .sync_mode = WB_SYNC_NONE,
- };
- if (nr_pages) {
- args.nr_pages = nr_pages;
- } else {
- args.nr_pages = global_page_state(NR_FILE_DIRTY) +
+ if (!nr_pages) {
+ nr_pages = global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS);
}
@@ -946,7 +824,7 @@ void wakeup_flusher_threads(long nr_page
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
if (!bdi_has_dirty_io(bdi))
continue;
- bdi_alloc_queue_work(bdi, &args);
+ __bdi_start_writeback(bdi, nr_pages, false, false);
}
rcu_read_unlock();
}
@@ -1155,17 +1033,20 @@ void writeback_inodes_sb(struct super_bl
{
unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
- struct wb_writeback_args args = {
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_NONE,
+ .done = &done,
};
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- args.nr_pages = nr_dirty + nr_unstable +
+ work.nr_pages = nr_dirty + nr_unstable +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
- bdi_queue_work_onstack(&args);
+ bdi_queue_work(sb->s_bdi, &work);
+ wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb);
@@ -1197,16 +1078,20 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idl
*/
void sync_inodes_sb(struct super_block *sb)
{
- struct wb_writeback_args args = {
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
.nr_pages = LONG_MAX,
.range_cyclic = 0,
+ .done = &done,
};
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- bdi_queue_work_onstack(&args);
+ bdi_queue_work(sb->s_bdi, &work);
+ wait_for_completion(&done);
+
wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
Index: linux-2.6/include/linux/backing-dev.h
===================================================================
--- linux-2.6.orig/include/linux/backing-dev.h 2010-06-19 19:28:41.701254021 +0200
+++ linux-2.6/include/linux/backing-dev.h 2010-06-19 19:28:47.304030037 +0200
@@ -82,8 +82,6 @@ struct backing_dev_info {
struct bdi_writeback wb; /* default writeback info for this bdi */
spinlock_t wb_lock; /* protects update side of wb_list */
struct list_head wb_list; /* the flusher threads hanging off this bdi */
- unsigned long wb_mask; /* bitmask of registered tasks */
- unsigned int wb_cnt; /* number of registered tasks */
struct list_head work_list;
Index: linux-2.6/mm/backing-dev.c
===================================================================
--- linux-2.6.orig/mm/backing-dev.c 2010-06-19 19:28:04.784253323 +0200
+++ linux-2.6/mm/backing-dev.c 2010-06-19 19:29:22.634069290 +0200
@@ -104,15 +104,13 @@ static int bdi_debug_stats_show(struct s
"b_more_io: %8lu\n"
"bdi_list: %8u\n"
"state: %8lx\n"
- "wb_mask: %8lx\n"
- "wb_list: %8u\n"
- "wb_cnt: %8u\n",
+ "wb_list: %8u\n",
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
(unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
K(bdi_thresh), K(dirty_thresh),
K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
- !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
- !list_empty(&bdi->wb_list), bdi->wb_cnt);
+ !list_empty(&bdi->bdi_list), bdi->state,
+ !list_empty(&bdi->wb_list));
#undef K
return 0;
@@ -674,12 +672,6 @@ int bdi_init(struct backing_dev_info *bd
bdi_wb_init(&bdi->wb, bdi);
- /*
- * Just one thread support for now, hard code mask and count
- */
- bdi->wb_mask = 1;
- bdi->wb_cnt = 1;
-
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
err = percpu_counter_init(&bdi->bdi_stat[i], 0);
if (err)
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 1/3] writeback: simplify the write back thread queue
2010-06-19 21:07 [PATCH 1/3] writeback: simplify the write back thread queue Christoph Hellwig
@ 2010-06-20 8:40 ` Artem Bityutskiy
2010-06-20 9:00 ` Christoph Hellwig
2010-06-21 7:47 ` Jens Axboe
1 sibling, 1 reply; 7+ messages in thread
From: Artem Bityutskiy @ 2010-06-20 8:40 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: axboe, linux-fsdevel
On Sat, 2010-06-19 at 23:07 +0200, Christoph Hellwig wrote:
> First remove items from work_list as soon as we start working on them. This
> means we don't have to track any pending or visited state and can get
> rid of all the RCU magic freeing the work items - we can simply free
> them once the operation has finished. Second use a real completion for
> tracking synchronous requests - if the caller sets the completion pointer
> we complete it, otherwise use it as a boolean indicator that we can free
> the work item directly. Third unify struct wb_writeback_args and struct
> bdi_work into a single data structure, wb_writeback_work. Previous we
> set all parameters into a struct wb_writeback_args, copied it into
> struct bdi_work, copied it again on the stack to use it there. Instead
> of just allocate one structure dynamically or on the stack and use it
> all the way through the stack.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Christoph, thanks for simplifying this. I wonder also, why do we need
the bdi_pending_list - for me it looks redundant.
Also, do we need the forker task? It hurts because it wakes up every 5
sec just to check whether it has to fork something and to waste the
battery energy. Do we really need the bdi threads to kill themselves
after 5 minutes of inactivity?
I'm going to work on optimizing the forker per-bdi threads wake-ups. But
for the forker - it seems it is better to just get rid of it completely.
Jens, what do you think?
--
Best Regards,
Artem Bityutskiy (Артём Битюцкий)
--
To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 1/3] writeback: simplify the write back thread queue
2010-06-20 8:40 ` Artem Bityutskiy
@ 2010-06-20 9:00 ` Christoph Hellwig
0 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2010-06-20 9:00 UTC (permalink / raw)
To: Artem Bityutskiy; +Cc: Christoph Hellwig, axboe, linux-fsdevel
On Sun, Jun 20, 2010 at 11:40:58AM +0300, Artem Bityutskiy wrote:
> Christoph, thanks for simplifying this. I wonder also, why do we need
> the bdi_pending_list - for me it looks redundant.
>
> Also, do we need the forker task? It hurts because it wakes up every 5
> sec jut to check whether it has to fork something and to waste the
> battery energy. Do we really need to bdi threads to kill themselves
> after 5 minutes of inactivity?
I don't like the design very much either. I think the problem is that
we currently don't have an interface to tell whether a bdi is actually
used for a filesystem. We only need the flusher thread any filesystem
is using a bdi currently. I've started looking a this, but it's not
that easy. First I need to sort out the current bdi_init/register/
unregister/destroy interface which has grown organicly and currenly
isn't exacly symmetric. After that I can look into a new interface
to start/stop the thread on an otherwise fully set up bdi, which should
allow getting rid of the forker and it's complications.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 1/3] writeback: simplify the write back thread queue
2010-06-19 21:07 [PATCH 1/3] writeback: simplify the write back thread queue Christoph Hellwig
2010-06-20 8:40 ` Artem Bityutskiy
@ 2010-06-21 7:47 ` Jens Axboe
2010-06-21 7:49 ` Christoph Hellwig
1 sibling, 1 reply; 7+ messages in thread
From: Jens Axboe @ 2010-06-21 7:47 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: linux-fsdevel
On 2010-06-19 23:07, Christoph Hellwig wrote:
> First remove items from work_list as soon as we start working on them. This
> means we don't have to track any pending or visited state and can get
> rid of all the RCU magic freeing the work items - we can simply free
> them once the operation has finished. Second use a real completion for
> tracking synchronous requests - if the caller sets the completion pointer
> we complete it, otherwise use it as a boolean indicator that we can free
> the work item directly. Third unify struct wb_writeback_args and struct
> bdi_work into a single data structure, wb_writeback_work. Previous we
> set all parameters into a struct wb_writeback_args, copied it into
> struct bdi_work, copied it again on the stack to use it there. Instead
> of just allocate one structure dynamically or on the stack and use it
> all the way through the stack.
I'm fine with this, we can always bring back support for > 1 writeback
thread per bdi when the need arises.
--
Jens Axboe
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 1/3] writeback: simplify the write back thread queue
2010-06-21 7:47 ` Jens Axboe
@ 2010-06-21 7:49 ` Christoph Hellwig
2010-06-21 7:56 ` Jens Axboe
0 siblings, 1 reply; 7+ messages in thread
From: Christoph Hellwig @ 2010-06-21 7:49 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-fsdevel
On Mon, Jun 21, 2010 at 09:47:05AM +0200, Jens Axboe wrote:
> I'm fine with this, we can always bring back support for > 1 writeback
> thread per bdi when the need arises.
Note that this patch should actually make an eventual implementation
of > 1 writeback thread simpler if we want to go there - removing the
item from the list early means there is no need to deal with any kind of
race avoidance.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 1/3] writeback: simplify the write back thread queue
2010-06-21 7:49 ` Christoph Hellwig
@ 2010-06-21 7:56 ` Jens Axboe
2010-06-21 8:01 ` Jens Axboe
0 siblings, 1 reply; 7+ messages in thread
From: Jens Axboe @ 2010-06-21 7:56 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: linux-fsdevel
On 2010-06-21 09:49, Christoph Hellwig wrote:
> On Mon, Jun 21, 2010 at 09:47:05AM +0200, Jens Axboe wrote:
>> I'm fine with this, we can always bring back support for > 1 writeback
>> thread per bdi when the need arises.
>
> Note that this patch should actually make an eventual implementation
> of > 1 writeback thread simpler if we want to go there - removing the
> item from the list early means there is no need to deal with any kind of
> race avoidance.
Yes, I suspect we'll need to drive it differently than it was setup for,
so no complaints on that side.
What are the patches against? They don't apply on top of my for-2.6.36
branch that has the other wb bits.
--
Jens Axboe
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 1/3] writeback: simplify the write back thread queue
2010-06-21 7:56 ` Jens Axboe
@ 2010-06-21 8:01 ` Jens Axboe
0 siblings, 0 replies; 7+ messages in thread
From: Jens Axboe @ 2010-06-21 8:01 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: linux-fsdevel
On 2010-06-21 09:56, Jens Axboe wrote:
> What are the patches against? They don't apply on top of my for-2.6.36
> branch that has the other wb bits.
Nevermind, that was a user error.
--
Jens Axboe
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2010-06-21 8:01 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-06-19 21:07 [PATCH 1/3] writeback: simplify the write back thread queue Christoph Hellwig
2010-06-20 8:40 ` Artem Bityutskiy
2010-06-20 9:00 ` Christoph Hellwig
2010-06-21 7:47 ` Jens Axboe
2010-06-21 7:49 ` Christoph Hellwig
2010-06-21 7:56 ` Jens Axboe
2010-06-21 8:01 ` Jens Axboe
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).