--- 1.87/fs/buffer.c	Thu Jul  3 13:43:56 2003
+++ edited/fs/buffer.c	Fri Jul  4 13:01:28 2003
@@ -83,6 +83,8 @@
 static int nr_unused_buffer_heads;
 static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
+static DECLARE_WAIT_QUEUE_HEAD(bdflush_progress_wait);
+static atomic_t bdflush_generation = ATOMIC_INIT(0);
 
 static int grow_buffers(kdev_t dev, unsigned long block, int size);
 static int osync_buffers_list(struct list_head *);
@@ -1036,7 +1038,6 @@
 	if (state < 0)
 		return;
 
-	wakeup_bdflush();
 
 	/*
 	 * And if we're _really_ out of balance, wait for
@@ -1044,9 +1045,20 @@
 	 * This will throttle heavy writers.
 	 */
 	if (state > 0) {
-		spin_lock(&lru_list_lock);
-		write_some_buffers(NODEV);
-	}
+		int gen = atomic_read(&bdflush_generation);
+		DECLARE_WAITQUEUE(wait, current);
+		add_wait_queue_exclusive(&bdflush_progress_wait, &wait);
+		do {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			wakeup_bdflush();
+			if (gen != atomic_read(&bdflush_generation))
+				break;
+			schedule();
+		} while(gen == atomic_read(&bdflush_generation));
+		remove_wait_queue(&bdflush_progress_wait, &wait);
+		set_current_state(TASK_RUNNING);
+	} else
+		wakeup_bdflush();
 }
 
 EXPORT_SYMBOL(balance_dirty);
@@ -2947,6 +2959,16 @@
 	return 0;
 }
 
+static void poke_throttled_writers(int all) {
+	atomic_inc(&bdflush_generation);
+	smp_mb();
+	if (waitqueue_active(&bdflush_progress_wait)) {
+		if (all)
+			wake_up_all(&bdflush_progress_wait);
+		else
+			wake_up(&bdflush_progress_wait);
+	}
+}
 /*
  * This is the actual bdflush daemon itself. It used to be started from
  * the syscall above, but now we launch it ourselves internally with
@@ -2955,6 +2977,7 @@
 int bdflush(void *startup)
 {
 	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
 
 	/*
 	 * We have a bare-bones task_struct, and really should fill
@@ -2992,12 +3015,21 @@
 
 		while (ndirty > 0) {
 			spin_lock(&lru_list_lock);
-			if (!write_some_buffers(NODEV))
+			if (!write_some_buffers(NODEV)) {
+				poke_throttled_writers(0);
 				break;
+			}
+			poke_throttled_writers(0);
 			ndirty -= NRSYNC;
 		}
-		if (ndirty > 0 || bdflush_stop())
-			interruptible_sleep_on(&bdflush_wait);
+		if (ndirty > 0 || bdflush_stop()) {
+			add_wait_queue(&bdflush_wait, &wait);
+			set_current_state(TASK_INTERRUPTIBLE);
+			poke_throttled_writers(1);
+			schedule();
+			remove_wait_queue(&bdflush_wait, &wait);
+			set_current_state(TASK_RUNNING);
+		}
 	}
 }