--- linux-2.6.15/kernel/rcupdate.c	2006-01-03 04:21:10.000000000 +0100
+++ linux-2.6.15-edum/kernel/rcupdate.c	2006-01-06 13:32:02.000000000 +0100
@@ -71,14 +71,14 @@
 
 /* Fake initialization required by compiler */
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10000;
+static int maxbatch = 100;
 
 #ifndef __HAVE_ARCH_CMPXCHG
 /*
  * We use an array of spinlocks for the rcurefs -- similar to ones in sparc
  * 32 bit atomic_t implementations, and a hash function similar to that
  * for our refcounting needs.
- * Can't help multiprocessors which donot have cmpxchg :(
+ * Can't help multiprocessors which don't have cmpxchg :(
  */
 
 spinlock_t __rcuref_hash[RCUREF_HASH_SIZE] = {
@@ -110,9 +110,19 @@
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
 
-	if (unlikely(++rdp->count > 10000))
-		set_need_resched();
-
+/*
+ * OOM avoidance: if we queued too many items in this queue,
+ * free the oldest entry (from the donelist only, to respect
+ * RCU constraints)
+ */
+	if (unlikely(++rdp->count > 10000 && (head = rdp->donelist))) {
+		rdp->count--;
+		rdp->donelist = head->next;
+		if (!rdp->donelist)
+			rdp->donetail = &rdp->donelist;
+		local_irq_restore(flags);
+		return head->func(head);
+	}
 	local_irq_restore(flags);
 }
 
@@ -148,12 +158,19 @@
 	rdp = &__get_cpu_var(rcu_bh_data);
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
-	rdp->count++;
 /*
- *  Should we directly call rcu_do_batch() here ?
- *  if (unlikely(rdp->count > 10000))
- *      rcu_do_batch(rdp);
+ * OOM avoidance: if we queued too many items in this queue,
+ * free the oldest entry (from the donelist only, to respect
+ * RCU constraints)
  */
+	if (unlikely(++rdp->count > 10000 && (head = rdp->donelist))) {
+		rdp->count--;
+		rdp->donelist = head->next;
+		if (!rdp->donelist)
+			rdp->donetail = &rdp->donelist;
+		local_irq_restore(flags);
+		return head->func(head);
+	}
 	local_irq_restore(flags);
 }
 
@@ -208,19 +225,20 @@
  */
 static void rcu_do_batch(struct rcu_data *rdp)
 {
-	struct rcu_head *next, *list;
-	int count = 0;
+	struct rcu_head *next = NULL, *list;
+	int count = maxbatch;
 
 	list = rdp->donelist;
 	while (list) {
-		next = rdp->donelist = list->next;
+		next = list->next;
 		list->func(list);
 		list = next;
 		rdp->count--;
-		if (++count >= maxbatch)
+		if (--count <= 0)
 			break;
 	}
-	if (!rdp->donelist)
+	rdp->donelist = next;
+	if (!next)
 		rdp->donetail = &rdp->donelist;
 	else
 		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
@@ -344,11 +362,9 @@
 static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
 				struct rcu_head **tail)
 {
-	local_irq_disable();
 	*this_rdp->nxttail = list;
 	if (list)
 		this_rdp->nxttail = tail;
-	local_irq_enable();
 }
 
 static void __rcu_offline_cpu(struct rcu_data *this_rdp,
@@ -362,9 +378,12 @@
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp, rsp);
 	spin_unlock_bh(&rsp->lock);
+	local_irq_disable();
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-
+	this_rdp->count += rdp->count;
+	rdp->count = 0;
+	local_irq_enable();
 }
 static void rcu_offline_cpu(int cpu)
 {
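
Note (not part of the patch): the call_rcu()/call_rcu_bh() hunks above replace the old set_need_resched() hint with direct reclamation of the oldest donelist entry once a CPU's callback count crosses the 10000 threshold. The stand-alone user-space sketch below only illustrates that idea; the names (cb_queue, cb_enqueue, CB_THRESHOLD) and the threshold of 4 are invented for the example, it collapses the kernel's nxtlist/donelist pair into a single list for brevity, and it ignores the interrupt disabling the kernel code relies on.

#include <stdio.h>
#include <stdlib.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *);
};

struct cb_queue {
	struct cb *donelist;	/* oldest callback first */
	struct cb **donetail;	/* points to the terminating NULL link */
	int count;
};

#define CB_THRESHOLD 4		/* stand-in for the patch's 10000 */

/* Append a callback; past the threshold, run and drop the oldest one. */
static void cb_enqueue(struct cb_queue *q, struct cb *head)
{
	head->next = NULL;
	*q->donetail = head;
	q->donetail = &head->next;

	if (++q->count > CB_THRESHOLD && (head = q->donelist) != NULL) {
		q->count--;
		q->donelist = head->next;
		if (!q->donelist)
			q->donetail = &q->donelist;
		head->func(head);	/* reclaim the oldest entry now */
	}
}

static void free_cb(struct cb *c)
{
	printf("reclaiming %p\n", (void *)c);
	free(c);
}

int main(void)
{
	struct cb_queue q = { NULL, &q.donelist, 0 };
	int i;

	for (i = 0; i < 8; i++) {
		struct cb *c = malloc(sizeof(*c));
		if (!c)
			return 1;
		c->func = free_cb;
		cb_enqueue(&q, c);	/* enqueues 5..8 each reclaim one */
	}
	printf("still queued: %d\n", q.count);
	while (q.donelist) {		/* drain the rest */
		struct cb *c = q.donelist;
		q.donelist = c->next;
		c->func(c);
	}
	return 0;
}

In the patch itself the entry reclaimed this way is always taken from donelist, i.e. its grace period has already elapsed, which is what the added comment means by respecting RCU constraints.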