struct percpu_counter {
	atomic_long_t count;
	atomic_long_t *counters;
};

#ifdef CONFIG_SMP

void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
	long old, new;
	atomic_long_t *pcount;

	pcount = per_cpu_ptr(fbc->counters, get_cpu());
start:
	old = atomic_long_read(pcount);
	new = old + amount;
	if (new >= FBC_BATCH || new <= -FBC_BATCH) {
		/*
		 * Batch threshold reached: zero the local counter and fold
		 * its delta into the global count.  Retry if another update
		 * raced with us in the meantime.
		 */
		if (unlikely(atomic_long_cmpxchg(pcount, old, 0) != old))
			goto start;
		atomic_long_add(new, &fbc->count);
	} else
		atomic_long_add(amount, pcount);
	put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);

long percpu_counter_read_accurate(struct percpu_counter *fbc)
{
	long res = 0;
	int cpu;
	atomic_long_t *pcount;

	for_each_possible_cpu(cpu) {
		pcount = per_cpu_ptr(fbc->counters, cpu);
		/* don't dirty the cache line if not necessary */
		if (atomic_long_read(pcount))
			res += atomic_long_xchg(pcount, 0);
	}
	/*
	 * Fold the drained per-CPU deltas into the global count so they
	 * are not lost, then return the now-accurate total.
	 */
	atomic_long_add(res, &fbc->count);
	return atomic_long_read(&fbc->count);
}
EXPORT_SYMBOL(percpu_counter_read_accurate);

#endif /* CONFIG_SMP */
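
The design keeps updates cheap: percpu_counter_mod() normally touches only the current CPU's atomic_long_t, and the shared fbc->count cache line is written only when a local delta reaches the FBC_BATCH threshold, while percpu_counter_read_accurate() pays the full cross-CPU cost to drain every per-CPU delta. For context, here is a minimal caller sketch; it is not part of the code above, the counter name and helper functions are hypothetical, and setup assumes alloc_percpu(), which returns zeroed per-CPU storage:

/* Hypothetical usage sketch -- not part of the patch above. */
static struct percpu_counter nr_objects;

static int __init objects_counter_init(void)
{
	atomic_long_set(&nr_objects.count, 0);
	nr_objects.counters = alloc_percpu(atomic_long_t);	/* zeroed */
	if (!nr_objects.counters)
		return -ENOMEM;
	return 0;
}

static void object_created(void)
{
	/* Fast path: usually touches only this CPU's counter. */
	percpu_counter_mod(&nr_objects, 1);
}

static void object_destroyed(void)
{
	percpu_counter_mod(&nr_objects, -1);
}

static long objects_in_use(void)
{
	/* Slow path: drains every CPU's delta into the global count. */
	return percpu_counter_read_accurate(&nr_objects);
}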