* [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow
@ 2006-04-10 17:59 Mingming Cao
2006-04-10 22:18 ` Andrew Morton
2006-04-21 14:59 ` [PATCH 2/2] ext3 free blocks counter initialization fix Mingming Cao
0 siblings, 2 replies; 7+ messages in thread
From: Mingming Cao @ 2006-04-10 17:59 UTC (permalink / raw)
To: akpm; +Cc: kiran, Laurent Vivier, linux-kernel, ext2-devel, linux-fsdevel
[PATCH 1/3] - Currently the "long" type counter maintained in percpu
counter could have an issue when handling a counter that is an unsigned long
type. Most cases this could be easily fixed by casting the returned
value to "unsigned long". But for the "overflow" issue, i.e. because of
the percpu global counter is an approximate value, there is a
possibility that at some point the global counter is close to the max
limit (0xffff_feee) but after updating from a local counter a positive
value, the global counter becomes a small value (i.e. 0x00000012).
This patch tries to avoid this overflow from happening. When updating from a
local counter to the global counter, add a check to see if the updated
value is less than before if we are doing a positive add, or if the
updated value is greater than before if we are doing a negative add.
Either way we should postpone the updating from this local counter to
the global counter.
Signed-Off-By: Mingming Cao <cmm@us.ibm.com>
---
linux-2.6.16-cmm/include/linux/percpu_counter.h | 12 ++++++
linux-2.6.16-cmm/lib/percpu_counter.c | 44 ++++++++++++++++++++----
2 files changed, 49 insertions(+), 7 deletions(-)
diff -puN lib/percpu_counter.c~percpu_counter_unsigned_long_fix lib/percpu_counter.c
--- linux-2.6.16/lib/percpu_counter.c~percpu_counter_unsigned_long_fix 2006-04-05 10:03:07.000000000 -0700
+++ linux-2.6.16-cmm/lib/percpu_counter.c 2006-04-05 10:13:58.000000000 -0700
@@ -5,28 +5,47 @@
#include <linux/percpu_counter.h>
#include <linux/module.h>
-static void __percpu_counter_mod(struct percpu_counter *fbc, long amount)
+static void __percpu_counter_mod(struct percpu_counter *fbc, long amount,
+ int llcheck)
{
long count;
long *pcount;
int cpu = smp_processor_id();
+ unsigned long before, after;
+ int update = 1;
pcount = per_cpu_ptr(fbc->counters, cpu);
count = *pcount + amount;
if (count >= FBC_BATCH || count <= -FBC_BATCH) {
spin_lock(&fbc->lock);
- fbc->count += count;
- *pcount = 0;
+ before = fbc->count;
+ after = before + count;
+ if (llcheck && ((count > 0 && after < before) ||
+ ( count < 0 && after > before)))
+ update = 0;
+
+ if (update) {
+ fbc->count = after;
+ *pcount = 0;
+ }
spin_unlock(&fbc->lock);
} else {
*pcount = count;
}
}
+void percpu_counter_mod_ll(struct percpu_counter *fbc, long amount)
+{
+ preempt_disable();
+ __percpu_counter_mod(fbc, amount, 1);
+ preempt_enable();
+}
+EXPORT_SYMBOL(percpu_counter_mod_ll);
+
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
preempt_disable();
- __percpu_counter_mod(fbc, amount);
+ __percpu_counter_mod(fbc, amount, 0);
preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_mod);
@@ -34,7 +53,7 @@ EXPORT_SYMBOL(percpu_counter_mod);
void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount)
{
local_bh_disable();
- __percpu_counter_mod(fbc, amount);
+ __percpu_counter_mod(fbc, amount, 0);
local_bh_enable();
}
EXPORT_SYMBOL(percpu_counter_mod_bh);
@@ -43,7 +62,7 @@ EXPORT_SYMBOL(percpu_counter_mod_bh);
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
-long percpu_counter_sum(struct percpu_counter *fbc)
+long __percpu_counter_sum(struct percpu_counter *fbc, int llcheck)
{
long ret;
int cpu;
@@ -55,9 +74,20 @@ long percpu_counter_sum(struct percpu_co
ret += *pcount;
}
spin_unlock(&fbc->lock);
- return ret < 0 ? 0 : ret;
+ if (!llcheck && ret < 0)
+ ret = 0;
+ return ret;
+}
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc, 0);
}
EXPORT_SYMBOL(percpu_counter_sum);
+long percpu_counter_sum_ll(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc, 1);
+}
+EXPORT_SYMBOL(percpu_counter_sum_ll);
/*
* Returns zero if the counter is within limit. Returns non zero if counter
diff -puN include/linux/percpu_counter.h~percpu_counter_unsigned_long_fix include/linux/percpu_counter.h
--- linux-2.6.16/include/linux/percpu_counter.h~percpu_counter_unsigned_long_fix 2006-04-05 10:03:07.000000000 -0700
+++ linux-2.6.16-cmm/include/linux/percpu_counter.h 2006-04-05 10:16:38.000000000 -0700
@@ -40,7 +40,9 @@ static inline void percpu_counter_destro
}
void percpu_counter_mod(struct percpu_counter *fbc, long amount);
+void percpu_counter_mod_ll(struct percpu_counter *fbc, long amount);
long percpu_counter_sum(struct percpu_counter *fbc);
+long percpu_counter_sum_ll(struct percpu_counter *fbc);
void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount);
int percpu_counter_exceeds(struct percpu_counter *fbc, long limit);
@@ -120,10 +122,20 @@ static inline void percpu_counter_inc(st
percpu_counter_mod(fbc, 1);
}
+static inline void percpu_counter_inc_ll(struct percpu_counter *fbc)
+{
+ percpu_counter_mod_ll(fbc, 1);
+}
+
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
percpu_counter_mod(fbc, -1);
}
+static inline void percpu_counter_dec_ll(struct percpu_counter *fbc)
+{
+ percpu_counter_mod_ll(fbc, -1);
+}
+
#endif /* _LINUX_PERCPU_COUNTER_H */
_
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow
2006-04-10 17:59 [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow Mingming Cao
@ 2006-04-10 22:18 ` Andrew Morton
2006-04-11 1:09 ` Mingming Cao
2006-04-21 14:59 ` [PATCH 2/2] ext3 free blocks counter initialization fix Mingming Cao
1 sibling, 1 reply; 7+ messages in thread
From: Andrew Morton @ 2006-04-10 22:18 UTC (permalink / raw)
To: cmm; +Cc: kiran, Laurent.Vivier, linux-kernel, ext2-devel, linux-fsdevel
Mingming Cao <cmm@us.ibm.com> wrote:
>
> [PATCH 1/3] - Currently the"long" type counter maintained in percpu
> counter could have issue when handling a counter that is a unsigned long
> type. Most cases this could be easily fixed by casting the returned
> value to "unsigned long". But for the "overflow" issue, i.e. because of
> the percpu global counter is a approsimate value, there is a
> possibility that at some point the global counter is close to the max
> limit (oxffff_feee) but after updating from a local counter a positive
> value, the global counter becomes a small value (i.e.0x 00000012).
>
> This patch tries to avoid this overflow happen. When updating from a
> local counter to the global counter, add a check to see if the updated
> value is less than before if we are doing an positive add, or if the
> updated value is greater than before if we are doing an negative add.
> Either way we should postpone the updating from this local counter to
> the global counter.
>
>
> -static void __percpu_counter_mod(struct percpu_counter *fbc, long amount)
> +static void __percpu_counter_mod(struct percpu_counter *fbc, long amount,
> + int llcheck)
Confused. What does "ll" stand for throughout this patch?
Whatever it is, I suspect we need to choose something better ;)
> {
> long count;
> long *pcount;
> int cpu = smp_processor_id();
> + unsigned long before, after;
> + int update = 1;
>
> pcount = per_cpu_ptr(fbc->counters, cpu);
> count = *pcount + amount;
> if (count >= FBC_BATCH || count <= -FBC_BATCH) {
> spin_lock(&fbc->lock);
> - fbc->count += count;
> - *pcount = 0;
> + before = fbc->count;
> + after = before + count;
> + if (llcheck && ((count > 0 && after < before) ||
> + ( count < 0 && after > before)))
> + update = 0;
> +
> + if (update) {
> + fbc->count = after;
> + *pcount = 0;
> + }
The above bit of magic deserves an explanatory comment.
> spin_unlock(&fbc->lock);
> } else {
> *pcount = count;
> }
> }
>
> +void percpu_counter_mod_ll(struct percpu_counter *fbc, long amount)
> +{
> + preempt_disable();
> + __percpu_counter_mod(fbc, amount, 1);
> + preempt_enable();
> +}
> +EXPORT_SYMBOL(percpu_counter_mod_ll);
An introductory comment which describes the difference between this and
percpu_counter_mod() would be helpful, please.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow
2006-04-10 22:18 ` Andrew Morton
@ 2006-04-11 1:09 ` Mingming Cao
2006-04-11 7:54 ` Andreas Dilger
0 siblings, 1 reply; 7+ messages in thread
From: Mingming Cao @ 2006-04-11 1:09 UTC (permalink / raw)
To: Andrew Morton
Cc: kiran, Laurent.Vivier, linux-kernel, ext2-devel, linux-fsdevel
On Mon, 2006-04-10 at 15:18 -0700, Andrew Morton wrote:
> Mingming Cao <cmm@us.ibm.com> wrote:
> >
> > [PATCH 1/3] - Currently the"long" type counter maintained in percpu
> > counter could have issue when handling a counter that is a unsigned long
> > type. Most cases this could be easily fixed by casting the returned
> > value to "unsigned long". But for the "overflow" issue, i.e. because of
> > the percpu global counter is a approsimate value, there is a
> > possibility that at some point the global counter is close to the max
> > limit (oxffff_feee) but after updating from a local counter a positive
> > value, the global counter becomes a small value (i.e.0x 00000012).
> >
> > This patch tries to avoid this overflow happen. When updating from a
> > local counter to the global counter, add a check to see if the updated
> > value is less than before if we are doing an positive add, or if the
> > updated value is greater than before if we are doing an negative add.
> > Either way we should postpone the updating from this local counter to
> > the global counter.
> >
> >
> > -static void __percpu_counter_mod(struct percpu_counter *fbc, long amount)
> > +static void __percpu_counter_mod(struct percpu_counter *fbc, long amount,
> > + int llcheck)
>
> Confused. What does "ll" stand for throughout this patch?
>
> Whatever it is, I suspect we need to choose something better ;)
>
Probably "ul" fits better than "ll" -- this llcheck is a flag that should
only need to be turned on for unsigned long type counters.
> > {
> > long count;
> > long *pcount;
> > int cpu = smp_processor_id();
> > + unsigned long before, after;
> > + int update = 1;
> >
> > pcount = per_cpu_ptr(fbc->counters, cpu);
> > count = *pcount + amount;
> > if (count >= FBC_BATCH || count <= -FBC_BATCH) {
> > spin_lock(&fbc->lock);
> > - fbc->count += count;
> > - *pcount = 0;
> > + before = fbc->count;
> > + after = before + count;
> > + if (llcheck && ((count > 0 && after < before) ||
> > + ( count < 0 && after > before)))
> > + update = 0;
> > +
> > + if (update) {
> > + fbc->count = after;
> > + *pcount = 0;
> > + }
>
> The above bit of magic deserves an explanatory comment.
>
Certainly. How about this? Does this look still confusing?
Signed-Off-By: Mingming Cao <cmm@us.ibm.com>
---
linux-2.6.16-cmm/include/linux/percpu_counter.h | 12 ++
linux-2.6.16-cmm/lib/percpu_counter.c | 103 ++++++++++++++++++++++--
2 files changed, 108 insertions(+), 7 deletions(-)
diff -puN lib/percpu_counter.c~percpu_counter_unsigned_long_fix lib/percpu_counter.c
--- linux-2.6.16/lib/percpu_counter.c~percpu_counter_unsigned_long_fix 2006-04-10 17:10:36.000000000 -0700
+++ linux-2.6.16-cmm/lib/percpu_counter.c 2006-04-10 17:59:50.000000000 -0700
@@ -5,28 +5,89 @@
#include <linux/percpu_counter.h>
#include <linux/module.h>
-static void __percpu_counter_mod(struct percpu_counter *fbc, long amount)
+static void __percpu_counter_mod(struct percpu_counter *fbc, long amount,
+ int ul_overflow_check)
{
long count;
long *pcount;
int cpu = smp_processor_id();
+ unsigned long before, after;
+ int update = 1;
pcount = per_cpu_ptr(fbc->counters, cpu);
count = *pcount + amount;
if (count >= FBC_BATCH || count <= -FBC_BATCH) {
spin_lock(&fbc->lock);
- fbc->count += count;
- *pcount = 0;
+ before = fbc->count;
+ after = before + count;
+ /*
+ * Since the percpu counter need a signed value for the
+ * global counter, and we are using percpu counter in some
+ * places to support unsigned long type of counter,
+ * we need to check whether the update will cause overflow
+ * (i.e. before the global counter (fbc->count) is 0xfffffeee
+ * and the local counter (*pcount +amount) value is 290
+ * then we will end up with a bogus small global counter value
+ * 0x00000123.) That's why we introduce a extra check here
+ * to support unsigned long type of counter.
+ *
+ * Before updating the global counter, if we detect the
+ * updated new value will cause overflow, then we should not
+ * do the update from this local counter at this moment. (i.e.
+ * the local counter will not be cleared right now). The update
+ * will be deferred at some point until either other local
+ * counter updated the global counter first, or the local
+ * counter's value will not cause global counter overflow.
+ *
+ * To check whether an update will cause overflow:
+ * if we see the new value for the global counter is less than
+ * before, and the update is intend to increase the
+ * global counter(positive add), then this is an overflow case.
+ *
+ * Or if we see the new value is greater than before but we
+ * were intend to decrease the global counter (negative add),
+ * then this is an overflow.
+ */
+
+ if (ul_overflow_check && ((count > 0 && after < before) ||
+ ( count < 0 && after > before)))
+ update = 0;
+
+ if (update) {
+ fbc->count = after;
+ *pcount = 0;
+ }
spin_unlock(&fbc->lock);
} else {
*pcount = count;
}
}
+/*
+ * percpu_counter_mod_ul() turns on the flag to check
+ * the possible overflow update for unsigned long type
+ * counter. This function is added to support unsigned long
+ * type of counter.
+ *
+ * If the user of percpu counter is a type of unsigned long
+ * and is possible to reach the maximum of the data type allowed,
+ * and the changed amount is less than, say, 0x8000_0000 on 32 bit (i.e. there is
+ * no question about the updated value is -1 or a big number positive
+ * value), then it should use this function to update the
+ * counter instead of using percpu_counter_mod().
+ *
+ */
+void percpu_counter_mod_ul(struct percpu_counter *fbc, long amount)
+{
+ preempt_disable();
+ __percpu_counter_mod(fbc, amount, 1);
+ preempt_enable();
+}
+EXPORT_SYMBOL(percpu_counter_mod_ul);
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
preempt_disable();
- __percpu_counter_mod(fbc, amount);
+ __percpu_counter_mod(fbc, amount, 0);
preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_mod);
@@ -34,7 +95,7 @@ EXPORT_SYMBOL(percpu_counter_mod);
void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount)
{
local_bh_disable();
- __percpu_counter_mod(fbc, amount);
+ __percpu_counter_mod(fbc, amount, 0);
local_bh_enable();
}
EXPORT_SYMBOL(percpu_counter_mod_bh);
@@ -42,8 +103,12 @@ EXPORT_SYMBOL(percpu_counter_mod_bh);
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
+ *
+ * There are users of the percpu counter that are unsigned long type value
+ * so a real value of very large number is possible seems a negative value here.
+ * Add a check for that case.
*/
-long percpu_counter_sum(struct percpu_counter *fbc)
+long __percpu_counter_sum(struct percpu_counter *fbc, int ul_check)
{
long ret;
int cpu;
@@ -55,9 +120,33 @@ long percpu_counter_sum(struct percpu_co
ret += *pcount;
}
spin_unlock(&fbc->lock);
- return ret < 0 ? 0 : ret;
+ if (!ul_check && ret < 0)
+ ret = 0;
+ return ret;
+}
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc, 0);
}
EXPORT_SYMBOL(percpu_counter_sum);
+/*
+ * percpu_counter_sum_ul() turns on the flag to check
+ * the possible case where a real value is a large positive value
+ * but shows up as a negative value. This function is added as part of
+ * of support for unsigned long type counter.
+ *
+ * If the user of percpu counter is a type of unsigned long
+ * and is possible to greater than 0x8000_0000 and unlikely to be
+ * a negative value (i.e. free ext3 block counters),
+ * then it should use this function to sum up the
+ * counter instead of using percpu_counter_sum().
+ *
+ */
+long percpu_counter_sum_ul(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc, 1);
+}
+EXPORT_SYMBOL(percpu_counter_sum_ul);
/*
* Returns zero if the counter is within limit. Returns non zero if counter
diff -puN include/linux/percpu_counter.h~percpu_counter_unsigned_long_fix include/linux/percpu_counter.h
--- linux-2.6.16/include/linux/percpu_counter.h~percpu_counter_unsigned_long_fix 2006-04-10 17:10:36.000000000 -0700
+++ linux-2.6.16-cmm/include/linux/percpu_counter.h 2006-04-10 18:05:00.000000000 -0700
@@ -40,7 +40,9 @@ static inline void percpu_counter_destro
}
void percpu_counter_mod(struct percpu_counter *fbc, long amount);
+void percpu_counter_mod_ul(struct percpu_counter *fbc, long amount);
long percpu_counter_sum(struct percpu_counter *fbc);
+long percpu_counter_sum_ul(struct percpu_counter *fbc);
void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount);
int percpu_counter_exceeds(struct percpu_counter *fbc, long limit);
@@ -120,10 +122,20 @@ static inline void percpu_counter_inc(st
percpu_counter_mod(fbc, 1);
}
+static inline void percpu_counter_inc_ul(struct percpu_counter *fbc)
+{
+ percpu_counter_mod_ul(fbc, 1);
+}
+
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
percpu_counter_mod(fbc, -1);
}
+static inline void percpu_counter_dec_ul(struct percpu_counter *fbc)
+{
+ percpu_counter_mod_ul(fbc, -1);
+}
+
#endif /* _LINUX_PERCPU_COUNTER_H */
_
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow
2006-04-11 1:09 ` Mingming Cao
@ 2006-04-11 7:54 ` Andreas Dilger
2006-04-11 17:23 ` Mingming Cao
0 siblings, 1 reply; 7+ messages in thread
From: Andreas Dilger @ 2006-04-11 7:54 UTC (permalink / raw)
To: Mingming Cao
Cc: Andrew Morton, kiran, Laurent.Vivier, linux-kernel, ext2-devel,
linux-fsdevel
On Apr 10, 2006 18:09 -0700, Mingming Cao wrote:
> +static void __percpu_counter_mod(struct percpu_counter *fbc, long amount,
> + int ul_overflow_check)
> {
> + * Before updating the global counter, if we detect the
> + * updated new value will cause overflow, then we should not
> + * do the update from this local counter at this moment. (i.e.
> + * the local counter will not be cleared right now). The update
> + * will be deferred at some point until either other local
> + * counter updated the global counter first, or the local
> + * counter's value will not cause global counter overflow.
Wouldn't it be better to update the counter by the maximum amount possible
to avoid overflow/underflow?
Cheers, Andreas
--
Andreas Dilger
Principal Software Engineer
Cluster File Systems, Inc.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow
2006-04-11 7:54 ` Andreas Dilger
@ 2006-04-11 17:23 ` Mingming Cao
0 siblings, 0 replies; 7+ messages in thread
From: Mingming Cao @ 2006-04-11 17:23 UTC (permalink / raw)
To: Andreas Dilger
Cc: Andrew Morton, kiran, Laurent.Vivier, linux-kernel, ext2-devel,
linux-fsdevel
On Tue, 2006-04-11 at 01:54 -0600, Andreas Dilger wrote:
> On Apr 10, 2006 18:09 -0700, Mingming Cao wrote:
> > +static void __percpu_counter_mod(struct percpu_counter *fbc, long amount,
> > + int ul_overflow_check)
> > {
> > + * Before updating the global counter, if we detect the
> > + * updated new value will cause overflow, then we should not
> > + * do the update from this local counter at this moment. (i.e.
> > + * the local counter will not be cleared right now). The update
> > + * will be deferred at some point until either other local
> > + * counter updated the global counter first, or the local
> > + * counter's value will not cause global counter overflow.
>
> Wouldn't it be better to update the counter by the maximum amount possible
> to avoid overflow/underflow?
>
Yep. Thanks for pointing this out. :)
Mingming
> Cheers, Andreas
> --
> Andreas Dilger
> Principal Software Engineer
> Cluster File Systems, Inc.
>
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 2/2] ext3 free blocks counter initialization fix
2006-04-10 17:59 [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow Mingming Cao
2006-04-10 22:18 ` Andrew Morton
@ 2006-04-21 14:59 ` Mingming Cao
2006-04-24 22:51 ` [PATCH 2/2] Set initial value when calling percpu counter initialization routine Mingming Cao
1 sibling, 1 reply; 7+ messages in thread
From: Mingming Cao @ 2006-04-21 14:59 UTC (permalink / raw)
To: akpm; +Cc: kiran, LaurentVivier, sct, linux-kernel, ext2-devel,
linux-fsdevel
[PATCH 2] - Change ext3 to make use of the new percpu counter initialize
routine to init the free blocks counter, instead of using
percpu_counter_mod() indirectly.
Signed-Off-By: Mingming Cao <cmm@us.ibm.com>
---
linux-2.6.16-cmm/fs/ext3/super.c | 9 +++------
1 files changed, 3 insertions(+), 6 deletions(-)
diff -puN fs/ext3/super.c~ext3_64bit_percpu_counter_fix fs/ext3/super.c
--- linux-2.6.16/fs/ext3/super.c~ext3_64bit_percpu_counter_fix
2006-04-21 00:02:45.000000000 -0700
+++ linux-2.6.16-cmm/fs/ext3/super.c 2006-04-21 00:02:45.000000000 -0700
@@ -1583,9 +1583,6 @@ static int ext3_fill_super (struct super
goto failed_mount;
}
- percpu_counter_init(&sbi->s_freeblocks_counter);
- percpu_counter_init(&sbi->s_freeinodes_counter);
- percpu_counter_init(&sbi->s_dirs_counter);
bgl_lock_init(&sbi->s_blockgroup_lock);
for (i = 0; i < db_count; i++) {
@@ -1727,11 +1724,11 @@ static int ext3_fill_super (struct super
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
"writeback");
- percpu_counter_mod(&sbi->s_freeblocks_counter,
+ percpu_counter_ll_init(&sbi->s_freeblocks_counter,
ext3_count_free_blocks(sb));
- percpu_counter_mod(&sbi->s_freeinodes_counter,
+ percpu_counter_ll_init(&sbi->s_freeinodes_counter,
ext3_count_free_inodes(sb));
- percpu_counter_mod(&sbi->s_dirs_counter,
+ percpu_counter_ll_init(&sbi->s_dirs_counter,
ext3_count_dirs(sb));
lock_kernel();
_
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 2/2] Set initial value when calling percpu counter initialization routine
2006-04-21 14:59 ` [PATCH 2/2] ext3 free blocks counter initialization fix Mingming Cao
@ 2006-04-24 22:51 ` Mingming Cao
0 siblings, 0 replies; 7+ messages in thread
From: Mingming Cao @ 2006-04-24 22:51 UTC (permalink / raw)
To: akpm; +Cc: kiran, LaurentVivier, sct, linux-kernel, ext2-devel,
linux-fsdevel
[PATCH 2] - Updates in the users of the percpu counters, to
make use of the new percpu_counter_init() routine now take additional
parameter to allow user to pass the initial value of the global counter.
Signed-Off-By: Mingming Cao <cmm@us.ibm.com>---
linux-2.6.16-cmm/fs/ext2/super.c | 9 +++------
linux-2.6.16-cmm/fs/ext3/super.c | 9 +++------
linux-2.6.16-cmm/fs/file_table.c | 2 +-
linux-2.6.16-cmm/net/core/sock.c | 2 +-
linux-2.6.16-cmm/net/decnet/af_decnet.c | 2 +-
linux-2.6.16-cmm/net/ipv4/proc.c | 2 +-
linux-2.6.16-cmm/net/ipv4/tcp.c | 2 +-
7 files changed, 11 insertions(+), 17 deletions(-)
diff -puN fs/ext3/super.c~ext3_64bit_percpu_counter_fix fs/ext3/super.c
--- linux-2.6.16/fs/ext3/super.c~ext3_64bit_percpu_counter_fix 2006-04-24 14:19:18.000000000 -0700
+++ linux-2.6.16-cmm/fs/ext3/super.c 2006-04-24 14:19:18.000000000 -0700
@@ -1583,9 +1583,6 @@ static int ext3_fill_super (struct super
goto failed_mount;
}
- percpu_counter_init(&sbi->s_freeblocks_counter);
- percpu_counter_init(&sbi->s_freeinodes_counter);
- percpu_counter_init(&sbi->s_dirs_counter);
bgl_lock_init(&sbi->s_blockgroup_lock);
for (i = 0; i < db_count; i++) {
@@ -1727,11 +1724,11 @@ static int ext3_fill_super (struct super
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
"writeback");
- percpu_counter_mod(&sbi->s_freeblocks_counter,
+ percpu_counter_init(&sbi->s_freeblocks_counter,
ext3_count_free_blocks(sb));
- percpu_counter_mod(&sbi->s_freeinodes_counter,
+ percpu_counter_init(&sbi->s_freeinodes_counter,
ext3_count_free_inodes(sb));
- percpu_counter_mod(&sbi->s_dirs_counter,
+ percpu_counter_init(&sbi->s_dirs_counter,
ext3_count_dirs(sb));
lock_kernel();
diff -puN fs/file_table.c~ext3_64bit_percpu_counter_fix fs/file_table.c
--- linux-2.6.16/fs/file_table.c~ext3_64bit_percpu_counter_fix 2006-04-24 14:19:18.000000000 -0700
+++ linux-2.6.16-cmm/fs/file_table.c 2006-04-24 14:19:18.000000000 -0700
@@ -300,5 +300,5 @@ void __init files_init(unsigned long mem
if (files_stat.max_files < NR_FILE)
files_stat.max_files = NR_FILE;
files_defer_init();
- percpu_counter_init(&nr_files);
+ percpu_counter_init(&nr_files, 0);
}
diff -puN net/decnet/af_decnet.c~ext3_64bit_percpu_counter_fix net/decnet/af_decnet.c
--- linux-2.6.16/net/decnet/af_decnet.c~ext3_64bit_percpu_counter_fix 2006-04-24 14:19:18.000000000 -0700
+++ linux-2.6.16-cmm/net/decnet/af_decnet.c 2006-04-24 14:19:18.000000000 -0700
@@ -2384,7 +2384,7 @@ static int __init decnet_init(void)
if (rc != 0)
goto out;
- percpu_counter_init(&decnet_memory_allocated);
+ percpu_counter_init(&decnet_memory_allocated, 0);
dn_neigh_init();
dn_dev_init();
dn_route_init();
diff -puN net/ipv4/tcp.c~ext3_64bit_percpu_counter_fix net/ipv4/tcp.c
--- linux-2.6.16/net/ipv4/tcp.c~ext3_64bit_percpu_counter_fix 2006-04-24 14:19:18.000000000 -0700
+++ linux-2.6.16-cmm/net/ipv4/tcp.c 2006-04-24 14:19:18.000000000 -0700
@@ -2178,7 +2178,7 @@ void __init tcp_init(void)
"(established %d bind %d)\n",
tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
- percpu_counter_init(&tcp_memory_allocated);
+ percpu_counter_init(&tcp_memory_allocated, 0);
tcp_register_congestion_control(&tcp_reno);
}
diff -puN fs/ext2/super.c~ext3_64bit_percpu_counter_fix fs/ext2/super.c
--- linux-2.6.16/fs/ext2/super.c~ext3_64bit_percpu_counter_fix 2006-04-24 14:19:18.000000000 -0700
+++ linux-2.6.16-cmm/fs/ext2/super.c 2006-04-24 14:19:18.000000000 -0700
@@ -834,9 +834,6 @@ static int ext2_fill_super(struct super_
printk ("EXT2-fs: not enough memory\n");
goto failed_mount;
}
- percpu_counter_init(&sbi->s_freeblocks_counter);
- percpu_counter_init(&sbi->s_freeinodes_counter);
- percpu_counter_init(&sbi->s_dirs_counter);
bgl_lock_init(&sbi->s_blockgroup_lock);
sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
GFP_KERNEL);
@@ -886,11 +883,11 @@ static int ext2_fill_super(struct super_
ext2_warning(sb, __FUNCTION__,
"mounting ext3 filesystem as ext2");
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
- percpu_counter_mod(&sbi->s_freeblocks_counter,
+ percpu_counter_init(&sbi->s_freeblocks_counter,
ext2_count_free_blocks(sb));
- percpu_counter_mod(&sbi->s_freeinodes_counter,
+ percpu_counter_init(&sbi->s_freeinodes_counter,
ext2_count_free_inodes(sb));
- percpu_counter_mod(&sbi->s_dirs_counter,
+ percpu_counter_init(&sbi->s_dirs_counter,
ext2_count_dirs(sb));
return 0;
diff -puN net/ipv4/proc.c~ext3_64bit_percpu_counter_fix net/ipv4/proc.c
--- linux-2.6.16/net/ipv4/proc.c~ext3_64bit_percpu_counter_fix 2006-04-24 14:19:18.000000000 -0700
+++ linux-2.6.16-cmm/net/ipv4/proc.c 2006-04-24 14:19:18.000000000 -0700
@@ -61,7 +61,7 @@ static int fold_prot_inuse(struct proto
static int sockstat_seq_show(struct seq_file *seq, void *v)
{
socket_seq_show(seq);
- seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %lu\n",
+ seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %llu\n",
fold_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count),
tcp_death_row.tw_count, read_sockets_allocated(&tcp_prot),
percpu_counter_read_positive(&tcp_memory_allocated));
diff -puN net/core/sock.c~ext3_64bit_percpu_counter_fix net/core/sock.c
--- linux-2.6.16/net/core/sock.c~ext3_64bit_percpu_counter_fix 2006-04-24 14:19:18.000000000 -0700
+++ linux-2.6.16-cmm/net/core/sock.c 2006-04-24 14:19:18.000000000 -0700
@@ -1783,7 +1783,7 @@ static char proto_method_implemented(con
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
- seq_printf(seq, "%-9s %4u %6d %6lu %-3s %6u %-3s %-10s "
+ seq_printf(seq, "%-9s %4u %6d %6llu %-3s %6u %-3s %-10s "
"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
proto->name,
proto->obj_size,
_
-------------------------------------------------------
Using Tomcat but need to do more? Need to support web services, security?
Get stuff done quickly with pre-integrated technology to make your job easier
Download IBM WebSphere Application Server v.1.0.1 based on Apache Geronimo
http://sel.as-us.falkag.net/sel?cmd=lnk&kid=120709&bid=263057&dat=121642
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2006-04-24 22:51 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-04-10 17:59 [RFC][PATCH 1/3] per cpu counter fixes for unsigned long type counter overflow Mingming Cao
2006-04-10 22:18 ` Andrew Morton
2006-04-11 1:09 ` Mingming Cao
2006-04-11 7:54 ` Andreas Dilger
2006-04-11 17:23 ` Mingming Cao
2006-04-21 14:59 ` [PATCH 2/2] ext3 free blocks counter initialization fix Mingming Cao
2006-04-24 22:51 ` [PATCH 2/2] Set initial value when calling percpu counter initialization routine Mingming Cao
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).