* [PATCH 1/3][NET] gen_estimator: faster gen_kill_estimator
@ 2008-01-20 23:46 Jarek Poplawski
2008-01-21 10:35 ` David Miller
2008-01-21 22:31 ` [PATCH 1/3 v2][NET] " Jarek Poplawski
0 siblings, 2 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-20 23:46 UTC (permalink / raw)
To: netdev; +Cc: Badalian Vyacheslav, Patrick McHardy, jamal, David Miller
gen_kill_estimator() is called during qdisc_destroy() with BHs disabled,
and each time does searching for each list member. This can block soft
interrupts for quite a long time when many classes are used. This patch
changes this by storing a pointer to internal gen_estimator structures
with gnet_stats_rate_est structures used by clients of gen_estimator.
This method removes currently possible registering in gen_estimator the
same structures more than once, but it isn't used after all. (There is
added a warning if gen_new_estimator() is called with structures being
used by gen_estimator already.)
Reported-by: Badalian Vyacheslav <slavon@bigtelecom.ru>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
[needs more testing]
---
include/linux/gen_stats.h | 2 ++
net/core/gen_estimator.c | 36 +++++++++++++++++++++++++++++++++---
2 files changed, 35 insertions(+), 3 deletions(-)
diff -Nurp 2.6.24-rc8-mm1-/include/linux/gen_stats.h 2.6.24-rc8-mm1+/include/linux/gen_stats.h
--- 2.6.24-rc8-mm1-/include/linux/gen_stats.h 2007-10-09 22:31:38.000000000 +0200
+++ 2.6.24-rc8-mm1+/include/linux/gen_stats.h 2008-01-20 20:37:08.000000000 +0100
@@ -28,11 +28,13 @@ struct gnet_stats_basic
* struct gnet_stats_rate_est - rate estimator
* @bps: current byte rate
* @pps: current packet rate
+ * @gen_estimator: internal data
*/
struct gnet_stats_rate_est
{
__u32 bps;
__u32 pps;
+ unsigned long gen_estimator;
};
/**
diff -Nurp 2.6.24-rc8-mm1-/net/core/gen_estimator.c 2.6.24-rc8-mm1+/net/core/gen_estimator.c
--- 2.6.24-rc8-mm1-/net/core/gen_estimator.c 2008-01-19 17:54:45.000000000 +0100
+++ 2.6.24-rc8-mm1+/net/core/gen_estimator.c 2008-01-20 20:58:35.000000000 +0100
@@ -139,6 +139,9 @@ skip:
rcu_read_unlock();
}
+static void gen_kill_estimator_find(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est);
+
/**
* gen_new_estimator - create a new rate estimator
* @bstats: basic statistics
@@ -171,6 +174,10 @@ int gen_new_estimator(struct gnet_stats_
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
+ if (rate_est->gen_estimator)
+ /* not sure: not zeroed in alloc or reused */
+ gen_kill_estimator_find(bstats, rate_est);
+
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (est == NULL)
return -ENOBUFS;
@@ -184,6 +191,7 @@ int gen_new_estimator(struct gnet_stats_
est->avbps = rate_est->bps<<5;
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
+ rate_est->gen_estimator = (unsigned long)est;
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
@@ -209,13 +217,30 @@ static void __gen_kill_estimator(struct
* @bstats: basic statistics
* @rate_est: rate estimator statistics
*
- * Removes the rate estimator specified by &bstats and &rate_est
- * and deletes the timer.
+ * Removes the rate estimator specified by &bstats and &rate_est.
*
* NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est)
+ struct gnet_stats_rate_est *rate_est)
+{
+ if (rate_est && rate_est->gen_estimator) {
+ struct gen_estimator *e;
+
+ e = (struct gen_estimator *)rate_est->gen_estimator;
+
+ rate_est->gen_estimator = 0;
+ write_lock_bh(&est_lock);
+ e->bstats = NULL;
+ write_unlock_bh(&est_lock);
+
+ list_del_rcu(&e->list);
+ call_rcu(&e->e_rcu, __gen_kill_estimator);
+ }
+}
+
+static void gen_kill_estimator_find(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est)
{
int idx;
struct gen_estimator *e, *n;
@@ -236,6 +261,11 @@ void gen_kill_estimator(struct gnet_stat
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
+
+ WARN_ON_ONCE(1); /* gen_new_estimator() repeated? */
+ rate_est->gen_estimator = 0;
+
+ return;
}
}
}
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3][NET] gen_estimator: faster gen_kill_estimator
2008-01-20 23:46 [PATCH 1/3][NET] gen_estimator: faster gen_kill_estimator Jarek Poplawski
@ 2008-01-21 10:35 ` David Miller
2008-01-21 10:56 ` Jarek Poplawski
2008-01-21 22:31 ` [PATCH 1/3 v2][NET] " Jarek Poplawski
1 sibling, 1 reply; 13+ messages in thread
From: David Miller @ 2008-01-21 10:35 UTC (permalink / raw)
To: jarkao2; +Cc: netdev, slavon, kaber, hadi
From: Jarek Poplawski <jarkao2@gmail.com>
Date: Mon, 21 Jan 2008 00:46:59 +0100
I like the technique you used to fix this bug, but:
> diff -Nurp 2.6.24-rc8-mm1-/include/linux/gen_stats.h 2.6.24-rc8-mm1+/include/linux/gen_stats.h
> --- 2.6.24-rc8-mm1-/include/linux/gen_stats.h 2007-10-09 22:31:38.000000000 +0200
> +++ 2.6.24-rc8-mm1+/include/linux/gen_stats.h 2008-01-20 20:37:08.000000000 +0100
> @@ -28,11 +28,13 @@ struct gnet_stats_basic
> * struct gnet_stats_rate_est - rate estimator
> * @bps: current byte rate
> * @pps: current packet rate
> + * @gen_estimator: internal data
> */
> struct gnet_stats_rate_est
> {
> __u32 bps;
> __u32 pps;
> + unsigned long gen_estimator;
> };
>
> /**
Sorry, this structure is exported to userspace so we can't
change it like that.
It is an attribute passed over netlink sockets.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3][NET] gen_estimator: faster gen_kill_estimator
2008-01-21 10:35 ` David Miller
@ 2008-01-21 10:56 ` Jarek Poplawski
0 siblings, 0 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-21 10:56 UTC (permalink / raw)
To: David Miller; +Cc: netdev, slavon, kaber, hadi
On Mon, Jan 21, 2008 at 02:35:15AM -0800, David Miller wrote:
...
> Sorry, this structure is exported to userspace so we can't
> change it like that.
>
> It is an attribute passed over netlink sockets.
>
Thanks for finding this! I'll try to rethink this (especially why
my "tests" seemed to show something could be working there...).
Jarek P.
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-20 23:46 [PATCH 1/3][NET] gen_estimator: faster gen_kill_estimator Jarek Poplawski
2008-01-21 10:35 ` David Miller
@ 2008-01-21 22:31 ` Jarek Poplawski
2008-01-21 22:41 ` Jarek Poplawski
2008-01-22 0:29 ` David Miller
1 sibling, 2 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-21 22:31 UTC (permalink / raw)
To: netdev; +Cc: Badalian Vyacheslav, Patrick McHardy, jamal, David Miller
So, let's try something easy first: #ifdef __KERNEL__. (I know there
are many esthetes around, but since this subject looks quite dirty...)
Alternatively we could change an api, and as a matter of fact there was
such a try some time ago, but is it really worth of such a mess?
Regards,
Jarek P.
-----------> (take 2)
gen_kill_estimator() is called during qdisc_destroy() with BHs disabled,
and each time does searching for each list member. This can block soft
interrupts for quite a long time when many classes are used. This patch
changes this by storing pointers to internal gen_estimator structures
with gnet_stats_rate_est structures used by clients of gen_estimator.
This method removes currently possible registering in gen_estimator the
same structures more than once, but it isn't used after all. (There is
added a warning if gen_new_estimator() is called with structures being
used by gen_estimator already.) Thanks to David Miller for pointing an
error in the first version of this patch.
Reported-by: Badalian Vyacheslav <slavon@bigtelecom.ru>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
[needs more testing]
---
include/linux/gen_stats.h | 4 ++++
net/core/gen_estimator.c | 36 +++++++++++++++++++++++++++++++++---
2 files changed, 37 insertions(+), 3 deletions(-)
diff -Nurp 2.6.24-rc8-mm1-/include/linux/gen_stats.h 2.6.24-rc8-mm1+/include/linux/gen_stats.h
--- 2.6.24-rc8-mm1-/include/linux/gen_stats.h 2007-10-09 22:31:38.000000000 +0200
+++ 2.6.24-rc8-mm1+/include/linux/gen_stats.h 2008-01-21 22:53:47.000000000 +0100
@@ -28,11 +28,15 @@ struct gnet_stats_basic
* struct gnet_stats_rate_est - rate estimator
* @bps: current byte rate
* @pps: current packet rate
+ * @gen_estimator: internal data
*/
struct gnet_stats_rate_est
{
__u32 bps;
__u32 pps;
+#ifdef __KERNEL__
+ unsigned long gen_estimator;
+#endif
};
/**
diff -Nurp 2.6.24-rc8-mm1-/net/core/gen_estimator.c 2.6.24-rc8-mm1+/net/core/gen_estimator.c
--- 2.6.24-rc8-mm1-/net/core/gen_estimator.c 2008-01-19 17:54:45.000000000 +0100
+++ 2.6.24-rc8-mm1+/net/core/gen_estimator.c 2008-01-20 20:58:35.000000000 +0100
@@ -139,6 +139,9 @@ skip:
rcu_read_unlock();
}
+static void gen_kill_estimator_find(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est);
+
/**
* gen_new_estimator - create a new rate estimator
* @bstats: basic statistics
@@ -171,6 +174,10 @@ int gen_new_estimator(struct gnet_stats_
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
+ if (rate_est->gen_estimator)
+ /* not sure: not zeroed in alloc or reused */
+ gen_kill_estimator_find(bstats, rate_est);
+
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (est == NULL)
return -ENOBUFS;
@@ -184,6 +191,7 @@ int gen_new_estimator(struct gnet_stats_
est->avbps = rate_est->bps<<5;
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
+ rate_est->gen_estimator = (unsigned long)est;
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
@@ -209,13 +217,30 @@ static void __gen_kill_estimator(struct
* @bstats: basic statistics
* @rate_est: rate estimator statistics
*
- * Removes the rate estimator specified by &bstats and &rate_est
- * and deletes the timer.
+ * Removes the rate estimator specified by &bstats and &rate_est.
*
* NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est)
+ struct gnet_stats_rate_est *rate_est)
+{
+ if (rate_est && rate_est->gen_estimator) {
+ struct gen_estimator *e;
+
+ e = (struct gen_estimator *)rate_est->gen_estimator;
+
+ rate_est->gen_estimator = 0;
+ write_lock_bh(&est_lock);
+ e->bstats = NULL;
+ write_unlock_bh(&est_lock);
+
+ list_del_rcu(&e->list);
+ call_rcu(&e->e_rcu, __gen_kill_estimator);
+ }
+}
+
+static void gen_kill_estimator_find(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est)
{
int idx;
struct gen_estimator *e, *n;
@@ -236,6 +261,11 @@ void gen_kill_estimator(struct gnet_stat
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
+
+ WARN_ON_ONCE(1); /* gen_new_estimator() repeated? */
+ rate_est->gen_estimator = 0;
+
+ return;
}
}
}
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-21 22:31 ` [PATCH 1/3 v2][NET] " Jarek Poplawski
@ 2008-01-21 22:41 ` Jarek Poplawski
2008-01-22 0:29 ` David Miller
1 sibling, 0 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-21 22:41 UTC (permalink / raw)
To: netdev; +Cc: Badalian Vyacheslav, Patrick McHardy, jamal, David Miller
On Mon, Jan 21, 2008 at 11:31:37PM +0100, Jarek Poplawski wrote:
>
> So, let's try something easy first: #ifdef __KERNEL__. (I know there
...
SORRY!!! Of course this is still wrong, I withdraw this patch.
Jarek P.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-21 22:31 ` [PATCH 1/3 v2][NET] " Jarek Poplawski
2008-01-21 22:41 ` Jarek Poplawski
@ 2008-01-22 0:29 ` David Miller
2008-01-22 7:21 ` Jarek Poplawski
2008-01-25 22:00 ` [PATCH v3][NET] " Jarek Poplawski
1 sibling, 2 replies; 13+ messages in thread
From: David Miller @ 2008-01-22 0:29 UTC (permalink / raw)
To: jarkao2; +Cc: netdev, slavon, kaber, hadi
From: Jarek Poplawski <jarkao2@gmail.com>
Date: Mon, 21 Jan 2008 23:31:37 +0100
>
> So, let's try something easy first: #ifdef __KERNEL__. (I know there
> are many esthetes around, but since this subject looks quite dirty...)
You can't do this, the attribute is copied to the user netlink SKB
using sizeof(struct gnet_stats_rate_est) as the size (or more
specifically sizeof(*p) where p is a pointer to this structure).
Therefore this new patch has the same exact problem.
Fix this right, make a structure like:
struct kernel_gnet_stats_rate_est {
struct gnet_stats_rate_est est;
void *gen_estimator;
}
And update all the code as needed.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-22 0:29 ` David Miller
@ 2008-01-22 7:21 ` Jarek Poplawski
2008-01-22 7:26 ` Jarek Poplawski
2008-01-22 11:42 ` jamal
2008-01-25 22:00 ` [PATCH v3][NET] " Jarek Poplawski
1 sibling, 2 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-22 7:21 UTC (permalink / raw)
To: David Miller; +Cc: netdev, slavon, kaber, hadi
On 22-01-2008 01:29, David Miller wrote:
...
> Fix this right, make a structure like:
>
> struct kernel_gnet_stats_rate_est {
> struct gnet_stats_rate_est est;
> void *gen_estimator;
> }
>
> And update all the code as needed.
Thanks! I'll try this...
...But, as a matter of fact I've thought about something similar (of
course much worse), and I was afraid of doing quite a lot of changes
at once, maybe again skip something like here. So, maybe one more
tiny RFC here...
I've tried this from the other side: to make alternative, new api of
gen_estimator functions, and then the rest of changes without hurry.
This looks not very nice, but IMHO should be safer (especially
considering my 'knowledge' of this code and current changes). There
is this not very nice additional parameter used e.g. in
ngen_new_estimator(), but it seems, after all changes, this should
be more visible how and where this could be optimized. (And, after
all, this new pointer shouldn't be used very often, so could sit a
bit further.)
Anyway, if you don't like this idea, let me know and I'll try yours.
It will only need more time for this.
This is done against 2.6.24-rc8-mm1 with this 3/3 cosmetic patch.
I'll send soon part 2: htb patch to use this.
Regards,
Jarek P.
---
include/net/gen_stats.h | 11 +++++
net/core/gen_estimator.c | 99 ++++++++++++++++++++++++++++++++++++++++++-----
2 files changed, 101 insertions(+), 9 deletions(-)
diff -Nurp 2.6.24-rc8-mm1-p3-/include/net/gen_stats.h 2.6.24-rc8-mm1-p3+/include/net/gen_stats.h
--- 2.6.24-rc8-mm1-p3-/include/net/gen_stats.h 2007-10-09 22:31:38.000000000 +0200
+++ 2.6.24-rc8-mm1-p3+/include/net/gen_stats.h 2008-01-22 00:13:48.000000000 +0100
@@ -46,4 +46,15 @@ extern int gen_replace_estimator(struct
struct gnet_stats_rate_est *rate_est,
spinlock_t *stats_lock, struct rtattr *opt);
+extern int ngen_new_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock, struct rtattr *opt,
+ unsigned long *pgen_estimator);
+extern void ngen_kill_estimator(unsigned long *pgen_estimator);
+
+extern int ngen_replace_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock, struct rtattr *opt,
+ unsigned long *pgen_estimator);
+
#endif
diff -Nurp 2.6.24-rc8-mm1-p3-/net/core/gen_estimator.c 2.6.24-rc8-mm1-p3+/net/core/gen_estimator.c
--- 2.6.24-rc8-mm1-p3-/net/core/gen_estimator.c 2008-01-22 00:01:30.000000000 +0100
+++ 2.6.24-rc8-mm1-p3+/net/core/gen_estimator.c 2008-01-22 00:22:37.000000000 +0100
@@ -140,26 +140,30 @@ skip:
}
/**
- * gen_new_estimator - create a new rate estimator
+ * ngen_new_estimator - create a new rate estimator (new version)
* @bstats: basic statistics
* @rate_est: rate estimator statistics
* @stats_lock: statistics lock
* @opt: rate estimator configuration TLV
+ * @pgen_estimator: pointer to return ngen_new_estimator data
*
* Creates a new rate estimator with &bstats as source and &rate_est
* as destination. A new timer with the interval specified in the
* configuration TLV is created. Upon each interval, the latest statistics
* will be read from &bstats and the estimated rate will be stored in
* &rate_est with the statistics lock grabed during this period.
+ * Called directly for pgen_estimator and possibility of fast kill
+ * or indirectly by gen_new_estimator.
*
- * Returns 0 on success or a negative error code.
+ * Returns 0 and data pointed by &pgen_estimator on success
+ * or a negative error code.
*
* NOTE: Called under rtnl_mutex
*/
-int gen_new_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est,
- spinlock_t *stats_lock,
- struct rtattr *opt)
+int ngen_new_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock, struct rtattr *opt,
+ unsigned long *pgen_estimator)
{
struct gen_estimator *est;
struct gnet_estimator *parm = RTA_DATA(opt);
@@ -184,6 +188,7 @@ int gen_new_estimator(struct gnet_stats_
est->avbps = rate_est->bps<<5;
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
+ *pgen_estimator = (unsigned long)est;
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
@@ -197,6 +202,32 @@ int gen_new_estimator(struct gnet_stats_
return 0;
}
+/**
+ * gen_new_estimator - create a new rate estimator
+ * @bstats: basic statistics
+ * @rate_est: rate estimator statistics
+ * @stats_lock: statistics lock
+ * @opt: rate estimator configuration TLV
+ *
+ * Creates a new rate estimator with &bstats as source and &rate_est
+ * as destination. A new timer with the interval specified in the
+ * configuration TLV is created. Upon each interval, the latest statistics
+ * will be read from &bstats and the estimated rate will be stored in
+ * &rate_est with the statistics lock grabed during this period.
+ *
+ * Returns 0 on success or a negative error code.
+ *
+ * NOTE: Called under rtnl_mutex
+ */
+int gen_new_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock, struct rtattr *opt)
+{
+ unsigned long dump;
+
+ return ngen_new_estimator(bstats, rate_est, stats_lock, opt, &dump);
+}
+
static void __gen_kill_estimator(struct rcu_head *head)
{
struct gen_estimator *e = container_of(head,
@@ -209,8 +240,7 @@ static void __gen_kill_estimator(struct
* @bstats: basic statistics
* @rate_est: rate estimator statistics
*
- * Removes the rate estimator specified by &bstats and &rate_est
- * and deletes the timer.
+ * Removes the rate estimator specified by &bstats and &rate_est.
*
* NOTE: Called under rtnl_mutex
*/
@@ -241,6 +271,32 @@ void gen_kill_estimator(struct gnet_stat
}
/**
+ * ngen_kill_estimator - remove a rate estimator (new version)
+ * @pgen_estimator: gen_estimator data got from ngen_new_estimator
+ *
+ * Removes the rate estimator specified by &pgen_estimator
+ * and replaces it with 0.
+ *
+ * NOTE: Called under rtnl_mutex
+ */
+void ngen_kill_estimator(unsigned long *pgen_estimator)
+{
+ if (pgen_estimator && *pgen_estimator) {
+ struct gen_estimator *e;
+
+ e = (struct gen_estimator *)*pgen_estimator;
+ *pgen_estimator = 0;
+
+ write_lock_bh(&est_lock);
+ e->bstats = NULL;
+ write_unlock_bh(&est_lock);
+
+ list_del_rcu(&e->list);
+ call_rcu(&e->e_rcu, __gen_kill_estimator);
+ }
+}
+
+/**
* gen_replace_estimator - replace rate estimator configuration
* @bstats: basic statistics
* @rate_est: rate estimator statistics
@@ -260,7 +316,32 @@ int gen_replace_estimator(struct gnet_st
return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}
+/**
+ * ngen_replace_estimator - replace rate estimator configuration (new version)
+ * @bstats: basic statistics
+ * @rate_est: rate estimator statistics
+ * @stats_lock: statistics lock
+ * @opt: rate estimator configuration TLV
+ * @pgen_estimator: gen_estimator data got from ngen_new_estimator
+ *
+ * Replaces the configuration of a rate estimator by calling
+ * ngen_kill_estimator and ngen_new_estimator.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int ngen_replace_estimator(struct gnet_stats_basic *bstats,
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock, struct rtattr *opt,
+ unsigned long *pgen_estimator)
+{
+ ngen_kill_estimator(pgen_estimator);
+ return ngen_new_estimator(bstats, rate_est, stats_lock, opt,
+ pgen_estimator);
+}
-EXPORT_SYMBOL(gen_kill_estimator);
EXPORT_SYMBOL(gen_new_estimator);
+EXPORT_SYMBOL(gen_kill_estimator);
EXPORT_SYMBOL(gen_replace_estimator);
+EXPORT_SYMBOL(ngen_new_estimator);
+EXPORT_SYMBOL(ngen_kill_estimator);
+EXPORT_SYMBOL(ngen_replace_estimator);
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-22 7:21 ` Jarek Poplawski
@ 2008-01-22 7:26 ` Jarek Poplawski
2008-01-22 11:42 ` jamal
1 sibling, 0 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-22 7:26 UTC (permalink / raw)
To: David Miller; +Cc: netdev, slavon, kaber, hadi
On Tue, Jan 22, 2008 at 08:21:52AM +0100, Jarek Poplawski wrote:
...
Part 2 of mini RFC:
HTB changes to use new, faster gen_estimator functions.
This is done against 2.6.24-rc8-mm1.
Thanks,
Jarek P.
---
diff -Nurp 2.6.24-rc8-mm1-/net/sched/sch_htb.c 2.6.24-rc8-mm1+/net/sched/sch_htb.c
--- 2.6.24-rc8-mm1-/net/sched/sch_htb.c 2008-01-19 17:54:49.000000000 +0100
+++ 2.6.24-rc8-mm1+/net/sched/sch_htb.c 2008-01-22 00:00:31.000000000 +0100
@@ -127,6 +127,7 @@ struct htb_class {
int prio; /* For parent to leaf return possible here */
int quantum; /* we do backup. Finally full replacement */
/* of un.leaf originals should be done. */
+ unsigned long gen_estimator; /* ngen_new_estimator() data */
};
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
@@ -1195,7 +1196,7 @@ static void htb_destroy_class(struct Qdi
BUG_TRAP(cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ ngen_kill_estimator(&cl->gen_estimator);
qdisc_put_rtab(cl->rate);
qdisc_put_rtab(cl->ceil);
@@ -1348,9 +1349,10 @@ static int htb_change_class(struct Qdisc
if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
goto failure;
- gen_new_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev->queue_lock,
- tca[TCA_RATE-1] ? : &est.rta);
+ ngen_new_estimator(&cl->bstats, &cl->rate_est,
+ &sch->dev->queue_lock,
+ tca[TCA_RATE-1] ? : &est.rta,
+ &cl->gen_estimator);
cl->refcnt = 1;
INIT_LIST_HEAD(&cl->sibling);
INIT_HLIST_NODE(&cl->hlist);
@@ -1404,9 +1406,10 @@ static int htb_change_class(struct Qdisc
parent ? &parent->children : &q->root);
} else {
if (tca[TCA_RATE-1])
- gen_replace_estimator(&cl->bstats, &cl->rate_est,
- &sch->dev->queue_lock,
- tca[TCA_RATE-1]);
+ ngen_replace_estimator(&cl->bstats, &cl->rate_est,
+ &sch->dev->queue_lock,
+ tca[TCA_RATE-1],
+ &cl->gen_estimator);
sch_tree_lock(sch);
}
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-22 7:21 ` Jarek Poplawski
2008-01-22 7:26 ` Jarek Poplawski
@ 2008-01-22 11:42 ` jamal
2008-01-22 12:29 ` Jarek Poplawski
1 sibling, 1 reply; 13+ messages in thread
From: jamal @ 2008-01-22 11:42 UTC (permalink / raw)
To: Jarek Poplawski; +Cc: David Miller, netdev, slavon, kaber
On Tue, 2008-22-01 at 08:21 +0100, Jarek Poplawski wrote:
> On 22-01-2008 01:29, David Miller wrote:
> ...
> > Fix this right, make a structure like:
> >
> > struct kernel_gnet_stats_rate_est {
> > struct gnet_stats_rate_est est;
> > void *gen_estimator;
> > }
> >
> > And update all the code as needed.
>
> Thanks!
> I'll try this...
Jarek,
That looks different from the suggestion from Dave.
May i throw in another bone? Theoretically i can see why it would be a
really bad idea to walk 50K estimators every time you delete one - which
is horrible if you are trying to destroy the say 50K of them and gets
worse as the number of schedulers with 50K classes goes up.
But i am wondering why a simpler list couldnt be walked, meaning:
In gen_kill_estimator(), instead of:
for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
Would deriving a better initial index be a big improvement?
for (e = elist[est->interval].list; e; e = e->next) {
cheers,
jamal
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-22 11:42 ` jamal
@ 2008-01-22 12:29 ` Jarek Poplawski
2008-01-22 13:54 ` jamal
0 siblings, 1 reply; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-22 12:29 UTC (permalink / raw)
To: jamal; +Cc: David Miller, netdev, slavon, kaber
On Tue, Jan 22, 2008 at 06:42:07AM -0500, jamal wrote:
...
> Jarek,
>
> That looks different from the suggestion from Dave.
Hmm..., I'm not sure you mean my or your suggestion here, but you
are right anyway...
> May i throw in another bone? Theoretically i can see why it would be a
> really bad idea to walk 50K estimators every time you delete one - which
> is horrible if you are trying to destroy the say 50K of them and gets
> worse as the number of schedulers with 50K classes goes up.
>
> But i am wondering why a simpler list couldnt be walked, meaning:
>
> In gen_kill_estimator(), instead of:
>
> for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
>
> Would deriving a better initial index be a big improvement?
> for (e = elist[est->interval].list; e; e = e->next) {
Maybe I miss something, but there still could be a lot of this walking
and IMHO any such longer waiting with BHs disabled is hard to accept
with current memory sizes and low-latencies prices. And currently time
seems to be even more precious here: RCU can't even free any
gen_estimator memory during such large qdisc with classes deletion.
Thanks,
Jarek P.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-22 12:29 ` Jarek Poplawski
@ 2008-01-22 13:54 ` jamal
2008-01-22 19:22 ` Jarek Poplawski
0 siblings, 1 reply; 13+ messages in thread
From: jamal @ 2008-01-22 13:54 UTC (permalink / raw)
To: Jarek Poplawski; +Cc: David Miller, netdev, slavon, kaber
On Tue, 2008-22-01 at 13:29 +0100, Jarek Poplawski wrote:
> On Tue, Jan 22, 2008 at 06:42:07AM -0500, jamal wrote:
> ...
> > Jarek,
> >
> > That looks different from the suggestion from Dave.
>
> Hmm..., I'm not sure you mean my or your suggestion here, but you
> are right anyway...
Your idea to grab a pointer to the estimator so you can quickly find it
upon destruction is a good one.
The challenge was not to break the ABI to user space.
Dave suggested to use a different struct for the kernel side and
maintain the user one as is[1]. Your patch didnt do this, hence my
statement;->
> Maybe I miss something, but there still could be a lot of this walking
Indeed, that is possible in the case of many estimators configured with
the same interval - because they will all fall in the same table bucket
and the idx is not that useful to begin with.
I was wrong given the nature of interval - the majority of the users
will have an estimator interval of say 1 sec which will put everything
in one bucket still.
We could introduce a proper index that will allow proper distribution
and have that stored by the class. I am not sure i made sense.
But you are coding - and your idea sounds better.
cheers,
jamal
[1] This is _not uncommon_ (note the usage of double negation here for
emphasis;->) technique actually; ones that go further for example can be
found all over the net/sched code (struct tcf_police vs tc_police) etc.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3 v2][NET] gen_estimator: faster gen_kill_estimator
2008-01-22 13:54 ` jamal
@ 2008-01-22 19:22 ` Jarek Poplawski
0 siblings, 0 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-22 19:22 UTC (permalink / raw)
To: jamal; +Cc: David Miller, netdev, slavon, kaber
On Tue, Jan 22, 2008 at 08:54:28AM -0500, jamal wrote:
> On Tue, 2008-22-01 at 13:29 +0100, Jarek Poplawski wrote:
> > On Tue, Jan 22, 2008 at 06:42:07AM -0500, jamal wrote:
> > ...
> > > Jarek,
> > >
> > > That looks different from the suggestion from Dave.
> >
> > Hmm..., I'm not sure you mean my or your suggestion here, but you
> > are right anyway...
>
> Your idea to grab a pointer to the estimator so you can quickly find it
> upon destruction is a good one.
Let's say it's quite common in the kernel type of idea...
> The challenge was not to break the ABI to user space.
> Dave suggested to use a different struct for the kernel side and
> maintain the user one as is[1]. Your patch didnt do this, hence my
> statement;->
Sure! David's idea is very good, but before reading his message I
decided to try something 'safer'. I simply don't know these structures
like you or David, and after these mistakes at the beginning, I tried
to avoid the next. But now I see it shouldn't be so hard, and I'll do
this David's way, I hope!
> > Maybe I miss something, but there still could be a lot of this walking
>
> Indeed, that is possible in the case of many estimators configured with
> the same interval - because they will all fall in the same table bucket
> and the idx is not that useful to begin with.
>
> I was wrong given the nature of interval - the majority of the users
> will have an estimator interval of say 1 sec which will put everything
> in one bucket still.
> We could introduce a proper index that will allow proper distribution
> and have that stored by the class. I am not sure i made sense.
> But you are coding - and your idea sounds better.
As a matter of fact, I've considered yesterday some additional hash
table too, but wasn't sure it's worth of the savings. It seems there
should be considered optimization of these estimator structures yet.
But since there are now a few similar reports about mysterious
problems during deletion of large qdisc, it would be nice to remove
main suspects around this at any price...
Many thanks for your concern with this! (And, if miss something again,
don't be afraid to make it straight & clear; I really prefer to know
the truth about my mistakes, than polite silence.)
Regards,
Jarek P.
> [1] This is _not uncommon_ (note the usage of double negation here for
> emphasis;->) technique actually; ones that go further for example can be
> found all over the net/sched code (struct tcf_police vs tc_police) etc.
I think David's points were very clear and of course right!; it seems now
that only my 'usual' way of friendly joking about this could be more
challenge than any double-negation and turns friendly fire instead...
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v3][NET] gen_estimator: faster gen_kill_estimator
2008-01-22 0:29 ` David Miller
2008-01-22 7:21 ` Jarek Poplawski
@ 2008-01-25 22:00 ` Jarek Poplawski
1 sibling, 0 replies; 13+ messages in thread
From: Jarek Poplawski @ 2008-01-25 22:00 UTC (permalink / raw)
To: David Miller; +Cc: netdev, slavon, kaber, hadi, shemminger
On Mon, Jan 21, 2008 at 04:29:18PM -0800, David Miller wrote:
...
> Fix this right, make a structure like:
>
> struct kernel_gnet_stats_rate_est {
> struct gnet_stats_rate_est est;
> void *gen_estimator;
> }
>
> And update all the code as needed.
Hi,
Here is a patch which uses this idea, I hope mostly as expected. This
new structure has to replace the older one in most kernel places. I was
uncertain of gnet_stats_copy_rate_est(), the change wasn't necessary here
but could be considered for uniformity reasons. Currently it's unchanged.
Of course, I admit it's not the last version, and any suggestions are
welcomed (including updating to some later version - I see there is some
queue waiting with sched changes).
Regards,
Jarek P.
--------------> (take 3)
gen_kill_estimator() is called during qdisc_destroy() with BHs disabled,
and each time does searching for each list member. This can block soft
interrupts for quite a long time when many classes are used.
This patch changes this by storing pointers to internal gen_estimator
structures. New kernel_gnet_stats_rate_est structure is created for this
as a wrapper around gnet_stats_rate_est being a part of userspace API.
Respectively all callers of gen_estimator functions, and structures used
by them to store rate_est are modified.
This method removes currently possible registering in gen_estimator the
same structures more than once, but it isn't used after all.
Thanks to David Miller for pointing errors in first versions of this patch
and for suggesting proper solution.
Reported-by: Badalian Vyacheslav <slavon@bigtelecom.ru>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
---
Documentation/networking/gen_stats.txt | 10 +++--
include/linux/gen_stats.h | 13 ++++++
include/net/act_api.h | 4 +-
include/net/gen_stats.h | 8 ++--
include/net/netfilter/xt_rateest.h | 2 +-
include/net/sch_generic.h | 2 +-
net/core/gen_estimator.c | 65 ++++++++++++++------------------
net/netfilter/xt_RATEEST.c | 4 +-
net/netfilter/xt_rateest.c | 4 +-
net/sched/act_api.c | 7 +--
net/sched/act_police.c | 7 +--
net/sched/sch_api.c | 6 +-
net/sched/sch_cbq.c | 10 ++--
net/sched/sch_generic.c | 2 +-
net/sched/sch_hfsc.c | 10 ++--
net/sched/sch_htb.c | 12 +++---
16 files changed, 85 insertions(+), 81 deletions(-)
diff --git a/Documentation/networking/gen_stats.txt b/Documentation/networking/gen_stats.txt
index 70e6275..fc45f94 100644
--- a/Documentation/networking/gen_stats.txt
+++ b/Documentation/networking/gen_stats.txt
@@ -6,10 +6,12 @@ Statistic counters are grouped into structs:
Struct TLV type Description
----------------------------------------------------------------------
gnet_stats_basic TCA_STATS_BASIC Basic statistics
-gnet_stats_rate_est TCA_STATS_RATE_EST Rate estimator
+gnet_stats_rate_est* TCA_STATS_RATE_EST Rate estimator
gnet_stats_queue TCA_STATS_QUEUE Queue statistics
none TCA_STATS_APP Application specific
+* From v2.6.25 kernel uses internal struct kernel_gnet_stats_rate_est
+ for gen_estimator functions calls.
Collecting:
-----------
@@ -106,9 +108,9 @@ In the kernel when setting up:
From now on, every time you dump my_rate_est_stats it will contain
up-to-date info.
-Once you are done, call gen_kill_estimator(my_basicstats,
-my_rate_est_stats) Make sure that my_basicstats and my_rate_est_stats
-are still valid (i.e still exist) at the time of making this call.
+Once you are done, call gen_kill_estimator(my_rate_est_stats). Make
+sure that my_rate_est_stats is still valid (i.e still exists) at the
+time of making this call.
Authors:
diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
index 13f4e74..12b76d2 100644
--- a/include/linux/gen_stats.h
+++ b/include/linux/gen_stats.h
@@ -35,6 +35,19 @@ struct gnet_stats_rate_est
__u32 pps;
};
+#ifdef __KERNEL__
+/**
+ * struct kernel_gnet_stats_rate_est - rate estimator wrapper
+ * @est: rate estimator
+ * @gen_estimator: internal data
+ */
+struct kernel_gnet_stats_rate_est
+{
+ struct gnet_stats_rate_est est;
+ void *gen_estimator;
+};
+#endif
+
/**
* struct gnet_stats_queue - queuing statistics
* @qlen: queue length
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c5ac61a..c02ce27 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -18,7 +18,7 @@ struct tcf_common {
struct tcf_t tcfc_tm;
struct gnet_stats_basic tcfc_bstats;
struct gnet_stats_queue tcfc_qstats;
- struct gnet_stats_rate_est tcfc_rate_est;
+ struct kernel_gnet_stats_rate_est tcfc_k_rate_est;
spinlock_t tcfc_lock;
};
#define tcf_next common.tcfc_next
@@ -30,7 +30,7 @@ struct tcf_common {
#define tcf_tm common.tcfc_tm
#define tcf_bstats common.tcfc_bstats
#define tcf_qstats common.tcfc_qstats
-#define tcf_rate_est common.tcfc_rate_est
+#define tcf_k_rate_est common.tcfc_k_rate_est
#define tcf_lock common.tcfc_lock
struct tcf_police {
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 8cd8185..af131f6 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -38,12 +38,12 @@ extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
extern int gnet_stats_finish_copy(struct gnet_dump *d);
extern int gen_new_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est,
+ struct kernel_gnet_stats_rate_est *k_rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est);
+extern void gen_kill_estimator(struct kernel_gnet_stats_rate_est *k_rate_est);
+
extern int gen_replace_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est,
+ struct kernel_gnet_stats_rate_est *k_rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
#endif
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 65d594d..60aca83 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -7,7 +7,7 @@ struct xt_rateest {
unsigned int refcnt;
spinlock_t lock;
struct gnet_estimator params;
- struct gnet_stats_rate_est rstats;
+ struct kernel_gnet_stats_rate_est k_rstats;
struct gnet_stats_basic bstats;
};
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index ab502ec..ea6f5f4 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -42,7 +42,7 @@ struct Qdisc
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est rate_est;
+ struct kernel_gnet_stats_rate_est k_rate_est;
spinlock_t *stats_lock;
struct rcu_head q_rcu;
int (*reshape_fail)(struct sk_buff *skb,
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 57abe82..986fc84 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -142,22 +142,23 @@ skip:
/**
* gen_new_estimator - create a new rate estimator
* @bstats: basic statistics
- * @rate_est: rate estimator statistics
+ * @k_rate_est: rate estimator statistics and internal data
* @stats_lock: statistics lock
* @opt: rate estimator configuration TLV
*
- * Creates a new rate estimator with &bstats as source and &rate_est
+ * Creates a new rate estimator with &bstats as source and &k_rate_est->est
* as destination. A new timer with the interval specified in the
* configuration TLV is created. Upon each interval, the latest statistics
* will be read from &bstats and the estimated rate will be stored in
- * &rate_est with the statistics lock grabed during this period.
+ * &k_rate_est->est with the statistics lock grabed during this period.
+ * &k_rate_est also stores internal data required for gen_kill_estimator.
*
* Returns 0 on success or a negative error code.
*
* NOTE: Called under rtnl_mutex
*/
int gen_new_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est,
+ struct kernel_gnet_stats_rate_est *k_rate_est,
spinlock_t *stats_lock,
struct nlattr *opt)
{
@@ -172,18 +173,19 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
return -EINVAL;
est = kzalloc(sizeof(*est), GFP_KERNEL);
+ k_rate_est->gen_estimator = est;
if (est == NULL)
return -ENOBUFS;
idx = parm->interval + 2;
est->bstats = bstats;
- est->rate_est = rate_est;
+ est->rate_est = &k_rate_est->est;
est->stats_lock = stats_lock;
est->ewma_log = parm->ewma_log;
est->last_bytes = bstats->bytes;
- est->avbps = rate_est->bps<<5;
+ est->avbps = k_rate_est->est.bps << 5;
est->last_packets = bstats->packets;
- est->avpps = rate_est->pps<<10;
+ est->avpps = k_rate_est->est.pps << 10;
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
@@ -206,44 +208,33 @@ static void __gen_kill_estimator(struct rcu_head *head)
/**
* gen_kill_estimator - remove a rate estimator
- * @bstats: basic statistics
- * @rate_est: rate estimator statistics
+ * @k_rate_est: rate estimator statistics and internal data
*
- * Removes the rate estimator specified by &bstats and &rate_est
- * and deletes the timer.
+ * Removes the rate estimator specified by &k_rate_est.
*
* NOTE: Called under rtnl_mutex
*/
-void gen_kill_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est)
+void gen_kill_estimator(struct kernel_gnet_stats_rate_est *k_rate_est)
{
- int idx;
- struct gen_estimator *e, *n;
-
- for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
-
- /* Skip non initialized indexes */
- if (!elist[idx].timer.function)
- continue;
-
- list_for_each_entry_safe(e, n, &elist[idx].list, list) {
- if (e->rate_est != rate_est || e->bstats != bstats)
- continue;
-
- write_lock_bh(&est_lock);
- e->bstats = NULL;
- write_unlock_bh(&est_lock);
-
- list_del_rcu(&e->list);
- call_rcu(&e->e_rcu, __gen_kill_estimator);
- }
+ if (k_rate_est && k_rate_est->gen_estimator) {
+ struct gen_estimator *e;
+
+ e = (struct gen_estimator *)k_rate_est->gen_estimator;
+ k_rate_est->gen_estimator = NULL;
+
+ write_lock_bh(&est_lock);
+ e->bstats = NULL;
+ write_unlock_bh(&est_lock);
+
+ list_del_rcu(&e->list);
+ call_rcu(&e->e_rcu, __gen_kill_estimator);
}
}
/**
* gen_replace_estimator - replace rate estimator configuration
* @bstats: basic statistics
- * @rate_est: rate estimator statistics
+ * @k_rate_est: rate estimator statistics and internal data
* @stats_lock: statistics lock
* @opt: rate estimator configuration TLV
*
@@ -253,11 +244,11 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
* Returns 0 on success or a negative error code.
*/
int gen_replace_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est,
+ struct kernel_gnet_stats_rate_est *k_rate_est,
spinlock_t *stats_lock, struct nlattr *opt)
{
- gen_kill_estimator(bstats, rate_est);
- return gen_new_estimator(bstats, rate_est, stats_lock, opt);
+ gen_kill_estimator(k_rate_est);
+ return gen_new_estimator(bstats, k_rate_est, stats_lock, opt);
}
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 24c73ba..ac5a4ec 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -63,7 +63,7 @@ void xt_rateest_put(struct xt_rateest *est)
mutex_lock(&xt_rateest_mutex);
if (--est->refcnt == 0) {
hlist_del(&est->list);
- gen_kill_estimator(&est->bstats, &est->rstats);
+ gen_kill_estimator(&est->k_rstats);
kfree(est);
}
mutex_unlock(&xt_rateest_mutex);
@@ -134,7 +134,7 @@ xt_rateest_tg_checkentry(const char *tablename,
cfg.est.interval = info->interval;
cfg.est.ewma_log = info->ewma_log;
- if (gen_new_estimator(&est->bstats, &est->rstats, &est->lock,
+ if (gen_new_estimator(&est->bstats, &est->k_rstats, &est->lock,
&cfg.opt) < 0)
goto err2;
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index fdb86a5..e2ba9c7 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -29,7 +29,7 @@ static bool xt_rateest_mt(const struct sk_buff *skb,
bool ret = true;
spin_lock_bh(&info->est1->lock);
- r = &info->est1->rstats;
+ r = &info->est1->k_rstats.est;
if (info->flags & XT_RATEEST_MATCH_DELTA) {
bps1 = info->bps1 >= r->bps ? info->bps1 - r->bps : 0;
pps1 = info->pps1 >= r->pps ? info->pps1 - r->pps : 0;
@@ -44,7 +44,7 @@ static bool xt_rateest_mt(const struct sk_buff *skb,
pps2 = info->pps2;
} else {
spin_lock_bh(&info->est2->lock);
- r = &info->est2->rstats;
+ r = &info->est2->k_rstats.est;
if (info->flags & XT_RATEEST_MATCH_DELTA) {
bps2 = info->bps2 >= r->bps ? info->bps2 - r->bps : 0;
pps2 = info->pps2 >= r->pps ? info->pps2 - r->pps : 0;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index ebd21d2..edfb238 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -34,8 +34,7 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
write_lock_bh(hinfo->lock);
*p1p = p->tcfc_next;
write_unlock_bh(hinfo->lock);
- gen_kill_estimator(&p->tcfc_bstats,
- &p->tcfc_rate_est);
+ gen_kill_estimator(&p->tcfc_k_rate_est);
kfree(p);
return;
}
@@ -226,7 +225,7 @@ struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, struct tc_acti
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
if (est)
- gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
+ gen_new_estimator(&p->tcfc_bstats, &p->tcfc_k_rate_est,
&p->tcfc_lock, est);
a->priv = (void *) p;
return p;
@@ -605,7 +604,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
goto errout;
if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
- gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
+ gnet_stats_copy_rate_est(&d, &h->tcf_k_rate_est.est) < 0 ||
gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
goto errout;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 3af5759..c0ca461 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -105,8 +105,7 @@ static void tcf_police_destroy(struct tcf_police *p)
write_lock_bh(&police_lock);
*p1p = p->tcf_next;
write_unlock_bh(&police_lock);
- gen_kill_estimator(&p->tcf_bstats,
- &p->tcf_rate_est);
+ gen_kill_estimator(&p->tcf_k_rate_est);
if (p->tcfp_R_tab)
qdisc_put_rtab(p->tcfp_R_tab);
if (p->tcfp_P_tab)
@@ -215,7 +214,7 @@ override:
*(u32*)nla_data(tb[TCA_POLICE_AVRATE]);
if (est)
gen_replace_estimator(&police->tcf_bstats,
- &police->tcf_rate_est,
+ &police->tcf_k_rate_est,
&police->tcf_lock, est);
spin_unlock_bh(&police->tcf_lock);
@@ -272,7 +271,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
police->tcf_bstats.packets++;
if (police->tcfp_ewma_rate &&
- police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
+ police->tcf_k_rate_est.est.bps >= police->tcfp_ewma_rate) {
police->tcf_qstats.overlimits++;
spin_unlock(&police->tcf_lock);
return police->tcf_action;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7abb028..87b8113 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -511,7 +511,7 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
if (tca[TCA_RATE]) {
- err = gen_new_estimator(&sch->bstats, &sch->rate_est,
+ err = gen_new_estimator(&sch->bstats, &sch->k_rate_est,
sch->stats_lock,
tca[TCA_RATE]);
if (err) {
@@ -553,7 +553,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
return err;
}
if (tca[TCA_RATE])
- gen_replace_estimator(&sch->bstats, &sch->rate_est,
+ gen_replace_estimator(&sch->bstats, &sch->k_rate_est,
sch->stats_lock, tca[TCA_RATE]);
return 0;
}
@@ -847,7 +847,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
goto nla_put_failure;
if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
- gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(&d, &q->k_rate_est.est) < 0 ||
gnet_stats_copy_queue(&d, &q->qstats) < 0)
goto nla_put_failure;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 5c8667e..cbf24bd 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -131,7 +131,7 @@ struct cbq_class
psched_time_t penalized;
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est rate_est;
+ struct kernel_gnet_stats_rate_est k_rate_est;
struct tc_cbq_xstats xstats;
struct tcf_proto *filter_list;
@@ -1627,7 +1627,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
cl->xstats.undertime = cl->undertime - q->now;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->k_rate_est.est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1;
@@ -1698,7 +1698,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
tcf_destroy_chain(cl->filter_list);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->k_rate_est);
if (cl != &q->link)
kfree(cl);
}
@@ -1844,7 +1844,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
sch_tree_unlock(sch);
if (tca[TCA_RATE])
- gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ gen_replace_estimator(&cl->bstats, &cl->k_rate_est,
&sch->dev->queue_lock,
tca[TCA_RATE]);
return 0;
@@ -1932,7 +1932,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
sch_tree_unlock(sch);
if (tca[TCA_RATE])
- gen_new_estimator(&cl->bstats, &cl->rate_est,
+ gen_new_estimator(&cl->bstats, &cl->k_rate_est,
&sch->dev->queue_lock, tca[TCA_RATE]);
*arg = (unsigned long)cl;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 10b5c08..37206e7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -509,7 +509,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
return;
list_del(&qdisc->list);
- gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+ gen_kill_estimator(&qdisc->k_rate_est);
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 4e6a164..31b9752 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -118,7 +118,7 @@ struct hfsc_class
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est rate_est;
+ struct kernel_gnet_stats_rate_est k_rate_est;
unsigned int level; /* class level in hierarchy */
struct tcf_proto *filter_list; /* filter list */
unsigned int filter_cnt; /* filter count */
@@ -1051,7 +1051,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
sch_tree_unlock(sch);
if (tca[TCA_RATE])
- gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ gen_replace_estimator(&cl->bstats, &cl->k_rate_est,
&sch->dev->queue_lock,
tca[TCA_RATE]);
return 0;
@@ -1107,7 +1107,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
sch_tree_unlock(sch);
if (tca[TCA_RATE])
- gen_new_estimator(&cl->bstats, &cl->rate_est,
+ gen_new_estimator(&cl->bstats, &cl->k_rate_est,
&sch->dev->queue_lock, tca[TCA_RATE]);
*arg = (unsigned long)cl;
return 0;
@@ -1120,7 +1120,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
tcf_destroy_chain(cl->filter_list);
qdisc_destroy(cl->qdisc);
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->k_rate_est);
if (cl != &q->root)
kfree(cl);
}
@@ -1371,7 +1371,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.rtwork = cl->cl_cumul;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->k_rate_est.est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 3b3ff64..cee8c01 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -54,7 +54,7 @@
#define HTB_HSIZE 16 /* classid hash size */
#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
-#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x30012 /* major must be matched with number suplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
@@ -73,7 +73,7 @@ struct htb_class {
u32 classid;
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est rate_est;
+ struct kernel_gnet_stats_rate_est k_rate_est;
struct tc_htb_xstats xstats; /* our special stats */
int refcnt; /* usage count of this class */
@@ -1104,7 +1104,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
cl->xstats.ctokens = cl->ctokens;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->k_rate_est.est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1;
@@ -1195,7 +1195,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
BUG_TRAP(cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
- gen_kill_estimator(&cl->bstats, &cl->rate_est);
+ gen_kill_estimator(&cl->k_rate_est);
qdisc_put_rtab(cl->rate);
qdisc_put_rtab(cl->ceil);
@@ -1348,7 +1348,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
goto failure;
- gen_new_estimator(&cl->bstats, &cl->rate_est,
+ gen_new_estimator(&cl->bstats, &cl->k_rate_est,
&sch->dev->queue_lock,
tca[TCA_RATE] ? : &est.nla);
cl->refcnt = 1;
@@ -1404,7 +1404,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
parent ? &parent->children : &q->root);
} else {
if (tca[TCA_RATE])
- gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ gen_replace_estimator(&cl->bstats, &cl->k_rate_est,
&sch->dev->queue_lock,
tca[TCA_RATE]);
sch_tree_lock(sch);
^ permalink raw reply related [flat|nested] 13+ messages in thread
end of thread, other threads:[~2008-01-25 21:58 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-01-20 23:46 [PATCH 1/3][NET] gen_estimator: faster gen_kill_estimator Jarek Poplawski
2008-01-21 10:35 ` David Miller
2008-01-21 10:56 ` Jarek Poplawski
2008-01-21 22:31 ` [PATCH 1/3 v2][NET] " Jarek Poplawski
2008-01-21 22:41 ` Jarek Poplawski
2008-01-22 0:29 ` David Miller
2008-01-22 7:21 ` Jarek Poplawski
2008-01-22 7:26 ` Jarek Poplawski
2008-01-22 11:42 ` jamal
2008-01-22 12:29 ` Jarek Poplawski
2008-01-22 13:54 ` jamal
2008-01-22 19:22 ` Jarek Poplawski
2008-01-25 22:00 ` [PATCH v3][NET] " Jarek Poplawski
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).