From: Patrick McHardy <kaber@trash.net>
To: Michal Vanco <vanco@satro.sk>
Cc: netdev@oss.sgi.com
Subject: Re: 2.6.11 on AMD64 traps
Date: Thu, 10 Mar 2005 00:57:56 +0100
Message-ID: <422F8D84.8010103@trash.net>
In-Reply-To: <200503100050.05582.vanco@satro.sk>
[-- Attachment #1: Type: text/plain, Size: 942 bytes --]
Michal Vanco wrote:
> On Thursday 10 March 2005 00:27, Patrick McHardy wrote:
>
>>And here it is. Could you redo your timing-test with this patch please?
>
> Wonderful:
>
> # time ip route show | wc -l; time netstat -rn | wc -l
> 155991
>
> real 0m1.110s
> user 0m0.441s
> sys 0m1.100s
> 155991
>
> real 0m1.435s
> user 0m1.026s
> sys 0m0.436s
>
> It seems that netlink is still a little bit faster than /proc, but it
> doesn't really matter in a case like this.
The system time is higher with netlink. It also repeatedly needs to
skip over the entries; it's just not as bad as with seq_file because
more entries are dumped at once. Dumping over netlink could be improved
in a similar way, but there is no room in netlink_callback->args for the
pointers and the genid.
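For illustration, here is a minimal userspace sketch of the approach the
attached patch takes (hypothetical names, plain C, not kernel code): the
iterator caches the last position it handed out together with a generation
counter, so a sequential scan resumes where it left off instead of
rewalking the list from the start on every lookup.

/*
 * Sketch only: a reader repeatedly asked for entry number 'pos' caches
 * its last position plus a generation counter, turning a sequential
 * scan from O(n^2) into O(n).  Writers bump list_genid on every
 * modification, which invalidates the cached position.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int value;
};

struct iter_state {
	struct entry *head;	/* list being dumped */
	struct entry *cur;	/* cached current entry */
	long pos;		/* index of 'cur' in the list */
	unsigned int genid;	/* copy of list_genid when cached */
	int valid;		/* cache holds a usable position */
};

static unsigned int list_genid;	/* bumped by writers on every change */

static struct entry *get_first(struct iter_state *it)
{
	it->cur = it->head;
	it->pos = 0;
	it->genid = list_genid;
	it->valid = 1;
	return it->cur;
}

static struct entry *get_next(struct iter_state *it)
{
	if (it->cur)
		it->cur = it->cur->next;
	it->pos++;
	return it->cur;
}

/* Return the entry at index 'pos', resuming from the cache if possible. */
static struct entry *get_idx(struct iter_state *it, long pos)
{
	struct entry *e;

	if (it->valid && pos >= it->pos && it->genid == list_genid) {
		e = it->cur;		/* fast path: resume from cached spot */
		pos -= it->pos;
	} else {
		e = get_first(it);	/* slow path: rewalk from the head */
	}
	while (e && pos--)
		e = get_next(it);
	return e;
}

int main(void)
{
	struct entry *head = NULL;
	struct iter_state it = { 0 };
	long i;

	for (i = 4; i >= 0; i--) {	/* build the list 0,1,2,3,4 */
		struct entry *e = malloc(sizeof(*e));
		e->value = (int)i;
		e->next = head;
		head = e;
	}
	it.head = head;

	/* Sequential lookups, as a seq_file reader would issue them. */
	for (i = 0; i < 5; i++)
		printf("entry %ld = %d\n", i, get_idx(&it, i)->value);
	return 0;			/* sketch: list intentionally not freed */
}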
> Will this patch be included in future kernels?
I hope so. Dave, are you fine with making /proc more efficient than
netlink? :)
Regards
Patrick
[-- Attachment #2: x --]
[-- Type: text/plain, Size: 3012 bytes --]
# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
# 2005/03/10 00:07:21+01:00 kaber@coreworks.de
# [IPV4]: Speed up sequential reading of /proc/net/route
#
# Caching the current position reduces complexity from O(n^2)
# to O(n).
#
# Signed-off-by: Patrick McHardy <kaber@trash.net>
#
# net/ipv4/fib_hash.c
# 2005/03/10 00:07:11+01:00 kaber@coreworks.de +22 -1
# [IPV4]: Speed up sequential reading of /proc/net/route
#
# Caching the current position reduces complexity from O(n^2)
# to O(n).
#
# Signed-off-by: Patrick McHardy <kaber@trash.net>
#
diff -Nru a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
--- a/net/ipv4/fib_hash.c 2005-03-10 00:07:43 +01:00
+++ b/net/ipv4/fib_hash.c 2005-03-10 00:07:43 +01:00
@@ -93,6 +93,7 @@
}
static DEFINE_RWLOCK(fib_hash_lock);
+static unsigned int fib_hash_genid;
#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
@@ -181,6 +182,7 @@
fz->fz_hashmask = new_hashmask;
fz->fz_divisor = new_divisor;
fn_rebuild_zone(fz, old_ht, old_divisor);
+ fib_hash_genid++;
write_unlock_bh(&fib_hash_lock);
fz_hash_free(old_ht, old_divisor);
@@ -236,6 +238,7 @@
table->fn_zones[i]->fz_next = fz;
}
table->fn_zones[z] = fz;
+ fib_hash_genid++;
write_unlock_bh(&fib_hash_lock);
return fz;
}
@@ -451,6 +454,7 @@
fa->fa_scope = r->rtm_scope;
state = fa->fa_state;
fa->fa_state &= ~FA_S_ACCESSED;
+ fib_hash_genid++;
write_unlock_bh(&fib_hash_lock);
fib_release_info(fi_drop);
@@ -515,6 +519,7 @@
fib_insert_node(fz, new_f);
list_add_tail(&new_fa->fa_list,
(fa ? &fa->fa_list : &f->fn_alias));
+ fib_hash_genid++;
write_unlock_bh(&fib_hash_lock);
if (new_f)
@@ -600,6 +605,7 @@
hlist_del(&f->fn_hash);
kill_fn = 1;
}
+ fib_hash_genid++;
write_unlock_bh(&fib_hash_lock);
if (fa->fa_state & FA_S_ACCESSED)
@@ -637,6 +643,7 @@
hlist_del(&f->fn_hash);
kill_f = 1;
}
+ fib_hash_genid++;
write_unlock_bh(&fib_hash_lock);
fn_free_alias(fa);
@@ -801,6 +808,9 @@
struct hlist_head *hash_head;
struct fib_node *fn;
struct fib_alias *fa;
+ loff_t pos;
+ unsigned int genid;
+ int valid;
};
static struct fib_alias *fib_get_first(struct seq_file *seq)
@@ -812,6 +822,9 @@
iter->hash_head = NULL;
iter->fn = NULL;
iter->fa = NULL;
+ iter->pos = 0;
+ iter->genid = fib_hash_genid;
+ iter->valid = 1;
for (iter->zone = table->fn_zone_list; iter->zone;
iter->zone = iter->zone->fz_next) {
@@ -916,12 +929,20 @@
}
}
out:
+ iter->pos++;
return fa;
}
static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
{
- struct fib_alias *fa = fib_get_first(seq);
+ struct fib_iter_state *iter = seq->private;
+ struct fib_alias *fa;
+
+ if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
+ fa = iter->fa;
+ pos -= iter->pos;
+ } else
+ fa = fib_get_first(seq);
if (fa)
while (pos && (fa = fib_get_next(seq)))
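For anyone who wants to reproduce the effect outside of the "ip route show"
vs. "netstat -rn" comparison above, a small userspace timing helper along
the following lines (hypothetical, not part of the patch) reads a /proc
file in fixed-size chunks; without the position cache, every read()
restarts the table walk, so small buffers on large tables expose the
quadratic cost.

/*
 * timeproc.c: time a full sequential read of a /proc file using a
 * configurable chunk size, e.g. "./timeproc /proc/net/route 1024".
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/proc/net/route";
	size_t bufsz = argc > 2 ? (size_t)atol(argv[2]) : 1024;
	char *buf = malloc(bufsz);
	struct timespec t0, t1;
	long long total = 0;
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || buf == NULL) {
		perror("open/malloc");
		return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &t0);
	while ((n = read(fd, buf, bufsz)) > 0)
		total += n;
	clock_gettime(CLOCK_MONOTONIC, &t1);
	printf("read %lld bytes in %.3f seconds (%zu-byte chunks)\n",
	       total,
	       (double)(t1.tv_sec - t0.tv_sec) +
	       (double)(t1.tv_nsec - t0.tv_nsec) / 1e9,
	       bufsz);
	close(fd);
	free(buf);
	return 0;
}

Build with "cc -O2 timeproc.c -o timeproc" (older glibc may need -lrt for
clock_gettime) and compare runs with different chunk sizes before and
after the patch.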