From: Stephen Hemminger <shemminger@osdl.org>
To: David Miller <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Subject: [RFC 2/7] neighbour: convert neighbour hash table to hlist
Date: Mon, 14 Aug 2006 14:20:06 -0700
Message-ID: <20060814212143.202090681@localhost.localdomain>
In-Reply-To: <20060814212004.606140865@localhost.localdomain>

[-- Attachment #1: hlist1.patch --]
[-- Type: text/plain, Size: 11819 bytes --]

Convert the neighbour table hash chains from the open-coded singly-linked
list (struct neighbour *next) to the hlist primitives from <linux/list.h>,
to make the later conversion to RCU easier.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>

---
 include/net/neighbour.h |    6 -
 net/core/neighbour.c    |  160 +++++++++++++++++++++++++-----------------------
 2 files changed, 88 insertions(+), 78 deletions(-)
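
For context (not part of the patch): below is a minimal, self-contained
userspace sketch of the hlist idiom the hash chains are converted to.
The helper names mirror <linux/list.h>, but the bodies are reimplemented
here for illustration, and the toy struct neigh with its key/refcnt
fields is made up. Note also that the in-kernel hlist_for_each_entry*()
macros used in the diff additionally take a struct hlist_node cursor
argument.

/*
 * Illustrative sketch only -- not the kernel code.  Shows hash buckets
 * of hlist_head, head insertion, and a deletion-safe walk, i.e. the
 * pattern neigh_forced_gc() uses after this patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

/* Insert at the head of a chain; pprev lets us delete an entry later
 * without walking the bucket to find its predecessor. */
static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* Unlink an entry given only the entry itself -- this replaces the old
 * "struct neighbour **np" back-pointer bookkeeping. */
static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Deletion-safe walk: cache ->next before the body may free pos. */
#define hlist_for_each_safe(pos, tmp, head) \
	for (pos = (head)->first; pos && (tmp = pos->next, 1); pos = tmp)

/* Toy stand-in for struct neighbour (hypothetical fields). */
struct neigh {
	struct hlist_node hlist;
	int key;
	int refcnt;
};

#define NBUCKETS 4

int main(void)
{
	struct hlist_head buckets[NBUCKETS] = { { 0 } };
	int i;

	/* Populate: hash on the key, chain into the bucket head. */
	for (i = 0; i < 8; i++) {
		struct neigh *n = calloc(1, sizeof(*n));
		n->key = i;
		n->refcnt = (i & 1);	/* odd keys stay referenced */
		hlist_add_head(&n->hlist, &buckets[i % NBUCKETS]);
	}

	/* Forced-GC style pass: drop unreferenced entries while walking. */
	for (i = 0; i < NBUCKETS; i++) {
		struct hlist_node *pos, *tmp;

		hlist_for_each_safe(pos, tmp, &buckets[i]) {
			struct neigh *n = hlist_entry(pos, struct neigh, hlist);

			if (n->refcnt == 0) {
				hlist_del(&n->hlist);
				free(n);
			}
		}
	}

	/* Show what survived. */
	for (i = 0; i < NBUCKETS; i++) {
		struct hlist_node *pos;

		for (pos = buckets[i].first; pos; pos = pos->next)
			printf("bucket %d: key %d\n", i,
			       hlist_entry(pos, struct neigh, hlist)->key);
	}
	return 0;
}

The point of hlist over the old ->next chain is the pprev back pointer:
an entry can be unlinked in O(1) given only the entry, which removes the
predecessor-pointer bookkeeping from every deletion site and matches the
shape the RCU hlist helpers expect.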

--- net-2.6.19.orig/include/net/neighbour.h
+++ net-2.6.19/include/net/neighbour.h
@@ -88,10 +88,10 @@ struct neigh_statistics
 
 struct neighbour
 {
-	struct neighbour	*next;
+	struct hlist_node	hlist;
 	struct neigh_table	*tbl;
 	struct neigh_parms	*parms;
-	struct net_device		*dev;
+	struct net_device	*dev;
 	unsigned long		used;
 	unsigned long		confirmed;
 	unsigned long		updated;
@@ -161,7 +161,7 @@ struct neigh_table
 	unsigned long		last_rand;
 	kmem_cache_t		*kmem_cachep;
 	struct neigh_statistics	*stats;
-	struct neighbour	**hash_buckets;
+	struct hlist_head	*hash_buckets;
 	unsigned int		hash_mask;
 	__u32			hash_rnd;
 	unsigned int		hash_chain_gc;
--- net-2.6.19.orig/net/core/neighbour.c
+++ net-2.6.19/net/core/neighbour.c
@@ -126,10 +126,11 @@ static int neigh_forced_gc(struct neigh_
 
 	write_lock_bh(&tbl->lock);
 	for (i = 0; i <= tbl->hash_mask; i++) {
-		struct neighbour *n, **np;
+		struct neighbour *n;
+		struct hlist_node *node, *tmp;
 
-		np = &tbl->hash_buckets[i];
-		while ((n = *np) != NULL) {
+		hlist_for_each_entry_safe(n, node, tmp,
+					  &tbl->hash_buckets[i], hlist) {
 			/* Neighbour record may be discarded if:
 			 * - nobody refers to it.
 			 * - it is not permanent
@@ -137,7 +138,7 @@ static int neigh_forced_gc(struct neigh_
 			write_lock(&n->lock);
 			if (atomic_read(&n->refcnt) == 1 &&
 			    !(n->nud_state & NUD_PERMANENT)) {
-				*np	= n->next;
+				hlist_del(&n->hlist);
 				n->dead = 1;
 				shrunk	= 1;
 				write_unlock(&n->lock);
@@ -145,7 +146,6 @@ static int neigh_forced_gc(struct neigh_
 				continue;
 			}
 			write_unlock(&n->lock);
-			np = &n->next;
 		}
 	}
 
@@ -181,14 +181,15 @@ static void neigh_flush_dev(struct neigh
 	int i;
 
 	for (i = 0; i <= tbl->hash_mask; i++) {
-		struct neighbour *n, **np = &tbl->hash_buckets[i];
+		struct hlist_node *node, *tmp;
+		struct neighbour *n;
 
-		while ((n = *np) != NULL) {
-			if (dev && n->dev != dev) {
-				np = &n->next;
+		hlist_for_each_entry_safe(n, node, tmp,
+					  &tbl->hash_buckets[i], hlist) {
+			if (dev && n->dev != dev)
 				continue;
-			}
-			*np = n->next;
+
+			hlist_del(&n->hlist);
 			write_lock(&n->lock);
 			neigh_del_timer(n);
 			n->dead = 1;
@@ -279,23 +280,20 @@ out_entries:
 	goto out;
 }
 
-static struct neighbour **neigh_hash_alloc(unsigned int entries)
+static struct hlist_head *neigh_hash_alloc(unsigned int entries)
 {
-	unsigned long size = entries * sizeof(struct neighbour *);
-	struct neighbour **ret;
+	unsigned long size = entries * sizeof(struct hlist_head);
 
-	if (size <= PAGE_SIZE) {
-		ret = kzalloc(size, GFP_ATOMIC);
-	} else {
-		ret = (struct neighbour **)
+	if (size <= PAGE_SIZE)
+		return kzalloc(size, GFP_ATOMIC);
+	else
+		return (struct hlist_head *)
 		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
-	}
-	return ret;
 }
 
-static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
+static void neigh_hash_free(struct hlist_head *hash, unsigned int entries)
 {
-	unsigned long size = entries * sizeof(struct neighbour *);
+	unsigned long size = entries * sizeof(struct hlist_head);
 
 	if (size <= PAGE_SIZE)
 		kfree(hash);
@@ -305,7 +303,7 @@ static void neigh_hash_free(struct neigh
 
 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
 {
-	struct neighbour **new_hash, **old_hash;
+	struct hlist_head *new_hash, *old_hash;
 	unsigned int i, new_hash_mask, old_entries;
 
 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
@@ -321,16 +319,15 @@ static void neigh_hash_grow(struct neigh
 
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 	for (i = 0; i < old_entries; i++) {
-		struct neighbour *n, *next;
+		struct neighbour *n;
+		struct hlist_node *node, *tmp;
 
-		for (n = old_hash[i]; n; n = next) {
+		hlist_for_each_entry_safe(n, node, tmp, &old_hash[i], hlist) {
 			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
 
 			hash_val &= new_hash_mask;
-			next = n->next;
-
-			n->next = new_hash[hash_val];
-			new_hash[hash_val] = n;
+			hlist_del(&n->hlist);
+			hlist_add_head(&n->hlist, &new_hash[hash_val]);
 		}
 	}
 	tbl->hash_buckets = new_hash;
@@ -343,19 +340,22 @@ struct neighbour *neigh_lookup(struct ne
 			       struct net_device *dev)
 {
 	struct neighbour *n;
+	struct hlist_node *tmp;
 	int key_len = tbl->key_len;
 	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
 	
 	NEIGH_CACHE_STAT_INC(tbl, lookups);
 
 	read_lock_bh(&tbl->lock);
-	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
+	hlist_for_each_entry(n, tmp, &tbl->hash_buckets[hash_val], hlist) {
 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
 			neigh_hold(n);
 			NEIGH_CACHE_STAT_INC(tbl, hits);
-			break;
+			goto found;
 		}
 	}
+	n = NULL;
+found:
 	read_unlock_bh(&tbl->lock);
 	return n;
 }
@@ -363,19 +363,22 @@ struct neighbour *neigh_lookup(struct ne
 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
 {
 	struct neighbour *n;
+	struct hlist_node *tmp;
 	int key_len = tbl->key_len;
 	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
 
 	NEIGH_CACHE_STAT_INC(tbl, lookups);
 
 	read_lock_bh(&tbl->lock);
-	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
+	hlist_for_each_entry(n, tmp, &tbl->hash_buckets[hash_val], hlist) {
 		if (!memcmp(n->primary_key, pkey, key_len)) {
 			neigh_hold(n);
 			NEIGH_CACHE_STAT_INC(tbl, hits);
-			break;
+			goto found;
 		}
 	}
+	n = NULL;
+found:
 	read_unlock_bh(&tbl->lock);
 	return n;
 }
@@ -387,6 +390,7 @@ struct neighbour *neigh_create(struct ne
 	int key_len = tbl->key_len;
 	int error;
 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+	struct hlist_node *tmp;
 
 	if (!n) {
 		rc = ERR_PTR(-ENOBUFS);
@@ -424,7 +428,7 @@ struct neighbour *neigh_create(struct ne
 		goto out_tbl_unlock;
 	}
 
-	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
+	hlist_for_each_entry(n1, tmp, &tbl->hash_buckets[hash_val], hlist) {
 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
 			neigh_hold(n1);
 			rc = n1;
@@ -432,8 +436,7 @@ struct neighbour *neigh_create(struct ne
 		}
 	}
 
-	n->next = tbl->hash_buckets[hash_val];
-	tbl->hash_buckets[hash_val] = n;
+	hlist_add_head(&n->hlist, &tbl->hash_buckets[hash_val]);
 	n->dead = 0;
 	neigh_hold(n);
 	write_unlock_bh(&tbl->lock);
@@ -635,7 +638,9 @@ static void neigh_connect(struct neighbo
 static void neigh_periodic_timer(unsigned long arg)
 {
 	struct neigh_table *tbl = (struct neigh_table *)arg;
-	struct neighbour *n, **np;
+	struct neighbour *n;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
 	unsigned long expire, now = jiffies;
 
 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
@@ -654,19 +659,17 @@ static void neigh_periodic_timer(unsigne
 				neigh_rand_reach_time(p->base_reachable_time);
 	}
 
-	np = &tbl->hash_buckets[tbl->hash_chain_gc];
+	head = &tbl->hash_buckets[tbl->hash_chain_gc];
 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
 
-	while ((n = *np) != NULL) {
+	hlist_for_each_entry_safe(n, node, tmp, head, hlist) {
 		unsigned int state;
 
 		write_lock(&n->lock);
 
 		state = n->nud_state;
-		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
-			write_unlock(&n->lock);
+		if (state & (NUD_PERMANENT | NUD_IN_TIMER))
 			goto next_elt;
-		}
 
 		if (time_before(n->used, n->confirmed))
 			n->used = n->confirmed;
@@ -674,16 +677,14 @@ static void neigh_periodic_timer(unsigne
 		if (atomic_read(&n->refcnt) == 1 &&
 		    (state == NUD_FAILED ||
 		     time_after(now, n->used + n->parms->gc_staletime))) {
-			*np = n->next;
+			hlist_del(&n->hlist);
 			n->dead = 1;
 			write_unlock(&n->lock);
 			neigh_release(n);
 			continue;
 		}
+	next_elt:
 		write_unlock(&n->lock);
-
-next_elt:
-		np = &n->next;
 	}
 
  	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
@@ -1980,27 +1981,29 @@ nla_put_failure:
 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 			    struct netlink_callback *cb)
 {
-	struct neighbour *n;
 	int rc, h, s_h = cb->args[1];
 	int idx, s_idx = idx = cb->args[2];
 
 	for (h = 0; h <= tbl->hash_mask; h++) {
+		struct neighbour *n;
+		struct hlist_node *tmp;
+
 		if (h < s_h)
 			continue;
 		if (h > s_h)
 			s_idx = 0;
 		read_lock_bh(&tbl->lock);
-		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
-			if (idx < s_idx)
-				continue;
-			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+		idx = 0;
+		hlist_for_each_entry(n, tmp, &tbl->hash_buckets[h], hlist) {
+			if (idx >= s_idx &&
+			    neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
 					    cb->nlh->nlmsg_seq,
-					    RTM_NEWNEIGH,
-					    NLM_F_MULTI) <= 0) {
+					    RTM_NEWNEIGH, NLM_F_MULTI) <= 0) {
 				read_unlock_bh(&tbl->lock);
 				rc = -1;
 				goto out;
 			}
+			++idx;
 		}
 		read_unlock_bh(&tbl->lock);
 	}
@@ -2044,10 +2047,10 @@ void neigh_for_each(struct neigh_table *
 
 	read_lock_bh(&tbl->lock);
 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
-		struct neighbour *n;
+		struct hlist_node *p;
 
-		for (n = tbl->hash_buckets[chain]; n; n = n->next)
-			cb(n, cookie);
+		hlist_for_each(p, &tbl->hash_buckets[chain])
+			cb(hlist_entry(p, struct neighbour, hlist), cookie);
 	}
 	read_unlock_bh(&tbl->lock);
 }
@@ -2060,19 +2063,19 @@ void __neigh_for_each_release(struct nei
 	int chain;
 
 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
-		struct neighbour *n, **np;
+		struct neighbour *n;
+		struct hlist_node *tmp, *next;
 
-		np = &tbl->hash_buckets[chain];
-		while ((n = *np) != NULL) {
+		hlist_for_each_entry_safe(n, tmp, next,
+					  &tbl->hash_buckets[chain], hlist) {
 			int release;
 
 			write_lock(&n->lock);
 			release = cb(n);
 			if (release) {
-				*np = n->next;
+				hlist_del(&n->hlist);
 				n->dead = 1;
-			} else
-				np = &n->next;
+			}
 			write_unlock(&n->lock);
 			if (release)
 				neigh_release(n);
@@ -2092,33 +2095,39 @@ static struct neighbour *neigh_get_first
 
 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
-		n = tbl->hash_buckets[bucket];
+		struct hlist_node *tmp;
 
-		while (n) {
+		hlist_for_each_entry(n, tmp, &tbl->hash_buckets[bucket], hlist) {
 			if (state->neigh_sub_iter) {
 				loff_t fakep = 0;
 				void *v;
 
 				v = state->neigh_sub_iter(state, n, &fakep);
 				if (!v)
-					goto next;
+					continue;
 			}
+
 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
-				break;
+				goto found;
 			if (n->nud_state & ~NUD_NOARP)
-				break;
-		next:
-			n = n->next;
+				goto found;
 		}
-
-		if (n)
-			break;
+		n = NULL;
 	}
+found:
 	state->bucket = bucket;
 
 	return n;
 }
 
+static struct neighbour *next_neigh(struct hlist_node *node)
+{
+	if (node)
+		return hlist_entry(node, struct neighbour, hlist);
+	else
+		return NULL;
+}
+
 static struct neighbour *neigh_get_next(struct seq_file *seq,
 					struct neighbour *n,
 					loff_t *pos)
@@ -2131,7 +2140,8 @@ static struct neighbour *neigh_get_next(
 		if (v)
 			return n;
 	}
-	n = n->next;
+
+	n = next_neigh(n->hlist.next);
 
 	while (1) {
 		while (n) {
@@ -2147,7 +2157,7 @@ static struct neighbour *neigh_get_next(
 			if (n->nud_state & ~NUD_NOARP)
 				break;
 		next:
-			n = n->next;
+			n = next_neigh(n->hlist.next);
 		}
 
 		if (n)
@@ -2156,7 +2166,7 @@ static struct neighbour *neigh_get_next(
 		if (++state->bucket > tbl->hash_mask)
 			break;
 
-		n = tbl->hash_buckets[state->bucket];
+		n = next_neigh(tbl->hash_buckets[state->bucket].first);
 	}
 
 	if (n && pos)

--


Thread overview: 12+ messages
2006-08-14 21:20 [RFC 0/7] neighbour table lockless read Stephen Hemminger
2006-08-14 21:20 ` [RFC 1/7] net neighbor: convert top level list to RCU Stephen Hemminger
2006-08-14 21:20 ` Stephen Hemminger [this message]
2006-08-14 21:20 ` [RFC 3/7] neighbour: convert pneigh hash table to hlist Stephen Hemminger
2006-08-14 21:20 ` [RFC 4/7] net neighbour: convert to RCU Stephen Hemminger
2006-08-14 21:20 ` [RFC 5/7] neighbour: convert lookup to sequence lock Stephen Hemminger
2006-08-14 22:22   ` Thomas Graf
2006-08-14 22:37     ` Stephen Hemminger
2006-08-14 23:17       ` Thomas Graf
2006-08-15  0:42         ` Alexey Kuznetsov
2006-08-14 21:20 ` [RFC 6/7] neighbour: convert hard header cache to sequence number Stephen Hemminger
2006-08-14 21:20 ` [RFC 7/7] neighbour: reduce exports Stephen Hemminger
