netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] inetpeer: Don't disable BH for initial fast RCU lookup.
@ 2011-03-08 22:59 David Miller
  2011-03-13 10:04 ` Eric Dumazet
  0 siblings, 1 reply; 5+ messages in thread
From: David Miller @ 2011-03-08 22:59 UTC (permalink / raw)
  To: netdev; +Cc: eric.dumazet


If modifications on other cpus are ok, then modifications to
the tree during lookup done by the local cpu are ok too.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 net/ipv4/inetpeer.c |   18 +++++++++---------
 1 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f604ffd..6442c35 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -206,16 +206,16 @@ static int addr_compare(const struct inetpeer_addr *a,
 })
 
 /*
- * Called with rcu_read_lock_bh()
+ * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
  * in an endless loop.
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
-				       struct inet_peer_base *base)
+static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
+				    struct inet_peer_base *base)
 {
-	struct inet_peer *u = rcu_dereference_bh(base->root);
+	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
 
 	while (u != peer_avl_empty) {
@@ -231,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
 			return u;
 		}
 		if (cmp == -1)
-			u = rcu_dereference_bh(u->avl_left);
+			u = rcu_dereference(u->avl_left);
 		else
-			u = rcu_dereference_bh(u->avl_right);
+			u = rcu_dereference(u->avl_right);
 		if (unlikely(++count == PEER_MAXDEPTH))
 			break;
 	}
@@ -470,11 +470,11 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu_bh(daddr, base);
+	p = lookup_rcu(daddr, base);
 	invalidated = read_seqretry(&base->lock, sequence);
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	if (p) {
 		/* The existing node has been found.
-- 
1.7.4.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] inetpeer: Don't disable BH for initial fast RCU lookup.
  2011-03-08 22:59 [PATCH] inetpeer: Don't disable BH for initial fast RCU lookup David Miller
@ 2011-03-13 10:04 ` Eric Dumazet
  2011-03-13 23:42   ` David Miller
  0 siblings, 1 reply; 5+ messages in thread
From: Eric Dumazet @ 2011-03-13 10:04 UTC (permalink / raw)
  To: David Miller; +Cc: netdev

Le mardi 08 mars 2011 à 14:59 -0800, David Miller a écrit :
> If modifications on other cpus are ok, then modifications to
> the tree during lookup done by the local cpu are ok too.
> 
> Signed-off-by: David S. Miller <davem@davemloft.net>
> ---
>  net/ipv4/inetpeer.c |   18 +++++++++---------
>  1 files changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
> index f604ffd..6442c35 100644
> --- a/net/ipv4/inetpeer.c
> +++ b/net/ipv4/inetpeer.c
> @@ -206,16 +206,16 @@ static int addr_compare(const struct inetpeer_addr *a,
>  })
>  
>  /*
> - * Called with rcu_read_lock_bh()
> + * Called with rcu_read_lock()
>   * Because we hold no lock against a writer, its quite possible we fall
>   * in an endless loop.
>   * But every pointer we follow is guaranteed to be valid thanks to RCU.
>   * We exit from this function if number of links exceeds PEER_MAXDEPTH
>   */
> -static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
> -				       struct inet_peer_base *base)
> +static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
> +				    struct inet_peer_base *base)
>  {
> -	struct inet_peer *u = rcu_dereference_bh(base->root);
> +	struct inet_peer *u = rcu_dereference(base->root);
>  	int count = 0;
>  
>  	while (u != peer_avl_empty) {
> @@ -231,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
>  			return u;
>  		}
>  		if (cmp == -1)
> -			u = rcu_dereference_bh(u->avl_left);
> +			u = rcu_dereference(u->avl_left);
>  		else
> -			u = rcu_dereference_bh(u->avl_right);
> +			u = rcu_dereference(u->avl_right);
>  		if (unlikely(++count == PEER_MAXDEPTH))
>  			break;
>  	}
> @@ -470,11 +470,11 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
>  	/* Look up for the address quickly, lockless.
>  	 * Because of a concurrent writer, we might not find an existing entry.
>  	 */
> -	rcu_read_lock_bh();
> +	rcu_read_lock();
>  	sequence = read_seqbegin(&base->lock);
> -	p = lookup_rcu_bh(daddr, base);
> +	p = lookup_rcu(daddr, base);
>  	invalidated = read_seqretry(&base->lock, sequence);
> -	rcu_read_unlock_bh();
> +	rcu_read_unlock();
>  
>  	if (p) {
>  		/* The existing node has been found.

David, I am not sure this is safe, since we use call_rcu_bh() when
freeing one item. One cpu could decide to kfree() one item while another
cpu could still use it.

rcu_read_lock_bh() was signalling to other CPUs that we were in a softirq
section, so we were delaying a possible kfree().




^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] inetpeer: Don't disable BH for initial fast RCU lookup.
  2011-03-13 10:04 ` Eric Dumazet
@ 2011-03-13 23:42   ` David Miller
  2011-03-14  4:15     ` [PATCH net-next-2.6] inetpeer: should use call_rcu() variant Eric Dumazet
  0 siblings, 1 reply; 5+ messages in thread
From: David Miller @ 2011-03-13 23:42 UTC (permalink / raw)
  To: eric.dumazet; +Cc: netdev

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Sun, 13 Mar 2011 11:04:09 +0100

> David, I am not sure this is safe, since we use call_rcu_bh() when
> freeing one item. One cpu could decide to kfree() one item while another
> cpu could still use it.
> 
> rcu_read_lock_bh() was signalling to other CPUs that we were in a softirq
> section, so we were delaying a possible kfree().

Ok, could we use normal call_rcu() to solve this then?

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH net-next-2.6] inetpeer: should use call_rcu() variant
  2011-03-13 23:42   ` David Miller
@ 2011-03-14  4:15     ` Eric Dumazet
  2011-03-14  6:22       ` David Miller
  0 siblings, 1 reply; 5+ messages in thread
From: Eric Dumazet @ 2011-03-14  4:15 UTC (permalink / raw)
  To: David Miller; +Cc: netdev

Le dimanche 13 mars 2011 à 16:42 -0700, David Miller a écrit :
> From: Eric Dumazet <eric.dumazet@gmail.com>
> Date: Sun, 13 Mar 2011 11:04:09 +0100
> 
> > David, I am not sure this is safe, since we use call_rcu_bh() when
> > freeing one item. One cpu could decide to kfree() one item while another
> > cpu could still use it.
> > 
> > rcu_read_lock_bh() was signalling to other CPUs that we were in a softirq
> > section, so we were delaying a possible kfree().
> 
> Ok, could we use normal call_rcu() to solve this then?

Yes, this should be good.

Thanks

[PATCH net-next-2.6] inetpeer: should use call_rcu() variant

After commit 7b46ac4e77f3224a (inetpeer: Don't disable BH for initial
fast RCU lookup.), we should use call_rcu() to wait for a proper RCU
grace period.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
---
 net/ipv4/inetpeer.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 86b1d08..dd1b20e 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -399,7 +399,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 	write_sequnlock_bh(&base->lock);
 
 	if (do_free)
-		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
+		call_rcu(&p->rcu, inetpeer_free_rcu);
 	else
 		/* The node is used again.  Decrease the reference counter
 		 * back.  The loop "cleanup -> unlink_from_unused



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH net-next-2.6] inetpeer: should use call_rcu() variant
  2011-03-14  4:15     ` [PATCH net-next-2.6] inetpeer: should use call_rcu() variant Eric Dumazet
@ 2011-03-14  6:22       ` David Miller
  0 siblings, 0 replies; 5+ messages in thread
From: David Miller @ 2011-03-14  6:22 UTC (permalink / raw)
  To: eric.dumazet; +Cc: netdev

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Mon, 14 Mar 2011 05:15:31 +0100

> [PATCH net-next-2.6] inetpeer: should use call_rcu() variant
> 
> After commit 7b46ac4e77f3224a (inetpeer: Don't disable BH for initial
> fast RCU lookup.), we should use call_rcu() to wait for a proper RCU
> grace period.
> 
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>

Applied, thanks a lot Eric.

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2011-03-14  6:22 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-03-08 22:59 [PATCH] inetpeer: Don't disable BH for initial fast RCU lookup David Miller
2011-03-13 10:04 ` Eric Dumazet
2011-03-13 23:42   ` David Miller
2011-03-14  4:15     ` [PATCH net-next-2.6] inetpeer: should use call_rcu() variant Eric Dumazet
2011-03-14  6:22       ` David Miller

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).