From: Jesper Dangaard Brouer <brouer@redhat.com>
To: Eric Dumazet <eric.dumazet@gmail.com>,
	"David S. Miller" <davem@davemloft.net>,
	Florian Westphal <fw@strlen.de>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>,
	netdev@vger.kernel.org, Pablo Neira Ayuso <pablo@netfilter.org>,
	Thomas Graf <tgraf@suug.ch>, Cong Wang <amwang@redhat.com>,
	"Patrick McHardy" <kaber@trash.net>,
	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
	Herbert Xu <herbert@gondor.hengli.com.au>
Subject: [net-next PATCH V2 6/9] net: frag, implement dynamic percpu alloc of frag_cpu_limit
Date: Thu, 29 Nov 2012 17:14:42 +0100	[thread overview]
Message-ID: <20121129161343.17754.96847.stgit@dragon> (raw)
In-Reply-To: <20121129161019.17754.29670.stgit@dragon>

Use the percpu API to implement dynamic per-CPU allocation
of the frag_cpu_limit structures in struct netns_frags.  This
replaces the static percpu[NR_CPUS] array.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
This is the first time I have used the percpu API, so please let me
know if I'm using it correctly.
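
For reference, here is a minimal, self-contained sketch of the
alloc_percpu()/per_cpu_ptr()/free_percpu() lifecycle this patch relies
on.  The "foo" structures and functions below are made up purely for
illustration and are not part of this patch or the kernel tree:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct foo_stats {
	spinlock_t	lock;
	unsigned long	count;
};

struct foo {
	/* dynamically allocated per-CPU data, replaces a [NR_CPUS] array */
	struct foo_stats __percpu *stats;
};

static int foo_init(struct foo *f)
{
	int cpu;

	f->stats = alloc_percpu(struct foo_stats);
	if (!f->stats)
		return -ENOMEM;

	/* initialize each possible CPU's instance before first use */
	for_each_possible_cpu(cpu) {
		struct foo_stats *s = per_cpu_ptr(f->stats, cpu);

		spin_lock_init(&s->lock);
		s->count = 0;
	}
	return 0;
}

static void foo_exit(struct foo *f)
{
	/* frees the per-CPU area for all CPUs at once */
	free_percpu(f->stats);
}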

 include/net/inet_frag.h  |   39 ++++++++++++++++++++++++++-------------
 net/ipv4/inet_fragment.c |   34 +++++++++++++++++++++++-----------
 2 files changed, 49 insertions(+), 24 deletions(-)

diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 8421904..3eadf42 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -3,6 +3,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
+#include <linux/percpu.h>
 
 /* Need to maintain these resource limits per CPU, else we will kill
  * performance due to cache-line bouncing
@@ -16,7 +17,7 @@ struct frag_cpu_limit {
 struct netns_frags {
 	int			nqueues;
 
-	struct frag_cpu_limit	percpu[NR_CPUS];
+	struct frag_cpu_limit __percpu *percpu;
 
 	/* sysctls */
 	int			timeout;
@@ -92,26 +93,32 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
 static inline void inet_frag_lru_move(struct inet_frag_queue *q)
 {
 	int cpu = q->cpu_alloc;
-	spin_lock(&q->net->percpu[cpu].lru_lock);
-	list_move_tail(&q->lru_list, &q->net->percpu[cpu].lru_list);
-	spin_unlock(&q->net->percpu[cpu].lru_lock);
+	struct frag_cpu_limit *percpu = per_cpu_ptr(q->net->percpu, cpu);
+
+	spin_lock(&percpu->lru_lock);
+	list_move_tail(&q->lru_list, &percpu->lru_list);
+	spin_unlock(&percpu->lru_lock);
 }
 
 static inline void inet_frag_lru_del(struct inet_frag_queue *q)
 {
 	int cpu = q->cpu_alloc;
-	spin_lock(&q->net->percpu[cpu].lru_lock);
+	struct frag_cpu_limit *percpu = per_cpu_ptr(q->net->percpu, cpu);
+
+	spin_lock(&percpu->lru_lock);
 	list_del(&q->lru_list);
-	spin_unlock(&q->net->percpu[cpu].lru_lock);
+	spin_unlock(&percpu->lru_lock);
 }
 
 static inline void inet_frag_lru_add(struct netns_frags *nf,
 				     struct inet_frag_queue *q)
 {
 	int cpu = q->cpu_alloc;
-	spin_lock(&nf->percpu[cpu].lru_lock);
-	list_add_tail(&q->lru_list, &nf->percpu[cpu].lru_list);
-	spin_unlock(&nf->percpu[cpu].lru_lock);
+	struct frag_cpu_limit *percpu = per_cpu_ptr(nf->percpu, cpu);
+
+	spin_lock(&percpu->lru_lock);
+	list_add_tail(&q->lru_list, &percpu->lru_list);
+	spin_unlock(&percpu->lru_lock);
 }
 
 /* Memory Tracking Functions. */
@@ -119,21 +126,27 @@ static inline void inet_frag_lru_add(struct netns_frags *nf,
 static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
 {
 	int cpu = q->cpu_alloc;
-	atomic_sub(i, &q->net->percpu[cpu].mem);
+	struct frag_cpu_limit *percpu = per_cpu_ptr(q->net->percpu, cpu);
+	atomic_sub(i, &percpu->mem);
 }
 
 static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
 {
 	int cpu = q->cpu_alloc;
-	atomic_add(i, &q->net->percpu[cpu].mem);
+	struct frag_cpu_limit *percpu = per_cpu_ptr(q->net->percpu, cpu);
+	atomic_add(i, &percpu->mem);
 }
 
 static inline int sum_frag_mem_limit(struct netns_frags *nf)
 {
 	unsigned int sum = 0;
 	int cpu;
-	for_each_possible_cpu(cpu)
-		sum += atomic_read(&nf->percpu[cpu].mem);
+
+	for_each_possible_cpu(cpu) {
+		struct frag_cpu_limit *percpu = per_cpu_ptr(nf->percpu, cpu);
+
+		sum += atomic_read(&percpu->mem);
+	}
 	return sum;
 }
 
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 068aabe..0099f0c 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,7 +25,8 @@
 
 static inline int frag_mem_limit_on_cpu(struct netns_frags *nf, int on_cpu)
 {
-	return atomic_read(&nf->percpu[on_cpu].mem);
+	struct frag_cpu_limit *percpu = per_cpu_ptr(nf->percpu, on_cpu);
+	return atomic_read(&percpu->mem);
 }
 
 static inline int frag_mem_limit(struct netns_frags *nf)
@@ -81,14 +82,22 @@ void inet_frags_init(struct inet_frags *f)
 }
 EXPORT_SYMBOL(inet_frags_init);
 
-static void inet_frags_init_percpu_limit(struct netns_frags *nf)
+static int inet_frags_init_percpu_limit(struct netns_frags *nf)
 {
 	int cpu;
+
+	nf->percpu = alloc_percpu(struct frag_cpu_limit);
+	if (!nf->percpu)
+		return -ENOMEM;
+
 	for_each_possible_cpu(cpu) {
-		INIT_LIST_HEAD(&nf->percpu[cpu].lru_list);
-		spin_lock_init(&nf->percpu[cpu].lru_lock);
-		atomic_set(&nf->percpu[cpu].mem, 0);
+		struct frag_cpu_limit *percpu = per_cpu_ptr(nf->percpu, cpu);
+
+		INIT_LIST_HEAD(&percpu->lru_list);
+		spin_lock_init(&percpu->lru_lock);
+		atomic_set(&percpu->mem, 0);
 	}
+	return 0;
 }
 
 void inet_frags_init_net(struct netns_frags *nf)
@@ -113,6 +122,8 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
 	for_each_possible_cpu(cpu)
 		inet_frag_evictor(nf, f, true, cpu);
 	local_bh_enable();
+
+	free_percpu(nf->percpu);
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
 
@@ -184,6 +195,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f,
 	struct inet_frag_queue *q;
 	int work, evicted = 0;
 	int cpu = (likely(on_cpu < 0)) ? smp_processor_id() : on_cpu;
+	struct frag_cpu_limit *percpu = per_cpu_ptr(nf->percpu, cpu);
 
 	if (!force) {
 		if (frag_mem_limit_on_cpu(nf, cpu) <= nf->high_thresh)
@@ -192,14 +204,14 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f,
 
 	work = frag_mem_limit_on_cpu(nf, cpu) - nf->low_thresh;
 	while (work > 0) {
-		spin_lock(&nf->percpu[cpu].lru_lock);
+		spin_lock(&percpu->lru_lock);
 
-		if (list_empty(&nf->percpu[cpu].lru_list)) {
-			spin_unlock(&nf->percpu[cpu].lru_lock);
+		if (list_empty(&percpu->lru_list)) {
+			spin_unlock(&percpu->lru_lock);
 			break;
 		}
 
-		q = list_first_entry(&nf->percpu[cpu].lru_list,
+		q = list_first_entry(&percpu->lru_list,
 				struct inet_frag_queue, lru_list);
 
 		/* queue entry is warm, i.e. new frags are arriving
@@ -209,12 +221,12 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f,
 		 * completes.
 		 */
 		if (!force && q->creation_ts == (u32) jiffies) {
-			spin_unlock(&nf->percpu[cpu].lru_lock);
+			spin_unlock(&percpu->lru_lock);
 			break;
 		}
 
 		atomic_inc(&q->refcnt);
-		spin_unlock(&nf->percpu[cpu].lru_lock);
+		spin_unlock(&percpu->lru_lock);
 
 		spin_lock(&q->lock);
 		if (!(q->last_in & INET_FRAG_COMPLETE))

Thread overview: 57+ messages
2012-11-29 16:10 [net-next PATCH V2 0/9] net: fragmentation performance scalability on NUMA/SMP systems Jesper Dangaard Brouer
2012-11-29 16:11 ` [net-next PATCH V2 1/9] net: frag evictor, avoid killing warm frag queues Jesper Dangaard Brouer
2012-11-29 17:44   ` David Miller
2012-11-29 22:17     ` Jesper Dangaard Brouer
2012-11-29 23:01       ` Eric Dumazet
2012-11-30 10:04         ` Jesper Dangaard Brouer
2012-11-30 14:52           ` Eric Dumazet
2012-11-30 15:45             ` Jesper Dangaard Brouer
2012-11-30 16:37               ` Eric Dumazet
2012-11-30 21:37                 ` Jesper Dangaard Brouer
2012-11-30 22:25                   ` Eric Dumazet
2012-11-30 23:23                     ` Jesper Dangaard Brouer
2012-11-30 23:47                       ` Stephen Hemminger
2012-12-01  0:03                         ` Eric Dumazet
2012-12-01  0:13                           ` Stephen Hemminger
2012-11-30 23:58                       ` Eric Dumazet
2012-12-04 13:30                         ` [net-next PATCH V3-evictor] " Jesper Dangaard Brouer
2012-12-04 14:32                           ` [net-next PATCH V3-evictor] net: frag evictor, avoid " David Laight
2012-12-04 14:47                           ` [net-next PATCH V3-evictor] net: frag evictor, avoid " Eric Dumazet
2012-12-04 17:51                             ` Jesper Dangaard Brouer
2012-12-05  9:24                           ` Jesper Dangaard Brouer
2012-12-06 12:26                             ` Jesper Dangaard Brouer
2012-12-06 12:32                               ` Florian Westphal
2012-12-06 13:29                                 ` David Laight
2012-12-06 21:38                                   ` David Miller
2012-12-06 13:55                                 ` Jesper Dangaard Brouer
2012-12-06 14:47                                   ` Eric Dumazet
2012-12-06 15:23                                     ` Jesper Dangaard Brouer
2012-11-29 23:32       ` [net-next PATCH V2 1/9] " Eric Dumazet
2012-11-30 12:01       ` Jesper Dangaard Brouer
2012-11-30 14:57         ` Eric Dumazet
2012-11-29 16:11 ` [net-next PATCH V2 2/9] net: frag cache line adjust inet_frag_queue.net Jesper Dangaard Brouer
2012-11-29 16:12 ` [net-next PATCH V2 3/9] net: frag, move LRU list maintenance outside of rwlock Jesper Dangaard Brouer
2012-11-29 17:43   ` Eric Dumazet
2012-11-29 17:48     ` David Miller
2012-11-29 17:54       ` Eric Dumazet
2012-11-29 18:05         ` David Miller
2012-11-29 18:24           ` Eric Dumazet
2012-11-29 18:31             ` David Miller
2012-11-29 18:33               ` Eric Dumazet
2012-11-29 18:36                 ` David Miller
2012-11-29 22:33         ` Jesper Dangaard Brouer
2012-11-29 16:12 ` [net-next PATCH V2 4/9] net: frag helper functions for mem limit tracking Jesper Dangaard Brouer
2012-11-29 16:13 ` [net-next PATCH V2 5/9] net: frag, per CPU resource, mem limit and LRU list accounting Jesper Dangaard Brouer
2012-11-29 17:06   ` Eric Dumazet
2012-11-29 17:31     ` David Miller
2012-12-03 14:02     ` Jesper Dangaard Brouer
2012-12-03 17:25       ` David Miller
2012-11-29 16:14 ` Jesper Dangaard Brouer [this message]
2012-11-29 16:15 ` [net-next PATCH V2 7/9] net: frag, move nqueues counter under LRU lock protection Jesper Dangaard Brouer
2012-11-29 16:15 ` [net-next PATCH V2 8/9] net: frag queue locking per hash bucket Jesper Dangaard Brouer
2012-11-29 17:08   ` Eric Dumazet
2012-11-30 12:55     ` Jesper Dangaard Brouer
2012-11-29 16:16 ` [net-next PATCH V2 9/9] net: increase frag queue hash size and cache-line Jesper Dangaard Brouer
2012-11-29 16:39   ` [net-next PATCH V2 9/9] net: increase frag queue hash size and cache-line David Laight
2012-11-29 16:55   ` [net-next PATCH V2 9/9] net: increase frag queue hash size and cache-line Eric Dumazet
2012-11-29 20:53     ` Jesper Dangaard Brouer
