From: Pablo Neira Ayuso <pablo@netfilter.org>
To: netfilter-devel@vger.kernel.org
Cc: davem@davemloft.net, netdev@vger.kernel.org
Subject: [PATCH 30/51] ipvs: convert sched_lock to spin lock
Date: Sat,  6 Apr 2013 14:17:29 +0200
Message-ID: <1365250670-14993-31-git-send-email-pablo@netfilter.org>
In-Reply-To: <1365250670-14993-1-git-send-email-pablo@netfilter.org>

From: Julian Anastasov <ja@ssi.bg>

Now that all read_lock users of sched_lock are gone, a plain spin lock
is preferred over the rwlock.
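
For illustration only (not part of this patch), here is a minimal
userspace analogue of the conversion, using POSIX locks instead of the
kernel primitives; the names sched_state, rover and touch_state are
hypothetical. Once every remaining user takes the lock for writing, a
plain spinlock does the same job without the reader/writer bookkeeping
of an rwlock:

	/* Hypothetical sketch; build with: cc -pthread sketch.c */
	#include <pthread.h>
	#include <stdio.h>

	struct sched_state {
		pthread_spinlock_t lock;	/* was: pthread_rwlock_t */
		int rover;			/* state the lock protects */
	};

	static void touch_state(struct sched_state *s)
	{
		pthread_spin_lock(&s->lock);	/* was: pthread_rwlock_wrlock() */
		s->rover++;
		pthread_spin_unlock(&s->lock);	/* was: pthread_rwlock_unlock() */
	}

	int main(void)
	{
		struct sched_state s = { .rover = 0 };

		pthread_spin_init(&s.lock, PTHREAD_PROCESS_PRIVATE);
		touch_state(&s);
		printf("rover = %d\n", s.rover);
		pthread_spin_destroy(&s.lock);
		return 0;
	}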

Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
---
 include/net/ip_vs.h              |    2 +-
 net/netfilter/ipvs/ip_vs_ctl.c   |    2 +-
 net/netfilter/ipvs/ip_vs_lblc.c  |   18 +++++++++---------
 net/netfilter/ipvs/ip_vs_lblcr.c |   26 +++++++++++++-------------
 net/netfilter/ipvs/ip_vs_rr.c    |   10 +++++-----
 net/netfilter/ipvs/ip_vs_wrr.c   |    8 ++++----
 6 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4990de6..4a7bc63 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -734,7 +734,7 @@ struct ip_vs_service {
 
 	/* for scheduling */
 	struct ip_vs_scheduler	*scheduler;    /* bound scheduler object */
-	rwlock_t		sched_lock;    /* lock sched_data */
+	spinlock_t		sched_lock;    /* lock sched_data */
 	void			*sched_data;   /* scheduler application data */
 
 	/* alternate persistence engine */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index d022726..2bfd807 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1219,7 +1219,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	svc->net = net;
 
 	INIT_LIST_HEAD(&svc->destinations);
-	rwlock_init(&svc->sched_lock);
+	spin_lock_init(&svc->sched_lock);
 	spin_lock_init(&svc->stats.lock);
 
 	/* Bind the scheduler */
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index c7ff978..ffef8a1 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -194,7 +194,7 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
 
 /*
  * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
- * address to a server. Called under write lock.
+ * address to a server. Called under spin lock.
  */
 static inline struct ip_vs_lblc_entry *
 ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
@@ -242,7 +242,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
 	struct hlist_node *next;
 	int i;
 
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	tbl->dead = 1;
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
@@ -250,7 +250,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
 			atomic_dec(&tbl->entries);
 		}
 	}
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 }
 
 static int sysctl_lblc_expiration(struct ip_vs_service *svc)
@@ -274,7 +274,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now,
 					en->lastuse +
@@ -284,7 +284,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 			ip_vs_lblc_free(en);
 			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 	}
 	tbl->rover = j;
 }
@@ -330,7 +330,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
 				continue;
@@ -339,7 +339,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
 			atomic_dec(&tbl->entries);
 			goal--;
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 		if (goal <= 0)
 			break;
 	}
@@ -527,10 +527,10 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 
 	/* If we fail to create a cache entry, we'll just use the valid dest */
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	if (!tbl->dead)
 		ip_vs_lblc_new(tbl, &iph.daddr, dest);
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 
 out:
 	IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 6049b85..cdfe6a9 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -368,7 +368,7 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
 
 /*
  * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
- * IP address to a server. Called under write lock.
+ * IP address to a server. Called under spin lock.
  */
 static inline struct ip_vs_lblcr_entry *
 ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
@@ -412,14 +412,14 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
 	struct ip_vs_lblcr_entry *en;
 	struct hlist_node *next;
 
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	tbl->dead = 1;
 	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
 			ip_vs_lblcr_free(en);
 		}
 	}
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 }
 
 static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
@@ -443,7 +443,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_after(en->lastuse +
 				       sysctl_lblcr_expiration(svc), now))
@@ -452,7 +452,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 			ip_vs_lblcr_free(en);
 			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 	}
 	tbl->rover = j;
 }
@@ -498,7 +498,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
 				continue;
@@ -507,7 +507,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
 			atomic_dec(&tbl->entries);
 			goal--;
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 		if (goal <= 0)
 			break;
 	}
@@ -678,7 +678,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if (atomic_read(&en->set.size) > 1 &&
 		    time_after(jiffies, en->set.lastmod +
 				sysctl_lblcr_expiration(svc))) {
-			write_lock(&svc->sched_lock);
+			spin_lock(&svc->sched_lock);
 			if (atomic_read(&en->set.size) > 1) {
 				struct ip_vs_dest *m;
 
@@ -686,7 +686,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 				if (m)
 					ip_vs_dest_set_erase(&en->set, m);
 			}
-			write_unlock(&svc->sched_lock);
+			spin_unlock(&svc->sched_lock);
 		}
 
 		/* If the destination is not overloaded, use it */
@@ -701,10 +701,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 
 		/* Update our cache entry */
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		if (!tbl->dead)
 			ip_vs_dest_set_insert(&en->set, dest, true);
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 		goto out;
 	}
 
@@ -716,10 +716,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 
 	/* If we fail to create a cache entry, we'll just use the valid dest */
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	if (!tbl->dead)
 		ip_vs_lblcr_new(tbl, &iph.daddr, dest);
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 
 out:
 	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 3942890..aa4601f 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -39,14 +39,14 @@ static int ip_vs_rr_del_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest)
 {
 	struct list_head *p;
 
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	p = (struct list_head *) svc->sched_data;
 	/* dest is already unlinked, so p->prev is not valid but
 	 * p->next is valid, use it to reach previous entry.
 	 */
 	if (p == &dest->n_list)
 		svc->sched_data = p->next->prev;
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 	return 0;
 }
 
@@ -63,7 +63,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	p = (struct list_head *) svc->sched_data;
 	last = dest = list_entry(p, struct ip_vs_dest, n_list);
 
@@ -85,13 +85,13 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	} while (pass < 2 && p != &svc->destinations);
 
 stop:
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 
   out:
 	svc->sched_data = &dest->n_list;
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 	IP_VS_DBG_BUF(6, "RR: server %s:%u "
 		      "activeconns %d refcnt %d weight %d\n",
 		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index a74fd9b..b173ef9 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -145,7 +145,7 @@ static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc,
 {
 	struct ip_vs_wrr_mark *mark = svc->sched_data;
 
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
 	mark->di = ip_vs_wrr_gcd_weight(svc);
 	mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
@@ -153,7 +153,7 @@ static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc,
 		mark->cw = mark->mw;
 	else if (mark->di > 1)
 		mark->cw = (mark->cw / mark->di) * mark->di + 1;
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 	return 0;
 }
 
@@ -170,7 +170,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	dest = mark->cl;
 	/* No available dests? */
 	if (mark->mw == 0)
@@ -222,7 +222,7 @@ found:
 	mark->cl = dest;
 
   out:
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 	return dest;
 
 err_noavail:
-- 
1.7.10.4
