public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Frederic Weisbecker <frederic@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	"David S . Miller" <davem@davemloft.net>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Frederic Weisbecker <frederic@kernel.org>,
	"Paul E . McKenney" <paulmck@linux.vnet.ibm.com>,
	Ingo Molnar <mingo@kernel.org>,
	Mauro Carvalho Chehab <mchehab@s-opensource.com>
Subject: [RFC PATCH 22/30] seqlock: Prepare write_seq[un]lock_bh() for handling softirq mask
Date: Thu, 11 Oct 2018 01:12:09 +0200	[thread overview]
Message-ID: <1539213137-13953-23-git-send-email-frederic@kernel.org> (raw)
In-Reply-To: <1539213137-13953-1-git-send-email-frederic@kernel.org>

From: Frederic Weisbecker <fweisbec@gmail.com>

This pair of functions is implemented on top of spin_[un]lock_bh(),
which is going to handle a softirq mask in order to apply fine-grained
vector disablement. The lock function is going to return the mask of
vectors that were enabled prior to the last call to local_bh_disable(),
following a model similar to that of local_irq_save/restore. Subsequent
calls to local_bh_disable() and friends can then stack up:

	bh = local_bh_disable(vec_mask);
		bh2 = write_seqlock_bh(...) {
			return spin_lock_bh(...);
		}
		...
		write_sequnlock_bh(..., bh2) {
			spin_unlock_bh(..., bh2);
		}
	local_bh_enable(bh);

To prepare for that, make write_seqlock_bh() able to return a saved
vector enabled mask and pass it back to write_sequnlock_bh(). Then wire
the whole thing up to spin_[un]lock_bh().

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 include/linux/seqlock.h    | 21 +++++++++++++--------
 net/core/neighbour.c       |  5 +++--
 net/ipv4/inetpeer.c        |  5 +++--
 net/ipv4/sysctl_net_ipv4.c |  5 +++--
 net/ipv4/tcp_metrics.c     |  5 +++--
 net/rxrpc/conn_service.c   |  4 ++--
 6 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index c22e19c..720e6e0 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -455,16 +455,19 @@ static inline void write_sequnlock(seqlock_t *sl)
 	spin_unlock(&sl->lock);
 }
 
-static inline void write_seqlock_bh(seqlock_t *sl)
+static inline unsigned int write_seqlock_bh(seqlock_t *sl, unsigned int mask)
 {
-	spin_lock_bh(&sl->lock, SOFTIRQ_ALL_MASK);
+	unsigned int bh;
+	bh = spin_lock_bh(&sl->lock, mask);
 	write_seqcount_begin(&sl->seqcount);
+	return bh;
 }
 
-static inline void write_sequnlock_bh(seqlock_t *sl)
+static inline void write_sequnlock_bh(seqlock_t *sl,
+				      unsigned int bh)
 {
 	write_seqcount_end(&sl->seqcount);
-	spin_unlock_bh(&sl->lock, 0);
+	spin_unlock_bh(&sl->lock, bh);
 }
 
 static inline void write_seqlock_irq(seqlock_t *sl)
@@ -542,14 +545,16 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
 		read_sequnlock_excl(lock);
 }
 
-static inline void read_seqlock_excl_bh(seqlock_t *sl)
+static inline unsigned int read_seqlock_excl_bh(seqlock_t *sl,
+						unsigned int mask)
 {
-	spin_lock_bh(&sl->lock, SOFTIRQ_ALL_MASK);
+	return spin_lock_bh(&sl->lock, mask);
 }
 
-static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+static inline void read_sequnlock_excl_bh(seqlock_t *sl,
+					  unsigned int bh)
 {
-	spin_unlock_bh(&sl->lock, 0);
+	spin_unlock_bh(&sl->lock, bh);
 }
 
 static inline void read_seqlock_excl_irq(seqlock_t *sl)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ec55470..733449e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1083,6 +1083,7 @@ EXPORT_SYMBOL(__neigh_event_send);
 static void neigh_update_hhs(struct neighbour *neigh)
 {
 	struct hh_cache *hh;
+	unsigned int bh;
 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
 		= NULL;
 
@@ -1092,9 +1093,9 @@ static void neigh_update_hhs(struct neighbour *neigh)
 	if (update) {
 		hh = &neigh->hh;
 		if (hh->hh_len) {
-			write_seqlock_bh(&hh->hh_lock);
+			bh = write_seqlock_bh(&hh->hh_lock, SOFTIRQ_ALL_MASK);
 			update(hh, neigh->dev, neigh->ha);
-			write_sequnlock_bh(&hh->hh_lock);
+			write_sequnlock_bh(&hh->hh_lock, bh);
 		}
 	}
 }
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d757b96..224d30e 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -182,6 +182,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 	struct rb_node **pp, *parent;
 	unsigned int gc_cnt, seq;
 	int invalidated;
+	unsigned int bh;
 
 	/* Attempt a lockless lookup first.
 	 * Because of a concurrent writer, we might not find an existing entry.
@@ -203,7 +204,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 	 * At least, nodes should be hot in our cache.
 	 */
 	parent = NULL;
-	write_seqlock_bh(&base->lock);
+	bh = write_seqlock_bh(&base->lock, SOFTIRQ_ALL_MASK);
 
 	gc_cnt = 0;
 	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
@@ -228,7 +229,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 	}
 	if (gc_cnt)
 		inet_peer_gc(base, gc_stack, gc_cnt);
-	write_sequnlock_bh(&base->lock);
+	write_sequnlock_bh(&base->lock, bh);
 
 	return p;
 }
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b92f422..b6d1d52 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -56,15 +56,16 @@ static int sysctl_tcp_low_latency __read_mostly;
 static void set_local_port_range(struct net *net, int range[2])
 {
 	bool same_parity = !((range[0] ^ range[1]) & 1);
+	unsigned int bh;
 
-	write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
+	bh = write_seqlock_bh(&net->ipv4.ip_local_ports.lock, SOFTIRQ_ALL_MASK);
 	if (same_parity && !net->ipv4.ip_local_ports.warned) {
 		net->ipv4.ip_local_ports.warned = true;
 		pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
 	}
 	net->ipv4.ip_local_ports.range[0] = range[0];
 	net->ipv4.ip_local_ports.range[1] = range[1];
-	write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
+	write_sequnlock_bh(&net->ipv4.ip_local_ports.lock, bh);
 }
 
 /* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index fd6ba88..c65d499 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -574,6 +574,7 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 			    u16 try_exp)
 {
 	struct dst_entry *dst = __sk_dst_get(sk);
+	unsigned int bh;
 	struct tcp_metrics_block *tm;
 
 	if (!dst)
@@ -583,7 +584,7 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 	if (tm) {
 		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 
-		write_seqlock_bh(&fastopen_seqlock);
+		bh = write_seqlock_bh(&fastopen_seqlock, SOFTIRQ_ALL_MASK);
 		if (mss)
 			tfom->mss = mss;
 		if (cookie && cookie->len > 0)
@@ -596,7 +597,7 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 			tfom->last_syn_loss = jiffies;
 		} else
 			tfom->syn_loss = 0;
-		write_sequnlock_bh(&fastopen_seqlock);
+		write_sequnlock_bh(&fastopen_seqlock, bh);
 	}
 	rcu_read_unlock();
 }
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 80773a5..e253cd9 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -71,7 +71,7 @@ static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
 	struct rxrpc_conn_proto k = conn->proto;
 	struct rb_node **pp, *parent;
 
-	write_seqlock_bh(&peer->service_conn_lock);
+	write_seqlock_bh(&peer->service_conn_lock, SOFTIRQ_ALL_MASK);
 
 	pp = &peer->service_conns.rb_node;
 	parent = NULL;
@@ -191,7 +191,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
 {
 	struct rxrpc_peer *peer = conn->params.peer;
 
-	write_seqlock_bh(&peer->service_conn_lock);
+	write_seqlock_bh(&peer->service_conn_lock, SOFTIRQ_ALL_MASK);
 	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
 		rb_erase(&conn->service_node, &peer->service_conns);
 	write_sequnlock_bh(&peer->service_conn_lock);
-- 
2.7.4


  parent reply	other threads:[~2018-10-10 23:14 UTC|newest]

Thread overview: 40+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-10-10 23:11 [RFC PATCH 00/30] softirq: Make softirqs soft-interruptible (+ per vector disablement) Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 01/30] x86: Revert "x86/irq: Demote irq_cpustat_t::__softirq_pending to u16" Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 02/30] arch/softirq: Rename softirq_pending fields to softirq_data Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 03/30] softirq: Implement local_softirq_pending() below softirq vector definition Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 04/30] softirq: Normalize softirq_pending naming scheme Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 05/30] softirq: Convert softirq_pending_set() to softirq_pending_nand() Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 06/30] softirq: Introduce disabled softirq vectors bits Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 07/30] softirq: Rename _local_bh_enable() to local_bh_enable_no_softirq() Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 08/30] softirq: Move vectors bits to bottom_half.h Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 09/30] x86: Init softirq enabled field Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 10/30] softirq: Check enabled bits on the softirq loop Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 11/30] net: Prepare netif_tx_lock_bh/netif_tx_unlock_bh() for handling softirq mask Frederic Weisbecker
2018-10-10 23:11 ` [RFC PATCH 12/30] rcu: Prepare rcu_read_[un]lock_bh() " Frederic Weisbecker
2018-10-16  5:28   ` Joel Fernandes
2018-10-17  0:44     ` Frederic Weisbecker
2018-10-17  0:55       ` Joel Fernandes
2018-10-10 23:12 ` [RFC PATCH 13/30] net: Prepare tcp_get_md5sig_pool() " Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 14/30] softirq: Introduce local_bh_disable_all() Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 15/30] net: Prepare [un]lock_sock_fast() for handling softirq mask Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 16/30] net: Prepare nf_log_buf_open() " Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 17/30] isdn: Prepare isdn_net_get_locked_lp() " Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 18/30] softirq: Prepare local_bh_disable() " Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 19/30] diva: Prepare diva_os_enter_spin_lock() " Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 20/30] tg3: Prepare tg3_full_[un]lock() " Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 21/30] locking: Prepare spin_lock_bh() " Frederic Weisbecker
2018-10-10 23:12 ` Frederic Weisbecker [this message]
2018-10-10 23:12 ` [RFC PATCH 23/30] rwlock: Prepare write_[un]lock_bh() " Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 24/30] softirq: Introduce Local_bh_enter/exit() Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 25/30] softirq: Push down softirq mask to __local_bh_disable_ip() Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 26/30] softirq: Increment the softirq offset on top of enabled bits Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 27/30] softirq: Swap softirq serving VS disable on preempt mask layout Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 28/30] softirq: Disable vector on execution Frederic Weisbecker
2018-10-10 23:12 ` [RFC PATCH 29/30] softirq: Make softirq processing softinterruptible Frederic Weisbecker
2018-10-16  4:15   ` Pavan Kondeti
2018-10-17  0:26     ` Frederic Weisbecker
2018-10-22  8:12       ` Pavan Kondeti
2018-10-10 23:12 ` [RFC PATCH 30/30] softirq: Tasklet/net-rx fixup Frederic Weisbecker
2018-10-16 22:03 ` [RFC PATCH 00/30] softirq: Make softirqs soft-interruptible (+ per vector disablement) Jonathan Corbet
2018-10-16 23:37   ` Richard Cochran
2018-10-17  1:20   ` Frederic Weisbecker

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1539213137-13953-23-git-send-email-frederic@kernel.org \
    --to=frederic@kernel.org \
    --cc=bigeasy@linutronix.de \
    --cc=davem@davemloft.net \
    --cc=fweisbec@gmail.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mchehab@s-opensource.com \
    --cc=mingo@kernel.org \
    --cc=paulmck@linux.vnet.ibm.com \
    --cc=peterz@infradead.org \
    --cc=tglx@linutronix.de \
    --cc=torvalds@linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox