* [PATCH net-next 1/2] sunvnet: Process Rx data packets in a BH handler
@ 2014-10-01 18:56 Sowmini Varadhan
  2014-10-01 19:09 ` Eric Dumazet
From: Sowmini Varadhan @ 2014-10-01 18:56 UTC (permalink / raw)
  To: davem, raghuram.kothakota, sowmini.varadhan; +Cc: netdev


Move VIO DATA processing out of interrupt context,
and into a bottom-half handler (vnet_event_bh())

Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Acked-by: Raghuram Kothakota <raghuram.kothakota@oracle.com>

---
 drivers/net/ethernet/sun/sunvnet.c | 126 ++++++++++++++++++++++++-------------
 drivers/net/ethernet/sun/sunvnet.h |  10 ++-
 2 files changed, 91 insertions(+), 45 deletions(-)

diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1262697..e2aacf5 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -274,6 +274,7 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
 	return skb;
 }
 
+/* reads in exactly one sk_buff */
 static int vnet_rx_one(struct vnet_port *port, unsigned int len,
 		       struct ldc_trans_cookie *cookies, int ncookies)
 {
@@ -311,9 +312,8 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += len;
-
-	netif_rx(skb);
-
+	/* BH context cannot call netif_receive_skb */
+	netif_rx_ni(skb);
 	return 0;
 
 out_free_skb:
@@ -534,7 +534,10 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	struct net_device *dev;
 	struct vnet *vp;
 	u32 end;
+	unsigned long flags;
 	struct vio_net_desc *desc;
+	bool need_trigger = false;
+
 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 		return 0;
 
@@ -545,21 +548,17 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	/* sync for race conditions with vnet_start_xmit() and tell xmit it
 	 * is time to send a trigger.
 	 */
+	spin_lock_irqsave(&port->vio.lock, flags);
 	dr->cons = next_idx(end, dr);
 	desc = vio_dring_entry(dr, dr->cons);
-	if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
-		/* vnet_start_xmit() just populated this dring but missed
-		 * sending the "start" LDC message to the consumer.
-		 * Send a "start" trigger on its behalf.
-		 */
-		if (__vnet_tx_trigger(port, dr->cons) > 0)
-			port->start_cons = false;
-		else
-			port->start_cons = true;
-	} else {
-		port->start_cons = true;
-	}
+	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons)
+		need_trigger = true;
+	else
+		port->start_cons = true; /* vnet_start_xmit will send trigger */
+	spin_unlock_irqrestore(&port->vio.lock, flags);
 
+	if (need_trigger && __vnet_tx_trigger(port, dr->cons) <= 0)
+		port->start_cons = true;
 
 	vp = port->vp;
 	dev = vp->dev;
@@ -617,33 +616,13 @@ static void maybe_tx_wakeup(unsigned long param)
 	netif_tx_unlock(dev);
 }
 
-static void vnet_event(void *arg, int event)
+static void vnet_event_bh(struct work_struct *work)
 {
-	struct vnet_port *port = arg;
+	struct vnet_port *port = container_of(work, struct vnet_port, rx_work);
 	struct vio_driver_state *vio = &port->vio;
-	unsigned long flags;
 	int tx_wakeup, err;
 
-	spin_lock_irqsave(&vio->lock, flags);
-
-	if (unlikely(event == LDC_EVENT_RESET ||
-		     event == LDC_EVENT_UP)) {
-		vio_link_state_change(vio, event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-
-		if (event == LDC_EVENT_RESET) {
-			port->rmtu = 0;
-			vio_port_up(vio);
-		}
-		return;
-	}
-
-	if (unlikely(event != LDC_EVENT_DATA_READY)) {
-		pr_warn("Unexpected LDC event %d\n", event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
-	}
-
+	mutex_lock(&port->vnet_rx_mutex);
 	tx_wakeup = err = 0;
 	while (1) {
 		union {
@@ -691,14 +670,41 @@ static void vnet_event(void *arg, int event)
 		if (err == -ECONNRESET)
 			break;
 	}
-	spin_unlock(&vio->lock);
-	/* Kick off a tasklet to wake the queue.  We cannot call
-	 * maybe_tx_wakeup directly here because we could deadlock on
-	 * netif_tx_lock() with dev_watchdog()
-	 */
 	if (unlikely(tx_wakeup && err != -ECONNRESET))
 		tasklet_schedule(&port->vp->vnet_tx_wakeup);
+	mutex_unlock(&port->vnet_rx_mutex);
+	vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+}
+
+static void vnet_event(void *arg, int event)
+{
+	struct vnet_port *port = arg;
+	struct vio_driver_state *vio = &port->vio;
+	unsigned long flags;
 
+	spin_lock_irqsave(&vio->lock, flags);
+
+	if (unlikely(event == LDC_EVENT_RESET ||
+		     event == LDC_EVENT_UP)) {
+		vio_link_state_change(vio, event);
+		spin_unlock_irqrestore(&vio->lock, flags);
+
+		if (event == LDC_EVENT_RESET)
+			vio_port_up(vio);
+		return;
+	}
+
+	if (unlikely(event != LDC_EVENT_DATA_READY)) {
+		pr_warn("Unexpected LDC event %d\n", event);
+		spin_unlock_irqrestore(&vio->lock, flags);
+		return;
+	}
+
+	if ((port->flags & VNET_PORT_DEAD) == 0) {
+		vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
+		queue_work(port->rx_workq, &port->rx_work);
+	}
+	spin_unlock(&vio->lock);
 	local_irq_restore(flags);
 }
 
@@ -750,6 +756,11 @@ static inline bool port_is_up(struct vnet_port *vnet)
 {
 	struct vio_driver_state *vio = &vnet->vio;
 
+	/* Should never hit a DEAD port here: we are holding the vnet lock,
+	 * and the list cleanup and VNET_PORT_DEAD marking gets done
+	 * under the vnet lock as well.
+	 */
+	BUG_ON(vnet->flags & VNET_PORT_DEAD);
 	return !!(vio->hs_state & VIO_HS_COMPLETE);
 }
 
@@ -1487,6 +1498,23 @@ static void print_version(void)
 	printk_once(KERN_INFO "%s", version);
 }
 
+static int vnet_workq_enable(struct vnet_port *port)
+{
+	port->rx_workq = alloc_workqueue(dev_name(&port->vio.vdev->dev),
+					 WQ_HIGHPRI|WQ_UNBOUND, 1);
+	if (!port->rx_workq)
+		return -ENOMEM;
+	mutex_init(&port->vnet_rx_mutex);
+	INIT_WORK(&port->rx_work, vnet_event_bh);
+	return 0;
+}
+
+static void vnet_workq_disable(struct vnet_port *port)
+{
+	flush_workqueue(port->rx_workq);
+	destroy_workqueue(port->rx_workq);
+}
+
 const char *remote_macaddr_prop = "remote-mac-address";
 
 static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -1536,6 +1564,10 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	if (err)
 		goto err_out_free_port;
 
+	err = vnet_workq_enable(port);
+	if (err)
+		goto err_out_free_port;
+
 	err = vnet_port_alloc_tx_bufs(port);
 	if (err)
 		goto err_out_free_ldc;
@@ -1572,6 +1604,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 err_out_free_ldc:
 	vio_ldc_free(&port->vio);
+	destroy_workqueue(port->rx_workq);
 
 err_out_free_port:
 	kfree(port);
@@ -1589,14 +1622,21 @@ static int vnet_port_remove(struct vio_dev *vdev)
 		struct vnet *vp = port->vp;
 		unsigned long flags;
 
+		vio_set_intr(port->vio.vdev->rx_ino, HV_INTR_DISABLED);
 		del_timer_sync(&port->vio.timer);
 		del_timer_sync(&port->clean_timer);
 
+		/* VNET_PORT_DEAD disallows any more vnet_event_bh
+		 * scheduling and prevents new refs to the port
+		 */
 		spin_lock_irqsave(&vp->lock, flags);
+		port->flags |= VNET_PORT_DEAD;
 		list_del(&port->list);
 		hlist_del(&port->hash);
 		spin_unlock_irqrestore(&vp->lock, flags);
 
+		vnet_workq_disable(port);
+
 		vnet_port_free_tx_bufs(port);
 		vio_ldc_free(&port->vio);
 
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index c911045..1182ec6 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -2,7 +2,7 @@
 #define _SUNVNET_H
 
 #include <linux/interrupt.h>
-
+#include <linux/workqueue.h>
 #define DESC_NCOOKIES(entry_size)	\
 	((entry_size) - sizeof(struct vio_net_desc))
 
@@ -41,7 +41,8 @@ struct vnet_port {
 	struct hlist_node	hash;
 	u8			raddr[ETH_ALEN];
 	u8			switch_port;
-	u8			__pad;
+	u8			flags;
+#define	VNET_PORT_DEAD	0x01
 
 	struct vnet		*vp;
 
@@ -56,6 +57,11 @@ struct vnet_port {
 	struct timer_list	clean_timer;
 
 	u64			rmtu;
+
+	struct mutex            vnet_rx_mutex; /* serializes rx_workq */
+	struct work_struct      rx_work;
+	struct workqueue_struct *rx_workq;
+
 };
 
 static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
-- 
1.8.4.2


* Re: [PATCH net-next 1/2] sunvnet: Process Rx data packets in a BH handler
  2014-10-01 18:56 [PATCH net-next 1/2] sunvnet: Process Rx data packets in a BH handler Sowmini Varadhan
@ 2014-10-01 19:09 ` Eric Dumazet
  2014-10-01 19:39   ` Sowmini Varadhan
From: Eric Dumazet @ 2014-10-01 19:09 UTC (permalink / raw)
  To: Sowmini Varadhan; +Cc: davem, raghuram.kothakota, netdev

On Wed, 2014-10-01 at 14:56 -0400, Sowmini Varadhan wrote:
> Move VIO DATA processing out of interrupt context,
> and into a bottom-half handler (vnet_event_bh())
> 
> Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
> Acked-by: Raghuram Kothakota <raghuram.kothakota@oracle.com>
> 
> ---
>  drivers/net/ethernet/sun/sunvnet.c | 126 ++++++++++++++++++++++++-------------
>  drivers/net/ethernet/sun/sunvnet.h |  10 ++-
>  2 files changed, 91 insertions(+), 45 deletions(-)
> 
> diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
> index 1262697..e2aacf5 100644
> --- a/drivers/net/ethernet/sun/sunvnet.c
> +++ b/drivers/net/ethernet/sun/sunvnet.c
> @@ -274,6 +274,7 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
>  	return skb;
>  }
>  
> +/* reads in exactly one sk_buff */
>  static int vnet_rx_one(struct vnet_port *port, unsigned int len,
>  		       struct ldc_trans_cookie *cookies, int ncookies)
>  {
> @@ -311,9 +312,8 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
>  
>  	dev->stats.rx_packets++;
>  	dev->stats.rx_bytes += len;
> -
> -	netif_rx(skb);
> -
> +	/* BH context cannot call netif_receive_skb */
> +	netif_rx_ni(skb);

Really? What about the standard and less expensive netif_receive_skb?



>  	u64			rmtu;
> +
> +	struct mutex            vnet_rx_mutex; /* serializes rx_workq */
> +	struct work_struct      rx_work;
> +	struct workqueue_struct *rx_workq;
> +
>  };
>  

Could you describe in the changelog why all this is needed?


* Re: [PATCH net-next 1/2] sunvnet: Process Rx data packets in a BH handler
  2014-10-01 19:09 ` Eric Dumazet
@ 2014-10-01 19:39   ` Sowmini Varadhan
From: Sowmini Varadhan @ 2014-10-01 19:39 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: davem, raghuram.kothakota, netdev

On (10/01/14 12:09), Eric Dumazet wrote:
> > -
> > +	/* BH context cannot call netif_receive_skb */
> > +	netif_rx_ni(skb);
> 
> Really ? What about the standard and less expensive netif_receive_skb ?

I can't use netif_receive_skb in this case:
the TCP retransmit timers run in softirq context. They can preempt here
and result in a deadlock on socket locks. E.g.,

tcp_write_timer+0xc/0xa0 <-- wants sk_lock
call_timer_fn+0x24/0x120
run_timer_softirq+0x214/0x2a0
__do_softirq+0xb8/0x200
do_softirq+0x8c/0xc0
local_bh_enable+0xac/0xc0
ip_finish_output+0x254/0x4a0
ip_output+0xc4/0xe0
ip_local_out+0x2c/0x40
ip_queue_xmit+0x140/0x3c0
tcp_transmit_skb+0x448/0x740
tcp_write_xmit+0x220/0x480
__tcp_push_pending_frames+0x38/0x100
tcp_rcv_established+0x214/0x780
tcp_v4_do_rcv+0x154/0x300
tcp_v4_rcv+0x6cc/0xa60   <-- takes sk_lock
  :
netif_receive_skb
 

Ideally I would have liked to use netif_receive_skb (it boosts perf),
but I had to back off for this reason.
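
As an illustration, the delivery step in vnet_rx_one() now boils down to
something like this (toy_rx_deliver() is only a sketch, not a helper in
the driver):

static void toy_rx_deliver(struct sk_buff *skb)
{
        /* netif_receive_skb() would walk the whole stack right here, in a
         * context where the timer softirq can still fire and spin on the
         * same socket lock (see the trace above).  netif_rx_ni() instead
         * queues the skb on the per-cpu backlog and drains it from
         * do_softirq(), where the timer softirq cannot nest.
         */
        netif_rx_ni(skb);
}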

> > +
> > +	struct mutex            vnet_rx_mutex; /* serializes rx_workq */
> > +	struct work_struct      rx_work;
> > +	struct workqueue_struct *rx_workq;
> > +
> >  };
> 
> Could you describe in the changelog why all this is needed ?

So I gave a short summary in the cover letter, but here are more details:

- processing packets in ldc_rx context risks live-lock
- I experimented with a few things, including NAPI, and just using a simple tasklet
  to take care of the data packet handling. With both NAPI and the tasklet I'm able
  to use netif_receive_skb safely; however, mpstat shows that one CPU ends up
  doing all the processing, and scaling was inhibited. (The per-port workqueue
  approach used here is sketched below.)
- further, with NAPI the budget gets in the way.
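
The pattern the patch uses instead is roughly the following (only a
sketch -- the toy_* names are hypothetical, the real code is in
vnet_event()/vnet_event_bh()):

static irqreturn_t toy_rx_interrupt(int irq, void *arg)
{
        struct toy_port *port = arg;

        /* mask the device rx interrupt and defer the dring walk */
        toy_disable_rx_intr(port);
        queue_work(port->rx_workq, &port->rx_work);
        return IRQ_HANDLED;
}

static void toy_rx_work(struct work_struct *work)
{
        struct toy_port *port = container_of(work, struct toy_port, rx_work);

        /* process context: we may sleep here, and since each port has its
         * own WQ_UNBOUND workqueue, different ports can be processed on
         * different CPUs
         */
        while (toy_dring_has_data(port))
                toy_rx_one(port);       /* ends in netif_rx_ni() */

        toy_enable_rx_intr(port);       /* re-arm only after the backlog drains */
}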

Regarding your other comments:
  "You basically found a way to overcome NAPI standard limits (budget of 64)"
As I said in the cover letter, coercing a budget on sunvnet actually hurts
perf significantly, because we then end up sending additional STOP/START messages.
To achieve that budget, we'd have to keep a lot more state in vnet to remember
the position in the stream but *not* send a STOP/START, and instead resume
at the next napi_schedule from where we left off.

Doing all this would end up just re-inventing much of the code in process_backlog
anyway.
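
For concreteness, a budget-honouring poll routine would have to look
something like this (entirely hypothetical -- none of these fields or
helpers exist in sunvnet today):

static int toy_vnet_poll(struct napi_struct *napi, int budget)
{
        struct toy_port *port = container_of(napi, struct toy_port, napi);
        int npkts = 0;

        /* resume from where the previous poll stopped, *without* having
         * sent a STOPPED/START exchange to the peer in between
         */
        while (npkts < budget && toy_dring_data_ready(port, port->resume_idx)) {
                toy_rx_one(port, port->resume_idx);
                port->resume_idx = toy_next_idx(port, port->resume_idx);
                npkts++;
        }

        if (npkts < budget) {
                napi_complete(napi);
                toy_enable_rx_intr(port);       /* only now re-arm the LDC rx interrupt */
        }
        return npkts;
}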

--Sowmini

 

