* [RFC V2 PATCH 1/2] can: rx-fifo: Increase MB size limit from 32 to 64
@ 2014-10-09 10:18 David Jander
From: David Jander @ 2014-10-09 10:18 UTC (permalink / raw)
  To: Marc Kleine-Budde
  Cc: Wolfgang Grandegger, linux-can, Alexander Stein, David Jander

Signed-off-by: David Jander <david@protonic.nl>
---

Changed since V1:
 - Bug-fix: Change "~0U" to "~0LLU".
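
As an illustration of why the wider constant is needed (a stand-alone
user-space sketch, not part of the patch; the mailbox indices below are
made up): with ~0U the intermediate value is only 32 bits wide, so the
computed mask is silently truncated once mailboxes above 31 are involved.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical mailbox layout: low group 8..39, high group from 40 */
	unsigned int low_first = 8, high_first = 40;

	/* 32-bit constant: yields 0xffffff00, mailboxes 32..39 are lost */
	uint32_t bad = ~0U >> (32 + low_first - high_first) << low_first;

	/* 64-bit constant: yields 0x000000ffffffff00, covering MBs 8..39 */
	uint64_t good = ~0LLU >> (64 + low_first - high_first) << low_first;

	printf("bad=0x%08x good=0x%016llx\n", bad, (unsigned long long)good);
	return 0;
}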

 drivers/net/can/dev.c   | 24 ++++++++++++------------
 include/linux/can/dev.h | 12 ++++++------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1e53e9..930b9f4 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -289,20 +289,20 @@ static unsigned int can_rx_fifo_inc(struct can_rx_fifo *fifo, unsigned int *val)
 		return (*val)--;
 }
 
-static u32 can_rx_fifo_mask_low(struct can_rx_fifo *fifo)
+static u64 can_rx_fifo_mask_low(struct can_rx_fifo *fifo)
 {
 	if (fifo->inc)
-		return ~0U >> (32 + fifo->low_first - fifo->high_first) << fifo->low_first;
+		return ~0LLU >> (64 + fifo->low_first - fifo->high_first) << fifo->low_first;
 	else
-		return ~0U >> (32 - fifo->low_first + fifo->high_first) << (fifo->high_first + 1);
+		return ~0LLU >> (64 - fifo->low_first + fifo->high_first) << (fifo->high_first + 1);
 }
 
-static u32 can_rx_fifo_mask_high(struct can_rx_fifo *fifo)
+static u64 can_rx_fifo_mask_high(struct can_rx_fifo *fifo)
 {
 	if (fifo->inc)
-		return ~0U >> (32 + fifo->high_first - fifo->high_last - 1) << fifo->high_first;
+		return ~0LLU >> (64 + fifo->high_first - fifo->high_last - 1) << fifo->high_first;
 	else
-		return ~0U >> (32 - fifo->high_first + fifo->high_last - 1) << fifo->high_last;
+		return ~0LLU >> (64 - fifo->high_first + fifo->high_last - 1) << fifo->high_last;
 }
 
 int can_rx_fifo_add(struct net_device *dev, struct can_rx_fifo *fifo)
@@ -331,7 +331,7 @@ int can_rx_fifo_add(struct net_device *dev, struct can_rx_fifo *fifo)
 
 	netdev_dbg(dev, "%s: low_first=%d, high_first=%d, high_last=%d\n", __func__,
 		   fifo->low_first, fifo->high_first, fifo->high_last);
-	netdev_dbg(dev, "%s: mask_low=0x%08x mask_high=0x%08x\n", __func__,
+	netdev_dbg(dev, "%s: mask_low=0x%016llx mask_high=0x%016llx\n", __func__,
 		   fifo->mask_low, fifo->mask_high);
 
 	return 0;
@@ -341,14 +341,14 @@ EXPORT_SYMBOL_GPL(can_rx_fifo_add);
 int can_rx_fifo_poll(struct can_rx_fifo *fifo, int quota)
 {
 	int received = 0;
-	u32 pending;
+	u64 pending;
 	unsigned int mb;
 
 	do {
 		pending = fifo->read_pending(fifo);
 		pending &= fifo->active;
 
-		if (!(pending & BIT(fifo->next))) {
+		if (!(pending & BIT_ULL(fifo->next))) {
 			/*
 			 * Wrap around only if:
 			 * - we are in the upper group and
@@ -356,7 +356,7 @@ int can_rx_fifo_poll(struct can_rx_fifo *fifo, int quota)
 			 *   of the lower group.
 			 */
 			if (can_rx_fifo_ge(fifo, fifo->next, fifo->high_first) &&
-			    (pending & BIT(fifo->low_first))) {
+			    (pending & BIT_ULL(fifo->low_first))) {
 				fifo->next = fifo->low_first;
 
 				fifo->active |= fifo->mask_high;
@@ -369,7 +369,7 @@ int can_rx_fifo_poll(struct can_rx_fifo *fifo, int quota)
 		mb = can_rx_fifo_inc(fifo, &fifo->next);
 
 		/* disable mailbox */
-		fifo->active &= ~BIT(mb);
+		fifo->active &= ~BIT_ULL(mb);
 		fifo->mailbox_disable(fifo, mb);
 
 		fifo->mailbox_receive(fifo, mb);
@@ -387,7 +387,7 @@ int can_rx_fifo_poll(struct can_rx_fifo *fifo, int quota)
 }
 EXPORT_SYMBOL_GPL(can_rx_fifo_poll);
 
-u32 can_rx_fifo_get_active_mb_mask(const struct can_rx_fifo *fifo)
+u64 can_rx_fifo_get_active_mb_mask(const struct can_rx_fifo *fifo)
 {
 	return fifo->active;
 }
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index e9468d0..ed46f7d 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -71,14 +71,14 @@ struct can_rx_fifo {
 	unsigned int high_first;
 	unsigned int high_last;		/* not needed during runtime */
 
-	u32 (*read_pending)(struct can_rx_fifo *rx_fifo);
-	void (*mailbox_enable_mask)(struct can_rx_fifo *rx_fifo, u32 mask);
+	u64 (*read_pending)(struct can_rx_fifo *rx_fifo);
+	void (*mailbox_enable_mask)(struct can_rx_fifo *rx_fifo, u64 mask);
 	void (*mailbox_disable)(struct can_rx_fifo *rx_fifo, unsigned int mb);
 	void (*mailbox_receive)(struct can_rx_fifo *rx_fifo, unsigned int mb);
 
-	u32 mask_low;
-	u32 mask_high;
-	u32 active;
+	u64 mask_low;
+	u64 mask_high;
+	u64 active;
 
 	unsigned int next;
 
@@ -128,7 +128,7 @@ u8 can_len2dlc(u8 len);
 
 int can_rx_fifo_add(struct net_device *dev, struct can_rx_fifo *fifo);
 int can_rx_fifo_poll(struct can_rx_fifo *fifo, int quota);
-u32 can_rx_fifo_get_active_mb_mask(const struct can_rx_fifo *fifo);
+u64 can_rx_fifo_get_active_mb_mask(const struct can_rx_fifo *fifo);
 
 struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
 void free_candev(struct net_device *dev);
-- 
1.9.1


* [RFC V2 PATCH 2/2] can: rx-fifo: Change to do controller off-load in interrupt and NAPI poll
@ 2014-10-09 10:18 David Jander
From: David Jander @ 2014-10-09 10:18 UTC (permalink / raw)
  To: Marc Kleine-Budde
  Cc: Wolfgang Grandegger, linux-can, Alexander Stein, David Jander

WIP
The idea is to use rx-fifo from interrupt context and take away the need
for NAPI polling from the driver. Currently no support for error-handling
is included.

Signed-off-by: David Jander <david@protonic.nl>
---

Changed since V1:
 - Nearly total re-write.
 - Removed support for fifo-in-napi-poll.
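
To make the intended driver-side usage concrete, here is a rough sketch
(not a real driver: struct foo_priv and the foo_* mailbox helpers are
hypothetical, only the can_rx_fifo_* calls and the callback names come
from this series):

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct can_priv can;	/* must come first for the candev helpers */
	struct can_rx_fifo fifo;
	void __iomem *regs;
};

/* Copy mailbox 'mb' into 'frame' if it is FULL; return 1 if a frame was read */
static unsigned int foo_mailbox_move_to_buffer(struct can_rx_fifo *fifo,
					       struct can_frame *frame,
					       unsigned int mb)
{
	struct foo_priv *priv = container_of(fifo, struct foo_priv, fifo);

	if (!foo_mb_full(priv, mb))		/* hypothetical register helper */
		return 0;

	foo_mb_read(priv, mb, frame);		/* hypothetical register helper */
	foo_mb_ack(priv, mb);			/* hypothetical register helper */
	return 1;
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);

	/* Move FULL mailboxes into the ring; schedules NAPI when needed */
	can_rx_fifo_irq_offload(&priv->fifo);

	return IRQ_HANDLED;
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	int err;

	priv->fifo.low_first = 0;	/* example mailbox layout: 0..15 */
	priv->fifo.high_first = 8;
	priv->fifo.high_last = 15;
	priv->fifo.mailbox_enable_mask = foo_mailbox_enable_mask; /* hypothetical */
	priv->fifo.mailbox_enable = foo_mailbox_enable;           /* hypothetical */
	priv->fifo.mailbox_move_to_buffer = foo_mailbox_move_to_buffer;

	err = can_rx_fifo_add(dev, &priv->fifo);
	if (err)
		return err;

	can_rx_fifo_napi_enable(&priv->fifo);
	/* ... request_irq(), start the controller, netif_start_queue() ... */
	return 0;
}

The matching foo_close() would call can_rx_fifo_napi_disable() and
can_rx_fifo_del().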

 drivers/net/can/dev.c   | 213 +++++++++++++++++++++++++++++++++++++-----------
 include/linux/can/dev.h |  21 +++--
 2 files changed, 180 insertions(+), 54 deletions(-)

diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 930b9f4..9abd5a4 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -26,6 +26,7 @@
 #include <linux/can/skb.h>
 #include <linux/can/netlink.h>
 #include <linux/can/led.h>
+#include <linux/circ_buf.h>
 #include <net/rtnetlink.h>
 
 #define MOD_DESC "CAN device driver interface"
@@ -281,6 +282,14 @@ static bool can_rx_fifo_ge(struct can_rx_fifo *fifo, unsigned int a, unsigned in
 		return a <= b;
 }
 
+static bool can_rx_fifo_le(struct can_rx_fifo *fifo, unsigned int a, unsigned int b)
+{
+	if (fifo->inc)
+		return a <= b;
+	else
+		return a >= b;
+}
+
 static unsigned int can_rx_fifo_inc(struct can_rx_fifo *fifo, unsigned int *val)
 {
 	if (fifo->inc)
@@ -305,27 +314,100 @@ static u64 can_rx_fifo_mask_high(struct can_rx_fifo *fifo)
 		return ~0LLU >> (64 - fifo->high_first + fifo->high_last - 1) << fifo->high_last;
 }
 
+static int can_rx_fifo_read_napi_frame(struct can_rx_fifo *fifo, int index)
+{
+	struct net_device *dev = fifo->dev;
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	skb = alloc_can_skb(dev, &cf);
+	if (unlikely(!skb)) {
+		stats->rx_dropped++;
+		return 0;
+	}
+
+	memcpy(cf, &fifo->ring[index], sizeof(*cf));
+
+	netif_receive_skb(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+
+	can_led_event(dev, CAN_LED_EVENT_RX);
+
+	return 1;
+}
+
+static int can_rx_fifo_napi_poll(struct napi_struct *napi, int quota)
+{
+	struct can_rx_fifo *fifo = container_of(napi, struct can_rx_fifo, napi);
+	int work_done = 0;
+	int ret;
+	unsigned int head;
+	unsigned int tail;
+
+restart_poll:
+	/* handle mailboxes */
+	head = smp_load_acquire(&fifo->ring_head);
+	tail = fifo->ring_tail;
+	while ((CIRC_CNT(head, tail, fifo->ring_size) >= 1) &&
+			(work_done < quota)) {
+		ret = can_rx_fifo_read_napi_frame(fifo, tail);
+		work_done += ret;
+		tail = (tail + 1) & (fifo->ring_size -1);
+		smp_store_release(&fifo->ring_tail, tail);
+	}
+
+	if (work_done < quota) {
+		napi_complete(napi);
+
+		/* Check if there was another interrupt */
+		head = smp_load_acquire(&fifo->ring_head);
+		if ((CIRC_CNT(head, tail, fifo->ring_size) >= 1) &&
+		    napi_reschedule(&fifo->napi))
+			goto restart_poll;
+	}
+
+	return work_done;
+}
+
 int can_rx_fifo_add(struct net_device *dev, struct can_rx_fifo *fifo)
 {
+	unsigned int weight;
 	fifo->dev = dev;
 
 	if ((fifo->low_first < fifo->high_first) &&
-	    (fifo->high_first < fifo->high_last))
+	    (fifo->high_first < fifo->high_last)) {
 		fifo->inc = true;
-	else if ((fifo->low_first > fifo->high_first) &&
-		 (fifo->high_first > fifo->high_last))
+		weight = fifo->high_last - fifo->low_first;
+	} else if ((fifo->low_first > fifo->high_first) &&
+		 (fifo->high_first > fifo->high_last)) {
 		fifo->inc = false;
-	else
+		weight = fifo->low_first - fifo->high_last;
+	} else
 		return -EINVAL;
 
-	if (!fifo->read_pending || !fifo->mailbox_enable_mask ||
-	    !fifo->mailbox_disable || !fifo->mailbox_receive)
+	if (!fifo->mailbox_enable_mask || !fifo->mailbox_move_to_buffer ||
+	    !fifo->mailbox_enable)
 		return -EINVAL;
 
+	/* Make ring-buffer a sensible size that is a power of 2 */
+	fifo->ring_size = (2 << fls(weight));
+	fifo->ring = kzalloc(sizeof(struct can_frame) * fifo->ring_size,
+			     GFP_KERNEL);
+	if (!fifo->ring)
+		return -ENOMEM;
+
+	fifo->ring_head = fifo->ring_tail = 0;
+
+	/* Take care of NAPI handling */
+	netif_napi_add(dev, &fifo->napi, can_rx_fifo_napi_poll, weight);
+
 	/* init variables */
 	fifo->mask_low = can_rx_fifo_mask_low(fifo);
 	fifo->mask_high = can_rx_fifo_mask_high(fifo);
-	fifo->next = fifo->low_first;
+	fifo->second_first = 0;
 	fifo->active = fifo->mask_low | fifo->mask_high;
 	fifo->mailbox_enable_mask(fifo, fifo->active);
 
@@ -338,60 +420,95 @@ int can_rx_fifo_add(struct net_device *dev, struct can_rx_fifo *fifo)
 }
 EXPORT_SYMBOL_GPL(can_rx_fifo_add);
 
-int can_rx_fifo_poll(struct can_rx_fifo *fifo, int quota)
+static unsigned int can_rx_fifo_offload_if_full(struct can_rx_fifo *fifo, unsigned int n)
+{
+	unsigned int head = fifo->ring_head;
+	unsigned int tail = ACCESS_ONCE(fifo->ring_tail);
+	unsigned int ret = 0;
+
+	if (CIRC_SPACE(head, tail, fifo->ring_size) >= 1) {
+		ret = fifo->mailbox_move_to_buffer(fifo, &fifo->ring[head], n);
+		if (ret)
+			smp_store_release(&fifo->ring_head,
+				(head + 1) & (fifo->ring_size - 1));
+	} else {
+		ret = fifo->mailbox_move_to_buffer(fifo, &fifo->overflow, n);
+		if (ret)
+			fifo->dev->stats.rx_dropped++;
+	}
+	return ret;
+}
+
+int can_rx_fifo_irq_offload(struct can_rx_fifo *fifo)
 {
-	int received = 0;
-	u64 pending;
-	unsigned int mb;
-
-	do {
-		pending = fifo->read_pending(fifo);
-		pending &= fifo->active;
-
-		if (!(pending & BIT_ULL(fifo->next))) {
-			/*
-			 * Wrap around only if:
-			 * - we are in the upper group and
-			 * - there is a CAN frame in the first mailbox
-			 *   of the lower group.
-			 */
-			if (can_rx_fifo_ge(fifo, fifo->next, fifo->high_first) &&
-			    (pending & BIT_ULL(fifo->low_first))) {
-				fifo->next = fifo->low_first;
-
-				fifo->active |= fifo->mask_high;
-				fifo->mailbox_enable_mask(fifo, fifo->mask_high);
-			} else {
-				break;
-			}
+	unsigned int i;
+	unsigned int ret;
+	unsigned int received = 0;
+
+	if (fifo->second_first) {
+		for (i = fifo->high_first;
+		     can_rx_fifo_le(fifo, i, fifo->high_last);
+		     can_rx_fifo_inc(fifo, &i)) {
+			received += can_rx_fifo_offload_if_full(fifo, i);
+			fifo->active |= BIT_ULL(i);
+			fifo->mailbox_enable(fifo, i);
 		}
+	}
 
-		mb = can_rx_fifo_inc(fifo, &fifo->next);
+	/* Copy and disable FULL MBs */
+	for (i = fifo->low_first; can_rx_fifo_le(fifo, i, fifo->high_last);
+			can_rx_fifo_inc(fifo, &i)) {
+		if (!(fifo->active & BIT_ULL(i)))
+			continue;
+		ret = can_rx_fifo_offload_if_full(fifo, i);
+		if (!ret)
+			break;
+		received += ret;
+		fifo->active &= ~BIT_ULL(i);
+	}
 
-		/* disable mailbox */
-		fifo->active &= ~BIT_ULL(mb);
-		fifo->mailbox_disable(fifo, mb);
+	if (can_rx_fifo_ge(fifo, i, fifo->high_first) && fifo->second_first)
+		netdev_warn(fifo->dev, "%s: RX order cannot be guaranteed."
+			" (count=%d)\n", __FUNCTION__, i);
 
-		fifo->mailbox_receive(fifo, mb);
+	fifo->second_first = false;
 
-		if (fifo->next == fifo->high_first) {
-			fifo->active |= fifo->mask_low;
-			fifo->mailbox_enable_mask(fifo, fifo->mask_low);
-		}
+	/* No EMPTY MB in first half? */
+	if (can_rx_fifo_ge(fifo, i, fifo->high_first)) {
+		/* Re-enable all disabled MBs */
+		fifo->active = fifo->mask_low | fifo->mask_high;
+		fifo->mailbox_enable_mask(fifo, fifo->active);
+
+		/* Next time we need to check the second half first */
+		fifo->second_first = true;
+	}
 
-		received++;
-		quota--;
-	} while (quota);
+	if (received)
+		napi_schedule(&fifo->napi);
 
 	return received;
 }
-EXPORT_SYMBOL_GPL(can_rx_fifo_poll);
+EXPORT_SYMBOL_GPL(can_rx_fifo_irq_offload);
+
+void can_rx_fifo_napi_enable(struct can_rx_fifo *fifo)
+{
+	napi_enable(&fifo->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_fifo_napi_enable);
+
+void can_rx_fifo_napi_disable(struct can_rx_fifo *fifo)
+{
+	napi_disable(&fifo->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_fifo_napi_disable);
 
-u64 can_rx_fifo_get_active_mb_mask(const struct can_rx_fifo *fifo)
+void can_rx_fifo_del(struct can_rx_fifo *fifo)
 {
-	return fifo->active;
+	if (fifo->ring)
+		kfree(fifo->ring);
+	netif_napi_del(&fifo->napi);
 }
-EXPORT_SYMBOL_GPL(can_rx_fifo_get_active_mb_mask);
+EXPORT_SYMBOL_GPL(can_rx_fifo_del);
 
 /*
  * Local echo of CAN messages
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index ed46f7d..64a8de3 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -71,18 +71,25 @@ struct can_rx_fifo {
 	unsigned int high_first;
 	unsigned int high_last;		/* not needed during runtime */
 
-	u64 (*read_pending)(struct can_rx_fifo *rx_fifo);
 	void (*mailbox_enable_mask)(struct can_rx_fifo *rx_fifo, u64 mask);
-	void (*mailbox_disable)(struct can_rx_fifo *rx_fifo, unsigned int mb);
-	void (*mailbox_receive)(struct can_rx_fifo *rx_fifo, unsigned int mb);
+	void (*mailbox_enable)(struct can_rx_fifo *rx_fifo, unsigned int mb);
+	unsigned int (*mailbox_move_to_buffer)(struct can_rx_fifo *rx_fifo,
+		struct can_frame *frame, unsigned int mb);
 
 	u64 mask_low;
 	u64 mask_high;
 	u64 active;
 
-	unsigned int next;
+	unsigned int second_first;
 
 	bool inc;
+
+	struct can_frame *ring;
+	struct can_frame overflow;
+	size_t ring_size;
+	unsigned int ring_head;
+	unsigned int ring_tail;
+	struct napi_struct napi;
 };
 
 /*
@@ -127,8 +134,10 @@ u8 can_dlc2len(u8 can_dlc);
 u8 can_len2dlc(u8 len);
 
 int can_rx_fifo_add(struct net_device *dev, struct can_rx_fifo *fifo);
-int can_rx_fifo_poll(struct can_rx_fifo *fifo, int quota);
-u64 can_rx_fifo_get_active_mb_mask(const struct can_rx_fifo *fifo);
+int can_rx_fifo_irq_offload(struct can_rx_fifo *fifo);
+void can_rx_fifo_napi_enable(struct can_rx_fifo *fifo);
+void can_rx_fifo_napi_disable(struct can_rx_fifo *fifo);
+void can_rx_fifo_del(struct can_rx_fifo *fifo);
 
 struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
 void free_candev(struct net_device *dev);
-- 
1.9.1


* Re: [RFC V2 PATCH 2/2] can: rx-fifo: Change to do controller off-load in interrupt and NAPI poll
@ 2014-10-09 12:19 David Jander
From: David Jander @ 2014-10-09 12:19 UTC (permalink / raw)
  To: Marc Kleine-Budde
  Cc: Wolfgang Grandegger, linux-can, Alexander Stein


Hi Marc,

On Thu,  9 Oct 2014 12:18:04 +0200
David Jander <david@protonic.nl> wrote:

> WIP
> The idea is to use rx-fifo from interrupt context and take away the need
> for NAPI polling from the driver. Currently no support for error-handling
> is included.

About how to implement error handling...
I see two options, but I don't know which is the right one:

1. Insert hooks for error and state polling in the NAPI poll handler (see
below), or

2. Do error and state handling from the interrupt handler and insert proper
error frames into the circular buffer?

The advantage of 1 over 2 is that it puts less (and possibly unnecessary?)
work into the interrupt handler. We want can_rx_fifo_irq_offload() to be as
short and lean as possible, right?

OTOH, option 2 might ensure better synchronization between frame reception
and error reporting... no idea if that's important.
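
To make option 2 a bit more concrete, something like the following could
be called from the interrupt handler (just an untested sketch;
can_rx_fifo_push_error() is a made-up helper that mirrors
can_rx_fifo_offload_if_full(), and it assumes the CAN_ERR_* constants
from <linux/can/error.h> are available):

static int can_rx_fifo_push_error(struct can_rx_fifo *fifo, u8 status)
{
	unsigned int head = fifo->ring_head;
	unsigned int tail = ACCESS_ONCE(fifo->ring_tail);
	struct can_frame *cf;

	if (CIRC_SPACE(head, tail, fifo->ring_size) < 1) {
		fifo->dev->stats.rx_dropped++;
		return 0;
	}

	cf = &fifo->ring[head];
	memset(cf, 0, sizeof(*cf));
	cf->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
	cf->can_dlc = CAN_ERR_DLC;
	cf->data[1] = status;		/* e.g. CAN_ERR_CRTL_RX_WARNING */

	smp_store_release(&fifo->ring_head, (head + 1) & (fifo->ring_size - 1));
	napi_schedule(&fifo->napi);

	return 1;
}

That way the existing NAPI poll would deliver the error frame in order
with the received data frames, at the cost of a little extra work in the
interrupt handler.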

> [...]
>
> +static int can_rx_fifo_napi_poll(struct napi_struct *napi, int quota)
> +{
> +	struct can_rx_fifo *fifo = container_of(napi, struct can_rx_fifo, napi);
> +	int work_done = 0;
> +	int ret;
> +	unsigned int head;
> +	unsigned int tail;
> +
> +restart_poll:
> +	/* handle mailboxes */
> +	head = smp_load_acquire(&fifo->ring_head);
> +	tail = fifo->ring_tail;
> +	while ((CIRC_CNT(head, tail, fifo->ring_size) >= 1) &&
> +			(work_done < quota)) {
> +		ret = can_rx_fifo_read_napi_frame(fifo, tail);
> +		work_done += ret;
> +		tail = (tail + 1) & (fifo->ring_size -1);
> +		smp_store_release(&fifo->ring_tail, tail);
> +	}

Option 1. (see above): Insert hooks here, like:

	if (work_done < quota)
		work_done += fifo->mailbox_poll_state(fifo);

	if (work_done < quota)
		work_done += fifo->mailbox_poll_error(fifo);

> [...]

-- 
David Jander
Protonic Holland.
tel.: +31 (0) 229 212928
fax.: +31 (0) 229 210930
Factorij 36 / 1689 AL Zwaag
