linux-arm-kernel.lists.infradead.org archive mirror
* [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
@ 2015-05-07 15:38 Xander Huff
  2015-05-14 17:10 ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 22+ messages in thread
From: Xander Huff @ 2015-05-07 15:38 UTC (permalink / raw)
  To: linux-arm-kernel

The driver currently registers a pair of irq handlers using
request_threaded_irq(); however, the synchronization mechanism
between the hardirq and the threaded irq handler is a regular spinlock.

Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can
sleep and thus cannot be acquired from a hardirq handler.
Because the set of operations performed under the spinlock is already
minimal and bounded, the lock is a suitable candidate for conversion to
a raw_spinlock.

This patch should not impact behavior on !PREEMPT_RT builds.

Signed-off-by: Xander Huff <xander.huff@ni.com>
Reviewed-by: Joe Hershberger <joe.hershberger@ni.com>
Reviewed-by: Ben Shelton <ben.shelton@ni.com>
Reviewed-by: Josh Cartwright <joshc@ni.com>
---
 drivers/iio/adc/xilinx-xadc-core.c | 34 +++++++++++++++++-----------------
 drivers/iio/adc/xilinx-xadc.h      |  2 +-
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index a221f73..4bdf03b 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -163,7 +163,7 @@ static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
 	uint32_t tmp;
 	int ret;
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
 			XADC_ZYNQ_INT_DFIFO_GTH);
 
@@ -177,7 +177,7 @@ static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
 
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 
 	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
 	if (ret == 0)
@@ -200,7 +200,7 @@ static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
 	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
 	cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
 			XADC_ZYNQ_INT_DFIFO_GTH);
 	xadc_zynq_drain_fifo(xadc);
@@ -213,7 +213,7 @@ static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
 
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
 	if (ret == 0)
 		ret = -EIO;
@@ -252,7 +252,7 @@ static void xadc_zynq_unmask_worker(struct work_struct *work)
 
 	misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 
 	/* Clear those bits which are not active anymore */
 	unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
@@ -266,7 +266,7 @@ static void xadc_zynq_unmask_worker(struct work_struct *work)
 
 	xadc_zynq_update_intmsk(xadc, 0, 0);
 
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 
 	/* if still pending some alarm re-trigger the timer */
 	if (xadc->zynq_masked_alarm) {
@@ -281,10 +281,10 @@ static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
 	struct xadc *xadc = iio_priv(indio_dev);
 	unsigned int alarm;
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 	alarm = xadc->zynq_alarm;
 	xadc->zynq_alarm = 0;
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 
 	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
 
@@ -309,7 +309,7 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 	if (!status)
 		return IRQ_NONE;
 
-	spin_lock(&xadc->lock);
+	raw_spin_lock(&xadc->lock);
 
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
 
@@ -330,7 +330,7 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 		xadc_zynq_update_intmsk(xadc, 0, 0);
 		ret = IRQ_WAKE_THREAD;
 	}
-	spin_unlock(&xadc->lock);
+	raw_spin_unlock(&xadc->lock);
 
 	return ret;
 }
@@ -419,7 +419,7 @@ static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
 	/* Move OT to bit 7 */
 	alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);
 
-	spin_lock_irqsave(&xadc->lock, flags);
+	raw_spin_lock_irqsave(&xadc->lock, flags);
 
 	/* Clear previous interrupts if any. */
 	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
@@ -428,7 +428,7 @@ static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
 		~alarm & XADC_ZYNQ_INT_ALARM_MASK);
 
-	spin_unlock_irqrestore(&xadc->lock, flags);
+	raw_spin_unlock_irqrestore(&xadc->lock, flags);
 }
 
 static const struct xadc_ops xadc_zynq_ops = {
@@ -520,12 +520,12 @@ static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
 	alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
 			((alarm & 0xf0) << 6);
 
-	spin_lock_irqsave(&xadc->lock, flags);
+	raw_spin_lock_irqsave(&xadc->lock, flags);
 	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
 	val &= ~XADC_AXI_INT_ALARM_MASK;
 	val |= alarm;
 	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
-	spin_unlock_irqrestore(&xadc->lock, flags);
+	raw_spin_unlock_irqrestore(&xadc->lock, flags);
 }
 
 static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
@@ -674,7 +674,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
 		xadc->trigger = NULL;
 	}
 
-	spin_lock_irqsave(&xadc->lock, flags);
+	raw_spin_lock_irqsave(&xadc->lock, flags);
 	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
 	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
 	if (state)
@@ -682,7 +682,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
 	else
 		val &= ~XADC_AXI_INT_EOS;
 	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
-	spin_unlock_irqrestore(&xadc->lock, flags);
+	raw_spin_unlock_irqrestore(&xadc->lock, flags);
 
 err_out:
 	mutex_unlock(&xadc->mutex);
@@ -1175,7 +1175,7 @@ static int xadc_probe(struct platform_device *pdev)
 	xadc->ops = id->data;
 	init_completion(&xadc->completion);
 	mutex_init(&xadc->mutex);
-	spin_lock_init(&xadc->lock);
+	raw_spin_lock_init(&xadc->lock);
 	INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index c7487e8..d945e25 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -66,7 +66,7 @@ struct xadc {
 	struct delayed_work zynq_unmask_work;
 
 	struct mutex mutex;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	struct completion completion;
 };
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 22+ messages in thread
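
For readers following the locking argument above: the only code-level
difference between the two lock types is the raw_ prefix on the type and its
accessors, but on PREEMPT_RT a spinlock_t becomes a sleeping lock while a
raw_spinlock_t keeps spinning with interrupts off. A minimal sketch of the
pattern (not taken from the xadc driver; every example_* name is invented)
might look like this:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Assumed to be the primary half of a request_threaded_irq() pair. */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_pending;

static irqreturn_t example_hardirq_handler(int irq, void *dev_id)
{
	unsigned long flags;

	/* Safe on PREEMPT_RT: raw_spin_lock_* never sleeps. */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_pending++;	/* short, bounded critical section */
	raw_spin_unlock_irqrestore(&example_lock, flags);

	/* Defer anything heavier to the threaded handler. */
	return IRQ_WAKE_THREAD;
}

On a !PREEMPT_RT kernel a raw_spinlock_t compiles down to essentially the
same code as a spinlock_t, which is why the commit message expects no
behavioral change there.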

* [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
  2015-05-07 15:38 [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock Xander Huff
@ 2015-05-14 17:10 ` Sebastian Andrzej Siewior
  2015-05-14 22:45   ` Xander Huff
  0 siblings, 1 reply; 22+ messages in thread
From: Sebastian Andrzej Siewior @ 2015-05-14 17:10 UTC (permalink / raw)
  To: linux-arm-kernel

* Xander Huff | 2015-05-07 10:38:10 [-0500]:

>The driver currently registers a pair of irq handlers using
>request_threaded_irq(), however the synchronization mechanism
>between the hardirq and the threadedirq handler is a regular spinlock.
>
>Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can
>sleep, and is thus not able to be acquired from a hardirq handler.
>Because the set of operations performed under the spinlock is already
>minimal and bounded, it is a suitable candidate for being a raw_spinlock.
>
>This patch should not impact behavior on !PREEMPT_RT builds.
>
>Signed-off-by: Xander Huff <xander.huff@ni.com>
>Reviewed-by: Joe Hershberger <joe.hershberger@ni.com>
>Reviewed-by: Ben Shelton <ben.shelton@ni.com>
>Reviewed-by: Josh Cartwright <joshc@ni.com>

Do you have any cyclictest numbers showing how much the latency increases
under heavy iio usage? Traditionally the primary handler should be very
quick / short.

Sebastian

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
  2015-05-14 17:10 ` Sebastian Andrzej Siewior
@ 2015-05-14 22:45   ` Xander Huff
  2015-05-18 21:17     ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 22+ messages in thread
From: Xander Huff @ 2015-05-14 22:45 UTC (permalink / raw)
  To: linux-arm-kernel

On 5/14/2015 12:10 PM, Sebastian Andrzej Siewior wrote:
>
> Do you have any cyclictest numbers how much the latency increases on
> heavy iio usage? Traditionally the primary handler should be very quick
> / short.
>
> Sebastian
>
With no other processes running, I got the following results after a couple of 
hours on one of our devices:

admin at Xander-roboRIO:~# cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 0.01 0.07 0.12 1/176 1473

T: 0 ( 1373) P:98 I:1000 C:6503872 Min:      9 Act:   13 Avg:   13 Max:      51
T: 1 ( 1374) P:98 I:1500 C:4335914 Min:      9 Act:   12 Avg:   13 Max:      49

With a VI reading all default handles (raw, offset, scale, sampling_frequency) 
in /sys/bus/iio/devices/iio:device0 constantly in a while loop, I got the 
following results after a couple of hours on the same device:

admin at Xander-roboRIO:~# cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 6.93 7.30 7.47 3/182 1530

T: 0 ( 1487) P:98 I:1000 C:4497008 Min:     11 Act:   20 Avg:   21 Max:      69
T: 1 ( 1488) P:98 I:1500 C:2998005 Min:     11 Act:   20 Avg:   22 Max:      59

-- 
Xander Huff
Staff Software Engineer
National Instruments

^ permalink raw reply	[flat|nested] 22+ messages in thread
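
The load pattern described above (re-reading the default IIO attributes in a
tight loop) can be approximated without a LabVIEW VI. The sketch below is only
an illustrative stand-in, not the tool that produced the numbers; it re-reads
whatever sysfs attribute paths it is given until interrupted:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[64];

	if (argc < 2) {
		fprintf(stderr, "usage: %s <sysfs attribute>...\n", argv[0]);
		return 1;
	}

	for (;;) {	/* run until interrupted */
		for (int i = 1; i < argc; i++) {
			int fd = open(argv[i], O_RDONLY);

			if (fd < 0)
				continue;
			if (read(fd, buf, sizeof(buf)) < 0)
				perror(argv[i]);
			close(fd);
		}
	}

	return 0;
}

Pointing it at the raw, offset, scale and sampling_frequency attributes under
/sys/bus/iio/devices/iio:device0 approximates the kind of sysfs traffic used
for the second measurement.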

* [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
  2015-05-14 22:45   ` Xander Huff
@ 2015-05-18 21:17     ` Sebastian Andrzej Siewior
  2015-05-23 11:36       ` Jonathan Cameron
  2015-06-07 15:29       ` Jonathan Cameron
  0 siblings, 2 replies; 22+ messages in thread
From: Sebastian Andrzej Siewior @ 2015-05-18 21:17 UTC (permalink / raw)
  To: linux-arm-kernel

* Xander Huff | 2015-05-14 17:45:04 [-0500]:

>With no other processes running, I got the following results after a
>couple of hours on one of our devices:
>
>admin at Xander-roboRIO:~# cyclictest -S -m -p 98
># /dev/cpu_dma_latency set to 0us
>policy: fifo: loadavg: 0.01 0.07 0.12 1/176 1473
>
>T: 0 ( 1373) P:98 I:1000 C:6503872 Min:      9 Act:   13 Avg:   13 Max:      51
>T: 1 ( 1374) P:98 I:1500 C:4335914 Min:      9 Act:   12 Avg:   13 Max:      49
>
>With a VI reading all default handles (raw, offset, scale,
>sampling_frequency) in /sys/bus/iio/devices/iio:device0 constantly in
>a while loop, I got the following results after a couple hours on the
>same device:
>
>admin at Xander-roboRIO:~# cyclictest -S -m -p 98
># /dev/cpu_dma_latency set to 0us
>policy: fifo: loadavg: 6.93 7.30 7.47 3/182 1530
>
>T: 0 ( 1487) P:98 I:1000 C:4497008 Min:     11 Act:   20 Avg:   21 Max:      69
>T: 1 ( 1488) P:98 I:1500 C:2998005 Min:     11 Act:   20 Avg:   22 Max:      59

So there is an increase. And there is even a for-loop and I don't know
how deep it is nested. Anyway, do you think it is worth it or would it
be better to get rid of the raw-locks and simply push everything into
threaded context?

Sebastian

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
  2015-05-18 21:17     ` Sebastian Andrzej Siewior
@ 2015-05-23 11:36       ` Jonathan Cameron
  2015-06-07 15:29       ` Jonathan Cameron
  1 sibling, 0 replies; 22+ messages in thread
From: Jonathan Cameron @ 2015-05-23 11:36 UTC (permalink / raw)
  To: linux-arm-kernel

On 18/05/15 22:17, Sebastian Andrzej Siewior wrote:
> * Xander Huff | 2015-05-14 17:45:04 [-0500]:
> 
>> With no other processes running, I got the following results after a
>> couple of hours on one of our devices:
>>
>> admin at Xander-roboRIO:~# cyclictest -S -m -p 98
>> # /dev/cpu_dma_latency set to 0us
>> policy: fifo: loadavg: 0.01 0.07 0.12 1/176 1473
>>
>> T: 0 ( 1373) P:98 I:1000 C:6503872 Min:      9 Act:   13 Avg:   13 Max:      51
>> T: 1 ( 1374) P:98 I:1500 C:4335914 Min:      9 Act:   12 Avg:   13 Max:      49
>>
>> With a VI reading all default handles (raw, offset, scale,
>> sampling_frequency) in /sys/bus/iio/devices/iio:device0 constantly in
>> a while loop, I got the following results after a couple hours on the
>> same device:
>>
>> admin at Xander-roboRIO:~# cyclictest -S -m -p 98
>> # /dev/cpu_dma_latency set to 0us
>> policy: fifo: loadavg: 6.93 7.30 7.47 3/182 1530
>>
>> T: 0 ( 1487) P:98 I:1000 C:4497008 Min:     11 Act:   20 Avg:   21 Max:      69
>> T: 1 ( 1488) P:98 I:1500 C:2998005 Min:     11 Act:   20 Avg:   22 Max:      59
> 
> So there is an increase. And there is even a for-loop and I don't know
> how deep it is nested. Anyway, do you think it is worth it or would it
> be better to get rid of the raw-locks and simply push everything into
> threaded context?
Certainly looks like that wants to be tried out...

Jonathan
> 
> Sebastian
> 

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
  2015-05-18 21:17     ` Sebastian Andrzej Siewior
  2015-05-23 11:36       ` Jonathan Cameron
@ 2015-06-07 15:29       ` Jonathan Cameron
  2015-06-08 14:44         ` Xander Huff
  2015-07-08 21:38         ` [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context Xander Huff
  1 sibling, 2 replies; 22+ messages in thread
From: Jonathan Cameron @ 2015-06-07 15:29 UTC (permalink / raw)
  To: linux-arm-kernel

On 18/05/15 22:17, Sebastian Andrzej Siewior wrote:
> * Xander Huff | 2015-05-14 17:45:04 [-0500]:
> 
>> With no other processes running, I got the following results after a
>> couple of hours on one of our devices:
>>
>> admin at Xander-roboRIO:~# cyclictest -S -m -p 98
>> # /dev/cpu_dma_latency set to 0us
>> policy: fifo: loadavg: 0.01 0.07 0.12 1/176 1473
>>
>> T: 0 ( 1373) P:98 I:1000 C:6503872 Min:      9 Act:   13 Avg:   13 Max:      51
>> T: 1 ( 1374) P:98 I:1500 C:4335914 Min:      9 Act:   12 Avg:   13 Max:      49
>>
>> With a VI reading all default handles (raw, offset, scale,
>> sampling_frequency) in /sys/bus/iio/devices/iio:device0 constantly in
>> a while loop, I got the following results after a couple hours on the
>> same device:
>>
>> admin at Xander-roboRIO:~# cyclictest -S -m -p 98
>> # /dev/cpu_dma_latency set to 0us
>> policy: fifo: loadavg: 6.93 7.30 7.47 3/182 1530
>>
>> T: 0 ( 1487) P:98 I:1000 C:4497008 Min:     11 Act:   20 Avg:   21 Max:      69
>> T: 1 ( 1488) P:98 I:1500 C:2998005 Min:     11 Act:   20 Avg:   22 Max:      59
> 
> So there is an increase. And there is even a for-loop and I don't know
> how deep it is nested. Anyway, do you think it is worth it or would it
> be better to get rid of the raw-locks and simply push everything into
> threaded context?
> 
Certainly seems likely to be a better way forward to me but I don't
really mind either way.

J

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
  2015-06-07 15:29       ` Jonathan Cameron
@ 2015-06-08 14:44         ` Xander Huff
  2015-07-08 21:38         ` [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context Xander Huff
  1 sibling, 0 replies; 22+ messages in thread
From: Xander Huff @ 2015-06-08 14:44 UTC (permalink / raw)
  To: linux-arm-kernel

On 6/7/2015 10:29 AM, Jonathan Cameron wrote:
> On 18/05/15 22:17, Sebastian Andrzej Siewior wrote:
>> * Xander Huff | 2015-05-14 17:45:04 [-0500]:
>>
>>> With no other processes running, I got the following results after a
>>> couple of hours on one of our devices:
>>>
>>> admin at Xander-roboRIO:~# cyclictest -S -m -p 98
>>> # /dev/cpu_dma_latency set to 0us
>>> policy: fifo: loadavg: 0.01 0.07 0.12 1/176 1473
>>>
>>> T: 0 ( 1373) P:98 I:1000 C:6503872 Min:      9 Act:   13 Avg:   13 Max:      51
>>> T: 1 ( 1374) P:98 I:1500 C:4335914 Min:      9 Act:   12 Avg:   13 Max:      49
>>>
>>> With a VI reading all default handles (raw, offset, scale,
>>> sampling_frequency) in /sys/bus/iio/devices/iio:device0 constantly in
>>> a while loop, I got the following results after a couple hours on the
>>> same device:
>>>
>>> admin at Xander-roboRIO:~# cyclictest -S -m -p 98
>>> # /dev/cpu_dma_latency set to 0us
>>> policy: fifo: loadavg: 6.93 7.30 7.47 3/182 1530
>>>
>>> T: 0 ( 1487) P:98 I:1000 C:4497008 Min:     11 Act:   20 Avg:   21 Max:      69
>>> T: 1 ( 1488) P:98 I:1500 C:2998005 Min:     11 Act:   20 Avg:   22 Max:      59
>>
>> So there is an increase. And there is even a for-loop and I don't know
>> how deep it is nested. Anyway, do you think it is worth it or would it
>> be better to get rid of the raw-locks and simply push everything into
>> threaded context?
>>
> Certainly seems likely to be a better way forward to me but I don't
> really mind either way.
>
> J
>
We'll be giving Sebastian's suggestion a try, as time allows.

-- 
Xander Huff
Staff Software Engineer
National Instruments

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-06-07 15:29       ` Jonathan Cameron
  2015-06-08 14:44         ` Xander Huff
@ 2015-07-08 21:38         ` Xander Huff
  2015-07-14 14:28           ` Sebastian Andrzej Siewior
       [not found]           ` <CAKfKVtGTO81wLWk50M303t2WjcBEMEt_LUnsJ3_WmvR_3izUjg@mail.gmail.com>
  1 sibling, 2 replies; 22+ messages in thread
From: Xander Huff @ 2015-07-08 21:38 UTC (permalink / raw)
  To: linux-arm-kernel

The driver currently registers a pair of irq handlers using
request_threaded_irq(); however, the synchronization mechanism between the
hardirq and the threaded irq handler is a regular spinlock.

Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep
and thus cannot be acquired from a hardirq handler. This patch gets rid
of the hardirq handler and pushes all interrupt handling into the
threaded context.

To validate that this change has no impact on RT performance, here are
cyclictest values with no processes running:

$ sudo cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 0.05 0.04 0.05 1/237 828
T: 0 ( 1861) P:98 I:1000 C:56925046 Min: 9 Act: 12 Avg: 12 Max: 71
T: 1 ( 1862) P:98 I:1500 C:37950030 Min: 9 Act: 12 Avg: 13 Max: 74

Then, all xadc raw handles were accessed in a continuous loop via
/sys/bus/iio/devices/iio:device0:

$ sudo cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 7.81 7.64 7.541 2/247 5751
T: 0 ( 1568) P:98 I:1000 C:23845515 Min: 11 Act: 22 Avg: 21 Max: 71
T: 1 ( 1569) P:98 I:1500 C:15897239 Min: 11 Act: 21 Avg: 22 Max: 68

Signed-off-by: Xander Huff <xander.huff@ni.com>
---
 drivers/iio/adc/xilinx-xadc-core.c | 35 ++++++++++++-----------------------
 1 file changed, 12 insertions(+), 23 deletions(-)

diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index ce93bd8..b309ad3 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -279,28 +279,9 @@ static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
 {
 	struct iio_dev *indio_dev = devid;
 	struct xadc *xadc = iio_priv(indio_dev);
-	unsigned int alarm;
-
-	spin_lock_irq(&xadc->lock);
-	alarm = xadc->zynq_alarm;
-	xadc->zynq_alarm = 0;
-	spin_unlock_irq(&xadc->lock);
-
-	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
-
-	/* unmask the required interrupts in timer. */
-	schedule_delayed_work(&xadc->zynq_unmask_work,
-			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
-{
-	struct iio_dev *indio_dev = devid;
-	struct xadc *xadc = iio_priv(indio_dev);
 	irqreturn_t ret = IRQ_HANDLED;
 	uint32_t status;
+	unsigned int alarm;
 
 	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
 
@@ -312,7 +293,6 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 	spin_lock(&xadc->lock);
 
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
-
 	if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
 		xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
 			XADC_ZYNQ_INT_DFIFO_GTH);
@@ -330,8 +310,18 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 		xadc_zynq_update_intmsk(xadc, 0, 0);
 		ret = IRQ_WAKE_THREAD;
 	}
+
+	alarm = xadc->zynq_alarm;
+	xadc->zynq_alarm = 0;
+
 	spin_unlock(&xadc->lock);
 
+	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
+
+	/* unmask the required interrupts in timer. */
+	schedule_delayed_work(&xadc->zynq_unmask_work,
+			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
+
 	return ret;
 }
 
@@ -436,7 +426,6 @@ static const struct xadc_ops xadc_zynq_ops = {
 	.write = xadc_zynq_write_adc_reg,
 	.setup = xadc_zynq_setup,
 	.get_dclk_rate = xadc_zynq_get_dclk_rate,
-	.interrupt_handler = xadc_zynq_interrupt_handler,
 	.threaded_interrupt_handler = xadc_zynq_threaded_interrupt_handler,
 	.update_alarm = xadc_zynq_update_alarm,
 };
@@ -1225,7 +1214,7 @@ static int xadc_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_free_samplerate_trigger;
 
-	ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
+	ret = request_threaded_irq(irq, 0,
 				xadc->ops->threaded_interrupt_handler,
 				0, dev_name(&pdev->dev), indio_dev);
 	if (ret)
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-07-08 21:38         ` [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context Xander Huff
@ 2015-07-14 14:28           ` Sebastian Andrzej Siewior
  2015-07-15 15:59             ` Xander Huff
       [not found]           ` <CAKfKVtGTO81wLWk50M303t2WjcBEMEt_LUnsJ3_WmvR_3izUjg@mail.gmail.com>
  1 sibling, 1 reply; 22+ messages in thread
From: Sebastian Andrzej Siewior @ 2015-07-14 14:28 UTC (permalink / raw)
  To: linux-arm-kernel

* Xander Huff | 2015-07-08 16:38:22 [-0500]:

> drivers/iio/adc/xilinx-xadc-core.c | 35 ++++++++++++-----------------------
> 1 file changed, 12 insertions(+), 23 deletions(-)
>
>diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
>index ce93bd8..b309ad3 100644
>--- a/drivers/iio/adc/xilinx-xadc-core.c
>+++ b/drivers/iio/adc/xilinx-xadc-core.c
>@@ -1225,7 +1214,7 @@ static int xadc_probe(struct platform_device *pdev)
> 	if (ret)
> 		goto err_free_samplerate_trigger;
> 
>-	ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
>+	ret = request_threaded_irq(irq, 0,

NULL, not 0.
Other than that, it looks good. Thanks.
If you manage to get this merged upstream, feel free to ping me and I
will pick it up for -RT.

> 				xadc->ops->threaded_interrupt_handler,
> 				0, dev_name(&pdev->dev), indio_dev);
> 	if (ret)

Sebastian

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context
       [not found]           ` <CAKfKVtGTO81wLWk50M303t2WjcBEMEt_LUnsJ3_WmvR_3izUjg@mail.gmail.com>
@ 2015-07-15 15:57             ` Xander Huff
  2015-07-20 23:14             ` [PATCH v3] " Xander Huff
  1 sibling, 0 replies; 22+ messages in thread
From: Xander Huff @ 2015-07-15 15:57 UTC (permalink / raw)
  To: linux-arm-kernel

On 7/9/2015 12:03 AM, Shubhrajyoti Datta wrote:
>     @@ -330,8 +310,18 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq,
>     void *devid)
>                      xadc_zynq_update_intmsk(xadc, 0, 0);
>                      ret = IRQ_WAKE_THREAD;
>              }
>     +
>     +       alarm = xadc->zynq_alarm;
>     +       xadc->zynq_alarm = 0;
>     +
>              spin_unlock(&xadc->lock);
>
>     +       xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
>     +
>     +       /* unmask the required interrupts in timer. */
>     +       schedule_delayed_work(&xadc->zynq_unmask_work,
>     +                       msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
>     +
>
> Now that we are in the threaded context do we need the  work queue stuff.
Seems reasonable to try to remove it. I'll do some testing and send out a v3 if 
it works, thanks.

-- 
Xander Huff
Staff Software Engineer
National Instruments

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-07-14 14:28           ` Sebastian Andrzej Siewior
@ 2015-07-15 15:59             ` Xander Huff
  0 siblings, 0 replies; 22+ messages in thread
From: Xander Huff @ 2015-07-15 15:59 UTC (permalink / raw)
  To: linux-arm-kernel

On 7/14/2015 9:28 AM, Sebastian Andrzej Siewior wrote:
> * Xander Huff | 2015-07-08 16:38:22 [-0500]:
>
>> drivers/iio/adc/xilinx-xadc-core.c | 35 ++++++++++++-----------------------
>> 1 file changed, 12 insertions(+), 23 deletions(-)
>>
>> diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
>> index ce93bd8..b309ad3 100644
>> --- a/drivers/iio/adc/xilinx-xadc-core.c
>> +++ b/drivers/iio/adc/xilinx-xadc-core.c
>> @@ -1225,7 +1214,7 @@ static int xadc_probe(struct platform_device *pdev)
>> 	if (ret)
>> 		goto err_free_samplerate_trigger;
>>
>> -	ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
>> +	ret = request_threaded_irq(irq, 0,
>
> NULL not 0
> Other than that, it looks good. Thanks.
> If you managed to get this merged upstream then feel free to ping me and
> I pick this into -RT.
>
>> 				xadc->ops->threaded_interrupt_handler,
>> 				0, dev_name(&pdev->dev), indio_dev);
>> 	if (ret)
>
> Sebastian
>
Haven't got this merged in yet. I'm addressing another concern from Shubhrajyoti 
Datta and will include this change in my v3 I'll send out sometime next week, 
thanks!

-- 
Xander Huff
Staff Software Engineer
National Instruments

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context
       [not found]           ` <CAKfKVtGTO81wLWk50M303t2WjcBEMEt_LUnsJ3_WmvR_3izUjg@mail.gmail.com>
  2015-07-15 15:57             ` Xander Huff
@ 2015-07-20 23:14             ` Xander Huff
  2015-07-24 12:38               ` Lars-Peter Clausen
  1 sibling, 1 reply; 22+ messages in thread
From: Xander Huff @ 2015-07-20 23:14 UTC (permalink / raw)
  To: linux-arm-kernel

The driver currently registers a pair of irq handlers using
request_threaded_irq(); however, the synchronization mechanism between the
hardirq and the threaded irq handler is a regular spinlock.

Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep
and thus cannot be acquired from a hardirq handler. This patch gets rid
of the hardirq handler and pushes all interrupt handling into the
threaded context.

To validate that this change has no impact on RT performance, here are
cyclictest values with no processes running:

$ sudo cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 0.05 0.04 0.05 1/237 828
T: 0 ( 1861) P:98 I:1000 C:56925046 Min: 9 Act: 12 Avg: 12 Max: 71
T: 1 ( 1862) P:98 I:1500 C:37950030 Min: 9 Act: 12 Avg: 13 Max: 74

Then, all xadc raw handles were accessed in a continuous loop via
/sys/bus/iio/devices/iio:device0:

$ sudo cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 7.81 7.64 7.541 2/247 5751
T: 0 ( 1568) P:98 I:1000 C:23845515 Min: 11 Act: 22 Avg: 21 Max: 71
T: 1 ( 1569) P:98 I:1500 C:15897239 Min: 11 Act: 21 Avg: 22 Max: 68

Signed-off-by: Xander Huff <xander.huff@ni.com>
---
 drivers/iio/adc/xilinx-xadc-core.c | 37 ++++++++-----------------------------
 1 file changed, 8 insertions(+), 29 deletions(-)

diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index ce93bd8..e16afdb 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -267,40 +267,15 @@ static void xadc_zynq_unmask_worker(struct work_struct *work)
 	xadc_zynq_update_intmsk(xadc, 0, 0);
 
 	spin_unlock_irq(&xadc->lock);
-
-	/* if still pending some alarm re-trigger the timer */
-	if (xadc->zynq_masked_alarm) {
-		schedule_delayed_work(&xadc->zynq_unmask_work,
-				msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
-	}
 }
 
 static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
 {
 	struct iio_dev *indio_dev = devid;
 	struct xadc *xadc = iio_priv(indio_dev);
-	unsigned int alarm;
-
-	spin_lock_irq(&xadc->lock);
-	alarm = xadc->zynq_alarm;
-	xadc->zynq_alarm = 0;
-	spin_unlock_irq(&xadc->lock);
-
-	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
-
-	/* unmask the required interrupts in timer. */
-	schedule_delayed_work(&xadc->zynq_unmask_work,
-			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
-{
-	struct iio_dev *indio_dev = devid;
-	struct xadc *xadc = iio_priv(indio_dev);
 	irqreturn_t ret = IRQ_HANDLED;
 	uint32_t status;
+	unsigned int alarm;
 
 	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
 
@@ -312,7 +287,6 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 	spin_lock(&xadc->lock);
 
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
-
 	if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
 		xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
 			XADC_ZYNQ_INT_DFIFO_GTH);
@@ -330,8 +304,14 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 		xadc_zynq_update_intmsk(xadc, 0, 0);
 		ret = IRQ_WAKE_THREAD;
 	}
+
+	alarm = xadc->zynq_alarm;
+	xadc->zynq_alarm = 0;
+
 	spin_unlock(&xadc->lock);
 
+	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
+
 	return ret;
 }
 
@@ -436,7 +416,6 @@ static const struct xadc_ops xadc_zynq_ops = {
 	.write = xadc_zynq_write_adc_reg,
 	.setup = xadc_zynq_setup,
 	.get_dclk_rate = xadc_zynq_get_dclk_rate,
-	.interrupt_handler = xadc_zynq_interrupt_handler,
 	.threaded_interrupt_handler = xadc_zynq_threaded_interrupt_handler,
 	.update_alarm = xadc_zynq_update_alarm,
 };
@@ -1225,7 +1204,7 @@ static int xadc_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_free_samplerate_trigger;
 
-	ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
+	ret = request_threaded_irq(irq, NULL,
 				xadc->ops->threaded_interrupt_handler,
 				0, dev_name(&pdev->dev), indio_dev);
 	if (ret)
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-07-20 23:14             ` [PATCH v3] " Xander Huff
@ 2015-07-24 12:38               ` Lars-Peter Clausen
  2015-08-03 20:18                 ` Xander Huff
  2015-08-04  5:34                 ` [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context Shubhrajyoti Datta
  0 siblings, 2 replies; 22+ messages in thread
From: Lars-Peter Clausen @ 2015-07-24 12:38 UTC (permalink / raw)
  To: linux-arm-kernel

Hi,

Sorry, but I don't think this patch has been sufficiently tested against a 
mainline kernel. The driver won't even probe the way it is right now.

On 07/21/2015 01:14 AM, Xander Huff wrote:
> The driver currently registers a pair of irq handlers using
> request_threaded_irq(), however the synchronization mechanism between the
> hardirq and the threadedirq handler is a regular spinlock.

If everything runs in threaded context we don't really need the spinlock 
anymore and can use the mutex throughout.

>
> Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep,
> and is thus not able to be acquired from a hardirq handler. This patch gets
> rid of the hardirq handler and pushes all interrupt handling into the
> threaded context.

We actually might as well run everything in the hardirq handler (which will 
be threaded in PREEMPT_RT). The reason why we have the threaded handler is 
because xadc_handle_event() used to sleep, but it doesn't do this anymore.

> Signed-off-by: Xander Huff <xander.huff@ni.com>
> ---
>   drivers/iio/adc/xilinx-xadc-core.c | 37 ++++++++-----------------------------
>   1 file changed, 8 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
> index ce93bd8..e16afdb 100644
> --- a/drivers/iio/adc/xilinx-xadc-core.c
> +++ b/drivers/iio/adc/xilinx-xadc-core.c
> @@ -267,40 +267,15 @@ static void xadc_zynq_unmask_worker(struct work_struct *work)
>   	xadc_zynq_update_intmsk(xadc, 0, 0);
>
>   	spin_unlock_irq(&xadc->lock);
> -
> -	/* if still pending some alarm re-trigger the timer */
> -	if (xadc->zynq_masked_alarm) {
> -		schedule_delayed_work(&xadc->zynq_unmask_work,
> -				msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
> -	}
>   }
>
>   static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
>   {
>   	struct iio_dev *indio_dev = devid;
>   	struct xadc *xadc = iio_priv(indio_dev);
> -	unsigned int alarm;
> -
> -	spin_lock_irq(&xadc->lock);
> -	alarm = xadc->zynq_alarm;
> -	xadc->zynq_alarm = 0;
> -	spin_unlock_irq(&xadc->lock);
> -
> -	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
> -
> -	/* unmask the required interrupts in timer. */
> -	schedule_delayed_work(&xadc->zynq_unmask_work,
> -			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
> -

With nobody scheduling the unmask worker, interrupts will stay disabled 
indefinitely after they have fired once; that's not very useful.

> -	return IRQ_HANDLED;
> -}
> -
> -static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
> -{
> -	struct iio_dev *indio_dev = devid;
> -	struct xadc *xadc = iio_priv(indio_dev);
>   	irqreturn_t ret = IRQ_HANDLED;
>   	uint32_t status;
> +	unsigned int alarm;
>
>   	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
>
> @@ -312,7 +287,6 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
>   	spin_lock(&xadc->lock);
>
>   	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
> -
>   	if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
>   		xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
>   			XADC_ZYNQ_INT_DFIFO_GTH);
> @@ -330,8 +304,14 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
>   		xadc_zynq_update_intmsk(xadc, 0, 0);
>   		ret = IRQ_WAKE_THREAD;
>   	}
> +
> +	alarm = xadc->zynq_alarm;
> +	xadc->zynq_alarm = 0;

With everything running in the same handler, we don't need those anymore.

> +
>   	spin_unlock(&xadc->lock);
>
> +	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
> +
>   	return ret;
>   }
>
> @@ -436,7 +416,6 @@ static const struct xadc_ops xadc_zynq_ops = {
>   	.write = xadc_zynq_write_adc_reg,
>   	.setup = xadc_zynq_setup,
>   	.get_dclk_rate = xadc_zynq_get_dclk_rate,
> -	.interrupt_handler = xadc_zynq_interrupt_handler,

The corresponding field should be removed from the xadc_ops struct, and then 
you'll also notice that interrupts no longer work for the AXI 
interface version.

>   	.threaded_interrupt_handler = xadc_zynq_threaded_interrupt_handler,
>   	.update_alarm = xadc_zynq_update_alarm,
>   };
> @@ -1225,7 +1204,7 @@ static int xadc_probe(struct platform_device *pdev)
>   	if (ret)
>   		goto err_free_samplerate_trigger;
>
> -	ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
> +	ret = request_threaded_irq(irq, NULL,
>   				xadc->ops->threaded_interrupt_handler,
>   				0, dev_name(&pdev->dev), indio_dev);
>   	if (ret)
>

^ permalink raw reply	[flat|nested] 22+ messages in thread
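
Lars' point about the unmask worker is easier to see with the register
details stripped out: the zynq handler masks an alarm source as soon as it
fires and relies on a delayed work item to unmask it later, re-arming itself
for as long as the alarm stays asserted. A rough sketch of that pattern (all
example_* names are invented; the actual mask/unmask register writes are
elided) is:

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EXAMPLE_UNMASK_DELAY_MS	500

static struct delayed_work example_unmask_work;
static bool example_still_pending;	/* placeholder for "alarm still asserted" */

static void example_unmask_worker(struct work_struct *work)
{
	/* ... unmask the sources that are no longer asserted ... */

	/* If something is still asserted, try again later. */
	if (example_still_pending)
		schedule_delayed_work(&example_unmask_work,
				msecs_to_jiffies(EXAMPLE_UNMASK_DELAY_MS));
}

static irqreturn_t example_alarm_handler(int irq, void *dev_id)
{
	/* ... mask the alarm source that just fired so it stops re-triggering ... */

	/* Arrange for it to be unmasked again once it has gone quiet. */
	schedule_delayed_work(&example_unmask_work,
			msecs_to_jiffies(EXAMPLE_UNMASK_DELAY_MS));

	return IRQ_HANDLED;
}

static void example_init(void)
{
	INIT_DELAYED_WORK(&example_unmask_work, example_unmask_worker);
}

The v3 patch removes both the scheduling in the handler and the re-arm in
the worker, so nothing ever queues the unmask work again; that is what
leaves the alarm sources masked indefinitely once they have fired.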

* [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-07-24 12:38               ` Lars-Peter Clausen
@ 2015-08-03 20:18                 ` Xander Huff
  2015-08-04  8:01                   ` Lars-Peter Clausen
  2015-08-04  5:34                 ` [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context Shubhrajyoti Datta
  1 sibling, 1 reply; 22+ messages in thread
From: Xander Huff @ 2015-08-03 20:18 UTC (permalink / raw)
  To: linux-arm-kernel

On 7/24/2015 7:38 AM, Lars-Peter Clausen wrote:
> Hi,
>
> Sorry, but I don't think this patch has been sufficiently tested against a
> mainline kernel. The driver wont even probe the way it is right now.
>
Thanks for your responses. I'm not sure why it doesn't probe for you, since I was 
able to do a fair amount of testing with it on our device.

I'll get to work on your suggestions and hopefully have a v4 out sometime next 
week.

-- 
Xander Huff
Staff Software Engineer
National Instruments

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-07-24 12:38               ` Lars-Peter Clausen
  2015-08-03 20:18                 ` Xander Huff
@ 2015-08-04  5:34                 ` Shubhrajyoti Datta
  2015-08-04  8:05                   ` Lars-Peter Clausen
  1 sibling, 1 reply; 22+ messages in thread
From: Shubhrajyoti Datta @ 2015-08-04  5:34 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Jul 24, 2015 at 6:08 PM, Lars-Peter Clausen <lars@metafoo.de> wrote:
> Hi,
>
> Sorry, but I don't think this patch has been sufficiently tested against a
> mainline kernel. The driver wont even probe the way it is right now.
>
> On 07/21/2015 01:14 AM, Xander Huff wrote:
>>
>> The driver currently registers a pair of irq handlers using
>> request_threaded_irq(), however the synchronization mechanism between the
>> hardirq and the threadedirq handler is a regular spinlock.
>
>
> If everything runs in threaded context we don't really need the spinlock
> anymore and can use the mutex throughout.

That should be better from the performance point of view.

>
>>
>> Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep,
>> and is thus not able to be acquired from a hardirq handler. This patch
>> gets
>> rid of the hardirq handler and pushes all interrupt handling into the
>> threaded context.
>
>
> We actually might as well run everything in the hardirq handler (which will
> be threaded in PREEMPT_RT). The reason why we have the threaded handler is
> because xadc_handle_event() used to sleep, but it doesn't do this anymore.

The point is: why have the hardirq at all? If we use a hardirq, then a
mutex cannot be used, and the spinlock will busy-wait.

Is there something I may be missing?
>
>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-08-03 20:18                 ` Xander Huff
@ 2015-08-04  8:01                   ` Lars-Peter Clausen
  2015-08-11 23:00                     ` [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context Xander Huff
  0 siblings, 1 reply; 22+ messages in thread
From: Lars-Peter Clausen @ 2015-08-04  8:01 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/03/2015 10:18 PM, Xander Huff wrote:
> On 7/24/2015 7:38 AM, Lars-Peter Clausen wrote:
>> Hi,
>>
>> Sorry, but I don't think this patch has been sufficiently tested against a
>> mainline kernel. The driver wont even probe the way it is right now.
>>
> Thanks for your responses. I'm not sure why it doesn't probe for you since I
> was able to do a fair amount of tests with it on our device.

genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq 157
xadc: probe of f8007100.xadc failed with error -22

You can't request a threaded IRQ without having IRQF_ONESHOT set, since this
will effectively result in an IRQ storm.

> 
> I'll get to work on your suggestions and hopefully having a v4 out sometime
> next week.
> 

^ permalink raw reply	[flat|nested] 22+ messages in thread
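
In other words: when the primary handler is NULL, the genirq core keeps the
interrupt line masked until the threaded handler has completed, and it only
agrees to do that if IRQF_ONESHOT is passed; otherwise a level-triggered line
would re-fire immediately and storm. A sketch of what the v3 request would
have needed to look like (a fragment of the probe path shown in the diffs
above, with IRQF_ONESHOT coming from <linux/interrupt.h>):

	ret = request_threaded_irq(irq, NULL,
				xadc->ops->threaded_interrupt_handler,
				IRQF_ONESHOT, dev_name(&pdev->dev), indio_dev);
	if (ret)
		goto err_clk_disable_unprepare;

The thread ultimately went the other way (v4 drops the threaded handler and
keeps everything in the hardirq handler), so this is only to show what the
genirq error above is complaining about.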

* [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-08-04  5:34                 ` [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context Shubhrajyoti Datta
@ 2015-08-04  8:05                   ` Lars-Peter Clausen
  2015-08-07  3:55                     ` Shubhrajyoti Datta
  0 siblings, 1 reply; 22+ messages in thread
From: Lars-Peter Clausen @ 2015-08-04  8:05 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/04/2015 07:34 AM, Shubhrajyoti Datta wrote:
> On Fri, Jul 24, 2015 at 6:08 PM, Lars-Peter Clausen <lars@metafoo.de> wrote:
>> Hi,
>>
>> Sorry, but I don't think this patch has been sufficiently tested against a
>> mainline kernel. The driver wont even probe the way it is right now.
>>
>> On 07/21/2015 01:14 AM, Xander Huff wrote:
>>>
>>> The driver currently registers a pair of irq handlers using
>>> request_threaded_irq(), however the synchronization mechanism between the
>>> hardirq and the threadedirq handler is a regular spinlock.
>>
>>
>> If everything runs in threaded context we don't really need the spinlock
>> anymore and can use the mutex throughout.
> 
> that should be better from the performance point of view.
> 
>>
>>>
>>> Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep,
>>> and is thus not able to be acquired from a hardirq handler. This patch
>>> gets
>>> rid of the hardirq handler and pushes all interrupt handling into the
>>> threaded context.
>>
>>
>> We actually might as well run everything in the hardirq handler (which will
>> be threaded in PREEMPT_RT). The reason why we have the threaded handler is
>> because xadc_handle_event() used to sleep, but it doesn't do this anymore.
> 
> The point is why have the hard irq. If we use hardirq then not mutex
> can be used and spinlock will
> be busy.

Well there is no need to use a threaded IRQ. The interrupt handler is quite
small and doesn't take too much time and doesn't have any delays or sleeps
in it either.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context
  2015-08-04  8:05                   ` Lars-Peter Clausen
@ 2015-08-07  3:55                     ` Shubhrajyoti Datta
  0 siblings, 0 replies; 22+ messages in thread
From: Shubhrajyoti Datta @ 2015-08-07  3:55 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Aug 4, 2015 at 1:35 PM, Lars-Peter Clausen <lars@metafoo.de> wrote:
>
> Well there is no need to use a threaded IRQ. The interrupt handler is quite
> small and doesn't take too much time and doesn't have any delays or sleeps
> in it either.
>
>
Ok thanks for the explanation.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context
  2015-08-04  8:01                   ` Lars-Peter Clausen
@ 2015-08-11 23:00                     ` Xander Huff
  2015-08-12 15:17                       ` Lars-Peter Clausen
  0 siblings, 1 reply; 22+ messages in thread
From: Xander Huff @ 2015-08-11 23:00 UTC (permalink / raw)
  To: linux-arm-kernel

The driver currently registers a pair of irq handlers using
request_threaded_irq(); however, the synchronization mechanism between the
hardirq and the threaded irq handler is a regular spinlock.

Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep
and thus cannot be acquired from a hardirq handler. This patch gets rid
of the threaded handler, pushes all interrupt handling into the hardirq
context, and uses request_irq().

To validate that this change has no impact on RT performance, here are
cyclictest values with no processes running:

$ sudo cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 0.00 0.01 0.05 1/174 2539
T: 0 ( 1405) P:98 I:1000 C:167010520 Min: 9 Act: 12 Avg: 12 Max: 75
T: 1 ( 1862) P:98 I:1500 C:111340339 Min: 9 Act: 12 Avg: 12 Max: 73

Then, all xadc raw handles were accessed in a continuous loop via
/sys/bus/iio/devices/iio:device0:

$ sudo cyclictest -S -m -p 98
# /dev/cpu_dma_latency set to 0us
policy: fifo: loadavg: 7.84 7.70 7.63 3/182 4260
T: 0 ( 2559) P:98 I:1000 C:241557018 Min: 11 Act: 18 Avg: 21 Max: 74
T: 1 ( 2560) P:98 I:1500 C:161038006 Min: 10 Act: 21 Avg: 20 Max: 73

Signed-off-by: Xander Huff <xander.huff@ni.com>
---
 drivers/iio/adc/xilinx-xadc-core.c | 37 ++++++++++---------------------------
 drivers/iio/adc/xilinx-xadc.h      |  2 --
 2 files changed, 10 insertions(+), 29 deletions(-)

diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index ce93bd8..0370624 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -273,33 +273,13 @@ static void xadc_zynq_unmask_worker(struct work_struct *work)
 		schedule_delayed_work(&xadc->zynq_unmask_work,
 				msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
 	}
-}
-
-static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
-{
-	struct iio_dev *indio_dev = devid;
-	struct xadc *xadc = iio_priv(indio_dev);
-	unsigned int alarm;
-
-	spin_lock_irq(&xadc->lock);
-	alarm = xadc->zynq_alarm;
-	xadc->zynq_alarm = 0;
-	spin_unlock_irq(&xadc->lock);
-
-	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
 
-	/* unmask the required interrupts in timer. */
-	schedule_delayed_work(&xadc->zynq_unmask_work,
-			msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
-
-	return IRQ_HANDLED;
 }
 
 static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 {
 	struct iio_dev *indio_dev = devid;
 	struct xadc *xadc = iio_priv(indio_dev);
-	irqreturn_t ret = IRQ_HANDLED;
 	uint32_t status;
 
 	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
@@ -321,18 +301,23 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 
 	status &= XADC_ZYNQ_INT_ALARM_MASK;
 	if (status) {
-		xadc->zynq_alarm |= status;
 		xadc->zynq_masked_alarm |= status;
 		/*
 		 * mask the current event interrupt,
 		 * unmask it when the interrupt is no more active.
 		 */
 		xadc_zynq_update_intmsk(xadc, 0, 0);
-		ret = IRQ_WAKE_THREAD;
+
+		xadc_handle_events(indio_dev,
+				xadc_zynq_transform_alarm(status));
+
+		/* unmask the required interrupts in timer. */
+		schedule_delayed_work(&xadc->zynq_unmask_work,
+				msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
 	}
 	spin_unlock(&xadc->lock);
 
-	return ret;
+	return IRQ_HANDLED;
 }
 
 #define XADC_ZYNQ_TCK_RATE_MAX 50000000
@@ -437,7 +422,6 @@ static const struct xadc_ops xadc_zynq_ops = {
 	.setup = xadc_zynq_setup,
 	.get_dclk_rate = xadc_zynq_get_dclk_rate,
 	.interrupt_handler = xadc_zynq_interrupt_handler,
-	.threaded_interrupt_handler = xadc_zynq_threaded_interrupt_handler,
 	.update_alarm = xadc_zynq_update_alarm,
 };
 
@@ -1225,9 +1209,8 @@ static int xadc_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_free_samplerate_trigger;
 
-	ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
-				xadc->ops->threaded_interrupt_handler,
-				0, dev_name(&pdev->dev), indio_dev);
+	ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
+			dev_name(&pdev->dev), indio_dev);
 	if (ret)
 		goto err_clk_disable_unprepare;
 
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 54adc50..f6f0819 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -60,7 +60,6 @@ struct xadc {
 
 	enum xadc_external_mux_mode external_mux_mode;
 
-	unsigned int zynq_alarm;
 	unsigned int zynq_masked_alarm;
 	unsigned int zynq_intmask;
 	struct delayed_work zynq_unmask_work;
@@ -79,7 +78,6 @@ struct xadc_ops {
 	void (*update_alarm)(struct xadc *, unsigned int);
 	unsigned long (*get_dclk_rate)(struct xadc *);
 	irqreturn_t (*interrupt_handler)(int, void *);
-	irqreturn_t (*threaded_interrupt_handler)(int, void *);
 
 	unsigned int flags;
 };
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context
  2015-08-11 23:00                     ` [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context Xander Huff
@ 2015-08-12 15:17                       ` Lars-Peter Clausen
  2015-08-12 16:33                         ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 22+ messages in thread
From: Lars-Peter Clausen @ 2015-08-12 15:17 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/12/2015 01:00 AM, Xander Huff wrote:
> The driver currently registers a pair of irq handlers using
> request_threaded_irq(), however the synchronization mechanism between the
> hardirq and the threadedirq handler is a regular spinlock.
> 
> Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep,
> and is thus not able to be acquired from a hardirq handler. This patch gets
> rid of the threaded handler and pushes all interrupt handling into the
> hardirq context, and uses request_irq().
> 
> To validate that this change has no impact on RT performance, here are
> cyclictest values with no processes running:
> 
> $ sudo cyclictest -S -m -p 98
> # /dev/cpu_dma_latency set to 0us
> policy: fifo: loadavg: 0.00 0.01 0.05 1/174 2539
> T: 0 ( 1405) P:98 I:1000 C:167010520 Min: 9 Act: 12 Avg: 12 Max: 75
> T: 1 ( 1862) P:98 I:1500 C:111340339 Min: 9 Act: 12 Avg: 12 Max: 73
> 
> Then, all xadc raw handles were accessed in a continuous loop via
> /sys/bus/iio/devices/iio:device0:
> 
> $ sudo cyclictest -S -m -p 98
> # /dev/cpu_dma_latency set to 0us
> policy: fifo: loadavg: 7.84 7.70 7.63 3/182 4260
> T: 0 ( 2559) P:98 I:1000 C:241557018 Min: 11 Act: 18 Avg: 21 Max: 74
> T: 1 ( 2560) P:98 I:1500 C:161038006 Min: 10 Act: 21 Avg: 20 Max: 73
> 
> Signed-off-by: Xander Huff <xander.huff@ni.com>

Looks good, thanks.

Acked-by: Lars-Peter Clausen <lars@metafoo.de>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context
  2015-08-12 15:17                       ` Lars-Peter Clausen
@ 2015-08-12 16:33                         ` Sebastian Andrzej Siewior
  2015-08-15 19:55                           ` Jonathan Cameron
  0 siblings, 1 reply; 22+ messages in thread
From: Sebastian Andrzej Siewior @ 2015-08-12 16:33 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/12/2015 05:17 PM, Lars-Peter Clausen wrote:
> On 08/12/2015 01:00 AM, Xander Huff wrote:
>> Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep,
>> and is thus not able to be acquired from a hardirq handler. This patch gets
>> rid of the threaded handler and pushes all interrupt handling into the
>> hardirq context, and uses request_irq().
>>
>> To validate that this change has no impact on RT performance, here are
>> cyclictest values with no processes running:
> 
> Looks good, thanks.
> 
> Acked-by: Lars-Peter Clausen <lars@metafoo.de>

Yes, I'm fine with the rework, too.

Sebastian

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context
  2015-08-12 16:33                         ` Sebastian Andrzej Siewior
@ 2015-08-15 19:55                           ` Jonathan Cameron
  0 siblings, 0 replies; 22+ messages in thread
From: Jonathan Cameron @ 2015-08-15 19:55 UTC (permalink / raw)
  To: linux-arm-kernel

On 12/08/15 17:33, Sebastian Andrzej Siewior wrote:
> On 08/12/2015 05:17 PM, Lars-Peter Clausen wrote:
>> On 08/12/2015 01:00 AM, Xander Huff wrote:
>>> Unfortunately, this breaks PREEMPT_RT builds, where a spinlock can sleep,
>>> and is thus not able to be acquired from a hardirq handler. This patch gets
>>> rid of the threaded handler and pushes all interrupt handling into the
>>> hardirq context, and uses request_irq().
>>>
>>> To validate that this change has no impact on RT performance, here are
>>> cyclictest values with no processes running:
>>
>> Looks good, thanks.
>>
>> Acked-by: Lars-Peter Clausen <lars@metafoo.de>
> 
> Yes, I'm fine with the rework, too.
A formal Acked-by would be good, but I'll take this on the basis of Lars' one
and the fact that the code clearly should work from a read-through.

Applied to the togreg branch of iio.git.  Thanks for following through
the various twists of this one. Will initially be pushed out as testing.
Note this has almost certainly (unless Linus announces a delay) missed the
upcoming merge window so will be lined up for the 4.4 one.

Jonathan
> 
> Sebastian
> 

^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread

Thread overview: 22+ messages
2015-05-07 15:38 [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock Xander Huff
2015-05-14 17:10 ` Sebastian Andrzej Siewior
2015-05-14 22:45   ` Xander Huff
2015-05-18 21:17     ` Sebastian Andrzej Siewior
2015-05-23 11:36       ` Jonathan Cameron
2015-06-07 15:29       ` Jonathan Cameron
2015-06-08 14:44         ` Xander Huff
2015-07-08 21:38         ` [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context Xander Huff
2015-07-14 14:28           ` Sebastian Andrzej Siewior
2015-07-15 15:59             ` Xander Huff
     [not found]           ` <CAKfKVtGTO81wLWk50M303t2WjcBEMEt_LUnsJ3_WmvR_3izUjg@mail.gmail.com>
2015-07-15 15:57             ` Xander Huff
2015-07-20 23:14             ` [PATCH v3] " Xander Huff
2015-07-24 12:38               ` Lars-Peter Clausen
2015-08-03 20:18                 ` Xander Huff
2015-08-04  8:01                   ` Lars-Peter Clausen
2015-08-11 23:00                     ` [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context Xander Huff
2015-08-12 15:17                       ` Lars-Peter Clausen
2015-08-12 16:33                         ` Sebastian Andrzej Siewior
2015-08-15 19:55                           ` Jonathan Cameron
2015-08-04  5:34                 ` [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context Shubhrajyoti Datta
2015-08-04  8:05                   ` Lars-Peter Clausen
2015-08-07  3:55                     ` Shubhrajyoti Datta
