linux-iio.vger.kernel.org archive mirror
From: Xander Huff <xander.huff@ni.com>
To: jic23@kernel.org, knaack.h@gmx.de, lars@metafoo.de, pmeerw@pmeerw.net
Cc: michal.simek@xilinx.com, soren.brinkmann@xilinx.com,
	linux-iio@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	linux-rt-users@vger.kernel.org, ben.shelton@ni.com,
	joe.hershberger@ni.com, joshc@ni.com,
	Xander Huff <xander.huff@ni.com>
Subject: [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock
Date: Thu, 7 May 2015 10:38:10 -0500	[thread overview]
Message-ID: <1431013090-18996-1-git-send-email-xander.huff@ni.com> (raw)

The driver currently registers a pair of irq handlers using
request_threaded_irq(), but the synchronization mechanism shared
between the hardirq handler and the threaded irq handler is a regular
spinlock.

Unfortunately, this breaks PREEMPT_RT builds, where a regular spinlock
is a sleeping lock and thus cannot be acquired from a hardirq handler.
Because the set of operations performed under the lock is already
minimal and bounded, the lock is a suitable candidate for conversion to
a raw_spinlock, which keeps its spinning semantics on PREEMPT_RT.

This patch should not impact behavior on !PREEMPT_RT builds, where
spinlock_t is a thin wrapper around raw_spinlock_t.
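
For illustration only (not part of this patch), a minimal sketch of the
pattern in question -- all identifiers below are invented for the
example, not taken from the driver:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct example_state {
		raw_spinlock_t lock;	/* still spins on PREEMPT_RT */
		unsigned int pending;
	};

	static irqreturn_t example_hardirq(int irq, void *devid)
	{
		struct example_state *st = devid;

		/* Hardirq context: a sleeping lock here would be a bug on RT */
		raw_spin_lock(&st->lock);
		st->pending |= 1;
		raw_spin_unlock(&st->lock);

		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t example_thread(int irq, void *devid)
	{
		struct example_state *st = devid;
		unsigned int pending;

		/* Threaded handler: take a snapshot under the shared lock */
		raw_spin_lock_irq(&st->lock);
		pending = st->pending;
		st->pending = 0;
		raw_spin_unlock_irq(&st->lock);

		/* ... process 'pending' outside the lock ... */
		return IRQ_HANDLED;
	}

	static int example_setup(int irq, struct example_state *st)
	{
		raw_spin_lock_init(&st->lock);
		return request_threaded_irq(irq, example_hardirq,
				example_thread, 0, "example", st);
	}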

Signed-off-by: Xander Huff <xander.huff@ni.com>
Reviewed-by: Joe Hershberger <joe.hershberger@ni.com>
Reviewed-by: Ben Shelton <ben.shelton@ni.com>
Reviewed-by: Josh Cartwright <joshc@ni.com>
---
 drivers/iio/adc/xilinx-xadc-core.c | 34 +++++++++++++++++-----------------
 drivers/iio/adc/xilinx-xadc.h      |  2 +-
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index a221f73..4bdf03b 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -163,7 +163,7 @@ static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
 	uint32_t tmp;
 	int ret;
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
 			XADC_ZYNQ_INT_DFIFO_GTH);
 
@@ -177,7 +177,7 @@ static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
 
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 
 	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
 	if (ret == 0)
@@ -200,7 +200,7 @@ static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
 	cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
 	cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
 			XADC_ZYNQ_INT_DFIFO_GTH);
 	xadc_zynq_drain_fifo(xadc);
@@ -213,7 +213,7 @@ static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
 
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 	ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
 	if (ret == 0)
 		ret = -EIO;
@@ -252,7 +252,7 @@ static void xadc_zynq_unmask_worker(struct work_struct *work)
 
 	misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 
 	/* Clear those bits which are not active anymore */
 	unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
@@ -266,7 +266,7 @@ static void xadc_zynq_unmask_worker(struct work_struct *work)
 
 	xadc_zynq_update_intmsk(xadc, 0, 0);
 
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 
 	/* if still pending some alarm re-trigger the timer */
 	if (xadc->zynq_masked_alarm) {
@@ -281,10 +281,10 @@ static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
 	struct xadc *xadc = iio_priv(indio_dev);
 	unsigned int alarm;
 
-	spin_lock_irq(&xadc->lock);
+	raw_spin_lock_irq(&xadc->lock);
 	alarm = xadc->zynq_alarm;
 	xadc->zynq_alarm = 0;
-	spin_unlock_irq(&xadc->lock);
+	raw_spin_unlock_irq(&xadc->lock);
 
 	xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
 
@@ -309,7 +309,7 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 	if (!status)
 		return IRQ_NONE;
 
-	spin_lock(&xadc->lock);
+	raw_spin_lock(&xadc->lock);
 
 	xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
 
@@ -330,7 +330,7 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
 		xadc_zynq_update_intmsk(xadc, 0, 0);
 		ret = IRQ_WAKE_THREAD;
 	}
-	spin_unlock(&xadc->lock);
+	raw_spin_unlock(&xadc->lock);
 
 	return ret;
 }
@@ -419,7 +419,7 @@ static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
 	/* Move OT to bit 7 */
 	alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);
 
-	spin_lock_irqsave(&xadc->lock, flags);
+	raw_spin_lock_irqsave(&xadc->lock, flags);
 
 	/* Clear previous interrupts if any. */
 	xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
@@ -428,7 +428,7 @@ static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
 	xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
 		~alarm & XADC_ZYNQ_INT_ALARM_MASK);
 
-	spin_unlock_irqrestore(&xadc->lock, flags);
+	raw_spin_unlock_irqrestore(&xadc->lock, flags);
 }
 
 static const struct xadc_ops xadc_zynq_ops = {
@@ -520,12 +520,12 @@ static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
 	alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
 			((alarm & 0xf0) << 6);
 
-	spin_lock_irqsave(&xadc->lock, flags);
+	raw_spin_lock_irqsave(&xadc->lock, flags);
 	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
 	val &= ~XADC_AXI_INT_ALARM_MASK;
 	val |= alarm;
 	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
-	spin_unlock_irqrestore(&xadc->lock, flags);
+	raw_spin_unlock_irqrestore(&xadc->lock, flags);
 }
 
 static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
@@ -674,7 +674,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
 		xadc->trigger = NULL;
 	}
 
-	spin_lock_irqsave(&xadc->lock, flags);
+	raw_spin_lock_irqsave(&xadc->lock, flags);
 	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
 	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
 	if (state)
@@ -682,7 +682,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
 	else
 		val &= ~XADC_AXI_INT_EOS;
 	xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
-	spin_unlock_irqrestore(&xadc->lock, flags);
+	raw_spin_unlock_irqrestore(&xadc->lock, flags);
 
 err_out:
 	mutex_unlock(&xadc->mutex);
@@ -1175,7 +1175,7 @@ static int xadc_probe(struct platform_device *pdev)
 	xadc->ops = id->data;
 	init_completion(&xadc->completion);
 	mutex_init(&xadc->mutex);
-	spin_lock_init(&xadc->lock);
+	raw_spin_lock_init(&xadc->lock);
 	INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index c7487e8..d945e25 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -66,7 +66,7 @@ struct xadc {
 	struct delayed_work zynq_unmask_work;
 
 	struct mutex mutex;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	struct completion completion;
 };
-- 
1.9.1



Thread overview: 23+ messages
2015-05-07 15:38 Xander Huff [this message]
2015-05-14 17:10 ` [PATCH] iio: adc: xilinx-xadc: Convert to raw spinlock Sebastian Andrzej Siewior
2015-05-14 22:45   ` Xander Huff
2015-05-18 21:17     ` Sebastian Andrzej Siewior
2015-05-23 11:36       ` Jonathan Cameron
2015-06-07 15:29       ` Jonathan Cameron
2015-06-08 14:44         ` Xander Huff
2015-07-08 21:38         ` [PATCH v2] iio: adc: xilinx-xadc: Push interrupts into threaded context Xander Huff
2015-07-09  5:03           ` Shubhrajyoti Datta
2015-07-15 15:57             ` Xander Huff
2015-07-20 23:14             ` [PATCH v3] " Xander Huff
2015-07-24 12:38               ` Lars-Peter Clausen
2015-08-03 20:18                 ` Xander Huff
2015-08-04  8:01                   ` Lars-Peter Clausen
2015-08-11 23:00                     ` [PATCH v4] iio: adc: xilinx-xadc: Push interrupts into hardirq context Xander Huff
2015-08-12 15:17                       ` Lars-Peter Clausen
2015-08-12 16:33                         ` Sebastian Andrzej Siewior
2015-08-15 19:55                           ` Jonathan Cameron
2015-08-04  5:34                 ` [PATCH v3] iio: adc: xilinx-xadc: Push interrupts into threaded context Shubhrajyoti Datta
2015-08-04  8:05                   ` Lars-Peter Clausen
2015-08-07  3:55                     ` Shubhrajyoti Datta
2015-07-14 14:28           ` [PATCH v2] " Sebastian Andrzej Siewior
2015-07-15 15:59             ` Xander Huff
