From: Philipp Zabel <p.zabel@pengutronix.de>
To: linux-arm-kernel@lists.infradead.org
Cc: kernel@pengutronix.de,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	dri-devel@lists.freedesktop.org,
	Fabio Estevam <festevam@gmail.com>
Subject: [PATCH 1/2] staging: drm/imx: use generic irqchip
Date: Fri, 21 Jun 2013 14:52:17 +0200
Message-ID: <1371819139-22039-2-git-send-email-p.zabel@pengutronix.de>
In-Reply-To: <1371819139-22039-1-git-send-email-p.zabel@pengutronix.de>

This patch depends on "genirq: Generic chip: Add linear irq domain
support". It removes the custom IPU irq_chip and irq_domain_ops and
instead reuses the generic irq chip implementation, with one generic
chip per bank of 32 interrupts and the generic ack/mask helpers
driving the IPU_INT_STAT and IPU_INT_CTRL registers.

Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
---
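A note on the conversion: the removed ipu_ack_irq()/ipu_mask_irq()/
ipu_unmask_irq() handlers are replaced by the generic helpers
irq_gc_ack_set_bit(), irq_gc_mask_clr_bit() and irq_gc_mask_set_bit().
A rough sketch of what the unmask helper does, paraphrased from
kernel/irq/generic-chip.c (the real helper also maintains a mask
cache):

	static void unmask_sketch(struct irq_data *d)
	{
		struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
		struct irq_chip_type *ct = irq_data_get_chip_type(d);
		u32 reg;

		irq_gc_lock(gc);		/* takes the place of ipu->lock */
		reg = readl(gc->reg_base + ct->regs.mask);
		reg |= d->mask;			/* d->mask == 1 << (hwirq % 32) */
		writel(reg, gc->reg_base + ct->regs.mask);
		irq_gc_unlock(gc);
	}
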
 drivers/staging/imx-drm/ipu-v3/ipu-common.c | 90 +++++++++--------------------
 1 file changed, 26 insertions(+), 64 deletions(-)
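
A note on the indexing in the new ipu_irq_init() below: the loop steps
i in units of 32, so irq_get_domain_generic_chip(ipu->domain, i)
returns the chip for bank i / 32, and regs.ack/regs.mask select the
matching 32-bit register. Worked example for hwirq 37: 37 / 32 = 1 and
37 % 32 = 5, so it lives in IPU_INT_STAT(1)/IPU_INT_CTRL(1), bit 5,
the same bank/bit split the removed handlers computed by hand.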

diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index d629d6d..c135c66 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -986,53 +986,6 @@ static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
-static void ipu_ack_irq(struct irq_data *d)
-{
-	struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t irq = d->hwirq;
-
-	ipu_cm_write(ipu, 1 << (irq % 32), IPU_INT_STAT(irq / 32));
-}
-
-static void ipu_unmask_irq(struct irq_data *d)
-{
-	struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t irq = d->hwirq;
-	unsigned long flags;
-	u32 reg;
-
-	spin_lock_irqsave(&ipu->lock, flags);
-
-	reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
-	reg |= 1 << (irq % 32);
-	ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));
-
-	spin_unlock_irqrestore(&ipu->lock, flags);
-}
-
-static void ipu_mask_irq(struct irq_data *d)
-{
-	struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t irq = d->hwirq;
-	unsigned long flags;
-	u32 reg;
-
-	spin_lock_irqsave(&ipu->lock, flags);
-
-	reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
-	reg &= ~(1 << (irq % 32));
-	ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));
-
-	spin_unlock_irqrestore(&ipu->lock, flags);
-}
-
-static struct irq_chip ipu_irq_chip = {
-	.name = "IPU",
-	.irq_ack = ipu_ack_irq,
-	.irq_mask = ipu_mask_irq,
-	.irq_unmask = ipu_unmask_irq,
-};
-
 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
 		enum ipu_channel_irq irq_type)
 {
@@ -1171,32 +1124,39 @@ err_register:
 	return ret;
 }
 
-static int ipu_irq_map(struct irq_domain *h, unsigned int irq,
-		       irq_hw_number_t hw)
-{
-	struct ipu_soc *ipu = h->host_data;
-
-	irq_set_chip_and_handler(irq, &ipu_irq_chip, handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID);
-	irq_set_chip_data(irq, ipu);
-
-	return 0;
-}
-
-const struct irq_domain_ops ipu_irq_domain_ops = {
-	.map = ipu_irq_map,
-	.xlate = irq_domain_xlate_onecell,
-};
 
 static int ipu_irq_init(struct ipu_soc *ipu)
 {
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	int ret, i;
+
 	ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
-					    &ipu_irq_domain_ops, ipu);
+					    &irq_generic_chip_ops, ipu);
 	if (!ipu->domain) {
 		dev_err(ipu->dev, "failed to add irq domain\n");
 		return -ENODEV;
 	}
 
+	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
+					     handle_level_irq, 0, IRQF_VALID, 0);
+	if (ret < 0) {
+		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
+		irq_domain_remove(ipu->domain);
+		return ret;
+	}
+
+	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
+		gc = irq_get_domain_generic_chip(ipu->domain, i);
+		gc->reg_base = ipu->cm_reg;
+		ct = gc->chip_types;
+		ct->chip.irq_ack = irq_gc_ack_set_bit;
+		ct->chip.irq_mask = irq_gc_mask_clr_bit;
+		ct->chip.irq_unmask = irq_gc_mask_set_bit;
+		ct->regs.ack = IPU_INT_STAT(i / 32);
+		ct->regs.mask = IPU_INT_CTRL(i / 32);
+	}
+
 	irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
 	irq_set_handler_data(ipu->irq_sync, ipu);
 	irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
@@ -1214,6 +1174,8 @@ static void ipu_irq_exit(struct ipu_soc *ipu)
 	irq_set_chained_handler(ipu->irq_sync, NULL);
 	irq_set_handler_data(ipu->irq_sync, NULL);
 
+	/* TODO: remove irq_domain_generic_chips */
+
 	for (i = 0; i < IPU_NUM_IRQS; i++) {
 		irq = irq_linear_revmap(ipu->domain, i);
 		if (irq)
-- 
1.8.3.1
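
For context, a minimal sketch of how a client of the new linear domain
looks up its Linux irq number and requests it; irq_linear_revmap() and
request_irq() are real kernel APIs, while example_handler() and the
"ipu-example" name are hypothetical:

	static irqreturn_t example_handler(int irq, void *dev_id)
	{
		/* handle the IPU event */
		return IRQ_HANDLED;
	}

	static int example_request(struct ipu_soc *ipu, irq_hw_number_t hwirq)
	{
		/* translate IPU hw irq number to a Linux irq number */
		unsigned int irq = irq_linear_revmap(ipu->domain, hwirq);

		if (!irq)
			return -EINVAL;
		return request_irq(irq, example_handler, 0, "ipu-example", ipu);
	}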

Thread overview: 4+ messages
2013-06-21 12:52 [PATCH 0/2] IPU generic irq chip Philipp Zabel
2013-06-21 12:52 ` Philipp Zabel [this message]
2013-06-24 12:59   ` [PATCH 1/2] staging: drm/imx: use generic irqchip Grant Likely
2013-06-21 12:52 ` [PATCH 2/2] staging: drm/imx: use generic irq chip unused field to block out invalid irqs Philipp Zabel
