From: sunil.kovvuri@gmail.com
To: netdev@vger.kernel.org, davem@davemloft.net
Cc: arnd@arndb.de, linux-soc@vger.kernel.org,
Linu Cherian <lcherian@marvell.com>,
Sunil Goutham <sgoutham@marvell.com>
Subject: [PATCH 19/20] octeontx2-af: Add interrupt handlers for Master Enable event
Date: Fri, 9 Nov 2018 00:06:00 +0530
Message-ID: <1541702161-30673-20-git-send-email-sunil.kovvuri@gmail.com>
In-Reply-To: <1541702161-30673-1-git-send-email-sunil.kovvuri@gmail.com>
From: Linu Cherian <lcherian@marvell.com>
- Add interrupt handlers for Master Enable events from PFs
and from the VFs of AF.
- Master Enable is required for MSI-X interrupt delivery to work.
- The Master Enable trap handler doesn't have to do anything other
than clearing the TRPEND bit, since the enable/disable requirements
are already taken care of by the mbox request and FLR handlers
(see the illustrative sketch below).
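
Illustrative sketch (not part of this patch): a stand-alone, user-space
mock of the acknowledge-only pattern the handlers follow, i.e. for each
pending bit, write that same bit back to the write-1-to-clear (W1C)
trap-pending and interrupt registers. The names mock_trpend, mock_me_int,
w1c_write and handle_me_pending are hypothetical stand-ins for this
sketch only, not driver or hardware symbols.

#include <stdint.h>
#include <stdio.h>

static uint64_t mock_trpend;    /* stands in for a TRPEND W1C register */
static uint64_t mock_me_int;    /* stands in for an ME interrupt W1C register */

/* W1C semantics: bits written as 1 are cleared, all other bits untouched */
static void w1c_write(uint64_t *reg, uint64_t val)
{
        *reg &= ~val;
}

static void handle_me_pending(uint64_t pending)
{
        int bit;

        for (bit = 0; bit < 64; bit++) {
                if (pending & (1ULL << bit)) {
                        /* clear trap-pending raised by ME (master enable) */
                        w1c_write(&mock_trpend, 1ULL << bit);
                        /* clear the interrupt itself */
                        w1c_write(&mock_me_int, 1ULL << bit);
                }
        }
}

int main(void)
{
        mock_trpend = mock_me_int = 0x5;   /* pretend bits 0 and 2 are pending */

        handle_me_pending(mock_me_int);

        printf("trpend=%#llx me_int=%#llx\n",
               (unsigned long long)mock_trpend,
               (unsigned long long)mock_me_int);
        return 0;
}

Because a W1C write affects only the bits written as 1, acknowledging one
PF/VF this way cannot discard events raised concurrently by other PFs/VFs.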
Signed-off-by: Linu Cherian <lcherian@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
---
drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 113 ++++++++++++++++++++++++
1 file changed, 113 insertions(+)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 1ef104a..395819d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1889,6 +1889,67 @@ static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
return IRQ_HANDLED;
}
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+ int vf;
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (vf = 0; vf < 64; vf++) {
+ if (intr & (1ULL << vf)) {
+ /* clear the trpend due to ME(master enable) */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+ /* clear interrupt */
+ rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+ }
+ }
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfset;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ for (vfset = 0; vfset <= 1; vfset++) {
+ intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+ if (intr)
+ rvu_me_handle_vfset(rvu, vfset, intr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ /* clear the trpend due to ME(master enable) */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+ BIT_ULL(pf));
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+ BIT_ULL(pf));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
@@ -1901,6 +1962,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ /* Disable the PF ME interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
for (irq = 0; irq < rvu->num_vec; irq++) {
if (rvu->irq_allocated[irq])
free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
@@ -1989,6 +2054,26 @@ static int rvu_register_interrupts(struct rvu *rvu)
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ /* Register ME interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ "RVUAF ME");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+ rvu_me_pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for ME\n");
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+ /* Enable ME interrupt for all PFs*/
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
if (!rvu_afvf_msix_vectors_num_ok(rvu))
return 0;
@@ -2049,6 +2134,30 @@ static int rvu_register_interrupts(struct rvu *rvu)
}
rvu->irq_allocated[offset] = true;
+ /* Register ME interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
return 0;
fail:
@@ -2108,12 +2217,14 @@ static void rvu_disable_afvf_intr(struct rvu *rvu)
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
if (vfs <= 64)
return;
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
INTR_MASK(vfs - 64));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}
static void rvu_enable_afvf_intr(struct rvu *rvu)
@@ -2130,6 +2241,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
/* FLR */
rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
/* Same for remaining VFs, if any. */
if (vfs <= 64)
@@ -2141,6 +2253,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
--
2.7.4
Thread overview: 36+ messages
2018-11-08 18:35 [PATCH 00/20] octeontx2-af: NPC MCAM support and FLR handling sunil.kovvuri
2018-11-08 18:35 ` [PATCH 01/20] octeontx2-af: Support to modify min/max allowed packet lengths sunil.kovvuri
2018-11-08 18:35 ` [PATCH 02/20] octeontx2-af: Support to get NIX HW constants from AF sunil.kovvuri
2018-11-08 18:35 ` [PATCH 03/20] octeontx2-af: Relax resource lock into mutex sunil.kovvuri
2018-11-08 18:35 ` [PATCH 04/20] octeontx2-af: NPC MCAM entry alloc/free support sunil.kovvuri
2018-11-08 22:22 ` David Miller
2018-11-08 18:35 ` [PATCH 05/20] octeontx2-af: MCAM entry installation support sunil.kovvuri
2018-11-08 18:35 ` [PATCH 06/20] octeontx2-af: Support for NPC MCAM counters sunil.kovvuri
2018-11-08 18:35 ` [PATCH 07/20] octeontx2-af: Map or unmap NPC MCAM entry and counter sunil.kovvuri
2018-11-08 18:35 ` [PATCH 08/20] octeontx2-af: Alloc and config NPC MCAM entry at a time sunil.kovvuri
2018-11-08 20:43 ` Arnd Bergmann
2018-11-09 4:20 ` Sunil Kovvuri
2018-11-09 11:02 ` Arnd Bergmann
2018-11-09 17:13 ` Sunil Kovvuri
2018-11-09 21:06 ` Arnd Bergmann
2018-11-12 10:31 ` Sunil Kovvuri
2018-11-08 18:35 ` [PATCH 09/20] octeontx2-af: Add MKEX default profile sunil.kovvuri
2018-11-08 18:35 ` [PATCH 10/20] octeontx2-af: Support to enable/disable default MCAM entries sunil.kovvuri
2018-11-08 18:35 ` [PATCH 11/20] octeontx2-af: Add support for stripping STAG/CTAG sunil.kovvuri
2018-11-08 20:47 ` Arnd Bergmann
2018-11-09 4:29 ` Sunil Kovvuri
2018-11-09 11:12 ` Arnd Bergmann
2018-11-09 17:06 ` Sunil Kovvuri
2018-11-08 18:35 ` [PATCH 12/20] octeontx2-af: Verify NPA/SSO/NIX PF_FUNC mapping sunil.kovvuri
2018-11-08 18:35 ` [PATCH 13/20] octeontx2-af: Add FLR interrupt handler sunil.kovvuri
2018-11-08 20:50 ` Arnd Bergmann
2018-11-08 18:35 ` [PATCH 14/20] octeontx2-af: Teardown NPA, NIX LF upon receiving FLR sunil.kovvuri
2018-11-08 18:35 ` [PATCH 15/20] octeontx2-af: Mbox communication support btw AF and it's VFs sunil.kovvuri
2018-11-08 18:35 ` [PATCH 16/20] octeontx2-af: Enable sriov on AF to create VFs sunil.kovvuri
2018-11-08 18:35 ` [PATCH 17/20] octeontx2-af: Configure AF VFs to talk over LBK channels sunil.kovvuri
2018-11-08 18:35 ` [PATCH 18/20] octeontx2-af: Add FLR handling support for AF's VFs sunil.kovvuri
2018-11-08 18:36 ` sunil.kovvuri [this message]
2018-11-08 18:36 ` [PATCH 20/20] octeontx2-af: Workarounds for HW errata sunil.kovvuri
2018-11-08 21:02 ` [PATCH 00/20] octeontx2-af: NPC MCAM support and FLR handling Arnd Bergmann
2018-11-09 4:34 ` Sunil Kovvuri
2018-11-09 11:08 ` Arnd Bergmann