From: Anshumali Gaur <agaur@marvell.com>
To: <conor.dooley@microchip.com>, <ulf.hansson@linaro.org>,
<arnd@arndb.de>, <linus.walleij@linaro.org>,
<nikita.shubin@maquefel.me>, <alexander.sverdlin@gmail.com>,
<vkoul@kernel.org>, <cyy@cyyself.name>,
<krzysztof.kozlowski@linaro.org>, <linux-kernel@vger.kernel.org>,
<sgoutham@marvell.com>
Cc: Anshumali Gaur <agaur@marvell.com>
Subject: [PATCH v2 4/4] soc: marvell: rvu-pf: Handle function level reset (FLR) IRQs for VFs
Date: Tue, 1 Oct 2024 16:19:48 +0530 [thread overview]
Message-ID: <20241001104948.2779665-5-agaur@marvell.com> (raw)
In-Reply-To: <20241001104948.2779665-1-agaur@marvell.com>
Add a PCIe FLR interrupt handler for VFs. When FLR is triggered for a VF,
the parent PF gets an interrupt. The PF creates an mbox message and sends
it to the RVU Admin Function (AF). The AF cleans up all resources attached
to that specific VF and acks the PF that the FLR has been handled.
Signed-off-by: Anshumali Gaur <agaur@marvell.com>
---
drivers/soc/marvell/rvu_gen_pf/gen_pf.c | 232 +++++++++++++++++++++++-
drivers/soc/marvell/rvu_gen_pf/gen_pf.h | 7 +
2 files changed, 238 insertions(+), 1 deletion(-)
diff --git a/drivers/soc/marvell/rvu_gen_pf/gen_pf.c b/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
index 624c55123a19..8c65ba9069ac 100644
--- a/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
+++ b/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
@@ -618,6 +618,15 @@ static void rvu_gen_pf_queue_vf_work(struct mbox *mw, struct workqueue_struct *m
}
}
+static void rvu_gen_pf_flr_wq_destroy(struct gen_pf_dev *pfdev)
+{
+ if (!pfdev->flr_wq)
+ return;
+ destroy_workqueue(pfdev->flr_wq);
+ pfdev->flr_wq = NULL;
+ devm_kfree(pfdev->dev, pfdev->flr_wrk);
+}
+
static irqreturn_t rvu_gen_pf_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
struct gen_pf_dev *pfdev = (struct gen_pf_dev *)(pf_irq);
@@ -691,6 +700,211 @@ static int rvu_gen_pf_register_pfvf_mbox_intr(struct gen_pf_dev *pfdev, int numv
return 0;
}
+static void rvu_gen_pf_flr_handler(struct work_struct *work)
+{
+ struct flr_work *flrwork = container_of(work, struct flr_work, work);
+ struct gen_pf_dev *pfdev = flrwork->pfdev;
+ struct mbox *mbox = &pfdev->mbox;
+ struct msg_req *req;
+ int vf, reg = 0;
+
+ vf = flrwork - pfdev->flr_wrk;
+
+ mutex_lock(&mbox->lock);
+ req = gen_pf_mbox_alloc_msg_vf_flr(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+ req->hdr.pcifunc &= ~RVU_PFVF_FUNC_MASK;
+ req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ if (!rvu_gen_pf_sync_mbox_msg(&pfdev->mbox)) {
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* clear transaction pending bit */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFTRPENDX(reg));
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1SX(reg));
+ }
+
+ mutex_unlock(&mbox->lock);
+}
+
+static irqreturn_t rvu_gen_pf_me_intr_handler(int irq, void *pf_irq)
+{
+ struct gen_pf_dev *pfdev = (struct gen_pf_dev *)pf_irq;
+ int vf, reg, num_reg = 1;
+ u64 intr;
+
+ if (pfdev->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = readq(pfdev->reg_base + RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ /* clear transaction pending (TRPEND) bit */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFTRPENDX(reg));
+ /* clear interrupt */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFME_INTX(reg));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_gen_pf_flr_intr_handler(int irq, void *pf_irq)
+{
+ struct gen_pf_dev *pfdev = (struct gen_pf_dev *)pf_irq;
+ int reg, dev, vf, start_vf, num_reg = 1;
+ u64 intr;
+
+ if (pfdev->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = readq(pfdev->reg_base + RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(pfdev->flr_wq, &pfdev->flr_wrk[dev].work);
+ /* Clear interrupt */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INTX(reg));
+ /* Disable the interrupt */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1CX(reg));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int rvu_gen_pf_register_flr_me_intr(struct gen_pf_dev *pfdev, int numvfs)
+{
+ char *irq_name;
+ int ret;
+
+ /* Register ME interrupt handler */
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_ME0", rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFME0),
+ rvu_gen_pf_me_intr_handler, 0, irq_name, pfdev);
+
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for ME0\n");
+ }
+
+ /* Register FLR interrupt handler */
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_FLR0", rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFFLR0),
+ rvu_gen_pf_flr_intr_handler, 0, irq_name, pfdev);
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for FLR0\n");
+ return ret;
+ }
+
+ if (numvfs > 64) {
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_ME1",
+ rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pfdev->pdev, RVU_PF_INT_VEC_VFME1),
+ rvu_gen_pf_me_intr_handler, 0, irq_name, pfdev);
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for ME1\n");
+ }
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_FLR1",
+ rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pfdev->pdev, RVU_PF_INT_VEC_VFFLR1),
+ rvu_gen_pf_flr_intr_handler, 0, irq_name, pfdev);
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for FLR1\n");
+ return ret;
+ }
+ }
+
+ /* Enable ME interrupt for all VFs */
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INTX(0));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1SX(0));
+
+ /* Enable FLR interrupt for all VFs */
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INTX(0));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1SX(0));
+
+ if (numvfs > 64) {
+ numvfs -= 64;
+
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INTX(1));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1SX(1));
+
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INTX(1));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1SX(1));
+ }
+ return 0;
+}
+
+static void rvu_gen_pf_disable_flr_me_intr(struct gen_pf_dev *pfdev)
+{
+ int irq, vfs = pfdev->total_vfs;
+
+ /* Disable VFs ME interrupts */
+ writeq(INTR_MASK(vfs), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1CX(0));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(irq, pfdev);
+
+ /* Disable VFs FLR interrupts */
+ writeq(INTR_MASK(vfs), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1CX(0));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(irq, pfdev);
+
+ if (vfs <= 64)
+ return;
+
+ writeq(INTR_MASK(vfs - 64), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1CX(1));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(irq, pfdev);
+
+ writeq(INTR_MASK(vfs - 64), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1CX(1));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(irq, pfdev);
+}
+
+static int rvu_gen_pf_flr_init(struct gen_pf_dev *pfdev, int num_vfs)
+{
+ int vf;
+
+ pfdev->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
+ if (!pfdev->flr_wq)
+ return -ENOMEM;
+
+ pfdev->flr_wrk = devm_kcalloc(pfdev->dev, num_vfs,
+ sizeof(struct flr_work), GFP_KERNEL);
+ if (!pfdev->flr_wrk) {
+ destroy_workqueue(pfdev->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ pfdev->flr_wrk[vf].pfdev = pfdev;
+ INIT_WORK(&pfdev->flr_wrk[vf].work, rvu_gen_pf_flr_handler);
+ }
+
+ return 0;
+}
+
static int rvu_gen_pf_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct gen_pf_dev *pfdev = pci_get_drvdata(pdev);
@@ -705,11 +919,25 @@ static int rvu_gen_pf_sriov_enable(struct pci_dev *pdev, int numvfs)
if (ret)
goto free_mbox;
+ ret = rvu_gen_pf_flr_init(pfdev, numvfs);
+ if (ret)
+ goto free_intr;
+
+ ret = rvu_gen_pf_register_flr_me_intr(pfdev, numvfs);
+ if (ret)
+ goto free_flr;
+
ret = pci_enable_sriov(pdev, numvfs);
if (ret)
- return ret;
+ goto free_flr_intr;
return numvfs;
+free_flr_intr:
+ rvu_gen_pf_disable_flr_me_intr(pfdev);
+free_flr:
+ rvu_gen_pf_flr_wq_destroy(pfdev);
+free_intr:
+ rvu_gen_pf_disable_pfvf_mbox_intr(pfdev, numvfs);
free_mbox:
rvu_gen_pf_pfvf_mbox_destroy(pfdev);
return ret;
@@ -725,6 +953,8 @@ static int rvu_gen_pf_sriov_disable(struct pci_dev *pdev)
pci_disable_sriov(pdev);
+ rvu_gen_pf_disable_flr_me_intr(pfdev);
+ rvu_gen_pf_flr_wq_destroy(pfdev);
rvu_gen_pf_disable_pfvf_mbox_intr(pfdev, numvfs);
rvu_gen_pf_pfvf_mbox_destroy(pfdev);
diff --git a/drivers/soc/marvell/rvu_gen_pf/gen_pf.h b/drivers/soc/marvell/rvu_gen_pf/gen_pf.h
index ad651b97b661..7aacb84df07a 100644
--- a/drivers/soc/marvell/rvu_gen_pf/gen_pf.h
+++ b/drivers/soc/marvell/rvu_gen_pf/gen_pf.h
@@ -16,6 +16,11 @@
struct gen_pf_dev;
+struct flr_work {
+ struct work_struct work;
+ struct gen_pf_dev *pfdev;
+};
+
struct mbox {
struct otx2_mbox mbox;
struct work_struct mbox_wrk;
@@ -33,6 +38,8 @@ struct gen_pf_dev {
struct device *dev;
void __iomem *reg_base;
char *irq_name;
+ struct workqueue_struct *flr_wq;
+ struct flr_work *flr_wrk;
struct work_struct mbox_wrk;
struct work_struct mbox_wrk_up;
--
2.25.1
prev parent reply other threads:[~2024-10-01 10:50 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-10-01 10:49 [PATCH v2 0/4] soc: marvell: Add a general purpose RVU physical Anshumali Gaur
2024-10-01 10:49 ` [PATCH v2 1/4] soc: marvell: Add a general purpose RVU PF driver Anshumali Gaur
2024-10-01 11:15 ` Krzysztof Kozlowski
2024-10-01 10:49 ` [PATCH v2 2/4] soc: marvell: rvu-pf: Add PF to AF mailbox communication support Anshumali Gaur
2024-10-01 11:17 ` Krzysztof Kozlowski
2024-10-01 10:49 ` [PATCH v2 3/4] soc: marvell: rvu-pf: Add mailbox communication btw RVU VFs and PF Anshumali Gaur
2024-10-01 10:49 ` Anshumali Gaur [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241001104948.2779665-5-agaur@marvell.com \
--to=agaur@marvell.com \
--cc=alexander.sverdlin@gmail.com \
--cc=arnd@arndb.de \
--cc=conor.dooley@microchip.com \
--cc=cyy@cyyself.name \
--cc=krzysztof.kozlowski@linaro.org \
--cc=linus.walleij@linaro.org \
--cc=linux-kernel@vger.kernel.org \
--cc=nikita.shubin@maquefel.me \
--cc=sgoutham@marvell.com \
--cc=ulf.hansson@linaro.org \
--cc=vkoul@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox