From: Nirmal Patel <nirmal.patel@linux.intel.com>
To: Nam Cao <namcao@linutronix.de>
Cc: "Marc Zyngier" <maz@kernel.org>,
"Thomas Gleixner" <tglx@linutronix.de>,
"Lorenzo Pieralisi" <lpieralisi@kernel.org>,
"Krzysztof Wilczyński" <kwilczynski@kernel.org>,
"Manivannan Sadhasivam" <mani@kernel.org>,
"Rob Herring" <robh@kernel.org>,
"Bjorn Helgaas" <bhelgaas@google.com>,
linux-pci@vger.kernel.org, linux-kernel@vger.kernel.org,
"Karthikeyan Mitran" <m.karthikeyan@mobiveil.co.in>,
"Hou Zhiqiang" <Zhiqiang.Hou@nxp.com>,
"Thomas Petazzoni" <thomas.petazzoni@bootlin.com>,
"Pali Rohár" <pali@kernel.org>,
"K . Y . Srinivasan" <kys@microsoft.com>,
"Haiyang Zhang" <haiyangz@microsoft.com>,
"Wei Liu" <wei.liu@kernel.org>,
"Dexuan Cui" <decui@microsoft.com>,
"Joyce Ooi" <joyce.ooi@intel.com>,
"Jim Quinlan" <jim2101024@gmail.com>,
"Nicolas Saenz Julienne" <nsaenz@kernel.org>,
"Florian Fainelli" <florian.fainelli@broadcom.com>,
"Broadcom internal kernel review list"
<bcm-kernel-feedback-list@broadcom.com>,
"Ray Jui" <rjui@broadcom.com>,
"Scott Branden" <sbranden@broadcom.com>,
"Ryder Lee" <ryder.lee@mediatek.com>,
"Jianjun Wang" <jianjun.wang@mediatek.com>,
"Marek Vasut" <marek.vasut+renesas@gmail.com>,
"Yoshihiro Shimoda" <yoshihiro.shimoda.uh@renesas.com>,
"Michal Simek" <michal.simek@amd.com>,
"Daire McNamara" <daire.mcnamara@microchip.com>,
"Jonathan Derrick" <jonathan.derrick@linux.dev>,
"Matthias Brugger" <matthias.bgg@gmail.com>,
"AngeloGioacchino Del Regno"
<angelogioacchino.delregno@collabora.com>,
linux-arm-kernel@lists.infradead.org,
linux-hyperv@vger.kernel.org,
linux-rpi-kernel@lists.infradead.org,
linux-mediatek@lists.infradead.org,
linux-renesas-soc@vger.kernel.org
Subject: Re: [PATCH 16/16] PCI: vmd: Switch to msi_create_parent_irq_domain()
Date: Wed, 16 Jul 2025 11:10:09 -0700
Message-ID: <20250716111009.000022ff@linux.intel.com>
In-Reply-To: <de3f1d737831b251e9cd2cbf9e4c732a5bbba13a.1750858083.git.namcao@linutronix.de>

On Thu, 26 Jun 2025 16:48:06 +0200
Nam Cao <namcao@linutronix.de> wrote:
> Move away from the legacy MSI domain setup, switch to use
> msi_create_parent_irq_domain().
>
> Signed-off-by: Nam Cao <namcao@linutronix.de>
> ---
> Cc: Nirmal Patel <nirmal.patel@linux.intel.com>
> Cc: Jonathan Derrick <jonathan.derrick@linux.dev>
> ---
>  drivers/pci/controller/Kconfig |   1 +
>  drivers/pci/controller/vmd.c   | 160 +++++++++++++++++----------------
>  2 files changed, 82 insertions(+), 79 deletions(-)
>
> diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
> index 8f56ffd029ba2..41748d083b933 100644
> --- a/drivers/pci/controller/Kconfig
> +++ b/drivers/pci/controller/Kconfig
> @@ -156,6 +156,7 @@ config PCI_IXP4XX
>  config VMD
>  	depends on PCI_MSI && X86_64 && !UML
>  	tristate "Intel Volume Management Device Driver"
> +	select IRQ_MSI_LIB
>  	help
>  	  Adds support for the Intel Volume Management Device (VMD).
>  	  VMD is a secondary PCI host bridge that allows PCI Express root ports,
> diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
> index d9b893bf4e456..38693a9487d9b 100644
> --- a/drivers/pci/controller/vmd.c
> +++ b/drivers/pci/controller/vmd.c
> @@ -7,6 +7,7 @@
>  #include <linux/device.h>
>  #include <linux/interrupt.h>
>  #include <linux/irq.h>
> +#include <linux/irqchip/irq-msi-lib.h>
>  #include <linux/kernel.h>
>  #include <linux/module.h>
>  #include <linux/msi.h>
> @@ -174,9 +175,6 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
>  	msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
>  }
>
> -/*
> - * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
> - */
>  static void vmd_irq_enable(struct irq_data *data)
>  {
>  	struct vmd_irq *vmdirq = data->chip_data;
> @@ -186,7 +184,11 @@ static void vmd_irq_enable(struct irq_data *data)
>  		list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
>  		vmdirq->enabled = true;
>  	}
> +}
>
> +static void vmd_pci_msi_enable(struct irq_data *data)
> +{
> +	vmd_irq_enable(data->parent_data);
>  	data->chip->irq_unmask(data);
>  }
>
> @@ -194,8 +196,6 @@ static void vmd_irq_disable(struct irq_data *data)
>  {
>  	struct vmd_irq *vmdirq = data->chip_data;
>
> -	data->chip->irq_mask(data);
> -
>  	scoped_guard(raw_spinlock_irqsave, &list_lock) {
>  		if (vmdirq->enabled) {
>  			list_del_rcu(&vmdirq->node);
> @@ -204,19 +204,17 @@ static void vmd_irq_disable(struct irq_data *data)
>  	}
>  }
>
> +static void vmd_pci_msi_disable(struct irq_data *data)
> +{
> +	data->chip->irq_mask(data);
> +	vmd_irq_disable(data->parent_data);
> +}
> +
>  static struct irq_chip vmd_msi_controller = {
>  	.name = "VMD-MSI",
> -	.irq_enable = vmd_irq_enable,
> -	.irq_disable = vmd_irq_disable,
>  	.irq_compose_msi_msg = vmd_compose_msi_msg,
>  };
>
> -static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
> -				     msi_alloc_info_t *arg)
> -{
> -	return 0;
> -}
> -
>  /*
>   * XXX: We can be even smarter selecting the best IRQ once we solve the
>   * affinity problem.
> @@ -250,100 +248,110 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
>  	return &vmd->irqs[best];
>  }
>
> -static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
> -			unsigned int virq, irq_hw_number_t hwirq,
> -			msi_alloc_info_t *arg)
> +static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs);
> +
> +static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
> +			 void *arg)
Is this wrapped at 80 columns? I can see a few lines that are longer
than 80. Disregard this if the patch is wrapped correctly and the
problem is my Claws Mail client.
>  {
> -	struct msi_desc *desc = arg->desc;
> -	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
> -	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
> +	struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc;
> +	struct vmd_dev *vmd = domain->host_data;
> +	struct vmd_irq *vmdirq;
>
> -	if (!vmdirq)
> -		return -ENOMEM;
> +	for (int i = 0; i < nr_irqs; ++i) {
> +		vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
> +		if (!vmdirq) {
> +			vmd_msi_free(domain, virq, i);
> +			return -ENOMEM;
> +		}
>
> -	INIT_LIST_HEAD(&vmdirq->node);
> -	vmdirq->irq = vmd_next_irq(vmd, desc);
> -	vmdirq->virq = virq;
> +		INIT_LIST_HEAD(&vmdirq->node);
> +		vmdirq->irq = vmd_next_irq(vmd, desc);
> +		vmdirq->virq = virq + i;
> +
> +		irq_domain_set_info(domain, virq + i, vmdirq->irq->virq, &vmd_msi_controller,
> +				    vmdirq, handle_untracked_irq, vmd, NULL);
> +	}
>
> -	irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
> -			    handle_untracked_irq, vmd, NULL);
>  	return 0;
>  }
>
> -static void vmd_msi_free(struct irq_domain *domain,
> -			 struct msi_domain_info *info, unsigned int virq)
> +static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
>  {
>  	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
>
> -	synchronize_srcu(&vmdirq->irq->srcu);
> +	for (int i = 0; i < nr_irqs; ++i) {
> +		synchronize_srcu(&vmdirq->irq->srcu);
>
> -	/* XXX: Potential optimization to rebalance */
> -	scoped_guard(raw_spinlock_irq, &list_lock)
> -		vmdirq->irq->count--;
> +		/* XXX: Potential optimization to rebalance */
> +		scoped_guard(raw_spinlock_irq, &list_lock)
> +			vmdirq->irq->count--;
>
> -	kfree(vmdirq);
> +		kfree(vmdirq);
> +	}
>  }
>
> -static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
> -			   int nvec, msi_alloc_info_t *arg)
> +static const struct irq_domain_ops vmd_msi_domain_ops = {
> +	.alloc = vmd_msi_alloc,
> +	.free = vmd_msi_free,
> +};
> +
> +static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
> +				  struct irq_domain *real_parent, struct msi_domain_info *info)
>  {
> -	struct pci_dev *pdev = to_pci_dev(dev);
> -	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
> +	if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX))
> +		return false;
>
> -	if (nvec > vmd->msix_count)
> -		return vmd->msix_count;
> +	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
> +		return false;
>
> -	memset(arg, 0, sizeof(*arg));
> -	return 0;
> +	info->chip->irq_enable = vmd_pci_msi_enable;
> +	info->chip->irq_disable = vmd_pci_msi_disable;
> +	return true;
>  }
>
> -static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
> -{
> -	arg->desc = desc;
> -}
> +#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
> +#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY)
>
> -static struct msi_domain_ops vmd_msi_domain_ops = {
> -	.get_hwirq = vmd_get_hwirq,
> -	.msi_init = vmd_msi_init,
> -	.msi_free = vmd_msi_free,
> -	.msi_prepare = vmd_msi_prepare,
> -	.set_desc = vmd_set_desc,
> +static const struct msi_parent_ops vmd_msi_parent_ops = {
> +	.supported_flags = VMD_MSI_FLAGS_SUPPORTED,
> +	.required_flags = VMD_MSI_FLAGS_REQUIRED,
> +	.bus_select_token = DOMAIN_BUS_VMD_MSI,
> +	.bus_select_mask = MATCH_PCI_MSI,
> +	.prefix = "VMD-",
> +	.init_dev_msi_info = vmd_init_dev_msi_info,
>  };
>
> -static struct msi_domain_info vmd_msi_domain_info = {
> -	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
> -		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
> -	.ops = &vmd_msi_domain_ops,
> -	.chip = &vmd_msi_controller,
> -};
> -
> -static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
> -{
> -	u16 reg;
> -
> -	pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
> -	reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
> -		       (reg | VMCONFIG_MSI_REMAP);
> -	pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
> -}
> -
>  static int vmd_create_irq_domain(struct vmd_dev *vmd)
>  {
> -	struct fwnode_handle *fn;
> +	struct irq_domain_info info = {
> +		.size = vmd->msix_count,
> +		.ops = &vmd_msi_domain_ops,
> +		.host_data = vmd,
> +	};
>
> -	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
> -	if (!fn)
> +	info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
> +	if (!info.fwnode)
>  		return -ENODEV;
>
> -	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
> +	vmd->irq_domain = msi_create_parent_irq_domain(&info, &vmd_msi_parent_ops);
>  	if (!vmd->irq_domain) {
> -		irq_domain_free_fwnode(fn);
> +		irq_domain_free_fwnode(info.fwnode);
>  		return -ENODEV;
>  	}
>
>  	return 0;
>  }
>
> +static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
> +{
> +	u16 reg;
> +
> +	pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
> +	reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
> +		       (reg | VMCONFIG_MSI_REMAP);
> +	pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
> +}
> +
>  static void vmd_remove_irq_domain(struct vmd_dev *vmd)
>  {
>  	/*
> @@ -874,12 +882,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
>  		ret = vmd_create_irq_domain(vmd);
>  		if (ret)
>  			return ret;
> -
> -		/*
> -		 * Override the IRQ domain bus token so the domain can be
> -		 * distinguished from a regular PCI/MSI domain.
> -		 */
> -		irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
>  	} else {
>  		vmd_set_msi_remapping(vmd, false);
>  	}