From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: Oliver O'Halloran <oohall@gmail.com>, linuxppc-dev@lists.ozlabs.org
Subject: Re: [PATCH 7/7] powerpc/powernv/npu: Move IOMMU group setup into npu-dma.c
Date: Mon, 6 Apr 2020 19:54:09 +1000
Message-ID: <09cb94cb-a57f-078f-c5e0-b844d798b3a5@ozlabs.ru>
In-Reply-To: <20200406030745.24595-8-oohall@gmail.com>
On 06/04/2020 13:07, Oliver O'Halloran wrote:
> The NVLink IOMMU group setup is only relevant to NVLink devices so move
> it into the NPU containment zone. This lets us remove some prototypes from
> pci.h and staticify some function definitions.
>
> Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> ---
> arch/powerpc/platforms/powernv/npu-dma.c | 54 +++++++++++++++++++-
> arch/powerpc/platforms/powernv/pci-ioda.c | 60 +++--------------------
> arch/powerpc/platforms/powernv/pci.h | 6 +--
> 3 files changed, 60 insertions(+), 60 deletions(-)
>
> diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
> index df27b8d7e78f..abeaa533b976 100644
> --- a/arch/powerpc/platforms/powernv/npu-dma.c
> +++ b/arch/powerpc/platforms/powernv/npu-dma.c
> @@ -15,6 +15,7 @@
>
> #include <asm/debugfs.h>
> #include <asm/powernv.h>
> +#include <asm/ppc-pci.h>
> #include <asm/opal.h>
>
> #include "pci.h"
> @@ -425,7 +426,8 @@ static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
> ++npucomp->pe_num;
> }
>
> -struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
> +static struct iommu_table_group *
> + pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
> {
> struct iommu_table_group *compound_group;
> struct npu_comp *npucomp;
> @@ -491,7 +493,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
> return compound_group;
> }
>
> -struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
> +static struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
> {
> struct iommu_table_group *table_group;
> struct npu_comp *npucomp;
> @@ -534,6 +536,54 @@ struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
>
> return table_group;
> }
> +
> +void pnv_pci_npu_setup_iommu_groups(void)
> +{
> + struct pci_controller *hose;
> + struct pnv_phb *phb;
> + struct pnv_ioda_pe *pe;
> +
> + /*
> + * For non-nvlink devices the IOMMU group is registered when the PE is
> + * configured and devices are added to the group when the per-device
> + * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is
> + * only initialised for "normal" IODA PHBs.
> + *
> + * For NVLink devices we need to ensure the NVLinks and the GPU end up
> + * in the same IOMMU group, so that's handled here.
> + */
> + list_for_each_entry(hose, &hose_list, list_node) {
> + phb = hose->private_data;
> +
> + if (phb->type == PNV_PHB_IODA2)
> + list_for_each_entry(pe, &phb->ioda.pe_list, list)
> + pnv_try_setup_npu_table_group(pe);
> + }
> +
> + /*
> + * Now we have all PHBs discovered, time to add NPU devices to
> + * the corresponding IOMMU groups.
> + */
> + list_for_each_entry(hose, &hose_list, list_node) {
> + unsigned long pgsizes;
> +
> + phb = hose->private_data;
> +
> + if (phb->type != PNV_PHB_NPU_NVLINK)
> + continue;
> +
> + pgsizes = pnv_ioda_parse_tce_sizes(phb);
> + list_for_each_entry(pe, &phb->ioda.pe_list, list) {
> + /*
> + * IODA2 bridges get this set up from
> + * pci_controller_ops::setup_bridge but NPU bridges
> + * do not have this hook defined so we do it here.
> + */
> + pe->table_group.pgsizes = pgsizes;
> + pnv_npu_compound_attach(pe);
> + }
> + }
> +}
> #endif /* CONFIG_IOMMU_API */
>
> int pnv_npu2_init(struct pci_controller *hose)
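
Just to spell out that comment for anyone reading along: for plain PCI(e)
devices the group handling stays in the generic IODA path, roughly like
this (sketch only; the helper names are from memory and the actual call
sites in the tree may differ):

    /*
     * Rough sketch of the non-NVLink flow, for contrast; assumes the
     * usual powerpc iommu_register_group()/iommu_add_device() helpers.
     */
    static void example_generic_pe_group_setup(struct pnv_ioda_pe *pe,
                                               struct pci_dev *pdev)
    {
            /* group is registered once, when the PE's DMA setup runs */
            iommu_register_group(&pe->table_group,
                                 pe->phb->hose->global_number,
                                 pe->pe_number);

            /* each device is added later, from dma_dev_setup() */
            iommu_add_device(&pe->table_group, &pdev->dev);
    }

The NVLink case cannot be handled per device like that since the GPU and
all of its NPU links have to end up in one compound group, hence the
global walk over hose_list here.
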
> diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
> index c020ade3a846..dba0c2c09f61 100644
> --- a/arch/powerpc/platforms/powernv/pci-ioda.c
> +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
> @@ -1288,7 +1288,7 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
> pnv_ioda_setup_npu_PE(pdev);
> }
>
> -static void pnv_pci_ioda_setup_PEs(void)
> +static void pnv_pci_ioda_setup_nvlink(void)
> {
> struct pci_controller *hose;
> struct pnv_phb *phb;
> @@ -1312,6 +1312,11 @@ static void pnv_pci_ioda_setup_PEs(void)
> list_for_each_entry(pe, &phb->ioda.pe_list, list)
> pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
> }
> +
> +#ifdef CONFIG_IOMMU_API
> + /* setup iommu groups so we can do nvlink pass-thru */
> + pnv_pci_npu_setup_iommu_groups();
> +#endif
> }
>
> #ifdef CONFIG_PCI_IOV
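
One small nit, no need to respin for it: the #ifdef at the call site could
go away if pci.h provided an empty stub for !CONFIG_IOMMU_API, something
along these lines (untested sketch):

    #ifdef CONFIG_IOMMU_API
    extern void pnv_pci_npu_setup_iommu_groups(void);
    #else
    static inline void pnv_pci_npu_setup_iommu_groups(void) { }
    #endif

Either way works for me.
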
> @@ -2584,56 +2589,6 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
> .take_ownership = pnv_ioda2_take_ownership,
> .release_ownership = pnv_ioda2_release_ownership,
> };
> -
> -static void pnv_pci_ioda_setup_iommu_api(void)
> -{
> - struct pci_controller *hose;
> - struct pnv_phb *phb;
> - struct pnv_ioda_pe *pe;
> -
> - /*
> - * For non-nvlink devices the IOMMU group is registered when the PE is
> - * configured and devices are added to the group when the per-device
> - * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is
> - * only initialised for "normal" IODA PHBs.
> - *
> - * For NVLink devices we need to ensure the NVLinks and the GPU end up
> - * in the same IOMMU group, so that's handled here.
> - */
> - list_for_each_entry(hose, &hose_list, list_node) {
> - phb = hose->private_data;
> -
> - if (phb->type == PNV_PHB_IODA2)
> - list_for_each_entry(pe, &phb->ioda.pe_list, list)
> - pnv_try_setup_npu_table_group(pe);
> - }
> -
> - /*
> - * Now we have all PHBs discovered, time to add NPU devices to
> - * the corresponding IOMMU groups.
> - */
> - list_for_each_entry(hose, &hose_list, list_node) {
> - unsigned long pgsizes;
> -
> - phb = hose->private_data;
> -
> - if (phb->type != PNV_PHB_NPU_NVLINK)
> - continue;
> -
> - pgsizes = pnv_ioda_parse_tce_sizes(phb);
> - list_for_each_entry(pe, &phb->ioda.pe_list, list) {
> - /*
> - * IODA2 bridges get this set up from
> - * pci_controller_ops::setup_bridge but NPU bridges
> - * do not have this hook defined so we do it here.
> - */
> - pe->table_group.pgsizes = pgsizes;
> - pnv_npu_compound_attach(pe);
> - }
> - }
> -}
> -#else /* !CONFIG_IOMMU_API */
> -static void pnv_pci_ioda_setup_iommu_api(void) { };
> #endif
>
> static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
> @@ -3132,8 +3087,7 @@ static void pnv_pci_enable_bridges(void)
>
> static void pnv_pci_ioda_fixup(void)
> {
> - pnv_pci_ioda_setup_PEs();
> - pnv_pci_ioda_setup_iommu_api();
> + pnv_pci_ioda_setup_nvlink();
> pnv_pci_ioda_create_dbgfs();
>
> pnv_pci_enable_bridges();
> diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
> index 0c5845a1f05d..20941ef2706e 100644
> --- a/arch/powerpc/platforms/powernv/pci.h
> +++ b/arch/powerpc/platforms/powernv/pci.h
> @@ -209,11 +209,7 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
> /* Nvlink functions */
> extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
> extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
> -extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
> -extern struct iommu_table_group *pnv_try_setup_npu_table_group(
> - struct pnv_ioda_pe *pe);
> -extern struct iommu_table_group *pnv_npu_compound_attach(
> - struct pnv_ioda_pe *pe);
> +extern void pnv_pci_npu_setup_iommu_groups(void);
>
> /* pci-ioda-tce.c */
> #define POWERNV_IOMMU_DEFAULT_LEVELS 2
>
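
FWIW the observable end result, which is what the pass-through case cares
about, is that the GPU and all of its NPU link devices report the very
same group. An in-kernel sanity check could look roughly like this (sketch
only; gpu_pdev/npu_pdev stand in for the real struct pci_dev pointers):

    struct iommu_group *gpu_grp = iommu_group_get(&gpu_pdev->dev);
    struct iommu_group *npu_grp = iommu_group_get(&npu_pdev->dev);

    /* both devices must have landed in the same compound group */
    WARN_ON(gpu_grp != npu_grp);

    iommu_group_put(npu_grp);
    iommu_group_put(gpu_grp);
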
--
Alexey