From: Vasant Hegde via iommu <iommu@lists.linux-foundation.org>
To: <joro@8bytes.org>, <iommu@lists.linux.dev>
Cc: iommu@lists.linux-foundation.org, Vasant Hegde <vasant.hegde@amd.com>
Subject: [PATCH v3 RESEND 15/35] iommu/amd: Convert to use rlookup_amd_iommu helper function
Date: Wed, 6 Jul 2022 17:08:05 +0530 [thread overview]
Message-ID: <20220706113825.25582-16-vasant.hegde@amd.com> (raw)
In-Reply-To: <20220706113825.25582-1-vasant.hegde@amd.com>
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Use the rlookup_amd_iommu() helper function, which returns the
per-PCI-segment rlookup_table.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
---
drivers/iommu/amd/iommu.c | 64 +++++++++++++++++++++++----------------
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cfecd072e7a6..19db4d54c337 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -229,13 +229,17 @@ static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct amd_iommu *iommu;
u16 devid = pci_dev_id(pdev);
if (devid == alias)
return 0;
- amd_iommu_rlookup_table[alias] =
- amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return 0;
+
+ amd_iommu_set_rlookup_table(iommu, alias);
memcpy(amd_iommu_dev_table[alias].data,
amd_iommu_dev_table[devid].data,
sizeof(amd_iommu_dev_table[alias].data));
@@ -366,7 +370,7 @@ static bool check_device(struct device *dev)
if (devid > amd_iommu_last_bdf)
return false;
- if (amd_iommu_rlookup_table[devid] == NULL)
+ if (rlookup_amd_iommu(dev) == NULL)
return false;
return true;
@@ -1270,7 +1274,9 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
int qdep;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
@@ -1295,7 +1301,9 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
u16 alias;
int ret;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
if (dev_is_pci(dev_data->dev))
pdev = to_pci_dev(dev_data->dev);
@@ -1525,8 +1533,8 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void set_dte_entry(u16 devid, struct protection_domain *domain,
- bool ats, bool ppr)
+static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+ struct protection_domain *domain, bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
@@ -1545,8 +1553,6 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags |= DTE_FLAG_IOTLB;
if (ppr) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (iommu_feature(iommu, FEATURE_EPHSUP))
pte_root |= 1ULL << DEV_ENTRY_PPR;
}
@@ -1590,8 +1596,6 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
* entries for the old domain ID that is being overwritten
*/
if (old_domid) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
amd_iommu_flush_tlb_domid(iommu, old_domid);
}
}
@@ -1611,7 +1615,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct amd_iommu *iommu;
bool ats;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1623,7 +1629,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
clone_aliases(iommu, dev_data->dev);
@@ -1635,7 +1641,9 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
/* Update data structures */
dev_data->domain = NULL;
@@ -1813,13 +1821,14 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
- int ret, devid;
+ int ret;
if (!check_device(dev))
return ERR_PTR(-ENODEV);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
@@ -1849,13 +1858,14 @@ static void amd_iommu_probe_finalize(struct device *dev)
static void amd_iommu_release_device(struct device *dev)
{
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
return;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
@@ -1884,7 +1894,7 @@ static void update_device_table(struct protection_domain *domain)
if (!iommu)
continue;
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
clone_aliases(iommu, dev_data->dev);
}
@@ -2072,7 +2082,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
@@ -2081,7 +2090,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (dev_data->domain != NULL)
detach_device(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return;
@@ -2108,7 +2117,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return -EINVAL;
@@ -2493,8 +2502,9 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
continue;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
-
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ continue;
build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
qdep, address, size);
@@ -2656,7 +2666,9 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
struct iommu_cmd cmd;
dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return -ENODEV;
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
--
2.31.1
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
WARNING: multiple messages have this Message-ID (diff)
From: Vasant Hegde <vasant.hegde@amd.com>
To: <joro@8bytes.org>, <iommu@lists.linux.dev>
Cc: <iommu@lists.linux-foundation.org>,
<suravee.suthikulpanit@amd.com>,
Vasant Hegde <vasant.hegde@amd.com>
Subject: [PATCH v3 RESEND 15/35] iommu/amd: Convert to use rlookup_amd_iommu helper function
Date: Wed, 6 Jul 2022 17:08:05 +0530 [thread overview]
Message-ID: <20220706113825.25582-16-vasant.hegde@amd.com> (raw)
Message-ID: <20220706113805.TlW6MbMMwu-6hp7MoMrvuq33Fdgm5vY6p5ixepkNTJo@z> (raw)
In-Reply-To: <20220706113825.25582-1-vasant.hegde@amd.com>
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Use the rlookup_amd_iommu() helper function, which returns the
per-PCI-segment rlookup_table.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
---
drivers/iommu/amd/iommu.c | 64 +++++++++++++++++++++++----------------
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cfecd072e7a6..19db4d54c337 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -229,13 +229,17 @@ static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct amd_iommu *iommu;
u16 devid = pci_dev_id(pdev);
if (devid == alias)
return 0;
- amd_iommu_rlookup_table[alias] =
- amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return 0;
+
+ amd_iommu_set_rlookup_table(iommu, alias);
memcpy(amd_iommu_dev_table[alias].data,
amd_iommu_dev_table[devid].data,
sizeof(amd_iommu_dev_table[alias].data));
@@ -366,7 +370,7 @@ static bool check_device(struct device *dev)
if (devid > amd_iommu_last_bdf)
return false;
- if (amd_iommu_rlookup_table[devid] == NULL)
+ if (rlookup_amd_iommu(dev) == NULL)
return false;
return true;
@@ -1270,7 +1274,9 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
int qdep;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
@@ -1295,7 +1301,9 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
u16 alias;
int ret;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
if (dev_is_pci(dev_data->dev))
pdev = to_pci_dev(dev_data->dev);
@@ -1525,8 +1533,8 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void set_dte_entry(u16 devid, struct protection_domain *domain,
- bool ats, bool ppr)
+static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+ struct protection_domain *domain, bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
@@ -1545,8 +1553,6 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags |= DTE_FLAG_IOTLB;
if (ppr) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (iommu_feature(iommu, FEATURE_EPHSUP))
pte_root |= 1ULL << DEV_ENTRY_PPR;
}
@@ -1590,8 +1596,6 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
* entries for the old domain ID that is being overwritten
*/
if (old_domid) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
amd_iommu_flush_tlb_domid(iommu, old_domid);
}
}
@@ -1611,7 +1615,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct amd_iommu *iommu;
bool ats;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1623,7 +1629,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
clone_aliases(iommu, dev_data->dev);
@@ -1635,7 +1641,9 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
/* Update data structures */
dev_data->domain = NULL;
@@ -1813,13 +1821,14 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
- int ret, devid;
+ int ret;
if (!check_device(dev))
return ERR_PTR(-ENODEV);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
@@ -1849,13 +1858,14 @@ static void amd_iommu_probe_finalize(struct device *dev)
static void amd_iommu_release_device(struct device *dev)
{
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
return;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
@@ -1884,7 +1894,7 @@ static void update_device_table(struct protection_domain *domain)
if (!iommu)
continue;
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
clone_aliases(iommu, dev_data->dev);
}
@@ -2072,7 +2082,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
@@ -2081,7 +2090,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (dev_data->domain != NULL)
detach_device(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return;
@@ -2108,7 +2117,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return -EINVAL;
@@ -2493,8 +2502,9 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
continue;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
-
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ continue;
build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
qdep, address, size);
@@ -2656,7 +2666,9 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
struct iommu_cmd cmd;
dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return -ENODEV;
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
--
2.31.1
next prev parent reply other threads:[~2022-07-06 11:46 UTC|newest]
Thread overview: 73+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-07-06 11:37 [PATCH v3 RESEND 00/35] iommu/amd: Add multiple PCI segments support Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 01/35] iommu/amd: Update struct iommu_dev_data definition Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 02/35] iommu/amd: Introduce pci segment structure Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 03/35] iommu/amd: Introduce per PCI segment device table Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 04/35] iommu/amd: Introduce per PCI segment rlookup table Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 05/35] iommu/amd: Introduce per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 06/35] iommu/amd: Introduce per PCI segment dev_data_list Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 07/35] iommu/amd: Introduce per PCI segment old_dev_tbl_cpy Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 08/35] iommu/amd: Introduce per PCI segment alias_table Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:37 ` [PATCH v3 RESEND 09/35] iommu/amd: Introduce per PCI segment unity map list Vasant Hegde via iommu
2022-07-06 11:37 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 10/35] iommu/amd: Introduce per PCI segment last_bdf Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 11/35] iommu/amd: Introduce per PCI segment device table size Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 12/35] iommu/amd: Introduce per PCI segment alias " Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 13/35] iommu/amd: Introduce per PCI segment rlookup " Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 14/35] iommu/amd: Convert to use per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` Vasant Hegde via iommu [this message]
2022-07-06 11:38 ` [PATCH v3 RESEND 15/35] iommu/amd: Convert to use rlookup_amd_iommu helper function Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 16/35] iommu/amd: Update irq_remapping_alloc to use IOMMU lookup " Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 17/35] iommu/amd: Introduce struct amd_ir_data.iommu Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 18/35] iommu/amd: Update amd_irte_ops functions Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 19/35] iommu/amd: Update alloc_irq_table and alloc_irq_index Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 20/35] iommu/amd: Convert to use per PCI segment rlookup_table Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 21/35] iommu/amd: Update set_dte_entry and clear_dte_entry Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 22/35] iommu/amd: Update iommu_ignore_device Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 23/35] iommu/amd: Update dump_dte_entry Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 24/35] iommu/amd: Update set_dte_irq_entry Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 25/35] iommu/amd: Update (un)init_device_table_dma() Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 26/35] iommu/amd: Update set_dev_entry_bit() and get_dev_entry_bit() Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 27/35] iommu/amd: Remove global amd_iommu_[dev_table/alias_table/last_bdf] Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 28/35] iommu/amd: Flush upto last_bdf only Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 29/35] iommu/amd: Introduce get_device_sbdf_id() helper function Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 30/35] iommu/amd: Include PCI segment ID when initialize IOMMU Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 31/35] iommu/amd: Specify PCI segment ID when getting pci device Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 32/35] iommu/amd: Add PCI segment support for ivrs_[ioapic/hpet/acpihid] commands Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 33/35] iommu/amd: Print PCI segment ID in error log messages Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 34/35] iommu/amd: Update device_state structure to include PCI seg ID Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-06 11:38 ` [PATCH v3 RESEND 35/35] iommu/amd: Update amd_iommu_fault " Vasant Hegde via iommu
2022-07-06 11:38 ` Vasant Hegde
2022-07-07 7:41 ` [PATCH v3 RESEND 00/35] iommu/amd: Add multiple PCI segments support Joerg Roedel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220706113825.25582-16-vasant.hegde@amd.com \
--to=iommu@lists.linux-foundation.org \
--cc=iommu@lists.linux.dev \
--cc=joro@8bytes.org \
--cc=vasant.hegde@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox