* [PATCH 07/13] add domain_flush_cache
@ 2008-12-02 14:22 Han, Weidong
From: Han, Weidong @ 2008-12-02 14:22 UTC (permalink / raw)
To: 'Avi Kivity', Woodhouse, David, 'Jesse Barnes'
Cc: 'Joerg Roedel', Kay, Allen M, Yu, Fenghua,
'kvm@vger.kernel.org',
'iommu@lists.linux-foundation.org'
For some common low-level functions that will also be used by the virtual machine (KVM VT-d) code, use domain_flush_cache() instead of __iommu_flush_cache().
Signed-off-by: Weidong Han <weidong.han@intel.com>
---
drivers/pci/intel-iommu.c | 40 ++++++++++++++++++++++++----------------
1 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 429aff4..b00a8f2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -200,6 +200,13 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
return NULL;
}
+static void domain_flush_cache(struct dmar_domain *domain,
+ void *addr, int size)
+{
+ if (!domain->iommu_coherency)
+ clflush_cache_range(addr, size);
+}
+
static struct intel_iommu *device_find_matched_iommu(u8 bus, u8 devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -316,7 +323,6 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
int level = agaw_to_level(domain->agaw);
int offset;
unsigned long flags;
- struct intel_iommu *iommu = domain_get_iommu(domain);
BUG_ON(!domain->pgd);
@@ -340,8 +346,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
flags);
return NULL;
}
- __iommu_flush_cache(iommu, tmp_page,
- PAGE_SIZE);
+ domain_flush_cache(domain, tmp_page, PAGE_SIZE);
dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
/*
* high level table always sets r/w, last level page
@@ -349,7 +354,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
*/
dma_set_pte_readable(*pte);
dma_set_pte_writable(*pte);
- __iommu_flush_cache(iommu, pte, sizeof(*pte));
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
parent = phys_to_virt(dma_pte_addr(*pte));
level--;
@@ -386,14 +391,13 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
struct dma_pte *pte = NULL;
- struct intel_iommu *iommu = domain_get_iommu(domain);
/* get last level pte */
pte = dma_addr_level_pte(domain, addr, 1);
if (pte) {
dma_clear_pte(*pte);
- __iommu_flush_cache(iommu, pte, sizeof(*pte));
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
}
@@ -422,7 +426,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
int addr_width = agaw_to_width(domain->agaw);
struct dma_pte *pte;
int total = agaw_to_level(domain->agaw);
- struct intel_iommu *iommu = domain_get_iommu(domain);
int level;
u64 tmp;
@@ -442,8 +445,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
free_pgtable_page(
phys_to_virt(dma_pte_addr(*pte)));
dma_clear_pte(*pte);
- __iommu_flush_cache(iommu,
- pte, sizeof(*pte));
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
tmp += level_size(level);
}
@@ -1158,12 +1160,16 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
u8 bus, u8 devfn)
{
struct context_entry *context;
- struct intel_iommu *iommu = domain_get_iommu(domain);
+ struct intel_iommu *iommu;
unsigned long flags;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd);
+
+ iommu = device_find_matched_iommu(bus, devfn);
+ if (!iommu)
+ return -ENODEV;
context = device_to_context_entry(iommu, bus, devfn);
if (!context)
return -ENOMEM;
@@ -1225,12 +1231,15 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
tmp->bus->number, tmp->devfn);
}
-static int domain_context_mapped(struct dmar_domain *domain,
- struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
{
int ret;
struct pci_dev *tmp, *parent;
- struct intel_iommu *iommu = domain_get_iommu(domain);
+ struct intel_iommu *iommu;
+
+ iommu = device_find_matched_iommu(pdev->bus->number, pdev->devfn);
+ if (!iommu)
+ return -ENODEV;
ret = device_context_mapped(iommu,
pdev->bus->number, pdev->devfn);
@@ -1265,7 +1274,6 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
struct dma_pte *pte;
int index;
int addr_width = agaw_to_width(domain->agaw);
- struct intel_iommu *iommu = domain_get_iommu(domain);
hpa &= (((u64)1) << addr_width) - 1;
@@ -1285,7 +1293,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
BUG_ON(dma_pte_addr(*pte));
dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
dma_set_pte_prot(*pte, prot);
- __iommu_flush_cache(iommu, pte, sizeof(*pte));
+ domain_flush_cache(domain, pte, sizeof(*pte));
start_pfn++;
index++;
}
@@ -1819,7 +1827,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
}
/* make sure context mapping is ok */
- if (unlikely(!domain_context_mapped(domain, pdev))) {
+ if (unlikely(!domain_context_mapped(pdev))) {
ret = domain_context_mapping(domain, pdev);
if (ret) {
printk(KERN_ERR
--
1.5.1
* Re: [PATCH 07/13] add domain_flush_cache
From: Mark McLoughlin @ 2008-12-04 17:12 UTC (permalink / raw)
To: Han, Weidong
Cc: 'Avi Kivity', Woodhouse, David, 'Jesse Barnes',
Yu, Fenghua, 'iommu@lists.linux-foundation.org',
'kvm@vger.kernel.org'
On Tue, 2008-12-02 at 22:22 +0800, Han, Weidong wrote:
> For some common low level functions which will be also used by virtual
> machine usage, use domain_flush_cache instead of __iommu_flush_cache.
>
> Signed-off-by: Weidong Han <weidong.han@intel.com>
> ---
> drivers/pci/intel-iommu.c | 40 ++++++++++++++++++++++++----------------
> 1 files changed, 24 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
> index 429aff4..b00a8f2 100644
> --- a/drivers/pci/intel-iommu.c
> +++ b/drivers/pci/intel-iommu.c
> @@ -200,6 +200,13 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
> return NULL;
> }
>
> +static void domain_flush_cache(struct dmar_domain *domain,
> + void *addr, int size)
> +{
> + if (!domain->iommu_coherency)
> + clflush_cache_range(addr, size);
> +}
This is quite unfortunate; __iommu_flush_cache() is essentially
identical:
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(addr, size);
}
Is there no way we can use a single function for both purposes?
Cheers,
Mark.
* RE: [PATCH 07/13] add domain_flush_cache
From: Han, Weidong @ 2008-12-05 1:36 UTC (permalink / raw)
To: 'Mark McLoughlin'
Cc: 'Avi Kivity', Woodhouse, David, 'Jesse Barnes',
Yu, Fenghua, 'iommu@lists.linux-foundation.org',
'kvm@vger.kernel.org'
Mark McLoughlin wrote:
> On Tue, 2008-12-02 at 22:22 +0800, Han, Weidong wrote:
>
>> For some common low level functions which will be also used by
>> virtual
>> machine usage, use domain_flush_cache instead of __iommu_flush_cache.
>>
>> Signed-off-by: Weidong Han <weidong.han@intel.com>
>> ---
>> drivers/pci/intel-iommu.c | 40
>> ++++++++++++++++++++++++---------------- 1 files changed, 24
>> insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
>> index 429aff4..b00a8f2 100644
>> --- a/drivers/pci/intel-iommu.c
>> +++ b/drivers/pci/intel-iommu.c
>> @@ -200,6 +200,13 @@ static struct intel_iommu
>> *domain_get_iommu(struct dmar_domain *domain) return NULL; }
>>
>> +static void domain_flush_cache(struct dmar_domain *domain,
>> + void *addr, int size)
>> +{
>> + if (!domain->iommu_coherency)
>> + clflush_cache_range(addr, size);
>> +}
>
> This is quite unfortunate; __iommu_flush_cache() is essentially
> identical:
>
> static inline void __iommu_flush_cache(
> struct intel_iommu *iommu, void *addr, int size)
> {
> if (!ecap_coherent(iommu->ecap))
> clflush_cache_range(addr, size);
> }
>
> Is there no way we can use a single function for both purposes?
>
It's not easy: some functions (e.g. qi_submit_sync() in dmar.c) don't have a dmar_domain struct to work with, whereas domain_flush_cache() is needed for the KVM VT-d case, where a domain is no longer tied to a single IOMMU.
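
As a rough illustration (not code from this patch; the iommu_bmp, g_iommus[] and g_num_of_iommus names below are assumptions used only for this sketch), the per-domain flag consulted by domain_flush_cache() could be recomputed whenever the set of IOMMUs attached to the domain changes, so the domain is treated as coherent only when every attached IOMMU reports coherency in its extended capabilities:

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	/* assume coherent until a non-coherent attached IOMMU is found */
	domain->iommu_coherency = 1;

	for (i = 0; i < g_num_of_iommus; i++) {
		if (!test_bit(i, domain->iommu_bmp))
			continue;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

With something along those lines, __iommu_flush_cache() stays usable for callers that only have an iommu in hand (such as qi_submit_sync()), while domain_flush_cache() covers the domain paths touched above.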
Regards,
Weidong
> Cheers,
> Mark.