public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 2/4][VTD] modifications to intel-iommu.c.
@ 2008-06-10  0:41 Kay, Allen M
  2008-06-10 10:14 ` Muli Ben-Yehuda
  2008-06-20 18:13 ` Avi Kivity
  0 siblings, 2 replies; 4+ messages in thread
From: Kay, Allen M @ 2008-06-10  0:41 UTC (permalink / raw)
  To: kvm
  Cc: Amit Shah, Muli Ben-Yehuda, Ben-Ami Yassour, Avi Kivity,
	Anthony Liguori, Chris Wright, Han, Weidong

[-- Attachment #1: Type: text/plain, Size: 223 bytes --]

Modification to intel-iommu.c to support vt-d page table and context
table mapping in kvm.  Mods to dmar.c and iova.c are due to header file
moves to include/linux.

Signed-off-by: Allen M. Kay <allen.m.kay@intel.com>

[-- Attachment #2: intel_iommu_mods.patch --]
[-- Type: application/octet-stream, Size: 4720 bytes --]

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f941f60..a58a5b0 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -26,8 +26,8 @@
 
 #include <linux/pci.h>
 #include <linux/dmar.h>
-#include "iova.h"
-#include "intel-iommu.h"
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
 
 #undef PREFIX
 #define PREFIX "DMAR:"
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 66c0fd2..a694e62 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -20,6 +20,7 @@
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */
 
+#undef DEBUG
 #include <linux/init.h>
 #include <linux/bitmap.h>
 #include <linux/debugfs.h>
@@ -33,8 +34,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
 #include <linux/timer.h>
-#include "iova.h"
-#include "intel-iommu.h"
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
 #include <asm/proto.h> /* force_iommu in this header in x86-64*/
 #include <asm/cacheflush.h>
 #include <asm/gart.h>
@@ -160,7 +161,7 @@ static inline void *alloc_domain_mem(void)
 	return iommu_kmem_cache_alloc(iommu_domain_cache);
 }
 
-static inline void free_domain_mem(void *vaddr)
+static void free_domain_mem(void *vaddr)
 {
 	kmem_cache_free(iommu_domain_cache, vaddr);
 }
@@ -1263,11 +1264,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (!context)
 		return -ENOMEM;
 	spin_lock_irqsave(&iommu->lock, flags);
-	if (context_present(*context)) {
-		spin_unlock_irqrestore(&iommu->lock, flags);
-		return 0;
-	}
 
+	context_clear_entry(*context);
 	context_set_domain_id(*context, domain->id);
 	context_set_address_width(*context, domain->agaw);
 	context_set_address_root(*context, virt_to_phys(domain->pgd));
@@ -1414,7 +1412,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
  * find_domain
  * Note: we use struct pci_dev->dev.archdata.iommu stores the info
  */
-struct dmar_domain *
+static struct dmar_domain *
 find_domain(struct pci_dev *pdev)
 {
 	struct device_domain_info *info;
@@ -2408,3 +2406,96 @@ int __init intel_iommu_init(void)
 	return 0;
 }
 
+void kvm_intel_iommu_domain_exit(struct dmar_domain *domain)
+{
+	u64 end;
+
+	/* Domain 0 is reserved, so dont process it */
+	if (!domain)
+		return;
+
+	end = DOMAIN_MAX_ADDR(domain->gaw);
+	end = end & (~PAGE_MASK_4K);
+
+	/* clear ptes */
+	dma_pte_clear_range(domain, 0, end);
+
+	/* free page tables */
+	dma_pte_free_pagetable(domain, 0, end);
+
+	iommu_free_domain(domain);
+	free_domain_mem(domain);
+}
+EXPORT_SYMBOL_GPL(kvm_intel_iommu_domain_exit);
+
+struct dmar_domain *kvm_intel_iommu_domain_alloc(struct pci_dev *pdev)
+{
+	struct dmar_drhd_unit *drhd;
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+
+	drhd = dmar_find_matched_drhd_unit(pdev);
+	if (!drhd) {
+		printk(KERN_ERR "kvm_intel_iommu_domain_alloc: drhd == NULL\n");
+		return NULL;
+	}
+
+	iommu = drhd->iommu;
+	if (!iommu) {
+		printk(KERN_ERR
+			"kvm_intel_iommu_domain_alloc: iommu == NULL\n");
+		return NULL;
+	}
+	domain = iommu_alloc_domain(iommu);
+	if (!domain) {
+		printk(KERN_ERR
+			"kvm_intel_iommu_domain_alloc: domain == NULL\n");
+		return NULL;
+	}
+	if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+		printk(KERN_ERR
+			"kvm_intel_iommu_domain_alloc: domain_init() failed\n");
+		kvm_intel_iommu_domain_exit(domain);
+		return NULL;
+	}
+	return domain;
+}
+EXPORT_SYMBOL_GPL(kvm_intel_iommu_domain_alloc);
+
+int kvm_intel_iommu_context_mapping(
+	struct dmar_domain *domain, struct pci_dev *pdev)
+{
+	int rc;
+	rc = domain_context_mapping(domain, pdev);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(kvm_intel_iommu_context_mapping);
+
+int kvm_intel_iommu_page_mapping(
+	struct dmar_domain *domain, dma_addr_t iova,
+	u64 hpa, size_t size, int prot)
+{
+	int rc;
+	rc = domain_page_mapping(domain, iova, hpa, size, prot);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(kvm_intel_iommu_page_mapping);
+
+void kvm_intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+{
+	kvm_intel_iommu_detach_dev(domain, bus, devfn);
+}
+EXPORT_SYMBOL_GPL(kvm_intel_iommu_detach_dev);
+
+struct dmar_domain *
+kvm_intel_iommu_find_domain(struct pci_dev *pdev)
+{
+	return find_domain(pdev);
+}
+EXPORT_SYMBOL_GPL(kvm_intel_iommu_find_domain);
+
+int kvm_intel_iommu_found(void)
+{
+	return g_num_of_iommus;
+}
+EXPORT_SYMBOL_GPL(kvm_intel_iommu_found);
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 3ef4ac0..2287116 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -7,7 +7,7 @@
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */
 
-#include "iova.h"
+#include <linux/iova.h>
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 2/4][VTD] modifications to intel-iommu.c.
  2008-06-10  0:41 [PATCH 2/4][VTD] modifications to intel-iommu.c Kay, Allen M
@ 2008-06-10 10:14 ` Muli Ben-Yehuda
  2008-06-20 18:13 ` Avi Kivity
  1 sibling, 0 replies; 4+ messages in thread
From: Muli Ben-Yehuda @ 2008-06-10 10:14 UTC (permalink / raw)
  To: Kay, Allen M
  Cc: kvm, Amit Shah, Ben-Ami Yassour1, Avi Kivity, Anthony Liguori,
	Chris Wright, Han, Weidong

On Mon, Jun 09, 2008 at 05:41:29PM -0700, Kay, Allen M wrote:

> Modification to intel-iommu.c to support vt-d page table and context
> table mapping in kvm.  Mods to dmar.c and iova.c are due to header
> file moves to include/linux.

Hi Allen,

Minor comments below, patches to follow up.

> Signed-off-by: Allen M. Kay <allen.m.kay@intel.com>
> 
> diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
> index 66c0fd2..a694e62 100644
> --- a/drivers/pci/intel-iommu.c
> +++ b/drivers/pci/intel-iommu.c
> @@ -20,6 +20,7 @@
>   * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
>   */
>  
> +#undef DEBUG

Nit: this should be submitted upstream separately.

>  #include <linux/init.h>
>  #include <linux/bitmap.h>
>  #include <linux/debugfs.h>
> @@ -33,8 +34,8 @@
>  #include <linux/dma-mapping.h>
>  #include <linux/mempool.h>
>  #include <linux/timer.h>
> -#include "iova.h"
> -#include "intel-iommu.h"
> +#include <linux/iova.h>
> +#include <linux/intel-iommu.h>
>  #include <asm/proto.h> /* force_iommu in this header in x86-64*/
>  #include <asm/cacheflush.h>
>  #include <asm/gart.h>
> @@ -160,7 +161,7 @@ static inline void *alloc_domain_mem(void)
>  	return iommu_kmem_cache_alloc(iommu_domain_cache);
>  }
>  
> -static inline void free_domain_mem(void *vaddr)
> +static void free_domain_mem(void *vaddr)
>  {
>  	kmem_cache_free(iommu_domain_cache, vaddr);
>  }
> @@ -1263,11 +1264,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
>  	if (!context)
>  		return -ENOMEM;
>  	spin_lock_irqsave(&iommu->lock, flags);
> -	if (context_present(*context)) {
> -		spin_unlock_irqrestore(&iommu->lock, flags);
> -		return 0;
> -	}
>  
> +	context_clear_entry(*context);
>  	context_set_domain_id(*context, domain->id);
>  	context_set_address_width(*context, domain->agaw);
>  	context_set_address_root(*context, virt_to_phys(domain->pgd));
> @@ -1414,7 +1412,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
>   * find_domain
>   * Note: we use struct pci_dev->dev.archdata.iommu stores the info
>   */
> -struct dmar_domain *
> +static struct dmar_domain *
>  find_domain(struct pci_dev *pdev)
>  {
>  	struct device_domain_info *info;
> @@ -2408,3 +2406,96 @@ int __init intel_iommu_init(void)
>  	return 0;
>  }
>  
> +void kvm_intel_iommu_domain_exit(struct dmar_domain *domain)
> +{
> +	u64 end;
> +
> +	/* Domain 0 is reserved, so dont process it */
> +	if (!domain)
> +		return;
> +
> +	end = DOMAIN_MAX_ADDR(domain->gaw);
> +	end = end & (~PAGE_MASK_4K);
> +
> +	/* clear ptes */
> +	dma_pte_clear_range(domain, 0, end);
> +
> +	/* free page tables */
> +	dma_pte_free_pagetable(domain, 0, end);
> +
> +	iommu_free_domain(domain);
> +	free_domain_mem(domain);
> +}
> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_domain_exit);
> +
> +struct dmar_domain *kvm_intel_iommu_domain_alloc(struct pci_dev *pdev)
> +{
> +	struct dmar_drhd_unit *drhd;
> +	struct dmar_domain *domain;
> +	struct intel_iommu *iommu;
> +
> +	drhd = dmar_find_matched_drhd_unit(pdev);
> +	if (!drhd) {
> +		printk(KERN_ERR "kvm_intel_iommu_domain_alloc: drhd == NULL\n");
> +		return NULL;
> +	}
> +
> +	iommu = drhd->iommu;
> +	if (!iommu) {
> +		printk(KERN_ERR
> +			"kvm_intel_iommu_domain_alloc: iommu == NULL\n");
> +		return NULL;
> +	}
> +	domain = iommu_alloc_domain(iommu);
> +	if (!domain) {
> +		printk(KERN_ERR
> +			"kvm_intel_iommu_domain_alloc: domain == NULL\n");
> +		return NULL;
> +	}
> +	if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
> +		printk(KERN_ERR
> +			"kvm_intel_iommu_domain_alloc: domain_init() failed\n");
> +		kvm_intel_iommu_domain_exit(domain);
> +		return NULL;
> +	}
> +	return domain;
> +}
> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_domain_alloc);
> +
> +int kvm_intel_iommu_context_mapping(
> +	struct dmar_domain *domain, struct pci_dev *pdev)
> +{
> +	int rc;
> +	rc = domain_context_mapping(domain, pdev);
> +	return rc;
> +}

Unnecessary wrapper.

> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_context_mapping);
> +
> +int kvm_intel_iommu_page_mapping(
> +	struct dmar_domain *domain, dma_addr_t iova,
> +	u64 hpa, size_t size, int prot)
> +{
> +	int rc;
> +	rc = domain_page_mapping(domain, iova, hpa, size, prot);
> +	return rc;
> +}

Unnecessary wrapper.

> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_page_mapping);
> +
> +void kvm_intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
> +{
> +	kvm_intel_iommu_detach_dev(domain, bus, devfn);
> +}

This doesn't look correct--we should probably tear down the domain's
page tables and context here.

> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_detach_dev);
> +
> +struct dmar_domain *
> +kvm_intel_iommu_find_domain(struct pci_dev *pdev)
> +{
> +	return find_domain(pdev);
> +}

Unnecessary wrapper.

Cheers,
Muli

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH 2/4][VTD] modifications to intel-iommu.c.
  2008-06-10  0:41 [PATCH 2/4][VTD] modifications to intel-iommu.c Kay, Allen M
  2008-06-10 10:14 ` Muli Ben-Yehuda
@ 2008-06-20 18:13 ` Avi Kivity
  2008-06-21  4:42   ` Han, Weidong
  1 sibling, 1 reply; 4+ messages in thread
From: Avi Kivity @ 2008-06-20 18:13 UTC (permalink / raw)
  To: Kay, Allen M
  Cc: kvm, Amit Shah, Muli Ben-Yehuda, Ben-Ami Yassour, Anthony Liguori,
	Chris Wright, Han, Weidong

Kay, Allen M wrote:
> Modification to intel-iommu.c to support vt-d page table and context
> table mapping in kvm.  Mods to dmar.c and iova.c are due to header file
> moves to include/linux.
>   

> diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
> index f941f60..a58a5b0 100644
> --- a/drivers/pci/dmar.c
> +++ b/drivers/pci/dmar.c
> @@ -26,8 +26,8 @@
>  
>  #include <linux/pci.h>
>  #include <linux/dmar.h>
> -#include "iova.h"
> -#include "intel-iommu.h"
> +#include <linux/iova.h>
> +#include <linux/intel-iommu.h>

This should have been done in the file movement patch to avoid breaking 
the build. 

>
>  
> +void kvm_intel_iommu_domain_exit(struct dmar_domain *domain)

This should be a generic API, not a kvm specific one.

> +{
> +    u64 end;
> +
> +    /* Domain 0 is reserved, so dont process it */
> +    if (!domain)
> +        return;

'domain' here is a pointer, not an identifier.

>
> +int kvm_intel_iommu_context_mapping(
> +    struct dmar_domain *domain, struct pci_dev *pdev)
> +{
> +    int rc;
> +    rc = domain_context_mapping(domain, pdev);
> +    return rc;
> +}
> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_context_mapping);

What does the return value mean?

> +
> +int kvm_intel_iommu_page_mapping(
> +    struct dmar_domain *domain, dma_addr_t iova,
> +    u64 hpa, size_t size, int prot)
> +{
> +    int rc;
> +    rc = domain_page_mapping(domain, iova, hpa, size, prot);
> +    return rc;
> +}
> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_page_mapping);

The function name makes it sound like it's retrieving information.  If 
it does something, put a verb in there.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 4+ messages in thread

* RE: [PATCH 2/4][VTD] modifications to intel-iommu.c.
  2008-06-20 18:13 ` Avi Kivity
@ 2008-06-21  4:42   ` Han, Weidong
  0 siblings, 0 replies; 4+ messages in thread
From: Han, Weidong @ 2008-06-21  4:42 UTC (permalink / raw)
  To: Avi Kivity, Kay, Allen M
  Cc: kvm, Amit Shah, Muli Ben-Yehuda, Ben-Ami Yassour, Anthony Liguori,
	Chris Wright

Avi Kivity wrote:
> Kay, Allen M wrote:
>> Modification to intel-iommu.c to support vt-d page table and context
>> table mapping in kvm.  Mods to dmar.c and iova.c are due to header
>> file moves to include/linux. 
>> 
> 
>> diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
>> index f941f60..a58a5b0 100644
>> --- a/drivers/pci/dmar.c
>> +++ b/drivers/pci/dmar.c
>> @@ -26,8 +26,8 @@
>> 
>>  #include <linux/pci.h>
>>  #include <linux/dmar.h>
>> -#include "iova.h"
>> -#include "intel-iommu.h"
>> +#include <linux/iova.h>
>> +#include <linux/intel-iommu.h>
> 
> This should have been done in the file movement patch to avoid
> breaking the build.
> 
>> 
>> 
>> +void kvm_intel_iommu_domain_exit(struct dmar_domain *domain)
> 
> This should be a generic API, not a kvm specific one.
> 
>> +{
>> +    u64 end;
>> +
>> +    /* Domain 0 is reserved, so dont process it */ +    if (!domain)
>> +        return;
> 
> 'domain' here is a pointer, not an identifier.
> 
>> 
>> +int kvm_intel_iommu_context_mapping(
>> +    struct dmar_domain *domain, struct pci_dev *pdev) +{
>> +    int rc;
>> +    rc = domain_context_mapping(domain, pdev);
>> +    return rc;
>> +}
>> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_context_mapping);
> 
> What does the return value mean?

It means whether mapping the context succeeded or failed. It must
succeed, or VT-d can't work for the device. I found the return value of
kvm_intel_iommu_context_mapping() was not used, so I will add checks for
it.

> 
>> +
>> +int kvm_intel_iommu_page_mapping(
>> +    struct dmar_domain *domain, dma_addr_t iova,
>> +    u64 hpa, size_t size, int prot)
>> +{
>> +    int rc;
>> +    rc = domain_page_mapping(domain, iova, hpa, size, prot); +   
>> return rc; +}
>> +EXPORT_SYMBOL_GPL(kvm_intel_iommu_page_mapping);
> 
> The function name makes it sound like it's retrieving information.  If
> it does something, put a verb in there.

We use the same names that the kernel VT-d code uses. It keeps things
consistent.

Randy (Weidong)


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2008-06-21  4:42 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-06-10  0:41 [PATCH 2/4][VTD] modifications to intel-iommu.c Kay, Allen M
2008-06-10 10:14 ` Muli Ben-Yehuda
2008-06-20 18:13 ` Avi Kivity
2008-06-21  4:42   ` Han, Weidong

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox