public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Amit Shah <amit.shah@qumranet.com>
To: "Ben-Ami Yassour1" <BENAMI@il.ibm.com>
Cc: kvm@vger.kernel.org, muli@il.ibm.com, anthony@codemonkey.ws,
	jbarnes@virtuousgeek.org, david.woodhouse@intel.com,
	mark.gross@intel.com, benami@il.ibm.com, weidong.han@intel.com,
	allen.m.kay@intel.com, Avi Kivity <avi@qumranet.com>
Subject: Re: [PATCH 2/2] KVM: Device Assignment with VT-d
Date: Wed, 3 Sep 2008 22:22:42 +0530	[thread overview]
Message-ID: <200809032222.43043.amit.shah@qumranet.com> (raw)
In-Reply-To: <1219740935-19701-3-git-send-email-amit.shah@qumranet.com>

There are a couple of things here that might need some error handling:

* On Tuesday 26 August 2008 14:25:35 Amit Shah wrote:
> From: Ben-Ami Yassour <benami@il.ibm.com>
>
> Based on a patch by: Kay, Allen M <allen.m.kay@intel.com>
>
> This patch enables PCI device assignment based on VT-d support.
> When a device is assigned to the guest, the guest memory is pinned and
> the mapping is updated in the VT-d IOMMU.
>
> [Amit: Expose KVM_CAP_IOMMU so we can check if an IOMMU is present
> and also control enable/disable from userspace]
>
> Signed-off-by: Kay, Allen M <allen.m.kay@intel.com>
> Signed-off-by: Weidong Han <weidong.han@intel.com>
> Signed-off-by: Ben-Ami Yassour <benami@il.ibm.com>
> Signed-off-by: Amit Shah <amit.shah@qumranet.com>


> +#include <linux/list.h>
> +#include <linux/kvm_host.h>
> +#include <linux/pci.h>
> +#include <linux/dmar.h>
> +#include <linux/intel-iommu.h>
> +
> +static int kvm_iommu_unmap_memslots(struct kvm *kvm);
> +
> +int kvm_iommu_map_pages(struct kvm *kvm,
> +			  gfn_t base_gfn, unsigned long npages)
> +{
> +	gfn_t gfn = base_gfn;
> +	pfn_t pfn;
> +	int i, rc;
> +	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
> +
> +	/* check if iommu exists and in use */
> +	if (!domain)
> +		return 0;
> +
> +	for (i = 0; i < npages; i++) {
> +		/* check if already mapped */
> +		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
> +						     gfn_to_gpa(gfn));
> +		if (pfn && !is_mmio_pfn(pfn))
> +			continue;
> +
> +		pfn = gfn_to_pfn(kvm, gfn);
> +		if (!is_mmio_pfn(pfn)) {
> +			rc = intel_iommu_page_mapping(domain,
> +						      gfn_to_gpa(gfn),
> +						      pfn_to_hpa(pfn),
> +						      PAGE_SIZE,
> +						      DMA_PTE_READ |
> +						      DMA_PTE_WRITE);
> +			if (rc) {
> +				kvm_release_pfn_clean(pfn);
> +				printk(KERN_DEBUG "kvm_iommu_map_pages:"
> +				       "iommu failed to map pfn=%lx\n", pfn);
> +				return rc;
> +			}
> +		} else {
> +			printk(KERN_DEBUG "kvm_iommu_map_page:"
> +			       "invalid pfn=%lx\n", pfn);
> +			return 0;
> +		}

In the error case, this function should itself undo the partial mapping (call the
unmap/put-pages path for the pages mapped so far), so that either all pages are
mapped or none are. That would also simplify error handling at the two call
sites of this function.

> +
> +		gfn++;
> +	}
> +	return 0;
> +}
> +
> +static int kvm_iommu_map_memslots(struct kvm *kvm)
> +{
> +	int i, rc;
> +
> +	down_read(&kvm->slots_lock);
> +	for (i = 0; i < kvm->nmemslots; i++) {
> +		rc = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
> +					 kvm->memslots[i].npages);
> +		if (rc) {
> +			up_read(&kvm->slots_lock);
> +			return rc;
> +		}
> +	}
> +	up_read(&kvm->slots_lock);
> +	return 0;
> +}
> +
> +int kvm_iommu_map_guest(struct kvm *kvm,
> +			struct kvm_assigned_dev_kernel *assigned_dev)
> +{
> +	struct pci_dev *pdev = NULL;
> +	int rc;
> +
> +	if (!intel_iommu_found()) {
> +		printk(KERN_ERR "intel iommu not found\n");
> +		return -ENODEV;
> +	}
> +
> +	printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
> +	       assigned_dev->host_busnr,
> +	       PCI_SLOT(assigned_dev->host_devfn),
> +	       PCI_FUNC(assigned_dev->host_devfn));
> +
> +	pdev = assigned_dev->dev;
> +
> +	if (pdev == NULL) {
> +		if (kvm->arch.intel_iommu_domain) {
> +			intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
> +			kvm->arch.intel_iommu_domain = NULL;
> +		}
> +		return -ENODEV;
> +	}
> +
> +	kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);

We should check that intel_iommu_domain_alloc() actually returned a valid
domain — it can return NULL on failure, and the result is dereferenced
unconditionally below.

> +
> +	rc = kvm_iommu_map_memslots(kvm);
> +	if (rc)
> +		goto out_unmap;
> +
> +	intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
> +			       pdev->bus->number, pdev->devfn);
> +
> +	rc = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
> +					 pdev);

This function name (as Mark points out) doesn't make much sense; can this be 
changed?

> +	if (rc) {
> +		printk(KERN_ERR "Domain context map for %s failed",
> +		       pci_name(pdev));
> +		goto out_unmap;
> +	}
> +	return 0;
> +
> +out_unmap:
> +	kvm_iommu_unmap_memslots(kvm);
> +	return rc;
> +}
> +
> +static void kvm_iommu_put_pages(struct kvm *kvm,
> +			       gfn_t base_gfn, unsigned long npages)
> +{
> +	gfn_t gfn = base_gfn;
> +	pfn_t pfn;
> +	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
> +	int i;
> +
> +	for (i = 0; i < npages; i++) {
> +		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
> +						     gfn_to_gpa(gfn));
> +		kvm_release_pfn_clean(pfn);
> +		gfn++;
> +	}
> +}
> +
> +static int kvm_iommu_unmap_memslots(struct kvm *kvm)
> +{
> +	int i;
> +	down_read(&kvm->slots_lock);
> +	for (i = 0; i < kvm->nmemslots; i++) {
> +		kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
> +				    kvm->memslots[i].npages);
> +	}
> +	up_read(&kvm->slots_lock);
> +
> +	return 0;
> +}
> +
> +int kvm_iommu_unmap_guest(struct kvm *kvm)
> +{
> +	struct kvm_assigned_dev_kernel *entry;
> +	struct pci_dev *pdev = NULL;
> +	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
> +
> +	/* check if iommu exists and in use */
> +	if (!domain)
> +		return 0;
> +
> +	list_for_each_entry(entry, &kvm->arch.assigned_dev_head, list) {
> +		printk(KERN_DEBUG "VT-d unmap: host bdf = %x:%x:%x\n",
> +		       entry->host_busnr,
> +		       PCI_SLOT(entry->host_devfn),
> +		       PCI_FUNC(entry->host_devfn));
> +
> +		for_each_pci_dev(pdev) {
> +			if ((pdev->bus->number == entry->host_busnr) &&
> +			    (pdev->devfn == entry->host_devfn))
> +				break;
> +		}

We store the PCI dev in entry->dev; no need to scan this entire list.

> +
> +		if (pdev == NULL)
> +			return -ENODEV;
> +
> +		/* detach kvm dmar domain */
> +		intel_iommu_detach_dev(domain,
> +				       pdev->bus->number, pdev->devfn);
> +	}
> +	kvm_iommu_unmap_memslots(kvm);
> +	intel_iommu_domain_exit(domain);
> +	return 0;
> +}
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index bfc7c33..38ab48b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -35,6 +35,7 @@
>  #include <linux/module.h>
>  #include <linux/mman.h>
>  #include <linux/highmem.h>
> +#include <linux/intel-iommu.h>
>
>  #include <asm/uaccess.h>
>  #include <asm/msr.h>
> @@ -276,9 +277,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
>
>  	list_add(&match->list, &kvm->arch.assigned_dev_head);
>
> +	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
> +		r = kvm_iommu_map_guest(kvm, match);
> +		if (r)
> +			goto out_list_del;
> +	}
> +
>  out:
>  	mutex_unlock(&kvm->lock);
>  	return r;
> +out_list_del:
> +	list_del(&match->list);
> +	pci_release_regions(dev);
>  out_disable:
>  	pci_disable_device(dev);
>  out_put:
> @@ -1145,6 +1155,9 @@ int kvm_dev_ioctl_check_extension(long ext)
>  	case KVM_CAP_PV_MMU:
>  		r = !tdp_enabled;
>  		break;
> +	case KVM_CAP_IOMMU:
> +		r = intel_iommu_found();
> +		break;
>  	default:
>  		r = 0;
>  		break;
> @@ -4264,6 +4277,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>
>  void kvm_arch_destroy_vm(struct kvm *kvm)
>  {
> +	kvm_iommu_unmap_guest(kvm);
>  	kvm_free_assigned_devices(kvm);
>  	kvm_free_pit(kvm);
>  	kfree(kvm->arch.vpic);
> diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
> index 982b6b2..fcc8088 100644
> --- a/include/asm-x86/kvm_host.h
> +++ b/include/asm-x86/kvm_host.h
> @@ -364,6 +364,7 @@ struct kvm_arch{
>  	 */
>  	struct list_head active_mmu_pages;
>  	struct list_head assigned_dev_head;
> +	struct dmar_domain *intel_iommu_domain;
>  	struct kvm_pic *vpic;
>  	struct kvm_ioapic *vioapic;
>  	struct kvm_pit *vpit;
> @@ -513,6 +514,8 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t
> gpa, int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
>  		  gpa_t addr, unsigned long *ret);
>
> +int is_mmio_pfn(pfn_t pfn);
> +
>  extern bool tdp_enabled;
>
>  enum emulation_result {
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index ef4bc6f..4269be1 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -384,6 +384,7 @@ struct kvm_trace_rec {
>  #define KVM_CAP_COALESCED_MMIO 15
>  #define KVM_CAP_SYNC_MMU 16  /* Changes to host mmap are reflected in
> guest */ #define KVM_CAP_DEVICE_ASSIGNMENT 17
> +#define KVM_CAP_IOMMU 18
>
>  /*
>   * ioctls for VM fds
> @@ -495,4 +496,6 @@ struct kvm_assigned_irq {
>  	__u32 flags;
>  };
>
> +#define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
> +
>  #endif
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index a18aaad..b703890 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -285,6 +285,33 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
>  int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
>  void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
>
> +#ifdef CONFIG_DMAR
> +int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
> +			unsigned long npages);
> +int kvm_iommu_map_guest(struct kvm *kvm,
> +			struct kvm_assigned_dev_kernel *assigned_dev);
> +int kvm_iommu_unmap_guest(struct kvm *kvm);
> +#else /* CONFIG_DMAR */
> +static inline int kvm_iommu_map_pages(struct kvm *kvm,
> +				      gfn_t base_gfn,
> +				      unsigned long npages)
> +{
> +	return 0;
> +}
> +
> +static inline int kvm_iommu_map_guest(struct kvm *kvm,
> +				      struct kvm_assigned_dev_kernel
> +				      *assigned_dev)
> +{
> +	return -ENODEV;
> +}
> +
> +static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
> +{
> +	return 0;
> +}
> +#endif /* CONFIG_DMAR */
> +
>  static inline void kvm_guest_enter(void)
>  {
>  	account_system_vtime(current);
> @@ -307,6 +334,11 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn)
>  	return (gpa_t)gfn << PAGE_SHIFT;
>  }
>
> +static inline hpa_t pfn_to_hpa(pfn_t pfn)
> +{
> +	return (hpa_t)pfn << PAGE_SHIFT;
> +}
> +

This can be a separate patch.

>  static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
>  {
>  	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 0309571..191bfe1 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -41,6 +41,7 @@
>  #include <linux/pagemap.h>
>  #include <linux/mman.h>
>  #include <linux/swap.h>
> +#include <linux/intel-iommu.h>
>
>  #include <asm/processor.h>
>  #include <asm/io.h>
> @@ -76,7 +77,7 @@ static inline int valid_vcpu(int n)
>  	return likely(n >= 0 && n < KVM_MAX_VCPUS);
>  }
>
> -static inline int is_mmio_pfn(pfn_t pfn)
> +inline int is_mmio_pfn(pfn_t pfn)
>  {
>  	if (pfn_valid(pfn))
>  		return PageReserved(pfn_to_page(pfn));
> @@ -578,6 +579,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
>  	}
>
>  	kvm_free_physmem_slot(&old, &new);
> +
> +	/* map the pages in iommu page table */
> +	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
> +	if (r)
> +		goto out_free;

Doing the unmapping inside kvm_iommu_map_pages() itself (as suggested above)
would mean this caller doesn't have to check for and handle a partial-failure
return value.

> +
>  	return 0;
>
>  out_free:



  parent reply	other threads:[~2008-09-03 16:52 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-08-26  8:55 VT-d support for device assignment Amit Shah
2008-08-26  8:55 ` [PATCH 1/2] VT-d: changes to support KVM Amit Shah
2008-08-26  8:55   ` [PATCH 2/2] KVM: Device Assignment with VT-d Amit Shah
2008-08-26 10:28     ` Zhang, Xiantao
2008-08-26 10:35       ` Amit Shah
2008-08-26 10:42         ` Zhang, Xiantao
2008-08-26 10:57           ` Amit Shah
2008-08-26 11:04             ` Zhang, Xiantao
2008-08-26 14:41           ` Avi Kivity
2008-08-26 15:09             ` Han, Weidong
2008-09-03 16:52     ` Amit Shah [this message]
2008-09-09  7:18       ` Han, Weidong
2008-08-26  9:10 ` VT-d support for device assignment Avi Kivity
2008-08-26 14:11   ` Amit Shah
2008-08-26 14:38     ` Avi Kivity
  -- strict thread matches above, loose matches on Subject: below --
2008-09-09 13:51 [PATCH 2/2] KVM: Device Assignment with VT-d Han, Weidong
2008-09-09 14:39 ` Amit Shah
2008-09-09 15:05   ` Han, Weidong
2008-09-09 14:44 VT-d support for device assignment Amit Shah
2008-09-09 14:44 ` [PATCH 1/2] VT-d: Changes to support KVM Amit Shah
2008-09-09 14:44   ` [PATCH 2/2] KVM: Device Assignment with VT-d Amit Shah
2008-09-09 15:37 VT-d support for device assignment Amit Shah
2008-09-09 15:37 ` [PATCH 1/2] VT-d: Changes to support KVM Amit Shah
2008-09-09 15:37   ` [PATCH 2/2] KVM: Device Assignment with VT-d Amit Shah
2008-09-11  7:21     ` Han, Weidong
2008-09-14  0:49       ` Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=200809032222.43043.amit.shah@qumranet.com \
    --to=amit.shah@qumranet.com \
    --cc=BENAMI@il.ibm.com \
    --cc=allen.m.kay@intel.com \
    --cc=anthony@codemonkey.ws \
    --cc=avi@qumranet.com \
    --cc=david.woodhouse@intel.com \
    --cc=jbarnes@virtuousgeek.org \
    --cc=kvm@vger.kernel.org \
    --cc=mark.gross@intel.com \
    --cc=muli@il.ibm.com \
    --cc=weidong.han@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox