public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Kirti Wankhede <kwankhede@nvidia.com>
To: Alex Williamson <alex.williamson@redhat.com>
Cc: Yan Zhao <yan.y.zhao@intel.com>, <cjia@nvidia.com>,
	<kevin.tian@intel.com>, <ziye.yang@intel.com>,
	<changpeng.liu@intel.com>, <yi.l.liu@intel.com>,
	<mlevitsk@redhat.com>, <eskultet@redhat.com>, <cohuck@redhat.com>,
	<dgilbert@redhat.com>, <jonathan.davies@nutanix.com>,
	<eauger@redhat.com>, <aik@ozlabs.ru>, <pasic@linux.ibm.com>,
	<felipe@nutanix.com>, <Zhengxiao.zx@Alibaba-inc.com>,
	<shuangtai.tst@alibaba-inc.com>, <Ken.Xue@amd.com>,
	<zhi.a.wang@intel.com>, <qemu-devel@nongnu.org>,
	<kvm@vger.kernel.org>
Subject: Re: [PATCH Kernel v20 5/8] vfio iommu: Implementation of ioctl for dirty pages tracking
Date: Fri, 15 May 2020 21:03:00 +0530	[thread overview]
Message-ID: <8eb95e03-87f3-677a-698e-826ee0060deb@nvidia.com> (raw)
In-Reply-To: <20200515091533.53259392@w520.home>



On 5/15/2020 8:45 PM, Alex Williamson wrote:
> On Fri, 15 May 2020 16:44:38 +0530
> Kirti Wankhede <kwankhede@nvidia.com> wrote:
> 
>> On 5/15/2020 3:35 PM, Yan Zhao wrote:
>>> On Fri, May 15, 2020 at 02:07:44AM +0530, Kirti Wankhede wrote:
>>>> VFIO_IOMMU_DIRTY_PAGES ioctl performs three operations:
>>>> - Start dirty pages tracking while migration is active
>>>> - Stop dirty pages tracking.
>>>> - Get dirty pages bitmap. It's the user space application's responsibility
>>>>     to copy content of dirty pages from source to destination during migration.
>>>>
>>>> To prevent DoS attack, memory for bitmap is allocated per vfio_dma
>>>> structure. Bitmap size is calculated considering smallest supported page
>>>> size. Bitmap is allocated for all vfio_dmas when dirty logging is enabled
>>>>
>>>> Bitmap is populated for already pinned pages when bitmap is allocated for
>>>> a vfio_dma with the smallest supported page size. Update bitmap from
>>>> pinning functions when tracking is enabled. When user application queries
>>>> bitmap, check if requested page size is same as page size used to
>>>> populate bitmap. If it is equal, copy bitmap, but if not equal, return
>>>> error.
>>>>
>>>> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
>>>> Reviewed-by: Neo Jia <cjia@nvidia.com>
>>>>
>>>> Fixed error reported by build bot by changing pgsize type from uint64_t
>>>> to size_t.
>>>> Reported-by: kbuild test robot <lkp@intel.com>
>>>> ---
>>>>    drivers/vfio/vfio_iommu_type1.c | 294 +++++++++++++++++++++++++++++++++++++++-
>>>>    1 file changed, 288 insertions(+), 6 deletions(-)
>>>>
>>>> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
>>>> index de17787ffece..b76d3b14abfd 100644
>>>> --- a/drivers/vfio/vfio_iommu_type1.c
>>>> +++ b/drivers/vfio/vfio_iommu_type1.c
>>>> @@ -72,6 +72,7 @@ struct vfio_iommu {
>>>>    	uint64_t		pgsize_bitmap;
>>>>    	bool			v2;
>>>>    	bool			nesting;
>>>> +	bool			dirty_page_tracking;
>>>>    };
>>>>    
>>>>    struct vfio_domain {
>>>> @@ -92,6 +93,7 @@ struct vfio_dma {
>>>>    	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
>>>>    	struct task_struct	*task;
>>>>    	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
>>>> +	unsigned long		*bitmap;
>>>>    };
>>>>    
>>>>    struct vfio_group {
>>>> @@ -126,6 +128,19 @@ struct vfio_regions {
>>>>    #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
>>>>    					(!list_empty(&iommu->domain_list))
>>>>    
>>>> +#define DIRTY_BITMAP_BYTES(n)	(ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
>>>> +
>>>> +/*
>>>> + * Input argument of number of bits to bitmap_set() is unsigned integer, which
>>>> + * further casts to signed integer for unaligned multi-bit operation,
>>>> + * __bitmap_set().
>>>> + * Then maximum bitmap size supported is 2^31 bits divided by 2^3 bits/byte,
>>>> + * that is 2^28 (256 MB) which maps to 2^31 * 2^12 = 2^43 (8TB) on 4K page
>>>> + * system.
>>>> + */
>>>> +#define DIRTY_BITMAP_PAGES_MAX	 ((u64)INT_MAX)
>>>> +#define DIRTY_BITMAP_SIZE_MAX	 DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
>>>> +
>>>>    static int put_pfn(unsigned long pfn, int prot);
>>>>    
>>>>    /*
>>>> @@ -176,6 +191,74 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
>>>>    	rb_erase(&old->node, &iommu->dma_list);
>>>>    }
>>>>    
>>>> +
>>>> +static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
>>>> +{
>>>> +	uint64_t npages = dma->size / pgsize;
>>>> +
>>>> +	if (npages > DIRTY_BITMAP_PAGES_MAX)
>>>> +		return -EINVAL;
>>>> +
>>>> +	dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages), GFP_KERNEL);
>>>> +	if (!dma->bitmap)
>>>> +		return -ENOMEM;
>>>> +
>>>> +	return 0;
>>>> +}
>>>> +
>>>> +static void vfio_dma_bitmap_free(struct vfio_dma *dma)
>>>> +{
>>>> +	kfree(dma->bitmap);
>>>> +	dma->bitmap = NULL;
>>>> +}
>>>> +
>>>> +static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
>>>> +{
>>>> +	struct rb_node *p;
>>>> +
>>>> +	for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
>>>> +		struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
>>>> +
>>>> +		bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) / pgsize, 1);
>>>> +	}
>>>> +}
>>>> +
>>>> +static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
>>>> +{
>>>> +	struct rb_node *n = rb_first(&iommu->dma_list);
>>>> +
>>>> +	for (; n; n = rb_next(n)) {
>>>> +		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
>>>> +		int ret;
>>>> +
>>>> +		ret = vfio_dma_bitmap_alloc(dma, pgsize);
>>>> +		if (ret) {
>>>> +			struct rb_node *p = rb_prev(n);
>>>> +
>>>> +			for (; p; p = rb_prev(p)) {
>>>> +				struct vfio_dma *dma = rb_entry(n,
>>>> +							struct vfio_dma, node);
>>>> +
>>>> +				vfio_dma_bitmap_free(dma);
>>>> +			}
>>>> +			return ret;
>>>> +		}
>>>> +		vfio_dma_populate_bitmap(dma, pgsize);
>>>> +	}
>>>> +	return 0;
>>>> +}
>>>> +
>>>> +static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
>>>> +{
>>>> +	struct rb_node *n = rb_first(&iommu->dma_list);
>>>> +
>>>> +	for (; n; n = rb_next(n)) {
>>>> +		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
>>>> +
>>>> +		vfio_dma_bitmap_free(dma);
>>>> +	}
>>>> +}
>>>> +
>>>>    /*
>>>>     * Helper Functions for host iova-pfn list
>>>>     */
>>>> @@ -568,6 +651,17 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
>>>>    			vfio_unpin_page_external(dma, iova, do_accounting);
>>>>    			goto pin_unwind;
>>>>    		}
>>>> +
>>>> +		if (iommu->dirty_page_tracking) {
>>>> +			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
>>>> +
>>>> +			/*
>>>> +			 * Bitmap populated with the smallest supported page
>>>> +			 * size
>>>> +			 */
>>>> +			bitmap_set(dma->bitmap,
>>>> +				   (vpfn->iova - dma->iova) >> pgshift, 1);
>>>> +		}
>>>>    	}
>>>>    
>>>>    	ret = i;
>>>> @@ -802,6 +896,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
>>>>    	vfio_unmap_unpin(iommu, dma, true);
>>>>    	vfio_unlink_dma(iommu, dma);
>>>>    	put_task_struct(dma->task);
>>>> +	vfio_dma_bitmap_free(dma);
>>>>    	kfree(dma);
>>>>    	iommu->dma_avail++;
>>>>    }
>>>> @@ -829,6 +924,80 @@ static void vfio_pgsize_bitmap(struct vfio_iommu *iommu)
>>>>    	}
>>>>    }
>>>>    
>>>> +static int update_user_bitmap(u64 __user *bitmap, struct vfio_dma *dma,
>>>> +			      dma_addr_t base_iova, size_t pgsize)
>>>> +{
>>>> +	unsigned long pgshift = __ffs(pgsize);
>>>> +	unsigned long nbits = dma->size >> pgshift;
>>>> +	unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
>>>> +	unsigned long copy_offset = bit_offset / BITS_PER_LONG;
>>>> +	unsigned long shift = bit_offset % BITS_PER_LONG;
>>>> +	unsigned long leftover;
>>>> +
>>>> +	if (shift) {
>>>> +		bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
>>>> +				  nbits + shift);
>>>> +
>>>> +		if (copy_from_user(&leftover, (u64 *)bitmap + copy_offset,
>>>> +				   sizeof(leftover)))
>>>> +			return -EFAULT;
>>>> +
>>>> +		bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift);
>>>> +	}
>>>> +
>>>> +	if (copy_to_user((u64 *)bitmap + copy_offset, dma->bitmap,
>>>> +			 DIRTY_BITMAP_BYTES(nbits + shift)))
>>>> +		return -EFAULT;
>>>> +
>>>> +	return 0;
>>>> +}
>>>> +
>>>> +static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
>>>> +				  dma_addr_t iova, size_t size, size_t pgsize)
>>>> +{
>>>> +	struct vfio_dma *dma;
>>>> +	dma_addr_t i = iova, limit = iova + size;
>>>> +	unsigned long pgshift = __ffs(pgsize);
>>>> +	size_t sz = size;
>>>> +	int ret;
>>>> +
>>>> +	while ((dma = vfio_find_dma(iommu, i, sz))) {
>>> not quite get the logic here.
>>> if (i, i + size) is intersecting with (dma->iova, dma->iova + dma->size),
>>> and a dma is found here, why the whole bitmap is cleared and copied?
>>>    
>>
>> This works with multiple but full vfio_dma, not intersects of vfio_dma,
>> similar to unmap ioctl.
> 
> I don't see that the VFIO_IOMMU_DIRTY_PAGES ioctl validates that like
> VFIO_IOMMU_UNMAP_DMA does though.  We should have the same test as
> vfio_dma_do_unmap() to verify that the user range doesn't bisect a
> mapping, otherwise Yan is right, it looks like we allow the user to
> specify an arbitrary range that might bisect a bitmap, but we clear and
> attempt to copy the entire bitmap to the user buffer regardless.

Sure, updating patch. I'll send next version in some time.

Thanks,
Kirti

  reply	other threads:[~2020-05-15 15:33 UTC|newest]

Thread overview: 29+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-05-14 20:37 [PATCH Kernel v20 0/8] Add UAPIs to support migration for VFIO devices Kirti Wankhede
2020-05-14 20:37 ` [PATCH Kernel v20 1/8] vfio: UAPI for migration interface for device state Kirti Wankhede
2020-05-14 20:37 ` [PATCH Kernel v20 2/8] vfio iommu: Remove atomicity of ref_count of pinned pages Kirti Wankhede
2020-05-14 20:37 ` [PATCH Kernel v20 3/8] vfio iommu: Cache pgsize_bitmap in struct vfio_iommu Kirti Wankhede
2020-05-14 20:37 ` [PATCH Kernel v20 4/8] vfio iommu: Add ioctl definition for dirty pages tracking Kirti Wankhede
2020-05-15 10:59   ` Cornelia Huck
2020-05-15 17:35     ` Kirti Wankhede
2020-05-19 15:35       ` Cornelia Huck
2020-05-19 15:53         ` Alex Williamson
2020-05-19 16:00           ` Cornelia Huck
2020-05-14 20:37 ` [PATCH Kernel v20 5/8] vfio iommu: Implementation of ioctl " Kirti Wankhede
2020-05-15  3:27   ` Alex Williamson
2020-05-15  6:14     ` Alex Williamson
2020-05-15 10:05   ` Yan Zhao
2020-05-15 11:14     ` Kirti Wankhede
2020-05-15 15:15       ` Alex Williamson
2020-05-15 15:33         ` Kirti Wankhede [this message]
2020-05-14 20:37 ` [PATCH Kernel v20 6/8] vfio iommu: Update UNMAP_DMA ioctl to get dirty bitmap before unmap Kirti Wankhede
2020-05-15  3:27   ` Alex Williamson
2020-05-15  4:16     ` Kirti Wankhede
2020-05-15  5:47       ` Alex Williamson
2020-05-15  6:47         ` Kirti Wankhede
2020-05-15 13:31           ` Alex Williamson
2020-05-15 15:30             ` Kirti Wankhede
2020-05-15 15:48               ` Alex Williamson
2020-05-14 20:37 ` [PATCH Kernel v20 7/8] vfio iommu: Add migration capability to report supported features Kirti Wankhede
2020-05-14 20:37 ` [PATCH Kernel v20 8/8] vfio: Selective dirty page tracking if IOMMU backed device pins pages Kirti Wankhede
2020-05-15  3:32 ` [PATCH Kernel v20 0/8] Add UAPIs to support migration for VFIO devices Alex Williamson
2020-05-15 10:14   ` Yan Zhao

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=8eb95e03-87f3-677a-698e-826ee0060deb@nvidia.com \
    --to=kwankhede@nvidia.com \
    --cc=Ken.Xue@amd.com \
    --cc=Zhengxiao.zx@Alibaba-inc.com \
    --cc=aik@ozlabs.ru \
    --cc=alex.williamson@redhat.com \
    --cc=changpeng.liu@intel.com \
    --cc=cjia@nvidia.com \
    --cc=cohuck@redhat.com \
    --cc=dgilbert@redhat.com \
    --cc=eauger@redhat.com \
    --cc=eskultet@redhat.com \
    --cc=felipe@nutanix.com \
    --cc=jonathan.davies@nutanix.com \
    --cc=kevin.tian@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=mlevitsk@redhat.com \
    --cc=pasic@linux.ibm.com \
    --cc=qemu-devel@nongnu.org \
    --cc=shuangtai.tst@alibaba-inc.com \
    --cc=yan.y.zhao@intel.com \
    --cc=yi.l.liu@intel.com \
    --cc=zhi.a.wang@intel.com \
    --cc=ziye.yang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox