qemu-devel.nongnu.org archive mirror
From: "Michael S. Tsirkin" <mst@redhat.com>
To: Alexander Duyck <aduyck@mirantis.com>
Cc: tianyu.lan@intel.com, yang.zhang.wz@gmail.com,
	alex.williamson@redhat.com, kvm@vger.kernel.org,
	konrad.wilk@oracle.com, linux-pci@vger.kernel.org,
	x86@kernel.org, linux-kernel@vger.kernel.org,
	alexander.duyck@gmail.com, qemu-devel@nongnu.org, agraf@suse.de,
	dgilbert@redhat.com
Subject: Re: [Qemu-devel] [RFC PATCH 3/3] x86: Create dma_mark_dirty to dirty pages used for DMA by VM guest
Date: Mon, 14 Dec 2015 16:00:15 +0200
Message-ID: <20151214113016-mutt-send-email-mst@redhat.com>
In-Reply-To: <20151213212831.5410.84365.stgit@localhost.localdomain>

On Sun, Dec 13, 2015 at 01:28:31PM -0800, Alexander Duyck wrote:
> This patch is meant to provide the guest with a way of flagging DMA pages
> as being dirty to the host when using a direct-assign device within a
> guest.  The advantage to this approach is that it is fairly simple;
> however, it currently has a significant impact on device performance in
> all the scenarios where it won't be needed.
> 
> As such, this is really meant only as a proof of concept and to get the
> ball rolling in terms of figuring out how best to approach the issue of
> dirty page tracking for a guest that is using a direct-assigned device.
> In addition, with just this patch it should be possible to modify current
> migration approaches so that, instead of having to hot-remove the device
> before starting the migration, the removal can be delayed until just
> before the final stop and copy.
> 
> Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
> ---
>  arch/arm/include/asm/dma-mapping.h       |    3 ++-
>  arch/arm64/include/asm/dma-mapping.h     |    5 ++---
>  arch/ia64/include/asm/dma.h              |    1 +
>  arch/mips/include/asm/dma-mapping.h      |    1 +
>  arch/powerpc/include/asm/swiotlb.h       |    1 +
>  arch/tile/include/asm/dma-mapping.h      |    1 +
>  arch/unicore32/include/asm/dma-mapping.h |    1 +
>  arch/x86/Kconfig                         |   11 +++++++++++
>  arch/x86/include/asm/swiotlb.h           |   26 ++++++++++++++++++++++++++
>  drivers/xen/swiotlb-xen.c                |    6 ++++++
>  lib/swiotlb.c                            |    6 ++++++
>  11 files changed, 58 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
> index ccb3aa64640d..1962d7b471c7 100644
> --- a/arch/arm/include/asm/dma-mapping.h
> +++ b/arch/arm/include/asm/dma-mapping.h
> @@ -167,7 +167,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  	return 1;
>  }
>  
> -static inline void dma_mark_clean(void *addr, size_t size) { }
> +static inline void dma_mark_clean(void *addr, size_t size) {}
> +static inline void dma_mark_dirty(void *addr, size_t size) {}
>  
>  extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
>  
> diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
> index 61e08f360e31..8d24fe11c8a3 100644
> --- a/arch/arm64/include/asm/dma-mapping.h
> +++ b/arch/arm64/include/asm/dma-mapping.h
> @@ -84,9 +84,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  	return addr + size - 1 <= *dev->dma_mask;
>  }
>  
> -static inline void dma_mark_clean(void *addr, size_t size)
> -{
> -}
> +static inline void dma_mark_clean(void *addr, size_t size) {}
> +static inline void dma_mark_dirty(void *addr, size_t size) {}
>  
>  #endif	/* __KERNEL__ */
>  #endif	/* __ASM_DMA_MAPPING_H */
> diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
> index 4d97f60f1ef5..d92ebeb2758e 100644
> --- a/arch/ia64/include/asm/dma.h
> +++ b/arch/ia64/include/asm/dma.h
> @@ -20,5 +20,6 @@ extern unsigned long MAX_DMA_ADDRESS;
>  #define free_dma(x)
>  
>  void dma_mark_clean(void *addr, size_t size);
> +static inline void dma_mark_dirty(void *addr, size_t size) {}
>  
>  #endif /* _ASM_IA64_DMA_H */
> diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
> index e604f760c4a0..567f6e03e337 100644
> --- a/arch/mips/include/asm/dma-mapping.h
> +++ b/arch/mips/include/asm/dma-mapping.h
> @@ -28,6 +28,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
>  }
>  
>  static inline void dma_mark_clean(void *addr, size_t size) {}
> +static inline void dma_mark_dirty(void *addr, size_t size) {}
>  
>  #include <asm-generic/dma-mapping-common.h>
>  
> diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
> index de99d6e29430..b694e8399e28 100644
> --- a/arch/powerpc/include/asm/swiotlb.h
> +++ b/arch/powerpc/include/asm/swiotlb.h
> @@ -16,6 +16,7 @@
>  extern struct dma_map_ops swiotlb_dma_ops;
>  
>  static inline void dma_mark_clean(void *addr, size_t size) {}
> +static inline void dma_mark_dirty(void *addr, size_t size) {}
>  
>  extern unsigned int ppc_swiotlb_enable;
>  int __init swiotlb_setup_bus_notifier(void);
> diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
> index 96ac6cce4a32..79953f09e938 100644
> --- a/arch/tile/include/asm/dma-mapping.h
> +++ b/arch/tile/include/asm/dma-mapping.h
> @@ -58,6 +58,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
>  }
>  
>  static inline void dma_mark_clean(void *addr, size_t size) {}
> +static inline void dma_mark_dirty(void *addr, size_t size) {}
>  
>  static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
>  {
> diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
> index 8140e053ccd3..b9d357ab122d 100644
> --- a/arch/unicore32/include/asm/dma-mapping.h
> +++ b/arch/unicore32/include/asm/dma-mapping.h
> @@ -49,6 +49,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
>  }
>  
>  static inline void dma_mark_clean(void *addr, size_t size) {}
> +static inline void dma_mark_dirty(void *addr, size_t size) {}
>  
>  static inline void dma_cache_sync(struct device *dev, void *vaddr,
>  		size_t size, enum dma_data_direction direction)
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index db3622f22b61..f0b09156d7d8 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -841,6 +841,17 @@ config SWIOTLB
>  	  with more than 3 GB of memory.
>  	  If unsure, say Y.
>  
> +config SWIOTLB_PAGE_DIRTYING
> +	bool "SWIOTLB page dirtying"
> +	depends on SWIOTLB
> +	default n
> +	---help---
> +	  SWIOTLB page dirtying support provides a means for the guest to
> +	  trigger write faults on pages which received DMA from the device
> +	  without changing the data contained within.  By doing this the
> +	  guest can then support migration assuming the device and any
> +	  remaining pages are unmapped prior to the CPU itself being halted.
> +
>  config IOMMU_HELPER
>  	def_bool y
>  	depends on CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU
> diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
> index ab05d73e2bb7..7f9f2e76d081 100644
> --- a/arch/x86/include/asm/swiotlb.h
> +++ b/arch/x86/include/asm/swiotlb.h
> @@ -29,6 +29,32 @@ static inline void pci_swiotlb_late_init(void)
>  
>  static inline void dma_mark_clean(void *addr, size_t size) {}
>  
> +/*
> + * Make certain that the pages get marked as dirty
> + * now that the device has completed the DMA transaction.
> + *
> + * Without this we run the risk of a guest migration missing
> + * the pages that the device has written to as they are not
> + * tracked as a part of the dirty page tracking.
> + */
> +static inline void dma_mark_dirty(void *addr, size_t size)
> +{
> +#ifdef CONFIG_SWIOTLB_PAGE_DIRTYING

I like where this is going. However, as distributions don't like
shipping multiple kernels, I think we also need a way to configure this
at runtime, even if it is enabled at build time.

How about:
- mark dirty is enabled at boot if requested (e.g. by kernel command line)
- mark dirty can later be disabled/enabled by sysctl

(Enabling at runtime might be a bit tricky as it has to
 sync with all CPUs - use e.g. RCU for this?).

This way a distro can use a guest agent to keep dirtying disabled until
just before migration starts.
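
Just to illustrate, here is a rough, untested sketch of such a toggle.
The names (swiotlb_page_dirtying_on, swiotlb_page_dirtying_set) and the
exact hook are invented for illustration only, not a proposal for the
actual interface:

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/rcupdate.h>

/* Sketch only: boot-time switch plus a runtime toggle. */
static bool swiotlb_page_dirtying_on __read_mostly;

/* Boot-time enable, e.g. "swiotlb_page_dirtying" on the kernel command line. */
static int __init swiotlb_page_dirtying_setup(char *str)
{
	swiotlb_page_dirtying_on = true;
	return 1;
}
__setup("swiotlb_page_dirtying", swiotlb_page_dirtying_setup);

/* Runtime toggle, e.g. wired up to a sysctl that a guest agent can flip.
 * After enabling, give all CPUs a chance to observe the new value before
 * reporting dirtying as active. */
void swiotlb_page_dirtying_set(bool on)
{
	WRITE_ONCE(swiotlb_page_dirtying_on, on);
	if (on)
		synchronize_rcu();
}

static inline void dma_mark_dirty(void *addr, size_t size)
{
	if (!READ_ONCE(swiotlb_page_dirtying_on))
		return;
	/* ... write-fault the pages as in your patch ... */
}

The exact knob (sysctl, module parameter, ...) doesn't matter much to me;
the point is that the fast path only pays a single load when dirtying is
off.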

> +	unsigned long pg_addr, start;
> +
> +	start = (unsigned long)addr;
> +	pg_addr = PAGE_ALIGN(start + size);
> +	start &= ~(sizeof(atomic_t) - 1);
> +
> +	/* trigger a write fault on each page, excluding first page */
> +	while ((pg_addr -= PAGE_SIZE) > start)
> +		atomic_add(0, (atomic_t *)pg_addr);
> +
> +	/* trigger a write fault on first word of DMA */
> +	atomic_add(0, (atomic_t *)start);

start might not be aligned correctly for a cast to atomic_t.
It's harmless to do this for any memory, so I think you should
just do this for the 1st byte of all pages, including the first one.
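
E.g. something like this (untested sketch, same function reworked to
touch only page-aligned addresses):

static inline void dma_mark_dirty(void *addr, size_t size)
{
#ifdef CONFIG_SWIOTLB_PAGE_DIRTYING
	unsigned long start = (unsigned long)addr & PAGE_MASK;
	unsigned long end = (unsigned long)addr + size;
	unsigned long pg_addr;

	/* touch the first word of every page covered by the buffer;
	 * page-aligned addresses are always aligned enough for atomic_t */
	for (pg_addr = start; pg_addr < end; pg_addr += PAGE_SIZE)
		atomic_add(0, (atomic_t *)pg_addr);
#endif /* CONFIG_SWIOTLB_PAGE_DIRTYING */
}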


> +#endif /* CONFIG_SWIOTLB_PAGE_DIRTYING */
> +}
> +
>  extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
>  					dma_addr_t *dma_handle, gfp_t flags,
>  					struct dma_attrs *attrs);
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index 2154c70e47da..1533b3eefb67 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -456,6 +456,9 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
>  	 */
>  	if (dir == DMA_FROM_DEVICE)
>  		dma_mark_clean(phys_to_virt(paddr), size);
> +
> +	if (dir != DMA_TO_DEVICE)
> +		dma_mark_dirty(phys_to_virt(paddr), size);
>  }
>  EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
>  
> @@ -485,6 +488,9 @@ xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
>  
>  	if (dir == DMA_FROM_DEVICE)
>  		dma_mark_clean(phys_to_virt(paddr), size);
> +
> +	if (dir != DMA_TO_DEVICE)
> +		dma_mark_dirty(phys_to_virt(paddr), size);
>  }
>  EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);
>  
> diff --git a/lib/swiotlb.c b/lib/swiotlb.c
> index 384ac06217b2..4223d6c54724 100644
> --- a/lib/swiotlb.c
> +++ b/lib/swiotlb.c
> @@ -802,6 +802,9 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
>  	 */
>  	if (dir == DMA_FROM_DEVICE)
>  		dma_mark_clean(phys_to_virt(paddr), size);
> +
> +	if (dir != DMA_TO_DEVICE)
> +		dma_mark_dirty(phys_to_virt(paddr), size);
>  }
>  EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
>  
> @@ -830,6 +833,9 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
>  
>  	if (dir == DMA_FROM_DEVICE)
>  		dma_mark_clean(phys_to_virt(paddr), size);
> +
> +	if (dir != DMA_TO_DEVICE)
> +		dma_mark_dirty(phys_to_virt(paddr), size);
>  }
>  EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
>  


Thread overview: 37+ messages
2015-12-13 21:28 [Qemu-devel] [RFC PATCH 0/3] x86: Add support for guest DMA dirty page tracking Alexander Duyck
2015-12-13 21:28 ` [Qemu-devel] [RFC PATCH 1/3] swiotlb: Fold static unmap and sync calls into calling functions Alexander Duyck
2015-12-13 21:28 ` [Qemu-devel] [RFC PATCH 2/3] xen/swiotlb: " Alexander Duyck
2015-12-13 21:28 ` [Qemu-devel] [RFC PATCH 3/3] x86: Create dma_mark_dirty to dirty pages used for DMA by VM guest Alexander Duyck
2015-12-14 14:00   ` Michael S. Tsirkin [this message]
2015-12-14 16:34     ` Alexander Duyck
2015-12-14 17:20       ` Michael S. Tsirkin
2015-12-14 17:59         ` Alexander Duyck
2015-12-14 20:52           ` Michael S. Tsirkin
2015-12-14 22:32             ` Alexander Duyck
2015-12-14  2:27 ` [Qemu-devel] [RFC PATCH 0/3] x86: Add support for guest DMA dirty page tracking Yang Zhang
2015-12-14  4:54   ` Alexander Duyck
2015-12-14  5:22     ` Yang Zhang
2015-12-14  5:46       ` Alexander Duyck
2015-12-14  7:20         ` Yang Zhang
2015-12-14 14:02           ` Michael S. Tsirkin
2016-01-04 20:41 ` Konrad Rzeszutek Wilk
2016-01-05  3:11   ` Alexander Duyck
2016-01-05  9:40     ` Michael S. Tsirkin
2016-01-05 10:01       ` Dr. David Alan Gilbert
2016-01-05 10:35         ` Michael S. Tsirkin
2016-01-05 10:45           ` Dr. David Alan Gilbert
2016-01-05 10:59             ` Michael S. Tsirkin
2016-01-05 11:03               ` Dr. David Alan Gilbert
2016-01-05 11:11                 ` Michael S. Tsirkin
2016-01-05 11:06               ` Michael S. Tsirkin
2016-01-05 11:05             ` Michael S. Tsirkin
2016-01-05 12:43               ` Dr. David Alan Gilbert
2016-01-05 13:16                 ` Michael S. Tsirkin
2016-01-05 18:42                   ` Konrad Rzeszutek Wilk
2016-01-05 16:18       ` Alexander Duyck
2016-06-06  9:18         ` Zhou Jie
2016-06-06 16:04           ` Alex Duyck
2016-06-09 10:14             ` Zhou Jie
2016-06-09 15:39               ` Alexander Duyck
2016-06-12  3:03                 ` Zhou Jie
2016-06-13  1:28                   ` Alexander Duyck
