public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Christoffer Dall <christoffer.dall@linaro.org>
To: Marc Zyngier <marc.zyngier@arm.com>
Cc: kvm@vger.kernel.org, kvmarm@lists.cs.columbia.edu,
	Steve Capper <steve.capper@linaro.org>
Subject: Re: [PATCH 3/4] arm/arm64: KVM: Flush caches to memory on unmap
Date: Fri, 9 Jan 2015 13:30:27 +0100	[thread overview]
Message-ID: <20150109123027.GQ21092@cbox> (raw)
In-Reply-To: <1420718349-24152-4-git-send-email-marc.zyngier@arm.com>

On Thu, Jan 08, 2015 at 11:59:08AM +0000, Marc Zyngier wrote:
> Let's assume a guest has created an uncached mapping, and written
> to that page. Let's also assume that the host uses a cache-coherent
> IO subsystem. Let's finally assume that the host is under memory
> pressure and starts to swap things out.
> 
> Before this "uncached" page is evicted, we need to make sure it
> gets invalidated, or the IO subsystem is going to swap out the
> cached view, losing the data that has been written there.
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm/include/asm/kvm_mmu.h   | 31 +++++++++++++++++++++++++++
>  arch/arm/kvm/mmu.c               | 46 +++++++++++++++++++++++++++-------------
>  arch/arm64/include/asm/kvm_mmu.h | 18 ++++++++++++++++
>  3 files changed, 80 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
> index 63e0ecc..7ceb836 100644
> --- a/arch/arm/include/asm/kvm_mmu.h
> +++ b/arch/arm/include/asm/kvm_mmu.h
> @@ -44,6 +44,7 @@
>  
>  #ifndef __ASSEMBLY__
>  
> +#include <linux/highmem.h>
>  #include <asm/cacheflush.h>
>  #include <asm/pgalloc.h>
>  
> @@ -190,6 +191,36 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
>  
>  #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
>  
> +static inline void __kvm_flush_dcache_pte(pte_t pte)
> +{
> +	void *va = kmap_atomic(pte_page(pte));
> +
> +	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
> +
> +	kunmap_atomic(va);
> +}
> +
> +static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
> +{
> +	unsigned long size = PMD_SIZE;
> +	pfn_t pfn = pmd_pfn(pmd);
> +
> +	while (size) {
> +		void *va = kmap_atomic_pfn(pfn);
> +
> +		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
> +
> +		pfn++;
> +		size -= PAGE_SIZE;
> +
> +		kunmap_atomic(va);
> +	}
> +}
> +
> +static inline void __kvm_flush_dcache_pud(pud_t pud)
> +{
> +}
> +
>  void stage2_flush_vm(struct kvm *kvm);
>  
>  #endif	/* !__ASSEMBLY__ */
> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> index 1dc9778..1f5b793 100644
> --- a/arch/arm/kvm/mmu.c
> +++ b/arch/arm/kvm/mmu.c
> @@ -58,6 +58,21 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
>  		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
>  }
>  
> +static void kvm_flush_dcache_pte(pte_t pte)
> +{
> +	__kvm_flush_dcache_pte(pte);
> +}
> +
> +static void kvm_flush_dcache_pmd(pmd_t pmd)
> +{
> +	__kvm_flush_dcache_pmd(pmd);
> +}
> +
> +static void kvm_flush_dcache_pud(pud_t pud)
> +{
> +	__kvm_flush_dcache_pud(pud);
> +}
> +
>  static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
>  				  int min, int max)
>  {
> @@ -128,9 +143,12 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
>  	start_pte = pte = pte_offset_kernel(pmd, addr);
>  	do {
>  		if (!pte_none(*pte)) {
> +			pte_t old_pte = *pte;
>  			kvm_set_pte(pte, __pte(0));
> -			put_page(virt_to_page(pte));

was this a pre-existing bug, in that we released the page before we
flushed the TLB?

>  			kvm_tlb_flush_vmid_ipa(kvm, addr);
> +			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
> +				kvm_flush_dcache_pte(old_pte);

this is confusing me: We are only flushing the cache for cached stage-2
mappings?  Weren't we trying to flush the cache for uncached mappings?
(we obviously also need to flush a cached stage-2 mapping where the
guest maps it as uncached, but we don't know that easily).

Am I missing something completely here?

In any case we probably need a comment here explaining this.

> +			put_page(virt_to_page(pte));
>  		}
>  	} while (pte++, addr += PAGE_SIZE, addr != end);
>  
> @@ -149,8 +167,10 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
>  		next = kvm_pmd_addr_end(addr, end);
>  		if (!pmd_none(*pmd)) {
>  			if (kvm_pmd_huge(*pmd)) {
> +				pmd_t old_pmd = *pmd;
>  				pmd_clear(pmd);
>  				kvm_tlb_flush_vmid_ipa(kvm, addr);
> +				kvm_flush_dcache_pmd(old_pmd);
>  				put_page(virt_to_page(pmd));
>  			} else {
>  				unmap_ptes(kvm, pmd, addr, next);
> @@ -173,8 +193,10 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
>  		next = kvm_pud_addr_end(addr, end);
>  		if (!pud_none(*pud)) {
>  			if (pud_huge(*pud)) {
> +				pud_t old_pud = *pud;
>  				pud_clear(pud);
>  				kvm_tlb_flush_vmid_ipa(kvm, addr);
> +				kvm_flush_dcache_pud(old_pud);
>  				put_page(virt_to_page(pud));
>  			} else {
>  				unmap_pmds(kvm, pud, addr, next);
> @@ -209,10 +231,8 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
>  
>  	pte = pte_offset_kernel(pmd, addr);
>  	do {
> -		if (!pte_none(*pte)) {
> -			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
> -			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
> -		}
> +		if (!pte_none(*pte))
> +			kvm_flush_dcache_pte(*pte);
>  	} while (pte++, addr += PAGE_SIZE, addr != end);
>  }
>  
> @@ -226,12 +246,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
>  	do {
>  		next = kvm_pmd_addr_end(addr, end);
>  		if (!pmd_none(*pmd)) {
> -			if (kvm_pmd_huge(*pmd)) {
> -				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
> -				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
> -			} else {
> +			if (kvm_pmd_huge(*pmd))
> +				kvm_flush_dcache_pmd(*pmd);
> +			else
>  				stage2_flush_ptes(kvm, pmd, addr, next);
> -			}
>  		}
>  	} while (pmd++, addr = next, addr != end);
>  }
> @@ -246,12 +264,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
>  	do {
>  		next = kvm_pud_addr_end(addr, end);
>  		if (!pud_none(*pud)) {
> -			if (pud_huge(*pud)) {
> -				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
> -				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
> -			} else {
> +			if (pud_huge(*pud))
> +				kvm_flush_dcache_pud(*pud);
> +			else
>  				stage2_flush_pmds(kvm, pud, addr, next);
> -			}
>  		}
>  	} while (pud++, addr = next, addr != end);
>  }
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 14a74f1..b7419f5 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -260,6 +260,24 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
>  
>  #define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
>  
> +static inline void __kvm_flush_dcache_pte(pte_t pte)
> +{
> +	struct page *page = pte_page(pte);
> +	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
> +}
> +
> +static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
> +{
> +	struct page *page = pmd_page(pmd);
> +	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
> +}
> +
> +static inline void __kvm_flush_dcache_pud(pud_t pud)
> +{
> +	struct page *page = pud_page(pud);
> +	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
> +}
> +
>  void stage2_flush_vm(struct kvm *kvm);
>  
>  #endif /* __ASSEMBLY__ */
> -- 
> 2.1.4
> 
Thanks,

-Christoffer

  reply	other threads:[~2015-01-09 12:30 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-01-08 11:59 [PATCH 0/4] arm/arm64: KVM: Random selection of MM related fixes Marc Zyngier
2015-01-08 11:59 ` [PATCH 1/4] mm: Correct ordering of *_clear_flush_young_notify Marc Zyngier
2015-01-08 13:12   ` Paolo Bonzini
2015-01-08 19:00   ` Andrea Arcangeli
2015-01-12 10:15     ` Steve Capper
2015-01-08 11:59 ` [PATCH 2/4] arm/arm64: KVM: Use set/way op trapping to track the state of the caches Marc Zyngier
2015-01-09 11:19   ` Christoffer Dall
2015-01-09 11:38     ` Marc Zyngier
2015-01-09 12:12       ` Christoffer Dall
2015-01-08 11:59 ` [PATCH 3/4] arm/arm64: KVM: Flush caches to memory on unmap Marc Zyngier
2015-01-09 12:30   ` Christoffer Dall [this message]
2015-01-09 14:35     ` Marc Zyngier
2015-01-11 12:30       ` Christoffer Dall
2015-01-12 11:15         ` Marc Zyngier
2015-01-12 20:13           ` Christoffer Dall
2015-01-13 13:47             ` Christoffer Dall
2015-01-13 13:57               ` Marc Zyngier
2015-01-08 11:59 ` [PATCH 4/4] arm/arm64: KVM: use kernel mapping to perform invalidation on page fault Marc Zyngier
2015-01-08 12:30   ` Peter Maydell
2015-01-08 13:07     ` Marc Zyngier
2015-01-08 13:16       ` Peter Maydell
2015-01-08 15:06         ` Marc Zyngier
2015-01-08 15:21           ` Peter Maydell
2015-01-09 12:50             ` Christoffer Dall
2015-01-09 13:03               ` Peter Maydell
2015-01-09 14:16                 ` Marc Zyngier
2015-01-09 15:28                   ` Peter Maydell
2015-01-09 17:18                     ` Marc Zyngier
2015-01-11 12:33                     ` Christoffer Dall
2015-01-11 17:37                       ` Peter Maydell
2015-01-11 17:58                         ` Christoffer Dall
2015-01-11 18:27                           ` Peter Maydell
2015-01-11 18:38                             ` Christoffer Dall
2015-01-12  9:58                               ` Marc Zyngier
2015-01-12 20:10                                 ` Christoffer Dall
2015-01-13 11:38                                   ` Marc Zyngier
2015-01-13 12:04                                     ` Christoffer Dall
2015-01-13 12:12                                       ` Peter Maydell
2015-01-13 13:35                                         ` Christoffer Dall
2015-01-13 13:41                                           ` Peter Maydell
2015-01-13 13:49                                             ` Christoffer Dall
2015-01-15 12:00                                           ` Mark Rutland
2015-01-15 13:00                                             ` Christoffer Dall
2015-01-15 15:47                                               ` Mark Rutland
2015-01-09 12:51   ` Christoffer Dall

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20150109123027.GQ21092@cbox \
    --to=christoffer.dall@linaro.org \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=marc.zyngier@arm.com \
    --cc=steve.capper@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox