From: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
To: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Avi Kivity <avi@redhat.com>,
	Marcelo Tosatti <mtosatti@redhat.com>,
	LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: Re: [PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)
Date: Fri, 14 Sep 2012 18:13:11 +0800
Message-ID: <50530337.2080208@linux.vnet.ibm.com>
In-Reply-To: <5053000E.3050303@linux.vnet.ibm.com>

On 09/14/2012 05:59 PM, Xiao Guangrong wrote:

> +		return FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);

Sorry, this was wrong: the stray "return" ends the prefetch loop after the
first non-present entry instead of continuing with the remaining sptes.
Here is the updated patch.
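For reference, a minimal userspace sketch of the control-flow difference
(toy names such as prefetch_one() and NUM_ENTRIES are made up here, this
is not the KVM code): the buggy variant gives up after the first entry,
the fixed variant walks all of them.

#include <stdio.h>

#define NUM_ENTRIES 8

static void prefetch_one(int i)
{
	printf("prefetched entry %d\n", i);
}

/* Buggy: mirrors "return FNAME(prefetch_gpte)(...)" inside the loop. */
static void prefetch_all_buggy(void)
{
	int i;

	for (i = 0; i < NUM_ENTRIES; i++)
		return prefetch_one(i);	/* leaves the loop after entry 0 */
}

/* Fixed: mirrors the updated patch, which simply drops the "return". */
static void prefetch_all_fixed(void)
{
	int i;

	for (i = 0; i < NUM_ENTRIES; i++)
		prefetch_one(i);	/* keeps iterating over all entries */
}

int main(void)
{
	puts("buggy:");
	prefetch_all_buggy();	/* prints only entry 0 */
	puts("fixed:");
	prefetch_all_fixed();	/* prints entries 0..7 */
	return 0;
}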

[PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)

The only difference between FNAME(update_pte) and FNAME(pte_prefetch) is
that the former is allowed to prefetch the gfn from a dirty-logged slot,
so introduce a common function to prefetch the spte.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/paging_tmpl.h |   50 ++++++++++++++++---------------------------
 1 files changed, 19 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1a738c5..32facf7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -356,35 +356,45 @@ no_present:
 	return true;
 }

-static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			      u64 *spte, const void *pte)
+static void
+FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
 {
-	pt_element_t gpte;
 	unsigned pte_access;
+	gfn_t gfn;
 	pfn_t pfn;

-	gpte = *(const pt_element_t *)pte;
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return;

 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
+
+	gfn = gpte_to_gfn(gpte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
-	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
+	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
+			no_dirty_log && (pte_access & ACC_WRITE_MASK));
 	if (mmu_invalid_pfn(pfn))
 		return;

 	/*
 	 * we call mmu_set_spte() with host_writable = true because that
-	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
-		     NULL, PT_PAGE_TABLE_LEVEL,
-		     gpte_to_gfn(gpte), pfn, true, true);
+		     NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);

 	if (!is_error_pfn(pfn))
 		kvm_release_pfn_clean(pfn);
 }

+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			      u64 *spte, const void *pte)
+{
+	pt_element_t gpte = *(const pt_element_t *)pte;
+
+	return FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
+}
+
 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
 				struct guest_walker *gw, int level)
 {
@@ -428,35 +438,13 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 	spte = sp->spt + i;

 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
-		pt_element_t gpte;
-		unsigned pte_access;
-		gfn_t gfn;
-		pfn_t pfn;
-
 		if (spte == sptep)
 			continue;

 		if (is_shadow_present_pte(*spte))
 			continue;

-		gpte = gptep[i];
-
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
-			continue;
-
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
-								  true);
-		gfn = gpte_to_gfn(gpte);
-		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
-				      pte_access & ACC_WRITE_MASK);
-		if (mmu_invalid_pfn(pfn))
-			break;
-
-		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
-			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
-			     pfn, true, true);
-		if (!is_error_pfn(pfn))
-			kvm_release_pfn_clean(pfn);
+		FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);
 	}
 }

-- 
1.7.7.6

