public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Alexander Graf <agraf@suse.de>
To: kvm-ppc@vger.kernel.org
Cc: kvm@vger.kernel.org
Subject: [PATCH 1/3] KVM: PPC: e500: Call kvmppc_mmu_map for initial mapping
Date: Thu, 17 Jan 2013 23:50:39 +0100	[thread overview]
Message-ID: <1358463041-25922-2-git-send-email-agraf@suse.de> (raw)
In-Reply-To: <1358463041-25922-1-git-send-email-agraf@suse.de>

When emulating tlbwe, we want to automatically create a shadow TLB mapping
for the entry that just got written, because chances are quite high that
it's going to be used very soon.

Today this happens explicitly, duplicating all the logic that is in
kvmppc_mmu_map() already. Just call that one instead.

Signed-off-by: Alexander Graf <agraf@suse.de>
---
 arch/powerpc/kvm/e500.h     |    4 ++-
 arch/powerpc/kvm/e500_tlb.c |   45 +++++++++++-------------------------------
 2 files changed, 15 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c70d37e..00f96d8 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -113,8 +113,10 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
 #define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
 #define KVM_E500_TLB1_SIZE  16
 
+#define KVM_E500_INDEX_FORCE_MAP 0x80000000
+
 #define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
-#define tlbsel_of(index)	((index) >> 16)
+#define tlbsel_of(index)	(((index) >> 16) & 0x3)
 #define esel_of(index)		((index) & 0xFFFF)
 
 #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index cf3f180..eda7be1 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -853,8 +853,8 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
-	int tlbsel, esel, stlbsel, sesel;
+	struct kvm_book3e_206_tlb_entry *gtlbe;
+	int tlbsel, esel;
 	int recal = 0;
 
 	tlbsel = get_tlb_tlbsel(vcpu);
@@ -892,40 +892,17 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
-		u64 eaddr;
-		u64 raddr;
+		u64 eaddr = get_tlb_eaddr(gtlbe);
+		u64 raddr = get_tlb_raddr(gtlbe);
 
-		switch (tlbsel) {
-		case 0:
-			/* TLB0 */
+		if (tlbsel == 0) {
 			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
 			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
-
-			stlbsel = 0;
-			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
-			sesel = 0; /* unused */
-
-			break;
-
-		case 1:
-			/* TLB1 */
-			eaddr = get_tlb_eaddr(gtlbe);
-			raddr = get_tlb_raddr(gtlbe);
-
-			/* Create a 4KB mapping on the host.
-			 * If the guest wanted a large page,
-			 * only the first 4KB is mapped here and the rest
-			 * are mapped on the fly. */
-			stlbsel = 1;
-			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
-				    raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
-			break;
-
-		default:
-			BUG();
 		}
 
-		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
+		/* Premap the faulting page */
+		kvmppc_mmu_map(vcpu, eaddr, raddr,
+			index_of(tlbsel, esel) | KVM_E500_INDEX_FORCE_MAP);
 	}
 
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -1024,9 +1001,11 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	struct tlbe_priv *priv;
-	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
+	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe = {};
 	int tlbsel = tlbsel_of(index);
 	int esel = esel_of(index);
+	/* Needed for initial map, where we can't use the cached value */
+	int force_map = index & KVM_E500_INDEX_FORCE_MAP;
 	int stlbsel, sesel;
 
 	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
@@ -1038,7 +1017,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
 		/* Only triggers after clear_tlb_refs */
-		if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
+		if (force_map || unlikely(!(priv->ref.flags & E500_TLB_VALID)))
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 		else
 			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
-- 
1.6.0.2


  reply	other threads:[~2013-01-17 22:50 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-01-17 22:50 [PATCH 0/3] KVM: PPC: e500: Shadow TLB Improvements Alexander Graf
2013-01-17 22:50 ` Alexander Graf [this message]
2013-01-18  0:11   ` [PATCH 1/3] KVM: PPC: e500: Call kvmppc_mmu_map for initial mapping Scott Wood
2013-01-18  0:20     ` Alexander Graf
2013-01-18  0:29       ` Alexander Graf
2013-01-18  0:35         ` Scott Wood
2013-01-18  0:47       ` Scott Wood
2013-01-18  0:49         ` Alexander Graf
2013-01-18  1:16         ` Alexander Graf
2013-01-17 22:50 ` [PATCH 2/3] KVM: PPC: E500: Split host and guest MMU parts Alexander Graf
2013-01-17 22:50 ` [PATCH 3/3] KVM: PPC: e500: Implement TLB1-in-TLB0 mapping Alexander Graf
2013-01-18  0:31   ` Scott Wood

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1358463041-25922-2-git-send-email-agraf@suse.de \
    --to=agraf@suse.de \
    --cc=kvm-ppc@vger.kernel.org \
    --cc=kvm@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox