From: Punit Agrawal <punit.agrawal@arm.com>
To: kvmarm@lists.cs.columbia.edu, linux-arm-kernel@lists.infradead.org
Cc: kvm@vger.kernel.org, Marc Zyngier <marc.zyngier@arm.com>,
Punit Agrawal <punit.agrawal@arm.com>,
Will Deacon <will.deacon@arm.com>,
linux-kernel@vger.kernel.org,
Peter Zijlstra <peterz@infradead.org>
Subject: [PATCH v3 5/9] arm64: KVM: Handle trappable TLB instructions
Date: Tue, 10 Jan 2017 11:38:52 +0000 [thread overview]
Message-ID: <20170110113856.7183-6-punit.agrawal@arm.com> (raw)
In-Reply-To: <20170110113856.7183-1-punit.agrawal@arm.com>
The ARMv8 architecture allows trapping of TLB maintenance instructions
from EL0/EL1 to higher exception levels. On encountering a trappable TLB
instruction in a guest, an exception is taken to EL2.
Add support to handle emulating the TLB instructions. Also track the
number of emulated operations.
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm64/include/asm/kvm_asm.h | 2 +
arch/arm64/include/asm/kvm_host.h | 1 +
arch/arm64/kvm/hyp/tlb.c | 75 +++++++++++++++++++++++++++++++++++
arch/arm64/kvm/sys_regs.c | 83 +++++++++++++++++++++++++++++++++++++++
4 files changed, 161 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ec3553eb9349..8b695785d454 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -55,6 +55,8 @@ extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_emulate_tlb_invalidate(struct kvm *kvm, u32 opcode,
+ u64 regval);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e5050388e062..1e83b707f14c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -307,6 +307,7 @@ struct kvm_vcpu_stat {
u64 mmio_exit_user;
u64 mmio_exit_kernel;
u64 exits;
+ u64 tlb_invalidate;
};
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index b2cfbedea582..c14237858d75 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -86,3 +86,78 @@ void __hyp_text __kvm_flush_vm_context(void)
__tlbi(alle1is);
__flush_icache_all(); /* contains a dsb(ish) */
}
+
+/* Intentionally empty functions */
+static void __hyp_text __switch_to_hyp_role_nvhe(void) { }
+static void __hyp_text __switch_to_host_role_nvhe(void) { }
+
+static void __hyp_text __switch_to_hyp_role_vhe(void)
+{
+ u64 hcr = read_sysreg(hcr_el2);
+
+ /*
+ * When VHE is enabled and HCR_EL2.TGE=1, EL1&0 TLB operations
+ * apply to EL2&0 translation regime. As we prepare to emulate
+ * guest TLB operation clear HCR_TGE to target TLB operations
+ * to EL1&0 (guest).
+ */
+ hcr &= ~HCR_TGE;
+ write_sysreg(hcr, hcr_el2);
+}
+
+static void __hyp_text __switch_to_host_role_vhe(void)
+{
+ u64 hcr = read_sysreg(hcr_el2);
+
+ hcr |= HCR_TGE;
+ write_sysreg(hcr, hcr_el2);
+}
+
+static hyp_alternate_select(__switch_to_hyp_role,
+ __switch_to_hyp_role_nvhe,
+ __switch_to_hyp_role_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+static hyp_alternate_select(__switch_to_host_role,
+ __switch_to_host_role_nvhe,
+ __switch_to_host_role_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __switch_to_guest_regime(struct kvm *kvm)
+{
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __switch_to_hyp_role();
+ isb();
+}
+
+static void __hyp_text __switch_to_host_regime(void)
+{
+ __switch_to_host_role();
+ write_sysreg(0, vttbr_el2);
+}
+
+void __hyp_text
+__kvm_emulate_tlb_invalidate(struct kvm *kvm, u32 opcode, u64 regval)
+{
+ kvm = kern_hyp_va(kvm);
+
+ /*
+ * Switch to the guest before performing any TLB operations to
+ * target the appropriate VMID
+ */
+ __switch_to_guest_regime(kvm);
+
+ /*
+ * TLB maintenance operations are broadcast to
+ * inner-shareable domain when HCR_FB is set (default for
+ * KVM).
+ *
+ * Nuke all Stage 1 TLB entries for the VM. This will kill
+ * performance but it's always safe to do as we don't leave
+ * behind any strays in the TLB
+ */
+ __tlbi(vmalle1is);
+ isb();
+
+ __switch_to_host_regime();
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87e7e6608cd8..12ff8cf9f18a 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -791,6 +791,20 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static bool emulate_tlb_invalidate(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 opcode = sys_reg(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ kvm_call_hyp(__kvm_emulate_tlb_invalidate,
+ vcpu->kvm, opcode, p->regval);
+ trace_kvm_tlb_invalidate(*vcpu_pc(vcpu), opcode);
+ ++vcpu->stat.tlb_invalidate;
+
+ return true;
+}
+
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
@@ -842,6 +856,35 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
access_dcsw },
+ /*
+ * ARMv8 ARM: Table C5-4 TLB maintenance instructions
+ * (Ref: ARMv8 ARM C5.1 version: ARM DDI 0487A.j)
+ */
+ /* TLBI VMALLE1IS */
+ { Op0(1), Op1(0), CRn(8), CRm(3), Op2(0), emulate_tlb_invalidate },
+ /* TLBI VAE1IS */
+ { Op0(1), Op1(0), CRn(8), CRm(3), Op2(1), emulate_tlb_invalidate },
+ /* TLBI ASIDE1IS */
+ { Op0(1), Op1(0), CRn(8), CRm(3), Op2(2), emulate_tlb_invalidate },
+ /* TLBI VAAE1IS */
+ { Op0(1), Op1(0), CRn(8), CRm(3), Op2(3), emulate_tlb_invalidate },
+ /* TLBI VALE1IS */
+ { Op0(1), Op1(0), CRn(8), CRm(3), Op2(5), emulate_tlb_invalidate },
+ /* TLBI VAALE1IS */
+ { Op0(1), Op1(0), CRn(8), CRm(3), Op2(7), emulate_tlb_invalidate },
+ /* TLBI VMALLE1 */
+ { Op0(1), Op1(0), CRn(8), CRm(7), Op2(0), emulate_tlb_invalidate },
+ /* TLBI VAE1 */
+ { Op0(1), Op1(0), CRn(8), CRm(7), Op2(1), emulate_tlb_invalidate },
+ /* TLBI ASIDE1 */
+ { Op0(1), Op1(0), CRn(8), CRm(7), Op2(2), emulate_tlb_invalidate },
+ /* TLBI VAAE1 */
+ { Op0(1), Op1(0), CRn(8), CRm(7), Op2(3), emulate_tlb_invalidate },
+ /* TLBI VALE1 */
+ { Op0(1), Op1(0), CRn(8), CRm(7), Op2(5), emulate_tlb_invalidate },
+ /* TLBI VAALE1 */
+ { Op0(1), Op1(0), CRn(8), CRm(7), Op2(7), emulate_tlb_invalidate },
+
DBG_BCR_BVR_WCR_WVR_EL1(0),
DBG_BCR_BVR_WCR_WVR_EL1(1),
/* MDCCINT_EL1 */
@@ -1330,6 +1373,46 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
+ /*
+ * TLB operations
+ */
+ /* TLBIALLIS */
+ { Op1( 0), CRn( 8), CRm( 3), Op2( 0), emulate_tlb_invalidate},
+ /* TLBIMVAIS */
+ { Op1( 0), CRn( 8), CRm( 3), Op2( 1), emulate_tlb_invalidate},
+ /* TLBIASIDIS */
+ { Op1( 0), CRn( 8), CRm( 3), Op2( 2), emulate_tlb_invalidate},
+ /* TLBIMVAAIS */
+ { Op1( 0), CRn( 8), CRm( 3), Op2( 3), emulate_tlb_invalidate},
+ /* TLBIMVALIS */
+ { Op1( 0), CRn( 8), CRm( 3), Op2( 5), emulate_tlb_invalidate},
+ /* TLBIMVAALIS */
+ { Op1( 0), CRn( 8), CRm( 3), Op2( 7), emulate_tlb_invalidate},
+ /* ITLBIALL */
+ { Op1( 0), CRn( 8), CRm( 5), Op2( 0), emulate_tlb_invalidate},
+ /* ITLBIMVA */
+ { Op1( 0), CRn( 8), CRm( 5), Op2( 1), emulate_tlb_invalidate},
+ /* ITLBIASID */
+ { Op1( 0), CRn( 8), CRm( 5), Op2( 2), emulate_tlb_invalidate},
+ /* DTLBIALL */
+ { Op1( 0), CRn( 8), CRm( 6), Op2( 0), emulate_tlb_invalidate},
+ /* DTLBIMVA */
+ { Op1( 0), CRn( 8), CRm( 6), Op2( 1), emulate_tlb_invalidate},
+ /* DTLBIASID */
+ { Op1( 0), CRn( 8), CRm( 6), Op2( 2), emulate_tlb_invalidate},
+ /* TLBIALL */
+ { Op1( 0), CRn( 8), CRm( 7), Op2( 0), emulate_tlb_invalidate},
+ /* TLBIMVA */
+ { Op1( 0), CRn( 8), CRm( 7), Op2( 1), emulate_tlb_invalidate},
+ /* TLBIASID */
+ { Op1( 0), CRn( 8), CRm( 7), Op2( 2), emulate_tlb_invalidate},
+ /* TLBIMVAA */
+ { Op1( 0), CRn( 8), CRm( 7), Op2( 3), emulate_tlb_invalidate},
+ /* TLBIMVAL */
+ { Op1( 0), CRn( 8), CRm( 7), Op2( 5), emulate_tlb_invalidate},
+ /* TLBIMVAAL */
+ { Op1( 0), CRn( 8), CRm( 7), Op2( 7), emulate_tlb_invalidate},
+
/* PMU */
{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
--
2.11.0
next prev parent reply other threads:[~2017-01-10 11:38 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-01-10 11:38 [PATCH v3 0/9] Add support for monitoring guest TLB operations Punit Agrawal
2017-01-10 11:38 ` [PATCH v3 1/9] arm64/kvm: hyp: tlb: use __tlbi() helper Punit Agrawal
2017-01-10 11:38 ` [PATCH v3 2/9] KVM: Track the pid of the VM process Punit Agrawal
2017-01-10 11:38 ` [PATCH v3 3/9] KVM: Add event to trace tlb invalidations Punit Agrawal
2017-01-10 11:38 ` [PATCH v3 4/9] arm: KVM: Handle trappable TLB instructions Punit Agrawal
2017-01-10 11:38 ` Punit Agrawal [this message]
2017-01-10 11:38 ` [PATCH v3 6/9] kvm: arm/arm64: Add host pmu to support VM introspection Punit Agrawal
2017-01-18 11:21 ` Marc Zyngier
2017-01-18 11:35 ` Mark Rutland
2017-01-18 13:01 ` Punit Agrawal
2017-01-18 13:18 ` Marc Zyngier
2017-01-18 14:51 ` Punit Agrawal
2017-01-18 15:17 ` Mark Rutland
2017-01-18 16:17 ` Punit Agrawal
2017-01-18 18:05 ` Mark Rutland
2017-01-19 16:42 ` Christoffer Dall
2017-01-23 11:21 ` Punit Agrawal
2017-01-18 13:45 ` Will Deacon
2017-01-18 14:58 ` Punit Agrawal
2017-01-18 13:06 ` Punit Agrawal
2017-01-10 11:38 ` [PATCH v3 7/9] kvm: host_pmu: Add support for tracking guest TLB operations Punit Agrawal
2017-01-10 11:38 ` [PATCH v3 8/9] arm: KVM: Enable support for host pmu Punit Agrawal
2017-01-10 11:38 ` [PATCH v3 9/9] arm64: KVM: Enable support for the " Punit Agrawal
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170110113856.7183-6-punit.agrawal@arm.com \
--to=punit.agrawal@arm.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.cs.columbia.edu \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=marc.zyngier@arm.com \
--cc=peterz@infradead.org \
--cc=will.deacon@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox