From: Sasha Levin <sashal@kernel.org>
To: linux-kernel@vger.kernel.org, stable@vger.kernel.org
Cc: Marc Zyngier <marc.zyngier@arm.com>,
Will Deacon <will.deacon@arm.com>,
Sasha Levin <alexander.levin@microsoft.com>,
kvmarm@lists.cs.columbia.edu
Subject: [PATCH AUTOSEL 4.14 63/95] arm64: KVM: Make VHE Stage-2 TLB invalidation operations non-interruptible
Date: Tue, 7 May 2019 01:37:52 -0400
Message-ID: <20190507053826.31622-63-sashal@kernel.org>
In-Reply-To: <20190507053826.31622-1-sashal@kernel.org>
From: Marc Zyngier <marc.zyngier@arm.com>

[ Upstream commit c987876a80e7bcb98a839f10dca9ce7fda4feced ]

Contrary to the non-VHE version of the TLB invalidation helpers, the VHE
code has interrupts enabled, meaning that we can take an interrupt in
the middle of such a sequence, and start running something else with
HCR_EL2.TGE cleared.

That's really not a good idea.

Take the heavy-handed option and disable interrupts in
__tlb_switch_to_guest_vhe, restoring them in __tlb_switch_to_host_vhe.
The latter also gains an ISB in order to make sure that TGE really has
taken effect.
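
For context, the resulting VHE helpers look roughly like the sketch below
(condensed from the hunks that follow; lines not visible in the diff
context are approximated and may differ slightly in the 4.14 tree):

static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
						 unsigned long *flags)
{
	u64 val;

	/* Mask interrupts so nothing can run while TGE is clear. */
	local_irq_save(*flags);

	/* Flip TGE so the TLBI targets the guest's EL1/EL0 regime. */
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
						unsigned long flags)
{
	/* Restore the host's translation regime... */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	/* ...make sure TGE has really taken effect... */
	isb();
	/* ...and only then let interrupts back in. */
	local_irq_restore(flags);
}
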
Cc: stable@vger.kernel.org
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
---
arch/arm64/kvm/hyp/tlb.c | 35 +++++++++++++++++++++++++----------
1 file changed, 25 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 73464a96c365..db23c6e5c885 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,13 +15,18 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/irqflags.h>
+
 #include <asm/kvm_hyp.h>
 #include <asm/tlbflush.h>
 
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+						 unsigned long *flags)
 {
 	u64 val;
 
+	local_irq_save(*flags);
+
 	/*
 	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
 	 * most TLB operations target EL2/EL0. In order to affect the
@@ -36,7 +41,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
 	isb();
 }
 
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+						  unsigned long *flags)
 {
 	write_sysreg(kvm->arch.vttbr, vttbr_el2);
 	isb();
@@ -47,7 +53,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
 			    __tlb_switch_to_guest_vhe,
 			    ARM64_HAS_VIRT_HOST_EXTN);
 
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+						unsigned long flags)
 {
 	/*
 	 * We're done with the TLB operation, let's restore the host's
@@ -55,9 +62,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
 	 */
 	write_sysreg(0, vttbr_el2);
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	isb();
+	local_irq_restore(flags);
 }
 
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+						 unsigned long flags)
 {
 	write_sysreg(0, vttbr_el2);
 }
@@ -69,11 +79,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -116,36 +128,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	if (!has_vhe() && icache_is_vpipt())
 		__flush_icache_all();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+	unsigned long flags;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
--
2.20.1
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm