From: Marc Zyngier <maz@kernel.org>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org,
	Will Deacon <will@kernel.org>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Keqian Zhu <zhukeqian1@huawei.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	Jiang Yi <giangyi@amazon.com>, James Morse <james.morse@arm.com>,
	Andrew Scull <ascull@google.com>,
	Zenghui Yu <yuzenghui@huawei.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	David Brazdil <dbrazdil@google.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Ard Biesheuvel <ardb@kernel.org>, Fuad Tabba <tabba@google.com>,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH 23/24] KVM: arm64: Parametrize exception entry with a target EL
Date: Fri, 29 May 2020 17:01:20 +0100
Message-ID: <20200529160121.899083-24-maz@kernel.org>
In-Reply-To: <20200529160121.899083-1-maz@kernel.org>

We currently assume that an exception is always delivered to EL1.
Once we emulate EL2, this will no longer be the case. To prepare
for this, add a target_mode parameter.

While we're at it, merge the computation of the target PC and PSTATE into
a single function that updates both PC and CPSR after saving their
previous values in the corresponding ELR/SPSR. This ensures that they
are updated in the correct order (a pretty common source of bugs...).
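
For illustration only, and not part of the patch, a minimal standalone C
sketch of that ordering rule follows; fake_vcpu and fake_enter_exception
are made-up stand-ins for the kernel's types and helpers, not KVM code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_vcpu {
	uint64_t pc, pstate;	/* live guest state */
	uint64_t elr, spsr;	/* exception return state */
	uint64_t vbar;		/* vector base address */
};

/*
 * Stash the old PC/PSTATE into ELR/SPSR before either is overwritten
 * with the target-mode values, so one update can never clobber state
 * the other still needs.
 */
static void fake_enter_exception(struct fake_vcpu *vcpu, uint64_t target_mode,
				 uint64_t vector_offset)
{
	uint64_t old_pc = vcpu->pc;
	uint64_t old_pstate = vcpu->pstate;

	vcpu->elr = old_pc;		/* preserve the return address */
	vcpu->spsr = old_pstate;	/* preserve the interrupted PSTATE */

	vcpu->pc = vcpu->vbar + vector_offset;	/* branch to the vector */
	vcpu->pstate = target_mode;		/* enter the target mode */
}

int main(void)
{
	struct fake_vcpu v = { .pc = 0x1000, .pstate = 0, .vbar = 0x8000 };

	fake_enter_exception(&v, 0x5 /* an EL1h-like mode value */, 0x200);
	printf("pc=%#" PRIx64 " pstate=%#" PRIx64 " elr=%#" PRIx64 " spsr=%#" PRIx64 "\n",
	       v.pc, v.pstate, v.elr, v.spsr);
	return 0;
}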

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/ptrace.h |  1 +
 arch/arm64/kvm/inject_fault.c   | 75 +++++++++++++++++----------------
 2 files changed, 39 insertions(+), 37 deletions(-)

diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index bf57308fcd63..953b6a1ce549 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -35,6 +35,7 @@
 #define GIC_PRIO_PSR_I_SET		(1 << 4)
 
 /* Additional SPSR bits not exposed in the UABI */
+#define PSR_MODE_THREAD_BIT	(1 << 0)
 #define PSR_IL_BIT		(1 << 20)
 
 /* AArch32-specific ptrace requests */
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 6aafc2825c1c..e21fdd93027a 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -26,28 +26,12 @@ enum exception_type {
 	except_type_serror	= 0x180,
 };
 
-static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
-{
-	u64 exc_offset;
-
-	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
-	case PSR_MODE_EL1t:
-		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
-		break;
-	case PSR_MODE_EL1h:
-		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
-		break;
-	case PSR_MODE_EL0t:
-		exc_offset = LOWER_EL_AArch64_VECTOR;
-		break;
-	default:
-		exc_offset = LOWER_EL_AArch32_VECTOR;
-	}
-
-	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
-}
-
 /*
+ * This performs the exception entry at a given EL (@target_mode), stashing PC
+ * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
+ * The EL passed to this function *must* be a non-secure, privileged mode with
+ * bit 0 being set (PSTATE.SP == 1).
+ *
  * When an exception is taken, most PSTATE fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
  * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
@@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
  * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
  * MSB to LSB.
  */
-static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
+static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
+			      enum exception_type type)
 {
-	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
-	unsigned long old, new;
+	unsigned long sctlr, vbar, old, new, mode;
+	u64 exc_offset;
+
+	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+	if      (mode == target_mode)
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+	else if (!(mode & PSR_MODE32_BIT))
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+	else
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+
+	switch (target_mode) {
+	case PSR_MODE_EL1h:
+		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
+		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+		vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
+		break;
+	default:
+		/* Don't do that */
+		BUG();
+	}
+
+	*vcpu_pc(vcpu) = vbar + exc_offset + type;
 
 	old = *vcpu_cpsr(vcpu);
 	new = 0;
@@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
 	new |= PSR_I_BIT;
 	new |= PSR_F_BIT;
 
-	new |= PSR_MODE_EL1h;
+	new |= target_mode;
 
-	return new;
+	*vcpu_cpsr(vcpu) = new;
+	vcpu_write_spsr(vcpu, old);
 }
 
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -116,11 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
 	u32 esr = 0;
 
-	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
-	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-	vcpu_write_spsr(vcpu, cpsr);
+	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 
 	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 
@@ -148,14 +154,9 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
-	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
-	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-	vcpu_write_spsr(vcpu, cpsr);
+	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 
 	/*
 	 * Build an unknown exception, depending on the instruction
-- 
2.26.2


Thread overview: 26+ messages
2020-05-29 16:00 [GIT PULL] KVM/arm64 updates for Linux 5.8 Marc Zyngier
2020-05-29 16:00 ` [PATCH 01/24] KVM: arm64: Move virt/kvm/arm to arch/arm64 Marc Zyngier
2020-05-29 16:00 ` [PATCH 02/24] KVM: arm64: Kill off CONFIG_KVM_ARM_HOST Marc Zyngier
2020-05-29 16:01 ` [PATCH 03/24] KVM: arm64: Update help text Marc Zyngier
2020-05-29 16:01 ` [PATCH 04/24] KVM: arm64: Change CONFIG_KVM to a menuconfig entry Marc Zyngier
2020-05-29 16:01 ` [PATCH 05/24] KVM: arm64: Clean up kvm makefiles Marc Zyngier
2020-05-29 16:01 ` [PATCH 06/24] KVM: arm64: Simplify __kvm_timer_set_cntvoff implementation Marc Zyngier
2020-05-29 16:01 ` [PATCH 07/24] KVM: arm64: Use cpus_have_final_cap for has_vhe() Marc Zyngier
2020-05-29 16:01 ` [PATCH 08/24] KVM: Fix spelling in code comments Marc Zyngier
2020-05-29 16:01 ` [PATCH 09/24] KVM: arm64: Sidestep stage2_unmap_vm() on vcpu reset when S2FWB is supported Marc Zyngier
2020-05-29 16:01 ` [PATCH 10/24] KVM: arm/arm64: Release kvm->mmu_lock in loop to prevent starvation Marc Zyngier
2020-05-29 16:01 ` [PATCH 11/24] KVM: arm64: Clean up the checking for huge mapping Marc Zyngier
2020-05-29 16:01 ` [PATCH 12/24] KVM: arm64: Unify handling THP backed host memory Marc Zyngier
2020-05-29 16:01 ` [PATCH 13/24] KVM: arm64: Support enabling dirty log gradually in small chunks Marc Zyngier
2020-05-29 16:01 ` [PATCH 14/24] KVM: arm64: Make KVM_CAP_MAX_VCPUS compatible with the selected GIC version Marc Zyngier
2020-05-29 16:01 ` [PATCH 15/24] KVM: arm64: Clean up cpu_init_hyp_mode() Marc Zyngier
2020-05-29 16:01 ` [PATCH 16/24] KVM: arm64: Fix incorrect comment on kvm_get_hyp_vector() Marc Zyngier
2020-05-29 16:01 ` [PATCH 17/24] KVM: arm64: Remove obsolete kvm_virt_to_phys abstraction Marc Zyngier
2020-05-29 16:01 ` [PATCH 18/24] KVM: arm64: vgic-v3: Take cpu_if pointer directly instead of vcpu Marc Zyngier
2020-05-29 16:01 ` [PATCH 19/24] KVM: arm64: Refactor vcpu_{read,write}_sys_reg Marc Zyngier
2020-05-29 16:01 ` [PATCH 20/24] KVM: arm64: Add missing reset handlers for PMU emulation Marc Zyngier
2020-05-29 16:01 ` [PATCH 21/24] KVM: arm64: Move sysreg reset check to boot time Marc Zyngier
2020-05-29 16:01 ` [PATCH 22/24] KVM: arm64: Don't use empty structures as CPU reset state Marc Zyngier
2020-05-29 16:01 ` [PATCH 23/24] KVM: arm64: Parametrize exception entry with a target EL Marc Zyngier [this message]
2020-05-29 16:01 ` [PATCH 24/24] KVM: arm64: Drop obsolete comment about sys_reg ordering Marc Zyngier
2020-06-01  8:27 ` [GIT PULL] KVM/arm64 updates for Linux 5.8 Paolo Bonzini
