From: Binbin Wu <binbin.wu@linux.intel.com>
To: kvm@vger.kernel.org, seanjc@google.com, pbonzini@redhat.com
Cc: binbin.wu@linux.intel.com, kai.huang@intel.com,
chao.gao@intel.com, xuelian.guo@intel.com,
robert.hu@linux.intel.com
Subject: [PATCH v7 4/5] KVM: x86: Untag address when LAM applicable
Date: Tue, 4 Apr 2023 21:09:22 +0800
Message-ID: <20230404130923.27749-5-binbin.wu@linux.intel.com>
In-Reply-To: <20230404130923.27749-1-binbin.wu@linux.intel.com>
Untag addresses for 64-bit memory/MMIO operands in instruction emulation
and VM-exit handlers when LAM is applicable.
For instruction emulation, untag the address in __linearize() before the
canonical check. LAM does not apply to instruction fetches or to INVLPG,
so pass KVM_X86_UNTAG_ADDR_SKIP_LAM to skip LAM untagging on those paths.
For VM-exit handlers that operate on a 64-bit linear address:
- Cases that need the address untagged:
  Operand(s) of VMX instructions and INVPCID
  Operand(s) of SGX ENCLS
  Linear address in the INVVPID descriptor
- Cases LAM doesn't apply to (no change needed):
  Operand of INVLPG
  Linear address in the INVPCID descriptor
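For reference only (the helper itself is introduced in patch 3/5, not here):
conceptually, LAM untagging sign-extends the last address bit that LAM leaves
untouched through the metadata bits while preserving bit 63. A minimal sketch,
not the actual vmx_untag_addr() implementation, using the kernel's
sign_extend64() and BIT_ULL() helpers and a hypothetical lam_bit parameter
(56 for LAM57, i.e. metadata in bits 62:57, or 47 for LAM48):

  /* Illustrative sketch only; needs <linux/bits.h> and <linux/bitops.h>. */
  static u64 lam_untag_sketch(u64 addr, int lam_bit)
  {
  	/* Copy bit 'lam_bit' into bits 62:(lam_bit + 1), keep bit 63 as-is. */
  	return (sign_extend64(addr, lam_bit) & ~BIT_ULL(63)) |
  	       (addr & BIT_ULL(63));
  }

When KVM_X86_UNTAG_ADDR_SKIP_LAM is passed, the real helper is expected to
return the address unchanged, which is why the emulator uses that flag for
instruction fetches and INVLPG below.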
Co-developed-by: Robert Hoo <robert.hu@linux.intel.com>
Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Tested-by: Xuelian Guo <xuelian.guo@intel.com>
---
arch/x86/kvm/emulate.c | 23 ++++++++++++++++++-----
arch/x86/kvm/kvm_emulate.h | 2 ++
arch/x86/kvm/vmx/nested.c | 4 ++++
arch/x86/kvm/vmx/sgx.c | 1 +
arch/x86/kvm/x86.c | 10 ++++++++++
5 files changed, 35 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a20bec931764..b7df465eccf2 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -688,7 +688,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
- enum x86emul_mode mode, ulong *linear)
+ enum x86emul_mode mode, ulong *linear,
+ u64 untag_flags)
{
struct desc_struct desc;
bool usable;
@@ -701,6 +702,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
+ la = ctxt->ops->untag_addr(ctxt, la, untag_flags);
*linear = la;
va_bits = ctxt_virt_addr_bits(ctxt);
if (!__is_canonical_address(la, va_bits))
@@ -758,7 +760,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
{
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false,
- ctxt->mode, linear);
+ ctxt->mode, linear, 0);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -771,7 +773,12 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
- rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+ /*
+ * LAM does not apply to addresses used for instruction fetches
+ * or to those that specify the targets of jump and call instructions.
+ */
+ rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
+ &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
@@ -906,9 +913,12 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
* __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
+ *
+ * LAM does not apply to addresses used for instruction fetches
+ * or to those that specify the targets of jump and call instructions.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
- &linear);
+ &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
@@ -3433,8 +3443,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
+ unsigned max_size;
- rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+ /* Skip untagging; LAM does not apply to INVLPG. */
+ rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, false, false,
+ ctxt->mode, &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index ab65f3a47dfd..8d9f782adccb 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -225,6 +225,8 @@ struct x86_emulate_ops {
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+
+ u64 (*untag_addr)(struct x86_emulate_ctxt *ctxt, u64 addr, u64 flags);
};
/* Type, address-of, and value of an instruction's operand. */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d35bda9610e2..48cca88bfd37 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4970,6 +4970,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
else
*ret = off;
+ *ret = vmx_untag_addr(vcpu, *ret, 0);
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
* non-canonical form. This is the only check on the memory
* destination for long mode!
@@ -5787,6 +5788,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+ /* INVVPID is not valid in compat mode, so long mode implies 64-bit mode. */
+ if (is_long_mode(vcpu))
+ operand.gla = vmx_untag_addr(vcpu, operand.gla, 0);
if (!operand.vpid ||
is_noncanonical_address(operand.gla, vcpu))
return nested_vmx_fail(vcpu,
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 0574030b071f..527f1a902c65 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
if (!IS_ALIGNED(*gva, alignment)) {
fault = true;
} else if (likely(is_64_bit_mode(vcpu))) {
+ *gva = vmx_untag_addr(vcpu, *gva, 0);
fault = is_noncanonical_address(*gva, vcpu);
} else {
*gva &= 0xffffffff;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aca255e69d0d..18ad38649714 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8218,6 +8218,11 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
kvm_vm_bugged(kvm);
}
+static u64 emulator_untag_addr(struct x86_emulate_ctxt *ctxt, u64 addr, u64 flags)
+{
+ return static_call(kvm_x86_untag_addr)(emul_to_vcpu(ctxt), addr, flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.vm_bugged = emulator_vm_bugged,
.read_gpr = emulator_read_gpr,
@@ -8263,6 +8268,7 @@ static const struct x86_emulate_ops emulate_ops = {
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
+ .untag_addr = emulator_untag_addr,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -13260,6 +13266,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
switch (type) {
case INVPCID_TYPE_INDIV_ADDR:
+ /*
+ * LAM doesn't apply to the linear address in the descriptor, but
+ * it still must be canonical.
+ */
if ((!pcid_enabled && (operand.pcid != 0)) ||
is_noncanonical_address(operand.gla, vcpu)) {
kvm_inject_gp(vcpu, 0);
--
2.25.1