From: Binbin Wu <binbin.wu@linux.intel.com>
To: kvm@vger.kernel.org, seanjc@google.com, pbonzini@redhat.com
Cc: chao.gao@intel.com, robert.hu@linux.intel.com
Subject: Re: [PATCH v6 6/7] KVM: x86: Untag address when LAM applicable
Date: Mon, 20 Mar 2023 20:04:25 +0800 [thread overview]
Message-ID: <b5a90bf8-5f13-6250-1ecd-c42df9789aba@linux.intel.com> (raw)
In-Reply-To: <20230319084927.29607-7-binbin.wu@linux.intel.com>
On 3/19/2023 4:49 PM, Binbin Wu wrote:
> Untag address for 64-bit memory/mmio operand in instruction emulations
> and vmexit handlers when LAM is applicable.
>
> For instruction emulation, untag address in __linearize() before
> canonical check. LAM doesn't apply to instruction fetch and invlpg,
> use KVM_X86_UNTAG_ADDR_SKIP_LAM to skip LAM untag.
>
> For vmexit handling related to 64-bit linear addresses:
> - Cases need to untag address
> Operand(s) of VMX instructions and INVPCID
> Operand(s) of SGX ENCLS
> Linear address in INVVPID descriptor.
> - Cases LAM doesn't apply to (no change needed)
> Operand of INVLPG
> Linear address in INVPCID descriptor
>
> Co-developed-by: Robert Hoo <robert.hu@linux.intel.com>
> Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
> Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
> ---
> arch/x86/kvm/emulate.c | 25 +++++++++++++++++--------
> arch/x86/kvm/vmx/nested.c | 2 ++
> arch/x86/kvm/vmx/sgx.c | 1 +
> arch/x86/kvm/x86.c | 4 ++++
> 4 files changed, 24 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index a630c5db971c..c46f0162498e 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -688,7 +688,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
> struct segmented_address addr,
> unsigned *max_size, unsigned size,
> bool write, bool fetch,
> - enum x86emul_mode mode, ulong *linear)
> + enum x86emul_mode mode, ulong *linear,
> + u64 untag_flags)
> {
> struct desc_struct desc;
> bool usable;
> @@ -701,9 +702,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
> *max_size = 0;
> switch (mode) {
> case X86EMUL_MODE_PROT64:
> - *linear = la;
> + *linear = static_call(kvm_x86_untag_addr)(ctxt->vcpu, la, untag_flags);
Per Sean's comment "Dereferencing ctxt->vcpu in the emulator is not
allowed" in V5, I will update this as follows:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c46f0162498e..5fbce7bb3bc8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -702,7 +702,7 @@ static __always_inline int __linearize(struct
x86_emulate_ctxt *ctxt,
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
- *linear = static_call(kvm_x86_untag_addr)(ctxt->vcpu,
la, untag_flags);
+ *linear = ctxt->ops->untag_addr(ctxt, la, untag_flags);
va_bits = ctxt_virt_addr_bits(ctxt);
if (!__is_canonical_address(*linear, va_bits))
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 2d9662be8333..14b32c7c2abb 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -224,6 +224,8 @@ struct x86_emulate_ops {
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+
+ u64 (*untag_addr)(struct x86_emulate_ctxt *ctxt, u64 addr, u64
flags);
};
/* Type, address-of, and value of an instruction's operand. */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d85f87a19f58..a3560ea7560d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8190,6 +8190,11 @@ static void emulator_vm_bugged(struct
x86_emulate_ctxt *ctxt)
kvm_vm_bugged(kvm);
}
+static u64 emulator_untag_addr(struct x86_emulate_ctxt *ctxt, u64 addr,
u64 flags)
+{
+ return static_call(kvm_x86_untag_addr)(emul_to_vcpu(ctxt), addr,
flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.vm_bugged = emulator_vm_bugged,
.read_gpr = emulator_read_gpr,
@@ -8234,6 +8239,7 @@ static const struct x86_emulate_ops emulate_ops = {
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
+ .untag_addr = emulator_untag_addr,
};
> +
> va_bits = ctxt_virt_addr_bits(ctxt);
> - if (!__is_canonical_address(la, va_bits))
> + if (!__is_canonical_address(*linear, va_bits))
> goto bad;
>
> *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
> @@ -757,8 +759,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
> ulong *linear)
> {
> unsigned max_size;
> - return __linearize(ctxt, addr, &max_size, size, write, false,
> - ctxt->mode, linear);
> + return __linearize(ctxt, addr, &max_size, size, false, false,
> + ctxt->mode, linear, 0);
> }
>
> static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
> @@ -771,7 +773,9 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
>
> if (ctxt->op_bytes != sizeof(unsigned long))
> addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
> - rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
> + /* skip LAM untag for instruction */
> + rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
> + &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
> if (rc == X86EMUL_CONTINUE)
> ctxt->_eip = addr.ea;
> return rc;
> @@ -906,9 +910,11 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
> * __linearize is called with size 0 so that it does not do any
> * boundary check itself. Instead, we use max_size to check
> * against op_size.
> + *
> + * skip LAM untag for instruction
> */
> rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
> - &linear);
> + &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
> if (unlikely(rc != X86EMUL_CONTINUE))
> return rc;
>
> @@ -3433,8 +3439,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
> {
> int rc;
> ulong linear;
> + unsigned max_size;
>
> - rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
> + /* skip untag for invlpg since LAM is not applied to invlpg */
> + rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, false, false,
> + ctxt->mode, &linear, KVM_X86_UNTAG_ADDR_SKIP_LAM);
> if (rc == X86EMUL_CONTINUE)
> ctxt->ops->invlpg(ctxt, linear);
> /* Disable writeback. */
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 2eb258992d63..dd1d28a0d147 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -4970,6 +4970,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
> else
> *ret = off;
>
> + *ret = vmx_untag_addr(vcpu, *ret, 0);
> /* Long mode: #GP(0)/#SS(0) if the memory address is in a
> * non-canonical form. This is the only check on the memory
> * destination for long mode!
> @@ -5787,6 +5788,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
> vpid02 = nested_get_vpid02(vcpu);
> switch (type) {
> case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
> + operand.gla = vmx_untag_addr(vcpu, operand.gla, 0);
> if (!operand.vpid ||
> is_noncanonical_address(operand.gla, vcpu))
> return nested_vmx_fail(vcpu,
> diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
> index 0574030b071f..527f1a902c65 100644
> --- a/arch/x86/kvm/vmx/sgx.c
> +++ b/arch/x86/kvm/vmx/sgx.c
> @@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
> if (!IS_ALIGNED(*gva, alignment)) {
> fault = true;
> } else if (likely(is_64_bit_mode(vcpu))) {
> + *gva = vmx_untag_addr(vcpu, *gva, 0);
> fault = is_noncanonical_address(*gva, vcpu);
> } else {
> *gva &= 0xffffffff;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index e74af72f53ec..d85f87a19f58 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -13233,6 +13233,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
>
> switch (type) {
> case INVPCID_TYPE_INDIV_ADDR:
> + /*
> + * LAM doesn't apply to the linear address in the descriptor,
> + * it still needs to be canonical
> + */
> if ((!pcid_enabled && (operand.pcid != 0)) ||
> is_noncanonical_address(operand.gla, vcpu)) {
> kvm_inject_gp(vcpu, 0);
next prev parent reply other threads:[~2023-03-20 12:04 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-19 8:49 [PATCH v6 0/7] Linear Address Masking (LAM) KVM Enabling Binbin Wu
2023-03-19 8:49 ` [PATCH v6 1/7] KVM: x86: Explicitly cast ulong to bool in kvm_set_cr3() Binbin Wu
2023-03-20 1:30 ` Binbin Wu
2023-03-19 8:49 ` [PATCH v6 2/7] KVM: VMX: Use is_64_bit_mode() to check 64-bit mode Binbin Wu
2023-03-20 12:36 ` Chao Gao
2023-03-20 12:51 ` Binbin Wu
2023-03-21 21:35 ` Sean Christopherson
2023-03-22 1:09 ` Binbin Wu
2023-03-28 23:33 ` Huang, Kai
2023-03-29 1:27 ` Binbin Wu
2023-03-29 2:04 ` Huang, Kai
2023-03-29 2:08 ` Binbin Wu
2023-03-29 17:34 ` Sean Christopherson
2023-03-29 22:46 ` Huang, Kai
2023-04-03 3:37 ` Binbin Wu
2023-04-03 11:24 ` Huang, Kai
2023-04-03 15:02 ` Sean Christopherson
2023-04-03 23:13 ` Huang, Kai
2023-04-04 1:21 ` Binbin Wu
2023-04-04 1:53 ` Huang, Kai
2023-04-04 2:45 ` Binbin Wu
2023-04-04 3:09 ` Huang, Kai
2023-04-04 3:15 ` Binbin Wu
2023-04-04 3:27 ` Binbin Wu
2023-04-04 1:31 ` Binbin Wu
2023-04-04 6:14 ` Binbin Wu
2023-03-20 22:36 ` Huang, Kai
2023-03-19 8:49 ` [PATCH v6 3/7] KVM: x86: Virtualize CR4.LAM_SUP Binbin Wu
2023-03-19 8:49 ` [PATCH v6 4/7] KVM: x86: Virtualize CR3.LAM_{U48,U57} Binbin Wu
2023-03-30 8:33 ` Yang, Weijiang
2023-03-30 8:40 ` Binbin Wu
2023-03-19 8:49 ` [PATCH v6 5/7] KVM: x86: Introduce untag_addr() in kvm_x86_ops Binbin Wu
2023-03-20 12:07 ` Chao Gao
2023-03-20 12:23 ` Binbin Wu
2023-03-29 1:54 ` Binbin Wu
2023-03-19 8:49 ` [PATCH v6 6/7] KVM: x86: Untag address when LAM applicable Binbin Wu
2023-03-20 11:51 ` Chao Gao
2023-03-20 11:56 ` Binbin Wu
2023-03-20 12:04 ` Binbin Wu [this message]
2023-03-29 5:02 ` Binbin Wu
2023-03-19 8:49 ` [PATCH v6 7/7] KVM: x86: Expose LAM feature to userspace VMM Binbin Wu
2023-03-20 8:57 ` Chao Gao
2023-03-20 12:00 ` Binbin Wu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=b5a90bf8-5f13-6250-1ecd-c42df9789aba@linux.intel.com \
--to=binbin.wu@linux.intel.com \
--cc=chao.gao@intel.com \
--cc=kvm@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=robert.hu@linux.intel.com \
--cc=seanjc@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).