public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Chao Gao <chao.gao@intel.com>
To: Binbin Wu <binbin.wu@linux.intel.com>
Cc: <kvm@vger.kernel.org>, <seanjc@google.com>, <pbonzini@redhat.com>,
	<kai.huang@intel.com>, <xuelian.guo@intel.com>,
	<robert.hu@linux.intel.com>
Subject: Re: [PATCH v7 3/5] KVM: x86: Introduce untag_addr() in kvm_x86_ops
Date: Wed, 19 Apr 2023 10:30:10 +0800	[thread overview]
Message-ID: <ZD9SMgA2h8XUXsBw@chao-env> (raw)
In-Reply-To: <20230404130923.27749-4-binbin.wu@linux.intel.com>

On Tue, Apr 04, 2023 at 09:09:21PM +0800, Binbin Wu wrote:
>Introduce a new interface untag_addr() to kvm_x86_ops to untag the metadata
>from linear address. Implement LAM version in VMX and dummy version in SVM.
>
>When enabled feature like Intel Linear Address Masking or AMD Upper
>Address Ignore, linear address may be tagged with metadata. Linear
>address should be checked for modified canonicality and untagged in
>instruction emulations or vmexit handling if LAM or UAI is applicable.
>
>Introduce untag_addr() to kvm_x86_ops to hide the code related to vendor
>specific details.
>- For VMX, LAM version is implemented.
>  LAM has a modified canonical check when applicable:
>  * LAM_S48                : [ 1 ][ metadata ][ 1 ]
>                               63               47
>  * LAM_U48                : [ 0 ][ metadata ][ 0 ]
>                               63               47
>  * LAM_S57                : [ 1 ][ metadata ][ 1 ]
>                               63               56
>  * LAM_U57 + 5-lvl paging : [ 0 ][ metadata ][ 0 ]
>                               63               56
>  * LAM_U57 + 4-lvl paging : [ 0 ][ metadata ][ 0...0 ]
>                               63               56..47
>  If LAM is applicable to certain address, untag the metadata bits and
>  replace them with the value of bit 47 (LAM48) or bit 56 (LAM57). Later
>  the untagged address will do legacy canonical check. So that LAM canonical
>  check and mask can be covered by "untag + legacy canonical check".
>

>  For cases LAM is not applicable, 'flags' is passed to the interface
>  to skip untag.

The "flags" can be dropped. Callers can simply skip the call of .untag_addr().

>
>- For SVM, add a dummy version to do nothing, but return the original
>  address.
>
>Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
>Tested-by: Xuelian Guo <xuelian.guo@intel.com>
>---
> arch/x86/include/asm/kvm-x86-ops.h |  1 +
> arch/x86/include/asm/kvm_host.h    |  5 +++
> arch/x86/kvm/svm/svm.c             |  7 ++++
> arch/x86/kvm/vmx/vmx.c             | 60 ++++++++++++++++++++++++++++++
> arch/x86/kvm/vmx/vmx.h             |  2 +
> 5 files changed, 75 insertions(+)
>
>diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
>index 8dc345cc6318..7d63d1b942ac 100644
>--- a/arch/x86/include/asm/kvm-x86-ops.h
>+++ b/arch/x86/include/asm/kvm-x86-ops.h
>@@ -52,6 +52,7 @@ KVM_X86_OP(cache_reg)
> KVM_X86_OP(get_rflags)
> KVM_X86_OP(set_rflags)
> KVM_X86_OP(get_if_flag)
>+KVM_X86_OP(untag_addr)
> KVM_X86_OP(flush_tlb_all)
> KVM_X86_OP(flush_tlb_current)
> KVM_X86_OP_OPTIONAL(tlb_remote_flush)
>diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>index 498d2b5e8dc1..cb674ec826d4 100644
>--- a/arch/x86/include/asm/kvm_host.h
>+++ b/arch/x86/include/asm/kvm_host.h
>@@ -69,6 +69,9 @@
> #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS	(KVM_X86_NOTIFY_VMEXIT_ENABLED | \
> 						 KVM_X86_NOTIFY_VMEXIT_USER)
> 
>+/* flags for kvm_x86_ops::untag_addr() */
>+#define KVM_X86_UNTAG_ADDR_SKIP_LAM	_BITULL(0)
>+
> /* x86-specific vcpu->requests bit members */
> #define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
> #define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
>@@ -1607,6 +1610,8 @@ struct kvm_x86_ops {
> 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
> 	bool (*get_if_flag)(struct kvm_vcpu *vcpu);
> 
>+	u64 (*untag_addr)(struct kvm_vcpu *vcpu, u64 la, u64 flags);
>+
> 	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
> 	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
> 	int  (*tlb_remote_flush)(struct kvm *kvm);
>diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>index 252e7f37e4e2..a6e6bd09642b 100644
>--- a/arch/x86/kvm/svm/svm.c
>+++ b/arch/x86/kvm/svm/svm.c
>@@ -4696,6 +4696,11 @@ static int svm_vm_init(struct kvm *kvm)
> 	return 0;
> }
> 
>+static u64 svm_untag_addr(struct kvm_vcpu *vcpu, u64 addr, u64 flags)
>+{
>+	return addr;
>+}
>+
> static struct kvm_x86_ops svm_x86_ops __initdata = {
> 	.name = KBUILD_MODNAME,
> 
>@@ -4745,6 +4750,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
> 	.set_rflags = svm_set_rflags,
> 	.get_if_flag = svm_get_if_flag,
> 
>+	.untag_addr = svm_untag_addr,
>+
> 	.flush_tlb_all = svm_flush_tlb_current,
> 	.flush_tlb_current = svm_flush_tlb_current,
> 	.flush_tlb_gva = svm_flush_tlb_gva,
>diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>index 4d329ee9474c..73cc495bd0da 100644
>--- a/arch/x86/kvm/vmx/vmx.c
>+++ b/arch/x86/kvm/vmx/vmx.c
>@@ -8137,6 +8137,64 @@ static void vmx_vm_destroy(struct kvm *kvm)
> 	free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
> }
> 
>+
>+#define LAM_S57_EN_MASK (X86_CR4_LAM_SUP | X86_CR4_LA57)
>+
>+static inline int lam_sign_extend_bit(bool user, struct kvm_vcpu *vcpu)

Drop "inline" and let compilers decide whether to inline the function.

And it is better to swap the two parameters to align with the convention
in KVM.

>+{
>+	u64 cr3, cr4;
>+
>+	if (user) {
>+		cr3 = kvm_read_cr3(vcpu);
>+		if (!!(cr3 & X86_CR3_LAM_U57))

It is weird to use double negation "!!" in if statements. I prefer to drop it.

>+			return 56;
>+		if (!!(cr3 & X86_CR3_LAM_U48))
>+			return 47;
>+	} else {
>+		cr4 = kvm_read_cr4_bits(vcpu, LAM_S57_EN_MASK);
>+		if (cr4 == LAM_S57_EN_MASK)
>+			return 56;
>+		if (!!(cr4 & X86_CR4_LAM_SUP))
>+			return 47;
>+	}
>+	return -1;
>+}
>+
>+/*
>+ * Only called in 64-bit mode.
>+ *
>+ * Metadata bits are [62:48] in LAM48 and [62:57] in LAM57. Mask metadata in
>+ * pointers by sign-extending the value of bit 47 (LAM48) or 56 (LAM57).
>+ * The resulting address after untagging isn't guaranteed to be canonical.
>+ * Callers should perform the original canonical check and raise #GP/#SS if the
>+ * address is non-canonical.
>+ */
>+u64 vmx_untag_addr(struct kvm_vcpu *vcpu, u64 addr, u64 flags)
>+{
>+	int sign_ext_bit;
>+
>+	/*
>+	 * Instead of calling relatively expensive guest_cpuid_has(), just check
>+	 * LAM_U48 in cr3_ctrl_bits. If not set, vCPU doesn't supports LAM.
>+	 */
>+	if (!(vcpu->arch.cr3_ctrl_bits & X86_CR3_LAM_U48) ||
>+	    !!(flags & KVM_X86_UNTAG_ADDR_SKIP_LAM))
>+		return addr;
>+
>+	if(!is_64_bit_mode(vcpu)){
>+		WARN_ONCE(1, "Only be called in 64-bit mode");

use WARN_ON_ONCE() in case it can be triggered by guests, i.e.,

if (WARN_ON_ONCE(!is_64_bit_mode(vcpu)))
	return addr;

>+		return addr;
>+	}
>+
>+	sign_ext_bit = lam_sign_extend_bit(!(addr >> 63), vcpu);
>+
>+	if (sign_ext_bit < 0)
>+		return addr;
>+
>+	return (sign_extend64(addr, sign_ext_bit) & ~BIT_ULL(63)) |
>+	       (addr & BIT_ULL(63));
>+}
>+
> static struct kvm_x86_ops vmx_x86_ops __initdata = {
> 	.name = KBUILD_MODNAME,
> 
>@@ -8185,6 +8243,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
> 	.set_rflags = vmx_set_rflags,
> 	.get_if_flag = vmx_get_if_flag,
> 
>+	.untag_addr = vmx_untag_addr,
>+
> 	.flush_tlb_all = vmx_flush_tlb_all,
> 	.flush_tlb_current = vmx_flush_tlb_current,
> 	.flush_tlb_gva = vmx_flush_tlb_gva,
>diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
>index 2acdc54bc34b..79855b9a4aca 100644
>--- a/arch/x86/kvm/vmx/vmx.h
>+++ b/arch/x86/kvm/vmx/vmx.h
>@@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
> u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
> u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
> 
>+u64 vmx_untag_addr(struct kvm_vcpu *vcpu, u64 addr, u64 flags);
>+
> static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
> 					     int type, bool value)
> {
>-- 
>2.25.1
>

  parent reply	other threads:[~2023-04-19  2:32 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-04-04 13:09 [PATCH v7 0/5] Linear Address Masking (LAM) KVM Enabling Binbin Wu
2023-04-04 13:09 ` [PATCH v7 1/5] KVM: x86: Virtualize CR4.LAM_SUP Binbin Wu
2023-04-04 13:09 ` [PATCH v7 2/5] KVM: x86: Virtualize CR3.LAM_{U48,U57} Binbin Wu
2023-04-06 12:57   ` Huang, Kai
2023-04-09 11:36     ` Binbin Wu
2023-04-11 23:11       ` Huang, Kai
2023-04-12 11:58   ` Huang, Kai
2023-04-13  1:36     ` Binbin Wu
2023-04-13  2:27       ` Huang, Kai
2023-04-13  4:45         ` Binbin Wu
2023-04-13  9:13           ` Huang, Kai
2023-04-21  6:35             ` Binbin Wu
2023-04-21 11:43               ` Huang, Kai
2023-04-21 15:32                 ` Chao Gao
2023-04-22  4:51                   ` Chao Gao
2023-04-22  8:14                     ` Huang, Kai
2023-04-22  3:32                 ` Binbin Wu
2023-04-22  4:43                   ` Chao Gao
2023-04-27 13:19                     ` Huang, Kai
2023-04-29  4:56                       ` Binbin Wu
2023-04-25 22:48                   ` Huang, Kai
2023-04-26  3:05                     ` Chao Gao
2023-04-26  5:13                       ` Binbin Wu
2023-04-26  8:44                         ` Huang, Kai
2023-04-26  8:50                           ` Binbin Wu
2023-04-26  8:43                       ` Huang, Kai
2023-04-26 10:52                         ` Binbin Wu
2023-04-27 13:23                           ` Huang, Kai
2023-04-17  7:24   ` Chao Gao
2023-04-17  8:02     ` Binbin Wu
2023-04-04 13:09 ` [PATCH v7 3/5] KVM: x86: Introduce untag_addr() in kvm_x86_ops Binbin Wu
2023-04-18  3:08   ` Zeng Guang
2023-04-18  3:34     ` Binbin Wu
2023-04-19  2:30   ` Chao Gao [this message]
2023-04-19  3:08     ` Binbin Wu
2023-04-21  7:48       ` Binbin Wu
2023-04-21  8:21         ` Chao Gao
2023-04-04 13:09 ` [PATCH v7 4/5] KVM: x86: Untag address when LAM applicable Binbin Wu
2023-04-06 13:20   ` Huang, Kai
2023-04-10  3:35     ` Binbin Wu
2023-04-18  3:28   ` Zeng Guang
2023-04-18  3:38     ` Binbin Wu
2023-04-19  6:43   ` Chao Gao
2023-04-21  7:57     ` Binbin Wu
2023-04-21  8:36       ` Chao Gao
2023-04-21  9:13         ` Binbin Wu
2023-04-04 13:09 ` [PATCH v7 5/5] KVM: x86: Expose LAM feature to userspace VMM Binbin Wu
2023-04-21  9:40 ` [PATCH v7 0/5] Linear Address Masking (LAM) KVM Enabling Binbin Wu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZD9SMgA2h8XUXsBw@chao-env \
    --to=chao.gao@intel.com \
    --cc=binbin.wu@linux.intel.com \
    --cc=kai.huang@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=robert.hu@linux.intel.com \
    --cc=seanjc@google.com \
    --cc=xuelian.guo@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox