From: Dexuan Cui <decui@microsoft.com>
To: ak@linux.intel.com, arnd@arndb.de, bp@alien8.de,
	brijesh.singh@amd.com, dan.j.williams@intel.com,
	dave.hansen@intel.com, dave.hansen@linux.intel.com,
	haiyangz@microsoft.com, hpa@zytor.com, jane.chu@oracle.com,
	kirill.shutemov@linux.intel.com, kys@microsoft.com,
	linux-hyperv@vger.kernel.org, luto@kernel.org, mingo@redhat.com,
	peterz@infradead.org, rostedt@goodmis.org,
	sathyanarayanan.kuppuswamy@linux.intel.com, seanjc@google.com,
	tglx@linutronix.de, tony.luck@intel.com, wei.liu@kernel.org,
	Jason@zx2c4.com, nik.borisov@suse.com, mikelley@microsoft.com
Cc: x86@kernel.org, linux-kernel@vger.kernel.org,
	linux-arch@vger.kernel.org, Tianyu.Lan@microsoft.com,
	rick.p.edgecombe@intel.com, andavis@redhat.com,
	mheslin@redhat.com, vkuznets@redhat.com, xiaoyao.li@intel.com,
	Dexuan Cui <decui@microsoft.com>
Subject: [PATCH v3 10/10] x86/hyperv: Move the code in ivm.c around to avoid unnecessary ifdef's
Date: Thu, 24 Aug 2023 01:07:12 -0700
Message-ID: <20230824080712.30327-11-decui@microsoft.com>
In-Reply-To: <20230824080712.30327-1-decui@microsoft.com>

Group the code as follows so that we can avoid too many ifdef's:

  Data only used in an SNP VM with the paravisor;
  Functions only used in an SNP VM with the paravisor;

  Data only used in an SNP VM without the paravisor;
  Functions only used in an SNP VM without the paravisor;

  Functions only used in a TDX VM, with and without the paravisor;

  Functions used in an SNP or TDX VM, when the paravisor is present;

  Functions always used, even in a regular non-CoCo VM.

No functional change.

Signed-off-by: Dexuan Cui <decui@microsoft.com>
---

   This patch appears for the first time in v3.
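
   For reviewers, here is an abridged outline of how ivm.c is laid out
   after this patch. This is a sketch, not the literal file: the names
   are taken from the hunks below, and everything else is elided.

     #ifdef CONFIG_AMD_MEM_ENCRYPT
     /* Data/functions only used in an SNP VM with the paravisor:
      * hv_ghcb_version, hv_ghcb_hypercall(), hv_ghcb_msr_read()/_write()
      */
     /* Data/functions only used in an SNP VM without the paravisor:
      * ap_start_input_arg[], ap_start_stack[], hv_sev_vmsa,
      * snp_set_vmsa(), snp_cleanup_vmsa(), hv_snp_boot_ap()
      */
     #else
     /* empty stubs, e.g. hv_ghcb_msr_read()/_write() */
     #endif

     #ifdef CONFIG_INTEL_TDX_GUEST
     /* Functions used in a TDX VM, with and without the paravisor:
      * hv_tdx_msr_read()/_write(), hv_tdx_hypercall()
      */
     #else
     /* empty stubs */
     #endif

     #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
     /* Functions used in an SNP or TDX VM when the paravisor is present:
      * hv_ivm_msr_read()/_write(), hv_mark_gpa_visibility(),
      * hv_is_private_mmio(), hv_vtom_init(), ...
      */
     #endif

     /* Always built, even for a regular non-CoCo VM:
      * hv_isolation_type_snp(), hv_isolation_type_tdx()
      */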

 arch/x86/hyperv/ivm.c | 309 ++++++++++++++++++++----------------------
 1 file changed, 150 insertions(+), 159 deletions(-)

diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 3d48f823582c..8fb3b28670e9 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -30,9 +30,6 @@
 
 #define GHCB_USAGE_HYPERV_CALL	1
 
-static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
-static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
-
 union hv_ghcb {
 	struct ghcb ghcb;
 	struct {
@@ -66,10 +63,10 @@ union hv_ghcb {
 	} hypercall;
 } __packed __aligned(HV_HYP_PAGE_SIZE);
 
-static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
-
+/* Only used in an SNP VM with the paravisor */
 static u16 hv_ghcb_version __ro_after_init;
 
+/* Functions only used in an SNP VM with the paravisor go here. */
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 {
 	union hv_ghcb *hv_ghcb;
@@ -247,6 +244,140 @@ static void hv_ghcb_msr_read(u64 msr, u64 *value)
 	local_irq_restore(flags);
 }
 
+/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
+static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
+static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
+
+/* Functions only used in an SNP VM without the paravisor go here. */
+
+#define hv_populate_vmcb_seg(seg, gdtr_base)			\
+do {								\
+	if (seg.selector) {					\
+		seg.base = 0;					\
+		seg.limit = HV_AP_SEGMENT_LIMIT;		\
+		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
+		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
+	}							\
+} while (0)							\
+
+static int snp_set_vmsa(void *va, bool vmsa)
+{
+	u64 attrs;
+
+	/*
+	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
+	 * using the RMPADJUST instruction. However, for the instruction to
+	 * succeed it must target the permissions of a lesser privileged
+	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
+	 * instruction in the AMD64 APM Volume 3).
+	 */
+	attrs = 1;
+	if (vmsa)
+		attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+}
+
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+{
+	int err;
+
+	err = snp_set_vmsa(vmsa, false);
+	if (err)
+		pr_err("clear VMSA page failed (%u), leaking page\n", err);
+	else
+		free_page((unsigned long)vmsa);
+}
+
+int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+{
+	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+		__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	struct sev_es_save_area *cur_vmsa;
+	struct desc_ptr gdtr;
+	u64 ret, retry = 5;
+	struct hv_enable_vp_vtl *start_vp_input;
+	unsigned long flags;
+
+	if (!vmsa)
+		return -ENOMEM;
+
+	native_store_gdt(&gdtr);
+
+	vmsa->gdtr.base = gdtr.address;
+	vmsa->gdtr.limit = gdtr.size;
+
+	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
+	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
+
+	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
+	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
+
+	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
+	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
+
+	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
+	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
+
+	vmsa->efer = native_read_msr(MSR_EFER);
+
+	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
+	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
+	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
+
+	vmsa->xcr0 = 1;
+	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
+	vmsa->rip = (u64)secondary_startup_64_no_verify;
+	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
+
+	/*
+	 * Set the SNP-specific fields for this VMSA:
+	 *   VMPL level
+	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
+	 */
+	vmsa->vmpl = 0;
+	vmsa->sev_features = sev_status >> 2;
+
+	ret = snp_set_vmsa(vmsa, true);
+	if (!ret) {
+		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+		free_page((u64)vmsa);
+		return ret;
+	}
+
+	local_irq_save(flags);
+	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
+	memset(start_vp_input, 0, sizeof(*start_vp_input));
+	start_vp_input->partition_id = -1;
+	start_vp_input->vp_index = cpu;
+	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
+	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+
+	do {
+		ret = hv_do_hypercall(HVCALL_START_VP,
+				      start_vp_input, NULL);
+	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
+
+	local_irq_restore(flags);
+
+	if (!hv_result_success(ret)) {
+		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
+		snp_cleanup_vmsa(vmsa);
+		vmsa = NULL;
+	}
+
+	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
+	/* Free up any previous VMSA page */
+	if (cur_vmsa)
+		snp_cleanup_vmsa(cur_vmsa);
+
+	/* Record the current VMSA page */
+	per_cpu(hv_sev_vmsa, cpu) = vmsa;
+
+	return ret;
+}
+
 #else
 static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
 static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
@@ -282,6 +413,20 @@ static void hv_tdx_msr_read(u64 msr, u64 *val)
 	else
 		*val = args.r11;
 }
+
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
+{
+	struct tdx_hypercall_args args = { };
+
+	args.r10 = control;
+	args.rdx = param1;
+	args.r8  = param2;
+
+	(void)__tdx_hypercall_ret(&args);
+
+	return args.r11;
+}
+
 #else
 static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
 static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
@@ -309,9 +454,7 @@ void hv_ivm_msr_read(u64 msr, u64 *value)
 	else if (hv_isolation_type_snp())
 		hv_ghcb_msr_read(msr, value);
 }
-#endif
 
-#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
 /*
  * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
  *
@@ -432,141 +575,6 @@ static bool hv_is_private_mmio(u64 addr)
 	return false;
 }
 
-#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
-
-#ifdef CONFIG_AMD_MEM_ENCRYPT
-
-#define hv_populate_vmcb_seg(seg, gdtr_base)			\
-do {								\
-	if (seg.selector) {					\
-		seg.base = 0;					\
-		seg.limit = HV_AP_SEGMENT_LIMIT;		\
-		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
-		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
-	}							\
-} while (0)							\
-
-static int snp_set_vmsa(void *va, bool vmsa)
-{
-	u64 attrs;
-
-	/*
-	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
-	 * using the RMPADJUST instruction. However, for the instruction to
-	 * succeed it must target the permissions of a lesser privileged
-	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
-	 * instruction in the AMD64 APM Volume 3).
-	 */
-	attrs = 1;
-	if (vmsa)
-		attrs |= RMPADJUST_VMSA_PAGE_BIT;
-
-	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
-}
-
-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
-{
-	int err;
-
-	err = snp_set_vmsa(vmsa, false);
-	if (err)
-		pr_err("clear VMSA page failed (%u), leaking page\n", err);
-	else
-		free_page((unsigned long)vmsa);
-}
-
-int hv_snp_boot_ap(int cpu, unsigned long start_ip)
-{
-	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
-		__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	struct sev_es_save_area *cur_vmsa;
-	struct desc_ptr gdtr;
-	u64 ret, retry = 5;
-	struct hv_enable_vp_vtl *start_vp_input;
-	unsigned long flags;
-
-	if (!vmsa)
-		return -ENOMEM;
-
-	native_store_gdt(&gdtr);
-
-	vmsa->gdtr.base = gdtr.address;
-	vmsa->gdtr.limit = gdtr.size;
-
-	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
-	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
-
-	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
-	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
-
-	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
-	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
-
-	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
-	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
-
-	vmsa->efer = native_read_msr(MSR_EFER);
-
-	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
-	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
-	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
-
-	vmsa->xcr0 = 1;
-	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
-	vmsa->rip = (u64)secondary_startup_64_no_verify;
-	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
-
-	/*
-	 * Set the SNP-specific fields for this VMSA:
-	 *   VMPL level
-	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
-	 */
-	vmsa->vmpl = 0;
-	vmsa->sev_features = sev_status >> 2;
-
-	ret = snp_set_vmsa(vmsa, true);
-	if (!ret) {
-		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
-		free_page((u64)vmsa);
-		return ret;
-	}
-
-	local_irq_save(flags);
-	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
-	memset(start_vp_input, 0, sizeof(*start_vp_input));
-	start_vp_input->partition_id = -1;
-	start_vp_input->vp_index = cpu;
-	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
-	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
-
-	do {
-		ret = hv_do_hypercall(HVCALL_START_VP,
-				      start_vp_input, NULL);
-	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
-
-	local_irq_restore(flags);
-
-	if (!hv_result_success(ret)) {
-		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
-		snp_cleanup_vmsa(vmsa);
-		vmsa = NULL;
-	}
-
-	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
-	/* Free up any previous VMSA page */
-	if (cur_vmsa)
-		snp_cleanup_vmsa(cur_vmsa);
-
-	/* Record the current VMSA page */
-	per_cpu(hv_sev_vmsa, cpu) = vmsa;
-
-	return ret;
-}
-
-#endif /* CONFIG_AMD_MEM_ENCRYPT */
-
-#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
-
 void __init hv_vtom_init(void)
 {
 	enum hv_isolation_type type = hv_get_isolation_type();
@@ -654,20 +662,3 @@ bool hv_isolation_type_tdx(void)
 {
 	return static_branch_unlikely(&isolation_type_tdx);
 }
-
-#ifdef CONFIG_INTEL_TDX_GUEST
-
-u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
-{
-	struct tdx_hypercall_args args = { };
-
-	args.r10 = control;
-	args.rdx = param1;
-	args.r8  = param2;
-
-	(void)__tdx_hypercall_ret(&args);
-
-	return args.r11;
-}
-
-#endif
-- 
2.25.1

