From: "Gupta, Pankaj" <pankaj.gupta@amd.com>
To: Tom Lendacky <thomas.lendacky@amd.com>,
	linux-kernel@vger.kernel.org, x86@kernel.org,
	linux-coco@lists.linux.dev, svsm-devel@coconut-svsm.dev
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Andy Lutomirski <luto@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Dan Williams <dan.j.williams@intel.com>,
	Michael Roth <michael.roth@amd.com>,
	Ashish Kalra <ashish.kalra@amd.com>
Subject: Re: [PATCH v3 06/14] x86/sev: Use the SVSM to create a vCPU when not in VMPL0
Date: Fri, 12 Apr 2024 17:28:12 +0200
Message-ID: <c17d72ad-544f-0c3c-e250-a78041bfcc16@amd.com>
In-Reply-To: <b91122a1faf096ec89bb49653d7959158a614528.1711405593.git.thomas.lendacky@amd.com>

On 3/25/2024 11:26 PM, Tom Lendacky wrote:
> Using the RMPADJUST instruction, the VMSA attribute can only be changed
> at VMPL0. An SVSM will be present when running at VMPL1 or a lower
> privilege level.
> 
> When an SVSM is present, use the SVSM_CORE_CREATE_VCPU call or the
> SVSM_CORE_DELETE_VCPU call to perform VMSA attribute changes. Use the
> VMPL level supplied by the SVSM when setting the VMPL field in the VMSA
> and when starting the AP.
> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
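
A note for anyone skimming the hunks below: the SVSM calling convention
packs the protocol number into the upper 32 bits of RAX and the call ID
into the lower 32 bits, which is all SVSM_CORE_CALL() does. A quick
sketch of the resulting values, derived only from the macros in the
header hunk that follows (the core protocol is 0):

	/* SVSM_CORE_CALL(x) == ((0ULL << 32) | (x)) */
	SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);	/* RAX = 0x2: protocol 0, call 2 */
	SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);	/* RAX = 0x3: protocol 0, call 3 */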

> ---
>   arch/x86/include/asm/sev.h |  2 ++
>   arch/x86/kernel/sev.c      | 60 +++++++++++++++++++++++++++++++++-----
>   2 files changed, 54 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
> index 204f0a4857d6..d7be613b7372 100644
> --- a/arch/x86/include/asm/sev.h
> +++ b/arch/x86/include/asm/sev.h
> @@ -229,6 +229,8 @@ struct svsm_call {
>   #define SVSM_CORE_CALL(x)		((0ULL << 32) | (x))
>   #define SVSM_CORE_REMAP_CA		0
>   #define SVSM_CORE_PVALIDATE		1
> +#define SVSM_CORE_CREATE_VCPU		2
> +#define SVSM_CORE_DELETE_VCPU		3
>   
>   #ifdef CONFIG_AMD_MEM_ENCRYPT
>   extern void __sev_es_ist_enter(struct pt_regs *regs);
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index d3e182d69d65..ea8b43a0f01b 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -1016,7 +1016,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
>   	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
>   }
>   
> -static int snp_set_vmsa(void *va, bool vmsa)
> +static int base_snp_set_vmsa(void *va, bool vmsa)
>   {
>   	u64 attrs;
>   
> @@ -1034,6 +1034,40 @@ static int snp_set_vmsa(void *va, bool vmsa)
>   	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
>   }
>   
> +static int svsm_snp_set_vmsa(void *va, void *caa, int apic_id, bool vmsa)
> +{
> +	struct svsm_call call = {};
> +	unsigned long flags;
> +	int ret;
> +
> +	local_irq_save(flags);
> +
> +	call.caa = this_cpu_read(svsm_caa);
> +	call.rcx = __pa(va);
> +
> +	if (vmsa) {
> +		/* Protocol 0, Call ID 2 */
> +		call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
> +		call.rdx = __pa(caa);
> +		call.r8  = apic_id;
> +	} else {
> +		/* Protocol 0, Call ID 3 */
> +		call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
> +	}
> +
> +	ret = svsm_protocol(&call);
> +
> +	local_irq_restore(flags);
> +
> +	return ret;
> +}
> +
> +static int snp_set_vmsa(void *va, void *caa, int apic_id, bool vmsa)
> +{
> +	return vmpl ? svsm_snp_set_vmsa(va, caa, apic_id, vmsa)
> +		    : base_snp_set_vmsa(va, vmsa);
> +}
> +
>   #define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
>   #define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
>   #define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
> @@ -1065,11 +1099,11 @@ static void *snp_alloc_vmsa_page(void)
>   	return page_address(p + 1);
>   }
>   
> -static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
> +static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
>   {
>   	int err;
>   
> -	err = snp_set_vmsa(vmsa, false);
> +	err = snp_set_vmsa(vmsa, NULL, apic_id, false);
>   	if (err)
>   		pr_err("clear VMSA page failed (%u), leaking page\n", err);
>   	else
> @@ -1080,6 +1114,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
>   {
>   	struct sev_es_save_area *cur_vmsa, *vmsa;
>   	struct ghcb_state state;
> +	struct svsm_ca *caa;
>   	unsigned long flags;
>   	struct ghcb *ghcb;
>   	u8 sipi_vector;
> @@ -1126,6 +1161,12 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
>   	if (!vmsa)
>   		return -ENOMEM;
>   
> +	/*
> +	 * If an SVSM is present, then the SVSM CAA per-CPU variable will
> +	 * have a value, otherwise it will be NULL.
> +	 */
> +	caa = per_cpu(svsm_caa, cpu);
> +
>   	/* CR4 should maintain the MCE value */
>   	cr4 = native_read_cr4() & X86_CR4_MCE;
>   
> @@ -1173,11 +1214,11 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
>   	 *   VMPL level
>   	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
>   	 */
> -	vmsa->vmpl		= 0;
> +	vmsa->vmpl		= vmpl;
>   	vmsa->sev_features	= sev_status >> 2;
>   
>   	/* Switch the page over to a VMSA page now that it is initialized */
> -	ret = snp_set_vmsa(vmsa, true);
> +	ret = snp_set_vmsa(vmsa, caa, apic_id, true);
>   	if (ret) {
>   		pr_err("set VMSA page failed (%u)\n", ret);
>   		free_page((unsigned long)vmsa);
> @@ -1193,7 +1234,10 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
>   	vc_ghcb_invalidate(ghcb);
>   	ghcb_set_rax(ghcb, vmsa->sev_features);
>   	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
> -	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
> +	ghcb_set_sw_exit_info_1(ghcb,
> +				((u64)apic_id << 32)	|
> +				((u64)vmpl << 16)	|
> +				SVM_VMGEXIT_AP_CREATE);
>   	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
>   
>   	sev_es_wr_ghcb_msr(__pa(ghcb));
> @@ -1211,13 +1255,13 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
>   
>   	/* Perform cleanup if there was an error */
>   	if (ret) {
> -		snp_cleanup_vmsa(vmsa);
> +		snp_cleanup_vmsa(vmsa, apic_id);
>   		vmsa = NULL;
>   	}
>   
>   	/* Free up any previous VMSA page */
>   	if (cur_vmsa)
> -		snp_cleanup_vmsa(cur_vmsa);
> +		snp_cleanup_vmsa(cur_vmsa, apic_id);
>   
>   	/* Record the current VMSA page */
>   	per_cpu(sev_vmsa, cpu) = vmsa;
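
One detail worth highlighting in the AP creation path: SW_EXIT_INFO_1 now
encodes three fields instead of two, with the APIC ID in bits 63:32, the
target VMPL in bits 31:16, and the request type in bits 15:0. A small
illustrative helper, hypothetical and written only to make the bit layout
explicit (the patch open-codes this in wakeup_cpu_via_vmgexit()):

	static inline u64 ap_create_exit_info_1(u32 apic_id, unsigned int vmpl)
	{
		return ((u64)apic_id << 32) |
		       ((u64)vmpl << 16)    |
		       SVM_VMGEXIT_AP_CREATE;	/* request type 1 */
	}

For example, a guest running at VMPL2 that creates the vCPU for APIC ID 1
would pass 0x0000000100020001 in SW_EXIT_INFO_1.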
