linux-arm-kernel.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
From: Oliver Upton <oliver.upton@linux.dev>
To: Mostafa Saleh <smostafa@google.com>
Cc: maz@kernel.org, linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.linux.dev, linux-kernel@vger.kernel.org,
	tabba@google.com, kaleshsingh@google.com, will@kernel.org,
	catalin.marinas@arm.com, yuzenghui@huawei.com,
	suzuki.poulose@arm.com, james.morse@arm.com
Subject: Re: [PATCH] KVM: arm64: Use different pointer authentication keys for pKVM
Date: Fri, 26 May 2023 20:47:52 +0000	[thread overview]
Message-ID: <ZHEa+HAixbYijQTA@linux.dev> (raw)
In-Reply-To: <20230516141531.791492-1-smostafa@google.com>

On Tue, May 16, 2023 at 02:15:31PM +0000, Mostafa Saleh wrote:
> When the kernel is compiled with CONFIG_ARM64_PTR_AUTH_KERNEL, it
> uses Armv8.3-Pauth for return address protection for the kernel code
> including nvhe code in EL2.
> 
> The same keys are used in both the kernel (EL1) and nVHE code (EL2); this
> is fine for nVHE, but not when running in protected mode (pKVM), as the
> host can't be trusted.

But we trust it enough to hand pKVM a fresh set of keys before firing
off? I understand there is some degree of initialization required to get
pKVM off the ground, but I question whether key handoff is strictly
necessary in this case.

There are potentially other sources of randomness directly available at
EL2, such as the SMCCC TRNG ABI or FEAT_RNG. Should pKVM prefer one of
these random implementations and only fall back to host-provided keys if
absolutely necessary?

> The keys for the hypervisor are generated by the kernel before it
> de-privileges; each CPU has different keys. This relies on nVHE code
> not being migratable while running.
> 
> This patch adds host/hyp save/restore for the keys.
> For guest/hyp, they are already handled in common kvm code in
> __guest_enter, where they are saved/restored if they are not
> trapped.

Try to avoid "this patch" or any self-referential language in the
changelog. Just directly state what the patch does:

  Similar to guest entry/exit, start context switching the pointer
  authentication keys on host/entry exit if the feature is in use.

> Signed-off-by: Mostafa Saleh <smostafa@google.com>
> ---
>  arch/arm64/kvm/arm.c           | 26 +++++++++++++++++++++++++
>  arch/arm64/kvm/hyp/nvhe/host.S | 35 +++++++++++++++++++++++++++++++++-
>  2 files changed, 60 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 14391826241c..dd03b52f035d 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -51,6 +51,8 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
>  DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
>  DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
>  
> +DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
> +
>  static bool vgic_present;
>  
>  static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
> @@ -2067,6 +2069,26 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
>  	return 0;
>  }
>  
> +static void pkvm_hyp_init_ptrauth(void)
> +{
> +	struct kvm_cpu_context *hyp_ctxt;
> +	int cpu;
> +
> +	for_each_possible_cpu(cpu) {
> +		hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
> +		hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
> +		hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
> +	}
> +}
> +
>  /* Inits Hyp-mode on all online CPUs */
>  static int __init init_hyp_mode(void)
>  {
> @@ -2228,6 +2250,10 @@ static int __init init_hyp_mode(void)
>  	kvm_hyp_init_symbols();
>  
>  	if (is_protected_kvm_enabled()) {
> +		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
> +		    cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH))
> +			pkvm_hyp_init_ptrauth();
> +
>  		init_cpu_logical_map();
>  
>  		if (!init_psci_relay()) {
> diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
> index b6c0188c4b35..255ba4af911b 100644
> --- a/arch/arm64/kvm/hyp/nvhe/host.S
> +++ b/arch/arm64/kvm/hyp/nvhe/host.S
> @@ -10,6 +10,7 @@
>  #include <asm/kvm_arm.h>
>  #include <asm/kvm_asm.h>
>  #include <asm/kvm_mmu.h>
> +#include <asm/kvm_ptrauth.h>
>  
>  	.text
>  
> @@ -37,10 +38,42 @@ SYM_FUNC_START(__host_exit)
>  
>  	/* Save the host context pointer in x29 across the function call */
>  	mov	x29, x0
> +
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> +alternative_if_not ARM64_HAS_ADDRESS_AUTH
> +b __skip_pauth_save
> +alternative_else_nop_endif
> +
> +alternative_if ARM64_KVM_PROTECTED_MODE
> +	/* Save kernel ptrauth keys. */
> +	add x18, x29, #CPU_APIAKEYLO_EL1
> +	ptrauth_save_state x18, x19, x20
> +
> +	/* Use hyp keys. */
> +	adr_this_cpu x18, kvm_hyp_ctxt, x19
> +	add x18, x18, #CPU_APIAKEYLO_EL1
> +	ptrauth_restore_state x18, x19, x20
> +alternative_else_nop_endif
> +__skip_pauth_save:
> +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
> +
>  	bl	handle_trap
>  
> -	/* Restore host regs x0-x17 */
>  __host_enter_restore_full:
> +	/* Restore kernel keys. */
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> +alternative_if_not ARM64_HAS_ADDRESS_AUTH
> +b __skip_pauth_restore
> +alternative_else_nop_endif
> +
> +alternative_if ARM64_KVM_PROTECTED_MODE
> +	add x18, x29, #CPU_APIAKEYLO_EL1
> +	ptrauth_restore_state x18, x19, x20
> +alternative_else_nop_endif
> +__skip_pauth_restore:
> +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
> +
> +	/* Restore host regs x0-x17 */
>  	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
>  	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
>  	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
> -- 
> 2.40.1.606.ga4b1b128d6-goog
> 

-- 
Thanks,
Oliver

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  reply	other threads:[~2023-05-26 20:48 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-16 14:15 [PATCH] KVM: arm64: Use different pointer authentication keys for pKVM Mostafa Saleh
2023-05-26 20:47 ` Oliver Upton [this message]
2023-05-29 11:17   ` Mostafa Saleh
2023-06-08 21:55     ` Will Deacon
     [not found]       ` <ZIbjULC2p5aTZu8w@google.com>
2023-06-12 19:13         ` Oliver Upton
2023-06-13 12:16 ` Oliver Upton
2023-06-13 16:27   ` Mostafa Saleh
2023-06-14 12:28     ` Mostafa Saleh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZHEa+HAixbYijQTA@linux.dev \
    --to=oliver.upton@linux.dev \
    --cc=catalin.marinas@arm.com \
    --cc=james.morse@arm.com \
    --cc=kaleshsingh@google.com \
    --cc=kvmarm@lists.linux.dev \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=maz@kernel.org \
    --cc=smostafa@google.com \
    --cc=suzuki.poulose@arm.com \
    --cc=tabba@google.com \
    --cc=will@kernel.org \
    --cc=yuzenghui@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).