* [RESEND PATCH v5] x86/boot: Don't return encryption mask from __startup_64()
From: Khalid Ali @ 2025-06-25 16:02 UTC
To: tglx, mingo, bp, dave.hansen
Cc: x86, hpa, linux-kernel, Khalid Ali, Kai Huang
From: Khalid Ali <khaliidcaliy@gmail.com>
Currently, __startup_64() returns the encryption mask to the caller; however,
the caller can access the encryption mask directly.
C code can access the encryption mask by including
arch/x86/include/asm/setup.h and calling sme_get_me_mask(). Assembly
code can access it directly via the "sme_me_mask" variable.
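For example, a C-side user could fold the mask in itself; an illustrative
sketch only (the helper below is hypothetical and not part of this patch):
	#include <asm/setup.h>	/* provides sme_get_me_mask(), as noted above */

	/* sme_get_me_mask() returns 0 when SME is not active. */
	static inline unsigned long pgd_with_sme_mask(unsigned long pgd_pa)
	{
		return pgd_pa | sme_get_me_mask();
	}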
This patch accounts for that by adjusting __startup_64() to not return the
encryption mask, and by updating startup_64() to access "sme_me_mask" only if
CONFIG_AMD_MEM_ENCRYPT is set.
This cleans up the function and improves separation of concerns:
__startup_64() should focus on actions like encrypting the kernel and
let the caller retrieve the mask directly.
Changes in v5:
* Improve the commit message for clarity.
* Fix some issues reported by the kernel test robot.
* Add Kai Huang's Acked-by tag.
Signed-off-by: Khalid Ali <khaliidcaliy@gmail.com>
Acked-by: Kai Huang <kai.huang@intel.com>
---
arch/x86/boot/startup/map_kernel.c | 11 +++--------
arch/x86/include/asm/setup.h | 2 +-
arch/x86/kernel/head_64.S | 8 +++-----
3 files changed, 7 insertions(+), 14 deletions(-)
diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
index 332dbe6688c4..0425d49be16e 100644
--- a/arch/x86/boot/startup/map_kernel.c
+++ b/arch/x86/boot/startup/map_kernel.c
@@ -30,7 +30,7 @@ static inline bool check_la57_support(void)
return true;
}
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+static void __head sme_postprocess_startup(struct boot_params *bp,
pmdval_t *pmd,
unsigned long p2v_offset)
{
@@ -68,11 +68,6 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
}
}
- /*
- * Return the SME encryption mask (if SME is active) to be used as a
- * modifier for the initial pgdir entry programmed into CR3.
- */
- return sme_get_me_mask();
}
/*
@@ -84,7 +79,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
* the 1:1 mapping of memory. Kernel virtual addresses can be determined by
* subtracting p2v_offset from the RIP-relative address.
*/
-unsigned long __head __startup_64(unsigned long p2v_offset,
+void __head __startup_64(unsigned long p2v_offset,
struct boot_params *bp)
{
pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
@@ -213,5 +208,5 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
for (; i < PTRS_PER_PMD; i++)
pmd[i] &= ~_PAGE_PRESENT;
- return sme_postprocess_startup(bp, pmd, p2v_offset);
+ sme_postprocess_startup(bp, pmd, p2v_offset);
}
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 692af46603a1..29ea24bb85ff 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -50,7 +50,7 @@ extern unsigned long acpi_realmode_flags;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
-extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
+extern void __startup_64(unsigned long p2v_offset, struct boot_params *bp);
extern void startup_64_setup_gdt_idt(void);
extern void startup_64_load_idt(void *vc_handler);
extern void early_setup_idt(void);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 4d654ed7982e..c46a175a2a12 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -106,18 +106,16 @@ SYM_CODE_START_NOALIGN(startup_64)
/*
* Perform pagetable fixups. Additionally, if SME is active, encrypt
- * the kernel and retrieve the modifier (SME encryption mask if SME
- * is active) to be added to the initial pgdir entry that will be
- * programmed into CR3.
+ * the kernel.
*/
movq %r15, %rsi
call __startup_64
/* Form the CR3 value being sure to include the CR3 modifier */
- leaq early_top_pgt(%rip), %rcx
- addq %rcx, %rax
+ leaq early_top_pgt(%rip), %rax
#ifdef CONFIG_AMD_MEM_ENCRYPT
+ addq sme_me_mask(%rip), %rax
mov %rax, %rdi
/*
--
2.49.0
* Re: [RESEND PATCH v5] x86/boot: Don't return encryption mask from __startup_64()
From: Thomas Gleixner @ 2025-06-30 16:42 UTC
To: Khalid Ali, mingo, bp, dave.hansen
Cc: x86, hpa, linux-kernel, Khalid Ali, Kai Huang
Khalid!
On Wed, Jun 25 2025 at 16:02, Khalid Ali wrote:
> From: Khalid Ali <khaliidcaliy@gmail.com>
>
> Currently, __startup_64() returns the encryption mask to the caller; however,
> the caller can access the encryption mask directly.
>
> C code can access the encryption mask by including
> arch/x86/include/asm/setup.h and calling sme_get_me_mask(). Assembly
> code can access it directly via the "sme_me_mask" variable.
>
> This patch accounts for that by adjusting __startup_64() to not return the
This patch.... I pointed you to the tip tree documentation before ....
> encryption mask, and by updating startup_64() to access "sme_me_mask" only if
> CONFIG_AMD_MEM_ENCRYPT is set.
>
> This cleans up the function and improves separation of concerns:
> __startup_64() should focus on actions like encrypting the kernel and
> let the caller retrieve the mask directly.
>
> Changes in v5:
> * Improve the commit message for clarity.
> * Fix some issues reported by the kernel test robot.
> * Add Kai Huang's Acked-by tag.
Please put this behind the --- separator. It's not part of the change
log. That's documented too.
> Signed-off-by: Khalid Ali <khaliidcaliy@gmail.com>
> Acked-by: Kai Huang <kai.huang@intel.com>
> ---
> arch/x86/boot/startup/map_kernel.c | 11 +++--------
> arch/x86/include/asm/setup.h | 2 +-
> arch/x86/kernel/head_64.S | 8 +++-----
> 3 files changed, 7 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
> index 332dbe6688c4..0425d49be16e 100644
> --- a/arch/x86/boot/startup/map_kernel.c
> +++ b/arch/x86/boot/startup/map_kernel.c
> @@ -30,7 +30,7 @@ static inline bool check_la57_support(void)
> return true;
> }
>
> -static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
> +static void __head sme_postprocess_startup(struct boot_params *bp,
> pmdval_t *pmd,
> unsigned long p2v_offset)
See documentation about parameter alignment.
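I.e. the continuation lines want to line up with the first argument
(whitespace shown for illustration only):
	static void __head sme_postprocess_startup(struct boot_params *bp,
						   pmdval_t *pmd,
						   unsigned long p2v_offset)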
> {
> @@ -68,11 +68,6 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
> }
> }
>
> - /*
> - * Return the SME encryption mask (if SME is active) to be used as a
> - * modifier for the initial pgdir entry programmed into CR3.
> - */
> - return sme_get_me_mask();
> }
>
> /*
> @@ -84,7 +79,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
> * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
> * subtracting p2v_offset from the RIP-relative address.
> */
> -unsigned long __head __startup_64(unsigned long p2v_offset,
> +void __head __startup_64(unsigned long p2v_offset,
> struct boot_params *bp)
Ditto
> /*
> * Perform pagetable fixups. Additionally, if SME is active, encrypt
> - * the kernel and retrieve the modifier (SME encryption mask if SME
> - * is active) to be added to the initial pgdir entry that will be
> - * programmed into CR3.
> + * the kernel.
Why are you dropping valuable information from that comment, instead of
moving the important information about the SME mask next to the code
which retrieves it?
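E.g. something along these lines (an illustrative sketch, not a demand for
this exact wording):
	/* Form the CR3 value being sure to include the CR3 modifier */
	leaq	early_top_pgt(%rip), %rax
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Add the SME encryption mask (if SME is active) as a modifier
	 * to the initial pgdir entry that will be programmed into CR3.
	 */
	addq	sme_me_mask(%rip), %rax
	/* ... remainder of the CONFIG_AMD_MEM_ENCRYPT block unchanged ... */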
Thanks,
tglx