* [PATCH v3] x86/boot: Don't return encryption mask from __startup_64()
@ 2025-06-17 7:36 Khalid Ali
2025-06-17 7:48 ` Ard Biesheuvel
From: Khalid Ali @ 2025-06-17 7:36 UTC (permalink / raw)
To: tglx, mingo, bp, dave.hansen, x86, ardb
Cc: hpa, linux-kernel, Khalid Ali, kernel test robot
Avoid returning the SME encryption mask from __startup_64(); instead, let
callers retrieve the mask themselves where they need it. The encryption
mask is already available to callers and can be accessed via
sme_get_me_mask() in C code, or directly through the sme_me_mask symbol
in assembly, if CONFIG_AMD_MEM_ENCRYPT is enabled.

This change aligns with how secondary_startup_64_no_verify handles SME
and keeps the behavior consistent. For Intel CPUs, SME is not relevant,
so there is no need to retrieve the mask unless CONFIG_AMD_MEM_ENCRYPT
is enabled.

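For illustration, a minimal sketch (not the in-tree code) of the C-side access
path once the return value is gone; early_cr3 is a hypothetical local used
only for this example:

    /*
     * Sketch only: form the initial CR3 value without relying on a return
     * value from __startup_64().  sme_get_me_mask() evaluates to
     * sme_me_mask when CONFIG_AMD_MEM_ENCRYPT is enabled and to 0
     * otherwise, so the addition is a no-op on non-SME configurations.
     */
    unsigned long early_cr3;    /* hypothetical, for illustration only */

    __startup_64(p2v_offset, bp);               /* no mask returned anymore */
    early_cr3  = (unsigned long)early_top_pgt;  /* base of the initial pgdir */
    early_cr3 += sme_get_me_mask();             /* CR3 modifier, if SME is active */
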
Signed-off-by: Khalid Ali <khaliidcaliy@gmail.com>
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202506171012.Ji3c5sJh-lkp@intel.com/
---
arch/x86/boot/startup/map_kernel.c | 11 +++--------
arch/x86/include/asm/setup.h | 2 +-
arch/x86/kernel/head_64.S | 10 ++++------
3 files changed, 8 insertions(+), 15 deletions(-)
diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
index 332dbe6688c4..6fdb340e9147 100644
--- a/arch/x86/boot/startup/map_kernel.c
+++ b/arch/x86/boot/startup/map_kernel.c
@@ -30,7 +30,7 @@ static inline bool check_la57_support(void)
return true;
}
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+static void __head sme_postprocess_startup(struct boot_params *bp,
pmdval_t *pmd,
unsigned long p2v_offset)
{
@@ -68,11 +68,6 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
}
}
- /*
- * Return the SME encryption mask (if SME is active) to be used as a
- * modifier for the initial pgdir entry programmed into CR3.
- */
- return sme_get_me_mask();
}
/*
@@ -84,7 +79,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
* the 1:1 mapping of memory. Kernel virtual addresses can be determined by
* subtracting p2v_offset from the RIP-relative address.
*/
-unsigned long __head __startup_64(unsigned long p2v_offset,
+void __head __startup_64(unsigned long p2v_offset,
struct boot_params *bp)
{
pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
@@ -213,5 +208,5 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
for (; i < PTRS_PER_PMD; i++)
pmd[i] &= ~_PAGE_PRESENT;
- return sme_postprocess_startup(bp, pmd, p2v_offset);
+ sme_postprocess_startup(bp, pmd, p2v_offset);
}
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 692af46603a1..29ea24bb85ff 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -50,7 +50,7 @@ extern unsigned long acpi_realmode_flags;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
-extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
+extern void __startup_64(unsigned long p2v_offset, struct boot_params *bp);
extern void startup_64_setup_gdt_idt(void);
extern void startup_64_load_idt(void *vc_handler);
extern void early_setup_idt(void);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3e9b3a3bd039..4390a28f7dad 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -106,18 +106,16 @@ SYM_CODE_START_NOALIGN(startup_64)
/*
* Perform pagetable fixups. Additionally, if SME is active, encrypt
- * the kernel and retrieve the modifier (SME encryption mask if SME
- * is active) to be added to the initial pgdir entry that will be
- * programmed into CR3.
+ * the kernel.
*/
movq %r15, %rsi
call __startup_64
/* Form the CR3 value being sure to include the CR3 modifier */
- leaq early_top_pgt(%rip), %rcx
- addq %rcx, %rax
-
+ leaq early_top_pgt(%rip), %rax
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
+ addq sme_me_mask(%rip), %rax
mov %rax, %rdi
/*
--
2.49.0
* Re: [PATCH v3] x86/boot: Don't return encryption mask from __startup_64()
2025-06-17 7:36 Khalid Ali
@ 2025-06-17 7:48 ` Ard Biesheuvel
From: Ard Biesheuvel @ 2025-06-17 7:48 UTC (permalink / raw)
To: Khalid Ali
Cc: tglx, mingo, bp, dave.hansen, x86, hpa, linux-kernel,
kernel test robot
On Tue, 17 Jun 2025 at 09:38, Khalid Ali <khaliidcaliy@gmail.com> wrote:
>
> Avoid returning the SME encryption mask from __startup_64(); instead, let
> callers retrieve the mask themselves where they need it. The encryption
> mask is already available to callers and can be accessed via
> sme_get_me_mask() in C code, or directly through the sme_me_mask symbol
> in assembly, if CONFIG_AMD_MEM_ENCRYPT is enabled.
>
> This change aligns with how secondary_startup_64_no_verify handles SME
> and keeps the behavior consistent. For Intel CPUs, SME is not relevant,
> so there is no need to retrieve the mask unless CONFIG_AMD_MEM_ENCRYPT
> is enabled.
>
> Signed-off-by: Khalid Ali <khaliidcaliy@gmail.com>
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202506171012.Ji3c5sJh-lkp@intel.com/
Please drop these lines ^^^ (but no need to resend just for that)
As it says on the page:
"If you fix the issue in a separate patch/commit (i.e. not just a new
version of the same patch/commit), kindly add following tags"
> ---
> arch/x86/boot/startup/map_kernel.c | 11 +++--------
> arch/x86/include/asm/setup.h | 2 +-
> arch/x86/kernel/head_64.S | 10 ++++------
> 3 files changed, 8 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
> index 332dbe6688c4..6fdb340e9147 100644
> --- a/arch/x86/boot/startup/map_kernel.c
> +++ b/arch/x86/boot/startup/map_kernel.c
> @@ -30,7 +30,7 @@ static inline bool check_la57_support(void)
> return true;
> }
>
> -static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
> +static void __head sme_postprocess_startup(struct boot_params *bp,
> pmdval_t *pmd,
> unsigned long p2v_offset)
> {
> @@ -68,11 +68,6 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
> }
> }
>
> - /*
> - * Return the SME encryption mask (if SME is active) to be used as a
> - * modifier for the initial pgdir entry programmed into CR3.
> - */
> - return sme_get_me_mask();
> }
>
> /*
> @@ -84,7 +79,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
> * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
> * subtracting p2v_offset from the RIP-relative address.
> */
> -unsigned long __head __startup_64(unsigned long p2v_offset,
> +void __head __startup_64(unsigned long p2v_offset,
> struct boot_params *bp)
> {
> pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
> @@ -213,5 +208,5 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
> for (; i < PTRS_PER_PMD; i++)
> pmd[i] &= ~_PAGE_PRESENT;
>
> - return sme_postprocess_startup(bp, pmd, p2v_offset);
> + sme_postprocess_startup(bp, pmd, p2v_offset);
> }
> diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
> index 692af46603a1..29ea24bb85ff 100644
> --- a/arch/x86/include/asm/setup.h
> +++ b/arch/x86/include/asm/setup.h
> @@ -50,7 +50,7 @@ extern unsigned long acpi_realmode_flags;
>
> extern void reserve_standard_io_resources(void);
> extern void i386_reserve_resources(void);
> -extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
> +extern void __startup_64(unsigned long p2v_offset, struct boot_params *bp);
> extern void startup_64_setup_gdt_idt(void);
> extern void startup_64_load_idt(void *vc_handler);
> extern void early_setup_idt(void);
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index 3e9b3a3bd039..4390a28f7dad 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -106,18 +106,16 @@ SYM_CODE_START_NOALIGN(startup_64)
>
> /*
> * Perform pagetable fixups. Additionally, if SME is active, encrypt
> - * the kernel and retrieve the modifier (SME encryption mask if SME
> - * is active) to be added to the initial pgdir entry that will be
> - * programmed into CR3.
> + * the kernel.
> */
> movq %r15, %rsi
> call __startup_64
>
> /* Form the CR3 value being sure to include the CR3 modifier */
> - leaq early_top_pgt(%rip), %rcx
> - addq %rcx, %rax
> -
> + leaq early_top_pgt(%rip), %rax
> +
> #ifdef CONFIG_AMD_MEM_ENCRYPT
> + addq sme_me_mask(%rip), %rax
> mov %rax, %rdi
>
> /*
> --
> 2.49.0
>
* [PATCH v3] x86/boot: Don't return encryption mask from __startup_64()
@ 2025-06-17 8:46 Khalid Ali
2025-06-18 19:34 ` kernel test robot
From: Khalid Ali @ 2025-06-17 8:46 UTC (permalink / raw)
To: tglx, mingo, bp, dave.hansen, x86, ardb; +Cc: hpa, linux-kernel, Khalid Ali
Avoid returning the SME encryption mask from __startup_64(); instead, let
callers retrieve the mask themselves where they need it. The encryption
mask is already available to callers and can be accessed via
sme_get_me_mask() in C code, or directly through the sme_me_mask symbol
in assembly, if CONFIG_AMD_MEM_ENCRYPT is enabled.

This change aligns with how secondary_startup_64_no_verify handles SME
and keeps the behavior consistent. For Intel CPUs, SME is not relevant,
so there is no need to retrieve the mask unless CONFIG_AMD_MEM_ENCRYPT
is enabled.

Signed-off-by: Khalid Ali <khaliidcaliy@gmail.com>
---
arch/x86/boot/startup/map_kernel.c | 11 +++--------
arch/x86/include/asm/setup.h | 2 +-
arch/x86/kernel/head_64.S | 10 ++++------
3 files changed, 8 insertions(+), 15 deletions(-)
diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c
index 332dbe6688c4..6fdb340e9147 100644
--- a/arch/x86/boot/startup/map_kernel.c
+++ b/arch/x86/boot/startup/map_kernel.c
@@ -30,7 +30,7 @@ static inline bool check_la57_support(void)
return true;
}
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+static void __head sme_postprocess_startup(struct boot_params *bp,
pmdval_t *pmd,
unsigned long p2v_offset)
{
@@ -68,11 +68,6 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
}
}
- /*
- * Return the SME encryption mask (if SME is active) to be used as a
- * modifier for the initial pgdir entry programmed into CR3.
- */
- return sme_get_me_mask();
}
/*
@@ -84,7 +79,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
* the 1:1 mapping of memory. Kernel virtual addresses can be determined by
* subtracting p2v_offset from the RIP-relative address.
*/
-unsigned long __head __startup_64(unsigned long p2v_offset,
+void __head __startup_64(unsigned long p2v_offset,
struct boot_params *bp)
{
pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
@@ -213,5 +208,5 @@ unsigned long __head __startup_64(unsigned long p2v_offset,
for (; i < PTRS_PER_PMD; i++)
pmd[i] &= ~_PAGE_PRESENT;
- return sme_postprocess_startup(bp, pmd, p2v_offset);
+ sme_postprocess_startup(bp, pmd, p2v_offset);
}
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 692af46603a1..29ea24bb85ff 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -50,7 +50,7 @@ extern unsigned long acpi_realmode_flags;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
-extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
+extern void __startup_64(unsigned long p2v_offset, struct boot_params *bp);
extern void startup_64_setup_gdt_idt(void);
extern void startup_64_load_idt(void *vc_handler);
extern void early_setup_idt(void);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3e9b3a3bd039..4390a28f7dad 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -106,18 +106,16 @@ SYM_CODE_START_NOALIGN(startup_64)
/*
* Perform pagetable fixups. Additionally, if SME is active, encrypt
- * the kernel and retrieve the modifier (SME encryption mask if SME
- * is active) to be added to the initial pgdir entry that will be
- * programmed into CR3.
+ * the kernel.
*/
movq %r15, %rsi
call __startup_64
/* Form the CR3 value being sure to include the CR3 modifier */
- leaq early_top_pgt(%rip), %rcx
- addq %rcx, %rax
-
+ leaq early_top_pgt(%rip), %rax
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
+ addq sme_me_mask(%rip), %rax
mov %rax, %rdi
/*
--
2.49.0
* Re: [PATCH v3] x86/boot: Don't return encryption mask from __startup_64()
2025-06-17 8:46 [PATCH v3] x86/boot: Don't return encryption mask from __startup_64() Khalid Ali
@ 2025-06-18 19:34 ` kernel test robot
From: kernel test robot @ 2025-06-18 19:34 UTC (permalink / raw)
To: Khalid Ali, tglx, mingo, bp, dave.hansen, x86, ardb
Cc: oe-kbuild-all, hpa, linux-kernel, Khalid Ali
Hi Khalid,
kernel test robot noticed the following build warnings:
[auto build test WARNING on tip/x86/core]
[also build test WARNING on tip/master linus/master v6.16-rc2 next-20250618]
[cannot apply to tip/auto-latest]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Khalid-Ali/x86-boot-Don-t-return-encryption-mask-from-__startup_64/20250617-164938
base: tip/x86/core
patch link: https://lore.kernel.org/r/20250617084705.619-1-khaliidcaliy%40gmail.com
patch subject: [PATCH v3] x86/boot: Don't return encryption mask from __startup_64()
config: x86_64-randconfig-161-20250618 (https://download.01.org/0day-ci/archive/20250619/202506190255.i7td2YOj-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202506190255.i7td2YOj-lkp@intel.com/
smatch warnings:
arch/x86/boot/startup/map_kernel.c:211 __startup_64() warn: inconsistent indenting
vim +211 arch/x86/boot/startup/map_kernel.c
72
73 /*
74 * This code is compiled using PIC codegen because it will execute from the
75 * early 1:1 mapping of memory, which deviates from the mapping expected by the
76 * linker. Due to this deviation, taking the address of a global variable will
77 * produce an ambiguous result when using the plain & operator. Instead,
78 * rip_rel_ptr() must be used, which will return the RIP-relative address in
79 * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
80 * subtracting p2v_offset from the RIP-relative address.
81 */
82 void __head __startup_64(unsigned long p2v_offset,
83 struct boot_params *bp)
84 {
85 pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
86 unsigned long physaddr = (unsigned long)rip_rel_ptr(_text);
87 unsigned long va_text, va_end;
88 unsigned long pgtable_flags;
89 unsigned long load_delta;
90 pgdval_t *pgd;
91 p4dval_t *p4d;
92 pudval_t *pud;
93 pmdval_t *pmd, pmd_entry;
94 bool la57;
95 int i;
96
97 la57 = check_la57_support();
98
99 /* Is the address too large? */
100 if (physaddr >> MAX_PHYSMEM_BITS)
101 for (;;);
102
103 /*
104 * Compute the delta between the address I am compiled to run at
105 * and the address I am actually running at.
106 */
107 phys_base = load_delta = __START_KERNEL_map + p2v_offset;
108
109 /* Is the address not 2M aligned? */
110 if (load_delta & ~PMD_MASK)
111 for (;;);
112
113 va_text = physaddr - p2v_offset;
114 va_end = (unsigned long)rip_rel_ptr(_end) - p2v_offset;
115
116 /* Include the SME encryption mask in the fixup value */
117 load_delta += sme_get_me_mask();
118
119 /* Fixup the physical addresses in the page table */
120
121 pgd = rip_rel_ptr(early_top_pgt);
122 pgd[pgd_index(__START_KERNEL_map)] += load_delta;
123
124 if (la57) {
125 p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt);
126 p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
127
128 pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
129 }
130
131 level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta;
132 level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta;
133
134 for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
135 level2_fixmap_pgt[i].pmd += load_delta;
136
137 /*
138 * Set up the identity mapping for the switchover. These
139 * entries should *NOT* have the global bit set! This also
140 * creates a bunch of nonsense entries but that is fine --
141 * it avoids problems around wraparound.
142 */
143
144 pud = &early_pgts[0]->pmd;
145 pmd = &early_pgts[1]->pmd;
146 next_early_pgt = 2;
147
148 pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
149
150 if (la57) {
151 p4d = &early_pgts[next_early_pgt++]->pmd;
152
153 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
154 pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
155 pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
156
157 i = physaddr >> P4D_SHIFT;
158 p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
159 p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
160 } else {
161 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
162 pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
163 pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
164 }
165
166 i = physaddr >> PUD_SHIFT;
167 pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
168 pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
169
170 pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
171 pmd_entry += sme_get_me_mask();
172 pmd_entry += physaddr;
173
174 for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
175 int idx = i + (physaddr >> PMD_SHIFT);
176
177 pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
178 }
179
180 /*
181 * Fixup the kernel text+data virtual addresses. Note that
182 * we might write invalid pmds, when the kernel is relocated
183 * cleanup_highmap() fixes this up along with the mappings
184 * beyond _end.
185 *
186 * Only the region occupied by the kernel image has so far
187 * been checked against the table of usable memory regions
188 * provided by the firmware, so invalidate pages outside that
189 * region. A page table entry that maps to a reserved area of
190 * memory would allow processor speculation into that area,
191 * and on some hardware (particularly the UV platform) even
192 * speculative access to some reserved areas is caught as an
193 * error, causing the BIOS to halt the system.
194 */
195
196 pmd = rip_rel_ptr(level2_kernel_pgt);
197
198 /* invalidate pages before the kernel image */
199 for (i = 0; i < pmd_index(va_text); i++)
200 pmd[i] &= ~_PAGE_PRESENT;
201
202 /* fixup pages that are part of the kernel image */
203 for (; i <= pmd_index(va_end); i++)
204 if (pmd[i] & _PAGE_PRESENT)
205 pmd[i] += load_delta;
206
207 /* invalidate pages after the kernel image */
208 for (; i < PTRS_PER_PMD; i++)
209 pmd[i] &= ~_PAGE_PRESENT;
210
> 211 sme_postprocess_startup(bp, pmd, p2v_offset);
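In plain terms, smatch's "inconsistent indenting" warning fires when a
statement's leading whitespace differs from the surrounding block, for
example spaces where the neighbouring lines use tabs, or a leftover extra
indent after the removed return keyword. A hypothetical illustration of the
pattern (not the actual file contents):

	for (; i < PTRS_PER_PMD; i++)		/* tab-indented, like the rest of the function */
		pmd[i] &= ~_PAGE_PRESENT;

        sme_postprocess_startup(bp, pmd, p2v_offset);	/* space-indented: smatch flags this */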
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki