From: "Xin Li (Intel)" <xin@zytor.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
linux-doc@vger.kernel.org
Cc: pbonzini@redhat.com, seanjc@google.com, corbet@lwn.net,
tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
dave.hansen@linux.intel.com, x86@kernel.org, hpa@zytor.com,
xin@zytor.com, luto@kernel.org, peterz@infradead.org,
andrew.cooper3@citrix.com, chao.gao@intel.com, hch@infradead.org
Subject: [PATCH v7 04/21] x86/cea: Prefix event stack names with ESTACK_
Date: Fri, 29 Aug 2025 08:31:32 -0700 [thread overview]
Message-ID: <20250829153149.2871901-5-xin@zytor.com> (raw)
In-Reply-To: <20250829153149.2871901-1-xin@zytor.com>
Add the ESTACK_ prefix to event stack names to improve clarity and
readability. Without the prefix, names like DF, NMI, and DB are too
brief and potentially ambiguous.
This renaming also prepares for converting __this_cpu_ist_top_va() from
a macro into a function that accepts an enum exception_stack_ordering
argument, without requiring any changes to existing call sites.
Signed-off-by: Xin Li (Intel) <xin@zytor.com>
---
Changes in v7:
* Move rename code to this patch (Dave Hansen).
* Fix a vertical alignment (Dave Hansen).
---
arch/x86/coco/sev/sev-nmi.c | 4 ++--
arch/x86/coco/sev/vc-handle.c | 2 +-
arch/x86/include/asm/cpu_entry_area.h | 26 +++++++++++++-------------
arch/x86/kernel/cpu/common.c | 10 +++++-----
arch/x86/kernel/dumpstack_64.c | 14 +++++++-------
arch/x86/kernel/fred.c | 6 +++---
arch/x86/kernel/traps.c | 2 +-
arch/x86/mm/cpu_entry_area.c | 12 ++++++------
arch/x86/mm/fault.c | 2 +-
9 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/arch/x86/coco/sev/sev-nmi.c b/arch/x86/coco/sev/sev-nmi.c
index d8dfaddfb367..73e34ad7a1a9 100644
--- a/arch/x86/coco/sev/sev-nmi.c
+++ b/arch/x86/coco/sev/sev-nmi.c
@@ -30,7 +30,7 @@ static __always_inline bool on_vc_stack(struct pt_regs *regs)
if (ip_within_syscall_gap(regs))
return false;
- return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
+ return ((sp >= __this_cpu_ist_bottom_va(ESTACK_VC)) && (sp < __this_cpu_ist_top_va(ESTACK_VC)));
}
/*
@@ -82,7 +82,7 @@ void noinstr __sev_es_ist_exit(void)
/* Read IST entry */
ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
- if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
+ if (WARN_ON(ist == __this_cpu_ist_top_va(ESTACK_VC)))
return;
/* Read back old IST entry and write it to the TSS */
diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c
index c3b4acbde0d8..88b6bc518a5a 100644
--- a/arch/x86/coco/sev/vc-handle.c
+++ b/arch/x86/coco/sev/vc-handle.c
@@ -859,7 +859,7 @@ static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
static __always_inline bool is_vc2_stack(unsigned long sp)
{
- return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
+ return (sp >= __this_cpu_ist_bottom_va(ESTACK_VC2) && sp < __this_cpu_ist_top_va(ESTACK_VC2));
}
static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 462fc34f1317..d0f884c28178 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -18,19 +18,19 @@
/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, optional_stack_size) \
- char DF_stack_guard[guardsize]; \
- char DF_stack[EXCEPTION_STKSZ]; \
- char NMI_stack_guard[guardsize]; \
- char NMI_stack[EXCEPTION_STKSZ]; \
- char DB_stack_guard[guardsize]; \
- char DB_stack[EXCEPTION_STKSZ]; \
- char MCE_stack_guard[guardsize]; \
- char MCE_stack[EXCEPTION_STKSZ]; \
- char VC_stack_guard[guardsize]; \
- char VC_stack[optional_stack_size]; \
- char VC2_stack_guard[guardsize]; \
- char VC2_stack[optional_stack_size]; \
- char IST_top_guard[guardsize]; \
+ char ESTACK_DF_stack_guard[guardsize]; \
+ char ESTACK_DF_stack[EXCEPTION_STKSZ]; \
+ char ESTACK_NMI_stack_guard[guardsize]; \
+ char ESTACK_NMI_stack[EXCEPTION_STKSZ]; \
+ char ESTACK_DB_stack_guard[guardsize]; \
+ char ESTACK_DB_stack[EXCEPTION_STKSZ]; \
+ char ESTACK_MCE_stack_guard[guardsize]; \
+ char ESTACK_MCE_stack[EXCEPTION_STKSZ]; \
+ char ESTACK_VC_stack_guard[guardsize]; \
+ char ESTACK_VC_stack[optional_stack_size]; \
+ char ESTACK_VC2_stack_guard[guardsize]; \
+ char ESTACK_VC2_stack[optional_stack_size]; \
+ char ESTACK_IST_top_guard[guardsize]; \
/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 34a054181c4d..5c888f1783e5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2307,12 +2307,12 @@ static inline void setup_getcpu(int cpu)
static inline void tss_setup_ist(struct tss_struct *tss)
{
/* Set up the per-CPU TSS IST stacks */
- tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
- tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
- tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
- tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
+ tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(ESTACK_DF);
+ tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(ESTACK_NMI);
+ tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(ESTACK_DB);
+ tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(ESTACK_MCE);
/* Only mapped when SEV-ES is active */
- tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
+ tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(ESTACK_VC);
}
#else /* CONFIG_X86_64 */
static inline void tss_setup_ist(struct tss_struct *tss) { }
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6c5defd6569a..40f51e278171 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -73,7 +73,7 @@ struct estack_pages {
PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = { \
.offs = CEA_ESTACK_OFFS(st), \
.size = CEA_ESTACK_SIZE(st), \
- .type = STACK_TYPE_EXCEPTION + ESTACK_ ##st, }
+ .type = STACK_TYPE_EXCEPTION + st, }
/*
* Array of exception stack page descriptors. If the stack is larger than
@@ -83,12 +83,12 @@ struct estack_pages {
*/
static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
- EPAGERANGE(DF),
- EPAGERANGE(NMI),
- EPAGERANGE(DB),
- EPAGERANGE(MCE),
- EPAGERANGE(VC),
- EPAGERANGE(VC2),
+ EPAGERANGE(ESTACK_DF),
+ EPAGERANGE(ESTACK_NMI),
+ EPAGERANGE(ESTACK_DB),
+ EPAGERANGE(ESTACK_MCE),
+ EPAGERANGE(ESTACK_VC),
+ EPAGERANGE(ESTACK_VC2),
};
static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c
index 816187da3a47..06d944a3d051 100644
--- a/arch/x86/kernel/fred.c
+++ b/arch/x86/kernel/fred.c
@@ -87,7 +87,7 @@ void cpu_init_fred_rsps(void)
FRED_STKLVL(X86_TRAP_DF, FRED_DF_STACK_LEVEL));
/* The FRED equivalents to IST stacks... */
- wrmsrq(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB));
- wrmsrq(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI));
- wrmsrq(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF));
+ wrmsrq(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(ESTACK_DB));
+ wrmsrq(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(ESTACK_NMI));
+ wrmsrq(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(ESTACK_DF));
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 36354b470590..5c9c5ebf5e73 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -954,7 +954,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
info.type > STACK_TYPE_EXCEPTION_LAST)
- sp = __this_cpu_ist_top_va(VC2);
+ sp = __this_cpu_ist_top_va(ESTACK_VC2);
sync:
/*
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 575f863f3c75..9fa371af8abc 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -151,15 +151,15 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
* by guard pages so each stack must be mapped separately. DB2 is
* not mapped; it just exists to catch triple nesting of #DB.
*/
- cea_map_stack(DF);
- cea_map_stack(NMI);
- cea_map_stack(DB);
- cea_map_stack(MCE);
+ cea_map_stack(ESTACK_DF);
+ cea_map_stack(ESTACK_NMI);
+ cea_map_stack(ESTACK_DB);
+ cea_map_stack(ESTACK_MCE);
if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
- cea_map_stack(VC);
- cea_map_stack(VC2);
+ cea_map_stack(ESTACK_VC);
+ cea_map_stack(ESTACK_VC2);
}
}
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 998bd807fc7b..1804eb86cc14 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -671,7 +671,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
* and then double-fault, though, because we're likely to
* break the console driver and lose most of the stack dump.
*/
- call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*),
+ call_on_stack(__this_cpu_ist_top_va(ESTACK_DF) - sizeof(void*),
handle_stack_overflow,
ASM_CALL_ARG3,
, [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));
--
2.51.0
Thread overview: 23+ messages
2025-08-29 15:31 [PATCH v7 00/21] Enable FRED with KVM VMX Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 01/21] KVM: VMX: Add support for the secondary VM exit controls Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 02/21] KVM: VMX: Initialize VM entry/exit FRED controls in vmcs_config Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 03/21] KVM: VMX: Disable FRED if FRED consistency checks fail Xin Li (Intel)
2025-08-29 15:31 ` Xin Li (Intel) [this message]
2025-08-29 15:31 ` [PATCH v7 05/21] x86/cea: Export API for per-CPU exception stacks for KVM Xin Li (Intel)
2025-08-29 16:52 ` Xin Li
2025-08-29 15:31 ` [PATCH v7 06/21] KVM: VMX: Initialize VMCS FRED fields Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 07/21] KVM: VMX: Set FRED MSR intercepts Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 08/21] KVM: VMX: Save/restore guest FRED RSP0 Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 09/21] KVM: VMX: Add support for saving and restoring FRED MSRs Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 10/21] KVM: x86: Add a helper to detect if FRED is enabled for a vCPU Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 11/21] KVM: VMX: Virtualize FRED event_data Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 12/21] KVM: VMX: Virtualize FRED nested exception tracking Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 13/21] KVM: x86: Save/restore the nested flag of an exception Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 14/21] KVM: x86: Mark CR4.FRED as not reserved Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 15/21] KVM: VMX: Dump FRED context in dump_vmcs() Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 16/21] KVM: x86: Advertise support for FRED Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 17/21] KVM: nVMX: Add support for the secondary VM exit controls Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 18/21] KVM: nVMX: Add FRED VMCS fields to nested VMX context handling Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 19/21] KVM: nVMX: Add FRED-related VMCS field checks Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 20/21] KVM: nVMX: Add prerequisites to SHADOW_FIELD_R[OW] macros Xin Li (Intel)
2025-08-29 15:31 ` [PATCH v7 21/21] KVM: nVMX: Allow VMX FRED controls Xin Li (Intel)