From: Juergen Gross <jgross@suse.com>
To: linux-kernel@vger.kernel.org, x86@kernel.org,
virtualization@lists.linux.dev
Cc: Juergen Gross <jgross@suse.com>,
Ajay Kaher <ajay.kaher@broadcom.com>,
Alexey Makhalov <alexey.makhalov@broadcom.com>,
Broadcom internal kernel review list
<bcm-kernel-feedback-list@broadcom.com>,
Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
"H. Peter Anvin" <hpa@zytor.com>
Subject: [PATCH v3 16/16] x86/paravirt: Use alternatives for MSR access with paravirt
Date: Wed, 18 Feb 2026 09:21:33 +0100 [thread overview]
Message-ID: <20260218082133.400602-17-jgross@suse.com> (raw)
In-Reply-To: <20260218082133.400602-1-jgross@suse.com>
When not running as a Xen PV guest, patch in the optimal MSR instructions
via alternatives; otherwise use direct calls.
This will especially improve performance in the case where
paravirtualization is enabled in the kernel but it is not running as a
Xen PV guest, as there will no longer be any call overhead for the MSR
access functions.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V3:
- new patch
---
arch/x86/include/asm/paravirt-msr.h | 101 ++++++++++++++++++++++----
arch/x86/include/asm/paravirt_types.h | 1 +
2 files changed, 86 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/paravirt-msr.h b/arch/x86/include/asm/paravirt-msr.h
index 4ce690b05600..122a7525ae17 100644
--- a/arch/x86/include/asm/paravirt-msr.h
+++ b/arch/x86/include/asm/paravirt-msr.h
@@ -27,33 +27,103 @@ extern struct pv_msr_ops pv_ops_msr;
#define PV_CALLEE_SAVE_REGS_MSR_THUNK(func) \
__PV_CALLEE_SAVE_REGS_THUNK(func, ".text", MSR)
+#define ASM_CLRERR "xor %[err],%[err]\n"
+
+#define PV_RDMSR_VAR(__msr, __val, __type, __func, __err) \
+ asm volatile( \
+ "1:\n" \
+ ALTERNATIVE_2(PARAVIRT_CALL, \
+ RDMSR_AND_SAVE_RESULT ASM_CLRERR, X86_FEATURE_ALWAYS, \
+ ALT_CALL_INSTR, ALT_XEN_CALL) \
+ "2:\n" \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err]) \
+ : [err] "=d" (__err), [val] "=a" (__val), \
+ ASM_CALL_CONSTRAINT \
+ : paravirt_ptr(pv_ops_msr, __func), "c" (__msr) \
+ : "cc")
+
+#define PV_RDMSR_CONST(__msr, __val, __type, __func, __err) \
+ asm volatile( \
+ "1:\n" \
+ ALTERNATIVE_3(PARAVIRT_CALL, \
+ RDMSR_AND_SAVE_RESULT ASM_CLRERR, X86_FEATURE_ALWAYS, \
+ ASM_RDMSR_IMM ASM_CLRERR, X86_FEATURE_MSR_IMM, \
+ ALT_CALL_INSTR, ALT_XEN_CALL) \
+ "2:\n" \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err]) \
+ : [err] "=d" (__err), [val] "=a" (__val), \
+ ASM_CALL_CONSTRAINT \
+ : paravirt_ptr(pv_ops_msr, __func), \
+ "c" (__msr), [msr] "i" (__msr) \
+ : "cc")
+
+#define PV_WRMSR_VAR(__msr, __val, __type, __func, __err) \
+({ \
+ unsigned long rdx = rdx; \
+ asm volatile( \
+ "1:\n" \
+ ALTERNATIVE_3(PARAVIRT_CALL, \
+ "wrmsr;" ASM_CLRERR, X86_FEATURE_ALWAYS, \
+ ASM_WRMSRNS ASM_CLRERR, X86_FEATURE_WRMSRNS, \
+ ALT_CALL_INSTR, ALT_XEN_CALL) \
+ "2:\n" \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err]) \
+ : [err] "=a" (__err), "=d" (rdx), ASM_CALL_CONSTRAINT \
+ : paravirt_ptr(pv_ops_msr, __func), \
+ "0" (__val), "1" ((__val) >> 32), "c" (__msr) \
+ : "memory", "cc"); \
+})
+
+#define PV_WRMSR_CONST(__msr, __val, __type, __func, __err) \
+({ \
+ unsigned long rdx = rdx; \
+ asm volatile( \
+ "1:\n" \
+ ALTERNATIVE_4(PARAVIRT_CALL, \
+ "wrmsr;" ASM_CLRERR, X86_FEATURE_ALWAYS, \
+ ASM_WRMSRNS ASM_CLRERR, X86_FEATURE_WRMSRNS, \
+ ASM_WRMSRNS_IMM ASM_CLRERR, X86_FEATURE_MSR_IMM,\
+ ALT_CALL_INSTR, ALT_XEN_CALL) \
+ "2:\n" \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, __type, %[err]) \
+ : [err] "=a" (__err), "=d" (rdx), ASM_CALL_CONSTRAINT \
+ : paravirt_ptr(pv_ops_msr, __func), \
+ [val] "0" (__val), "1" ((__val) >> 32), \
+ "c" (__msr), [msr] "i" (__msr) \
+ : "memory", "cc"); \
+})
+
static __always_inline u64 read_msr(u32 msr)
{
u64 val;
+ int err;
- asm volatile(PARAVIRT_CALL
- : "=a" (val), ASM_CALL_CONSTRAINT
- : paravirt_ptr(pv_ops_msr, read_msr), "c" (msr)
- : "rdx");
+ if (__builtin_constant_p(msr))
+ PV_RDMSR_CONST(msr, val, EX_TYPE_RDMSR, read_msr, err);
+ else
+ PV_RDMSR_VAR(msr, val, EX_TYPE_RDMSR, read_msr, err);
return val;
}
static __always_inline void write_msr(u32 msr, u64 val)
{
- asm volatile(PARAVIRT_CALL
- : ASM_CALL_CONSTRAINT
- : paravirt_ptr(pv_ops_msr, write_msr), "c" (msr), "a" (val)
- : "memory", "rdx");
+ int err;
+
+ if (__builtin_constant_p(msr))
+ PV_WRMSR_CONST(msr, val, EX_TYPE_WRMSR, write_msr, err);
+ else
+ PV_WRMSR_VAR(msr, val, EX_TYPE_WRMSR, write_msr, err);
}
static __always_inline int read_msr_safe(u32 msr, u64 *val)
{
int err;
- asm volatile(PARAVIRT_CALL
- : [err] "=d" (err), "=a" (*val), ASM_CALL_CONSTRAINT
- : paravirt_ptr(pv_ops_msr, read_msr_safe), "c" (msr));
+ if (__builtin_constant_p(msr))
+ PV_RDMSR_CONST(msr, *val, EX_TYPE_RDMSR_SAFE, read_msr_safe, err);
+ else
+ PV_RDMSR_VAR(msr, *val, EX_TYPE_RDMSR_SAFE, read_msr_safe, err);
return err ? -EIO : 0;
}
@@ -62,11 +132,10 @@ static __always_inline int write_msr_safe(u32 msr, u64 val)
{
int err;
- asm volatile(PARAVIRT_CALL
- : [err] "=a" (err), ASM_CALL_CONSTRAINT
- : paravirt_ptr(pv_ops_msr, write_msr_safe),
- "c" (msr), "a" (val)
- : "memory", "rdx");
+ if (__builtin_constant_p(msr))
+ PV_WRMSR_CONST(msr, val, EX_TYPE_WRMSR_SAFE, write_msr_safe, err);
+ else
+ PV_WRMSR_VAR(msr, val, EX_TYPE_WRMSR_SAFE, write_msr_safe, err);
return err ? -EIO : 0;
}
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 999a5abe54ed..bdaecc54c6ee 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -451,6 +451,7 @@ extern struct paravirt_patch_template pv_ops;
#endif /* __ASSEMBLER__ */
#define ALT_NOT_XEN ALT_NOT(X86_FEATURE_XENPV)
+#define ALT_XEN_CALL ALT_DIRECT_CALL(X86_FEATURE_XENPV)
#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
--
2.53.0
next prev parent reply other threads:[~2026-02-18 8:23 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-18 8:21 [PATCH v3 00/16] x86/msr: Inline rdmsr/wrmsr instructions Juergen Gross
2026-02-18 8:21 ` [PATCH v3 06/16] x86/msr: Move MSR trace calls one function level up Juergen Gross
2026-02-18 8:21 ` [PATCH v3 12/16] x86/paravirt: Split off MSR related hooks into new header Juergen Gross
2026-02-18 8:21 ` [PATCH v3 13/16] x86/paravirt: Prepare support of MSR instruction interfaces Juergen Gross
2026-02-18 8:21 ` [PATCH v3 14/16] x86/paravirt: Switch MSR access pv_ops functions to " Juergen Gross
2026-02-18 8:21 ` Juergen Gross [this message]
2026-02-18 13:49 ` [PATCH v3 16/16] x86/paravirt: Use alternatives for MSR access with paravirt kernel test robot
2026-02-18 15:49 ` Juergen Gross
2026-02-18 20:37 ` [PATCH v3 00/16] x86/msr: Inline rdmsr/wrmsr instructions H. Peter Anvin
2026-02-19 6:28 ` Jürgen Groß
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260218082133.400602-17-jgross@suse.com \
--to=jgross@suse.com \
--cc=ajay.kaher@broadcom.com \
--cc=alexey.makhalov@broadcom.com \
--cc=bcm-kernel-feedback-list@broadcom.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=hpa@zytor.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=tglx@kernel.org \
--cc=virtualization@lists.linux.dev \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox