From: Juergen Gross <jgross@suse.com>
To: linux-kernel@vger.kernel.org, x86@kernel.org,
virtualization@lists.linux.dev
Cc: Juergen Gross <jgross@suse.com>,
Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Ajay Kaher <ajay.kaher@broadcom.com>,
Alexey Makhalov <alexey.makhalov@broadcom.com>,
Broadcom internal kernel review list
<bcm-kernel-feedback-list@broadcom.com>
Subject: [PATCH RFC 09/11] x86/msr: Add macros for preparing to switch rdmsr/wrmsr interfaces
Date: Tue, 28 Apr 2026 12:42:03 +0200 [thread overview]
Message-ID: <20260428104205.916924-10-jgross@suse.com> (raw)
In-Reply-To: <20260428104205.916924-1-jgross@suse.com>
In order to prepare switching the rdmsr(), rdmsr_safe(), wrmsr() and
wrmsr_safe() interfaces to 64-bit values instead of 32-bit pairs, add
macros to call different implementations depending on the number of
passed parameters.
This enables using the same function/macro names as today while
doing the interface switch per component instead of in one go.
At the same time switch the rdmsr related interfaces to inline
functions, avoiding unintended modification of variables passed as
parameters.
The helper macros will be removed when all users of the current
interfaces have been switched to the new ones.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
arch/x86/include/asm/msr.h | 46 +++++++++++++++++++++++++++++----
arch/x86/include/asm/paravirt.h | 6 ++---
2 files changed, 44 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index a5596d268053..4dd181aedb00 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -12,6 +12,7 @@
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>
+#include <linux/args.h>
#include <linux/types.h>
#include <linux/percpu.h>
@@ -179,14 +180,14 @@ static inline u64 native_read_pmc(int counter)
* pointer indirection), this allows gcc to optimize better
*/
-#define rdmsr(msr, low, high) \
+#define __rdmsr_3(msr, low, high) \
do { \
u64 __val = native_read_msr((msr)); \
(void)((low) = (u32)__val); \
(void)((high) = (u32)(__val >> 32)); \
} while (0)
-static inline void wrmsr(u32 msr, u32 low, u32 high)
+static inline void __wrmsr_3(u32 msr, u32 low, u32 high)
{
native_write_msr(msr, (u64)high << 32 | low);
}
@@ -206,7 +207,7 @@ static inline int wrmsrq_safe(u32 msr, u64 val)
}
/* rdmsr with exception handling */
-#define rdmsr_safe(msr, low, high) \
+#define __rdmsr_safe_3(msr, low, high) \
({ \
u64 __val; \
int __err = native_read_msr_safe((msr), &__val); \
@@ -243,13 +244,48 @@ static __always_inline void wrmsrns(u32 msr, u64 val)
}
/*
- * Dual u32 version of wrmsrq_safe():
+ * Dual u32 versions of wrmsr_safe():
*/
-static inline int wrmsr_safe(u32 msr, u32 low, u32 high)
+static __always_inline int __wrmsr_safe_3(u32 msr, u32 low, u32 high)
{
return wrmsrq_safe(msr, (u64)high << 32 | low);
}
+/*
+ * u64 versions of rdmsr/wrmsr[_safe]():
+ */
+static __always_inline u64 __rdmsr_1(u32 msr)
+{
+ u64 val;
+
+ rdmsrq(msr, val);
+
+ return val;
+}
+
+static __always_inline void __wrmsr_2(u32 msr, u64 val)
+{
+ wrmsrq(msr, val);
+}
+
+static __always_inline int __rdmsr_safe_2(u32 msr, u64 *p)
+{
+ return rdmsrq_safe(msr, p);
+}
+
+static __always_inline int __wrmsr_safe_2(u32 msr, u64 val)
+{
+ return wrmsrq_safe(msr, val);
+}
+
+/*
+ * Macros for selecting u64 or dual u32 versions of rdmsr/wrmsr[_safe]():
+ */
+#define rdmsr(...) CONCATENATE(__rdmsr_, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+#define wrmsr(...) CONCATENATE(__wrmsr_, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+#define rdmsr_safe(...) CONCATENATE(__rdmsr_safe_, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+#define wrmsr_safe(...) CONCATENATE(__wrmsr_safe_, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
struct msr __percpu *msrs_alloc(void);
void msrs_free(struct msr __percpu *msrs);
int msr_set_bit(u32 msr, u8 bit);
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cdfe4007443e..359fbc09f132 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -150,14 +150,14 @@ static inline int paravirt_write_msr_safe(u32 msr, u64 val)
return PVOP_CALL2(int, pv_ops, cpu.write_msr_safe, msr, val);
}
-#define rdmsr(msr, val1, val2) \
+#define __rdmsr_3(msr, val1, val2) \
do { \
u64 _l = paravirt_read_msr(msr); \
val1 = (u32)_l; \
val2 = _l >> 32; \
} while (0)
-static __always_inline void wrmsr(u32 msr, u32 low, u32 high)
+static __always_inline void __wrmsr_3(u32 msr, u32 low, u32 high)
{
paravirt_write_msr(msr, (u64)high << 32 | low);
}
@@ -178,7 +178,7 @@ static inline int wrmsrq_safe(u32 msr, u64 val)
}
/* rdmsr with exception handling */
-#define rdmsr_safe(msr, a, b) \
+#define __rdmsr_safe_3(msr, a, b) \
({ \
u64 _l; \
int _err = paravirt_read_msr_safe((msr), &_l); \
--
2.53.0
next prev parent reply other threads:[~2026-04-28 10:43 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-28 10:41 [PATCH RFC 00/11] x86/msr: Reduce MSR access interfaces Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 01/11] x86/msr: Switch rdmsr_on_cpu() to return a 64-bit quantity Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 02/11] x86/msr: Switch all callers of rdmsrq_on_cpu() to use rdmsr_on_cpu() Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 03/11] x86/msr: Switch wrmsr_on_cpu() to use a 64-bit quantity Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 04/11] x86/msr: Switch all callers of wrmsrq_on_cpu() to use wrmsr_on_cpu() Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 05/11] x86/msr: Switch rdmsr_safe_on_cpu() to return a 64-bit quantity Juergen Gross
2026-04-28 10:42 ` [PATCH RFC 06/11] x86/msr: Switch all callers of rdmsrq_safe_on_cpu() to use rdmsr_safe_on_cpu() Juergen Gross
2026-04-28 10:42 ` [PATCH RFC 07/11] x86/msr: Switch wrmsr_safe_on_cpu() to use a 64-bit quantity Juergen Gross
2026-04-28 10:42 ` [PATCH RFC 08/11] x86/msr: Switch all callers of wrmsrq_safe_on_cpu() to use wrmsr_safe_on_cpu() Juergen Gross
2026-04-28 10:42 ` Juergen Gross [this message]
2026-04-28 10:42 ` [PATCH RFC 10/11] x86/events: Switch core parts to use 64-bit rdmsr/wrmsr() variants Juergen Gross
2026-04-28 10:42 ` [PATCH RFC 11/11] x86/cpu/mce: Switch code " Juergen Gross
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260428104205.916924-10-jgross@suse.com \
--to=jgross@suse.com \
--cc=ajay.kaher@broadcom.com \
--cc=alexey.makhalov@broadcom.com \
--cc=bcm-kernel-feedback-list@broadcom.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=hpa@zytor.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=tglx@kernel.org \
--cc=virtualization@lists.linux.dev \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox