* [PATCH RFC 01/11] x86/msr: Switch rdmsr_on_cpu() to return a 64-bit quantity
2026-04-28 10:41 [PATCH RFC 00/11] x86/msr: Reduce MSR access interfaces Juergen Gross
@ 2026-04-28 10:41 ` Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 02/11] x86/msr: Switch all callers of rdmsrq_on_cpu() to use rdmsr_on_cpu() Juergen Gross
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2026-04-28 10:41 UTC (permalink / raw)
To: linux-kernel, x86, linux-edac, linux-pm, linux-hwmon
Cc: Juergen Gross, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen, H. Peter Anvin, Tony Luck, Rafael J. Wysocki,
Viresh Kumar, Guenter Roeck, Daniel Lezcano, Zhang Rui,
Lukasz Luba
In order to prepare for retiring rdmsrq_on_cpu(), switch rdmsr_on_cpu()
to have the same interface as rdmsrq_on_cpu().
Switch all rdmsr_on_cpu() callers to use the new interface.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
arch/x86/include/asm/msr.h | 8 ++---
arch/x86/kernel/cpu/mce/amd.c | 6 ++--
arch/x86/kernel/cpu/mce/inject.c | 8 ++---
arch/x86/lib/msr-smp.c | 5 ++-
drivers/cpufreq/amd_freq_sensitivity.c | 4 +--
drivers/cpufreq/p4-clockmod.c | 32 ++++++++++----------
drivers/cpufreq/speedstep-centrino.c | 27 +++++++++--------
drivers/hwmon/coretemp.c | 12 ++++----
drivers/thermal/intel/x86_pkg_temp_thermal.c | 22 ++++++++------
9 files changed, 63 insertions(+), 61 deletions(-)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9c2ea29e12a9..fcdaeddf4337 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -256,7 +256,7 @@ int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
@@ -269,9 +269,9 @@ int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
-static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
- rdmsr(msr_no, *l, *h);
+ rdmsrq(msr_no, *q);
return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
@@ -292,7 +292,7 @@ static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr __percpu *msrs)
{
- rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
+ rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->q));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr __percpu *msrs)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 6605a0224659..580e90e74e9e 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -969,13 +969,13 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
- u32 lo, hi;
+ struct msr val;
/* CPU might be offline by now */
- if (rdmsr_on_cpu(b->cpu, b->address, &lo, &hi))
+ if (rdmsr_on_cpu(b->cpu, b->address, &val.q))
return -ENODEV;
- return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
+ return sprintf(buf, "%u\n", ((val.h & THRESHOLD_MAX) -
(THRESHOLD_MAX - b->threshold_limit)));
}
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index d02c4f556cd0..fa13a8a4946b 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -316,18 +316,18 @@ static struct notifier_block inject_nb = {
*/
static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
{
- u32 l, h;
+ struct msr val;
int err;
- err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
+ err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &val.q);
if (err) {
pr_err("%s: error reading HWCR\n", __func__);
return err;
}
- enable ? (l |= BIT(18)) : (l &= ~BIT(18));
+ enable ? (val.l |= BIT(18)) : (val.l &= ~BIT(18));
- err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
+ err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, val.l, val.h);
if (err)
pr_err("%s: error writing HWCR\n", __func__);
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index b8f63419e6ae..6e04aabda863 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -31,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
wrmsr(rv->msr_no, reg->l, reg->h);
}
-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
int err;
struct msr_info rv;
@@ -40,8 +40,7 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
rv.msr_no = msr_no;
err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
- *l = rv.reg.l;
- *h = rv.reg.h;
+ *q = rv.reg.q;
return err;
}
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 13fed4b9e02b..63896478dcab 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -52,9 +52,9 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
- &actual.l, &actual.h);
+ &actual.q);
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
- &reference.l, &reference.h);
+ &reference.q);
actual.h &= 0x00ffffff;
reference.h &= 0x00ffffff;
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 69c19233fcd4..393c4a5d2021 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -51,24 +51,24 @@ static unsigned int cpufreq_p4_get(unsigned int cpu);
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
- u32 l, h;
+ struct msr val;
if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
return -EINVAL;
- rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &val.q);
- if (l & 0x01)
+ if (val.l & 0x01)
pr_debug("CPU#%d currently thermal throttled\n", cpu);
if (has_N44_O17_errata[cpu] &&
(newstate == DC_25PT || newstate == DC_DFLT))
newstate = DC_38PT;
- rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &val.q);
if (newstate == DC_DISABLE) {
pr_debug("CPU#%d disabling modulation\n", cpu);
- wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, val.l & ~(1<<4), val.h);
} else {
pr_debug("CPU#%d setting duty cycle to %d%%\n",
cpu, ((125 * newstate) / 10));
@@ -77,9 +77,9 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
* bits 3-1 : duty cycle
* bit 0 : reserved
*/
- l = (l & ~14);
- l = l | (1<<4) | ((newstate & 0x7)<<1);
- wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
+ val.l = (val.l & ~14);
+ val.l = val.l | (1<<4) | ((newstate & 0x7)<<1);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, val.l, val.h);
}
return 0;
@@ -205,18 +205,18 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
- u32 l, h;
+ struct msr val;
- rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &val.q);
- if (l & 0x10) {
- l = l >> 1;
- l &= 0x7;
+ if (val.l & 0x10) {
+ val.l = val.l >> 1;
+ val.l &= 0x7;
} else
- l = DC_DISABLE;
+ val.l = DC_DISABLE;
- if (l != DC_DISABLE)
- return stock_freq * l / 8;
+ if (val.l != DC_DISABLE)
+ return stock_freq * val.l / 8;
return stock_freq;
}
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3e6e85a92212..b74c85128377 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -322,11 +322,11 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
/* Return the current CPU frequency in kHz */
static unsigned int get_cur_freq(unsigned int cpu)
{
- unsigned l, h;
+ struct msr val;
unsigned clock_freq;
- rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
- clock_freq = extract_clock(l, cpu, 0);
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &val.q);
+ clock_freq = extract_clock(val.l, cpu, 0);
if (unlikely(clock_freq == 0)) {
/*
@@ -335,8 +335,8 @@ static unsigned int get_cur_freq(unsigned int cpu)
* P-state transition (like TM2). Get the last freq set
* in PERF_CTL.
*/
- rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
- clock_freq = extract_clock(l, cpu, 1);
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &val.q);
+ clock_freq = extract_clock(val.l, cpu, 1);
}
return clock_freq;
}
@@ -417,7 +417,8 @@ static void centrino_cpu_exit(struct cpufreq_policy *policy)
*/
static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
+ unsigned int msr, cpu = policy->cpu;
+ struct msr oldmsr = { .q = 0 };
int retval = 0;
unsigned int j, first_cpu;
struct cpufreq_frequency_table *op_points;
@@ -459,22 +460,22 @@ static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
msr = op_points->driver_data;
if (first_cpu) {
- rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
- if (msr == (oldmsr & 0xffff)) {
+ rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr.q);
+ if (msr == (oldmsr.l & 0xffff)) {
pr_debug("no change needed - msr was and needs "
- "to be %x\n", oldmsr);
+ "to be %x\n", oldmsr.l);
retval = 0;
goto out;
}
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
- oldmsr &= ~0xffff;
+ oldmsr.l &= ~0xffff;
msr &= 0xffff;
- oldmsr |= msr;
+ oldmsr.l |= msr;
}
- wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
+ wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr.l, oldmsr.h);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
@@ -490,7 +491,7 @@ static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
*/
for_each_cpu(j, covered_cpus)
- wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
+ wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr.l, oldmsr.h);
}
retval = 0;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6a0d94711ead..fa02960ffff5 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -356,15 +356,15 @@ static ssize_t show_label(struct device *dev,
static ssize_t show_crit_alarm(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- u32 eax, edx;
+ struct msr val;
struct temp_data *tdata = container_of(devattr, struct temp_data,
sd_attrs[ATTR_CRIT_ALARM]);
mutex_lock(&tdata->update_lock);
- rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+ rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &val.q);
mutex_unlock(&tdata->update_lock);
- return sprintf(buf, "%d\n", (eax >> 5) & 1);
+ return sprintf(buf, "%d\n", (val.l >> 5) & 1);
}
static ssize_t show_tjmax(struct device *dev,
@@ -398,7 +398,7 @@ static ssize_t show_ttarget(struct device *dev,
static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- u32 eax, edx;
+ struct msr val;
struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TEMP]);
int tjmax;
@@ -407,14 +407,14 @@ static ssize_t show_temp(struct device *dev,
tjmax = get_tjmax(tdata, dev);
/* Check whether the time interval has elapsed */
if (time_after(jiffies, tdata->last_updated + HZ)) {
- rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+ rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &val.q);
/*
* Ignore the valid bit. In all observed cases the register
* value is either low or zero if the valid bit is 0.
* Return it instead of reporting an error which doesn't
* really help at all.
*/
- tdata->temp = tjmax - ((eax >> 16) & 0xff) * 1000;
+ tdata->temp = tjmax - ((val.l >> 16) & 0xff) * 1000;
tdata->last_updated = jiffies;
}
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 540109761f0a..fc7dbba4f9ca 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -125,8 +125,9 @@ sys_set_trip_temp(struct thermal_zone_device *tzd,
{
struct zone_device *zonedev = thermal_zone_device_priv(tzd);
unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);
- u32 l, h, mask, shift, intr;
+ u32 mask, shift, intr;
int tj_max, val, ret;
+ struct msr v;
if (temp == THERMAL_TEMP_INVALID)
temp = 0;
@@ -142,7 +143,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd,
return -EINVAL;
ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
- &l, &h);
+ &v.q);
if (ret < 0)
return ret;
@@ -155,20 +156,20 @@ sys_set_trip_temp(struct thermal_zone_device *tzd,
shift = THERM_SHIFT_THRESHOLD0;
intr = THERM_INT_THRESHOLD0_ENABLE;
}
- l &= ~mask;
+ v.l &= ~mask;
/*
* When users space sets a trip temperature == 0, which is indication
* that, it is no longer interested in receiving notifications.
*/
if (!temp) {
- l &= ~intr;
+ v.l &= ~intr;
} else {
- l |= val << shift;
- l |= intr;
+ v.l |= val << shift;
+ v.l |= intr;
}
return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
- l, h);
+ v.l, v.h);
}
/* Thermal zone callback registry */
@@ -277,7 +278,8 @@ static int pkg_temp_thermal_trips_init(int cpu, int tj_max,
struct thermal_trip *trips, int num_trips)
{
unsigned long thres_reg_value;
- u32 mask, shift, eax, edx;
+ u32 mask, shift;
+ struct msr val;
int ret, i;
for (i = 0; i < num_trips; i++) {
@@ -291,11 +293,11 @@ static int pkg_temp_thermal_trips_init(int cpu, int tj_max,
}
ret = rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
- &eax, &edx);
+ &val.q);
if (ret < 0)
return ret;
- thres_reg_value = (eax & mask) >> shift;
+ thres_reg_value = (val.l & mask) >> shift;
trips[i].temperature = thres_reg_value ?
tj_max - thres_reg_value * 1000 : THERMAL_TEMP_INVALID;
--
2.53.0
^ permalink raw reply related [flat|nested] 5+ messages in thread* [PATCH RFC 02/11] x86/msr: Switch all callers of rdmsrq_on_cpu() to use rdmsr_on_cpu()
2026-04-28 10:41 [PATCH RFC 00/11] x86/msr: Reduce MSR access interfaces Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 01/11] x86/msr: Switch rdmsr_on_cpu() to return a 64-bit quantity Juergen Gross
@ 2026-04-28 10:41 ` Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 03/11] x86/msr: Switch wrmsr_on_cpu() to use a 64-bit quantity Juergen Gross
2026-04-28 10:42 ` [PATCH RFC 11/11] x86/cpu/mce: Switch code to use 64-bit rdmsr/wrmsr() variants Juergen Gross
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2026-04-28 10:41 UTC (permalink / raw)
To: linux-kernel, x86, linux-perf-users, linux-edac, linux-pm,
platform-driver-x86
Cc: Juergen Gross, Peter Zijlstra, Ingo Molnar,
Arnaldo Carvalho de Melo, Namhyung Kim, Mark Rutland,
Alexander Shishkin, Jiri Olsa, Ian Rogers, Adrian Hunter,
James Clark, Thomas Gleixner, Borislav Petkov, Dave Hansen,
H. Peter Anvin, Tony Luck, Rafael J. Wysocki, Viresh Kumar,
Huang Rui, Mario Limonciello, Perry Yuan, K Prateek Nayak,
Srinivas Pandruvada, Len Brown, Hans de Goede, Ilpo Järvinen
Now that rdmsr_on_cpu() has the same interface as rdmsrq_on_cpu(), the
callers of rdmsrq_on_cpu() can be switched to rdmsr_on_cpu() and
rdmsrq_on_cpu() can be removed.
At the same time, switch the only user of rdmsrl_on_cpu() to
rdmsr_on_cpu() and drop rdmsrl_on_cpu(), too.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
arch/x86/events/intel/uncore_snbep.c | 2 +-
arch/x86/include/asm/msr.h | 7 ------
arch/x86/kernel/cpu/intel_epb.c | 4 ++--
arch/x86/kernel/cpu/mce/inject.c | 4 ++--
arch/x86/kernel/cpu/microcode/intel.c | 2 +-
arch/x86/lib/msr-smp.c | 15 -------------
drivers/cpufreq/acpi-cpufreq.c | 4 ++--
drivers/cpufreq/amd-pstate.c | 8 +++----
drivers/cpufreq/intel_pstate.c | 22 +++++++++----------
.../intel/uncore-frequency/uncore-frequency.c | 6 ++---
10 files changed, 26 insertions(+), 48 deletions(-)
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 215d33e260ed..fee94698b611 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3695,7 +3695,7 @@ static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
{
u64 msr_value;
- if (rdmsrq_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
+ if (rdmsr_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
return -ENXIO;
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index fcdaeddf4337..8c96fc5c6169 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -258,7 +258,6 @@ int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
@@ -279,11 +278,6 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
wrmsr(msr_no, l, h);
return 0;
}
-static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-{
- rdmsrq(msr_no, *q);
- return 0;
-}
static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
wrmsrq(msr_no, q);
@@ -329,7 +323,6 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
/* Compatibility wrappers: */
#define rdmsrl(msr, val) rdmsrq(msr, val)
#define wrmsrl(msr, val) wrmsrq(msr, val)
-#define rdmsrl_on_cpu(cpu, msr, q) rdmsrq_on_cpu(cpu, msr, q)
#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index 2c56f8730f59..cb5a3c299f26 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -139,7 +139,7 @@ static ssize_t energy_perf_bias_show(struct device *dev,
u64 epb;
int ret;
- ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsr_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret < 0)
return ret;
@@ -161,7 +161,7 @@ static ssize_t energy_perf_bias_store(struct device *dev,
else if (kstrtou64(buf, 0, &val) || val > MAX_EPB)
return -EINVAL;
- ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsr_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret < 0)
return ret;
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index fa13a8a4946b..78649651c987 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -590,7 +590,7 @@ static int inj_bank_set(void *data, u64 val)
u64 cap;
/* Get bank count on target CPU so we can handle non-uniform values. */
- rdmsrq_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
+ rdmsr_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
n_banks = cap & MCG_BANKCNT_MASK;
if (val >= n_banks) {
@@ -614,7 +614,7 @@ static int inj_bank_set(void *data, u64 val)
if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
u64 ipid;
- if (rdmsrq_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) {
+ if (rdmsr_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) {
pr_err("Error reading IPID on CPU%d\n", m->extcpu);
return -EINVAL;
}
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 37ac4afe0972..b05e751ffcca 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -660,7 +660,7 @@ static void stage_microcode(void)
pkg_id = topology_logical_package_id(cpu);
- err = rdmsrq_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &mmio_pa);
+ err = rdmsr_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &mmio_pa);
if (WARN_ON_ONCE(err))
return;
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 6e04aabda863..7c96f003bfe0 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -46,21 +46,6 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
}
EXPORT_SYMBOL(rdmsr_on_cpu);
-int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
- *q = rv.reg.q;
-
- return err;
-}
-EXPORT_SYMBOL(rdmsrq_on_cpu);
-
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
int err;
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 21639d9ac753..43bf1c21c4ca 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -79,11 +79,11 @@ static bool boost_state(unsigned int cpu)
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
+ rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 453084c67327..a6fc22f770c3 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -208,7 +208,7 @@ static u8 msr_get_epp(struct amd_cpudata *cpudata)
u64 value;
int ret;
- ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ ret = rdmsr_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret < 0) {
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
return ret;
@@ -382,7 +382,7 @@ static int amd_pstate_init_floor_perf(struct cpufreq_policy *policy)
if (!cpu_feature_enabled(X86_FEATURE_CPPC_PERF_PRIO))
return 0;
- ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ2, &value);
+ ret = rdmsr_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ2, &value);
if (ret) {
pr_err("failed to read CPPC REQ2 value. Error (%d)\n", ret);
return ret;
@@ -480,7 +480,7 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+ ret = rdmsr_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
if (ret)
return ret;
@@ -881,7 +881,7 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ ret = rdmsr_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
ret = -EIO;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1292da53e5fc..e5b30a53c49a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -632,8 +632,8 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
* MSR_HWP_REQUEST, so need to read and get EPP.
*/
if (!hwp_req_data) {
- epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
- &hwp_req_data);
+ epp = rdmsr_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ &hwp_req_data);
if (epp)
return epp;
}
@@ -886,7 +886,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
if (ratio <= 0) {
u64 cap;
- rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsr_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
@@ -1187,7 +1187,7 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
- rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsr_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(cpu->hwp_cap_cached, cap);
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
@@ -1269,7 +1269,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
- rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ rdmsr_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
@@ -2156,7 +2156,7 @@ static int core_get_min_pstate(int cpu)
{
u64 value;
- rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsr_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 40) & 0xFF;
}
@@ -2164,7 +2164,7 @@ static int core_get_max_pstate_physical(int cpu)
{
u64 value;
- rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsr_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 8) & 0xFF;
}
@@ -2209,7 +2209,7 @@ static int core_get_max_pstate(int cpu)
int tdp_ratio;
int err;
- rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+ rdmsr_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
@@ -2241,7 +2241,7 @@ static int core_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsr_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (value) & 255;
if (ret <= nont)
@@ -2264,7 +2264,7 @@ static int knl_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsr_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -3318,7 +3318,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
intel_pstate_get_hwp_cap(cpu);
- rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ rdmsr_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index 667f2c8b9594..b9878a4d391b 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -52,7 +52,7 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu
if (data->control_cpu < 0)
return -ENXIO;
- ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ ret = rdmsr_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
if (ret)
return ret;
@@ -77,7 +77,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
if (data->control_cpu < 0)
return -ENXIO;
- ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ ret = rdmsr_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
if (ret)
return ret;
@@ -106,7 +106,7 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
if (data->control_cpu < 0)
return -ENXIO;
- ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
+ ret = rdmsr_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
if (ret)
return ret;
--
2.53.0
^ permalink raw reply related [flat|nested] 5+ messages in thread* [PATCH RFC 03/11] x86/msr: Switch wrmsr_on_cpu() to use a 64-bit quantity
2026-04-28 10:41 [PATCH RFC 00/11] x86/msr: Reduce MSR access interfaces Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 01/11] x86/msr: Switch rdmsr_on_cpu() to return a 64-bit quantity Juergen Gross
2026-04-28 10:41 ` [PATCH RFC 02/11] x86/msr: Switch all callers of rdmsrq_on_cpu() to use rdmsr_on_cpu() Juergen Gross
@ 2026-04-28 10:41 ` Juergen Gross
2026-04-28 10:42 ` [PATCH RFC 11/11] x86/cpu/mce: Switch code to use 64-bit rdmsr/wrmsr() variants Juergen Gross
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2026-04-28 10:41 UTC (permalink / raw)
To: linux-kernel, x86, linux-perf-users, linux-edac, linux-pm
Cc: Juergen Gross, Peter Zijlstra, Ingo Molnar,
Arnaldo Carvalho de Melo, Namhyung Kim, Mark Rutland,
Alexander Shishkin, Jiri Olsa, Ian Rogers, Adrian Hunter,
James Clark, Thomas Gleixner, Borislav Petkov, Dave Hansen,
H. Peter Anvin, Tony Luck, Rafael J. Wysocki, Viresh Kumar,
Daniel Lezcano, Zhang Rui, Lukasz Luba
In order to prepare for retiring wrmsrq_on_cpu(), switch wrmsr_on_cpu()
to have the same interface as wrmsrq_on_cpu().
Switch all wrmsr_on_cpu() callers to use the new interface.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
arch/x86/events/intel/ds.c | 11 ++++-------
arch/x86/include/asm/msr.h | 8 ++++----
arch/x86/kernel/cpu/mce/inject.c | 2 +-
arch/x86/lib/msr-smp.c | 5 ++---
drivers/cpufreq/p4-clockmod.c | 4 ++--
drivers/cpufreq/speedstep-centrino.c | 4 ++--
drivers/thermal/intel/x86_pkg_temp_thermal.c | 2 +-
7 files changed, 16 insertions(+), 20 deletions(-)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 7f0d515c07c5..06d6d06c7a75 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -780,9 +780,7 @@ void init_debug_store_on_cpu(int cpu)
if (!ds)
return;
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
- (u32)((u64)(unsigned long)ds),
- (u32)((u64)(unsigned long)ds >> 32));
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, (u64)(unsigned long)ds);
}
void fini_debug_store_on_cpu(int cpu)
@@ -790,7 +788,7 @@ void fini_debug_store_on_cpu(int cpu)
if (!per_cpu(cpu_hw_events, cpu).ds)
return;
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0);
}
static DEFINE_PER_CPU(void *, insn_buffer);
@@ -1095,8 +1093,7 @@ void init_arch_pebs_on_cpu(int cpu)
* contiguous physical buffer (__alloc_pages_node() with order)
*/
arch_pebs_base = virt_to_phys(cpuc->pebs_vaddr) | PEBS_BUFFER_SHIFT;
- wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, (u32)arch_pebs_base,
- (u32)(arch_pebs_base >> 32));
+ wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, arch_pebs_base);
x86_pmu.pebs_active = 1;
}
@@ -1105,7 +1102,7 @@ inline void fini_arch_pebs_on_cpu(int cpu)
if (!x86_pmu.arch_pebs)
return;
- wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, 0, 0);
+ wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, 0);
}
/*
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 8c96fc5c6169..a004440b4c0a 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -257,7 +257,7 @@ int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
@@ -273,9 +273,9 @@ static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
rdmsrq(msr_no, *q);
return 0;
}
-static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
- wrmsr(msr_no, l, h);
+ wrmsrq(msr_no, q);
return 0;
}
static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
@@ -291,7 +291,7 @@ static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr __percpu *msrs)
{
- wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
+ wrmsrq_on_cpu(0, msr_no, raw_cpu_read(msrs->q));
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
u32 *l, u32 *h)
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 78649651c987..2d75098211b3 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -327,7 +327,7 @@ static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
enable ? (val.l |= BIT(18)) : (val.l &= ~BIT(18));
- err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, val.l, val.h);
+ err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, val.q);
if (err)
pr_err("%s: error writing HWCR\n", __func__);
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 7c96f003bfe0..0b4f3c4e4f82 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -46,7 +46,7 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
}
EXPORT_SYMBOL(rdmsr_on_cpu);
-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
int err;
struct msr_info rv;
@@ -54,8 +54,7 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
memset(&rv, 0, sizeof(rv));
rv.msr_no = msr_no;
- rv.reg.l = l;
- rv.reg.h = h;
+ rv.reg.q = q;
err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
return err;
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 393c4a5d2021..409c0210e48a 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -68,7 +68,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &val.q);
if (newstate == DC_DISABLE) {
pr_debug("CPU#%d disabling modulation\n", cpu);
- wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, val.l & ~(1<<4), val.h);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, val.q & ~(1ULL << 4));
} else {
pr_debug("CPU#%d setting duty cycle to %d%%\n",
cpu, ((125 * newstate) / 10));
@@ -79,7 +79,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
*/
val.l = (val.l & ~14);
val.l = val.l | (1<<4) | ((newstate & 0x7)<<1);
- wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, val.l, val.h);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, val.q);
}
return 0;
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index b74c85128377..121cddb1430f 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -475,7 +475,7 @@ static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
oldmsr.l |= msr;
}
- wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr.l, oldmsr.h);
+ wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr.q);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
@@ -491,7 +491,7 @@ static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
*/
for_each_cpu(j, covered_cpus)
- wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr.l, oldmsr.h);
+ wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr.q);
}
retval = 0;
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index fc7dbba4f9ca..e52d35015486 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -169,7 +169,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd,
}
return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
- v.l, v.h);
+ v.q);
}
/* Thermal zone callback registry */
--
2.53.0
^ permalink raw reply related [flat|nested] 5+ messages in thread

* [PATCH RFC 11/11] x86/cpu/mce: Switch code to use 64-bit rdmsr/wrmsr() variants
2026-04-28 10:41 [PATCH RFC 00/11] x86/msr: Reduce MSR access interfaces Juergen Gross
` (2 preceding siblings ...)
2026-04-28 10:41 ` [PATCH RFC 03/11] x86/msr: Switch wrmsr_on_cpu() to use a 64-bit quantity Juergen Gross
@ 2026-04-28 10:42 ` Juergen Gross
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2026-04-28 10:42 UTC (permalink / raw)
To: linux-kernel, x86, linux-edac
Cc: Juergen Gross, Tony Luck, Borislav Petkov, Thomas Gleixner,
Ingo Molnar, Dave Hansen, H. Peter Anvin
Switch the x86 MCE code to use the new 64-bit variants of rdmsr(),
rdmsr_safe(), wrmsr() and wrmsr_safe().
Signed-off-by: Juergen Gross <jgross@suse.com>
---
arch/x86/kernel/cpu/mce/amd.c | 95 ++++++++++++++++---------------
arch/x86/kernel/cpu/mce/core.c | 18 +++---
arch/x86/kernel/cpu/mce/inject.c | 28 ++++-----
arch/x86/kernel/cpu/mce/intel.c | 32 +++++------
arch/x86/kernel/cpu/mce/p5.c | 16 +++---
arch/x86/kernel/cpu/mce/winchip.c | 10 ++--
6 files changed, 100 insertions(+), 99 deletions(-)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 580e90e74e9e..0a462e5952a4 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -280,11 +280,11 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
u8 *bank_counts = this_cpu_ptr(smca_bank_counts);
const struct smca_hwid *s_hwid;
unsigned int i, hwid_mcatype;
- u32 high, low;
+ struct msr val;
u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);
/* Set appropriate bits in MCA_CONFIG */
- if (!rdmsr_safe(smca_config, &low, &high)) {
+ if (!rdmsr_safe(smca_config, &val.q)) {
/*
* OS is required to set the MCAX bit to acknowledge that it is
* now using the new MSR ranges and new registers under each
@@ -294,7 +294,7 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
*
* MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
*/
- high |= BIT(0);
+ val.h |= BIT(0);
/*
* SMCA sets the Deferred Error Interrupt type per bank.
@@ -307,9 +307,9 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
* APIC based interrupt. First, check that no interrupt has been
* set.
*/
- if ((low & BIT(5)) && !((high >> 5) & 0x3) && data->dfr_intr_en) {
+ if ((val.l & BIT(5)) && !((val.h >> 5) & 0x3) && data->dfr_intr_en) {
__set_bit(bank, data->dfr_intr_banks);
- high |= BIT(5);
+ val.h |= BIT(5);
}
/*
@@ -324,33 +324,33 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
* The OS should set this to inform the platform that the OS is ready
* to handle the MCA Thresholding interrupt.
*/
- if ((low & BIT(10)) && data->thr_intr_en) {
+ if ((val.l & BIT(10)) && data->thr_intr_en) {
__set_bit(bank, data->thr_intr_banks);
- high |= BIT(8);
+ val.h |= BIT(8);
}
- this_cpu_ptr(mce_banks_array)[bank].lsb_in_status = !!(low & BIT(8));
+ this_cpu_ptr(mce_banks_array)[bank].lsb_in_status = !!(val.l & BIT(8));
- if (low & MCI_CONFIG_PADDRV)
+ if (val.l & MCI_CONFIG_PADDRV)
this_cpu_ptr(smca_banks)[bank].paddrv = 1;
- wrmsr(smca_config, low, high);
+ wrmsr(smca_config, val.q);
}
- if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
+ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &val.q)) {
pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
return;
}
- hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
- (high & MCI_IPID_MCATYPE) >> 16);
+ hwid_mcatype = HWID_MCATYPE(val.h & MCI_IPID_HWID,
+ (val.h & MCI_IPID_MCATYPE) >> 16);
for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
s_hwid = &smca_hwid_mcatypes[i];
if (hwid_mcatype == s_hwid->hwid_mcatype) {
this_cpu_ptr(smca_banks)[bank].hwid = s_hwid;
- this_cpu_ptr(smca_banks)[bank].id = low;
+ this_cpu_ptr(smca_banks)[bank].id = val.l;
this_cpu_ptr(smca_banks)[bank].sysfs_id = bank_counts[s_hwid->bank_type]++;
break;
}
@@ -432,50 +432,50 @@ static bool lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
static void threshold_restart_block(void *_tr)
{
struct thresh_restart *tr = _tr;
- u32 hi, lo;
+ struct msr val;
/* sysfs write might race against an offline operation */
if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
return;
- rdmsr(tr->b->address, lo, hi);
+ val.q = rdmsr(tr->b->address);
/*
* Reset error count and overflow bit.
* This is done during init or after handling an interrupt.
*/
- if (hi & MASK_OVERFLOW_HI || tr->set_lvt_off) {
- hi &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI);
- hi |= THRESHOLD_MAX - tr->b->threshold_limit;
+ if (val.h & MASK_OVERFLOW_HI || tr->set_lvt_off) {
+ val.h &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI);
+ val.h |= THRESHOLD_MAX - tr->b->threshold_limit;
} else if (tr->old_limit) { /* change limit w/o reset */
- int new_count = (hi & THRESHOLD_MAX) +
+ int new_count = (val.h & THRESHOLD_MAX) +
(tr->old_limit - tr->b->threshold_limit);
- hi = (hi & ~MASK_ERR_COUNT_HI) |
+ val.h = (val.h & ~MASK_ERR_COUNT_HI) |
(new_count & THRESHOLD_MAX);
}
/* clear IntType */
- hi &= ~MASK_INT_TYPE_HI;
+ val.h &= ~MASK_INT_TYPE_HI;
if (!tr->b->interrupt_capable)
goto done;
if (tr->set_lvt_off) {
- if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
+ if (lvt_off_valid(tr->b, tr->lvt_off, val.l, val.h)) {
/* set new lvt offset */
- hi &= ~MASK_LVTOFF_HI;
- hi |= tr->lvt_off << 20;
+ val.h &= ~MASK_LVTOFF_HI;
+ val.h |= tr->lvt_off << 20;
}
}
if (tr->b->interrupt_enable)
- hi |= INT_TYPE_APIC;
+ val.h |= INT_TYPE_APIC;
done:
- hi |= MASK_COUNT_EN_HI;
- wrmsr(tr->b->address, lo, hi);
+ val.h |= MASK_COUNT_EN_HI;
+ wrmsr(tr->b->address, val.q);
}
static void threshold_restart_bank(unsigned int bank, bool intr_en)
@@ -658,12 +658,12 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
return;
}
- rdmsrq(MSR_K7_HWCR, hwcr);
+ hwcr = rdmsr(MSR_K7_HWCR);
/* McStatusWrEn has to be set */
need_toggle = !(hwcr & BIT(18));
if (need_toggle)
- wrmsrq(MSR_K7_HWCR, hwcr | BIT(18));
+ wrmsr(MSR_K7_HWCR, hwcr | BIT(18));
/* Clear CntP bit safely */
for (i = 0; i < num_msrs; i++)
@@ -671,7 +671,7 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
/* restore old settings */
if (need_toggle)
- wrmsrq(MSR_K7_HWCR, hwcr);
+ wrmsr(MSR_K7_HWCR, hwcr);
}
static void amd_apply_cpu_quirks(struct cpuinfo_x86 *c)
@@ -710,7 +710,7 @@ static void smca_enable_interrupt_vectors(void)
if (!mce_flags.smca || !mce_flags.succor)
return;
- if (rdmsrq_safe(MSR_CU_DEF_ERR, &mca_intr_cfg))
+ if (rdmsr_safe(MSR_CU_DEF_ERR, &mca_intr_cfg))
return;
offset = (mca_intr_cfg & SMCA_THR_LVT_OFF) >> 12;
@@ -726,7 +726,8 @@ static void smca_enable_interrupt_vectors(void)
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
unsigned int bank, block, cpu = smp_processor_id();
- u32 low = 0, high = 0, address = 0;
+ struct msr val = { .q = 0 };
+ u32 address = 0;
int offset = -1;
amd_apply_cpu_quirks(c);
@@ -746,21 +747,21 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
disable_err_thresholding(c, bank);
for (block = 0; block < NR_BLOCKS; ++block) {
- address = get_block_address(address, low, high, bank, block, cpu);
+ address = get_block_address(address, val.l, val.h, bank, block, cpu);
if (!address)
break;
- if (rdmsr_safe(address, &low, &high))
+ if (rdmsr_safe(address, &val.q))
break;
- if (!(high & MASK_VALID_HI))
+ if (!(val.h & MASK_VALID_HI))
continue;
- if (!(high & MASK_CNTP_HI) ||
- (high & MASK_LOCKED_HI))
+ if (!(val.h & MASK_CNTP_HI) ||
+ (val.h & MASK_LOCKED_HI))
continue;
- offset = prepare_threshold_block(bank, block, address, offset, high);
+ offset = prepare_threshold_block(bank, block, address, offset, val.h);
}
}
}
@@ -1083,24 +1084,24 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
u32 address)
{
struct threshold_block *b = NULL;
- u32 low, high;
+ struct msr val;
int err;
if ((bank >= this_cpu_read(mce_num_banks)) || (block >= NR_BLOCKS))
return 0;
- if (rdmsr_safe(address, &low, &high))
+ if (rdmsr_safe(address, &val.q))
return 0;
- if (!(high & MASK_VALID_HI)) {
+ if (!(val.h & MASK_VALID_HI)) {
if (block)
goto recurse;
else
return 0;
}
- if (!(high & MASK_CNTP_HI) ||
- (high & MASK_LOCKED_HI))
+ if (!(val.h & MASK_CNTP_HI) ||
+ (val.h & MASK_LOCKED_HI))
goto recurse;
b = kzalloc_obj(struct threshold_block);
@@ -1112,7 +1113,7 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
b->cpu = cpu;
b->address = address;
b->interrupt_enable = 0;
- b->interrupt_capable = lvt_interrupt_supported(bank, high);
+ b->interrupt_capable = lvt_interrupt_supported(bank, val.h);
b->threshold_limit = get_thr_limit();
if (b->interrupt_capable) {
@@ -1124,13 +1125,13 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
list_add(&b->miscj, &tb->miscj);
- mce_threshold_block_init(b, (high & MASK_LVTOFF_HI) >> 20);
+ mce_threshold_block_init(b, (val.h & MASK_LVTOFF_HI) >> 20);
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b));
if (err)
goto out_free;
recurse:
- address = get_block_address(address, low, high, bank, ++block, cpu);
+ address = get_block_address(address, val.l, val.h, bank, ++block, cpu);
if (!address)
return 0;
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 8dd424ac5de8..6f684df5db36 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1871,7 +1871,7 @@ static void __mcheck_cpu_cap_init(void)
u64 cap;
u8 b;
- rdmsrq(MSR_IA32_MCG_CAP, cap);
+ cap = rdmsr(MSR_IA32_MCG_CAP);
b = cap & MCG_BANKCNT_MASK;
@@ -1890,9 +1890,9 @@ static void __mcheck_cpu_init_generic(void)
{
u64 cap;
- rdmsrq(MSR_IA32_MCG_CAP, cap);
+ cap = rdmsr(MSR_IA32_MCG_CAP);
if (cap & MCG_CTL_P)
- wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
+ wrmsr(MSR_IA32_MCG_CTL, ~0ULL);
}
static void __mcheck_cpu_init_prepare_banks(void)
@@ -1919,10 +1919,10 @@ static void __mcheck_cpu_init_prepare_banks(void)
if (!b->init)
continue;
- wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl);
- wrmsrq(mca_msr_reg(i, MCA_STATUS), 0);
+ wrmsr(mca_msr_reg(i, MCA_CTL), b->ctl);
+ wrmsr(mca_msr_reg(i, MCA_STATUS), 0);
- rdmsrq(mca_msr_reg(i, MCA_CTL), msrval);
+ msrval = rdmsr(mca_msr_reg(i, MCA_CTL));
b->init = !!msrval;
}
}
@@ -2240,7 +2240,7 @@ void mca_bsp_init(struct cpuinfo_x86 *c)
if (mce_flags.smca)
smca_bsp_init();
- rdmsrq(MSR_IA32_MCG_CAP, cap);
+ cap = rdmsr(MSR_IA32_MCG_CAP);
/* Use accurate RIP reporting if available. */
if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
@@ -2420,7 +2420,7 @@ static void mce_disable_error_reporting(void)
struct mce_bank *b = &mce_banks[i];
if (b->init)
- wrmsrq(mca_msr_reg(i, MCA_CTL), 0);
+ wrmsr(mca_msr_reg(i, MCA_CTL), 0);
}
return;
}
@@ -2776,7 +2776,7 @@ static void mce_reenable_cpu(void)
struct mce_bank *b = &mce_banks[i];
if (b->init)
- wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl);
+ wrmsr(mca_msr_reg(i, MCA_CTL), b->ctl);
}
}
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 2d75098211b3..63ecbda3554e 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -476,27 +476,27 @@ static void prepare_msrs(void *info)
struct mce m = *(struct mce *)info;
u8 b = m.bank;
- wrmsrq(MSR_IA32_MCG_STATUS, m.mcgstatus);
+ wrmsr(MSR_IA32_MCG_STATUS, m.mcgstatus);
if (boot_cpu_has(X86_FEATURE_SMCA)) {
if (m.inject_flags == DFR_INT_INJ) {
- wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
- wrmsrq(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
+ wrmsr(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
+ wrmsr(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
} else {
- wrmsrq(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
- wrmsrq(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
+ wrmsr(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
+ wrmsr(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
}
- wrmsrq(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
+ wrmsr(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
if (m.misc)
- wrmsrq(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
+ wrmsr(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
} else {
- wrmsrq(MSR_IA32_MCx_STATUS(b), m.status);
- wrmsrq(MSR_IA32_MCx_ADDR(b), m.addr);
+ wrmsr(MSR_IA32_MCx_STATUS(b), m.status);
+ wrmsr(MSR_IA32_MCx_ADDR(b), m.addr);
if (m.misc)
- wrmsrq(MSR_IA32_MCx_MISC(b), m.misc);
+ wrmsr(MSR_IA32_MCx_MISC(b), m.misc);
}
}
@@ -742,15 +742,15 @@ static void check_hw_inj_possible(void)
u64 status = MCI_STATUS_VAL, ipid;
/* Check whether bank is populated */
- rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), ipid);
+ ipid = rdmsr(MSR_AMD64_SMCA_MCx_IPID(bank));
if (!ipid)
continue;
toggle_hw_mce_inject(cpu, true);
- wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), status);
- rdmsrq_safe(mca_msr_reg(bank, MCA_STATUS), &status);
- wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), 0);
+ wrmsr_safe(mca_msr_reg(bank, MCA_STATUS), status);
+ rdmsr_safe(mca_msr_reg(bank, MCA_STATUS), &status);
+ wrmsr_safe(mca_msr_reg(bank, MCA_STATUS), 0);
if (!status) {
hw_injection_possible = false;
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 4655223ba560..98990c0a7a42 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -94,7 +94,7 @@ static bool cmci_supported(int *banks)
if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
return false;
- rdmsrq(MSR_IA32_MCG_CAP, cap);
+ cap = rdmsr(MSR_IA32_MCG_CAP);
*banks = min_t(unsigned, MAX_NR_BANKS, cap & MCG_BANKCNT_MASK);
return !!(cap & MCG_CMCI_P);
}
@@ -106,7 +106,7 @@ static bool lmce_supported(void)
if (mca_cfg.lmce_disabled)
return false;
- rdmsrq(MSR_IA32_MCG_CAP, tmp);
+ tmp = rdmsr(MSR_IA32_MCG_CAP);
/*
* LMCE depends on recovery support in the processor. Hence both
@@ -123,7 +123,7 @@ static bool lmce_supported(void)
* WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally
* locks the MSR in the event that it wasn't already locked by BIOS.
*/
- rdmsrq(MSR_IA32_FEAT_CTL, tmp);
+ tmp = rdmsr(MSR_IA32_FEAT_CTL);
if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED)))
return false;
@@ -141,9 +141,9 @@ static void cmci_set_threshold(int bank, int thresh)
u64 val;
raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
+ val = rdmsr(MSR_IA32_MCx_CTL2(bank));
val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
- wrmsrq(MSR_IA32_MCx_CTL2(bank), val | thresh);
+ wrmsr(MSR_IA32_MCx_CTL2(bank), val | thresh);
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
@@ -184,7 +184,7 @@ static bool cmci_skip_bank(int bank, u64 *val)
if (test_bit(bank, mce_banks_ce_disabled))
return true;
- rdmsrq(MSR_IA32_MCx_CTL2(bank), *val);
+ *val = rdmsr(MSR_IA32_MCx_CTL2(bank));
/* Already owned by someone else? */
if (*val & MCI_CTL2_CMCI_EN) {
@@ -232,8 +232,8 @@ static void cmci_claim_bank(int bank, u64 val, int bios_zero_thresh, int *bios_w
struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
val |= MCI_CTL2_CMCI_EN;
- wrmsrq(MSR_IA32_MCx_CTL2(bank), val);
- rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
+ wrmsr(MSR_IA32_MCx_CTL2(bank), val);
+ val = rdmsr(MSR_IA32_MCx_CTL2(bank));
/* If the enable bit did not stick, this bank should be polled. */
if (!(val & MCI_CTL2_CMCI_EN)) {
@@ -324,9 +324,9 @@ static void __cmci_disable_bank(int bank)
if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
return;
- rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
+ val = rdmsr(MSR_IA32_MCx_CTL2(bank));
val &= ~MCI_CTL2_CMCI_EN;
- wrmsrq(MSR_IA32_MCx_CTL2(bank), val);
+ wrmsr(MSR_IA32_MCx_CTL2(bank), val);
__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD)
@@ -430,10 +430,10 @@ void intel_init_lmce(void)
if (!lmce_supported())
return;
- rdmsrq(MSR_IA32_MCG_EXT_CTL, val);
+ val = rdmsr(MSR_IA32_MCG_EXT_CTL);
if (!(val & MCG_EXT_CTL_LMCE_EN))
- wrmsrq(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
+ wrmsr(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}
void intel_clear_lmce(void)
@@ -443,9 +443,9 @@ void intel_clear_lmce(void)
if (!lmce_supported())
return;
- rdmsrq(MSR_IA32_MCG_EXT_CTL, val);
+ val = rdmsr(MSR_IA32_MCG_EXT_CTL);
val &= ~MCG_EXT_CTL_LMCE_EN;
- wrmsrq(MSR_IA32_MCG_EXT_CTL, val);
+ wrmsr(MSR_IA32_MCG_EXT_CTL, val);
}
/*
@@ -460,10 +460,10 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
case INTEL_SANDYBRIDGE_X:
case INTEL_IVYBRIDGE_X:
case INTEL_HASWELL_X:
- if (rdmsrq_safe(MSR_ERROR_CONTROL, &error_control))
+ if (rdmsr_safe(MSR_ERROR_CONTROL, &error_control))
return;
error_control |= 2;
- wrmsrq_safe(MSR_ERROR_CONTROL, error_control);
+ wrmsr_safe(MSR_ERROR_CONTROL, error_control);
break;
}
}
diff --git a/arch/x86/kernel/cpu/mce/p5.c b/arch/x86/kernel/cpu/mce/p5.c
index 2272ad53fc33..619d92d56136 100644
--- a/arch/x86/kernel/cpu/mce/p5.c
+++ b/arch/x86/kernel/cpu/mce/p5.c
@@ -23,16 +23,16 @@ int mce_p5_enabled __read_mostly;
/* Machine check handler for Pentium class Intel CPUs: */
noinstr void pentium_machine_check(struct pt_regs *regs)
{
- u32 loaddr, hi, lotype;
+ struct msr addr, type;
instrumentation_begin();
- rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
- rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
+ addr.q = rdmsr(MSR_IA32_P5_MC_ADDR);
+ type.q = rdmsr(MSR_IA32_P5_MC_TYPE);
pr_emerg("CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
- smp_processor_id(), loaddr, lotype);
+ smp_processor_id(), addr.l, type.l);
- if (lotype & (1<<5)) {
+ if (type.l & (1<<5)) {
pr_emerg("CPU#%d: Possible thermal failure (CPU on fire ?).\n",
smp_processor_id());
}
@@ -44,7 +44,7 @@ noinstr void pentium_machine_check(struct pt_regs *regs)
/* Set up machine check reporting for processors with Intel style MCE: */
void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
- u32 l, h;
+ u64 val;
/* Default P5 to off as its often misconnected: */
if (!mce_p5_enabled)
@@ -55,8 +55,8 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
return;
/* Read registers before enabling: */
- rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
- rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
+ val = rdmsr(MSR_IA32_P5_MC_ADDR);
+ val = rdmsr(MSR_IA32_P5_MC_TYPE);
pr_info("Intel old style machine check architecture supported.\n");
/* Enable MCE: */
diff --git a/arch/x86/kernel/cpu/mce/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c
index 6c99f2941909..58942ad98fca 100644
--- a/arch/x86/kernel/cpu/mce/winchip.c
+++ b/arch/x86/kernel/cpu/mce/winchip.c
@@ -28,12 +28,12 @@ noinstr void winchip_machine_check(struct pt_regs *regs)
/* Set up machine check reporting on the Winchip C6 series */
void winchip_mcheck_init(struct cpuinfo_x86 *c)
{
- u32 lo, hi;
+ struct msr val;
- rdmsr(MSR_IDT_FCR1, lo, hi);
- lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */
- lo &= ~(1<<4); /* Enable MCE */
- wrmsr(MSR_IDT_FCR1, lo, hi);
+ val.q = rdmsr(MSR_IDT_FCR1);
+ val.l |= (1<<2); /* Enable EIERRINT (int 18 MCE) */
+ val.l &= ~(1<<4); /* Enable MCE */
+ wrmsr(MSR_IDT_FCR1, val.q);
cr4_set_bits(X86_CR4_MCE);
--
2.53.0
^ permalink raw reply related [flat|nested] 5+ messages in thread