From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Jan Beulich <JBeulich@suse.com>
Subject: [PATCH v2 3/4] xen/x86: Replace remaining mandatory barriers with SMP barriers
Date: Wed, 16 Aug 2017 12:22:09 +0100 [thread overview]
Message-ID: <1502882530-31700-4-git-send-email-andrew.cooper3@citrix.com> (raw)
In-Reply-To: <1502882530-31700-1-git-send-email-andrew.cooper3@citrix.com>
There is no functional change. Xen currently assigns smp_* meaning to
the non-smp_* barriers.
All of these uses are just to deal with shared memory between multiple
processors, so use the smp_*() variants, which are the correct barriers for the purpose.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
v2:
* Drop more unnecessary barriers, rather than converting them to smp
---
xen/arch/x86/acpi/cpu_idle.c | 8 ++++----
xen/arch/x86/cpu/mcheck/barrier.c | 10 +++++-----
xen/arch/x86/cpu/mcheck/mctelem.c | 4 ++--
xen/arch/x86/genapic/x2apic.c | 6 +++---
xen/arch/x86/hpet.c | 2 +-
xen/arch/x86/hvm/ioreq.c | 4 ++--
xen/arch/x86/irq.c | 4 ++--
xen/arch/x86/smpboot.c | 12 ++++++------
xen/arch/x86/time.c | 8 ++++----
xen/include/asm-x86/desc.h | 8 ++++----
xen/include/asm-x86/system.h | 2 +-
11 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 5879ad6..dea834c 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -390,9 +390,9 @@ void mwait_idle_with_hints(unsigned int eax, unsigned int ecx)
if ( boot_cpu_has(X86_FEATURE_CLFLUSH_MONITOR) )
{
- mb();
+ smp_mb();
clflush((void *)&mwait_wakeup(cpu));
- mb();
+ smp_mb();
}
__monitor((void *)&mwait_wakeup(cpu), 0, 0);
@@ -755,10 +755,10 @@ void acpi_dead_idle(void)
* instruction, hence memory fence is necessary to make sure all
* load/store visible before flush cache line.
*/
- mb();
+ smp_mb();
clflush(mwait_ptr);
__monitor(mwait_ptr, 0, 0);
- mb();
+ smp_mb();
__mwait(cx->address, 0);
}
}
diff --git a/xen/arch/x86/cpu/mcheck/barrier.c b/xen/arch/x86/cpu/mcheck/barrier.c
index 7de8e45..a7e5b19 100644
--- a/xen/arch/x86/cpu/mcheck/barrier.c
+++ b/xen/arch/x86/cpu/mcheck/barrier.c
@@ -12,7 +12,7 @@ void mce_barrier_init(struct mce_softirq_barrier *bar)
void mce_barrier_dec(struct mce_softirq_barrier *bar)
{
atomic_inc(&bar->outgen);
- wmb();
+ smp_wmb();
atomic_dec(&bar->val);
}
@@ -24,12 +24,12 @@ void mce_barrier_enter(struct mce_softirq_barrier *bar, bool wait)
return;
atomic_inc(&bar->ingen);
gen = atomic_read(&bar->outgen);
- mb();
+ smp_mb();
atomic_inc(&bar->val);
while ( atomic_read(&bar->val) != num_online_cpus() &&
atomic_read(&bar->outgen) == gen )
{
- mb();
+ smp_mb();
mce_panic_check();
}
}
@@ -42,12 +42,12 @@ void mce_barrier_exit(struct mce_softirq_barrier *bar, bool wait)
return;
atomic_inc(&bar->outgen);
gen = atomic_read(&bar->ingen);
- mb();
+ smp_mb();
atomic_dec(&bar->val);
while ( atomic_read(&bar->val) != 0 &&
atomic_read(&bar->ingen) == gen )
{
- mb();
+ smp_mb();
mce_panic_check();
}
}
diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c
index 1731514..b071dc8 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.c
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c
@@ -501,9 +501,9 @@ static void mctelem_append_processing(mctelem_class_t which)
ltep->mcte_prev = *procltp;
*procltp = dangling[target];
}
- wmb();
+ smp_wmb();
dangling[target] = NULL;
- wmb();
+ smp_wmb();
}
mctelem_cookie_t mctelem_consume_oldest_begin(mctelem_class_t which)
diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c
index 5fffb31..4779b0d 100644
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -106,12 +106,12 @@ static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector)
* CPU is seen by notified remote CPUs. The WRMSR contained within
* apic_icr_write() can otherwise be executed early.
*
- * The reason mb() is sufficient here is subtle: the register arguments
+ * The reason smp_mb() is sufficient here is subtle: the register arguments
* to WRMSR must depend on a memory read executed after the barrier. This
* is guaranteed by cpu_physical_id(), which reads from a global array (and
* so cannot be hoisted above the barrier even by a clever compiler).
*/
- mb();
+ smp_mb();
local_irq_save(flags);
@@ -135,7 +135,7 @@ static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector)
const cpumask_t *cluster_cpus;
unsigned long flags;
- mb(); /* See above for an explanation. */
+ smp_mb(); /* See above for an explanation. */
local_irq_save(flags);
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 8229c63..bc7a851 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -608,7 +608,7 @@ void __init hpet_broadcast_init(void)
hpet_events[i].shift = 32;
hpet_events[i].next_event = STIME_MAX;
spin_lock_init(&hpet_events[i].lock);
- wmb();
+ smp_wmb();
hpet_events[i].event_handler = handle_hpet_broadcast;
hpet_events[i].msi.msi_attrib.maskbit = 1;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index b2a8b0e..e9851f6 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -91,7 +91,7 @@ static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
{
unsigned int state = p->state;
- rmb();
+ smp_rmb();
switch ( state )
{
case STATE_IOREQ_NONE:
@@ -1327,7 +1327,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
}
/* Make the ioreq_t visible /before/ write_pointer. */
- wmb();
+ smp_wmb();
pg->ptrs.write_pointer += qw ? 2 : 1;
/* Canonicalize read/write pointers to prevent their overflow. */
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 57e6c18..ee9afd8 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -759,9 +759,9 @@ void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
ASSERT(spin_is_locked(&desc->lock));
desc->status &= ~IRQ_MOVE_PENDING;
- wmb();
+ smp_wmb();
cpumask_copy(desc->arch.pending_mask, mask);
- wmb();
+ smp_wmb();
desc->status |= IRQ_MOVE_PENDING;
}
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 5b094b4..ee17f6d 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -79,7 +79,7 @@ static enum cpu_state {
CPU_STATE_CALLIN, /* slave -> master: Completed phase 2 */
CPU_STATE_ONLINE /* master -> slave: Go fully online now. */
} cpu_state;
-#define set_cpu_state(state) do { mb(); cpu_state = (state); } while (0)
+#define set_cpu_state(state) do { smp_mb(); cpu_state = (state); } while (0)
void *stack_base[NR_CPUS];
@@ -126,7 +126,7 @@ static void synchronize_tsc_master(unsigned int slave)
for ( i = 1; i <= 5; i++ )
{
tsc_value = rdtsc_ordered();
- wmb();
+ smp_wmb();
atomic_inc(&tsc_count);
while ( atomic_read(&tsc_count) != (i<<1) )
cpu_relax();
@@ -151,7 +151,7 @@ static void synchronize_tsc_slave(unsigned int slave)
{
while ( atomic_read(&tsc_count) != ((i<<1)-1) )
cpu_relax();
- rmb();
+ smp_rmb();
/*
* If a CPU has been physically hotplugged, we may as well write
* to its TSC in spite of X86_FEATURE_TSC_RELIABLE. The platform does
@@ -553,13 +553,13 @@ static int do_boot_cpu(int apicid, int cpu)
}
else if ( cpu_state == CPU_STATE_DEAD )
{
- rmb();
+ smp_rmb();
rc = cpu_error;
}
else
{
boot_error = 1;
- mb();
+ smp_mb();
if ( bootsym(trampoline_cpu_started) == 0xA5 )
/* trampoline started but...? */
printk("Stuck ??\n");
@@ -577,7 +577,7 @@ static int do_boot_cpu(int apicid, int cpu)
/* mark "stuck" area as not stuck */
bootsym(trampoline_cpu_started) = 0;
- mb();
+ smp_mb();
smpboot_restore_warm_reset_vector();
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index b988b94..a7d7d77 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -976,10 +976,10 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
/* 1. Update guest kernel version. */
_u.version = u->version = version_update_begin(u->version);
- wmb();
+ smp_wmb();
/* 2. Update all other guest kernel fields. */
*u = _u;
- wmb();
+ smp_wmb();
/* 3. Update guest kernel version. */
u->version = version_update_end(u->version);
@@ -1006,10 +1006,10 @@ bool update_secondary_system_time(struct vcpu *v,
update_guest_memory_policy(v, &policy);
return false;
}
- wmb();
+ smp_wmb();
/* 2. Update all other userspace fields. */
__copy_to_guest(user_u, u, 1);
- wmb();
+ smp_wmb();
/* 3. Update userspace version. */
u->version = version_update_end(u->version);
__copy_field_to_guest(user_u, u, version);
diff --git a/xen/include/asm-x86/desc.h b/xen/include/asm-x86/desc.h
index da924bf..9956aae 100644
--- a/xen/include/asm-x86/desc.h
+++ b/xen/include/asm-x86/desc.h
@@ -128,10 +128,10 @@ static inline void _write_gate_lower(volatile idt_entry_t *gate,
#define _set_gate(gate_addr,type,dpl,addr) \
do { \
(gate_addr)->a = 0; \
- wmb(); /* disable gate /then/ rewrite */ \
+ smp_wmb(); /* disable gate /then/ rewrite */ \
(gate_addr)->b = \
((unsigned long)(addr) >> 32); \
- wmb(); /* rewrite /then/ enable gate */ \
+ smp_wmb(); /* rewrite /then/ enable gate */ \
(gate_addr)->a = \
(((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
((unsigned long)(dpl) << 45) | \
@@ -174,11 +174,11 @@ static inline void _update_gate_addr_lower(idt_entry_t *gate, void *addr)
#define _set_tssldt_desc(desc,addr,limit,type) \
do { \
(desc)[0].b = (desc)[1].b = 0; \
- wmb(); /* disable entry /then/ rewrite */ \
+ smp_wmb(); /* disable entry /then/ rewrite */ \
(desc)[0].a = \
((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF); \
(desc)[1].a = (u32)(((unsigned long)(addr)) >> 32); \
- wmb(); /* rewrite /then/ enable entry */ \
+ smp_wmb(); /* rewrite /then/ enable entry */ \
(desc)[0].b = \
((u32)(addr) & 0xFF000000U) | \
((u32)(type) << 8) | 0x8000U | \
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index eb498f5..9cb6fd7 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -183,7 +183,7 @@ static always_inline unsigned long __xadd(
#define smp_wmb() wmb()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
#define local_irq_disable() asm volatile ( "cli" : : : "memory" )
#define local_irq_enable() asm volatile ( "sti" : : : "memory" )
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-08-16 11:22 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-08-16 11:22 [PATCH v2 0/4] x86: Corrections to barrier usage Andrew Cooper
2017-08-16 11:22 ` [PATCH v2 1/4] x86/mcheck: Minor cleanup to amd_nonfatal Andrew Cooper
2017-08-16 15:11 ` Jan Beulich
2017-08-18 13:19 ` Tim Deegan
2017-08-16 11:22 ` [PATCH v2 2/4] xen/x86: Drop unnecessary barriers Andrew Cooper
2017-08-16 15:23 ` Jan Beulich
2017-08-16 16:47 ` Andrew Cooper
2017-08-16 17:03 ` Andrew Cooper
2017-08-17 7:50 ` Jan Beulich
2017-08-17 7:48 ` Jan Beulich
2017-08-18 14:47 ` Tim Deegan
2017-08-18 15:04 ` Jan Beulich
2017-08-18 15:13 ` Tim Deegan
2017-08-18 15:07 ` Tim Deegan
2017-08-16 17:18 ` [PATCH v2 2.5/4] xen/x86: Replace mandatory barriers with compiler barriers Andrew Cooper
2017-08-17 8:15 ` Jan Beulich
2017-08-18 13:55 ` [PATCH v2 2/4] xen/x86: Drop unnecessary barriers Tim Deegan
2017-08-18 14:07 ` Tim Deegan
2017-08-18 14:23 ` [PATCH] xen/x86/shadow: adjust barriers around gtable_dirty_version Tim Deegan
2017-08-18 14:26 ` Andrew Cooper
2017-08-16 11:22 ` Andrew Cooper [this message]
2017-08-16 15:42 ` [PATCH v2 3/4] xen/x86: Replace remaining mandatory barriers with SMP barriers Dario Faggioli
2017-08-17 8:37 ` Jan Beulich
2017-08-17 9:35 ` Andrew Cooper
2017-08-17 10:01 ` Jan Beulich
2017-08-16 11:22 ` [PATCH v2 4/4] xen/x86: Correct mandatory and SMP barrier definitions Andrew Cooper
2017-08-16 15:44 ` Dario Faggioli
2017-08-17 8:41 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1502882530-31700-4-git-send-email-andrew.cooper3@citrix.com \
--to=andrew.cooper3@citrix.com \
--cc=JBeulich@suse.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).