* [PATCH] refactor mce code
@ 2012-09-10 14:53 Christoph Egger
From: Christoph Egger @ 2012-09-10 14:53 UTC (permalink / raw)
To: xen-devel@lists.xen.org
[-- Attachment #1: Type: text/plain, Size: 414 bytes --]
Factor common MC code out of the Intel-specific code and move it into
common files.
No functional changes.
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
--
---to satisfy European Law for business letters:
Advanced Micro Devices GmbH
Einsteinring 24, 85689 Dornach b. Muenchen
Geschaeftsfuehrer: Alberto Bozzo
Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen
Registergericht Muenchen, HRB Nr. 43632
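
As a quick orientation for reviewers, here is a minimal usage sketch of the
interface exposed by the new mcbarrier.h below. It is illustration only and
not part of the patch: the barrier and functions prefixed with example_ are
invented, and only struct mce_softirq_barrier and the mce_barrier_*() calls
come from the code being moved.

    #include "mcbarrier.h"

    /* Invented for illustration; no such barrier exists in the tree. */
    static struct mce_softirq_barrier example_bar;

    static void example_handler_setup(void)
    {
        mce_barrier_init(&example_bar);
    }

    static void example_softirq_body(void)
    {
        /* Enter and exit must stay paired so the counter drains back to
         * zero once every CPU has passed through (see mcbarrier.h). */
        mce_barrier_enter(&example_bar);
        /* ... per-CPU machine-check processing would go here ... */
        mce_barrier_exit(&example_bar);
    }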
[-- Attachment #2: xen_mce_refactor.diff --]
[-- Type: text/plain, Size: 8256 bytes --]
# User Christoph Egger
# Date 1347276743 -7200
Factor common MC code out of the Intel-specific code and move it into common files.
No functional changes.
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/Makefile
--- a/xen/arch/x86/cpu/mcheck/Makefile
+++ b/xen/arch/x86/cpu/mcheck/Makefile
@@ -2,10 +2,12 @@ obj-y += amd_nonfatal.o
obj-y += k7.o
obj-y += amd_k8.o
obj-y += amd_f10.o
+obj-y += mcbarrier.o
obj-y += mctelem.o
obj-y += mce.o
obj-y += mce-apei.o
obj-y += mce_intel.o
obj-y += mce_amd_quirks.o
+obj-y += mcutil.o
obj-y += non-fatal.o
obj-y += vmce.o
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcbarrier.c
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcbarrier.c
@@ -0,0 +1,57 @@
+#include "mcbarrier.h"
+#include "mcutil.h"
+#include "mce.h"
+
+void mce_barrier_init(struct mce_softirq_barrier *bar)
+{
+    atomic_set(&bar->val, 0);
+    atomic_set(&bar->ingen, 0);
+    atomic_set(&bar->outgen, 0);
+}
+
+void mce_barrier_dec(struct mce_softirq_barrier *bar)
+{
+    atomic_inc(&bar->outgen);
+    wmb();
+    atomic_dec(&bar->val);
+}
+
+void mce_barrier_enter(struct mce_softirq_barrier *bar)
+{
+    int gen;
+
+    if (!mce_broadcast)
+        return;
+    atomic_inc(&bar->ingen);
+    gen = atomic_read(&bar->outgen);
+    mb();
+    atomic_inc(&bar->val);
+    while ( atomic_read(&bar->val) != num_online_cpus() &&
+            atomic_read(&bar->outgen) == gen) {
+        mb();
+        mce_panic_check();
+    }
+}
+
+void mce_barrier_exit(struct mce_softirq_barrier *bar)
+{
+    int gen;
+
+    if (!mce_broadcast)
+        return;
+    atomic_inc(&bar->outgen);
+    gen = atomic_read(&bar->ingen);
+    mb();
+    atomic_dec(&bar->val);
+    while ( atomic_read(&bar->val) != 0 &&
+            atomic_read(&bar->ingen) == gen ) {
+        mb();
+        mce_panic_check();
+    }
+}
+
+void mce_barrier(struct mce_softirq_barrier *bar)
+{
+    mce_barrier_enter(bar);
+    mce_barrier_exit(bar);
+}
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcbarrier.h
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcbarrier.h
@@ -0,0 +1,44 @@
+#ifndef _MCHECK_BARRIER_H
+#define _MCHECK_BARRIER_H
+
+#include <asm/atomic.h>
+
+/* MCE handling */
+struct mce_softirq_barrier {
+    atomic_t val;
+    atomic_t ingen;
+    atomic_t outgen;
+};
+
+/*
+ * Initialize a barrier. Just set it to 0.
+ */
+void mce_barrier_init(struct mce_softirq_barrier *);
+
+/*
+ * This function will need to be used when offlining a CPU in the
+ * recovery actions.
+ *
+ * Decrement a barrier only. Needed for cases where the CPU
+ * in question can't do it itself (e.g. it is being offlined).
+ */
+void mce_barrier_dec(struct mce_softirq_barrier *);
+
+/*
+ * Increment the generation number and the value. The generation number
+ * is incremented when entering a barrier. This way, it can be checked
+ * on exit if a CPU is trying to re-enter the barrier. This can happen
+ * if the first CPU to make it out immediately exits or re-enters, while
+ * another CPU that is still in the loop becomes otherwise occupied
+ * (e.g. it needs to service an interrupt, etc), missing the value
+ * it's waiting for.
+ *
+ * These barrier functions should always be paired, so that the
+ * counter value will reach 0 again after all CPUs have exited.
+ */
+void mce_barrier_enter(struct mce_softirq_barrier *);
+void mce_barrier_exit(struct mce_softirq_barrier *);
+
+void mce_barrier(struct mce_softirq_barrier *);
+
+#endif /* _MCHECK_BARRIER_H */
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -16,6 +16,8 @@
#include <asm/apic.h>
#include "mce.h"
#include "x86_mca.h"
+#include "mcbarrier.h"
+#include "mcutil.h"
DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
@@ -164,13 +166,6 @@ static void intel_init_thermal(struct cp
}
#endif /* CONFIG_X86_MCE_THERMAL */
-/* MCE handling */
-struct mce_softirq_barrier {
-    atomic_t val;
-    atomic_t ingen;
-    atomic_t outgen;
-};
-
static struct mce_softirq_barrier mce_inside_bar, mce_severity_bar;
static struct mce_softirq_barrier mce_trap_bar;
@@ -186,9 +181,6 @@ static atomic_t severity_cpu = ATOMIC_IN
static atomic_t found_error = ATOMIC_INIT(0);
static cpumask_t mce_fatal_cpus;
-static void mce_barrier_enter(struct mce_softirq_barrier *);
-static void mce_barrier_exit(struct mce_softirq_barrier *);
-
static const struct mca_error_handler *__read_mostly mce_dhandlers;
static const struct mca_error_handler *__read_mostly mce_uhandlers;
static unsigned int __read_mostly mce_dhandler_num;
@@ -385,25 +377,6 @@ static int mce_urgent_action(struct cpu_
* Round2: Do all MCE processing logic as normal.
*/
-static void mce_panic_check(void)
-{
-    if (is_mc_panic) {
-        local_irq_enable();
-        for ( ; ; )
-            halt();
-    }
-}
-
-/*
- * Initialize a barrier. Just set it to 0.
- */
-static void mce_barrier_init(struct mce_softirq_barrier *bar)
-{
-    atomic_set(&bar->val, 0);
-    atomic_set(&bar->ingen, 0);
-    atomic_set(&bar->outgen, 0);
-}
-
static void mce_handler_init(void)
{
if (smp_processor_id() != 0)
@@ -417,21 +390,6 @@ static void mce_handler_init(void)
spin_lock_init(&mce_logout_lock);
open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq);
}
-#if 0
-/*
- * This function will need to be used when offlining a CPU in the
- * recovery actions.
- *
- * Decrement a barrier only. Needed for cases where the CPU
- * in question can't do it itself (e.g. it is being offlined).
- */
-static void mce_barrier_dec(struct mce_softirq_barrier *bar)
-{
-    atomic_inc(&bar->outgen);
-    wmb();
-    atomic_dec(&bar->val);
-}
-#endif
static void mce_spin_lock(spinlock_t *lk)
{
@@ -446,60 +404,6 @@ static void mce_spin_unlock(spinlock_t *
spin_unlock(lk);
}
-/*
- * Increment the generation number and the value. The generation number
- * is incremented when entering a barrier. This way, it can be checked
- * on exit if a CPU is trying to re-enter the barrier. This can happen
- * if the first CPU to make it out immediately exits or re-enters, while
- * another CPU that is still in the loop becomes otherwise occupied
- * (e.g. it needs to service an interrupt, etc), missing the value
- * it's waiting for.
- *
- * These barrier functions should always be paired, so that the
- * counter value will reach 0 again after all CPUs have exited.
- */
-static void mce_barrier_enter(struct mce_softirq_barrier *bar)
-{
-    int gen;
-
-    if (!mce_broadcast)
-        return;
-    atomic_inc(&bar->ingen);
-    gen = atomic_read(&bar->outgen);
-    mb();
-    atomic_inc(&bar->val);
-    while ( atomic_read(&bar->val) != num_online_cpus() &&
-            atomic_read(&bar->outgen) == gen) {
-        mb();
-        mce_panic_check();
-    }
-}
-
-static void mce_barrier_exit(struct mce_softirq_barrier *bar)
-{
-    int gen;
-
-    if (!mce_broadcast)
-        return;
-    atomic_inc(&bar->outgen);
-    gen = atomic_read(&bar->ingen);
-    mb();
-    atomic_dec(&bar->val);
-    while ( atomic_read(&bar->val) != 0 &&
-            atomic_read(&bar->ingen) == gen ) {
-        mb();
-        mce_panic_check();
-    }
-}
-
-#if 0
-static void mce_barrier(struct mce_softirq_barrier *bar)
-{
-    mce_barrier_enter(bar);
-    mce_barrier_exit(bar);
-}
-#endif
-
/* Intel MCE handler */
static inline void intel_get_extended_msr(struct mcinfo_extended *ext, u32 msr)
{
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcutil.c
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcutil.c
@@ -0,0 +1,13 @@
+
+#include <asm/system.h>
+#include "mcutil.h"
+#include "mce.h"
+
+void mce_panic_check(void)
+{
+    if (is_mc_panic) {
+        local_irq_enable();
+        for ( ; ; )
+            halt();
+    }
+}
diff -r 06f42b46c057 -r c45743df3686 xen/arch/x86/cpu/mcheck/mcutil.h
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/mcutil.h
@@ -0,0 +1,6 @@
+#ifndef _MCHECK_UTIL_H
+#define _MCHECK_UTIL_H
+
+void mce_panic_check(void);
+
+#endif
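
For readers who want to poke at the generation-counter scheme described in
the comment block moved into mcbarrier.h above, here is a stand-alone
user-space analogue. It is explicitly not Xen code: C11 atomics and plain
spinning stand in for atomic_t, mb() and mce_panic_check(), NTHREADS stands
in for num_online_cpus(), and every name in it (demo_barrier, demo_enter,
worker, ...) is invented for the demo. It only mirrors the enter/exit logic
of the functions moved into mcbarrier.c.

    /* demo_barrier.c -- build with: cc -pthread demo_barrier.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTHREADS 4              /* stand-in for num_online_cpus() */
    #define ROUNDS   3

    struct demo_barrier {
        atomic_int val, ingen, outgen;
    };

    /* Static storage is zero-initialised, matching mce_barrier_init(). */
    static struct demo_barrier bar;

    static void demo_enter(struct demo_barrier *b)
    {
        int gen;

        atomic_fetch_add(&b->ingen, 1);
        gen = atomic_load(&b->outgen);
        atomic_fetch_add(&b->val, 1);
        /* Leave when everyone has arrived, or when outgen moved on
         * because a faster thread already exited this generation. */
        while (atomic_load(&b->val) != NTHREADS &&
               atomic_load(&b->outgen) == gen)
            ;   /* the real code also polls mce_panic_check() here */
    }

    static void demo_exit(struct demo_barrier *b)
    {
        int gen;

        atomic_fetch_add(&b->outgen, 1);
        gen = atomic_load(&b->ingen);
        atomic_fetch_sub(&b->val, 1);
        /* Leave when the count has drained, or when ingen moved on
         * because a faster thread already re-entered the barrier. */
        while (atomic_load(&b->val) != 0 &&
               atomic_load(&b->ingen) == gen)
            ;
    }

    static void *worker(void *arg)
    {
        long id = (long)arg;

        for (int round = 0; round < ROUNDS; round++) {
            demo_enter(&bar);
            printf("thread %ld passed round %d\n", id, round);
            demo_exit(&bar);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NTHREADS];

        for (long i = 0; i < NTHREADS; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }

The generation checks are what keep a slow thread from spinning forever: a
thread still waiting in demo_exit for val to reach zero is released as soon
as a faster thread bumps ingen by re-entering, which is exactly the re-entry
case the header comment describes.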