From: Waiman Long <Waiman.Long@hp.com>
To: Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
Peter Zijlstra <peterz@infradead.org>
Cc: linux-arch@vger.kernel.org, Waiman Long <Waiman.Long@hp.com>,
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>,
Gleb Natapov <gleb@redhat.com>,
kvm@vger.kernel.org, Scott J Norton <scott.norton@hp.com>,
x86@kernel.org, Paolo Bonzini <paolo.bonzini@gmail.com>,
linux-kernel@vger.kernel.org,
virtualization@lists.linux-foundation.org,
Chegu Vinod <chegu_vinod@hp.com>,
David Vrabel <david.vrabel@citrix.com>,
Oleg Nesterov <oleg@redhat.com>,
xen-devel@lists.xenproject.org,
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
Linus Torvalds <torvalds@linux-foundation.org>
Subject: [PATCH v9 16/19] pvqspinlock: Enable coexistence with the unfair lock
Date: Thu, 17 Apr 2014 11:04:08 -0400
Message-ID: <1397747051-15401-17-git-send-email-Waiman.Long@hp.com>
In-Reply-To: <1397747051-15401-1-git-send-email-Waiman.Long@hp.com>
This patch enables the PV qspinlock and the unfair lock to coexist. When
both are enabled, only the lock fastpath performs lock stealing; the
slowpath has lock stealing disabled, so that we get the best of both
features.
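As a rough illustration (not part of the diff below; lock_stealing_allowed()
is a made-up helper), the gating amounts to something like:

	/*
	 * Waiter-level lock stealing in the slowpath is bypassed whenever
	 * the PV qspinlock is active, so only the fastpath may steal.
	 */
	static inline bool lock_stealing_allowed(void)
	{
		if (pv_qspinlock_enabled())
			return false;	/* PV qspinlock owns the slowpath */
		return static_key_false(&paravirt_unfairlocks_enabled);
	}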
We also need to transition a CPU that has been spinning too long in the
pending bit code path back to the regular queuing code path, so that it
can be properly halted by the PV qspinlock code.
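Sketched below for illustration only; clear_pending() stands in for the
cmpxchg loop used in the actual patch:

	int retry = PSPIN_THRESHOLD;

	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK) {
		if (pv_qspinlock_enabled() && (--retry == 0)) {
			clear_pending(lock);	/* give up the pending bit */
			return 0;		/* fall back to the queuing path */
		}
		arch_mutex_cpu_relax();
	}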
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
---
kernel/locking/qspinlock.c | 74 ++++++++++++++++++++++++++++++++++++++------
1 files changed, 64 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index f9c82f6..21421a6 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -76,6 +76,30 @@ struct qnode {
#define qhead mcs.locked /* The queue head flag */
/*
+ * Allow spinning loop count only if either PV spinlock or unfair lock is
+ * configured.
+ */
+#if defined(CONFIG_PARAVIRT_UNFAIR_LOCKS) || defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define DEF_LOOP_CNT(c) int c = 0
+#define INC_LOOP_CNT(c) (c)++
+#define LOOP_CNT(c) c
+#else
+#define DEF_LOOP_CNT(c)
+#define INC_LOOP_CNT(c)
+#define LOOP_CNT(c) 0
+#endif
+
+/*
+ * Check the pending bit spinning threshold only if PV qspinlock is enabled
+ */
+#define PSPIN_THRESHOLD (1 << 10)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define pv_qspinlock_enabled() static_key_false(&paravirt_spinlocks_enabled)
+#else
+#define pv_qspinlock_enabled() false
+#endif
+
+/*
* Per-CPU queue node structures; we can never have more than 4 nested
* contexts: task, softirq, hardirq, nmi.
*
@@ -306,9 +330,6 @@ cmpxchg_tail(struct qspinlock *lock, u32 old, u32 new)
* starvation.
*/
#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
-#define DEF_LOOP_CNT(c) int c = 0
-#define INC_LOOP_CNT(c) (c)++
-#define LOOP_CNT(c) c
#define LSTEAL_MIN (1 << 3)
#define LSTEAL_MAX (1 << 10)
#define LSTEAL_MIN_MASK (LSTEAL_MIN - 1)
@@ -334,7 +355,11 @@ static inline void unfair_init_vars(struct qnode *node)
static inline void
unfair_set_vars(struct qnode *node, struct qnode *prev, u32 prev_tail)
{
- if (!static_key_false(&paravirt_unfairlocks_enabled))
+ /*
+ * Disable waiter lock stealing if PV spinlock is enabled
+ */
+ if (pv_qspinlock_enabled() ||
+ !static_key_false(&paravirt_unfairlocks_enabled))
return;
node->qprev = prev;
@@ -360,7 +385,11 @@ unfair_set_vars(struct qnode *node, struct qnode *prev, u32 prev_tail)
*/
static inline int unfair_check_and_clear_tail(struct qspinlock *lock, u32 tail)
{
- if (!static_key_false(&paravirt_unfairlocks_enabled))
+ /*
+ * Disable waiter lock stealing if PV spinlock is enabled
+ */
+ if (pv_qspinlock_enabled() ||
+ !static_key_false(&paravirt_unfairlocks_enabled))
return false;
/*
@@ -389,7 +418,11 @@ unfair_get_lock(struct qspinlock *lock, struct qnode *node, u32 tail, int count)
int isqhead;
struct qnode *next;
- if (!static_key_false(&paravirt_unfairlocks_enabled) ||
+ /*
+ * Disable waiter lock stealing if PV spinlock is enabled
+ */
+ if (pv_qspinlock_enabled() ||
+ !static_key_false(&paravirt_unfairlocks_enabled) ||
((count & node->lsteal_mask) != node->lsteal_mask))
return false;
@@ -467,9 +500,6 @@ unfair_get_lock(struct qspinlock *lock, struct qnode *node, u32 tail, int count)
}
#else /* CONFIG_PARAVIRT_UNFAIR_LOCKS */
-#define DEF_LOOP_CNT(c)
-#define INC_LOOP_CNT(c)
-#define LOOP_CNT(c) 0
static void unfair_init_vars(struct qnode *node) {}
static void unfair_set_vars(struct qnode *node, struct qnode *prev,
@@ -587,9 +617,28 @@ static inline int trylock_pending(struct qspinlock *lock, u32 *pval)
* store-release that clears the locked bit and create lock
* sequentiality; this because not all try_clear_pending_set_locked()
* implementations imply full barriers.
+ *
+ * When PV qspinlock is enabled, exit the pending bit code path and
+ * go back to the regular queuing path if the lock isn't available
+ * within a certain threshold.
*/
- while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
+ if (pv_qspinlock_enabled())
+ retry = PSPIN_THRESHOLD;
+ while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK) {
+ if (pv_qspinlock_enabled() && (--retry == 0)) {
+ /*
+ * Clear the pending bit and exit
+ */
+ for (;;) {
+ new = val & ~_Q_PENDING_MASK;
+ old = atomic_cmpxchg(&lock->val, val, new);
+ if (old == val)
+ return 0;
+ val = old;
+ }
+ }
arch_mutex_cpu_relax();
+ }
/*
* take ownership and clear the pending bit.
@@ -650,6 +699,8 @@ queue_spin_lock_slowerpath(struct qspinlock *lock, struct qnode *node, u32 tail)
}
arch_mutex_cpu_relax();
}
+ } else {
+ ACCESS_ONCE(node->qhead) = true;
}
/*
@@ -717,6 +768,9 @@ notify_next:
while (!(next = (struct qnode *)ACCESS_ONCE(node->mcs.next)))
arch_mutex_cpu_relax();
+ /*
+ * The next one in queue is now at the head
+ */
arch_mcs_spin_unlock_contended(&next->qhead);
}
--
1.7.1