* [PATCH -mm 1/4] llist, Make all llist functions inline
@ 2011-09-06 6:25 Huang Ying
From: Huang Ying @ 2011-09-06 6:25 UTC
To: Andrew Morton
Cc: linux-kernel, Andi Kleen, ying.huang, Mathieu Desnoyers,
Peter Zijlstra
Because llist may be used in performance-critical code paths, make all
llist functions inline to avoid function call overhead.
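For reference, the intended usage pattern is multiple lock-less
producers feeding a single consumer, along the lines of the following
sketch (illustrative only; the struct and the consume() handler are
made up):

struct my_event {
	int info;
	struct llist_node llnode;
};

static LLIST_HEAD(event_list);

/* producer; may run in IRQ (or NMI, given NMI-safe cmpxchg) context */
static void report_event(struct my_event *event)
{
	llist_add(&event->llnode, &event_list);
}

/* single consumer; entries come back newest-first */
static void process_events(void)
{
	struct llist_node *node = llist_del_all(&event_list);

	while (node) {
		struct my_event *event =
			container_of(node, struct my_event, llnode);

		node = node->next;	/* read before event may be freed */
		consume(event);		/* hypothetical handler */
	}
}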
Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Zijlstra <peterz@infradead.org>
---
drivers/acpi/apei/Kconfig | 1
include/linux/llist.h | 122 +++++++++++++++++++++++++++++++++++++++++--
lib/Kconfig | 3 -
lib/Makefile | 2
lib/llist.c | 129 ----------------------------------------------
5 files changed, 116 insertions(+), 141 deletions(-)
delete mode 100644 lib/llist.c
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -14,7 +14,6 @@ config ACPI_APEI_GHES
depends on ACPI_APEI && X86
select ACPI_HED
select IRQ_WORK
- select LLIST
select GENERIC_ALLOCATOR
help
Generic Hardware Error Source provides a way to report
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -37,8 +37,28 @@
* architectures that don't have NMI-safe cmpxchg implementation, the
* list can NOT be used in NMI handler. So code uses the list in NMI
* handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010,2011 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+
struct llist_head {
struct llist_node *first;
};
@@ -113,14 +133,104 @@ static inline void init_llist_head(struc
* test whether the list is empty without deleting something from the
* list.
*/
-static inline int llist_empty(const struct llist_head *head)
+static inline bool llist_empty(const struct llist_head *head)
{
return ACCESS_ONCE(head->first) == NULL;
}
-void llist_add(struct llist_node *new, struct llist_head *head);
-void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
- struct llist_head *head);
-struct llist_node *llist_del_first(struct llist_head *head);
-struct llist_node *llist_del_all(struct llist_head *head);
+/**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ */
+static inline void llist_add(struct llist_node *new, struct llist_head *head)
+{
+ struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ entry = head->first;
+ do {
+ old_entry = entry;
+ new->next = entry;
+ cpu_relax();
+ } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
+}
+
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ */
+static inline void llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ entry = head->first;
+ do {
+ old_entry = entry;
+ new_last->next = entry;
+ cpu_relax();
+ } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+}
+
+/**
+ * llist_del_first - delete the first entry of lock-less list
+ * @head: the head for your lock-less list
+ *
+ * If list is empty, return NULL, otherwise, return the first entry
+ * deleted, this is the newest added one.
+ *
+ * Only one llist_del_first user can be used simultaneously with
+ * multiple llist_add users without lock. Because otherwise
+ * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
+ * llist_add) sequence in another user may change @head->first->next,
+ * but keep @head->first. If multiple consumers are needed, please
+ * use llist_del_all or use lock between consumers.
+ */
+static inline struct llist_node *llist_del_first(struct llist_head *head)
+{
+ struct llist_node *entry, *old_entry, *next;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ entry = head->first;
+ do {
+ if (entry == NULL)
+ return NULL;
+ old_entry = entry;
+ next = entry->next;
+ cpu_relax();
+ } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+
+ return entry;
+}
+
+/**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If list is empty, return NULL, otherwise, delete all entries and
+ * return the pointer to the first entry. The order of entries
+ * deleted is from the newest to the oldest added one.
+ */
+static inline struct llist_node *llist_del_all(struct llist_head *head)
+{
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ return xchg(&head->first, NULL);
+}
#endif /* LLIST_H */
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -276,7 +276,4 @@ config CORDIC
so its calculations are in fixed point. Modules can select this
when they require this function. Module will be called cordic.
-config LLIST
- bool
-
endmenu
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -116,8 +116,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
obj-$(CONFIG_CORDIC) += cordic.o
-obj-$(CONFIG_LLIST) += llist.o
-
hostprogs-y := gen_crc32table
clean-files := crc32table.h
--- a/lib/llist.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Lock-less NULL terminated single linked list
- *
- * The basic atomic operation of this list is cmpxchg on long. On
- * architectures that don't have NMI-safe cmpxchg implementation, the
- * list can NOT be used in NMI handler. So code uses the list in NMI
- * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
- *
- * Copyright 2010,2011 Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/llist.h>
-
-#include <asm/system.h>
-
-/**
- * llist_add - add a new entry
- * @new: new entry to be added
- * @head: the head for your lock-less list
- */
-void llist_add(struct llist_node *new, struct llist_head *head)
-{
- struct llist_node *entry, *old_entry;
-
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- entry = head->first;
- do {
- old_entry = entry;
- new->next = entry;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
-}
-EXPORT_SYMBOL_GPL(llist_add);
-
-/**
- * llist_add_batch - add several linked entries in batch
- * @new_first: first entry in batch to be added
- * @new_last: last entry in batch to be added
- * @head: the head for your lock-less list
- */
-void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
- struct llist_head *head)
-{
- struct llist_node *entry, *old_entry;
-
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- entry = head->first;
- do {
- old_entry = entry;
- new_last->next = entry;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
-}
-EXPORT_SYMBOL_GPL(llist_add_batch);
-
-/**
- * llist_del_first - delete the first entry of lock-less list
- * @head: the head for your lock-less list
- *
- * If list is empty, return NULL, otherwise, return the first entry
- * deleted, this is the newest added one.
- *
- * Only one llist_del_first user can be used simultaneously with
- * multiple llist_add users without lock. Because otherwise
- * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
- * llist_add) sequence in another user may change @head->first->next,
- * but keep @head->first. If multiple consumers are needed, please
- * use llist_del_all or use lock between consumers.
- */
-struct llist_node *llist_del_first(struct llist_head *head)
-{
- struct llist_node *entry, *old_entry, *next;
-
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- entry = head->first;
- do {
- if (entry == NULL)
- return NULL;
- old_entry = entry;
- next = entry->next;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
-
- return entry;
-}
-EXPORT_SYMBOL_GPL(llist_del_first);
-
-/**
- * llist_del_all - delete all entries from lock-less list
- * @head: the head of lock-less list to delete all entries
- *
- * If list is empty, return NULL, otherwise, delete all entries and
- * return the pointer to the first entry. The order of entries
- * deleted is from the newest to the oldest added one.
- */
-struct llist_node *llist_del_all(struct llist_head *head)
-{
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- return xchg(&head->first, NULL);
-}
-EXPORT_SYMBOL_GPL(llist_del_all);
* [PATCH -mm 2/4] llist, Define macro to check NMI safe cmpxchg
From: Huang Ying @ 2011-09-06 6:25 UTC
To: Andrew Morton
Cc: linux-kernel, Andi Kleen, ying.huang, Mathieu Desnoyers,
Peter Zijlstra
This makes the code cleaner and reduces code duplication. Thanks to
Peter Zijlstra for the reminder.
Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Zijlstra <peterz@infradead.org>
---
include/linux/llist.h | 22 ++++++++++------------
1 file changed, 10 insertions(+), 12 deletions(-)
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -70,6 +70,12 @@ struct llist_node {
#define LLIST_HEAD_INIT(name) { NULL }
#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+#define CHECK_NMI_SAFE_CMPXCHG()
+#else
+#define CHECK_NMI_SAFE_CMPXCHG() BUG_ON(in_nmi())
+#endif
+
/**
* init_llist_head - initialize lock-less list head
* @head: the head for your lock-less list
@@ -147,9 +153,7 @@ static inline void llist_add(struct llis
{
struct llist_node *entry, *old_entry;
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
+ CHECK_NMI_SAFE_CMPXCHG();
entry = head->first;
do {
@@ -171,9 +175,7 @@ static inline void llist_add_batch(struc
{
struct llist_node *entry, *old_entry;
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
+ CHECK_NMI_SAFE_CMPXCHG();
entry = head->first;
do {
@@ -201,9 +203,7 @@ static inline struct llist_node *llist_d
{
struct llist_node *entry, *old_entry, *next;
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
+ CHECK_NMI_SAFE_CMPXCHG();
entry = head->first;
do {
@@ -227,9 +227,7 @@ static inline struct llist_node *llist_d
*/
static inline struct llist_node *llist_del_all(struct llist_head *head)
{
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
+ CHECK_NMI_SAFE_CMPXCHG();
return xchg(&head->first, NULL);
}
* Re: [PATCH -mm 2/4] llist, Define macro to check NMI safe cmpxchg
From: Mathieu Desnoyers @ 2011-09-06 10:50 UTC
To: Huang Ying; +Cc: Andrew Morton, linux-kernel, Andi Kleen, Peter Zijlstra
* Huang Ying (ying.huang@intel.com) wrote:
> This makes the code cleaner and reduces code duplication. Thanks to
> Peter Zijlstra for the reminder.
>
> Signed-off-by: Huang Ying <ying.huang@intel.com>
> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> ---
> include/linux/llist.h | 22 ++++++++++------------
> 1 file changed, 10 insertions(+), 12 deletions(-)
>
> --- a/include/linux/llist.h
> +++ b/include/linux/llist.h
> @@ -70,6 +70,12 @@ struct llist_node {
> #define LLIST_HEAD_INIT(name) { NULL }
> #define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
>
> +#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> +#define CHECK_NMI_SAFE_CMPXCHG()
Does it have to be in caps? And can it be turned into a static inline
function? I'm thinking about something close to might_sleep().
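Something like this, perhaps (just a sketch, helper name hypothetical):

static inline void check_nmi_safe_cmpxchg(void)
{
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif
}

That would keep the #ifdef in one place while following the lower-case
convention of might_sleep().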
Thanks,
Mathieu
> +#else
> +#define CHECK_NMI_SAFE_CMPXCHG() BUG_ON(in_nmi())
> +#endif
> +
> /**
> * init_llist_head - initialize lock-less list head
> * @head: the head for your lock-less list
> @@ -147,9 +153,7 @@ static inline void llist_add(struct llis
> {
> struct llist_node *entry, *old_entry;
>
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> + CHECK_NMI_SAFE_CMPXCHG();
>
> entry = head->first;
> do {
> @@ -171,9 +175,7 @@ static inline void llist_add_batch(struc
> {
> struct llist_node *entry, *old_entry;
>
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> + CHECK_NMI_SAFE_CMPXCHG();
>
> entry = head->first;
> do {
> @@ -201,9 +203,7 @@ static inline struct llist_node *llist_d
> {
> struct llist_node *entry, *old_entry, *next;
>
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> + CHECK_NMI_SAFE_CMPXCHG();
>
> entry = head->first;
> do {
> @@ -227,9 +227,7 @@ static inline struct llist_node *llist_d
> */
> static inline struct llist_node *llist_del_all(struct llist_head *head)
> {
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> + CHECK_NMI_SAFE_CMPXCHG();
>
> return xchg(&head->first, NULL);
> }
--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com
* [PATCH -mm 3/4] llist, Move cpu_relax after cmpxchg
From: Huang Ying @ 2011-09-06 6:25 UTC
To: Andrew Morton
Cc: linux-kernel, Andi Kleen, ying.huang, Mathieu Desnoyers,
Peter Zijlstra
If the first cmpxchg call succeeds, it is not necessary to execute
cpu_relax before the cmpxchg. So the cpu_relax in a busy loop involving
cmpxchg should go after the cmpxchg instead of before it. This patch
fixes this in llist.
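In other words, the preferred busy-loop shape is (generic sketch, with
ptr/old/new standing in for the real operands):

	for (;;) {
		old = *ptr;
		/* prepare new based on old */
		if (cmpxchg(ptr, old, new) == old)
			break;
		cpu_relax();	/* pay the relax penalty only on failure */
	}

rather than calling cpu_relax() unconditionally before every attempt.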
Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Zijlstra <peterz@infradead.org>
---
include/linux/llist.h | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -156,11 +156,14 @@ static inline void llist_add(struct llis
CHECK_NMI_SAFE_CMPXCHG();
entry = head->first;
- do {
+ for (;;) {
old_entry = entry;
new->next = entry;
+ entry = cmpxchg(&head->first, old_entry, new);
+ if (entry == old_entry)
+ break;
cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
+ }
}
/**
@@ -178,11 +181,14 @@ static inline void llist_add_batch(struc
CHECK_NMI_SAFE_CMPXCHG();
entry = head->first;
- do {
+ for (;;) {
old_entry = entry;
new_last->next = entry;
+ entry = cmpxchg(&head->first, old_entry, new_first);
+ if (entry == old_entry)
+ break;
cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+ }
}
/**
@@ -206,13 +212,16 @@ static inline struct llist_node *llist_d
CHECK_NMI_SAFE_CMPXCHG();
entry = head->first;
- do {
+ for (;;) {
if (entry == NULL)
return NULL;
old_entry = entry;
next = entry->next;
+ entry = cmpxchg(&head->first, old_entry, next);
+ if (entry == old_entry)
+ break;
cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+ }
return entry;
}
* Re: [PATCH -mm 3/4] llist, Move cpu_relax after cmpxchg
From: Mathieu Desnoyers @ 2011-09-06 10:52 UTC
To: Huang Ying; +Cc: Andrew Morton, linux-kernel, Andi Kleen, Peter Zijlstra
* Huang Ying (ying.huang@intel.com) wrote:
> If the first cmpxchg call succeeds, it is not necessary to execute
> cpu_relax before the cmpxchg. So the cpu_relax in a busy loop involving
> cmpxchg should go after the cmpxchg instead of before it. This patch
> fixes this in llist.
>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thanks!
Mathieu
> Signed-off-by: Huang Ying <ying.huang@intel.com>
> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> ---
> include/linux/llist.h | 21 +++++++++++++++------
> 1 file changed, 15 insertions(+), 6 deletions(-)
>
> --- a/include/linux/llist.h
> +++ b/include/linux/llist.h
> @@ -156,11 +156,14 @@ static inline void llist_add(struct llis
> CHECK_NMI_SAFE_CMPXCHG();
>
> entry = head->first;
> - do {
> + for (;;) {
> old_entry = entry;
> new->next = entry;
> + entry = cmpxchg(&head->first, old_entry, new);
> + if (entry == old_entry)
> + break;
> cpu_relax();
> - } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
> + }
> }
>
> /**
> @@ -178,11 +181,14 @@ static inline void llist_add_batch(struc
> CHECK_NMI_SAFE_CMPXCHG();
>
> entry = head->first;
> - do {
> + for (;;) {
> old_entry = entry;
> new_last->next = entry;
> + entry = cmpxchg(&head->first, old_entry, new_first);
> + if (entry == old_entry)
> + break;
> cpu_relax();
> - } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
> + }
> }
>
> /**
> @@ -206,13 +212,16 @@ static inline struct llist_node *llist_d
> CHECK_NMI_SAFE_CMPXCHG();
>
> entry = head->first;
> - do {
> + for (;;) {
> if (entry == NULL)
> return NULL;
> old_entry = entry;
> next = entry->next;
> + entry = cmpxchg(&head->first, old_entry, next);
> + if (entry == old_entry)
> + break;
> cpu_relax();
> - } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
> + }
>
> return entry;
> }
--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com
* [PATCH -mm 4/4] irq_work, Use llist in irq_work
From: Huang Ying @ 2011-09-06 6:25 UTC
To: Andrew Morton
Cc: linux-kernel, Andi Kleen, ying.huang, Mathieu Desnoyers,
Peter Zijlstra
Use llist in irq_work instead of the open-coded lock-less linked list
implementation in irq_work, to avoid code duplication.
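For reference, a typical irq_work user follows this pattern
(illustrative sketch; the my_* names are made up):

static void my_work_func(struct irq_work *work)
{
	/* runs later from irq_work_run(), in hard IRQ context */
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_work_func);
}

static void my_nmi_handler(void)
{
	irq_work_queue(&my_work);	/* raises a self-interrupt if needed */
}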
Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
---
include/linux/irq_work.h | 15 ++++---
kernel/irq_work.c | 92 ++++++++++++++++-------------------------------
2 files changed, 41 insertions(+), 66 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -1,20 +1,23 @@
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
+#include <linux/llist.h>
+
struct irq_work {
- struct irq_work *next;
+ unsigned long flags;
+ struct llist_node llnode;
void (*func)(struct irq_work *);
};
static inline
-void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- entry->next = NULL;
- entry->func = func;
+ work->flags = 0;
+ work->func = func;
}
-bool irq_work_queue(struct irq_work *entry);
+bool irq_work_queue(struct irq_work *work);
void irq_work_run(void);
-void irq_work_sync(struct irq_work *entry);
+void irq_work_sync(struct irq_work *work);
#endif /* _LINUX_IRQ_WORK_H */
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -10,7 +10,6 @@
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
-#include <asm/processor.h>
/*
* An entry can be in one of four states:
@@ -19,54 +18,34 @@
* claimed NULL, 3 -> {pending} : claimed to be enqueued
* pending next, 3 -> {busy} : queued, pending callback
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- *
- * We use the lower two bits of the next pointer to keep PENDING and BUSY
- * flags.
*/
#define IRQ_WORK_PENDING 1UL
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
-static inline bool irq_work_is_set(struct irq_work *entry, int flags)
-{
- return (unsigned long)entry->next & flags;
-}
-
-static inline struct irq_work *irq_work_next(struct irq_work *entry)
-{
- unsigned long next = (unsigned long)entry->next;
- next &= ~IRQ_WORK_FLAGS;
- return (struct irq_work *)next;
-}
-
-static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
-{
- unsigned long next = (unsigned long)entry;
- next |= flags;
- return (struct irq_work *)next;
-}
-
-static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+static DEFINE_PER_CPU(struct llist_head, irq_work_list);
/*
* Claim the entry so that no one else will poke at it.
*/
-static bool irq_work_claim(struct irq_work *entry)
+static bool irq_work_claim(struct irq_work *work)
{
- struct irq_work *next, *nflags;
+ unsigned long flags, nflags;
- do {
- next = entry->next;
- if ((unsigned long)next & IRQ_WORK_PENDING)
+ for (;;) {
+ flags = work->flags;
+ if (flags & IRQ_WORK_PENDING)
return false;
- nflags = next_flags(next, IRQ_WORK_FLAGS);
- } while (cmpxchg(&entry->next, next, nflags) != next);
+ nflags = flags | IRQ_WORK_FLAGS;
+ if (cmpxchg(&work->flags, flags, nflags) == flags)
+ break;
+ cpu_relax();
+ }
return true;
}
-
void __weak arch_irq_work_raise(void)
{
/*
@@ -77,20 +56,13 @@ void __weak arch_irq_work_raise(void)
/*
* Queue the entry and raise the IPI if needed.
*/
-static void __irq_work_queue(struct irq_work *entry)
+static void __irq_work_queue(struct irq_work *work)
{
- struct irq_work *next;
-
preempt_disable();
- do {
- next = __this_cpu_read(irq_work_list);
- /* Can assign non-atomic because we keep the flags set. */
- entry->next = next_flags(next, IRQ_WORK_FLAGS);
- } while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
-
+ llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
/* The list was empty, raise self-interrupt to start processing. */
- if (!irq_work_next(entry))
+ if (!work->llnode.next)
arch_irq_work_raise();
preempt_enable();
@@ -102,16 +74,16 @@ static void __irq_work_queue(struct irq_
*
* Can be re-enqueued while the callback is still in progress.
*/
-bool irq_work_queue(struct irq_work *entry)
+bool irq_work_queue(struct irq_work *work)
{
- if (!irq_work_claim(entry)) {
+ if (!irq_work_claim(work)) {
/*
* Already enqueued, can't do!
*/
return false;
}
- __irq_work_queue(entry);
+ __irq_work_queue(work);
return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
@@ -122,34 +94,34 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
*/
void irq_work_run(void)
{
- struct irq_work *list;
+ struct irq_work *work;
+ struct llist_head *this_list;
+ struct llist_node *llnode;
- if (this_cpu_read(irq_work_list) == NULL)
+ this_list = &__get_cpu_var(irq_work_list);
+ if (llist_empty(this_list))
return;
BUG_ON(!in_irq());
BUG_ON(!irqs_disabled());
- list = this_cpu_xchg(irq_work_list, NULL);
-
- while (list != NULL) {
- struct irq_work *entry = list;
+ llnode = llist_del_all(this_list);
+ while (llnode != NULL) {
+ work = llist_entry(llnode, struct irq_work, llnode);
- list = irq_work_next(list);
+ llnode = llnode->next;
/*
- * Clear the PENDING bit, after this point the @entry
+ * Clear the PENDING bit, after this point the @work
* can be re-used.
*/
- entry->next = next_flags(NULL, IRQ_WORK_BUSY);
- entry->func(entry);
+ work->flags = IRQ_WORK_BUSY;
+ work->func(work);
/*
* Clear the BUSY bit and return to the free state if
* no-one else claimed it meanwhile.
*/
- (void)cmpxchg(&entry->next,
- next_flags(NULL, IRQ_WORK_BUSY),
- NULL);
+ (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
}
}
EXPORT_SYMBOL_GPL(irq_work_run);
@@ -158,11 +130,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
* Synchronize against the irq_work @entry, ensures the entry is not
* currently in use.
*/
-void irq_work_sync(struct irq_work *entry)
+void irq_work_sync(struct irq_work *work)
{
WARN_ON_ONCE(irqs_disabled());
- while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+ while (work->flags & IRQ_WORK_BUSY)
cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
* Re: [PATCH -mm 4/4] irq_work, Use llist in irq_work
From: Mathieu Desnoyers @ 2011-09-06 10:57 UTC
To: Huang Ying; +Cc: Andrew Morton, linux-kernel, Andi Kleen, Peter Zijlstra
* Huang Ying (ying.huang@intel.com) wrote:
> Use llist in irq_work instead of the open-coded lock-less linked list
> implementation in irq_work, to avoid code duplication.
>
> Signed-off-by: Huang Ying <ying.huang@intel.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> ---
> include/linux/irq_work.h | 15 ++++---
> kernel/irq_work.c | 92 ++++++++++++++++-------------------------------
> 2 files changed, 41 insertions(+), 66 deletions(-)
>
[...]
> -static void __irq_work_queue(struct irq_work *entry)
> +static void __irq_work_queue(struct irq_work *work)
> {
> - struct irq_work *next;
> -
> preempt_disable();
>
> - do {
> - next = __this_cpu_read(irq_work_list);
> - /* Can assign non-atomic because we keep the flags set. */
> - entry->next = next_flags(next, IRQ_WORK_FLAGS);
> - } while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
> -
> + llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
> /* The list was empty, raise self-interrupt to start processing. */
> - if (!irq_work_next(entry))
> + if (!work->llnode.next)
Hrm. What happens if this function gets delayed between llist_add and
"if (!work->llnode.next)"? It seems like the thread performing
llist_del_all would be within its rights to free the memory pointed to
by work in the meantime.
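Concretely, one problematic interleaving might look like:

	CPU 0 (__irq_work_queue)	CPU 1 (irq_work_run)
	llist_add(&work->llnode, ...)
					llist_del_all(...)
					work->func(work)
					/* owner frees work */
	if (!work->llnode.next)		<- use after free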
Thanks,
Mathieu
--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com
* Re: [PATCH -mm 4/4] irq_work, Use llist in irq_work
From: Huang Ying @ 2011-09-07 0:55 UTC
To: Mathieu Desnoyers
Cc: Andrew Morton, linux-kernel@vger.kernel.org, Andi Kleen,
Peter Zijlstra
On 09/06/2011 06:57 PM, Mathieu Desnoyers wrote:
> * Huang Ying (ying.huang@intel.com) wrote:
>> Use llist in irq_work instead of the open-coded lock-less linked list
>> implementation in irq_work, to avoid code duplication.
>>
>> Signed-off-by: Huang Ying <ying.huang@intel.com>
>> Cc: Peter Zijlstra <peterz@infradead.org>
>> ---
>> include/linux/irq_work.h | 15 ++++---
>> kernel/irq_work.c | 92 ++++++++++++++++-------------------------------
>> 2 files changed, 41 insertions(+), 66 deletions(-)
>>
> [...]
>> -static void __irq_work_queue(struct irq_work *entry)
>> +static void __irq_work_queue(struct irq_work *work)
>> {
>> - struct irq_work *next;
>> -
>> preempt_disable();
>>
>> - do {
>> - next = __this_cpu_read(irq_work_list);
>> - /* Can assign non-atomic because we keep the flags set. */
>> - entry->next = next_flags(next, IRQ_WORK_FLAGS);
>> - } while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
>> -
>> + llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
>> /* The list was empty, raise self-interrupt to start processing. */
>> - if (!irq_work_next(entry))
>> + if (!work->llnode.next)
>
>
> Hrm. What happens if this function gets delayed between llist_add and
> "if (!work->llnode.next)"? It seems like the thread performing
> llist_del_all would be within its rights to free the memory pointed to
> by work in the meantime.
Yes. This is an issue. It can be fixed in several ways:

1) use another flag to indicate whether the list is empty
2) make llist_add return whether the list was empty before adding (see
the sketch below)
3) require irq_work users to free the memory with call_rcu() or after
synchronize_rcu().
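For example, 2) could be implemented along these lines (just a sketch
on top of this series, not a tested patch):

/*
 * Sketch: return whether the list was empty before the add, so that
 * callers need not dereference the node after it has been published.
 */
static inline bool llist_add(struct llist_node *new, struct llist_head *head)
{
	struct llist_node *entry, *old_entry;

	CHECK_NMI_SAFE_CMPXCHG();

	entry = head->first;
	for (;;) {
		old_entry = entry;
		new->next = entry;
		entry = cmpxchg(&head->first, old_entry, new);
		if (entry == old_entry)
			break;
		cpu_relax();
	}

	return old_entry == NULL;
}

Then __irq_work_queue() could test the return value instead of
work->llnode.next.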
Personally I prefer 1), which also avoids exposing llist implementation
details.
Best Regards,
Huang Ying
* Re: [PATCH -mm 1/4] llist, Make all llist functions inline
From: Mathieu Desnoyers @ 2011-09-06 10:47 UTC
To: Huang Ying; +Cc: Andrew Morton, linux-kernel, Andi Kleen, Peter Zijlstra
* Huang Ying (ying.huang@intel.com) wrote:
> Because llist may be used in performance-critical code paths, make all
> llist functions inline to avoid function call overhead.
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thanks,
Mathieu
>
> Signed-off-by: Huang Ying <ying.huang@intel.com>
> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> ---
> drivers/acpi/apei/Kconfig | 1
> include/linux/llist.h | 122 +++++++++++++++++++++++++++++++++++++++++--
> lib/Kconfig | 3 -
> lib/Makefile | 2
> lib/llist.c | 129 ----------------------------------------------
> 5 files changed, 116 insertions(+), 141 deletions(-)
> delete mode 100644 lib/llist.c
>
> --- a/drivers/acpi/apei/Kconfig
> +++ b/drivers/acpi/apei/Kconfig
> @@ -14,7 +14,6 @@ config ACPI_APEI_GHES
> depends on ACPI_APEI && X86
> select ACPI_HED
> select IRQ_WORK
> - select LLIST
> select GENERIC_ALLOCATOR
> help
> Generic Hardware Error Source provides a way to report
> --- a/include/linux/llist.h
> +++ b/include/linux/llist.h
> @@ -37,8 +37,28 @@
> * architectures that don't have NMI-safe cmpxchg implementation, the
> * list can NOT be used in NMI handler. So code uses the list in NMI
> * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
> + *
> + * Copyright 2010,2011 Intel Corp.
> + * Author: Huang Ying <ying.huang@intel.com>
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License version
> + * 2 as published by the Free Software Foundation;
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
> */
>
> +#include <linux/kernel.h>
> +#include <asm/system.h>
> +#include <asm/processor.h>
> +
> struct llist_head {
> struct llist_node *first;
> };
> @@ -113,14 +133,104 @@ static inline void init_llist_head(struc
> * test whether the list is empty without deleting something from the
> * list.
> */
> -static inline int llist_empty(const struct llist_head *head)
> +static inline bool llist_empty(const struct llist_head *head)
> {
> return ACCESS_ONCE(head->first) == NULL;
> }
>
> -void llist_add(struct llist_node *new, struct llist_head *head);
> -void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
> - struct llist_head *head);
> -struct llist_node *llist_del_first(struct llist_head *head);
> -struct llist_node *llist_del_all(struct llist_head *head);
> +/**
> + * llist_add - add a new entry
> + * @new: new entry to be added
> + * @head: the head for your lock-less list
> + */
> +static inline void llist_add(struct llist_node *new, struct llist_head *head)
> +{
> + struct llist_node *entry, *old_entry;
> +
> +#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> + BUG_ON(in_nmi());
> +#endif
> +
> + entry = head->first;
> + do {
> + old_entry = entry;
> + new->next = entry;
> + cpu_relax();
> + } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
> +}
> +
> +/**
> + * llist_add_batch - add several linked entries in batch
> + * @new_first: first entry in batch to be added
> + * @new_last: last entry in batch to be added
> + * @head: the head for your lock-less list
> + */
> +static inline void llist_add_batch(struct llist_node *new_first,
> + struct llist_node *new_last,
> + struct llist_head *head)
> +{
> + struct llist_node *entry, *old_entry;
> +
> +#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> + BUG_ON(in_nmi());
> +#endif
> +
> + entry = head->first;
> + do {
> + old_entry = entry;
> + new_last->next = entry;
> + cpu_relax();
> + } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
> +}
> +
> +/**
> + * llist_del_first - delete the first entry of lock-less list
> + * @head: the head for your lock-less list
> + *
> + * If list is empty, return NULL, otherwise, return the first entry
> + * deleted, this is the newest added one.
> + *
> + * Only one llist_del_first user can be used simultaneously with
> + * multiple llist_add users without lock. Because otherwise
> + * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
> + * llist_add) sequence in another user may change @head->first->next,
> + * but keep @head->first. If multiple consumers are needed, please
> + * use llist_del_all or use lock between consumers.
> + */
> +static inline struct llist_node *llist_del_first(struct llist_head *head)
> +{
> + struct llist_node *entry, *old_entry, *next;
> +
> +#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> + BUG_ON(in_nmi());
> +#endif
> +
> + entry = head->first;
> + do {
> + if (entry == NULL)
> + return NULL;
> + old_entry = entry;
> + next = entry->next;
> + cpu_relax();
> + } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
> +
> + return entry;
> +}
> +
> +/**
> + * llist_del_all - delete all entries from lock-less list
> + * @head: the head of lock-less list to delete all entries
> + *
> + * If list is empty, return NULL, otherwise, delete all entries and
> + * return the pointer to the first entry. The order of entries
> + * deleted is from the newest to the oldest added one.
> + */
> +static inline struct llist_node *llist_del_all(struct llist_head *head)
> +{
> +#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> + BUG_ON(in_nmi());
> +#endif
> +
> + return xchg(&head->first, NULL);
> +}
> #endif /* LLIST_H */
> --- a/lib/Kconfig
> +++ b/lib/Kconfig
> @@ -276,7 +276,4 @@ config CORDIC
> so its calculations are in fixed point. Modules can select this
> when they require this function. Module will be called cordic.
>
> -config LLIST
> - bool
> -
> endmenu
> --- a/lib/Makefile
> +++ b/lib/Makefile
> @@ -116,8 +116,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
>
> obj-$(CONFIG_CORDIC) += cordic.o
>
> -obj-$(CONFIG_LLIST) += llist.o
> -
> hostprogs-y := gen_crc32table
> clean-files := crc32table.h
>
> --- a/lib/llist.c
> +++ /dev/null
> @@ -1,129 +0,0 @@
> -/*
> - * Lock-less NULL terminated single linked list
> - *
> - * The basic atomic operation of this list is cmpxchg on long. On
> - * architectures that don't have NMI-safe cmpxchg implementation, the
> - * list can NOT be used in NMI handler. So code uses the list in NMI
> - * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
> - *
> - * Copyright 2010,2011 Intel Corp.
> - * Author: Huang Ying <ying.huang@intel.com>
> - *
> - * This program is free software; you can redistribute it and/or
> - * modify it under the terms of the GNU General Public License version
> - * 2 as published by the Free Software Foundation;
> - *
> - * This program is distributed in the hope that it will be useful,
> - * but WITHOUT ANY WARRANTY; without even the implied warranty of
> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> - * GNU General Public License for more details.
> - *
> - * You should have received a copy of the GNU General Public License
> - * along with this program; if not, write to the Free Software
> - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
> - */
> -#include <linux/kernel.h>
> -#include <linux/module.h>
> -#include <linux/interrupt.h>
> -#include <linux/llist.h>
> -
> -#include <asm/system.h>
> -
> -/**
> - * llist_add - add a new entry
> - * @new: new entry to be added
> - * @head: the head for your lock-less list
> - */
> -void llist_add(struct llist_node *new, struct llist_head *head)
> -{
> - struct llist_node *entry, *old_entry;
> -
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> -
> - entry = head->first;
> - do {
> - old_entry = entry;
> - new->next = entry;
> - cpu_relax();
> - } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
> -}
> -EXPORT_SYMBOL_GPL(llist_add);
> -
> -/**
> - * llist_add_batch - add several linked entries in batch
> - * @new_first: first entry in batch to be added
> - * @new_last: last entry in batch to be added
> - * @head: the head for your lock-less list
> - */
> -void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
> - struct llist_head *head)
> -{
> - struct llist_node *entry, *old_entry;
> -
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> -
> - entry = head->first;
> - do {
> - old_entry = entry;
> - new_last->next = entry;
> - cpu_relax();
> - } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
> -}
> -EXPORT_SYMBOL_GPL(llist_add_batch);
> -
> -/**
> - * llist_del_first - delete the first entry of lock-less list
> - * @head: the head for your lock-less list
> - *
> - * If list is empty, return NULL, otherwise, return the first entry
> - * deleted, this is the newest added one.
> - *
> - * Only one llist_del_first user can be used simultaneously with
> - * multiple llist_add users without lock. Because otherwise
> - * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
> - * llist_add) sequence in another user may change @head->first->next,
> - * but keep @head->first. If multiple consumers are needed, please
> - * use llist_del_all or use lock between consumers.
> - */
> -struct llist_node *llist_del_first(struct llist_head *head)
> -{
> - struct llist_node *entry, *old_entry, *next;
> -
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> -
> - entry = head->first;
> - do {
> - if (entry == NULL)
> - return NULL;
> - old_entry = entry;
> - next = entry->next;
> - cpu_relax();
> - } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
> -
> - return entry;
> -}
> -EXPORT_SYMBOL_GPL(llist_del_first);
> -
> -/**
> - * llist_del_all - delete all entries from lock-less list
> - * @head: the head of lock-less list to delete all entries
> - *
> - * If list is empty, return NULL, otherwise, delete all entries and
> - * return the pointer to the first entry. The order of entries
> - * deleted is from the newest to the oldest added one.
> - */
> -struct llist_node *llist_del_all(struct llist_head *head)
> -{
> -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
> - BUG_ON(in_nmi());
> -#endif
> -
> - return xchg(&head->first, NULL);
> -}
> -EXPORT_SYMBOL_GPL(llist_del_all);
--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com