linux-fsdevel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Waiman Long <Waiman.Long@hpe.com>
To: Alexander Viro <viro@zeniv.linux.org.uk>,
	Jan Kara <jack@suse.com>, Jeff Layton <jlayton@poochiereds.net>,
	"J. Bruce Fields" <bfields@fieldses.org>,
	Tejun Heo <tj@kernel.org>,
	Christoph Lameter <cl@linux-foundation.org>
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	Ingo Molnar <mingo@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Andi Kleen <andi@firstfloor.org>,
	Dave Chinner <dchinner@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Scott J Norton <scott.norton@hp.com>,
	Douglas Hatch <doug.hatch@hp.com>,
	Waiman Long <Waiman.Long@hpe.com>
Subject: [PATCH v5 5/5] lib/percpu-list: Add a config parameter for disabling per-cpu list
Date: Tue,  1 Mar 2016 16:00:03 -0500	[thread overview]
Message-ID: <1456866003-32441-6-git-send-email-Waiman.Long@hpe.com> (raw)
In-Reply-To: <1456866003-32441-1-git-send-email-Waiman.Long@hpe.com>

There is a concern that the larger pcpu_list_node structure and the
per-cpu overhead may be a waste of resources on small systems. This
patch therefore adds a config parameter, CONFIG_PERCPU_LIST, to disable
the per-cpu list if the kernel builder chooses to do so. With the
per-cpu list disabled,
all the different groups of per-cpu lists will be degenerated into
global lists for all the CPUs.

The current default is to enable the per-cpu list; a kernel builder
needs to explicitly turn it off.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
---
 fs/inode.c                  |    2 +-
 include/linux/percpu-list.h |   93 ++++++++++++++++++++++++++++++++++++++++++-
 lib/Kconfig                 |   14 ++++++
 lib/percpu-list.c           |   24 +++++++++++-
 4 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index 58d1a13..23e544b 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -437,7 +437,7 @@ EXPORT_SYMBOL_GPL(inode_sb_list_add);
 static inline void inode_sb_list_del(struct inode *inode)
 {
 	if (!list_empty(&inode->i_sb_list.list))
-		pcpu_list_del(&inode->i_sb_list);
+		pcpu_list_del(&inode->i_sb_list, inode->i_sb->s_inodes);
 }
 
 static unsigned long hash(struct super_block *sb, unsigned long hashval)
diff --git a/include/linux/percpu-list.h b/include/linux/percpu-list.h
index fc6207f..12d75ad 100644
--- a/include/linux/percpu-list.h
+++ b/include/linux/percpu-list.h
@@ -80,6 +80,7 @@ static inline void init_pcpu_list_state(struct pcpu_list_state *state)
  */
 #define pcpu_list_next_entry(pos, member) list_next_entry(pos, member.list)
 
+#ifdef CONFIG_PERCPU_LIST
 /*
  * Per-cpu node data structure
  */
@@ -212,7 +213,97 @@ static inline bool pcpu_list_iterate_safe(struct pcpu_list_head *head,
 
 extern void pcpu_list_add(struct pcpu_list_node *node,
 			  struct pcpu_list_head *head);
-extern void pcpu_list_del(struct pcpu_list_node *node);
+extern void pcpu_list_del(struct pcpu_list_node *node,
+			  struct pcpu_list_head *head);
+
+#else /* CONFIG_PERCPU_LIST */
+
+#include <linux/slab.h>
+
+/*
+ * The per-cpu lists will now be degenerated into a single global list
+ */
+struct pcpu_list_node {
+	struct list_head list;
+};
+
+static inline void init_pcpu_list_node(struct pcpu_list_node *node)
+{
+	INIT_LIST_HEAD(&node->list);
+}
+
+static inline void free_pcpu_list_head(struct pcpu_list_head **phead)
+{
+	kfree(*phead);
+	*phead = NULL;
+}
+
+static inline bool pcpu_list_empty(struct pcpu_list_head *head)
+{
+	return list_empty(&head->list);
+}
+
+static inline void
+pcpu_list_add(struct pcpu_list_node *node, struct pcpu_list_head *head)
+{
+	spin_lock(&head->lock);
+	list_add(&node->list, &head->list);
+	spin_unlock(&head->lock);
+}
+
+static inline void
+pcpu_list_del(struct pcpu_list_node *node, struct pcpu_list_head *head)
+{
+	spin_lock(&head->lock);
+	list_del_init(&node->list);
+	spin_unlock(&head->lock);
+}
+
+static inline bool pcpu_list_iterate(struct pcpu_list_head *head,
+				     struct pcpu_list_state *state)
+{
+	/*
+	 * Find next entry
+	 */
+	if (state->curr) {
+		state->curr = list_next_entry(state->curr, list);
+	} else {
+		spin_lock(&head->lock);
+		state->lock = &head->lock;
+		state->curr = list_entry(head->list.next,
+					 struct pcpu_list_node, list);
+	}
+	if (&state->curr->list == &head->list) {
+		spin_unlock(&head->lock);
+		return false;	/* The list has been exhausted */
+	}
+	return true;	/* Continue the iteration */
+}
+
+static inline bool pcpu_list_iterate_safe(struct pcpu_list_head *head,
+					  struct pcpu_list_state *state)
+{
+	/*
+	 * Find next entry
+	 */
+	if (state->curr) {
+		state->curr = state->next;
+		state->next = list_next_entry(state->next, list);
+	} else {
+		spin_lock(&head->lock);
+		state->lock = &head->lock;
+		state->curr = list_entry(head->list.next,
+					 struct pcpu_list_node, list);
+		state->next = list_next_entry(state->curr, list);
+	}
+	if (&state->curr->list == &head->list) {
+		spin_unlock(&head->lock);
+		return false;	/* The list has been exhausted */
+	}
+	return true;	/* Continue the iteration */
+}
+#endif /* CONFIG_PERCPU_LIST */
+
 extern int  init_pcpu_list_head(struct pcpu_list_head **ppcpu_head);
 
 #endif /* __LINUX_PERCPU_LIST_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 133ebc0..cdadc7e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -536,4 +536,18 @@ config ARCH_HAS_PMEM_API
 config ARCH_HAS_MMIO_FLUSH
 	bool
 
+#
+# Per-cpu list
+#
+config PERCPU_LIST
+	bool "Enable the use of per-cpu lists"
+	default y
+	depends on SMP
+	help
+	  Enables the use of per-cpu list to allow parallel insertion
+	  and deletion of list entry at the expense of a bit more
+	  overhead in list iteration as well as larger list node entry.
+	  This can help improve performance on system with a lot of
+	  cpu cores.
+
 endmenu
diff --git a/lib/percpu-list.c b/lib/percpu-list.c
index 5003bbb..46a91aa 100644
--- a/lib/percpu-list.c
+++ b/lib/percpu-list.c
@@ -24,6 +24,7 @@
  */
 static struct lock_class_key percpu_list_key;
 
+#ifdef CONFIG_PERCPU_LIST
 /*
  * Initialize the per-cpu list head
  */
@@ -76,7 +77,7 @@ void pcpu_list_add(struct pcpu_list_node *node, struct pcpu_list_head *head)
  * (becomes NULL or to a different one), we assume that the deletion was done
  * elsewhere.
  */
-void pcpu_list_del(struct pcpu_list_node *node)
+void pcpu_list_del(struct pcpu_list_node *node, struct pcpu_list_head *unused)
 {
 	spinlock_t *lock = READ_ONCE(node->lockptr);
 
@@ -98,3 +99,24 @@ void pcpu_list_del(struct pcpu_list_node *node)
 	}
 	spin_unlock(lock);
 }
+
+#else /* CONFIG_PERCPU_LIST */
+/*
+ * Initialize the per-cpu list head
+ */
+int init_pcpu_list_head(struct pcpu_list_head **phead)
+{
+	struct pcpu_list_head *head = kmalloc(sizeof(struct pcpu_list_head),
+					      GFP_KERNEL);
+
+	if (!head)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&head->list);
+	head->lock = __SPIN_LOCK_UNLOCKED(&head->lock);
+	lockdep_set_class(&head->lock, &percpu_list_key);
+
+	*phead = head;
+	return 0;
+}
+#endif /* CONFIG_PERCPU_LIST */
-- 
1.7.1

  parent reply	other threads:[~2016-03-01 21:00 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-03-01 20:59 [PATCH v5 0/5] vfs: Use per-cpu list for SB's s_inodes list Waiman Long
2016-03-01 20:59 ` [PATCH v5 1/5] lib/percpu-list: Per-cpu list with associated per-cpu locks Waiman Long
2016-03-01 21:00 ` [PATCH v5 2/5] fsnotify: Simplify inode iteration on umount Waiman Long
2016-03-01 21:00 ` [PATCH v5 3/5] vfs: Remove unnecessary list_for_each_entry_safe() variants Waiman Long
2016-03-01 21:00 ` [PATCH v5 4/5] vfs: Use per-cpu list for superblock's inode list Waiman Long
2016-03-01 21:00 ` Waiman Long [this message]
2016-03-02  8:41   ` [PATCH v5 5/5] lib/percpu-list: Add a config parameter for disabling per-cpu list Ingo Molnar
2016-03-02 21:55     ` Waiman Long
2016-03-03 10:41 ` [PATCH v5 0/5] vfs: Use per-cpu list for SB's s_inodes list Jan Kara

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1456866003-32441-6-git-send-email-Waiman.Long@hpe.com \
    --to=waiman.long@hpe.com \
    --cc=andi@firstfloor.org \
    --cc=bfields@fieldses.org \
    --cc=boqun.feng@gmail.com \
    --cc=cl@linux-foundation.org \
    --cc=dchinner@redhat.com \
    --cc=doug.hatch@hp.com \
    --cc=jack@suse.com \
    --cc=jlayton@poochiereds.net \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=peterz@infradead.org \
    --cc=scott.norton@hp.com \
    --cc=tj@kernel.org \
    --cc=viro@zeniv.linux.org.uk \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).