From: Glauber Costa <glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
To: akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org
Cc: linux-fsdevel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
mgorman-l3A5Bk7waGM@public.gmane.org,
david-FqsqvQoI3Ljby3iVrkZq2A@public.gmane.org,
linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org,
cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
kamezawa.hiroyu-+CUm20s59erQFUHtdCDX3A@public.gmane.org,
mhocko-Y4LbUc7mvzI@public.gmane.org,
hannes-druUgvl0LCNAfugRpC6u6w@public.gmane.org,
hughd-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
gthelen-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
Dave Chinner <dchinner-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>,
Glauber Costa <glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
Subject: [PATCH v11 11/25] list_lru: per-node list infrastructure
Date: Fri, 7 Jun 2013 00:34:44 +0400
Message-ID: <1370550898-26711-12-git-send-email-glommer@openvz.org>
In-Reply-To: <1370550898-26711-1-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
From: Dave Chinner <dchinner-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
Now that we have an LRU list API, we can start to enhance the
implementation. This splits the single LRU list into per-node lists
and locks to improve scalability. Items are placed on lists according
to the node the memory belongs to. To make scanning the lists
efficient, we also track which per-node lists have entries in an
active nodemask.
Note:
We use a fixed-size array for the per-node lists, so this struct can
become very large if MAX_NUMNODES is large. If that becomes a problem,
it is fixable by turning the array into a pointer and dynamically
allocating it to nr_node_ids entries. That quantity is
firmware-provided, so it would still leave room for every node, at the
cost of a pointer lookup and an extra allocation. Because that
allocation would most likely come from a different slab cache than the
main structure, it may very well fail.
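As an illustration, a sketch of that dynamic-allocation alternative
follows (hypothetical names, not part of this patch; a later patch in
this series switches to dynamically sized node arrays along these
lines):

	#include <linux/slab.h>
	#include <linux/nodemask.h>
	#include <linux/list_lru.h>

	/* Hypothetical dynamically sized variant of struct list_lru. */
	struct list_lru_dyn {
		struct list_lru_node *node;	/* nr_node_ids entries */
		nodemask_t active_nodes;
	};

	int list_lru_dyn_init(struct list_lru_dyn *lru)
	{
		int i;

		/* This extra allocation is the failure point noted above. */
		lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
		if (!lru->node)
			return -ENOMEM;

		nodes_clear(lru->active_nodes);
		for (i = 0; i < nr_node_ids; i++) {
			spin_lock_init(&lru->node[i].lock);
			INIT_LIST_HEAD(&lru->node[i].list);
			/* nr_items is already zeroed by kcalloc() */
		}
		return 0;
	}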
[ glommer: fixed warnings, added note about node lru ]
Signed-off-by: Dave Chinner <dchinner-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
Signed-off-by: Glauber Costa <glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
Reviewed-by: Greg Thelen <gthelen-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>
Acked-by: Mel Gorman <mgorman-l3A5Bk7waGM@public.gmane.org>
---
 include/linux/list_lru.h |  23 ++++++--
 mm/list_lru.c            | 146 +++++++++++++++++++++++++++++++++++------------
 2 files changed, 129 insertions(+), 40 deletions(-)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 1a548b0..f4d4cb6 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -8,6 +8,7 @@
#define _LRU_LIST_H
#include <linux/list.h>
+#include <linux/nodemask.h>
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
@@ -18,11 +19,26 @@ enum lru_status {
internally, but has to return locked. */
};
-struct list_lru {
+struct list_lru_node {
spinlock_t lock;
struct list_head list;
/* kept as signed so we can catch imbalance bugs */
long nr_items;
+} ____cacheline_aligned_in_smp;
+
+struct list_lru {
+ /*
+ * Because we use a fixed-size array, this struct can be very big if
+ * MAX_NUMNODES is big. If this becomes a problem this is fixable by
+ * turning this into a pointer and dynamically allocating this to
+ * nr_node_ids. This quantity is firmware-provided, and still would
+ * provide room for all nodes at the cost of a pointer lookup and an
+ * extra allocation. Because that allocation will most likely come from
+ * a different slab cache than the main structure holding this
+ * structure, we may very well fail.
+ */
+ struct list_lru_node node[MAX_NUMNODES];
+ nodemask_t active_nodes;
};
int list_lru_init(struct list_lru *lru);
@@ -66,10 +82,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* guarantee that the list is not updated while the count is being computed.
* Callers that want such a guarantee need to provide an outer lock.
*/
-static inline unsigned long list_lru_count(struct list_lru *lru)
-{
- return lru->nr_items;
-}
+unsigned long list_lru_count(struct list_lru *lru);
typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index dd74c54..f2d1d6e 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -6,41 +6,73 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mm.h>
#include <linux/list_lru.h>
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
- spin_lock(&lru->lock);
+ int nid = page_to_nid(virt_to_page(item));
+ struct list_lru_node *nlru = &lru->node[nid];
+
+ spin_lock(&nlru->lock);
+ BUG_ON(nlru->nr_items < 0);
if (list_empty(item)) {
- list_add_tail(item, &lru->list);
- lru->nr_items++;
- spin_unlock(&lru->lock);
+ list_add_tail(item, &nlru->list);
+ if (nlru->nr_items++ == 0)
+ node_set(nid, lru->active_nodes);
+ spin_unlock(&nlru->lock);
return true;
}
- spin_unlock(&lru->lock);
+ spin_unlock(&nlru->lock);
return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
- spin_lock(&lru->lock);
+ int nid = page_to_nid(virt_to_page(item));
+ struct list_lru_node *nlru = &lru->node[nid];
+
+ spin_lock(&nlru->lock);
if (!list_empty(item)) {
list_del_init(item);
- lru->nr_items--;
- spin_unlock(&lru->lock);
+ if (--nlru->nr_items == 0)
+ node_clear(nid, lru->active_nodes);
+ BUG_ON(nlru->nr_items < 0);
+ spin_unlock(&nlru->lock);
return true;
}
- spin_unlock(&lru->lock);
+ spin_unlock(&nlru->lock);
return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
-unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
- void *cb_arg, unsigned long nr_to_walk)
+unsigned long list_lru_count(struct list_lru *lru)
{
+ unsigned long count = 0;
+ int nid;
+
+ for_each_node_mask(nid, lru->active_nodes) {
+ struct list_lru_node *nlru = &lru->node[nid];
+
+ spin_lock(&nlru->lock);
+ BUG_ON(nlru->nr_items < 0);
+ count += nlru->nr_items;
+ spin_unlock(&nlru->lock);
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(list_lru_count);
+
+static unsigned long
+list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
+ void *cb_arg, unsigned long *nr_to_walk)
+{
+
+ struct list_lru_node *nlru = &lru->node[nid];
struct list_head *item, *n;
- unsigned long removed = 0;
+ unsigned long isolated = 0;
/*
* If we don't keep state of at which pass we are, we can loop at
* LRU_RETRY, since we have no guarantees that the caller will be able
@@ -50,18 +82,20 @@ unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
*/
bool first_pass = true;
- spin_lock(&lru->lock);
+ spin_lock(&nlru->lock);
restart:
- list_for_each_safe(item, n, &lru->list) {
+ list_for_each_safe(item, n, &nlru->list) {
enum lru_status ret;
- ret = isolate(item, &lru->lock, cb_arg);
+ ret = isolate(item, &nlru->lock, cb_arg);
switch (ret) {
case LRU_REMOVED:
- lru->nr_items--;
- removed++;
+ if (--nlru->nr_items == 0)
+ node_clear(nid, lru->active_nodes);
+ BUG_ON(nlru->nr_items < 0);
+ isolated++;
break;
case LRU_ROTATE:
- list_move_tail(item, &lru->list);
+ list_move_tail(item, &nlru->list);
break;
case LRU_SKIP:
break;
@@ -76,42 +110,84 @@ restart:
BUG();
}
- if (nr_to_walk-- == 0)
+ if ((*nr_to_walk)-- == 0)
break;
}
- spin_unlock(&lru->lock);
- return removed;
+
+ spin_unlock(&nlru->lock);
+ return isolated;
+}
+EXPORT_SYMBOL_GPL(list_lru_walk_node);
+
+unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
+ void *cb_arg, unsigned long nr_to_walk)
+{
+ unsigned long isolated = 0;
+ int nid;
+
+ for_each_node_mask(nid, lru->active_nodes) {
+ isolated += list_lru_walk_node(lru, nid, isolate,
+ cb_arg, &nr_to_walk);
+ if (nr_to_walk <= 0)
+ break;
+ }
+ return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk);
-unsigned long list_lru_dispose_all(struct list_lru *lru,
- list_lru_dispose_cb dispose)
+static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
+ list_lru_dispose_cb dispose)
{
- unsigned long disposed = 0;
+ struct list_lru_node *nlru = &lru->node[nid];
LIST_HEAD(dispose_list);
+ unsigned long disposed = 0;
- spin_lock(&lru->lock);
- while (!list_empty(&lru->list)) {
- list_splice_init(&lru->list, &dispose_list);
- disposed += lru->nr_items;
- lru->nr_items = 0;
- spin_unlock(&lru->lock);
+ spin_lock(&nlru->lock);
+ while (!list_empty(&nlru->list)) {
+ list_splice_init(&nlru->list, &dispose_list);
+ disposed += nlru->nr_items;
+ nlru->nr_items = 0;
+ node_clear(nid, lru->active_nodes);
+ spin_unlock(&nlru->lock);
dispose(&dispose_list);
- spin_lock(&lru->lock);
+ spin_lock(&nlru->lock);
}
- spin_unlock(&lru->lock);
+ spin_unlock(&nlru->lock);
return disposed;
}
+unsigned long list_lru_dispose_all(struct list_lru *lru,
+ list_lru_dispose_cb dispose)
+{
+ unsigned long disposed;
+ unsigned long total = 0;
+ int nid;
+
+ do {
+ disposed = 0;
+ for_each_node_mask(nid, lru->active_nodes) {
+ disposed += list_lru_dispose_all_node(lru, nid,
+ dispose);
+ }
+ total += disposed;
+ } while (disposed != 0);
+
+ return total;
+}
+
int list_lru_init(struct list_lru *lru)
{
- spin_lock_init(&lru->lock);
- INIT_LIST_HEAD(&lru->list);
- lru->nr_items = 0;
+ int i;
+ nodes_clear(lru->active_nodes);
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ spin_lock_init(&lru->node[i].lock);
+ INIT_LIST_HEAD(&lru->node[i].list);
+ lru->node[i].nr_items = 0;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);
--
1.8.1.4