From: Mel Gorman <mgorman@suse.de>
To: Peter Zijlstra <a.p.zijlstra@chello.nl>, Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
Ingo Molnar <mingo@kernel.org>,
Andrea Arcangeli <aarcange@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Linux-MM <linux-mm@kvack.org>,
LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>
Subject: [PATCH 50/50] sched: numa: Avoid migrating tasks that are placed on their preferred node
Date: Tue, 10 Sep 2013 10:32:30 +0100 [thread overview]
Message-ID: <1378805550-29949-51-git-send-email-mgorman@suse.de> (raw)
In-Reply-To: <1378805550-29949-1-git-send-email-mgorman@suse.de>
From: Peter Zijlstra <peterz@infradead.org>
(This changelog needs more work; it is currently inaccurate and it is not
clear at exactly what point rt > env->fbq_type becomes true for the logic to
kick in.)
This patch classifies scheduler domains and runqueues into FBQ types (the
enum fbq_type consumed by find_busiest_queue()), which are one of:
regular: There are tasks running that do not care about their NUMA
placement
remote: There are tasks running that care about their placement but are
currently running on a node remote to their ideal placement
all: No distinction
To implement this, the patch tracks the number of tasks that are optimally
NUMA placed (rq->nr_preferred_running) and the number of running tasks that
care about their placement at all (rq->nr_numa_running). The load balancer
uses this information to avoid migrating ideally placed NUMA tasks as long as
better options for load balancing exist.
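For illustration only, below is a minimal self-contained sketch of the
classification and of the skip test used when picking a busiest queue. The
struct and helper names are hypothetical stand-ins, not the kernel code
itself; only the enum ordering and the comparisons mirror the patch:

  #include <stdio.h>

  enum fbq_type { regular, remote, all };

  /* Hypothetical stand-in for the few rq fields the classification reads. */
  struct rq_sample {
  	unsigned int nr_running;		/* all runnable tasks */
  	unsigned int nr_numa_running;		/* tasks with a preferred nid */
  	unsigned int nr_preferred_running;	/* tasks on their preferred node */
  };

  static enum fbq_type classify(const struct rq_sample *rq)
  {
  	if (rq->nr_running > rq->nr_numa_running)
  		return regular;	/* some tasks do not care about placement */
  	if (rq->nr_running > rq->nr_preferred_running)
  		return remote;	/* some numa tasks run on the 'wrong' node */
  	return all;		/* every task is already where it wants to be */
  }

  int main(void)
  {
  	struct rq_sample rq = { .nr_running = 3, .nr_numa_running = 3,
  				.nr_preferred_running = 2 };
  	enum fbq_type group_type = remote;	/* as derived from the group stats */

  	/*
  	 * Mirrors the 'if (rt > env->fbq_type) continue;' test: a runqueue
  	 * classified stricter than the group is skipped as a migration
  	 * source, so ideally placed numa tasks are left alone while better
  	 * candidates exist.
  	 */
  	if (classify(&rq) > group_type)
  		printf("skip this runqueue\n");
  	else
  		printf("consider this runqueue\n");
  	return 0;
  }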
Not-signed-off-by: Peter Zijlstra
---
kernel/sched/core.c | 29 ++++++++++++
kernel/sched/fair.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++-----
kernel/sched/sched.h | 5 ++
3 files changed, 150 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7bf0827..3fc31b7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4485,6 +4485,35 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}
+
+/*
+ * Requeue a task on a given node and accurately track the number of NUMA
+ * tasks on the runqueues
+ */
+void sched_setnuma(struct task_struct *p, int nid)
+{
+ struct rq *rq;
+ unsigned long flags;
+ bool on_rq, running;
+
+ rq = task_rq_lock(p, &flags);
+ on_rq = p->on_rq;
+ running = task_current(rq, p);
+
+ if (on_rq)
+ dequeue_task(rq, p, 0);
+ if (running)
+ p->sched_class->put_prev_task(rq, p);
+
+ p->numa_preferred_nid = nid;
+ p->numa_migrate_seq = 1;
+
+ if (running)
+ p->sched_class->set_curr_task(rq);
+ if (on_rq)
+ enqueue_task(rq, p, 0);
+ task_rq_unlock(rq, p, &flags);
+}
#endif
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ac7184d..27bc89b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -888,6 +888,18 @@ static unsigned int task_scan_max(struct task_struct *p)
*/
unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
+static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
+{
+ rq->nr_numa_running += (p->numa_preferred_nid != -1);
+ rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
+}
+
+static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
+{
+ rq->nr_numa_running -= (p->numa_preferred_nid != -1);
+ rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
+}
+
struct numa_group {
atomic_t refcount;
@@ -1229,6 +1241,8 @@ static int task_numa_migrate(struct task_struct *p)
if (env.best_cpu == -1)
return -EAGAIN;
+ sched_setnuma(p, env.dst_nid);
+
if (env.best_task == NULL) {
int ret = migrate_task_to(p, env.best_cpu);
return ret;
@@ -1340,8 +1354,7 @@ static void task_numa_placement(struct task_struct *p)
/* Preferred node as the node with the most faults */
if (max_faults && max_nid != p->numa_preferred_nid) {
/* Update the preferred nid and migrate task if possible */
- p->numa_preferred_nid = max_nid;
- p->numa_migrate_seq = 1;
+ sched_setnuma(p, max_nid);
numa_migrate_preferred(p);
}
}
@@ -1736,6 +1749,14 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
+
+static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
+{
+}
#endif /* CONFIG_NUMA_BALANCING */
static void
@@ -1745,8 +1766,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (!parent_entity(se))
update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
- if (entity_is_task(se))
- list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+ if (entity_is_task(se)) {
+ struct rq *rq = rq_of(cfs_rq);
+
+ account_numa_enqueue(rq, task_of(se));
+ list_add(&se->group_node, &rq->cfs_tasks);
+ }
#endif
cfs_rq->nr_running++;
}
@@ -1757,8 +1782,10 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_load_sub(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
- if (entity_is_task(se))
+ if (entity_is_task(se)) {
+ account_numa_dequeue(rq_of(cfs_rq), task_of(se));
list_del_init(&se->group_node);
+ }
cfs_rq->nr_running--;
}
@@ -4553,6 +4580,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+enum fbq_type { regular, remote, all };
+
#define LBF_ALL_PINNED 0x01
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
@@ -4579,6 +4608,8 @@ struct lb_env {
unsigned int loop;
unsigned int loop_break;
unsigned int loop_max;
+
+ enum fbq_type fbq_type;
};
/*
@@ -5044,6 +5075,10 @@ struct sg_lb_stats {
unsigned int group_weight;
int group_imb; /* Is there an imbalance in the group ? */
int group_has_capacity; /* Is there extra capacity in the group? */
+#ifdef CONFIG_NUMA_BALANCING
+ unsigned int nr_numa_running;
+ unsigned int nr_preferred_running;
+#endif
};
/*
@@ -5335,6 +5370,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_load += load;
sgs->sum_nr_running += nr_running;
+#ifdef CONFIG_NUMA_BALANCING
+ sgs->nr_numa_running += rq->nr_numa_running;
+ sgs->nr_preferred_running += rq->nr_preferred_running;
+#endif
sgs->sum_weighted_load += weighted_cpuload(i);
if (idle_cpu(i))
sgs->idle_cpus++;
@@ -5409,14 +5448,43 @@ static bool update_sd_pick_busiest(struct lb_env *env,
return false;
}
+#ifdef CONFIG_NUMA_BALANCING
+static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
+{
+ if (sgs->sum_nr_running > sgs->nr_numa_running)
+ return regular;
+ if (sgs->sum_nr_running > sgs->nr_preferred_running)
+ return remote;
+ return all;
+}
+
+static inline enum fbq_type fbq_classify_rq(struct rq *rq)
+{
+ if (rq->nr_running > rq->nr_numa_running)
+ return regular;
+ if (rq->nr_running > rq->nr_preferred_running)
+ return remote;
+ return all;
+}
+#else
+static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
+{
+ return all;
+}
+
+static inline enum fbq_type fbq_classify_rq(struct rq *rq)
+{
+ return regular;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
* @balance: Should we balance.
* @sds: variable to hold the statistics for this sched_domain.
*/
-static inline void update_sd_lb_stats(struct lb_env *env,
- struct sd_lb_stats *sds)
+static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
@@ -5466,6 +5534,9 @@ static inline void update_sd_lb_stats(struct lb_env *env,
sg = sg->next;
} while (sg != env->sd->groups);
+
+ if (env->sd->flags & SD_NUMA)
+ env->fbq_type = fbq_classify_group(&sds->busiest_stat);
}
/**
@@ -5768,15 +5839,47 @@ static struct rq *find_busiest_queue(struct lb_env *env,
int i;
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
- unsigned long power = power_of(i);
- unsigned long capacity = DIV_ROUND_CLOSEST(power,
- SCHED_POWER_SCALE);
- unsigned long wl;
+ unsigned long power, capacity, wl;
+ enum fbq_type rt;
+ rq = cpu_rq(i);
+ rt = fbq_classify_rq(rq);
+
+#ifdef CONFIG_NUMA_BALANCING
+ trace_printk("group(%d:%pc) rq(%d): wl: %lu nr: %d nrn: %d nrp: %d gt:%d rt:%d\n",
+ env->sd->level, sched_group_cpus(group), i,
+ weighted_cpuload(i), rq->nr_running,
+ rq->nr_numa_running, rq->nr_preferred_running,
+ env->fbq_type, rt);
+#endif
+
+ /*
+ * We classify groups/runqueues into three groups:
+ * - regular: there are !numa tasks
+ * - remote: there are numa tasks that run on the 'wrong' node
+ * - all: there is no distinction
+ *
+ * In order to avoid migrating ideally placed numa tasks,
+ * ignore those when there's better options.
+ *
+ * If we ignore the actual busiest queue to migrate another
+ * task, the next balance pass can still reduce the busiest
+ * queue by moving tasks around inside the node.
+ *
+ * If we cannot move enough load due to this classification
+ * the next pass will adjust the group classification and
+ * allow migration of more tasks.
+ *
+ * Both cases only affect the total convergence complexity.
+ */
+ if (rt > env->fbq_type)
+ continue;
+
+ power = power_of(i);
+ capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
if (!capacity)
capacity = fix_small_capacity(env->sd, group);
- rq = cpu_rq(i);
wl = weighted_cpuload(i);
/*
@@ -5888,6 +5991,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.idle = idle,
.loop_break = sched_nr_migrate_break,
.cpus = cpus,
+ .fbq_type = all,
};
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c6ec25..b9bcea5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -407,6 +407,10 @@ struct rq {
* remote CPUs use both these fields when doing load calculation.
*/
unsigned int nr_running;
+#ifdef CONFIG_NUMA_BALANCING
+ unsigned int nr_numa_running;
+ unsigned int nr_preferred_running;
+#endif
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
unsigned long last_load_update_tick;
@@ -555,6 +559,7 @@ static inline u64 rq_clock_task(struct rq *rq)
}
#ifdef CONFIG_NUMA_BALANCING
+extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
extern void task_numa_free(struct task_struct *p);
--
1.8.1.4