From: Mel Gorman <mgorman@suse.de>
To: Peter Zijlstra <a.p.zijlstra@chello.nl>, Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
Ingo Molnar <mingo@kernel.org>,
Andrea Arcangeli <aarcange@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Linux-MM <linux-mm@kvack.org>,
LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>
Subject: [PATCH 38/50] sched: numa: Use a system-wide search to find swap/migration candidates
Date: Tue, 10 Sep 2013 10:32:18 +0100
Message-ID: <1378805550-29949-39-git-send-email-mgorman@suse.de>
In-Reply-To: <1378805550-29949-1-git-send-email-mgorman@suse.de>
This patch implements a system-wide search for swap/migration candidates
based on total NUMA hinting faults. It has a balance limit; however, it
does not properly consider total node balance.
In the old scheme a task selected a preferred node based on the highest
number of private faults recorded on the node. In this scheme, the preferred
node is based on the total number of faults. If the preferred node for a
task changes, task_numa_migrate() will search the whole system for tasks
to swap with that would both improve the overall compute balance and
minimise the expected number of remote NUMA hinting faults.
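As an aside, and purely for illustration (this sketch is not part of the
patch or of the kernel sources), the swap-candidate scoring described above
can be modelled in a small userspace program. NR_NODES, IMBALANCE_PCT, the
fault arrays and the helper names below are assumptions invented for the
sketch; the real code in the diff below operates on p->numa_faults and
weighted_cpuload() under RCU.

/*
 * Minimal userspace model of the swap-candidate scoring: a candidate is
 * acceptable when the combined NUMA-fault differential of the two tasks
 * improves and the resulting load stays within the imbalance threshold.
 */
#include <stdio.h>

#define NR_NODES	2
#define IMBALANCE_PCT	112	/* default env.imbalance_pct in the patch */

static long swap_improvement(const long p_faults[NR_NODES],
			     const long cur_faults[NR_NODES],
			     int src_nid, int dst_nid)
{
	/* fault differential for the task being placed... */
	long imp = p_faults[dst_nid] - p_faults[src_nid];

	/* ...plus the differential for the task it would swap with */
	imp += cur_faults[src_nid] - cur_faults[dst_nid];

	return imp;
}

static int load_acceptable(long src_load, long dst_load)
{
	/* make src_load the smaller, then apply the imbalance threshold */
	if (dst_load < src_load) {
		long tmp = src_load;

		src_load = dst_load;
		dst_load = tmp;
	}

	return src_load * IMBALANCE_PCT >= dst_load * 100;
}

int main(void)
{
	/* hypothetical per-node NUMA hinting fault counts */
	long p_faults[NR_NODES]   = { 10, 90 };	/* task p faults mostly on node 1 */
	long cur_faults[NR_NODES] = { 80, 20 };	/* cur faults mostly on node 0 */

	printf("improvement if swapped: %ld, balance ok: %d\n",
	       swap_improvement(p_faults, cur_faults, 0, 1),
	       load_acceptable(100, 110));

	return 0;
}

A swap only becomes the best candidate when both conditions hold, which is
the same structure task_numa_compare() follows in the diff below.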
Note from Mel: There appears to be no guarantee that the node the source
task is placed on by task_numa_migrate() has any relationship
to the newly selected task->numa_preferred_nid. It is not clear
if this is deliberate but it looks accidental.
[riel@redhat.com: Do not swap with tasks that cannot run on source cpu]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
kernel/sched/fair.c | 244 ++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 178 insertions(+), 66 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cf16c1a..12b42a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -816,6 +816,8 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
* Scheduling class queueing methods:
*/
+static unsigned long task_h_load(struct task_struct *p);
+
#ifdef CONFIG_NUMA_BALANCING
/*
* Approximate time to scan a full NUMA task in ms. The task scan period is
@@ -906,12 +908,40 @@ static unsigned long target_load(int cpu, int type);
static unsigned long power_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
+/* Cached statistics for all CPUs within a node */
struct numa_stats {
+ unsigned long nr_running;
unsigned long load;
- s64 eff_load;
- unsigned long faults;
+
+ /* Total compute capacity of CPUs on a node */
+ unsigned long power;
+
+ /* Approximate capacity in terms of runnable tasks on a node */
+ unsigned long capacity;
+ int has_capacity;
};
+/*
+ * XXX borrowed from update_sg_lb_stats
+ */
+static void update_numa_stats(struct numa_stats *ns, int nid)
+{
+ int cpu;
+
+ memset(ns, 0, sizeof(*ns));
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ struct rq *rq = cpu_rq(cpu);
+
+ ns->nr_running += rq->nr_running;
+ ns->load += weighted_cpuload(cpu);
+ ns->power += power_of(cpu);
+ }
+
+ ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
+ ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
+ ns->has_capacity = (ns->nr_running < ns->capacity);
+}
+
struct task_numa_env {
struct task_struct *p;
@@ -920,28 +950,126 @@ struct task_numa_env {
struct numa_stats src_stats, dst_stats;
- unsigned long best_load;
+ int imbalance_pct, idx;
+
+ struct task_struct *best_task;
+ long best_imp;
int best_cpu;
};
+static void task_numa_assign(struct task_numa_env *env,
+ struct task_struct *p, long imp)
+{
+ if (env->best_task)
+ put_task_struct(env->best_task);
+ if (p)
+ get_task_struct(p);
+
+ env->best_task = p;
+ env->best_imp = imp;
+ env->best_cpu = env->dst_cpu;
+}
+
+/*
+ * This checks if the overall compute and NUMA accesses of the system would
+ * be improved if the source task was migrated to the target dst_cpu, taking
+ * into account that it might be best to exchange the task currently running
+ * on the dst_cpu with the source task.
+ */
+static void task_numa_compare(struct task_numa_env *env, long imp)
+{
+ struct rq *src_rq = cpu_rq(env->src_cpu);
+ struct rq *dst_rq = cpu_rq(env->dst_cpu);
+ struct task_struct *cur;
+ long dst_load, src_load;
+ long load;
+
+ rcu_read_lock();
+ cur = ACCESS_ONCE(dst_rq->curr);
+ if (cur->pid == 0) /* idle */
+ cur = NULL;
+
+ /*
+ * "imp" is the fault differential for the source task between the
+ * source and destination node. Calculate the total differential for
+ * the source task and potential destination task. The more negative
+ * the value is, the more remote accesses would be expected to
+ * be incurred if the tasks were swapped.
+ */
+ if (cur) {
+ /* Skip this swap candidate if it cannot move to the source cpu */
+ if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
+ goto unlock;
+
+ imp += task_faults(cur, env->src_nid) -
+ task_faults(cur, env->dst_nid);
+ }
+
+ if (imp < env->best_imp)
+ goto unlock;
+
+ if (!cur) {
+ /* Is there capacity at our destination? */
+ if (env->src_stats.has_capacity &&
+ !env->dst_stats.has_capacity)
+ goto unlock;
+
+ goto balance;
+ }
+
+ /* Balance doesn't matter much if we're running a task per cpu */
+ if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
+ goto assign;
+
+ /*
+ * In the overloaded case, try and keep the load balanced.
+ */
+balance:
+ dst_load = env->dst_stats.load;
+ src_load = env->src_stats.load;
+
+ /* XXX missing power terms */
+ load = task_h_load(env->p);
+ dst_load += load;
+ src_load -= load;
+
+ if (cur) {
+ load = task_h_load(cur);
+ dst_load -= load;
+ src_load += load;
+ }
+
+ /* make src_load the smaller */
+ if (dst_load < src_load)
+ swap(dst_load, src_load);
+
+ if (src_load * env->imbalance_pct < dst_load * 100)
+ goto unlock;
+
+assign:
+ task_numa_assign(env, cur, imp);
+unlock:
+ rcu_read_unlock();
+}
+
static int task_numa_migrate(struct task_struct *p)
{
- int node_cpu = cpumask_first(cpumask_of_node(p->numa_preferred_nid));
+ const struct cpumask *cpumask = cpumask_of_node(p->numa_preferred_nid);
struct task_numa_env env = {
.p = p,
+
.src_cpu = task_cpu(p),
.src_nid = cpu_to_node(task_cpu(p)),
- .dst_cpu = node_cpu,
- .dst_nid = p->numa_preferred_nid,
- .best_load = ULONG_MAX,
- .best_cpu = task_cpu(p),
+
+ .imbalance_pct = 112,
+
+ .best_task = NULL,
+ .best_imp = 0,
+ .best_cpu = -1
};
- struct sched_domain *sd;
- int cpu;
- struct task_group *tg = task_group(p);
- unsigned long weight;
- bool balanced;
- int imbalance_pct, idx = -1;
+ struct sched_domain *sd;
+ unsigned long faults;
+ int nid, cpu, ret;
/*
* Find the lowest common scheduling domain covering the nodes of both
@@ -949,66 +1077,52 @@ static int task_numa_migrate(struct task_struct *p)
*/
rcu_read_lock();
for_each_domain(env.src_cpu, sd) {
- if (cpumask_test_cpu(node_cpu, sched_domain_span(sd))) {
- /*
- * busy_idx is used for the load decision as it is the
- * same index used by the regular load balancer for an
- * active cpu.
- */
- idx = sd->busy_idx;
- imbalance_pct = sd->imbalance_pct;
+ if (cpumask_intersects(cpumask, sched_domain_span(sd))) {
+ env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
break;
}
}
rcu_read_unlock();
- if (WARN_ON_ONCE(idx == -1))
- return 0;
+ faults = task_faults(p, env.src_nid);
+ update_numa_stats(&env.src_stats, env.src_nid);
- /*
- * XXX the below is mostly nicked from wake_affine(); we should
- * see about sharing a bit if at all possible; also it might want
- * some per entity weight love.
- */
- weight = p->se.load.weight;
- env.src_stats.load = source_load(env.src_cpu, idx);
- env.src_stats.eff_load = 100 + (imbalance_pct - 100) / 2;
- env.src_stats.eff_load *= power_of(env.src_cpu);
- env.src_stats.eff_load *= env.src_stats.load + effective_load(tg, env.src_cpu, -weight, -weight);
-
- for_each_cpu(cpu, cpumask_of_node(env.dst_nid)) {
- env.dst_cpu = cpu;
- env.dst_stats.load = target_load(cpu, idx);
-
- /* If the CPU is idle, use it */
- if (!env.dst_stats.load) {
- env.best_cpu = cpu;
- goto migrate;
- }
+ /* Find an alternative node with relatively better statistics */
+ for_each_online_node(nid) {
+ long imp;
- /* Otherwise check the target CPU load */
- env.dst_stats.eff_load = 100;
- env.dst_stats.eff_load *= power_of(cpu);
- env.dst_stats.eff_load *= env.dst_stats.load + effective_load(tg, cpu, weight, weight);
+ if (nid == env.src_nid)
+ continue;
- /*
- * Destination is considered balanced if the destination CPU is
- * less loaded than the source CPU. Unfortunately there is a
- * risk that a task running on a lightly loaded CPU will not
- * migrate to its preferred node due to load imbalances.
- */
- balanced = (env.dst_stats.eff_load <= env.src_stats.eff_load);
- if (!balanced)
+ /* Only consider nodes that recorded more faults */
+ imp = task_faults(p, nid) - faults;
+ if (imp < 0)
continue;
- if (env.dst_stats.eff_load < env.best_load) {
- env.best_load = env.dst_stats.eff_load;
- env.best_cpu = cpu;
+ env.dst_nid = nid;
+ update_numa_stats(&env.dst_stats, env.dst_nid);
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ /* Skip this CPU if the source task cannot migrate */
+ if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+ continue;
+
+ env.dst_cpu = cpu;
+ task_numa_compare(&env, imp);
}
}
-migrate:
- return migrate_task_to(p, env.best_cpu);
+ /* No better CPU than the current one was found. */
+ if (env.best_cpu == -1)
+ return -EAGAIN;
+
+ if (env.best_task == NULL) {
+ int ret = migrate_task_to(p, env.best_cpu);
+ return ret;
+ }
+
+ ret = migrate_swap(p, env.best_task);
+ put_task_struct(env.best_task);
+ return ret;
}
/* Attempt to migrate a task to a CPU on the preferred node. */
@@ -1046,7 +1160,7 @@ static void task_numa_placement(struct task_struct *p)
/* Find the node with the highest number of faults */
for_each_online_node(nid) {
- unsigned long faults;
+ unsigned long faults = 0;
int priv, i;
for (priv = 0; priv < 2; priv++) {
@@ -1056,10 +1170,10 @@ static void task_numa_placement(struct task_struct *p)
p->numa_faults[i] >>= 1;
p->numa_faults[i] += p->numa_faults_buffer[i];
p->numa_faults_buffer[i] = 0;
+
+ faults += p->numa_faults[i];
}
- /* Find maximum private faults */
- faults = p->numa_faults[task_faults_idx(nid, 1)];
if (faults > max_faults) {
max_faults = faults;
max_nid = nid;
@@ -4405,8 +4519,6 @@ static int move_one_task(struct lb_env *env)
return 0;
}
-static unsigned long task_h_load(struct task_struct *p);
-
static const unsigned int sched_nr_migrate_break = 32;
/*
--
1.8.1.4