* [PATCH 16/19] sched, numa: NUMA home-node selection code
From: Peter Zijlstra @ 2012-07-31 19:12 UTC (permalink / raw)
  To: mingo, riel, oleg, pjt, akpm, torvalds, tglx, Lee.Schermerhorn
  Cc: linux-kernel, Peter Zijlstra

[-- Attachment #1: numa-1.patch --]
[-- Type: text/plain, Size: 12429 bytes --]

Now that we have infrastructure in place to migrate pages back to
their home-node, and migrate memory towards the home-node, we need to
set the home-node.

Instead of creating a secondary control loop, fully rely on the
existing load-balancer to do the right thing. The home-node selection
logic simply picks the node the task has been found running on for
two consecutive samples (see task_tick_numa).

This means NUMA placement is directly related to regular placement.
The home-node logic in the load-balancer tries to keep a task on its
home-node, whereas the fairness and work-conserving constraints try to
move it away.

The balance between these two 'forces' determines the NUMA placement.
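
For illustration only (not part of the patch; the user-space framing,
struct task and sample_node() below are made up), the two-sample
filter boils down to:

        /* Minimal sketch of the two-sample home-node filter. */
        #include <stdio.h>

        struct task {
                int node;       /* current home node, -1 if unset */
                int node_last;  /* node observed at the previous sample */
        };

        /* Called once per sample period with the node the task ran on. */
        static void sample_node(struct task *t, int node)
        {
                /*
                 * Only adopt a new home node once the same node has been
                 * seen on two consecutive samples; a single stray sample
                 * elsewhere is ignored.
                 */
                if (t->node_last == node && t->node != node)
                        t->node = node;
                t->node_last = node;
        }

        int main(void)
        {
                struct task t = { .node = -1, .node_last = -1 };
                int samples[] = { 0, 1, 0, 0, 2, 0 };
                unsigned int i;

                for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                        sample_node(&t, samples[i]);
                        printf("sample=%d home=%d\n", samples[i], t.node);
                }
                return 0;       /* home node settles on 0 after two hits */
        }

If a task runs on node n with probability P(n), two consecutive
samples land there with probability P(n)^2, which is what filters out
nodes the task only visits briefly.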

Cc: Rik van Riel <riel@redhat.com>
Cc: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 include/linux/init_task.h |    3 
 include/linux/mm_types.h  |    3 
 include/linux/sched.h     |   19 +++--
 kernel/sched/core.c       |   18 ++++-
 kernel/sched/fair.c       |  163 ++++++++++++++++++++++++++++++++++++++++++++--
 kernel/sched/features.h   |    1 
 kernel/sched/sched.h      |   33 ++++++---
 kernel/sysctl.c           |   13 +++
 8 files changed, 227 insertions(+), 26 deletions(-)
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -145,7 +145,8 @@ extern struct task_group root_task_group
 
 #ifdef CONFIG_NUMA
 # define INIT_TASK_NUMA(tsk)						\
-	.node = -1,
+	.node = -1,							\
+	.node_last = -1,
 #else
 # define INIT_TASK_NUMA(tsk)
 #endif
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -388,6 +388,9 @@ struct mm_struct {
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	struct cpumask cpumask_allocation;
 #endif
+#ifdef CONFIG_NUMA
+	unsigned long numa_next_scan;
+#endif
 	struct uprobes_state uprobes_state;
 };
 
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -62,6 +62,7 @@ struct sched_param {
 #include <linux/errno.h>
 #include <linux/nodemask.h>
 #include <linux/mm_types.h>
+#include <linux/task_work.h>
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -1519,8 +1520,14 @@ struct task_struct {
 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
 	short il_next;
 	short pref_node_fork;
-	int node;
-#endif
+
+	int node;			/* task home node   */
+	int node_last;			/* home node filter */
+#ifdef CONFIG_SMP
+	u64 node_stamp;			/* migration stamp  */
+	unsigned long numa_contrib;
+#endif /* CONFIG_SMP  */
+#endif /* CONFIG_NUMA */
 	struct rcu_head rcu;
 
 	/*
@@ -2029,22 +2036,22 @@ extern unsigned int sysctl_sched_nr_migr
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 extern unsigned int sysctl_sched_shares_window;
+extern unsigned int sysctl_sched_numa_task_period;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
 		loff_t *ppos);
-#endif
-#ifdef CONFIG_SCHED_DEBUG
+
 static inline unsigned int get_sysctl_timer_migration(void)
 {
 	return sysctl_timer_migration;
 }
-#else
+#else /* CONFIG_SCHED_DEBUG */
 static inline unsigned int get_sysctl_timer_migration(void)
 {
 	return 1;
 }
-#endif
+#endif /* CONFIG_SCHED_DEBUG */
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1722,6 +1722,17 @@ static void __sched_fork(struct task_str
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
+
+#ifdef CONFIG_NUMA
+	if (p->mm && atomic_read(&p->mm->mm_users) == 1)
+		p->mm->numa_next_scan = jiffies;
+
+	p->node = -1;
+	p->node_last = -1;
+#ifdef CONFIG_SMP
+	p->node_stamp = 0ULL;
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_NUMA */
 }
 
 /*
@@ -6558,9 +6569,9 @@ static struct sched_domain_topology_leve
  * Requeues a task ensuring its on the right load-balance list so
  * that it might get migrated to its new home.
  *
- * Note that we cannot actively migrate ourselves since our callers
- * can be from atomic context. We rely on the regular load-balance
- * mechanisms to move us around -- its all preference anyway.
+ * Since home-node is pure preference there's no hard migrate to force
+ * us anywhere; this also allows us to call this from atomic context if
+ * required.
  */
 void sched_setnode(struct task_struct *p, int node)
 {
@@ -6578,6 +6589,7 @@ void sched_setnode(struct task_struct *p
 		p->sched_class->put_prev_task(rq, p);
 
 	p->node = node;
+	p->node_last = node;
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -27,6 +27,7 @@
 #include <linux/profile.h>
 #include <linux/interrupt.h>
 #include <linux/random.h>
+#include <linux/mempolicy.h>
 
 #include <trace/events/sched.h>
 
@@ -774,6 +775,139 @@ update_stats_curr_start(struct cfs_rq *c
 }
 
 /**************************************************
+ * Scheduling class numa methods.
+ *
+ * The purpose of the NUMA bits is to maintain compute (task) and data
+ * (memory) locality. We try to achieve this by making tasks stick to
+ * a particular node (their home node), but if fairness mandates they run
+ * elsewhere for long enough, we let the memory follow them.
+ *
+ * Tasks start out with their home-node unset (-1); this effectively means
+ * they act !NUMA until we've established the task is busy enough to bother
+ * with placement.
+ */
+
+static unsigned long task_h_load(struct task_struct *p);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NUMA)
+static void account_offnode_enqueue(struct rq *rq, struct task_struct *p)
+{
+	p->numa_contrib = task_h_load(p);
+	rq->offnode_weight += p->numa_contrib;
+	rq->offnode_running++;
+}
+static void account_offnode_dequeue(struct rq *rq, struct task_struct *p)
+{
+	rq->offnode_weight -= p->numa_contrib;
+	rq->offnode_running--;
+}
+
+/*
+ * numa task sample period in ms
+ */
+unsigned int sysctl_sched_numa_task_period = 2500;
+
+/*
+ * The expensive part of numa migration is done from task_work context.
+ */
+void task_numa_work(struct callback_head *work)
+{
+	unsigned long migrate, next_scan, now = jiffies;
+	struct task_struct *t, *p = current;
+	int node = p->node_last;
+
+	WARN_ON_ONCE(p != container_of(work, struct task_struct, rcu));
+
+	/*
+	 * Who cares about NUMA placement when they're dying.
+	 */
+	if (p->flags & PF_EXITING)
+		return;
+
+	/*
+	 * Enforce maximal migration frequency..
+	 */
+	migrate = p->mm->numa_next_scan;
+	if (time_before(now, migrate))
+		return;
+
+	next_scan = now + 2*msecs_to_jiffies(sysctl_sched_numa_task_period);
+	if (cmpxchg(&p->mm->numa_next_scan, migrate, next_scan) != migrate)
+		return;
+
+	rcu_read_lock();
+	t = p;
+	do {
+		sched_setnode(t, node);
+	} while ((t = next_thread(p)) != p);
+	rcu_read_unlock();
+
+	lazy_migrate_process(p->mm);
+}
+
+/*
+ * Sample task location from hardirq context (tick), this has minimal bias with
+ * obvious exceptions of frequency interference and tick avoidance techniques.
+ * If this were to become a problem we could move this sampling into the
+ * sleep/wakeup path -- but we'd prefer to avoid that for obvious reasons.
+ */
+void task_tick_numa(struct rq *rq, struct task_struct *curr)
+{
+	u64 period, now;
+	int node;
+
+	/*
+	 * We don't care about NUMA placement if we don't have memory.
+	 */
+	if (!curr->mm)
+		return;
+
+	/*
+	 * Sample our node location every @sysctl_sched_numa_task_period
+	 * runtime ms. We use a two stage selection in order to filter
+	 * unlikely locations.
+	 *
+	 * If P(n) is the probability we're on node 'n', then the probability
+	 * we sample the same node twice is P(n)^2. This quadratic squishes small
+	 * values and makes it more likely we end up on nodes where we have
+	 * significant presence.
+	 *
+	 * Using runtime rather than walltime has the dual advantage that
+	 * we (mostly) drive the selection from busy threads and that the
+	 * task needs to have done some actual work before we bother with
+	 * NUMA placement.
+	 */
+	now = curr->se.sum_exec_runtime;
+	period = (u64)sysctl_sched_numa_task_period * NSEC_PER_MSEC;
+
+	if (now - curr->node_stamp > period) {
+		curr->node_stamp = now;
+		node = numa_node_id();
+
+		if (curr->node_last == node && curr->node != node) {
+			/*
+			 * We can re-use curr->rcu because we checked curr->mm
+			 * != NULL so release_task()->call_rcu() was not called
+			 * yet and exit_task_work() is called before
+			 * exit_notify().
+			 */
+			init_task_work(&curr->rcu, task_numa_work);
+			task_work_add(curr, &curr->rcu, true);
+		}
+		curr->node_last = node;
+	}
+}
+#else
+static void account_offnode_enqueue(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void account_offnode_dequeue(struct rq *rq, struct task_struct *p)
+{
+}
+#endif /* SMP && NUMA */
+
+/**************************************************
  * Scheduling class queueing methods:
  */
 
@@ -784,9 +918,19 @@ account_entity_enqueue(struct cfs_rq *cf
 	if (!parent_entity(se))
 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
-	if (entity_is_task(se))
-		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
-#endif
+	if (entity_is_task(se)) {
+		struct rq *rq = rq_of(cfs_rq);
+		struct task_struct *p = task_of(se);
+		struct list_head *tasks = &rq->cfs_tasks;
+
+		if (offnode_task(p)) {
+			account_offnode_enqueue(rq, p);
+			tasks = offnode_tasks(rq);
+		}
+
+		list_add(&se->group_node, tasks);
+	}
+#endif /* CONFIG_SMP */
 	cfs_rq->nr_running++;
 }
 
@@ -796,8 +940,14 @@ account_entity_dequeue(struct cfs_rq *cf
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se))
 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
-	if (entity_is_task(se))
+	if (entity_is_task(se)) {
+		struct task_struct *p = task_of(se);
+
 		list_del_init(&se->group_node);
+
+		if (offnode_task(p))
+			account_offnode_dequeue(rq_of(cfs_rq), p);
+	}
 	cfs_rq->nr_running--;
 }
 
@@ -3286,8 +3436,6 @@ static int move_one_task(struct lb_env *
 	return 0;
 }
 
-static unsigned long task_h_load(struct task_struct *p);
-
 static const unsigned int sched_nr_migrate_break = 32;
 
 /*
@@ -5173,6 +5321,9 @@ static void task_tick_fair(struct rq *rq
 		cfs_rq = cfs_rq_of(se);
 		entity_tick(cfs_rq, se, queued);
 	}
+
+	if (sched_feat_numa(NUMA))
+		task_tick_numa(rq, curr);
 }
 
 /*
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -71,6 +71,7 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 
 #ifdef CONFIG_NUMA
+SCHED_FEAT(NUMA,           true)
 SCHED_FEAT(NUMA_HOT,       true)
 SCHED_FEAT(NUMA_BIAS,      true)
 SCHED_FEAT(NUMA_PULL,      true)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -471,15 +471,6 @@ struct rq {
 #endif
 };
 
-static inline struct list_head *offnode_tasks(struct rq *rq)
-{
-#ifdef CONFIG_NUMA
-	return &rq->offnode_tasks;
-#else
-	return NULL;
-#endif
-}
-
 static inline int cpu_of(struct rq *rq)
 {
 #ifdef CONFIG_SMP
@@ -497,6 +488,30 @@ DECLARE_PER_CPU(struct rq, runqueues);
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NUMA)
+static inline bool offnode_task(struct task_struct *t)
+{
+	return t->node != -1 && t->node != cpu_to_node(task_cpu(t));
+}
+
+static inline struct list_head *offnode_tasks(struct rq *rq)
+{
+	return &rq->offnode_tasks;
+}
+
+void sched_setnode(struct task_struct *p, int node);
+#else /* SMP && NUMA */
+static inline bool offnode_task(struct task_struct *t)
+{
+	return false;
+}
+
+static inline struct list_head *offnode_tasks(struct rq *rq)
+{
+	return NULL;
+}
+#endif /* SMP && NUMA */
+
 #ifdef CONFIG_SMP
 
 #define rcu_dereference_check_sched_domain(p) \
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -291,6 +291,7 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &min_wakeup_granularity_ns,
 		.extra2		= &max_wakeup_granularity_ns,
 	},
+#ifdef CONFIG_SMP
 	{
 		.procname	= "sched_tunable_scaling",
 		.data		= &sysctl_sched_tunable_scaling,
@@ -337,7 +338,17 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
-#endif
+#ifdef CONFIG_NUMA
+	{
+		.procname	= "sched_numa_task_period_ms",
+		.data		= &sysctl_sched_numa_task_period,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_SCHED_DEBUG */
 	{
 		.procname	= "sched_rt_period_us",
 		.data		= &sysctl_sched_rt_period,




* Re: [PATCH 16/19] sched, numa: NUMA home-node selection code
From: Rik van Riel @ 2012-07-31 21:52 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, oleg, pjt, akpm, torvalds, tglx, Lee.Schermerhorn,
	linux-kernel

On 07/31/2012 03:12 PM, Peter Zijlstra wrote:

Acked-by: Rik van Riel <riel@redhat.com>

-- 
All rights reversed


* Re: [PATCH 16/19] sched, numa: NUMA home-node selection code
From: Don Morris @ 2012-09-26 14:31 UTC (permalink / raw)
  To: linux-kernel; +Cc: Peter Zijlstra

Re-sending to LKML due to mailer picking up an incorrect
address. (Sorry for the dupe).

On 09/26/2012 07:26 AM, Don Morris wrote:
> Peter --
> 
> You may have / probably have already seen this, and if so I
> apologize in advance (can't find any sign of a fix via any
> searches...).
> 
> I picked up your August sched/numa patch set and have been
> working on it with a 2-node and an 8-node configuration. Got
> a very intermittent crash on the 2-node which of course
> hasn't reproduced since I got the crash/kdump configured.
> (I suspect it is related, however).
> 
> On the 8-node, however, I very reliably got a hard lockup
> NMI after several minutes. This occurs when running Andrea's
> autonuma-benchmark
> (git://gitorious.org/autonuma-benchmark/autonuma-benchmark.git)
> with the first test (two processes, one
> thread per core/vcore, each loops over a single malloc space).
> I'll attach the full stack set from that crash.
> 
> Since the NMI output consistently showed that the hard lockup
> stemmed from waiting for a spinlock that was never released, I
> turned on lock debugging in the .config and got a very clear,
> very consistent circular dependency warning (just below).
> 
> As far as I can tell, the warning is correct and is consistent
> with the actual NMI crash output (a variant, in that the "pidof"
> process on cpu 52 goes through task_sched_runtime() to do the
> task_rq_lock() operation on the numa01 process, which means it
> takes the pi_lock and then waits for the rq->lock, while numa01
> (back on CPU 0) holds the rq->lock from scheduler_tick() and is
> going for the pi_lock via task_work_add()...).
> 
> I'm nowhere near confident enough in my knowledge of the
> nuances of run queue locking during the tick update to try
> to hack a workaround - so sorry no proposed patch fix here,
> just a bug report.
> 
> On another minor note, while looking over this and of course
> noticing that most other cpus were tied up waiting for the
> page lock on one of the huge pages (THP was of course on)
> while one of them busied itself invalidating across the other
> CPUs -- the question comes to mind whether that's really needed.
> Yes, it certainly is needed in the true PROT_NONE case you're
> building off of as you certainly can't allow access to a
> translation which is now supposed to be locked out, but you
> could allow transitory minor faults when going from PROT_NONE
> back to access as the fault would clear the TLB anyway (at
> least on x86, any architecture which doesn't do that would have
> to have an explicit TLB invalidation for cases where the translation
> is detected as updated anyway, so that should be okay). In your
> case, I would think the transitory faults on what's really a
> hint to the system would probably be much better than tying up
> N-1 other CPUs to do the other flush on a process that spans
> the system -- especially if the other processors are in a scenario
> where they're running that process but working on a different page
> (and hence may never even touch the page changing access anyway).
> Even in the case where you're adding the hint (access to NONE)
> you could be willing to miss an access in favor of letting the
> next context switch invalidate the TLB for you (again, there
> may be architectures where you'll never invalidate unless it is
> explicitly, I think IPF was that way but it has been a while)
> given you really need a non-trivial run time to merit doing this
> work and have a good chance of settling out to a good access
> pattern.
> 
> Just a thought.
> 
> Thanks for your work,
> Don Morris
> 
> ======================================================
> [ INFO: possible circular locking dependency detected ]
> 3.6.0-rc4 #28 Not tainted
> -------------------------------------------------------
> numa01/35386 is trying to acquire lock:
>  (&p->pi_lock){-.-.-.}, at: [<ffffffff81073e68>] task_work_add+0x38/0xa0
> 
> but task is already holding lock:
>  (&rq->lock){-.-.-.}, at: [<ffffffff81085d83>] scheduler_tick+0x53/0x150
> 
> which lock already depends on the new lock.
> 
> 
> the existing dependency chain (in reverse order) is:
> 
> -> #1 (&rq->lock){-.-.-.}:
>        [<ffffffff810b52e3>] validate_chain+0x633/0x730
>        [<ffffffff810b57d2>] __lock_acquire+0x3f2/0x490
>        [<ffffffff810b5959>] lock_acquire+0xe9/0x120
>        [<ffffffff8152e306>] _raw_spin_lock+0x36/0x70
>        [<ffffffff8108c1f1>] wake_up_new_task+0xd1/0x190
>        [<ffffffff810513f2>] do_fork+0x1f2/0x280
>        [<ffffffff8101bcd6>] kernel_thread+0x76/0x80
>        [<ffffffff81513976>] rest_init+0x26/0xc0
>        [<ffffffff81cdfeff>] start_kernel+0x3c6/0x3d3
>        [<ffffffff81cdf356>] x86_64_start_reservations+0x131/0x136
>        [<ffffffff81cdf45c>] x86_64_start_kernel+0x101/0x110
> 
> -> #0 (&p->pi_lock){-.-.-.}:
>        [<ffffffff810b48ef>] check_prev_add+0x11f/0x4e0
>        [<ffffffff810b52e3>] validate_chain+0x633/0x730
>        [<ffffffff810b57d2>] __lock_acquire+0x3f2/0x490
>        [<ffffffff810b5959>] lock_acquire+0xe9/0x120
>        [<ffffffff8152e4b5>] _raw_spin_lock_irqsave+0x55/0xa0
>        [<ffffffff81073e68>] task_work_add+0x38/0xa0
>        [<ffffffff810905d7>] task_tick_numa+0xb7/0xd0
>        [<ffffffff8109237a>] task_tick_fair+0x5a/0x70
>        [<ffffffff81085e0e>] scheduler_tick+0xde/0x150
>        [<ffffffff8106267e>] update_process_times+0x6e/0x90
>        [<ffffffff810ad803>] tick_sched_timer+0xa3/0xe0
>        [<ffffffff8107c266>] __run_hrtimer+0x106/0x1c0
>        [<ffffffff8107c5f0>] hrtimer_interrupt+0x120/0x260
>        [<ffffffff81538fdd>] smp_apic_timer_interrupt+0x8d/0xa3
>        [<ffffffff81537eaf>] apic_timer_interrupt+0x6f/0x80
>        [<ffffffff8152e326>] _raw_spin_lock+0x56/0x70
>        [<ffffffff811488e8>] do_anonymous_page+0x1e8/0x270
>        [<ffffffff8114d1fc>] handle_pte_fault+0x9c/0x2a0
>        [<ffffffff8114d5a0>] handle_mm_fault+0x1a0/0x1c0
>        [<ffffffff81532de1>] do_page_fault+0x421/0x450
>        [<ffffffff8152f2d5>] page_fault+0x25/0x30
> 
> other info that might help us debug this:
> 
>  Possible unsafe locking scenario:
> 
>        CPU0                    CPU1
>        ----                    ----
>   lock(&rq->lock);
>                                lock(&p->pi_lock);
>                                lock(&rq->lock);
>   lock(&p->pi_lock);
> 
>  *** DEADLOCK ***
> 
> 3 locks held by numa01/35386:
>  #0:  (&mm->mmap_sem){++++++}, at: [<ffffffff81532bbc>]
> do_page_fault+0x1fc/0x450
>  #1:  (&(&mm->page_table_lock)->rlock){+.+...}, at: [<ffffffff811488e8>]
> do_anonymous_page+0x1e8/0x270
>  #2:  (&rq->lock){-.-.-.}, at: [<ffffffff81085d83>]
> scheduler_tick+0x53/0x150
> 
> stack backtrace:
> Pid: 35386, comm: numa01 Not tainted 3.6.0-rc4 #28
> Call Trace:
>  <IRQ>  [<ffffffff810b36a7>] print_circular_bug+0xf7/0x120
>  [<ffffffff8108f5d7>] ? update_sd_lb_stats+0x347/0x700
>  [<ffffffff810b48ef>] check_prev_add+0x11f/0x4e0
>  [<ffffffff8101afe5>] ? native_sched_clock+0x35/0x80
>  [<ffffffff8101a5d9>] ? sched_clock+0x9/0x10
>  [<ffffffff8108d82f>] ? sched_clock_cpu+0x4f/0x110
>  [<ffffffff810b52e3>] validate_chain+0x633/0x730
>  [<ffffffff8101a5d9>] ? sched_clock+0x9/0x10
>  [<ffffffff810b57d2>] __lock_acquire+0x3f2/0x490
>  [<ffffffff810afc5d>] ? trace_hardirqs_off+0xd/0x10
>  [<ffffffff810b5959>] lock_acquire+0xe9/0x120
>  [<ffffffff81073e68>] ? task_work_add+0x38/0xa0
>  [<ffffffff8152e4b5>] _raw_spin_lock_irqsave+0x55/0xa0
>  [<ffffffff81073e68>] ? task_work_add+0x38/0xa0
>  [<ffffffff81073e68>] task_work_add+0x38/0xa0
>  [<ffffffff810905d7>] task_tick_numa+0xb7/0xd0
>  [<ffffffff8109237a>] task_tick_fair+0x5a/0x70
>  [<ffffffff81085e0e>] scheduler_tick+0xde/0x150
>  [<ffffffff8106267e>] update_process_times+0x6e/0x90
>  [<ffffffff810ad803>] tick_sched_timer+0xa3/0xe0
>  [<ffffffff8107c266>] __run_hrtimer+0x106/0x1c0
>  [<ffffffff810ad760>] ? tick_nohz_restart+0xa0/0xa0
>  [<ffffffff8107c5f0>] hrtimer_interrupt+0x120/0x260
>  [<ffffffff81538fdd>] smp_apic_timer_interrupt+0x8d/0xa3
>  [<ffffffff81537eaf>] apic_timer_interrupt+0x6f/0x80
>  <EOI>  [<ffffffff8108d93b>] ? local_clock+0x4b/0x70
>  [<ffffffff812754e2>] ? do_raw_spin_lock+0xb2/0x140
>  [<ffffffff81275509>] ? do_raw_spin_lock+0xd9/0x140
>  [<ffffffff8152e326>] _raw_spin_lock+0x56/0x70
>  [<ffffffff811488e8>] ? do_anonymous_page+0x1e8/0x270
>  [<ffffffff811488e8>] do_anonymous_page+0x1e8/0x270
>  [<ffffffff8114d1fc>] handle_pte_fault+0x9c/0x2a0
>  [<ffffffff81532bbc>] ? do_page_fault+0x1fc/0x450
>  [<ffffffff810b5ddf>] ? __lock_release+0x14f/0x180
>  [<ffffffff8114d5a0>] handle_mm_fault+0x1a0/0x1c0
>  [<ffffffff8107d1c5>] ? down_read_trylock+0x55/0x70
>  [<ffffffff81532de1>] do_page_fault+0x421/0x450
>  [<ffffffff810b5ddf>] ? __lock_release+0x14f/0x180
>  [<ffffffff810b4522>] ? trace_hardirqs_on_caller+0x152/0x1c0
>  [<ffffffff810b459d>] ? trace_hardirqs_on+0xd/0x10
>  [<ffffffff8152ed60>] ? _raw_spin_unlock_irq+0x30/0x40
>  [<ffffffff8152d670>] ? __schedule+0x610/0x690
>  [<ffffffff8126f03d>] ? trace_hardirqs_off_thunk+0x3a/0x3c
>  [<ffffffff8152f2d5>] page_fault+0x25/0x30
> 


