From: Mel Gorman <mgorman@suse.de>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>,
Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
Ingo Molnar <mingo@kernel.org>,
Andrea Arcangeli <aarcange@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Linux-MM <linux-mm@kvack.org>,
LKML <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 46/50] sched: numa: Prevent parallel updates to group stats during placement
Date: Fri, 20 Sep 2013 13:31:52 +0100
Message-ID: <20130920123151.GX22421@suse.de>
In-Reply-To: <20130920095526.GT9326@twins.programming.kicks-ass.net>
On Fri, Sep 20, 2013 at 11:55:26AM +0200, Peter Zijlstra wrote:
> On Tue, Sep 10, 2013 at 10:32:26AM +0100, Mel Gorman wrote:
> > Having multiple tasks in a group go through task_numa_placement
> > simultaneously can lead to a task picking a wrong node to run on, because
> > the group stats may be in the middle of an update. This patch avoids
> > parallel updates by holding the numa_group lock during placement
> > decisions.
> >
> > Signed-off-by: Mel Gorman <mgorman@suse.de>
> > ---
> > kernel/sched/fair.c | 35 +++++++++++++++++++++++------------
> > 1 file changed, 23 insertions(+), 12 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 3a92c58..4653f71 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -1231,6 +1231,7 @@ static void task_numa_placement(struct task_struct *p)
> > {
> > int seq, nid, max_nid = -1, max_group_nid = -1;
> > unsigned long max_faults = 0, max_group_faults = 0;
> > + spinlock_t *group_lock = NULL;
> >
> > seq = ACCESS_ONCE(p->mm->numa_scan_seq);
> > if (p->numa_scan_seq == seq)
> > @@ -1239,6 +1240,12 @@ static void task_numa_placement(struct task_struct *p)
> > p->numa_migrate_seq++;
> > p->numa_scan_period_max = task_scan_max(p);
> >
> > + /* If the task is part of a group prevent parallel updates to group stats */
> > + if (p->numa_group) {
> > + group_lock = &p->numa_group->lock;
> > + spin_lock(group_lock);
> > + }
> > +
> > /* Find the node with the highest number of faults */
> > for_each_online_node(nid) {
> > unsigned long faults = 0, group_faults = 0;
> > @@ -1277,20 +1284,24 @@ static void task_numa_placement(struct task_struct *p)
> > }
> > }
> >
> > + if (p->numa_group) {
> > + /*
> > + * If the preferred task and group nids are different,
> > + * iterate over the nodes again to find the best place.
> > + */
> > + if (max_nid != max_group_nid) {
> > + unsigned long weight, max_weight = 0;
> > +
> > + for_each_online_node(nid) {
> > + weight = task_weight(p, nid) + group_weight(p, nid);
> > + if (weight > max_weight) {
> > + max_weight = weight;
> > + max_nid = nid;
> > + }
> > }
> > }
> > +
> > + spin_unlock(group_lock);
> > }
> >
> > /* Preferred node as the node with the most faults */
>
> If you're going to hold locks you can also do away with all that
> atomic_long_*() nonsense :-)
Yep! Easily done; the patch is untested but should be straightforward.
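To illustrate the point (a minimal sketch with made-up names, not the
fair.c code): once every update to the group stats runs under
numa_group->lock, plain long arithmetic is already serialised and the
atomics only add cost:

	#include <linux/spinlock.h>

	/* Illustrative pattern only: counters protected by a spinlock */
	struct counter_group {
		spinlock_t lock;
		long total;	/* plain long is safe here... */
		long counts[2];	/* ...because all updates hold ->lock */
	};

	static void counter_add(struct counter_group *cg, int idx, long diff)
	{
		spin_lock(&cg->lock);
		cg->counts[idx] += diff;	/* no atomic_long_add() needed */
		cg->total += diff;
		spin_unlock(&cg->lock);
	}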
---8<---
sched: numa: use longs for numa group fault stats
As Peter says, "If you're going to hold locks you can also do away with
all that atomic_long_*() nonsense". The lock acquisition is moved
slightly so that it covers the stat updates. The numa_group fault stats
stay signed "long" so that a basic sanity check can catch fault counts
going negative.
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
kernel/sched/fair.c | 54 ++++++++++++++++++++++++-----------------------------
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 04a2963..c09687d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -897,8 +897,8 @@ struct numa_group {
struct list_head task_list;
struct rcu_head rcu;
- atomic_long_t total_faults;
- atomic_long_t faults[0];
+ long total_faults;
+ long faults[0];
};
pid_t task_numa_group_id(struct task_struct *p)
@@ -925,8 +925,7 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
if (!p->numa_group)
return 0;
- return atomic_long_read(&p->numa_group->faults[2*nid]) +
- atomic_long_read(&p->numa_group->faults[2*nid+1]);
+ return p->numa_group->faults[2*nid] + p->numa_group->faults[2*nid+1];
}
/*
@@ -952,17 +951,10 @@ static inline unsigned long task_weight(struct task_struct *p, int nid)
static inline unsigned long group_weight(struct task_struct *p, int nid)
{
- unsigned long total_faults;
-
- if (!p->numa_group)
- return 0;
-
- total_faults = atomic_long_read(&p->numa_group->total_faults);
-
- if (!total_faults)
+ if (!p->numa_group || !p->numa_group->total_faults)
return 0;
- return 1200 * group_faults(p, nid) / total_faults;
+ return 1200 * group_faults(p, nid) / p->numa_group->total_faults;
}
static unsigned long weighted_cpuload(const int cpu);
@@ -1267,9 +1259,9 @@ static void task_numa_placement(struct task_struct *p)
p->total_numa_faults += diff;
if (p->numa_group) {
/* safe because we can only change our own group */
- atomic_long_add(diff, &p->numa_group->faults[i]);
- atomic_long_add(diff, &p->numa_group->total_faults);
- group_faults += atomic_long_read(&p->numa_group->faults[i]);
+ p->numa_group->faults[i] += diff;
+ p->numa_group->total_faults += diff;
+ group_faults += p->numa_group->faults[i];
}
}
@@ -1343,7 +1335,7 @@ static void task_numa_group(struct task_struct *p, int cpupid)
if (unlikely(!p->numa_group)) {
unsigned int size = sizeof(struct numa_group) +
- 2*nr_node_ids*sizeof(atomic_long_t);
+ 2*nr_node_ids*sizeof(long);
grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!grp)
@@ -1355,9 +1347,9 @@ static void task_numa_group(struct task_struct *p, int cpupid)
grp->gid = p->pid;
for (i = 0; i < 2*nr_node_ids; i++)
- atomic_long_set(&grp->faults[i], p->numa_faults[i]);
+ grp->faults[i] = p->numa_faults[i];
- atomic_long_set(&grp->total_faults, p->total_numa_faults);
+ grp->total_faults = p->total_numa_faults;
list_add(&p->numa_entry, &grp->task_list);
grp->nr_tasks++;
@@ -1402,14 +1394,15 @@ unlock:
if (!join)
return;
+ double_lock(&my_grp->lock, &grp->lock);
+
for (i = 0; i < 2*nr_node_ids; i++) {
- atomic_long_sub(p->numa_faults[i], &my_grp->faults[i]);
- atomic_long_add(p->numa_faults[i], &grp->faults[i]);
+ my_grp->faults[i] -= p->numa_faults[i];
+ grp->faults[i] += p->numa_faults[i];
+ WARN_ON_ONCE(my_grp->faults[i] < 0);
}
- atomic_long_sub(p->total_numa_faults, &my_grp->total_faults);
- atomic_long_add(p->total_numa_faults, &grp->total_faults);
-
- double_lock(&my_grp->lock, &grp->lock);
+ my_grp->total_faults -= p->total_numa_faults;
+ grp->total_faults += p->total_numa_faults;
list_move(&p->numa_entry, &grp->task_list);
my_grp->nr_tasks--;
@@ -1430,12 +1423,13 @@ void task_numa_free(struct task_struct *p)
void *numa_faults = p->numa_faults;
if (grp) {
- for (i = 0; i < 2*nr_node_ids; i++)
- atomic_long_sub(p->numa_faults[i], &grp->faults[i]);
-
- atomic_long_sub(p->total_numa_faults, &grp->total_faults);
-
spin_lock(&grp->lock);
+ for (i = 0; i < 2*nr_node_ids; i++) {
+ grp->faults[i] -= p->numa_faults[i];
+ WARN_ON_ONCE(grp->faults[i] < 0);
+ }
+ grp->total_faults -= p->total_numa_faults;
+
list_del(&p->numa_entry);
grp->nr_tasks--;
spin_unlock(&grp->lock);
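For completeness: the double_lock() helper used above (introduced
earlier in this series) takes the two group locks in address order so
that two tasks joining each other's groups cannot ABBA deadlock; the
idea is roughly:

	static void double_lock(spinlock_t *l1, spinlock_t *l2)
	{
		if (l1 > l2)
			swap(l1, l2);

		spin_lock(l1);
		spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
	}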