* [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous
[not found] ` <20160115164023.GH3520-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
@ 2016-01-19 17:18 ` Tejun Heo
2016-01-22 14:24 ` Christian Borntraeger
[not found] ` <20160119171841.GP3520-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
0 siblings, 2 replies; 13+ messages in thread
From: Tejun Heo @ 2016-01-19 17:18 UTC (permalink / raw)
To: Li Zefan, Johannes Weiner
Cc: Linux Kernel Mailing List, Christian Borntraeger, linux-s390,
KVM list, Oleg Nesterov, Peter Zijlstra, Paul E. McKenney,
cgroups-u79uwXL29TY76Z2rM5mHXA, kernel-team-b10kYP2dOMg
If "cpuset.memory_migrate" is set, when a process is moved from one
cpuset to another with a different memory node mask, pages in use by
the process are migrated to the new set of nodes. This was performed
synchronously in the ->attach() callback, which is synchronized
against process management. Recently, the synchronization was changed
from per-process rwsem to global percpu rwsem for simplicity and
optimization.
Combined with the synchronous mm migration, this led to deadlocks
because mm migration could schedule a work item which may in turn try
to create a new worker blocking on the process management lock held
from cgroup process migration path.
This heavy an operation shouldn't be performed synchronously from that
deep inside cgroup migration in the first place. This patch punts the
actual migration to an ordered workqueue and updates cgroup process
migration and cpuset config update paths to flush the workqueue after
all locks are released. This way, the operations still seem
synchronous to userland without entangling mm migration with process
management synchronization. CPU hotplug can also invoke mm migration
but there's no reason for it to wait for mm migrations and thus
doesn't synchronize against their completions.
Signed-off-by: Tejun Heo <tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
Reported-and-tested-by: Christian Borntraeger <borntraeger-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
Cc: stable-u79uwXL29TY76Z2rM5mHXA@public.gmane.org # v4.4+
---
include/linux/cpuset.h | 6 ++++
kernel/cgroup.c | 2 +
kernel/cpuset.c | 71 +++++++++++++++++++++++++++++++++----------------
3 files changed, 57 insertions(+), 22 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868c..fea160e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
+extern void cpuset_post_attach_flush(void);
+
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c03a640..88abd4d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -58,6 +58,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <linux/cpuset.h>
#include <net/sock.h>
/*
@@ -2739,6 +2740,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
cgroup_kn_unlock(of->kn);
+ cpuset_post_attach_flush();
return ret ?: nbytes;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3e945fc..41989ab 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -287,6 +287,8 @@ static struct cpuset top_cpuset = {
static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
/*
* CPU / memory hotplug is handled asynchronously.
*/
@@ -972,31 +974,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
}
/*
- * cpuset_migrate_mm
- *
- * Migrate memory region from one set of nodes to another.
- *
- * Temporarilly set tasks mems_allowed to target nodes of migration,
- * so that the migration code can allocate pages on these nodes.
- *
- * While the mm_struct we are migrating is typically from some
- * other task, the task_struct mems_allowed that we are hacking
- * is for our current task, which must allocate new pages for that
- * migrating memory region.
+ * Migrate memory region from one set of nodes to another. This is
+ * performed asynchronously as it can be called from process migration path
+ * holding locks involved in process management. All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
*/
+struct cpuset_migrate_mm_work {
+ struct work_struct work;
+ struct mm_struct *mm;
+ nodemask_t from;
+ nodemask_t to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+ struct cpuset_migrate_mm_work *mwork =
+ container_of(work, struct cpuset_migrate_mm_work, work);
+
+ /* on a wq worker, no need to worry about %current's mems_allowed */
+ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+ mmput(mwork->mm);
+ kfree(mwork);
+}
+
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to)
{
- struct task_struct *tsk = current;
-
- tsk->mems_allowed = *to;
+ struct cpuset_migrate_mm_work *mwork;
- do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+ mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+ if (mwork) {
+ mwork->mm = mm;
+ mwork->from = *from;
+ mwork->to = *to;
+ INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+ queue_work(cpuset_migrate_mm_wq, &mwork->work);
+ } else {
+ mmput(mm);
+ }
+}
- rcu_read_lock();
- guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
- rcu_read_unlock();
+void cpuset_post_attach_flush(void)
+{
+ flush_workqueue(cpuset_migrate_mm_wq);
}
/*
@@ -1097,7 +1119,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
mpol_rebind_mm(mm, &cs->mems_allowed);
if (migrate)
cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
- mmput(mm);
+ else
+ mmput(mm);
}
css_task_iter_end(&it);
@@ -1545,11 +1568,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
* @old_mems_allowed is the right nodesets that we
* migrate mm from.
*/
- if (is_memory_migrate(cs)) {
+ if (is_memory_migrate(cs))
cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
&cpuset_attach_nodemask_to);
- }
- mmput(mm);
+ else
+ mmput(mm);
}
}
@@ -1714,6 +1737,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
mutex_unlock(&cpuset_mutex);
kernfs_unbreak_active_protection(of->kn);
css_put(&cs->css);
+ flush_workqueue(cpuset_migrate_mm_wq);
return retval ?: nbytes;
}
@@ -2359,6 +2383,9 @@ void __init cpuset_init_smp(void)
top_cpuset.effective_mems = node_states[N_MEMORY];
register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+ cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+ BUG_ON(!cpuset_migrate_mm_wq);
}
/**
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 1/2] cgroup: make sure a parent css isn't offlined before its children
[not found] ` <5698A023.9070703-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
@ 2016-01-21 20:31 ` Tejun Heo
2016-01-21 20:32 ` [PATCH 2/2] cgroup: make sure a parent css isn't freed " Tejun Heo
[not found] ` <20160121203111.GF5157-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
0 siblings, 2 replies; 13+ messages in thread
From: Tejun Heo @ 2016-01-21 20:31 UTC (permalink / raw)
To: Christian Borntraeger
Cc: linux-kernel-u79uwXL29TY76Z2rM5mHXA, linux-s390, KVM list,
Oleg Nesterov, Peter Zijlstra, Paul E. McKenney, Li Zefan,
Johannes Weiner, cgroups-u79uwXL29TY76Z2rM5mHXA,
kernel-team-b10kYP2dOMg
There are three subsystem callbacks in css shutdown path -
css_offline(), css_released() and css_free(). Except for
css_released(), cgroup core didn't use to guarantee the order of
invocation. css_offline() or css_free() could be called on a parent
css before its children. This behavior is unexpected and led to
use-after-free in cpu controller.
This patch updates offline path so that a parent css is never offlined
before its children. Each css keeps online_cnt which reaches zero iff
itself and all its children are offline and offline_css() is invoked
only after online_cnt reaches zero.
This fixes the reported cpu controller malfunction. The next patch
will update css_free() handling.
Signed-off-by: Tejun Heo <tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
Reported-by: Christian Borntraeger <borntraeger-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
Link: http://lkml.kernel.org/g/5698A023.9070703-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org
Cc: Heiko Carstens <heiko.carstens-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
Cc: Peter Zijlstra <peterz-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
Cc: stable-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
---
Hello, Christian.
Can you please verify whether this patch fixes the issue?
Thanks.
include/linux/cgroup-defs.h | 6 ++++++
kernel/cgroup.c | 22 +++++++++++++++++-----
2 files changed, 23 insertions(+), 5 deletions(-)
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
+ /*
+ * Incremented by online self and children. Used to guarantee that
+ * parents are not offlined before their children.
+ */
+ atomic_t online_cnt;
+
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4761,6 +4761,7 @@ static void init_and_link_css(struct cgr
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
+ atomic_set(&css->online_cnt, 0);
if (cgroup_parent(cgrp)) {
css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4783,6 +4784,10 @@ static int online_css(struct cgroup_subs
if (!ret) {
css->flags |= CSS_ONLINE;
rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+ atomic_inc(&css->online_cnt);
+ if (css->parent)
+ atomic_inc(&css->parent->online_cnt);
}
return ret;
}
@@ -5020,10 +5025,15 @@ static void css_killed_work_fn(struct wo
container_of(work, struct cgroup_subsys_state, destroy_work);
mutex_lock(&cgroup_mutex);
- offline_css(css);
- mutex_unlock(&cgroup_mutex);
- css_put(css);
+ do {
+ offline_css(css);
+ css_put(css);
+ /* @css can't go away while we're holding cgroup_mutex */
+ css = css->parent;
+ } while (css && atomic_dec_and_test(&css->online_cnt));
+
+ mutex_unlock(&cgroup_mutex);
}
/* css kill confirmation processing requires process context, bounce */
@@ -5032,8 +5042,10 @@ static void css_killed_ref_fn(struct per
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- INIT_WORK(&css->destroy_work, css_killed_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
}
/**
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 2/2] cgroup: make sure a parent css isn't freed before its children
2016-01-21 20:31 ` [PATCH 1/2] cgroup: make sure a parent css isn't offlined before its children Tejun Heo
@ 2016-01-21 20:32 ` Tejun Heo
2016-01-22 15:45 ` [PATCH v2 " Tejun Heo
[not found] ` <20160121203111.GF5157-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
1 sibling, 1 reply; 13+ messages in thread
From: Tejun Heo @ 2016-01-21 20:32 UTC (permalink / raw)
To: Christian Borntraeger
Cc: linux-kernel, linux-s390, KVM list, Oleg Nesterov, Peter Zijlstra,
Paul E. McKenney, Li Zefan, Johannes Weiner, cgroups, kernel-team
There are three subsystem callbacks in css shutdown path -
css_offline(), css_released() and css_free(). Except for
css_released(), cgroup core didn't use to guarantee the order of
invocation. css_offline() or css_free() could be called on a parent
css before its children. This behavior is unexpected and led to
use-after-free in cpu controller.
The previous patch updated ordering for css_offline() which fixes the
cpu controller issue. While there currently isn't a known bug caused
by misordering of css_free() invocations, let's fix it too for
consistency.
css_free() ordering can be trivially fixed by moving putting of the
parent css below css_free() invocation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
kernel/cgroup.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4657,14 +4657,15 @@ static void css_free_work_fn(struct work
if (ss) {
/* css free path */
+ struct cgroup_subsys_state *parent = css->parent;
int id = css->id;
- if (css->parent)
- css_put(css->parent);
-
ss->css_free(css);
cgroup_idr_remove(&ss->css_idr, id);
cgroup_put(cgrp);
+
+ if (parent)
+ css_put(parent);
} else {
/* cgroup free path */
atomic_dec(&cgrp->root->nr_cgrps);
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] cgroup: make sure a parent css isn't offlined before its children
[not found] ` <20160121203111.GF5157-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
@ 2016-01-21 21:24 ` Peter Zijlstra
[not found] ` <20160121212416.GL6357-ndre7Fmf5hadTX5a5knrm8zTDFooKrT+cvkQGrU6aU0@public.gmane.org>
2016-01-22 15:45 ` [PATCH v2 " Tejun Heo
1 sibling, 1 reply; 13+ messages in thread
From: Peter Zijlstra @ 2016-01-21 21:24 UTC (permalink / raw)
To: Tejun Heo
Cc: Christian Borntraeger, linux-kernel-u79uwXL29TY76Z2rM5mHXA,
linux-s390, KVM list, Oleg Nesterov, Paul E. McKenney, Li Zefan,
Johannes Weiner, cgroups-u79uwXL29TY76Z2rM5mHXA,
kernel-team-b10kYP2dOMg
On Thu, Jan 21, 2016 at 03:31:11PM -0500, Tejun Heo wrote:
> There are three subsystem callbacks in css shutdown path -
> css_offline(), css_released() and css_free(). Except for
> css_released(), cgroup core didn't use to guarantee the order of
> invocation. css_offline() or css_free() could be called on a parent
> css before its children. This behavior is unexpected and led to
> use-after-free in cpu controller.
>
> This patch updates offline path so that a parent css is never offlined
> before its children. Each css keeps online_cnt which reaches zero iff
> itself and all its children are offline and offline_css() is invoked
> only after online_cnt reaches zero.
>
> This fixes the reported cpu controller malfunction. The next patch
> will update css_free() handling.
No, I need to fix the cpu controller too, because the offending code
sits off of css_free() (the next patch), but also does a call_rcu() in
between, which also doesn't guarantee order.
So your patch and the below would be required to fix this I think.
And then I should look at removing the call_rcu() from the css_free() at
a later date, I think its superfluous but need to double check that.
---
Subject: sched: Fix cgroup entity load tracking tear-down
When a cgroup's cpu runqueue is destroyed, it should remove its
remaining load accounting from its parent cgroup.
The current site for doing so is unsuited because it's far too late and
unordered against other cgroup removal (css_free will be, but we're also
in an RCU callback).
Put it in the css_offline callback, which is the start of cgroup
destruction, right after the group has been made unavailable to
userspace. The css_offline callbacks are called in hierarchical order.
Signed-off-by: Peter Zijlstra (Intel) <peterz-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
---
kernel/sched/core.c | 4 +---
kernel/sched/fair.c | 35 ++++++++++++++++++++---------------
kernel/sched/sched.h | 2 +-
3 files changed, 22 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b8bd352dc63f..d589a140fe0e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7865,11 +7865,9 @@ void sched_destroy_group(struct task_group *tg)
void sched_offline_group(struct task_group *tg)
{
unsigned long flags;
- int i;
/* end participation in shares distribution */
- for_each_possible_cpu(i)
- unregister_fair_sched_group(tg, i);
+ unregister_fair_sched_group(tg);
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7f60da0f0fd7..aff660b70bf5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8244,11 +8244,8 @@ void free_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
- if (tg->se) {
- if (tg->se[i])
- remove_entity_load_avg(tg->se[i]);
+ if (tg->se)
kfree(tg->se[i]);
- }
}
kfree(tg->cfs_rq);
@@ -8296,21 +8293,29 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
return 0;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
{
- struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+ struct rq *rq;
+ int cpu;
- /*
- * Only empty task groups can be destroyed; so we can speculatively
- * check on_list without danger of it being re-added.
- */
- if (!tg->cfs_rq[cpu]->on_list)
- return;
+ for_each_possible_cpu(cpu) {
+ if (tg->se[cpu])
+ remove_entity_load_avg(tg->se[cpu]);
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ /*
+ * Only empty task groups can be destroyed; so we can speculatively
+ * check on_list without danger of it being re-added.
+ */
+ if (!tg->cfs_rq[cpu]->on_list)
+ continue;
+
+ rq = cpu_rq(cpu);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 837bcd383cda..492478bb717c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -313,7 +313,7 @@ extern int tg_nop(struct task_group *tg, void *data);
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent);
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] cgroup: make sure a parent css isn't offlined before its children
[not found] ` <20160121212416.GL6357-ndre7Fmf5hadTX5a5knrm8zTDFooKrT+cvkQGrU6aU0@public.gmane.org>
@ 2016-01-21 21:28 ` Tejun Heo
2016-01-22 8:18 ` Christian Borntraeger
0 siblings, 1 reply; 13+ messages in thread
From: Tejun Heo @ 2016-01-21 21:28 UTC (permalink / raw)
To: Peter Zijlstra
Cc: Christian Borntraeger, linux-kernel-u79uwXL29TY76Z2rM5mHXA,
linux-s390, KVM list, Oleg Nesterov, Paul E. McKenney, Li Zefan,
Johannes Weiner, cgroups-u79uwXL29TY76Z2rM5mHXA,
kernel-team-b10kYP2dOMg
On Thu, Jan 21, 2016 at 10:24:16PM +0100, Peter Zijlstra wrote:
> On Thu, Jan 21, 2016 at 03:31:11PM -0500, Tejun Heo wrote:
> > There are three subsystem callbacks in css shutdown path -
> > css_offline(), css_released() and css_free(). Except for
> > css_released(), cgroup core didn't use to guarantee the order of
> > invocation. css_offline() or css_free() could be called on a parent
> > css before its children. This behavior is unexpected and led to
> > use-after-free in cpu controller.
> >
> > This patch updates offline path so that a parent css is never offlined
> > before its children. Each css keeps online_cnt which reaches zero iff
> > itself and all its children are offline and offline_css() is invoked
> > only after online_cnt reaches zero.
> >
> > This fixes the reported cpu controller malfunction. The next patch
> > will update css_free() handling.
>
> No, I need to fix the cpu controller too, because the offending code
> sits off of css_free() (the next patch), but also does a call_rcu() in
> between, which also doesn't guarantee order.
Ah, I see. Christian, can you please apply all three patches and see
whether the problem gets fixed? Once verified, I'll update the patch
description and repost.
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/2] cgroup: make sure a parent css isn't offlined before its children
2016-01-21 21:28 ` Tejun Heo
@ 2016-01-22 8:18 ` Christian Borntraeger
0 siblings, 0 replies; 13+ messages in thread
From: Christian Borntraeger @ 2016-01-22 8:18 UTC (permalink / raw)
To: Tejun Heo, Peter Zijlstra
Cc: linux-kernel, linux-s390, KVM list, Oleg Nesterov,
Paul E. McKenney, Li Zefan, Johannes Weiner, cgroups, kernel-team
On 01/21/2016 10:28 PM, Tejun Heo wrote:
> On Thu, Jan 21, 2016 at 10:24:16PM +0100, Peter Zijlstra wrote:
>> On Thu, Jan 21, 2016 at 03:31:11PM -0500, Tejun Heo wrote:
>>> There are three subsystem callbacks in css shutdown path -
>>> css_offline(), css_released() and css_free(). Except for
>>> css_released(), cgroup core didn't use to guarantee the order of
>>> invocation. css_offline() or css_free() could be called on a parent
>>> css before its children. This behavior is unexpected and led to
>>> use-after-free in cpu controller.
>>>
>>> This patch updates offline path so that a parent css is never offlined
>>> before its children. Each css keeps online_cnt which reaches zero iff
>>> itself and all its children are offline and offline_css() is invoked
>>> only after online_cnt reaches zero.
>>>
>>> This fixes the reported cpu controller malfunction. The next patch
>>> will update css_free() handling.
>>
>> No, I need to fix the cpu controller too, because the offending code
>> sits off of css_free() (the next patch), but also does a call_rcu() in
>> between, which also doesn't guarantee order.
>
> Ah, I see. Christian, can you please apply all three patches and see
> whether the problem gets fixed? Once verified, I'll update the patch
> description and repost.
With these 3 patches I always run into the dio/scsi problem, but never in
the css issue. So I cannot test a full day or so, but it looks like
the problem is gone. At least it worked multiple times for 30minutes or
so until my system was killed by the io issue.
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous
2016-01-19 17:18 ` [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous Tejun Heo
@ 2016-01-22 14:24 ` Christian Borntraeger
2016-01-22 15:22 ` Tejun Heo
[not found] ` <20160119171841.GP3520-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
1 sibling, 1 reply; 13+ messages in thread
From: Christian Borntraeger @ 2016-01-22 14:24 UTC (permalink / raw)
To: Tejun Heo, Li Zefan, Johannes Weiner
Cc: Linux Kernel Mailing List, linux-s390, KVM list, Oleg Nesterov,
Peter Zijlstra, Paul E. McKenney, cgroups, kernel-team
On 01/19/2016 06:18 PM, Tejun Heo wrote:
> If "cpuset.memory_migrate" is set, when a process is moved from one
> cpuset to another with a different memory node mask, pages in used by
> the process are migrated to the new set of nodes. This was performed
> synchronously in the ->attach() callback, which is synchronized
> against process management. Recently, the synchronization was changed
> from per-process rwsem to global percpu rwsem for simplicity and
> optimization.
>
> Combined with the synchronous mm migration, this led to deadlocks
> because mm migration could schedule a work item which may in turn try
> to create a new worker blocking on the process management lock held
> from cgroup process migration path.
>
> This heavy an operation shouldn't be performed synchronously from that
> deep inside cgroup migration in the first place. This patch punts the
> actual migration to an ordered workqueue and updates cgroup process
> migration and cpuset config update paths to flush the workqueue after
> all locks are released. This way, the operations still seem
> synchronous to userland without entangling mm migration with process
> management synchronization. CPU hotplug can also invoke mm migration
> but there's no reason for it to wait for mm migrations and thus
> doesn't synchronize against their completions.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
> Reported-and-tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Hmmm I just realized that this patch slightly differs from the one that
I tested. Do we need a retest?
> Cc: stable@vger.kernel.org # v4.4+
> ---
> include/linux/cpuset.h | 6 ++++
> kernel/cgroup.c | 2 +
> kernel/cpuset.c | 71 +++++++++++++++++++++++++++++++++----------------
> 3 files changed, 57 insertions(+), 22 deletions(-)
>
> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
> index 85a868c..fea160e 100644
> --- a/include/linux/cpuset.h
> +++ b/include/linux/cpuset.h
> @@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
> task_unlock(current);
> }
>
> +extern void cpuset_post_attach_flush(void);
> +
> #else /* !CONFIG_CPUSETS */
>
> static inline bool cpusets_enabled(void) { return false; }
> @@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
> return false;
> }
>
> +static inline void cpuset_post_attach_flush(void)
> +{
> +}
> +
> #endif /* !CONFIG_CPUSETS */
>
> #endif /* _LINUX_CPUSET_H */
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index c03a640..88abd4d 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -58,6 +58,7 @@
> #include <linux/kthread.h>
> #include <linux/delay.h>
> #include <linux/atomic.h>
> +#include <linux/cpuset.h>
> #include <net/sock.h>
>
> /*
> @@ -2739,6 +2740,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
> out_unlock_threadgroup:
> percpu_up_write(&cgroup_threadgroup_rwsem);
> cgroup_kn_unlock(of->kn);
> + cpuset_post_attach_flush();
> return ret ?: nbytes;
> }
>
> diff --git a/kernel/cpuset.c b/kernel/cpuset.c
> index 3e945fc..41989ab 100644
> --- a/kernel/cpuset.c
> +++ b/kernel/cpuset.c
> @@ -287,6 +287,8 @@ static struct cpuset top_cpuset = {
> static DEFINE_MUTEX(cpuset_mutex);
> static DEFINE_SPINLOCK(callback_lock);
>
> +static struct workqueue_struct *cpuset_migrate_mm_wq;
> +
> /*
> * CPU / memory hotplug is handled asynchronously.
> */
> @@ -972,31 +974,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
> }
>
> /*
> - * cpuset_migrate_mm
> - *
> - * Migrate memory region from one set of nodes to another.
> - *
> - * Temporarilly set tasks mems_allowed to target nodes of migration,
> - * so that the migration code can allocate pages on these nodes.
> - *
> - * While the mm_struct we are migrating is typically from some
> - * other task, the task_struct mems_allowed that we are hacking
> - * is for our current task, which must allocate new pages for that
> - * migrating memory region.
> + * Migrate memory region from one set of nodes to another. This is
> + * performed asynchronously as it can be called from process migration path
> + * holding locks involved in process management. All mm migrations are
> + * performed in the queued order and can be waited for by flushing
> + * cpuset_migrate_mm_wq.
> */
>
> +struct cpuset_migrate_mm_work {
> + struct work_struct work;
> + struct mm_struct *mm;
> + nodemask_t from;
> + nodemask_t to;
> +};
> +
> +static void cpuset_migrate_mm_workfn(struct work_struct *work)
> +{
> + struct cpuset_migrate_mm_work *mwork =
> + container_of(work, struct cpuset_migrate_mm_work, work);
> +
> + /* on a wq worker, no need to worry about %current's mems_allowed */
> + do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
> + mmput(mwork->mm);
> + kfree(mwork);
> +}
> +
> static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
> const nodemask_t *to)
> {
> - struct task_struct *tsk = current;
> -
> - tsk->mems_allowed = *to;
> + struct cpuset_migrate_mm_work *mwork;
>
> - do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
> + mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
> + if (mwork) {
> + mwork->mm = mm;
> + mwork->from = *from;
> + mwork->to = *to;
> + INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
> + queue_work(cpuset_migrate_mm_wq, &mwork->work);
> + } else {
> + mmput(mm);
> + }
> +}
>
> - rcu_read_lock();
> - guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
> - rcu_read_unlock();
> +void cpuset_post_attach_flush(void)
> +{
> + flush_workqueue(cpuset_migrate_mm_wq);
> }
>
> /*
> @@ -1097,7 +1119,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
> mpol_rebind_mm(mm, &cs->mems_allowed);
> if (migrate)
> cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
> - mmput(mm);
> + else
> + mmput(mm);
> }
> css_task_iter_end(&it);
>
> @@ -1545,11 +1568,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
> * @old_mems_allowed is the right nodesets that we
> * migrate mm from.
> */
> - if (is_memory_migrate(cs)) {
> + if (is_memory_migrate(cs))
> cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
> &cpuset_attach_nodemask_to);
> - }
> - mmput(mm);
> + else
> + mmput(mm);
> }
> }
>
> @@ -1714,6 +1737,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
> mutex_unlock(&cpuset_mutex);
> kernfs_unbreak_active_protection(of->kn);
> css_put(&cs->css);
> + flush_workqueue(cpuset_migrate_mm_wq);
> return retval ?: nbytes;
> }
>
> @@ -2359,6 +2383,9 @@ void __init cpuset_init_smp(void)
> top_cpuset.effective_mems = node_states[N_MEMORY];
>
> register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
> +
> + cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
> + BUG_ON(!cpuset_migrate_mm_wq);
> }
>
> /**
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous
2016-01-22 14:24 ` Christian Borntraeger
@ 2016-01-22 15:22 ` Tejun Heo
[not found] ` <20160122152232.GB32380-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
0 siblings, 1 reply; 13+ messages in thread
From: Tejun Heo @ 2016-01-22 15:22 UTC (permalink / raw)
To: Christian Borntraeger
Cc: Li Zefan, Johannes Weiner, Linux Kernel Mailing List, linux-s390,
KVM list, Oleg Nesterov, Peter Zijlstra, Paul E. McKenney,
cgroups, kernel-team
Hello, Christian.
On Fri, Jan 22, 2016 at 03:24:40PM +0100, Christian Borntraeger wrote:
> Hmmm I just realized that this patch slightly differs from the one that
> I tested. Do we need a retest?
It should be fine but I'd appreciate if you can test it again.
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous
[not found] ` <20160119171841.GP3520-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
@ 2016-01-22 15:23 ` Tejun Heo
0 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2016-01-22 15:23 UTC (permalink / raw)
To: Li Zefan, Johannes Weiner
Cc: Linux Kernel Mailing List, Christian Borntraeger, linux-s390,
KVM list, Oleg Nesterov, Peter Zijlstra, Paul E. McKenney,
cgroups-u79uwXL29TY76Z2rM5mHXA, kernel-team-b10kYP2dOMg
On Tue, Jan 19, 2016 at 12:18:41PM -0500, Tejun Heo wrote:
> If "cpuset.memory_migrate" is set, when a process is moved from one
> cpuset to another with a different memory node mask, pages in used by
> the process are migrated to the new set of nodes. This was performed
> synchronously in the ->attach() callback, which is synchronized
> against process management. Recently, the synchronization was changed
> from per-process rwsem to global percpu rwsem for simplicity and
> optimization.
>
> Combined with the synchronous mm migration, this led to deadlocks
> because mm migration could schedule a work item which may in turn try
> to create a new worker blocking on the process management lock held
> from cgroup process migration path.
>
> This heavy an operation shouldn't be performed synchronously from that
> deep inside cgroup migration in the first place. This patch punts the
> actual migration to an ordered workqueue and updates cgroup process
> migration and cpuset config update paths to flush the workqueue after
> all locks are released. This way, the operations still seem
> synchronous to userland without entangling mm migration with process
> management synchronization. CPU hotplug can also invoke mm migration
> but there's no reason for it to wait for mm migrations and thus
> doesn't synchronize against their completions.
>
> Signed-off-by: Tejun Heo <tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
> Reported-and-tested-by: Christian Borntraeger <borntraeger-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
> Cc: stable-u79uwXL29TY76Z2rM5mHXA@public.gmane.org # v4.4+
Applied to cgroup/for-4.5-fixes.
Thanks.
--
tejun
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v2 1/2] cgroup: make sure a parent css isn't offlined before its children
[not found] ` <20160121203111.GF5157-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
2016-01-21 21:24 ` [PATCH 1/2] cgroup: make sure a parent css isn't offlined " Peter Zijlstra
@ 2016-01-22 15:45 ` Tejun Heo
1 sibling, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2016-01-22 15:45 UTC (permalink / raw)
To: Christian Borntraeger
Cc: linux-kernel-u79uwXL29TY76Z2rM5mHXA, linux-s390, KVM list,
Oleg Nesterov, Peter Zijlstra, Paul E. McKenney, Li Zefan,
Johannes Weiner, cgroups-u79uwXL29TY76Z2rM5mHXA,
kernel-team-b10kYP2dOMg
From aa226ff4a1ce79f229c6b7a4c0a14e17fececd01 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
Date: Thu, 21 Jan 2016 15:31:11 -0500
There are three subsystem callbacks in css shutdown path -
css_offline(), css_released() and css_free(). Except for
css_released(), cgroup core didn't guarantee the order of invocation.
css_offline() or css_free() could be called on a parent css before its
children. This behavior is unexpected and led to bugs in cpu and
memory controller.
This patch updates offline path so that a parent css is never offlined
before its children. Each css keeps online_cnt which reaches zero iff
itself and all its children are offline and offline_css() is invoked
only after online_cnt reaches zero.
This fixes the memory controller bug and allows the fix for cpu
controller.
Signed-off-by: Tejun Heo <tj-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
Reported-and-tested-by: Christian Borntraeger <borntraeger-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
Reported-by: Brian Christiansen <brian.o.christiansen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Link: http://lkml.kernel.org/g/5698A023.9070703-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org
Link: http://lkml.kernel.org/g/CAKB58ikDkzc8REt31WBkD99+hxNzjK4+FBmhkgS+NVrC9vjMSg-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org
Cc: Heiko Carstens <heiko.carstens-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
Cc: Peter Zijlstra <peterz-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
Cc: stable-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
---
Hello,
It turns out memcg hits the same issue too. Applied to
cgroup/for-4.5-fixes with description updated.
Thanks.
include/linux/cgroup-defs.h | 6 ++++++
kernel/cgroup.c | 22 +++++++++++++++++-----
2 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 7f540f7..789471d 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
+ /*
+ * Incremented by online self and children. Used to guarantee that
+ * parents are not offlined before their children.
+ */
+ atomic_t online_cnt;
+
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 88abd4d..d015877 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4760,6 +4760,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
+ atomic_set(&css->online_cnt, 0);
if (cgroup_parent(cgrp)) {
css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4782,6 +4783,10 @@ static int online_css(struct cgroup_subsys_state *css)
if (!ret) {
css->flags |= CSS_ONLINE;
rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+ atomic_inc(&css->online_cnt);
+ if (css->parent)
+ atomic_inc(&css->parent->online_cnt);
}
return ret;
}
@@ -5019,10 +5024,15 @@ static void css_killed_work_fn(struct work_struct *work)
container_of(work, struct cgroup_subsys_state, destroy_work);
mutex_lock(&cgroup_mutex);
- offline_css(css);
- mutex_unlock(&cgroup_mutex);
- css_put(css);
+ do {
+ offline_css(css);
+ css_put(css);
+ /* @css can't go away while we're holding cgroup_mutex */
+ css = css->parent;
+ } while (css && atomic_dec_and_test(&css->online_cnt));
+
+ mutex_unlock(&cgroup_mutex);
}
/* css kill confirmation processing requires process context, bounce */
@@ -5031,8 +5041,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- INIT_WORK(&css->destroy_work, css_killed_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
}
/**
--
2.5.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous
[not found] ` <20160122152232.GB32380-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
@ 2016-01-22 15:45 ` Christian Borntraeger
2016-01-22 15:47 ` Tejun Heo
0 siblings, 1 reply; 13+ messages in thread
From: Christian Borntraeger @ 2016-01-22 15:45 UTC (permalink / raw)
To: Tejun Heo
Cc: Li Zefan, Johannes Weiner, Linux Kernel Mailing List, linux-s390,
KVM list, Oleg Nesterov, Peter Zijlstra, Paul E. McKenney,
cgroups-u79uwXL29TY76Z2rM5mHXA, kernel-team-b10kYP2dOMg
On 01/22/2016 04:22 PM, Tejun Heo wrote:
> Hello, Christian.
>
> On Fri, Jan 22, 2016 at 03:24:40PM +0100, Christian Borntraeger wrote:
>> Hmmm I just realized that this patch slightly differs from the one that
>> I tested. Do we need a retest?
>
> It should be fine but I'd appreciate if you can test it again.
I did restart the test after I wrote the mail. The latest version from this mail
thread is still fine as far as I can tell.
Thanks
Christian
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v2 2/2] cgroup: make sure a parent css isn't freed before its children
2016-01-21 20:32 ` [PATCH 2/2] cgroup: make sure a parent css isn't freed " Tejun Heo
@ 2016-01-22 15:45 ` Tejun Heo
0 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2016-01-22 15:45 UTC (permalink / raw)
To: Christian Borntraeger
Cc: linux-kernel, linux-s390, KVM list, Oleg Nesterov, Peter Zijlstra,
Paul E. McKenney, Li Zefan, Johannes Weiner, cgroups, kernel-team
From 8bb5ef79bc0f4016ecf79e8dce6096a3c63603e4 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Thu, 21 Jan 2016 15:32:15 -0500
There are three subsystem callbacks in css shutdown path -
css_offline(), css_released() and css_free(). Except for
css_released(), cgroup core didn't guarantee the order of invocation.
css_offline() or css_free() could be called on a parent css before its
children. This behavior is unexpected and led to bugs in cpu and
memory controller.
The previous patch updated ordering for css_offline() which fixes the
cpu controller issue. While there currently isn't a known bug caused
by misordering of css_free() invocations, let's fix it too for
consistency.
css_free() ordering can be trivially fixed by moving putting of the
parent css below css_free() invocation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
Hello,
Applied to cgroup/for-4.5-fixes w/ description updated. Will push out
to Linus early next week.
Thanks.
kernel/cgroup.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d015877..d27904c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4657,14 +4657,15 @@ static void css_free_work_fn(struct work_struct *work)
if (ss) {
/* css free path */
+ struct cgroup_subsys_state *parent = css->parent;
int id = css->id;
- if (css->parent)
- css_put(css->parent);
-
ss->css_free(css);
cgroup_idr_remove(&ss->css_idr, id);
cgroup_put(cgrp);
+
+ if (parent)
+ css_put(parent);
} else {
/* cgroup free path */
atomic_dec(&cgrp->root->nr_cgrps);
--
2.5.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous
2016-01-22 15:45 ` Christian Borntraeger
@ 2016-01-22 15:47 ` Tejun Heo
0 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2016-01-22 15:47 UTC (permalink / raw)
To: Christian Borntraeger
Cc: Li Zefan, Johannes Weiner, Linux Kernel Mailing List, linux-s390,
KVM list, Oleg Nesterov, Peter Zijlstra, Paul E. McKenney,
cgroups, kernel-team
On Fri, Jan 22, 2016 at 04:45:49PM +0100, Christian Borntraeger wrote:
> On 01/22/2016 04:22 PM, Tejun Heo wrote:
> > Hello, Christian.
> >
> > On Fri, Jan 22, 2016 at 03:24:40PM +0100, Christian Borntraeger wrote:
> >> Hmmm I just realized that this patch slightly differs from the one that
> >> I tested. Do we need a retest?
> >
> > It should be fine but I'd appreciate if you can test it again.
>
> I did restart the test after I wrote the mail. The latest version from this mail
> thread is still fine as far as I can tell.
Thanks a lot. Much appreciated.
--
tejun
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2016-01-22 15:47 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
[not found] <56978452.6010606@de.ibm.com>
[not found] ` <20160114195630.GA3520@mtj.duckdns.org>
[not found] ` <5698A023.9070703@de.ibm.com>
[not found] ` <20160115164023.GH3520@mtj.duckdns.org>
[not found] ` <20160115164023.GH3520-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
2016-01-19 17:18 ` [PATCH cgroup/for-4.5-fixes] cpuset: make mm migration asynchronous Tejun Heo
2016-01-22 14:24 ` Christian Borntraeger
2016-01-22 15:22 ` Tejun Heo
[not found] ` <20160122152232.GB32380-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
2016-01-22 15:45 ` Christian Borntraeger
2016-01-22 15:47 ` Tejun Heo
[not found] ` <20160119171841.GP3520-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
2016-01-22 15:23 ` Tejun Heo
[not found] ` <5698A023.9070703-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
2016-01-21 20:31 ` [PATCH 1/2] cgroup: make sure a parent css isn't offlined before its children Tejun Heo
2016-01-21 20:32 ` [PATCH 2/2] cgroup: make sure a parent css isn't freed " Tejun Heo
2016-01-22 15:45 ` [PATCH v2 " Tejun Heo
[not found] ` <20160121203111.GF5157-qYNAdHglDFBN0TnZuCh8vA@public.gmane.org>
2016-01-21 21:24 ` [PATCH 1/2] cgroup: make sure a parent css isn't offlined " Peter Zijlstra
[not found] ` <20160121212416.GL6357-ndre7Fmf5hadTX5a5knrm8zTDFooKrT+cvkQGrU6aU0@public.gmane.org>
2016-01-21 21:28 ` Tejun Heo
2016-01-22 8:18 ` Christian Borntraeger
2016-01-22 15:45 ` [PATCH v2 " Tejun Heo
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).