From: Ben Blum <bblum@andrew.cmu.edu>
To: Ben Blum <bblum@andrew.cmu.edu>
Cc: linux-kernel@vger.kernel.org,
containers@lists.linux-foundation.org, akpm@linux-foundation.org,
ebiederm@xmission.com, lizf@cn.fujitsu.com, matthltc@us.ibm.com,
menage@google.com, oleg@redhat.com
Subject: [PATCH v5 2/3] cgroups: add can_attach callback for checking all threads in a group
Date: Wed, 11 Aug 2010 01:48:14 -0400 [thread overview]
Message-ID: <20100811054814.GC8743@ghc17.ghc.andrew.cmu.edu> (raw)
In-Reply-To: <20100811054604.GA8743@ghc17.ghc.andrew.cmu.edu>
[-- Attachment #1: cgroup-threadgroup-callback.patch --]
[-- Type: text/plain, Size: 8163 bytes --]
Add cgroup wrapper for safely calling can_attach on all threads in a threadgroup
From: Ben Blum <bblum@andrew.cmu.edu>
This patch adds a function cgroup_can_attach_per_thread which handles iterating
over each thread in a threadgroup safely with respect to the invariants that
will be used in cgroup_attach_proc. Also, subsystems whose can_attach calls
require per-thread validation are modified to use the per_thread wrapper to
avoid duplicating cgroup-internal code.
This is a pre-patch for cgroup-procs-writable.patch.
Signed-off-by: Ben Blum <bblum@andrew.cmu.edu>
---
include/linux/cgroup.h | 12 ++++++++++++
kernel/cgroup.c | 35 +++++++++++++++++++++++++++++++++++
kernel/cgroup_freezer.c | 27 ++++++++++++---------------
kernel/cpuset.c | 20 +++++++-------------
kernel/ns_cgroup.c | 27 +++++++++++++--------------
kernel/sched.c | 21 ++++++---------------
6 files changed, 85 insertions(+), 57 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e3d00fd..f040d66 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -580,6 +580,18 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
/*
+ * For use in subsystems whose can_attach functions need to run an operation
+ * on every task in the threadgroup. Calls the given callback once if the
+ * 'threadgroup' flag is false, or once per thread in the group if true.
+ * The callback should return 0 or a negative errno; so will this function.
+ * Group-member callbacks run in an RCU read-side critical section: no sleeping.
+ */
+int cgroup_can_attach_per_thread(struct cgroup *cgrp, struct task_struct *task,
+ int (*cb)(struct cgroup *cgrp,
+ struct task_struct *task),
+ bool threadgroup);
+
+/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
* if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
* CSS ID is assigned at cgroup allocation (create) automatically
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f91d7dd..e8b8f71 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1688,6 +1688,41 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
}
EXPORT_SYMBOL_GPL(cgroup_path);
+int cgroup_can_attach_per_thread(struct cgroup *cgrp, struct task_struct *task,
+ int (*cb)(struct cgroup *cgrp,
+ struct task_struct *task),
+ bool threadgroup)
+{
+ /* Start by running on the leader, in all cases. */
+ int ret = cb(cgrp, task);
+ if (ret < 0)
+ return ret;
+
+ if (threadgroup) {
+ /* Run on each task in the threadgroup. */
+ struct task_struct *c;
+ rcu_read_lock();
+ /*
+ * It is necessary for the given task to still be the leader
+ * to safely traverse thread_group. See cgroup_attach_proc.
+ */
+ if (!thread_group_leader(task)) {
+ rcu_read_unlock();
+ return -EAGAIN;
+ }
+ list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+ ret = cb(cgrp, c);
+ if (ret < 0) {
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cgroup_can_attach_per_thread);
+
/**
* cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
* @cgrp: the cgroup the task is attaching to
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index ce71ed5..677b24e 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -161,6 +161,13 @@ static bool is_task_frozen_enough(struct task_struct *task)
(task_is_stopped_or_traced(task) && freezing(task));
}
+static int freezer_can_attach_cb(struct cgroup *cgrp, struct task_struct *task)
+{
+ if (is_task_frozen_enough(task))
+ return -EBUSY;
+ return 0;
+}
+
/*
* The call to cgroup_lock() in the freezer.state write method prevents
* a write to that file racing against an attach, and hence the
@@ -171,6 +178,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
struct task_struct *task, bool threadgroup)
{
struct freezer *freezer;
+ int ret;
/*
* Anything frozen can't move or be moved to/from.
@@ -179,26 +187,15 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
* frozen, so it's sufficient to check the latter condition.
*/
- if (is_task_frozen_enough(task))
- return -EBUSY;
+ ret = cgroup_can_attach_per_thread(new_cgroup, task,
+ freezer_can_attach_cb, threadgroup);
+ if (ret < 0)
+ return ret;
freezer = cgroup_freezer(new_cgroup);
if (freezer->state == CGROUP_FROZEN)
return -EBUSY;
- if (threadgroup) {
- struct task_struct *c;
-
- rcu_read_lock();
- list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
- if (is_task_frozen_enough(c)) {
- rcu_read_unlock();
- return -EBUSY;
- }
- }
- rcu_read_unlock();
- }
-
return 0;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b23c097..cc4b1f7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1376,6 +1376,11 @@ static int fmeter_getrate(struct fmeter *fmp)
/* Protected by cgroup_lock */
static cpumask_var_t cpus_attach;
+static int cpuset_can_attach_cb(struct cgroup *cgrp, struct task_struct *task)
+{
+ return security_task_setscheduler(task, 0, NULL);
+}
+
/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
struct task_struct *tsk, bool threadgroup)
@@ -1397,22 +1402,11 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
if (tsk->flags & PF_THREAD_BOUND)
return -EINVAL;
- ret = security_task_setscheduler(tsk, 0, NULL);
+ ret = cgroup_can_attach_per_thread(cont, tsk, cpuset_can_attach_cb,
+ threadgroup);
if (ret)
return ret;
- if (threadgroup) {
- struct task_struct *c;
- rcu_read_lock();
- list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
- ret = security_task_setscheduler(c, 0, NULL);
- if (ret) {
- rcu_read_unlock();
- return ret;
- }
- }
- rcu_read_unlock();
- }
return 0;
}
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 2a5dfec..af0accf 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -42,9 +42,18 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
* (hence either you are in the same cgroup as task, or in an
* ancestor cgroup thereof)
*/
+static int ns_can_attach_cb(struct cgroup *new_cgroup, struct task_struct *task)
+{
+ if (!cgroup_is_descendant(new_cgroup, task))
+ return -EPERM;
+ return 0;
+}
+
static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
struct task_struct *task, bool threadgroup)
{
+ int ret;
+
if (current != task) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -53,20 +62,10 @@ static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
return -EPERM;
}
- if (!cgroup_is_descendant(new_cgroup, task))
- return -EPERM;
-
- if (threadgroup) {
- struct task_struct *c;
- rcu_read_lock();
- list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
- if (!cgroup_is_descendant(new_cgroup, c)) {
- rcu_read_unlock();
- return -EPERM;
- }
- }
- rcu_read_unlock();
- }
+ ret = cgroup_can_attach_per_thread(new_cgroup, task, ns_can_attach_cb,
+ threadgroup);
+ if (ret < 0)
+ return ret;
return 0;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 70fa78d..8330e6f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8715,21 +8715,12 @@ static int
cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *tsk, bool threadgroup)
{
- int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
- if (retval)
- return retval;
- if (threadgroup) {
- struct task_struct *c;
- rcu_read_lock();
- list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
- retval = cpu_cgroup_can_attach_task(cgrp, c);
- if (retval) {
- rcu_read_unlock();
- return retval;
- }
- }
- rcu_read_unlock();
- }
+ int ret = cgroup_can_attach_per_thread(cgrp, tsk,
+ cpu_cgroup_can_attach_task,
+ threadgroup);
+ if (ret)
+ return ret;
+
return 0;
}
next prev parent reply other threads:[~2010-08-11 5:49 UTC|newest]
Thread overview: 73+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-07-30 23:56 [PATCH v4 0/2] cgroups: implement moving a threadgroup's threads atomically with cgroup.procs Ben Blum
2010-07-30 23:57 ` [PATCH v4 1/2] cgroups: read-write lock CLONE_THREAD forking per threadgroup Ben Blum
2010-08-04 3:44 ` Paul Menage
2010-08-04 4:33 ` Ben Blum
2010-08-04 4:34 ` Paul Menage
2010-08-06 6:02 ` Ben Blum
2010-08-06 7:08 ` KAMEZAWA Hiroyuki
2010-08-04 16:34 ` Brian K. White
2010-07-30 23:59 ` [PATCH v4 2/2] cgroups: make procs file writable Ben Blum
2010-08-04 1:08 ` KAMEZAWA Hiroyuki
2010-08-04 4:28 ` Ben Blum
2010-08-04 4:30 ` Paul Menage
2010-08-04 4:38 ` Ben Blum
2010-08-04 4:46 ` Paul Menage
2010-08-03 19:58 ` [PATCH v4 0/2] cgroups: implement moving a threadgroup's threads atomically with cgroup.procs Andrew Morton
2010-08-03 23:45 ` KAMEZAWA Hiroyuki
2010-08-04 2:00 ` Li Zefan
2010-08-11 5:46 ` [PATCH v5 0/3] " Ben Blum
2010-08-11 5:47 ` [PATCH v5 1/3] cgroups: read-write lock CLONE_THREAD forking per threadgroup Ben Blum
2010-08-23 23:35 ` Paul Menage
2010-08-11 5:48 ` Ben Blum [this message]
2010-08-23 23:31 ` [PATCH v5 2/3] cgroups: add can_attach callback for checking all threads in a group Paul Menage
2010-08-11 5:48 ` [PATCH v5 3/3] cgroups: make procs file writable Ben Blum
2010-08-24 18:08 ` Paul Menage
2010-12-24 8:22 ` [PATCH v6 0/3] cgroups: implement moving a threadgroup's threads atomically with cgroup.procs Ben Blum
2010-12-24 8:23 ` [PATCH v6 1/3] cgroups: read-write lock CLONE_THREAD forking per threadgroup Ben Blum
2010-12-24 8:24 ` [PATCH v6 2/3] cgroups: add can_attach callback for checking all threads in a group Ben Blum
2010-12-24 8:24 ` [PATCH v6 3/3] cgroups: make procs file writable Ben Blum
2011-01-12 23:26 ` Paul E. McKenney
2010-12-26 12:09 ` [PATCH v7 0/3] cgroups: implement moving a threadgroup's threads atomically with cgroup.procs Ben Blum
2010-12-26 12:09 ` [PATCH v7 1/3] cgroups: read-write lock CLONE_THREAD forking per threadgroup Ben Blum
2011-01-24 8:38 ` Paul Menage
2011-01-24 21:05 ` Andrew Morton
2011-02-04 21:25 ` Ben Blum
2011-02-04 21:36 ` Andrew Morton
2011-02-04 21:43 ` Ben Blum
2011-02-14 5:31 ` Paul Menage
2010-12-26 12:11 ` [PATCH v7 2/3] cgroups: add atomic-context per-thread subsystem callbacks Ben Blum
2011-01-24 8:38 ` Paul Menage
2011-01-24 15:32 ` Ben Blum
2010-12-26 12:12 ` [PATCH v7 3/3] cgroups: make procs file writable Ben Blum
2011-02-08 1:35 ` [PATCH v8 0/3] cgroups: implement moving a threadgroup's threads atomically with cgroup.procs Ben Blum
2011-02-08 1:37 ` [PATCH v8 1/3] cgroups: read-write lock CLONE_THREAD forking per threadgroup Ben Blum
2011-03-03 17:54 ` Paul Menage
2011-02-08 1:39 ` [PATCH v8 2/3] cgroups: add per-thread subsystem callbacks Ben Blum
2011-03-03 17:59 ` Paul Menage
2011-02-08 1:39 ` [PATCH v8 3/3] cgroups: make procs file writable Ben Blum
2011-02-16 19:22 ` [PATCH v8 4/3] cgroups: use flex_array in attach_proc Ben Blum
2011-03-03 17:48 ` Paul Menage
2011-03-22 5:15 ` Ben Blum
2011-03-22 5:19 ` [PATCH v8.5 " Ben Blum
2011-03-03 18:38 ` [PATCH v8 3/3] cgroups: make procs file writable Paul Menage
2011-03-10 6:18 ` Ben Blum
2011-03-10 20:01 ` Paul Menage
2011-03-15 21:13 ` Ben Blum
2011-03-18 16:54 ` Paul Menage
2011-03-22 5:18 ` [PATCH v8.5 " Ben Blum
2011-03-29 23:27 ` Paul Menage
2011-03-29 23:39 ` Andrew Morton
2011-03-22 5:08 ` [PATCH v8 " Ben Blum
2011-02-09 23:10 ` [PATCH v8 0/3] cgroups: implement moving a threadgroup's threads atomically with cgroup.procs Andrew Morton
2011-02-10 1:02 ` KAMEZAWA Hiroyuki
2011-02-10 1:36 ` Ben Blum
2011-02-14 6:12 ` Paul Menage
2011-02-14 6:12 ` Paul Menage
2011-04-06 19:44 ` [PATCH v8.75 0/4] " Ben Blum
2011-04-06 19:45 ` [PATCH v8.75 1/4] cgroups: read-write lock CLONE_THREAD forking per threadgroup Ben Blum
2011-04-06 19:46 ` [PATCH v8.75 2/4] cgroups: add per-thread subsystem callbacks Ben Blum
2011-04-06 19:46 ` [PATCH v8.75 3/4] cgroups: make procs file writable Ben Blum
2011-04-06 19:47 ` [PATCH v8.75 4/4] cgroups: use flex_array in attach_proc Ben Blum
2011-04-12 23:25 ` [PATCH v8.75 0/4] cgroups: implement moving a threadgroup's threads atomically with cgroup.procs Andrew Morton
2011-04-12 23:59 ` Ben Blum
2011-04-13 2:07 ` Li Zefan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20100811054814.GC8743@ghc17.ghc.andrew.cmu.edu \
--to=bblum@andrew.cmu.edu \
--cc=akpm@linux-foundation.org \
--cc=containers@lists.linux-foundation.org \
--cc=ebiederm@xmission.com \
--cc=linux-kernel@vger.kernel.org \
--cc=lizf@cn.fujitsu.com \
--cc=matthltc@us.ibm.com \
--cc=menage@google.com \
--cc=oleg@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).