From: Waiman Long <longman@redhat.com>
To: Tejun Heo <tj@kernel.org>, Li Zefan <lizefan@huawei.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>
Cc: cgroups@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-doc@vger.kernel.org, linux-mm@kvack.org,
kernel-team@fb.com, pjt@google.com, luto@amacapital.net,
efault@gmx.de, longman@redhat.com
Subject: [RFC PATCH v2 10/17] cgroup: Make debug cgroup support v2 and thread mode
Date: Mon, 15 May 2017 09:34:09 -0400
Message-ID: <1494855256-12558-11-git-send-email-longman@redhat.com>
In-Reply-To: <1494855256-12558-1-git-send-email-longman@redhat.com>
Besides supporting cgroup v2 and thread mode, the following changes
are made:
1) The current_* cgroup files now reside only at the root, as there is
no need to duplicate files with the same function throughout the
cgroup hierarchy.
2) The cgroup_css_links_read() function is modified to report
the number of tasks that are skipped because of overflow.
3) The relationship between proc_cset and threaded_csets is displayed.
4) The number of extra unaccounted references is displayed.
5) The status of being a thread root or threaded cgroup is displayed.
6) The current_css_set_read() function now prints out the addresses of
the css'es associated with the current css_set.
7) A new cgroup_subsys_states file is added to display the css objects
associated with a cgroup.
8) A new cgroup_masks file is added to display the various controller
bit masks in the cgroup. An illustrative example of the output of
these two new files is shown below.
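Purely as an illustration (not part of this patch), reading the two new
files on a v2 hierarchy might look roughly like the sketch below. The
pointer values, css IDs and chosen controllers are made up, the output
format simply mirrors the seq_printf() calls in the patch, and the steps
assume the debug controller is built into the kernel:

  # echo "+cpu +memory +debug" > /sys/fs/cgroup/cgroup.subtree_control
  # mkdir /sys/fs/cgroup/child
  # echo "+cpu +memory" > /sys/fs/cgroup/child/cgroup.subtree_control
  # cat /sys/fs/cgroup/child/cgroup_masks
  subtree_control: cpu, memory
  subtree_ss_mask: cpu, memory
  # cat /sys/fs/cgroup/child/cgroup_subsys_states
   1: cpu    - ffff8803227d1400[12] 1 P=1
   4: memory - ffff880311e22000[15] 1 P=1
  12: debug  - ffff880318f3c200[9]  1 P=1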
Signed-off-by: Waiman Long <longman@redhat.com>
---
kernel/cgroup/debug.c | 196 +++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 179 insertions(+), 17 deletions(-)
diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c
index ada53e6..3121811 100644
--- a/kernel/cgroup/debug.c
+++ b/kernel/cgroup/debug.c
@@ -38,10 +38,37 @@ static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
return count;
}
-static u64 current_css_set_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
+static int current_css_set_read(struct seq_file *seq, void *v)
{
- return (u64)(unsigned long)current->cgroups;
+ struct css_set *cset;
+ struct cgroup_subsys *ss;
+ struct cgroup_subsys_state *css;
+ int i, refcnt;
+
+ mutex_lock(&cgroup_mutex);
+ spin_lock_irq(&css_set_lock);
+ rcu_read_lock();
+ cset = rcu_dereference(current->cgroups);
+ refcnt = refcount_read(&cset->refcount);
+ seq_printf(seq, "css_set %pK %d", cset, refcnt);
+ if (refcnt > cset->task_count)
+ seq_printf(seq, " +%d", refcnt - cset->task_count);
+ seq_puts(seq, "\n");
+
+ /*
+ * Print the css'es stored in the current css_set.
+ */
+ for_each_subsys(ss, i) {
+ css = cset->subsys[ss->id];
+ if (!css)
+ continue;
+ seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
+ (unsigned long)css, css->id);
+ }
+ rcu_read_unlock();
+ spin_unlock_irq(&css_set_lock);
+ mutex_unlock(&cgroup_mutex);
+ return 0;
}
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
@@ -86,31 +113,151 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
struct cgroup_subsys_state *css = seq_css(seq);
struct cgrp_cset_link *link;
+ int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;
spin_lock_irq(&css_set_lock);
+ if (css->cgroup->proc_cgrp)
+ seq_puts(seq, (css->cgroup->proc_cgrp == css->cgroup)
+ ? "[thread root]\n" : "[threaded]\n");
+
list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
int count = 0;
+ int refcnt = refcount_read(&cset->refcount);
+
+ /*
+ * Print out the proc_cset and threaded_cset relationship
+ * and highlight difference between refcount and task_count.
+ */
+ seq_printf(seq, "css_set %pK", cset);
+ if (rcu_dereference_protected(cset->proc_cset, 1) != cset) {
+ threaded_csets++;
+ seq_printf(seq, "=>%pK", cset->proc_cset);
+ }
+ if (!list_empty(&cset->threaded_csets)) {
+ struct css_set *tcset;
+ int idx = 0;
- seq_printf(seq, "css_set %pK\n", cset);
+ list_for_each_entry(tcset, &cset->threaded_csets,
+ threaded_csets_node) {
+ seq_puts(seq, idx ? "," : "<=");
+ seq_printf(seq, "%pK", tcset);
+ idx++;
+ }
+ } else {
+ seq_printf(seq, " %d", refcnt);
+ if (refcnt - cset->task_count > 0) {
+ int extra = refcnt - cset->task_count;
+
+ seq_printf(seq, " +%d", extra);
+ /*
+ * Take out the one additional reference in
+ * init_css_set.
+ */
+ if (cset == &init_css_set)
+ extra--;
+ extra_refs += extra;
+ }
+ }
+ seq_puts(seq, "\n");
list_for_each_entry(task, &cset->tasks, cg_list) {
- if (count++ > MAX_TASKS_SHOWN_PER_CSS)
- goto overflow;
- seq_printf(seq, " task %d\n", task_pid_vnr(task));
+ if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
+ seq_printf(seq, " task %d\n",
+ task_pid_vnr(task));
}
list_for_each_entry(task, &cset->mg_tasks, cg_list) {
- if (count++ > MAX_TASKS_SHOWN_PER_CSS)
- goto overflow;
- seq_printf(seq, " task %d\n", task_pid_vnr(task));
+ if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
+ seq_printf(seq, " task %d\n",
+ task_pid_vnr(task));
}
- continue;
- overflow:
- seq_puts(seq, " ...\n");
+ /* show # of overflowed tasks */
+ if (count > MAX_TASKS_SHOWN_PER_CSS)
+ seq_printf(seq, " ... (%d)\n",
+ count - MAX_TASKS_SHOWN_PER_CSS);
+
+ if (cset->dead) {
+ seq_puts(seq, " [dead]\n");
+ dead_cnt++;
+ }
+
+ WARN_ON(count != cset->task_count);
}
spin_unlock_irq(&css_set_lock);
+
+ if (!dead_cnt && !extra_refs && !threaded_csets)
+ return 0;
+
+ seq_puts(seq, "\n");
+ if (threaded_csets)
+ seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
+ if (extra_refs)
+ seq_printf(seq, "extra references = %d\n", extra_refs);
+ if (dead_cnt)
+ seq_printf(seq, "dead css_sets = %d\n", dead_cnt);
+
+ return 0;
+}
+
+static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
+{
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct cgroup_subsys *ss;
+ struct cgroup_subsys_state *css;
+ char pbuf[16];
+ int i;
+
+ mutex_lock(&cgroup_mutex);
+ for_each_subsys(ss, i) {
+ css = rcu_dereference_check(cgrp->subsys[ss->id], true);
+ if (!css)
+ continue;
+ pbuf[0] = '\0';
+
+ /* Show the parent CSS if applicable*/
+ if (css->parent)
+ snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
+ css->parent->id);
+ seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
+ (unsigned long)css, css->id,
+ atomic_read(&css->online_cnt), pbuf);
+ }
+ mutex_unlock(&cgroup_mutex);
+ return 0;
+}
+
+static int cgroup_masks_read(struct seq_file *seq, void *v)
+{
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct cgroup_subsys *ss;
+ int i, j;
+ struct {
+ u16 *mask;
+ char *name;
+ } mask_list[] = {
+ { &cgrp->subtree_control, "subtree_control" },
+ { &cgrp->subtree_ss_mask, "subtree_ss_mask" },
+ };
+
+ mutex_lock(&cgroup_mutex);
+ for (i = 0; i < ARRAY_SIZE(mask_list); i++) {
+ u16 mask = *mask_list[i].mask;
+ bool first = true;
+
+ seq_printf(seq, "%-15s: ", mask_list[i].name);
+ for_each_subsys(ss, j) {
+ if (!(mask & (1 << ss->id)))
+ continue;
+ if (!first)
+ seq_puts(seq, ", ");
+ seq_puts(seq, ss->name);
+ first = false;
+ }
+ seq_putc(seq, '\n');
+ }
+ mutex_unlock(&cgroup_mutex);
return 0;
}
@@ -128,17 +275,20 @@ static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
.name = "current_css_set",
- .read_u64 = current_css_set_read,
+ .seq_show = current_css_set_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "current_css_set_refcount",
.read_u64 = current_css_set_refcount_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "current_css_set_cg_links",
.seq_show = current_css_set_cg_links_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
},
{
@@ -147,6 +297,16 @@ static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
},
{
+ .name = "cgroup_subsys_states",
+ .seq_show = cgroup_subsys_states_read,
+ },
+
+ {
+ .name = "cgroup_masks",
+ .seq_show = cgroup_masks_read,
+ },
+
+ {
.name = "releasable",
.read_u64 = releasable_read,
},
@@ -155,7 +315,9 @@ static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
};
struct cgroup_subsys debug_cgrp_subsys = {
- .css_alloc = debug_css_alloc,
- .css_free = debug_css_free,
- .legacy_cftypes = debug_files,
+ .css_alloc = debug_css_alloc,
+ .css_free = debug_css_free,
+ .legacy_cftypes = debug_files,
+ .dfl_cftypes = debug_files,
+ .threaded = true,
};
--
1.8.3.1
Thread overview: 70+ messages in thread
2017-05-15 13:33 [RFC PATCH v2 00/17] cgroup: Major changes to cgroup v2 core Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 01/17] cgroup: reorganize cgroup.procs / task write path Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 02/17] cgroup: add @flags to css_task_iter_start() and implement CSS_TASK_ITER_PROCS Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 03/17] cgroup: introduce cgroup->proc_cgrp and threaded css_set handling Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 04/17] cgroup: implement CSS_TASK_ITER_THREADED Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 06/17] cgroup: Fix reference counting bug in cgroup_procs_write() Waiman Long
2017-05-17 19:20 ` Tejun Heo
2017-05-15 13:34 ` [RFC PATCH v2 07/17] cgroup: Prevent kill_css() from being called more than once Waiman Long
2017-05-17 19:23 ` Tejun Heo
[not found] ` <20170517192357.GC942-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
2017-05-17 20:24 ` Waiman Long
2017-05-17 21:34 ` Tejun Heo
2017-05-15 13:34 ` [RFC PATCH v2 08/17] cgroup: Move debug cgroup to its own file Waiman Long
2017-05-17 21:36 ` Tejun Heo
2017-05-18 15:29 ` Waiman Long
2017-05-18 15:52 ` Waiman Long
[not found] ` <ee36d4f8-9e9d-a5c7-2174-56c21aaf75af-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-05-19 19:21 ` Tejun Heo
[not found] ` <20170519192146.GA9741-U58pm7aPsJ1N0TnZuCh8vA@public.gmane.org>
2017-05-19 19:33 ` Waiman Long
2017-05-19 20:28 ` Tejun Heo
2017-05-15 13:34 ` [RFC PATCH v2 09/17] cgroup: Keep accurate count of tasks in each css_set Waiman Long
[not found] ` <1494855256-12558-10-git-send-email-longman-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-05-17 21:40 ` Tejun Heo
2017-05-18 15:56 ` Waiman Long
2017-05-15 13:34 ` Waiman Long [this message]
2017-05-17 21:43 ` [RFC PATCH v2 10/17] cgroup: Make debug cgroup support v2 and thread mode Tejun Heo
2017-05-18 15:58 ` Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 11/17] cgroup: Implement new thread mode semantics Waiman Long
2017-05-17 21:47 ` Tejun Heo
2017-05-18 17:21 ` Waiman Long
2017-05-19 20:26 ` Tejun Heo
2017-05-19 20:58 ` Tejun Heo
[not found] ` <20170519202624.GA15279-U58pm7aPsJ1N0TnZuCh8vA@public.gmane.org>
2017-05-22 17:13 ` Waiman Long
[not found] ` <b1d02881-f522-8baa-5ebe-9b1ad74a03e4-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-05-22 17:32 ` Waiman Long
2017-05-24 20:36 ` Tejun Heo
2017-05-24 21:17 ` Waiman Long
2017-05-24 21:27 ` Tejun Heo
2017-06-01 14:50 ` Tejun Heo
[not found] ` <20170601145042.GA3494-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
2017-06-01 15:10 ` Peter Zijlstra
2017-06-01 15:35 ` Tejun Heo
2017-06-01 18:44 ` Waiman Long
2017-06-01 18:47 ` Tejun Heo
2017-06-01 19:27 ` Waiman Long
[not found] ` <ca834386-c41c-2797-702f-91516b06779f-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-06-01 20:38 ` Tejun Heo
[not found] ` <20170601203815.GA13390-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
2017-06-01 20:48 ` Waiman Long
2017-06-01 20:52 ` Tejun Heo
[not found] ` <20170601205203.GB13390-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
2017-06-01 21:12 ` Waiman Long
2017-06-01 21:18 ` Tejun Heo
2017-06-02 20:36 ` Waiman Long
2017-06-03 10:33 ` Tejun Heo
2017-06-01 19:55 ` Waiman Long
2017-06-01 20:15 ` Waiman Long
2017-06-01 18:41 ` Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 12/17] cgroup: Remove cgroup v2 no internal process constraint Waiman Long
2017-05-19 20:38 ` Tejun Heo
2017-05-20 2:10 ` Mike Galbraith
2017-05-24 17:01 ` Tejun Heo
2017-05-22 16:56 ` Waiman Long
2017-05-24 17:05 ` Tejun Heo
2017-05-24 18:09 ` Waiman Long
[not found] ` <20170524170527.GH24798-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
2017-05-24 18:19 ` Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 13/17] cgroup: Allow fine-grained controllers control in cgroup v2 Waiman Long
2017-05-19 20:55 ` Tejun Heo
2017-05-19 21:20 ` Waiman Long
2017-05-24 17:31 ` Tejun Heo
2017-05-24 17:49 ` Waiman Long
[not found] ` <29bc746d-f89b-3385-fd5c-314bcd22f9f7-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-05-24 17:56 ` Tejun Heo
[not found] ` <20170524175600.GL24798-piEFEHQLUPpN0TnZuCh8vA@public.gmane.org>
2017-05-24 18:17 ` Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 14/17] cgroup: Enable printing of v2 controllers' cgroup hierarchy Waiman Long
[not found] ` <1494855256-12558-1-git-send-email-longman-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-05-15 13:34 ` [RFC PATCH v2 05/17] cgroup: implement cgroup v2 thread support Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 15/17] sched: Misc preps for cgroup unified hierarchy interface Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 16/17] sched: Implement interface for cgroup unified hierarchy Waiman Long
2017-05-15 13:34 ` [RFC PATCH v2 17/17] sched: Make cpu/cpuacct threaded controllers Waiman Long