From: Greg Thelen <gthelen@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
containers@lists.osdl.org, Andrea Righi <arighi@develer.com>,
Balbir Singh <balbir@linux.vnet.ibm.com>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
Greg Thelen <gthelen@google.com>
Subject: [PATCH 05/10] memcg: add dirty page accounting infrastructure
Date: Sun, 3 Oct 2010 23:58:00 -0700 [thread overview]
Message-ID: <1286175485-30643-6-git-send-email-gthelen@google.com> (raw)
In-Reply-To: <1286175485-30643-1-git-send-email-gthelen@google.com>
Add memcg routines to track dirty, writeback, and unstable_NFS pages.
These routines are not yet used by the kernel to count such pages.
A later change adds kernel calls to these new routines.
Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrea Righi <arighi@develer.com>
---
include/linux/memcontrol.h | 3 +
mm/memcontrol.c | 89 ++++++++++++++++++++++++++++++++++++++++----
2 files changed, 84 insertions(+), 8 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7c7bec4..6303da1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -28,6 +28,9 @@ struct mm_struct;
/* Stats that can be updated by kernel. */
enum mem_cgroup_write_page_stat_item {
MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+ MEMCG_NR_FILE_DIRTY, /* # of dirty pages in page cache */
+ MEMCG_NR_FILE_WRITEBACK, /* # of pages under writeback */
+ MEMCG_NR_FILE_UNSTABLE_NFS, /* # of NFS unstable pages */
};
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 267d774..f40839f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -85,10 +85,13 @@ enum mem_cgroup_stat_index {
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_FILE_DIRTY, /* # of dirty pages in page cache */
+ MEM_CGROUP_STAT_FILE_WRITEBACK, /* # of pages under writeback */
+ MEM_CGROUP_STAT_FILE_UNSTABLE_NFS, /* # of NFS unstable pages */
MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
/* incremented at every pagein/pageout */
MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
@@ -1626,6 +1629,48 @@ void mem_cgroup_update_page_stat(struct page *page,
ClearPageCgroupFileMapped(pc);
idx = MEM_CGROUP_STAT_FILE_MAPPED;
break;
+
+ case MEMCG_NR_FILE_DIRTY:
+ /* Use Test{Set,Clear} to only un/charge the memcg once. */
+ if (val > 0) {
+ if (TestSetPageCgroupFileDirty(pc))
+ /* already set */
+ val = 0;
+ } else {
+ if (!TestClearPageCgroupFileDirty(pc))
+ /* already cleared */
+ val = 0;
+ }
+ idx = MEM_CGROUP_STAT_FILE_DIRTY;
+ break;
+
+ case MEMCG_NR_FILE_WRITEBACK:
+ /*
+ * This counter is adjusted while holding the mapping's
+ * tree_lock. Therefore there is no race between settings and
+ * clearing of this flag.
+ */
+ if (val > 0)
+ SetPageCgroupFileWriteback(pc);
+ else
+ ClearPageCgroupFileWriteback(pc);
+ idx = MEM_CGROUP_STAT_FILE_WRITEBACK;
+ break;
+
+ case MEMCG_NR_FILE_UNSTABLE_NFS:
+ /* Use Test{Set,Clear} to only un/charge the memcg once. */
+ if (val > 0) {
+ if (TestSetPageCgroupFileUnstableNFS(pc))
+ /* already set */
+ val = 0;
+ } else {
+ if (!TestClearPageCgroupFileUnstableNFS(pc))
+ /* already cleared */
+ val = 0;
+ }
+ idx = MEM_CGROUP_STAT_FILE_UNSTABLE_NFS;
+ break;
+
default:
BUG();
}
@@ -2133,6 +2178,16 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
memcg_check_events(mem, pc->page);
}
+static void mem_cgroup_move_account_page_stat(struct mem_cgroup *from,
+ struct mem_cgroup *to,
+ enum mem_cgroup_stat_index idx)
+{
+ preempt_disable();
+ __this_cpu_dec(from->stat->count[idx]);
+ __this_cpu_inc(to->stat->count[idx]);
+ preempt_enable();
+}
+
/**
* __mem_cgroup_move_account - move account of the page
* @pc: page_cgroup of the page.
@@ -2159,13 +2214,18 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
VM_BUG_ON(!PageCgroupUsed(pc));
VM_BUG_ON(pc->mem_cgroup != from);
- if (PageCgroupFileMapped(pc)) {
- /* Update mapped_file data for mem_cgroup */
- preempt_disable();
- __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
- __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
- preempt_enable();
- }
+ if (PageCgroupFileMapped(pc))
+ mem_cgroup_move_account_page_stat(from, to,
+ MEM_CGROUP_STAT_FILE_MAPPED);
+ if (PageCgroupFileDirty(pc))
+ mem_cgroup_move_account_page_stat(from, to,
+ MEM_CGROUP_STAT_FILE_DIRTY);
+ if (PageCgroupFileWriteback(pc))
+ mem_cgroup_move_account_page_stat(from, to,
+ MEM_CGROUP_STAT_FILE_WRITEBACK);
+ if (PageCgroupFileUnstableNFS(pc))
+ mem_cgroup_move_account_page_stat(from, to,
+ MEM_CGROUP_STAT_FILE_UNSTABLE_NFS);
mem_cgroup_charge_statistics(from, pc, false);
if (uncharge)
/* This is not "cancel", but cancel_charge does all we need. */
@@ -3545,6 +3605,9 @@ enum {
MCS_PGPGIN,
MCS_PGPGOUT,
MCS_SWAP,
+ MCS_FILE_DIRTY,
+ MCS_WRITEBACK,
+ MCS_UNSTABLE_NFS,
MCS_INACTIVE_ANON,
MCS_ACTIVE_ANON,
MCS_INACTIVE_FILE,
@@ -3567,6 +3630,9 @@ struct {
{"pgpgin", "total_pgpgin"},
{"pgpgout", "total_pgpgout"},
{"swap", "total_swap"},
+ {"dirty", "total_dirty"},
+ {"writeback", "total_writeback"},
+ {"nfs", "total_nfs"},
{"inactive_anon", "total_inactive_anon"},
{"active_anon", "total_active_anon"},
{"inactive_file", "total_inactive_file"},
@@ -3596,6 +3662,13 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
s->stat[MCS_SWAP] += val * PAGE_SIZE;
}
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_DIRTY);
+ s->stat[MCS_FILE_DIRTY] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_WRITEBACK);
+ s->stat[MCS_WRITEBACK] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_UNSTABLE_NFS);
+ s->stat[MCS_UNSTABLE_NFS] += val * PAGE_SIZE;
+
/* per zone stat */
val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
--
1.7.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2010-10-04 7:01 UTC|newest]
Thread overview: 96+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-10-04 6:57 [PATCH 00/10] memcg: per cgroup dirty page accounting Greg Thelen
2010-10-04 6:57 ` [PATCH 01/10] memcg: add page_cgroup flags for dirty page tracking Greg Thelen
2010-10-05 6:20 ` KAMEZAWA Hiroyuki
2010-10-06 0:37 ` Daisuke Nishimura
2010-10-06 11:07 ` Balbir Singh
2010-10-04 6:57 ` [PATCH 02/10] memcg: document cgroup dirty memory interfaces Greg Thelen
2010-10-05 6:48 ` KAMEZAWA Hiroyuki
2010-10-06 0:49 ` Daisuke Nishimura
2010-10-06 11:12 ` Balbir Singh
2010-10-04 6:57 ` [PATCH 03/10] memcg: create extensible page stat update routines Greg Thelen
2010-10-04 13:48 ` Ciju Rajan K
2010-10-04 15:43 ` Greg Thelen
2010-10-04 17:35 ` Ciju Rajan K
2010-10-05 6:51 ` KAMEZAWA Hiroyuki
2010-10-05 7:10 ` Greg Thelen
2010-10-05 15:42 ` Minchan Kim
2010-10-05 19:59 ` Greg Thelen
2010-10-05 23:57 ` Minchan Kim
2010-10-06 0:48 ` Greg Thelen
2010-10-06 16:19 ` Balbir Singh
2010-10-04 6:57 ` [PATCH 04/10] memcg: disable local interrupts in lock_page_cgroup() Greg Thelen
2010-10-05 6:54 ` KAMEZAWA Hiroyuki
2010-10-05 7:18 ` Greg Thelen
2010-10-05 16:03 ` Minchan Kim
2010-10-05 23:26 ` Greg Thelen
2010-10-06 0:15 ` Minchan Kim
2010-10-07 0:35 ` KAMEZAWA Hiroyuki
2010-10-07 1:54 ` Daisuke Nishimura
2010-10-07 2:17 ` KAMEZAWA Hiroyuki
2010-10-07 6:21 ` [PATCH] memcg: reduce lock time at move charge (Was " KAMEZAWA Hiroyuki
2010-10-07 6:24 ` [PATCH] memcg: lock-free clear page writeback " KAMEZAWA Hiroyuki
2010-10-07 9:05 ` KAMEZAWA Hiroyuki
2010-10-07 23:35 ` Minchan Kim
2010-10-08 4:41 ` KAMEZAWA Hiroyuki
2010-10-07 7:28 ` [PATCH] memcg: reduce lock time at move charge " Daisuke Nishimura
2010-10-07 7:42 ` KAMEZAWA Hiroyuki
2010-10-07 8:04 ` [PATCH v2] " KAMEZAWA Hiroyuki
2010-10-07 23:14 ` Andrew Morton
2010-10-08 1:12 ` Daisuke Nishimura
2010-10-08 4:37 ` KAMEZAWA Hiroyuki
2010-10-08 4:55 ` Andrew Morton
2010-10-08 5:12 ` KAMEZAWA Hiroyuki
2010-10-08 10:41 ` KAMEZAWA Hiroyuki
2010-10-12 3:39 ` Balbir Singh
2010-10-12 3:42 ` KAMEZAWA Hiroyuki
2010-10-12 3:54 ` Balbir Singh
2010-10-12 3:56 ` Daisuke Nishimura
2010-10-12 5:01 ` KAMEZAWA Hiroyuki
2010-10-12 5:48 ` [PATCH v4] memcg: reduce lock time at move charge KAMEZAWA Hiroyuki
2010-10-12 6:23 ` Daisuke Nishimura
2010-10-12 5:39 ` [PATCH 04/10] memcg: disable local interrupts in lock_page_cgroup() Balbir Singh
2010-10-04 6:58 ` Greg Thelen [this message]
2010-10-05 7:22 ` [PATCH 05/10] memcg: add dirty page accounting infrastructure KAMEZAWA Hiroyuki
2010-10-05 7:35 ` Greg Thelen
2010-10-05 16:09 ` Minchan Kim
2010-10-05 20:06 ` Greg Thelen
2010-10-04 6:58 ` [PATCH 06/10] memcg: add kernel calls for memcg dirty page stats Greg Thelen
2010-10-05 6:55 ` KAMEZAWA Hiroyuki
2010-10-04 6:58 ` [PATCH 07/10] memcg: add dirty limits to mem_cgroup Greg Thelen
2010-10-05 7:07 ` KAMEZAWA Hiroyuki
2010-10-05 9:43 ` Andrea Righi
2010-10-05 19:00 ` Greg Thelen
2010-10-07 0:13 ` KAMEZAWA Hiroyuki
2010-10-07 0:27 ` Greg Thelen
2010-10-07 0:48 ` KAMEZAWA Hiroyuki
2010-10-12 0:24 ` Greg Thelen
2010-10-12 0:55 ` KAMEZAWA Hiroyuki
2010-10-12 7:32 ` Greg Thelen
2010-10-12 8:38 ` KAMEZAWA Hiroyuki
2010-10-04 6:58 ` [PATCH 08/10] memcg: add cgroupfs interface to memcg dirty limits Greg Thelen
2010-10-05 7:13 ` KAMEZAWA Hiroyuki
2010-10-05 7:33 ` Greg Thelen
2010-10-05 7:31 ` KAMEZAWA Hiroyuki
2010-10-05 9:18 ` Andrea Righi
2010-10-05 18:31 ` David Rientjes
2010-10-06 18:34 ` Greg Thelen
2010-10-06 20:54 ` Andrea Righi
2010-10-06 13:30 ` Balbir Singh
2010-10-06 13:32 ` Balbir Singh
2010-10-06 16:21 ` Greg Thelen
2010-10-06 16:24 ` Balbir Singh
2010-10-07 6:23 ` Ciju Rajan K
2010-10-07 17:46 ` Greg Thelen
2010-10-04 6:58 ` [PATCH 09/10] writeback: make determine_dirtyable_memory() static Greg Thelen
2010-10-05 7:15 ` KAMEZAWA Hiroyuki
2010-10-04 6:58 ` [PATCH 10/10] memcg: check memcg dirty limits in page writeback Greg Thelen
2010-10-05 7:29 ` KAMEZAWA Hiroyuki
2010-10-06 0:32 ` Minchan Kim
2010-10-05 4:20 ` [PATCH 00/10] memcg: per cgroup dirty page accounting Balbir Singh
2010-10-05 4:50 ` Balbir Singh
2010-10-05 5:50 ` Greg Thelen
2010-10-05 8:37 ` Ciju Rajan K
2010-10-05 22:15 ` Andrea Righi
2010-10-06 3:23 ` Balbir Singh
2010-10-18 5:56 ` KAMEZAWA Hiroyuki
2010-10-18 18:09 ` Greg Thelen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1286175485-30643-6-git-send-email-gthelen@google.com \
--to=gthelen@google.com \
--cc=akpm@linux-foundation.org \
--cc=arighi@develer.com \
--cc=balbir@linux.vnet.ibm.com \
--cc=containers@lists.osdl.org \
--cc=kamezawa.hiroyu@jp.fujitsu.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=nishimura@mxp.nes.nec.co.jp \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).