linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Greg Thelen <gthelen@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	containers@lists.osdl.org, Andrea Righi <arighi@develer.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
	Minchan Kim <minchan.kim@gmail.com>,
	Ciju Rajan K <ciju@linux.vnet.ibm.com>,
	David Rientjes <rientjes@google.com>,
	Wu Fengguang <fengguang.wu@intel.com>,
	Greg Thelen <gthelen@google.com>
Subject: [PATCH v4 11/11] memcg: check memcg dirty limits in page writeback
Date: Fri, 29 Oct 2010 00:09:14 -0700	[thread overview]
Message-ID: <1288336154-23256-12-git-send-email-gthelen@google.com> (raw)
In-Reply-To: <1288336154-23256-1-git-send-email-gthelen@google.com>

If the current process is in a non-root memcg, then
balance_dirty_pages() will consider the memcg dirty limits
as well as the system-wide limits.  This allows different
cgroups to have distinct dirty limits which trigger direct
and background writeback at different levels.

Signed-off-by: Andrea Righi <arighi@develer.com>
Signed-off-by: Greg Thelen <gthelen@google.com>
---
Changelog since v3:
- Leave determine_dirtyable_memory() static.  v3 made it non-static.
- balance_dirty_pages() now considers both system and memcg dirty limits and
  usage data.  This data is retrieved with global_dirty_info() and
  memcg_dirty_info().  

 mm/page-writeback.c |  109 ++++++++++++++++++++++++++++++++++++--------------
 1 files changed, 78 insertions(+), 31 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b3bb2fb..57caee5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -131,6 +131,18 @@ EXPORT_SYMBOL(laptop_mode);
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
+static unsigned long dirty_writeback_pages(void)
+{
+	s64 ret;
+
+	ret = mem_cgroup_page_stat(MEMCG_NR_DIRTY_WRITEBACK_PAGES);
+	if (ret < 0)
+		ret = global_page_state(NR_UNSTABLE_NFS) +
+			global_page_state(NR_WRITEBACK);
+
+	return ret;
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -398,45 +410,67 @@ unsigned long determine_dirtyable_memory(void)
 }
 
 /*
+ * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
+ * runtime tasks.
+ */
+static inline void adjust_dirty_info(struct dirty_info *info)
+{
+	struct task_struct *tsk;
+
+	if (info->background_thresh >= info->dirty_thresh)
+		info->background_thresh = info->dirty_thresh / 2;
+	tsk = current;
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+		info->background_thresh += info->background_thresh / 4;
+		info->dirty_thresh += info->dirty_thresh / 4;
+	}
+}
+
+/*
  * global_dirty_info - return background-writeback and dirty-throttling
  * thresholds as well as dirty usage metrics.
  *
  * Calculate the dirty thresholds based on sysctl parameters
  * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
  * - vm.dirty_ratio             or  vm.dirty_bytes
- * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * runtime tasks.
  */
 void global_dirty_info(struct dirty_info *info)
 {
-	unsigned long background;
-	unsigned long dirty;
 	unsigned long available_memory = determine_dirtyable_memory();
-	struct task_struct *tsk;
 
 	if (vm_dirty_bytes)
-		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
+		info->dirty_thresh = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 	else
-		dirty = (vm_dirty_ratio * available_memory) / 100;
+		info->dirty_thresh = (vm_dirty_ratio * available_memory) / 100;
 
 	if (dirty_background_bytes)
-		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
+		info->background_thresh =
+			DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
 	else
-		background = (dirty_background_ratio * available_memory) / 100;
+		info->background_thresh =
+			(dirty_background_ratio * available_memory) / 100;
 
 	info->nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 				global_page_state(NR_UNSTABLE_NFS);
 	info->nr_writeback = global_page_state(NR_WRITEBACK);
 
-	if (background >= dirty)
-		background = dirty / 2;
-	tsk = current;
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
-		background += background / 4;
-		dirty += dirty / 4;
-	}
-	info->background_thresh = background;
-	info->dirty_thresh = dirty;
+	adjust_dirty_info(info);
+}
+
+/*
+ * Calculate the background-writeback and dirty-throttling thresholds and dirty
+ * usage metrics from the current task's memcg dirty limit parameters.  Returns
+ * false if no memcg limits exist.
+ */
+static bool memcg_dirty_info(struct dirty_info *info)
+{
+	unsigned long available_memory = determine_dirtyable_memory();
+
+	if (!mem_cgroup_dirty_info(available_memory, info))
+		return false;
+
+	adjust_dirty_info(info);
+	return true;
 }
 
 /*
@@ -480,7 +514,8 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 static void balance_dirty_pages(struct address_space *mapping,
 				unsigned long write_chunk)
 {
-	struct dirty_info dirty_info;
+	struct dirty_info sys_info;
+	struct dirty_info memcg_info;
 	long bdi_nr_reclaimable;
 	long bdi_nr_writeback;
 	unsigned long bdi_thresh;
@@ -497,19 +532,27 @@ static void balance_dirty_pages(struct address_space *mapping,
 			.range_cyclic	= 1,
 		};
 
-		global_dirty_info(&dirty_info);
+		global_dirty_info(&sys_info);
+
+		if (!memcg_dirty_info(&memcg_info))
+			memcg_info = sys_info;
 
 		/*
 		 * Throttle it only when the background writeback cannot
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (dirty_info.nr_reclaimable + dirty_info.nr_writeback <=
-				(dirty_info.background_thresh +
-				 dirty_info.dirty_thresh) / 2)
+		if ((sys_info.nr_reclaimable + sys_info.nr_writeback <=
+				(sys_info.background_thresh +
+				 sys_info.dirty_thresh) / 2) &&
+		    (memcg_info.nr_reclaimable + memcg_info.nr_writeback <=
+				(memcg_info.background_thresh +
+				 memcg_info.dirty_thresh) / 2))
 			break;
 
-		bdi_thresh = bdi_dirty_limit(bdi, dirty_info.dirty_thresh);
+		bdi_thresh = bdi_dirty_limit(bdi,
+				min(sys_info.dirty_thresh,
+				    memcg_info.dirty_thresh));
 		bdi_thresh = task_dirty_limit(current, bdi_thresh);
 
 		/*
@@ -538,9 +581,12 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 */
 		dirty_exceeded =
 			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
-			|| (dirty_info.nr_reclaimable +
-			    dirty_info.nr_writeback >
-			    dirty_info.dirty_thresh);
+			|| (sys_info.nr_reclaimable +
+			    sys_info.nr_writeback >
+			    sys_info.dirty_thresh)
+			|| (memcg_info.nr_reclaimable +
+			    memcg_info.nr_writeback >
+			    memcg_info.dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;
@@ -593,8 +639,10 @@ static void balance_dirty_pages(struct address_space *mapping,
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && (dirty_info.nr_reclaimable >
-			      dirty_info.background_thresh)))
+	    (!laptop_mode && ((sys_info.nr_reclaimable >
+			       sys_info.background_thresh) ||
+			      (memcg_info.nr_reclaimable >
+			       memcg_info.background_thresh))))
 		bdi_start_background_writeback(bdi);
 }
 
@@ -666,8 +714,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 		dirty_info.dirty_thresh +=
 			dirty_info.dirty_thresh / 10;      /* wheeee... */
 
-                if (global_page_state(NR_UNSTABLE_NFS) +
-		    global_page_state(NR_WRITEBACK) <= dirty_info.dirty_thresh)
+		if (dirty_writeback_pages() <= dirty_info.dirty_thresh)
 			break;
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
 
-- 
1.7.3.1

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>

  parent reply	other threads:[~2010-10-29  7:15 UTC|newest]

Thread overview: 37+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-10-29  7:09 [PATCH v4 00/11] memcg: per cgroup dirty page accounting Greg Thelen
2010-10-29  7:09 ` [PATCH v4 01/11] memcg: add page_cgroup flags for dirty page tracking Greg Thelen
2010-10-29  7:09 ` [PATCH v4 02/11] memcg: document cgroup dirty memory interfaces Greg Thelen
2010-10-29 11:03   ` Wu Fengguang
2010-10-29 21:35     ` Greg Thelen
2010-10-30  3:02       ` Wu Fengguang
2010-10-29 20:19   ` Andrew Morton
2010-10-29 21:37     ` Greg Thelen
2010-10-29  7:09 ` [PATCH v4 03/11] memcg: create extensible page stat update routines Greg Thelen
2010-10-31 14:48   ` Ciju Rajan K
2010-10-31 20:11     ` Greg Thelen
2010-11-01 20:16       ` Ciju Rajan K
2010-11-02 19:35       ` Ciju Rajan K
2010-10-29  7:09 ` [PATCH v4 04/11] memcg: add lock to synchronize page accounting and migration Greg Thelen
2010-10-29  7:09 ` [PATCH v4 05/11] writeback: create dirty_info structure Greg Thelen
2010-10-29  7:50   ` KAMEZAWA Hiroyuki
2010-11-18  0:49   ` Andrew Morton
2010-11-18  0:50     ` Andrew Morton
2010-11-18  2:02     ` Greg Thelen
2010-10-29  7:09 ` [PATCH v4 06/11] memcg: add dirty page accounting infrastructure Greg Thelen
2010-10-29 11:13   ` Wu Fengguang
2010-10-29 11:17     ` KAMEZAWA Hiroyuki
2010-10-29  7:09 ` [PATCH v4 07/11] memcg: add kernel calls for memcg dirty page stats Greg Thelen
2010-10-29  7:09 ` [PATCH v4 08/11] memcg: add dirty limits to mem_cgroup Greg Thelen
2010-10-29  7:41   ` KAMEZAWA Hiroyuki
2010-10-29 16:00     ` Greg Thelen
2010-10-29  7:09 ` [PATCH v4 09/11] memcg: CPU hotplug lockdep warning fix Greg Thelen
2010-10-29 20:19   ` Andrew Morton
2010-10-29  7:09 ` [PATCH v4 10/11] memcg: add cgroupfs interface to memcg dirty limits Greg Thelen
2010-10-29  7:43   ` KAMEZAWA Hiroyuki
2010-10-29  7:09 ` Greg Thelen [this message]
2010-10-29  7:48   ` [PATCH v4 11/11] memcg: check memcg dirty limits in page writeback KAMEZAWA Hiroyuki
2010-10-29 16:06     ` Greg Thelen
2010-10-31 20:03       ` Wu Fengguang
2010-10-29 20:19 ` [PATCH v4 00/11] memcg: per cgroup dirty page accounting Andrew Morton
2010-10-30 21:46   ` Greg Thelen
2010-11-02 19:33     ` Ciju Rajan K

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1288336154-23256-12-git-send-email-gthelen@google.com \
    --to=gthelen@google.com \
    --cc=akpm@linux-foundation.org \
    --cc=arighi@develer.com \
    --cc=balbir@linux.vnet.ibm.com \
    --cc=ciju@linux.vnet.ibm.com \
    --cc=containers@lists.osdl.org \
    --cc=fengguang.wu@intel.com \
    --cc=kamezawa.hiroyu@jp.fujitsu.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=minchan.kim@gmail.com \
    --cc=nishimura@mxp.nes.nec.co.jp \
    --cc=rientjes@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).