From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
To: akpm@linux-foundation.org, mgorman@suse.de,
	matthew.garrett@nebula.com, dave@sr71.net, rientjes@google.com,
	riel@redhat.com, arjan@linux.intel.com,
	srinivas.pandruvada@linux.intel.com,
	maxime.coquelin@stericsson.com, loic.pallardy@stericsson.com,
	kamezawa.hiroyu@jp.fujitsu.com, lenb@kernel.org, rjw@sisk.pl
Cc: gargankita@gmail.com, paulmck@linux.vnet.ibm.com,
	amit.kachhap@linaro.org, svaidy@linux.vnet.ibm.com,
	andi@firstfloor.org, wujianguo@huawei.com, kmpark@infradead.org,
	thomas.abraham@linaro.org, santosh.shilimkar@ti.com,
	srivatsa.bhat@linux.vnet.ibm.com, linux-pm@vger.kernel.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH v2 13/15] mm: Implement the worker function for memory region compaction
Date: Wed, 10 Apr 2013 03:18:45 +0530
Message-ID: <20130409214843.4500.3852.stgit@srivatsabhat.in.ibm.com>
In-Reply-To: <20130409214443.4500.44168.stgit@srivatsabhat.in.ibm.com>

We are going to invoke the memory compaction algorithms for region evacuation
from worker threads, rather than dedicating a separate kthread to the task. So
add the worker infrastructure needed to perform this.

In the worker, we calculate the cost of migration/compaction for a given
region: if no more than 32 pages need to be migrated, we go ahead; otherwise
we deem the effort too costly and abort the compaction.
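
For context, the trigger site (added by the next patch in this series) could
drive this worker along the lines sketched below. This is only an
illustrative sketch, not code from this patch; queue_mem_power_work is a
hypothetical name:

	static void queue_mem_power_work(struct zone *zone,
					 struct zone_mem_region *region)
	{
		struct mem_power_ctrl *mpc = &zone->mem_power_ctrl;

		/* Don't re-queue while a previous evacuation is in flight. */
		if (is_mem_pwr_work_in_progress(mpc))
			return;

		mpc->region = region;
		set_mem_pwr_work_in_progress(mpc);

		/* Run mem_power_mgmt_fn() from the shared worker pool. */
		queue_work(system_unbound_wq, &mpc->work);
	}

Driving the evacuation from the shared workqueue pool avoids dedicating a
kthread per zone, while still keeping the work off the allocator's hot paths.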

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 include/linux/mm.h     |   20 ++++++++++++++++++++
 include/linux/mmzone.h |   21 +++++++++++++++++++++
 mm/page_alloc.c        |   33 +++++++++++++++++++++++++++++++++
 3 files changed, 74 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index cb0d898..e380eeb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -755,6 +755,26 @@ static inline void set_next_region_in_freelist(struct free_list *free_list)
 	}
 }
 
+static inline int is_mem_pwr_work_in_progress(struct mem_power_ctrl *mpc)
+{
+	if (mpc->work_status == MEM_PWR_WORK_IN_PROGRESS)
+		return 1;
+	return 0;
+}
+
+static inline void set_mem_pwr_work_in_progress(struct mem_power_ctrl *mpc)
+{
+	mpc->work_status = MEM_PWR_WORK_IN_PROGRESS;
+	smp_mb();
+}
+
+static inline void set_mem_pwr_work_complete(struct mem_power_ctrl *mpc)
+{
+	mpc->work_status = MEM_PWR_WORK_COMPLETE;
+	mpc->region = NULL;
+	smp_mb();
+}
+
 #ifdef SECTION_IN_PAGE_FLAGS
 static inline void set_page_section(struct page *page, unsigned long section)
 {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e209e9..fdadd2a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -17,6 +17,7 @@
 #include <linux/pageblock-flags.h>
 #include <linux/page-flags-layout.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <asm/page.h>
 
 /* Free memory management - zoned buddy allocator.  */
@@ -337,6 +338,24 @@ enum zone_type {
 
 #ifndef __GENERATING_BOUNDS_H
 
+/*
+ * In order to evacuate a memory region, if the number of pages to be
+ * migrated via compaction exceeds this threshold, the effort is
+ * considered too costly and should be aborted.
+ */
+#define MAX_NR_MEM_PWR_MIGRATE_PAGES	32
+
+enum {
+	MEM_PWR_WORK_COMPLETE = 0,
+	MEM_PWR_WORK_IN_PROGRESS
+};
+
+struct mem_power_ctrl {
+	struct work_struct work;
+	struct zone_mem_region *region;
+	int work_status;
+};
+
 struct zone_mem_region {
 	unsigned long start_pfn;
 	unsigned long end_pfn;
@@ -405,6 +424,8 @@ struct zone {
 	struct zone_mem_region	zone_regions[MAX_NR_ZONE_REGIONS];
 	int 			nr_zone_regions;
 
+	struct mem_power_ctrl	mem_power_ctrl;
+
 #ifndef CONFIG_SPARSEMEM
 	/*
 	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40a3aa6..db7b892 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5002,6 +5002,35 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
+static void mem_power_mgmt_fn(struct work_struct *work)
+{
+	struct mem_power_ctrl *mpc;
+	struct zone_mem_region *region;
+	unsigned long pages_in_use;
+	struct zone *zone;
+
+	mpc = container_of(work, struct mem_power_ctrl, work);
+
+	if (!mpc->region)
+		return; /* No work to do */
+
+	zone = container_of(mpc, struct zone, mem_power_ctrl);
+	region = mpc->region;
+
+	if (region == zone->zone_regions)
+		return; /* No point compacting region 0. */
+
+	pages_in_use = region->present_pages - region->nr_free;
+
+	if (pages_in_use > 0 &&
+			(pages_in_use <= MAX_NR_MEM_PWR_MIGRATE_PAGES)) {
+
+		evacuate_mem_region(zone, region);
+	}
+
+	set_mem_pwr_work_complete(mpc);
+}
+
 static void __meminit init_node_memory_regions(struct pglist_data *pgdat)
 {
 	int nid = pgdat->node_id;
@@ -5094,6 +5123,10 @@ static void __meminit init_zone_memory_regions(struct pglist_data *pgdat)
 
 		zone_init_free_lists_late(z);
 
+		INIT_WORK(&z->mem_power_ctrl.work, mem_power_mgmt_fn);
+		z->mem_power_ctrl.region = NULL;
+		set_mem_pwr_work_complete(&z->mem_power_ctrl);
+
 		/*
 		 * Revisit the last visited node memory region, in case it
 		 * spans multiple zones.

