From mboxrd@z Thu Jan  1 00:00:00 1970
From: Ying Han <yinghan@google.com>
Date: Tue, 19 Apr 2011 10:58:32 -0700
Subject: Re: [PATCH 1/3] move scan_control definition to header file
In-Reply-To: <1303235496-3060-2-git-send-email-yinghan@google.com>
References: <1303235496-3060-1-git-send-email-yinghan@google.com> <1303235496-3060-2-git-send-email-yinghan@google.com>
To: Nick Piggin, KOSAKI Motohiro, Minchan Kim, Daisuke Nishimura, Balbir Singh, Tejun Heo, Pavel Emelyanov, KAMEZAWA Hiroyuki, Andrew Morton, Li Zefan, Mel Gorman, Christoph Lameter, Johannes Weiner, Rik van Riel, Hugh Dickins, Michal Hocko, Dave Hansen, Zhu Yanhai
Cc: linux-mm@kvack.org

On Tue, Apr 19, 2011 at 10:51 AM, Ying Han <yinghan@google.com> wrote:
> This patch moves the scan_control definition from mm/vmscan.c to the
> swap.h header file, which is needed later to pass the struct to the
> shrinkers.
>
> Signed-off-by: Ying Han <yinghan@google.com>
> ---
>  include/linux/swap.h |   61 ++++++++++++++++++++++++++++++++++++++++++++++
>  mm/vmscan.c          |   61 --------------------------------------------
>  2 files changed, 61 insertions(+), 61 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index ed6ebe6..cb48fbd 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -16,6 +16,67 @@ struct notifier_block;
>
>  struct bio;
>
> +/*
> + * reclaim_mode determines how the inactive list is shrunk
> + * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
> + * RECLAIM_MODE_ASYNC:  Do not block
> + * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
> + * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
> + *                            page from the LRU and reclaim all pages within a
> + *                            naturally aligned range
> + * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
> + *                          order-0 pages and then compact the zone
> + */
> +typedef unsigned __bitwise__ reclaim_mode_t;
> +#define RECLAIM_MODE_SINGLE            ((__force reclaim_mode_t)0x01u)
> +#define RECLAIM_MODE_ASYNC             ((__force reclaim_mode_t)0x02u)
> +#define RECLAIM_MODE_SYNC              ((__force reclaim_mode_t)0x04u)
> +#define RECLAIM_MODE_LUMPYRECLAIM      ((__force reclaim_mode_t)0x08u)
> +#define RECLAIM_MODE_COMPACTION        ((__force reclaim_mode_t)0x10u)
> +
> +struct scan_control {
> +       /* Incremented by the number of inactive pages that were scanned */
> +       unsigned long nr_scanned;
> +
> +       /* Number of pages freed so far during a call to shrink_zones() */
> +       unsigned long nr_reclaimed;
> +
> +       /* How many pages shrink_list() should reclaim */
> +       unsigned long nr_to_reclaim;
> +
> +       unsigned long hibernation_mode;
> +
> +       /* This context's GFP mask */
> +       gfp_t gfp_mask;
> +
> +       int may_writepage;
> +
> +       /* Can mapped pages be reclaimed? */
> +       int may_unmap;
> +
> +       /* Can pages be swapped as part of reclaim? */
> +       int may_swap;
> +
> +       int swappiness;
> +
> +       int order;
> +
> +       /*
> +        * Intend to reclaim enough continuous memory rather than reclaim
> +        * enough amount of memory. i.e, mode for high order allocation.
> +        */
> +       reclaim_mode_t reclaim_mode;
> +
> +       /* Which cgroup do we reclaim from */
> +       struct mem_cgroup *mem_cgroup;
> +
> +       /*
> +        * Nodemask of nodes allowed by the caller. If NULL, all nodes
> +        * are scanned.
> +        */
> +       nodemask_t      *nodemask;
> +};
> +
>  #define SWAP_FLAG_PREFER       0x8000  /* set if swap priority specified */
>  #define SWAP_FLAG_PRIO_MASK    0x7fff
>  #define SWAP_FLAG_PRIO_SHIFT   0
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 060e4c1..08b1ab5 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -52,67 +52,6 @@
>  #define CREATE_TRACE_POINTS
>  #include <trace/events/vmscan.h>
>
> -/*
> - * reclaim_mode determines how the inactive list is shrunk
> - * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
> - * RECLAIM_MODE_ASYNC:  Do not block
> - * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
> - * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
> - *                            page from the LRU and reclaim all pages within a
> - *                            naturally aligned range
> - * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
> - *                          order-0 pages and then compact the zone
> - */
> -typedef unsigned __bitwise__ reclaim_mode_t;
> -#define RECLAIM_MODE_SINGLE            ((__force reclaim_mode_t)0x01u)
> -#define RECLAIM_MODE_ASYNC             ((__force reclaim_mode_t)0x02u)
> -#define RECLAIM_MODE_SYNC              ((__force reclaim_mode_t)0x04u)
> -#define RECLAIM_MODE_LUMPYRECLAIM      ((__force reclaim_mode_t)0x08u)
> -#define RECLAIM_MODE_COMPACTION        ((__force reclaim_mode_t)0x10u)
> -
> -struct scan_control {
> -       /* Incremented by the number of inactive pages that were scanned */
> -       unsigned long nr_scanned;
> -
> -       /* Number of pages freed so far during a call to shrink_zones() */
> -       unsigned long nr_reclaimed;
> -
> -       /* How many pages shrink_list() should reclaim */
> -       unsigned long nr_to_reclaim;
> -
> -       unsigned long hibernation_mode;
> -
> -       /* This context's GFP mask */
> -       gfp_t gfp_mask;
> -
> -       int may_writepage;
> -
> -       /* Can mapped pages be reclaimed? */
> -       int may_unmap;
> -
> -       /* Can pages be swapped as part of reclaim? */
> -       int may_swap;
> -
> -       int swappiness;
> -
> -       int order;
> -
> -       /*
> -        * Intend to reclaim enough continuous memory rather than reclaim
> -        * enough amount of memory. i.e, mode for high order allocation.
> -        */
> -       reclaim_mode_t reclaim_mode;
> -
> -       /* Which cgroup do we reclaim from */
> -       struct mem_cgroup *mem_cgroup;
> -
> -       /*
> -        * Nodemask of nodes allowed by the caller. If NULL, all nodes
> -        * are scanned.
> -        */
> -       nodemask_t      *nodemask;
> -};
> -
>  #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
>
>  #ifdef ARCH_HAS_PREFETCH
> --
> 1.7.3.1
>
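
To make the motivation concrete: once scan_control is visible outside
mm/vmscan.c, a shrinker can consult the reclaim context it runs under.
The sketch below assumes a hypothetical three-argument shrink() callback;
that extended interface is where this series is heading, not something
this patch introduces, and example_cache_shrink() is an invented name.

/*
 * Illustrative sketch only: a shrinker consuming the scan_control that
 * this patch makes visible outside mm/vmscan.c. The three-argument
 * callback is a hypothetical extension of struct shrinker.
 */
#include <linux/mm.h>
#include <linux/swap.h>

static int example_cache_shrink(struct shrinker *shrinker, int nr_to_scan,
                                struct scan_control *sc)
{
        /* Respect the reclaim context's allocation constraints. */
        if (!(sc->gfp_mask & __GFP_FS))
                return -1;      /* cannot touch fs objects from here */

        /* A memcg-aware cache could limit itself to sc->mem_cgroup. */
        if (sc->mem_cgroup)
                nr_to_scan = min(nr_to_scan, 128);

        /* reclaim_mode is a bitmask; back off unless sync reclaim asked. */
        if (!(sc->reclaim_mode & RECLAIM_MODE_SYNC) && nr_to_scan > 256)
                nr_to_scan = 256;

        /* ... walk the cache and free up to nr_to_scan objects ... */

        return 0;       /* would return the remaining object count */
}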
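For reference, a direct-reclaim caller populates the structure roughly
like this (modelled on try_to_free_pages(); gfp_mask, order and nodemask
are the caller's parameters, and the values shown are typical rather than
mandated by this patch):

        struct scan_control sc = {
                .gfp_mask       = gfp_mask,
                .may_writepage  = !laptop_mode,
                .nr_to_reclaim  = SWAP_CLUSTER_MAX,
                .may_unmap      = 1,
                .may_swap       = 1,
                .swappiness     = vm_swappiness,
                .order          = order,
                .mem_cgroup     = NULL, /* global reclaim, no memcg target */
                .nodemask       = nodemask,
        };

Nothing in this patch changes how the structure is filled in; it only
changes where the definition lives.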
