* [PATCH 1/2] mm: replace COMPACTION_BUILD with IS_ENABLED(CONFIG_COMPACTION)
@ 2012-02-14 21:37 Konstantin Khlebnikov
2012-02-14 21:37 ` [PATCH 2/2] mm: replace NUMA_BUILD with IS_ENABLED(CONFIG_NUMA) Konstantin Khlebnikov
From: Konstantin Khlebnikov @ 2012-02-14 21:37 UTC (permalink / raw)
To: linux-mm, Andrew Morton, linux-kernel
One more candidate for replacement with the IS_ENABLED() macro.
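For reference, a simplified, self-contained sketch of the preprocessor trick behind IS_ENABLED(). This is not a verbatim copy of include/linux/kconfig.h (the real macro also recognizes the CONFIG_FOO_MODULE form for =m options and uses different helper names); it only shows how a "#define CONFIG_FOO 1" coming from the generated config collapses to a plain 0/1 constant usable in an ordinary C condition:

#include <stdio.h>

#define CONFIG_COMPACTION 1	/* pretend Kconfig set this option to 'y' */

/*
 * If CONFIG_x is defined to 1, the token paste below produces
 * __ARG_PLACEHOLDER_1, which expands to "0,".  The extra comma shifts
 * the argument list so __take_second() picks up the 1; if CONFIG_x is
 * undefined, nothing shifts and __take_second() picks up the 0.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second(ignored, val, ...) val
#define __is_defined(arg1_or_junk) __take_second(arg1_or_junk 1, 0, 0)
#define _is_enabled(val) __is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) _is_enabled(option)

int main(void)
{
	if (IS_ENABLED(CONFIG_COMPACTION))
		printf("compaction path compiled in\n");
	else	/* still parsed and type-checked, then dropped as dead code */
		printf("order-0 reclaim only\n");
	return 0;
}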
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
---
 include/linux/kernel.h |    7 -------
 mm/vmscan.c            |    4 ++--
 2 files changed, 2 insertions(+), 9 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e834342..1300307 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -733,13 +733,6 @@ extern int __build_bug_on_failed;
#define NUMA_BUILD 0
#endif
-/* This helps us avoid #ifdef CONFIG_COMPACTION */
-#ifdef CONFIG_COMPACTION
-#define COMPACTION_BUILD 1
-#else
-#define COMPACTION_BUILD 0
-#endif
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 25ad7ad..4061e91 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -376,7 +376,7 @@ static void set_reclaim_mode(int priority, struct scan_control *sc,
* reclaim/compaction.Depending on the order, we will either set the
* sync mode or just reclaim order-0 pages later.
*/
- if (COMPACTION_BUILD)
+ if (IS_ENABLED(CONFIG_COMPACTION))
sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
else
sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
@@ -2255,7 +2255,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
continue;
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
- if (COMPACTION_BUILD) {
+ if (IS_ENABLED(CONFIG_COMPACTION)) {
/*
* If we already have plenty of memory free for
* compaction in this zone, don't free any more.
--
* [PATCH 2/2] mm: replace NUMA_BUILD with IS_ENABLED(CONFIG_NUMA)
2012-02-14 21:37 [PATCH 1/2] mm: replace COMPACTION_BUILD with IS_ENABLED(CONFIG_COMPACTION) Konstantin Khlebnikov
@ 2012-02-14 21:37 ` Konstantin Khlebnikov
From: Konstantin Khlebnikov @ 2012-02-14 21:37 UTC (permalink / raw)
To: linux-mm, Andrew Morton, linux-kernel
Another candidate for replacement with the IS_ENABLED() macro.
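For reference, a minimal sketch (with hypothetical helper names, not kernel code) of why an ordinary C conditional on a 0/1 constant is preferable to #ifdef in code like show_node() or gfp_zonelist(): both branches are always parsed and type-checked, so NUMA-only code cannot silently bit-rot in !CONFIG_NUMA builds, while constant folding still removes the dead branch from the object code. IS_ENABLED(CONFIG_NUMA) gives the kernel the same effect without a hand-rolled NUMA_BUILD definition:

#include <stdio.h>

/*
 * Stand-in for the generated autoconf.h: with CONFIG_NUMA=n the symbol
 * is simply left undefined and the helper below collapses to a constant 0.
 */
#ifdef CONFIG_NUMA
#define NUMA_ENABLED 1
#else
#define NUMA_ENABLED 0
#endif

/* Hypothetical stand-in for zone_to_nid(): map a cpu to a node id. */
static int node_of(int cpu)
{
	return cpu / 4;
}

static void show_node_for(int cpu)
{
	/*
	 * Under "#ifdef CONFIG_NUMA" the call below would vanish from the
	 * translation unit entirely and could break unnoticed; with a 0/1
	 * constant it is still compiled, then discarded as dead code
	 * whenever NUMA_ENABLED is 0.
	 */
	if (NUMA_ENABLED)
		printf("Node %d ", node_of(cpu));
	printf("cpu %d\n", cpu);
}

int main(void)
{
	show_node_for(5);
	return 0;
}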
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
---
 include/linux/gfp.h    |    2 +-
 include/linux/kernel.h |    7 -------
 mm/page_alloc.c        |   18 ++++++++++--------
 mm/vmalloc.c           |    4 ++--
 4 files changed, 13 insertions(+), 18 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 581e74b..17a33d2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -264,7 +264,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
static inline int gfp_zonelist(gfp_t flags)
{
- if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
+ if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
return 1;
return 0;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1300307..f7cf0b6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -726,13 +726,6 @@ extern int __build_bug_on_failed;
/* Trap pasters of __FUNCTION__ at compile-time */
#define __FUNCTION__ (__func__)
-/* This helps us to avoid #ifdef CONFIG_NUMA */
-#ifdef CONFIG_NUMA
-#define NUMA_BUILD 1
-#else
-#define NUMA_BUILD 0
-#endif
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7b7358d..dd4ea43 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1720,7 +1720,7 @@ zonelist_scan:
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask) {
- if (NUMA_BUILD && zlc_active &&
+ if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
if ((alloc_flags & ALLOC_CPUSET) &&
@@ -1766,7 +1766,8 @@ zonelist_scan:
classzone_idx, alloc_flags))
goto try_this_zone;
- if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ !did_zlc_setup && nr_online_nodes > 1) {
/*
* we do zlc_setup if there are multiple nodes
* and before considering the first zone allowed
@@ -1784,7 +1785,7 @@ zonelist_scan:
* As we may have just activated ZLC, check if the first
* eligible zone has failed zone_reclaim recently.
*/
- if (NUMA_BUILD && zlc_active &&
+ if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
@@ -1810,11 +1811,11 @@ try_this_zone:
if (page)
break;
this_zone_full:
- if (NUMA_BUILD)
+ if (IS_ENABLED(CONFIG_NUMA))
zlc_mark_zone_full(zonelist, z);
}
- if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+ if (IS_ENABLED(CONFIG_NUMA) && unlikely(page == NULL && zlc_active)) {
/* Disable zlc cache for second zonelist scan */
zlc_active = 0;
goto zonelist_scan;
@@ -2076,7 +2077,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
return NULL;
/* After successful reclaim, reconsider all zones for allocation */
- if (NUMA_BUILD)
+ if (IS_ENABLED(CONFIG_NUMA))
zlc_clear_zones_full(zonelist);
retry:
@@ -2209,7 +2210,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
* allowed per node queues are empty and that nodes are
* over allocated.
*/
- if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
restart:
@@ -2585,7 +2587,7 @@ unsigned int nr_free_pagecache_pages(void)
static inline void show_node(struct zone *zone)
{
- if (NUMA_BUILD)
+ if (IS_ENABLED(CONFIG_NUMA))
printk("Node %d ", zone_to_nid(zone));
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86ce9a5..ae4ec9e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2538,7 +2538,7 @@ static void s_stop(struct seq_file *m, void *p)
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
- if (NUMA_BUILD) {
+ if (IS_ENABLED(CONFIG_NUMA)) {
unsigned int nr, *counters = m->private;
if (!counters)
@@ -2603,7 +2603,7 @@ static int vmalloc_open(struct inode *inode, struct file *file)
unsigned int *ptr = NULL;
int ret;
- if (NUMA_BUILD) {
+ if (IS_ENABLED(CONFIG_NUMA)) {
ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
--