* [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath
@ 2013-01-15 0:16 Minchan Kim
2013-01-15 8:43 ` Michal Nazarewicz
2013-01-15 23:36 ` Andrew Morton
0 siblings, 2 replies; 6+ messages in thread
From: Minchan Kim @ 2013-01-15 0:16 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-mm, linux-kernel, Minchan Kim, KOSAKI Motohiro,
Michal Nazarewicz
Several mm functions now test for MIGRATE_ISOLATE, and some of them
are in the hot path, but MIGRATE_ISOLATE is used only when we enable
CONFIG_MEMORY_ISOLATION (i.e. for CMA, memory-hotplug and memory-failure),
which is not a common config option. So let's not add unnecessary
overhead and code when CONFIG_MEMORY_ISOLATION is not enabled.
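To illustrate (a minimal sketch, not part of the diff below;
some_hot_path_work() is a made-up caller used only for illustration):
with CONFIG_MEMORY_ISOLATION=n the stub helper returns a compile-time
false, so a hot-path test such as

	static void some_hot_path_work(struct zone *zone, int mt, int order)
	{
		/* is_migrate_isolate() is constant false with =n ... */
		if (!is_migrate_isolate(mt))
			/* ... so this test folds away to an unconditional call */
			__mod_zone_freepage_state(zone, 1 << order, mt);
	}

costs nothing compared with having no check at all.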
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
include/linux/mmzone.h | 2 ++
include/linux/page-isolation.h | 19 +++++++++++++++++++
mm/compaction.c | 6 +++++-
mm/page_alloc.c | 16 ++++++++++------
mm/vmstat.c | 2 ++
5 files changed, 38 insertions(+), 7 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 73b64a3..4f4c8c2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -57,7 +57,9 @@ enum {
*/
MIGRATE_CMA,
#endif
+#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
+#endif
MIGRATE_TYPES
};
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index a92061e..3fff8e7 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -1,6 +1,25 @@
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H
+#ifdef CONFIG_MEMORY_ISOLATION
+static inline bool is_migrate_isolate_page(struct page *page)
+{
+ return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
+}
+static inline bool is_migrate_isolate(int migratetype)
+{
+ return migratetype == MIGRATE_ISOLATE;
+}
+#else
+static inline bool is_migrate_isolate_page(struct page *page)
+{
+ return false;
+}
+static inline bool is_migrate_isolate(int migratetype)
+{
+ return false;
+}
+#endif
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
bool skip_hwpoisoned_pages);
diff --git a/mm/compaction.c b/mm/compaction.c
index 675937c..bb2a655 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -15,6 +15,7 @@
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
+#include <linux/page-isolation.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -215,7 +216,10 @@ static bool suitable_migration_target(struct page *page)
int migratetype = get_pageblock_migratetype(page);
/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
- if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+ if (migratetype == MIGRATE_RESERVE)
+ return false;
+
+ if (is_migrate_isolate(migratetype))
return false;
/* If the page is a large free page, then allow migration */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 82117f5..319a8f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -665,7 +665,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
__free_one_page(page, zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
- if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) {
+ if (likely(!is_migrate_isolate_page(page))) {
__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
if (is_migrate_cma(mt))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
@@ -683,7 +683,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
- if (unlikely(migratetype != MIGRATE_ISOLATE))
+ if (unlikely(!is_migrate_isolate(migratetype)))
__mod_zone_freepage_state(zone, 1 << order, migratetype);
spin_unlock(&zone->lock);
}
@@ -911,7 +911,9 @@ static int fallbacks[MIGRATE_TYPES][4] = {
[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
#endif
[MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
+#ifdef CONFIG_MEMORY_ISOLATION
[MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
+#endif
};
/*
@@ -1137,7 +1139,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
list_add_tail(&page->lru, list);
if (IS_ENABLED(CONFIG_CMA)) {
mt = get_pageblock_migratetype(page);
- if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+ if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
mt = migratetype;
}
set_freepage_migratetype(page, mt);
@@ -1321,7 +1323,7 @@ void free_hot_cold_page(struct page *page, int cold)
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
- if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+ if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, 0, migratetype);
goto out;
}
@@ -1402,7 +1404,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
order = page_order(page);
mt = get_pageblock_migratetype(page);
- if (mt != MIGRATE_ISOLATE) {
+ if (!is_migrate_isolate(mt)) {
/* Obey watermarks as if the page was being allocated */
watermark = low_wmark_pages(zone) + (1 << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
@@ -1425,7 +1427,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
int mt = get_pageblock_migratetype(page);
- if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+ if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
set_pageblock_migratetype(page,
MIGRATE_MOVABLE);
}
@@ -2911,7 +2913,9 @@ static void show_migration_types(unsigned char type)
#ifdef CONFIG_CMA
[MIGRATE_CMA] = 'C',
#endif
+#ifdef CONFIG_MEMORY_ISOLATION
[MIGRATE_ISOLATE] = 'I',
+#endif
};
char tmp[MIGRATE_TYPES + 1];
char *p = tmp;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7a65e26..b0f1db1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -628,7 +628,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
#ifdef CONFIG_CMA
"CMA",
#endif
+#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
+#endif
};
static void *frag_start(struct seq_file *m, loff_t *pos)
--
1.7.9.5
* Re: [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath
2013-01-15 0:16 [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath Minchan Kim
@ 2013-01-15 8:43 ` Michal Nazarewicz
2013-01-15 23:36 ` Andrew Morton
1 sibling, 0 replies; 6+ messages in thread
From: Michal Nazarewicz @ 2013-01-15 8:43 UTC (permalink / raw)
To: Minchan Kim, Andrew Morton; +Cc: linux-mm, linux-kernel, KOSAKI Motohiro
On Tue, Jan 15 2013, Minchan Kim wrote:
> Now mm several functions test MIGRATE_ISOLATE and some of those
> are hotpath but MIGRATE_ISOLATE is used only if we enable
> CONFIG_MEMORY_ISOLATION(ie, CMA, memory-hotplug and memory-failure)
> which are not common config option. So let's not add unnecessary
> overhead and code when we don't enable CONFIG_MEMORY_ISOLATION.
>
> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
> Cc: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
> Signed-off-by: Minchan Kim <minchan@kernel.org>
> ---
> include/linux/mmzone.h | 2 ++
> include/linux/page-isolation.h | 19 +++++++++++++++++++
> mm/compaction.c | 6 +++++-
> mm/page_alloc.c | 16 ++++++++++------
> mm/vmstat.c | 2 ++
> 5 files changed, 38 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 73b64a3..4f4c8c2 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -57,7 +57,9 @@ enum {
> */
> MIGRATE_CMA,
> #endif
> +#ifdef CONFIG_MEMORY_ISOLATION
> MIGRATE_ISOLATE, /* can't allocate from here */
> +#endif
> MIGRATE_TYPES
> };
>
> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
> index a92061e..3fff8e7 100644
> --- a/include/linux/page-isolation.h
> +++ b/include/linux/page-isolation.h
> @@ -1,6 +1,25 @@
> #ifndef __LINUX_PAGEISOLATION_H
> #define __LINUX_PAGEISOLATION_H
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +static inline bool is_migrate_isolate_page(struct page *page)
> +{
> + return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
> +}
> +static inline bool is_migrate_isolate(int migratetype)
> +{
> + return migratetype == MIGRATE_ISOLATE;
> +}
> +#else
> +static inline bool is_migrate_isolate_page(struct page *page)
> +{
> + return false;
> +}
> +static inline bool is_migrate_isolate(int migratetype)
> +{
> + return false;
> +}
> +#endif
>
> bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
> bool skip_hwpoisoned_pages);
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 675937c..bb2a655 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -15,6 +15,7 @@
> #include <linux/sysctl.h>
> #include <linux/sysfs.h>
> #include <linux/balloon_compaction.h>
> +#include <linux/page-isolation.h>
> #include "internal.h"
>
> #ifdef CONFIG_COMPACTION
> @@ -215,7 +216,10 @@ static bool suitable_migration_target(struct page *page)
> int migratetype = get_pageblock_migratetype(page);
>
> /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
> - if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
> + if (migratetype == MIGRATE_RESERVE)
> + return false;
> +
> + if (is_migrate_isolate(migratetype))
> return false;
>
> /* If the page is a large free page, then allow migration */
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 82117f5..319a8f0 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -665,7 +665,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
> /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
> __free_one_page(page, zone, 0, mt);
> trace_mm_page_pcpu_drain(page, 0, mt);
> - if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) {
> + if (likely(!is_migrate_isolate_page(page))) {
> __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
> if (is_migrate_cma(mt))
> __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
> @@ -683,7 +683,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
> zone->pages_scanned = 0;
>
> __free_one_page(page, zone, order, migratetype);
> - if (unlikely(migratetype != MIGRATE_ISOLATE))
> + if (unlikely(!is_migrate_isolate(migratetype)))
> __mod_zone_freepage_state(zone, 1 << order, migratetype);
> spin_unlock(&zone->lock);
> }
> @@ -911,7 +911,9 @@ static int fallbacks[MIGRATE_TYPES][4] = {
> [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
> #endif
> [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
> +#ifdef CONFIG_MEMORY_ISOLATION
> [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
> +#endif
> };
>
> /*
> @@ -1137,7 +1139,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
> list_add_tail(&page->lru, list);
> if (IS_ENABLED(CONFIG_CMA)) {
> mt = get_pageblock_migratetype(page);
> - if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
> + if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
> mt = migratetype;
> }
> set_freepage_migratetype(page, mt);
> @@ -1321,7 +1323,7 @@ void free_hot_cold_page(struct page *page, int cold)
> * excessively into the page allocator
> */
> if (migratetype >= MIGRATE_PCPTYPES) {
> - if (unlikely(migratetype == MIGRATE_ISOLATE)) {
> + if (unlikely(is_migrate_isolate(migratetype))) {
> free_one_page(zone, page, 0, migratetype);
> goto out;
> }
> @@ -1402,7 +1404,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
> order = page_order(page);
> mt = get_pageblock_migratetype(page);
>
> - if (mt != MIGRATE_ISOLATE) {
> + if (!is_migrate_isolate(mt)) {
> /* Obey watermarks as if the page was being allocated */
> watermark = low_wmark_pages(zone) + (1 << order);
> if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
> @@ -1425,7 +1427,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
> struct page *endpage = page + (1 << order) - 1;
> for (; page < endpage; page += pageblock_nr_pages) {
> int mt = get_pageblock_migratetype(page);
> - if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
> + if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
> set_pageblock_migratetype(page,
> MIGRATE_MOVABLE);
> }
> @@ -2911,7 +2913,9 @@ static void show_migration_types(unsigned char type)
> #ifdef CONFIG_CMA
> [MIGRATE_CMA] = 'C',
> #endif
> +#ifdef CONFIG_MEMORY_ISOLATION
> [MIGRATE_ISOLATE] = 'I',
> +#endif
> };
> char tmp[MIGRATE_TYPES + 1];
> char *p = tmp;
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index 7a65e26..b0f1db1 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -628,7 +628,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
> #ifdef CONFIG_CMA
> "CMA",
> #endif
> +#ifdef CONFIG_MEMORY_ISOLATION
> "Isolate",
> +#endif
> };
>
> static void *frag_start(struct seq_file *m, loff_t *pos)
--
Best regards, _ _
.o. | Liege of Serenely Enlightened Majesty of o' \,=./ `o
..o | Computer Science, Michał “mina86” Nazarewicz (o o)
ooo +----<email/xmpp: mpn@google.com>--------------ooO--(_)--Ooo--
* Re: [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath
2013-01-15 0:16 [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath Minchan Kim
2013-01-15 8:43 ` Michal Nazarewicz
@ 2013-01-15 23:36 ` Andrew Morton
2013-02-25 2:13 ` Minchan Kim
1 sibling, 1 reply; 6+ messages in thread
From: Andrew Morton @ 2013-01-15 23:36 UTC (permalink / raw)
To: Minchan Kim; +Cc: linux-mm, linux-kernel, KOSAKI Motohiro, Michal Nazarewicz
On Tue, 15 Jan 2013 09:16:46 +0900
Minchan Kim <minchan@kernel.org> wrote:
> Now mm several functions test MIGRATE_ISOLATE and some of those
> are hotpath but MIGRATE_ISOLATE is used only if we enable
> CONFIG_MEMORY_ISOLATION(ie, CMA, memory-hotplug and memory-failure)
> which are not common config option. So let's not add unnecessary
> overhead and code when we don't enable CONFIG_MEMORY_ISOLATION.
ugh. Better than nothing, I guess.
There remain call sites which do open-coded
get_pageblock_migratetype(page) != MIGRATE_ISOLATE
(undo_isolate_page_range() is one). Wanna clean these up as well?
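Something like this, I mean (untested sketch; the undo_isolate_page_range()
loop is written from memory, not against a real tree):

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
	-	if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
	+	if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}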
>
> ...
>
> @@ -683,7 +683,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
> zone->pages_scanned = 0;
>
> __free_one_page(page, zone, order, migratetype);
> - if (unlikely(migratetype != MIGRATE_ISOLATE))
> + if (unlikely(!is_migrate_isolate(migratetype)))
> __mod_zone_freepage_state(zone, 1 << order, migratetype);
> spin_unlock(&zone->lock);
> }
The code both before and after this patch is assuming that the
migratetype in free_one_page is likely to be MIGRATE_ISOLATE. Seems
wrong. If CONFIG_MEMORY_ISOLATION=n this ends up doing
if(unlikely(true)) which is harmless-but-amusing.
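That is, with the =n stub the statement folds down to roughly this
(a sketch of what the compiler sees, not a real diff):

	__free_one_page(page, zone, order, migratetype);
	if (unlikely(!false))		/* == if (unlikely(true)) */
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

so the freepage accounting runs unconditionally and only the branch hint
is nonsense.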
* Re: [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath
2013-01-15 23:36 ` Andrew Morton
@ 2013-02-25 2:13 ` Minchan Kim
2013-02-25 22:50 ` Andrew Morton
0 siblings, 1 reply; 6+ messages in thread
From: Minchan Kim @ 2013-02-25 2:13 UTC (permalink / raw)
To: Andrew Morton; +Cc: linux-mm, linux-kernel, KOSAKI Motohiro, Michal Nazarewicz
Hi Andrew,
Sorry for the late reply; I totally missed it. :(
On Tue, Jan 15, 2013 at 03:36:25PM -0800, Andrew Morton wrote:
> On Tue, 15 Jan 2013 09:16:46 +0900
> Minchan Kim <minchan@kernel.org> wrote:
>
> > Now mm several functions test MIGRATE_ISOLATE and some of those
> > are hotpath but MIGRATE_ISOLATE is used only if we enable
> > CONFIG_MEMORY_ISOLATION(ie, CMA, memory-hotplug and memory-failure)
> > which are not common config option. So let's not add unnecessary
> > overhead and code when we don't enable CONFIG_MEMORY_ISOLATION.
>
> ugh. Better than nothing, I guess.
>
> There remain call sites which do open-coded
>
> get_pageblock_migratetype(page) != MIGRATE_ISOLATE
>
> (undo_isolate_page_range() is one). Wanna clean these up as well?
Oops, Sure.
>
> >
> > ...
> >
> > @@ -683,7 +683,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
> > zone->pages_scanned = 0;
> >
> > __free_one_page(page, zone, order, migratetype);
> > - if (unlikely(migratetype != MIGRATE_ISOLATE))
> > + if (unlikely(!is_migrate_isolate(migratetype)))
> > __mod_zone_freepage_state(zone, 1 << order, migratetype);
> > spin_unlock(&zone->lock);
> > }
>
> The code both before and after this patch is assuming that the
> migratetype in free_one_page is likely to be MIGRATE_ISOLATE. Seems
> wrong. If CONFIG_MEMORY_ISOLATION=n this ends up doing
> if(unlikely(true)) which is harmless-but-amusing.
From the beginning, in [2139cbe627, cma: fix counting of isolated pages],
it was wrong. We can't be sure it is very likely: if free_one_page is
called from the order-0 free path it is, but if it is called from the
high-order free path we can't say that. So I think it would be better to
remove the unlikely.
It's a trivial patch, so should I send it now, or after you release the
first mmotm once the merge window closes? Andrew, which is better?
--
Kind regards,
Minchan Kim
* Re: [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath
2013-02-25 2:13 ` Minchan Kim
@ 2013-02-25 22:50 ` Andrew Morton
2013-02-26 0:00 ` Minchan Kim
0 siblings, 1 reply; 6+ messages in thread
From: Andrew Morton @ 2013-02-25 22:50 UTC (permalink / raw)
To: Minchan Kim; +Cc: linux-mm, linux-kernel, KOSAKI Motohiro, Michal Nazarewicz
On Mon, 25 Feb 2013 11:13:08 +0900
Minchan Kim <minchan@kernel.org> wrote:
> >
> > >
> > > ...
> > >
> > > @@ -683,7 +683,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
> > > zone->pages_scanned = 0;
> > >
> > > __free_one_page(page, zone, order, migratetype);
> > > - if (unlikely(migratetype != MIGRATE_ISOLATE))
> > > + if (unlikely(!is_migrate_isolate(migratetype)))
> > > __mod_zone_freepage_state(zone, 1 << order, migratetype);
> > > spin_unlock(&zone->lock);
> > > }
> >
> > The code both before and after this patch is assuming that the
> > migratetype in free_one_page is likely to be MIGRATE_ISOLATE. Seems
> > wrong. If CONFIG_MEMORY_ISOLATION=n this ends up doing
> > if(unlikely(true)) which is harmless-but-amusing.
>
> >From the beginning of [2139cbe627, cma: fix counting of isolated pages],
> it was wrong. We can't make sure it's very likely.
> If it is called by order-0 page free path, it is but if it is called by
> high order page free path, we can't.
> So I think it would be better to remove unlikley.
Order-0 pages surely preponderate, so I'd say that "likely" is the way
to go.
I don't recall anyone ever demonstrating that likely/unlikely actually
does anything useful. It would be interesting to have a play around,
see if it actually does good things to the code generation.
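For reference, they are just thin wrappers around gcc's __builtin_expect()
(roughly, from include/linux/compiler.h, ignoring the branch-profiling
variants):

	# define likely(x)	__builtin_expect(!!(x), 1)
	# define unlikely(x)	__builtin_expect(!!(x), 0)

so all they can really do is influence which side of the branch gcc lays
out as the fall-through path.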
I think someone (perhaps in or near Dave Jones?) once had a patch which
added counters to likely/unlikely, so the kernel can accumulate and
then report upon the hit/miss ratio at each site. iirc, an alarmingly
large number of the sites were deoptimisations!
> They are trivial patch so send it now or send it after you release
> first mmotm after finishing merge window?
It's in mainline now.
* Re: [PATCH v2] mm: remove MIGRATE_ISOLATE check in hotpath
2013-02-25 22:50 ` Andrew Morton
@ 2013-02-26 0:00 ` Minchan Kim
0 siblings, 0 replies; 6+ messages in thread
From: Minchan Kim @ 2013-02-26 0:00 UTC (permalink / raw)
To: Andrew Morton; +Cc: linux-mm, linux-kernel, KOSAKI Motohiro, Michal Nazarewicz
On Mon, Feb 25, 2013 at 02:50:11PM -0800, Andrew Morton wrote:
> On Mon, 25 Feb 2013 11:13:08 +0900
> Minchan Kim <minchan@kernel.org> wrote:
>
> > >
> > > >
> > > > ...
> > > >
> > > > @@ -683,7 +683,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
> > > > zone->pages_scanned = 0;
> > > >
> > > > __free_one_page(page, zone, order, migratetype);
> > > > - if (unlikely(migratetype != MIGRATE_ISOLATE))
> > > > + if (unlikely(!is_migrate_isolate(migratetype)))
> > > > __mod_zone_freepage_state(zone, 1 << order, migratetype);
> > > > spin_unlock(&zone->lock);
> > > > }
> > >
> > > The code both before and after this patch is assuming that the
> > > migratetype in free_one_page is likely to be MIGRATE_ISOLATE. Seems
> > > wrong. If CONFIG_MEMORY_ISOLATION=n this ends up doing
> > > if(unlikely(true)) which is harmless-but-amusing.
> >
> > >From the beginning of [2139cbe627, cma: fix counting of isolated pages],
> > it was wrong. We can't make sure it's very likely.
> > If it is called by order-0 page free path, it is but if it is called by
> > high order page free path, we can't.
> > So I think it would be better to remove unlikley.
>
> Order-0 pages surely preponderate, so I'd say that "likely" is the way
> to go.
Okay then, let's set high-order allocation aside for a moment.
First, let's look at the CONFIG_MEMORY_ISOLATION=y case.
For order-0 pages, free_hot_cold_page calls free_one_page only in an
unlikely case:
void free_hot_cold_page()
{
	...
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		...
	}
So if free_one_page is called for an order-0 page, it is only for
MIGRATE_ISOLATE, and unlikely(!is_migrate_isolate(migratetype)) in
free_one_page does make sense to me.
In the CONFIG_MEMORY_ISOLATION=n case, is_migrate_isolate below is always
false, so the branch can be compiled out and free_one_page is called only
from the high-order free path. So if you don't mind the high-order free
path hitting this likely/unlikely, I think the current code doesn't have
any problem.
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) { /* ==> always false */
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
In summary, if you don't care about the high-order free path, there is
no problem.
>
> I don't recall anyone ever demonstrating that likely/unlikely actually
> does anything useful. It would be interesting to have a play around,
> see if it actually does good things to the code generation.
Yes, especially for the page alloc/free path.
>
> I think someone (perhaps in or near Dave Jones?) once had a patch which
> added counters to likely/unlikely, so the kernel can accumulate and
> then report upon the hit/miss ratio at each site. iirc, an alarmingly
> large number of the sites were deoptimisations!
It seems you mean the "Branch Profiling (Trace likely/unlikely profiler)"
work by Steven Rostedt. Anyway, it's a rather troublesome job and needs
many workloads, but it's worthwhile. I'll queue it up on my TODO list. :)
>
> > They are trivial patch so send it now or send it after you release
> > first mmotm after finishing merge window?
>
> It's in mainline now.
I will send a fix only for undo_isolate_page_range if you don't object
to my opinion above.
Thanks.
--
Kind regards,
Minchan Kim