From: Minchan Kim <minchan@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm <linux-mm@kvack.org>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Vlastimil Babka <vbabka@suse.cz>, John Dias <joaodias@google.com>,
Suren Baghdasaryan <surenb@google.com>,
pullip.cho@samsung.com, Minchan Kim <minchan@kernel.org>
Subject: [RFC 6/7] mm: make alloc_pages_bulk best effort
Date: Fri, 14 Aug 2020 10:31:30 -0700 [thread overview]
Message-ID: <20200814173131.2803002-7-minchan@kernel.org> (raw)
In-Reply-To: <20200814173131.2803002-1-minchan@kernel.org>
alloc_pages_bulk takes a best-effort approach to creating high-order
pages, so it should keep going with further ranges even when it
encounters non-movable pages. To achieve that, this patch introduces
an ALLOW_ISOLATE_FAILURE flag for start_isolate_page_range and an
alloc_bulk field in compact_control, so they can proceed with further
ranges even though some failures happen during isolation, migration,
or free page isolation.
What it does with the new flag is:
* skip the pageblock if its migratetype cannot be changed to
MIGRATE_ISOLATE
* skip the pageblock if it couldn't migrate a page for some reason
* skip the pageblock if it couldn't isolate free pages for some reason
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
include/linux/page-isolation.h | 1 +
mm/compaction.c | 17 +++++++++++++----
mm/internal.h | 1 +
mm/page_alloc.c | 32 +++++++++++++++++++++++---------
mm/page_isolation.c | 4 ++++
5 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 572458016331..b8b6789d2bd9 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -32,6 +32,7 @@ static inline bool is_migrate_isolate(int migratetype)
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
+#define ALLOW_ISOLATE_FAILURE 0x4
struct page *has_unmovable_pages(struct zone *zone, struct page *page,
int migratetype, int flags);
diff --git a/mm/compaction.c b/mm/compaction.c
index 1e4392f6fec3..94dee139ce0d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -748,15 +748,24 @@ isolate_freepages_range(struct compact_control *cc,
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, freepage_list, 0, true);
+ block_end_pfn, freepage_list,
+ cc->alloc_bulk ? 1 : 0,
+ cc->alloc_bulk ? false : true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
* there are any holes in the block (ie. invalid PFNs or
- * non-free pages).
+ * non-free pages) so just stop the isolation in the case.
+ * However, in alloc_bulk mode, we could check further range
+ * to find affordable high order free pages so keep going
+ * with next pageblock.
*/
- if (!isolated)
- break;
+ if (!isolated) {
+ if (!cc->alloc_bulk)
+ break;
+ pfn = block_end_pfn;
+ continue;
+ }
/*
* If we managed to isolate pages, it is always (1 << n) *
diff --git a/mm/internal.h b/mm/internal.h
index f9b86257fae2..71f00284326e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -244,6 +244,7 @@ struct compact_control {
bool contended; /* Signal lock or sched contention */
bool rescan; /* Rescanning the same pageblock */
bool alloc_contig; /* alloc_contig_range allocation */
+ bool alloc_bulk; /* alloc_pages_bulk allocation */
int isolate_order; /* minimum order isolated from buddy */
};
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdf956feae80..66cea47ae2b6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8359,8 +8359,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
/* This function is based on compact_zone() from compaction.c. */
unsigned int nr_reclaimed;
unsigned long pfn = start;
- unsigned int tries = 0;
- int ret = 0;
+ unsigned int tries;
+ int ret;
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
@@ -8368,6 +8368,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
migrate_prep();
+next:
+ tries = ret = 0;
while (pfn < end || !list_empty(&cc->migratepages)) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -8396,15 +8398,25 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
}
if (ret < 0) {
putback_movable_pages(&cc->migratepages);
- return ret;
+ if (cc->alloc_bulk && pfn < end) {
+ /*
+ * -EINTR means current process has fatal signal.
+ * -ENOMEM means there is no free memory.
+ * In these cases, stop the effort to work with
+ * next blocks.
+ */
+ if (ret != -EINTR && ret != -ENOMEM)
+ goto next;
+ }
}
- return 0;
+ return ret;
}
static int __alloc_contig_range(unsigned long start, unsigned long end,
unsigned int migratetype, gfp_t gfp_mask,
unsigned int alloc_order,
- struct list_head *freepage_list)
+ struct list_head *freepage_list,
+ bool alloc_bulk)
{
unsigned long outer_start, outer_end;
unsigned int order;
@@ -8422,6 +8434,7 @@ static int __alloc_contig_range(unsigned long start, unsigned long end,
.gfp_mask = current_gfp_context(gfp_mask),
.alloc_contig = true,
.isolate_order = alloc_order,
+ .alloc_bulk = alloc_bulk,
};
INIT_LIST_HEAD(&cc.migratepages);
@@ -8450,7 +8463,8 @@ static int __alloc_contig_range(unsigned long start, unsigned long end,
*/
ret = start_isolate_page_range(pfn_max_align_down(start),
- pfn_max_align_up(end), migratetype, 0);
+ pfn_max_align_up(end), migratetype,
+ alloc_bulk ? ALLOW_ISOLATE_FAILURE : 0);
if (ret < 0)
return ret;
@@ -8512,7 +8526,7 @@ static int __alloc_contig_range(unsigned long start, unsigned long end,
}
/* Make sure the range is really isolated. */
- if (test_pages_isolated(outer_start, end, 0)) {
+ if (!alloc_bulk && test_pages_isolated(outer_start, end, 0)) {
pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
@@ -8591,7 +8605,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
LIST_HEAD(freepage_list);
return __alloc_contig_range(start, end, migratetype,
- gfp_mask, 0, &freepage_list);
+ gfp_mask, 0, &freepage_list, false);
}
/**
@@ -8628,7 +8642,7 @@ int alloc_pages_bulk(unsigned long start, unsigned long end,
return -EINVAL;
ret = __alloc_contig_range(start, end, migratetype,
- gfp_mask, order, &freepage_list);
+ gfp_mask, order, &freepage_list, true);
if (ret)
return ret;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 242c03121d73..6208db89a31b 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -154,6 +154,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* and PageOffline() pages.
* REPORT_FAILURE - report details about the failure to
* isolate the range
+ * ALLOW_ISOLATE_FAILURE - skip the pageblock of the range
+ * whenever we fail to set MIGRATE_ISOLATE
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
@@ -190,6 +192,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
page = __first_valid_page(pfn, pageblock_nr_pages);
if (page) {
if (set_migratetype_isolate(page, migratetype, flags)) {
+ if (flags & ALLOW_ISOLATE_FAILURE)
+ continue;
undo_pfn = pfn;
goto undo;
}
--
2.28.0.220.ged08abb693-goog
next prev parent reply other threads:[~2020-08-14 17:31 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-08-14 17:31 [RFC 0/7] Support high-order page bulk allocation Minchan Kim
2020-08-14 17:31 ` [RFC 1/7] mm: page_owner: split page by order Minchan Kim
2020-08-14 17:31 ` [RFC 2/7] mm: introduce split_page_by_order Minchan Kim
2020-08-14 17:31 ` [RFC 3/7] mm: compaction: deal with upcoming high-order page splitting Minchan Kim
2020-08-14 17:31 ` [RFC 4/7] mm: factor __alloc_contig_range out Minchan Kim
2020-08-14 17:31 ` [RFC 5/7] mm: introduce alloc_pages_bulk API Minchan Kim
2020-08-17 17:40 ` David Hildenbrand
2020-08-14 17:31 ` Minchan Kim [this message]
2020-08-14 17:31 ` [RFC 7/7] mm/page_isolation: avoid drain_all_pages for alloc_pages_bulk Minchan Kim
2020-08-14 17:40 ` [RFC 0/7] Support high-order page bulk allocation Matthew Wilcox
2020-08-14 20:55 ` Minchan Kim
2020-08-18 2:16 ` Cho KyongHo
2020-08-18 9:22 ` Cho KyongHo
2020-08-16 12:31 ` David Hildenbrand
2020-08-17 15:27 ` Minchan Kim
2020-08-17 15:45 ` David Hildenbrand
2020-08-17 16:30 ` Minchan Kim
2020-08-17 16:44 ` David Hildenbrand
2020-08-17 17:03 ` David Hildenbrand
2020-08-17 23:34 ` Minchan Kim
2020-08-18 7:42 ` Nicholas Piggin
2020-08-18 7:49 ` David Hildenbrand
2020-08-18 15:15 ` Minchan Kim
2020-08-18 15:58 ` Matthew Wilcox
2020-08-18 16:22 ` David Hildenbrand
2020-08-18 16:49 ` Minchan Kim
2020-08-19 0:27 ` Yang Shi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200814173131.2803002-7-minchan@kernel.org \
--to=minchan@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=iamjoonsoo.kim@lge.com \
--cc=joaodias@google.com \
--cc=linux-mm@kvack.org \
--cc=pullip.cho@samsung.com \
--cc=surenb@google.com \
--cc=vbabka@suse.cz \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).