From: Doug Berger <opendmb-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
To: Andrew Morton <akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>
Cc: Jonathan Corbet <corbet-T1hC0tSOHrs@public.gmane.org>,
Rob Herring <robh+dt-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
Krzysztof Kozlowski
<krzysztof.kozlowski+dt-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>,
Frank Rowand
<frowand.list-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>,
Mike Kravetz
<mike.kravetz-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>,
Muchun Song <songmuchun-EC8Uxl6Npydl57MIdRCFDg@public.gmane.org>,
Mike Rapoport <rppt-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
Christoph Hellwig <hch-jcswGhMUV9g@public.gmane.org>,
Marek Szyprowski
<m.szyprowski-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>,
Robin Murphy <robin.murphy-5wv7dgnIgG8@public.gmane.org>,
Borislav Petkov <bp-l3A5Bk7waGM@public.gmane.org>,
"Paul E. McKenney"
<paulmck-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
Neeraj Upadhyay
<quic_neeraju-jfJNa2p1gH1BDgjK7y7TUQ@public.gmane.org>,
Randy Dunlap <rdunlap-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>,
Damien Le Moal
<damien.lemoal-yzvPICuk2AC/Fx7ZUtofftBPR1lH4CV8@public.gmane.org>,
Doug Berger <opendmb-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>,
Florian Fainelli
<f.fainelli-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>,
David Hildenbrand <david-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>,
Zi Yan <ziy-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>,
Oscar Salvador <osalvador@su>
Subject: [PATCH 05/21] mm/hugetlb: allow migrated hugepage to dissolve when freed
Date: Tue, 13 Sep 2022 12:54:52 -0700
Message-ID: <20220913195508.3511038-6-opendmb@gmail.com>
In-Reply-To: <20220913195508.3511038-1-opendmb-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>

There is no isolation mechanism for hugepages, so a hugepage that
is migrated is returned to its hugepage freelist. This creates a
problem for alloc_contig_range() because such freed hugepages can
immediately be allocated as migrate targets for subsequent hugepage
migration attempts.
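
To make the cycle concrete, here is an illustrative sketch (comments
only, not code from this patch; the function names refer to existing
hugetlb/migrate internals):

        /*
         * 1) alloc_contig_range() migrates a hugepage out of the
         *    target range; the old hugepage is freed and re-enters
         *    the per-node freelist via free_huge_page() ->
         *    enqueue_huge_page().
         *
         * 2) The next hugepage migration in the same scan requests a
         *    target via alloc_migration_target(), which dequeues from
         *    that same freelist and can hand back the page freed in
         *    step 1, keeping it pinned inside the range.
         */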

Even if the migration succeeds, the alloc_contig_range() attempt
will still fail because test_pages_isolated() will find that the
now-free hugepages have not been dissolved.

A subsequent alloc_contig_range() attempt is then necessary so that
isolate_migratepages_range() can find the freed hugepage and
dissolve it (assuming it has not been reallocated in the meantime).
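
Absent this change, a caller is effectively left with a retry loop
along these lines (a hedged sketch; the retry count is arbitrary):

        int ret, retries = 5;

        do {
                ret = alloc_contig_range(start, end, MIGRATE_MOVABLE,
                                         GFP_KERNEL);
                /*
                 * -EBUSY can mean a freed-but-undissolved hugepage
                 * caused test_pages_isolated() to reject the range.
                 */
        } while (ret == -EBUSY && retries-- > 0);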

A workqueue is introduced to perform the equivalent of
alloc_and_dissolve_huge_page() when a migrated hugepage is freed, so
that its pages can be released to the isolated page lists of the
buddy allocator, allowing the in-progress alloc_contig_range()
attempt to succeed.
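
Condensed from the change below, the worker performs the replacement
and dissolve in process context, where it is safe to sleep:

        /* dissolve_hpage_workfn(), in outline */
        newpage = alloc_replacement_page(h, nid);

        spin_lock_irq(&hugetlb_lock);
        enqueue_huge_page(h, oldpage);  /* finish the deferred free */
        if (!IS_ERR(newpage))
                replace_hugepage(h, nid, oldpage, newpage);
        spin_unlock_irq(&hugetlb_lock);

        if (!IS_ERR(newpage))
                __update_and_free_page(h, oldpage);  /* back to buddy */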

The HPG_dissolve hugepage flag is introduced to tag migratable
hugepages that should be dissolved when they are freed.
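
Its lifecycle, condensed from the change below: the flag is set once
a hugepage has been successfully isolated for migration, and cleared
again if migration is aborted and the page is put back:

        /* isolate_or_dissolve_huge_page(): tag for dissolve-on-free */
        if (page_count(head) && !isolate_hugetlb(head, list))
                SetHPageDissolve(head);

        /* putback_movable_pages(): migration aborted, drop the tag */
        if (unlikely(PageHuge(page)))
                ClearHPageDissolve(page);
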
Signed-off-by: Doug Berger <opendmb-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
 include/linux/hugetlb.h |  5 +++
 mm/hugetlb.c            | 72 ++++++++++++++++++++++++++++++++++++++---
 mm/migrate.c            |  1 +
 mm/page_alloc.c         |  1 +
 4 files changed, 75 insertions(+), 4 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3ec981a0d8b3..0e6e21805e51 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -222,6 +222,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void sync_hugetlb_dissolve(void);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -430,6 +431,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline void sync_hugetlb_dissolve(void) { }
+
#endif /* !CONFIG_HUGETLB_PAGE */
/*
* hugepages at page global directory. If arch support
@@ -574,6 +577,7 @@ enum hugetlb_page_flags {
HPG_freed,
HPG_vmemmap_optimized,
HPG_raw_hwp_unreliable,
+ HPG_dissolve,
__NR_HPAGEFLAGS,
};
@@ -621,6 +625,7 @@ HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+HPAGEFLAG(Dissolve, dissolve)
#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f232a37df4b6..da80889e1436 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1582,6 +1582,10 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
}
}
+static LLIST_HEAD(hpage_dissolvelist);
+static void dissolve_hpage_workfn(struct work_struct *work);
+static DECLARE_WORK(dissolve_hpage_work, dissolve_hpage_workfn);
+
/*
* As update_and_free_page() can be called under any context, so we cannot
* use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
@@ -1628,6 +1632,8 @@ static inline void flush_free_hpage_work(struct hstate *h)
{
if (hugetlb_vmemmap_optimizable(h))
flush_work(&free_hpage_work);
+ if (!hstate_is_gigantic(h))
+ flush_work(&dissolve_hpage_work);
}
static void update_and_free_page(struct hstate *h, struct page *page,
@@ -1679,7 +1685,7 @@ void free_huge_page(struct page *page)
struct hstate *h = page_hstate(page);
int nid = page_to_nid(page);
struct hugepage_subpool *spool = hugetlb_page_subpool(page);
- bool restore_reserve;
+ bool restore_reserve, dissolve;
unsigned long flags;
VM_BUG_ON_PAGE(page_count(page), page);
@@ -1691,6 +1697,8 @@ void free_huge_page(struct page *page)
page->mapping = NULL;
restore_reserve = HPageRestoreReserve(page);
ClearHPageRestoreReserve(page);
+ dissolve = HPageDissolve(page);
+ ClearHPageDissolve(page);
/*
* If HPageRestoreReserve was set on page, page allocation consumed a
@@ -1729,6 +1737,11 @@ void free_huge_page(struct page *page)
remove_hugetlb_page(h, page, true);
spin_unlock_irqrestore(&hugetlb_lock, flags);
update_and_free_page(h, page, true);
+ } else if (dissolve) {
+ spin_unlock_irqrestore(&hugetlb_lock, flags);
+ if (llist_add((struct llist_node *)&page->mapping,
+ &hpage_dissolvelist))
+ schedule_work(&dissolve_hpage_work);
} else {
arch_clear_hugepage_flags(page);
enqueue_huge_page(h, page);
@@ -2771,6 +2784,49 @@ static void replace_hugepage(struct hstate *h, int nid, struct page *old_page,
enqueue_huge_page(h, new_page);
}
+static void dissolve_hpage_workfn(struct work_struct *work)
+{
+ struct llist_node *node;
+
+ node = llist_del_all(&hpage_dissolvelist);
+
+ while (node) {
+ struct page *oldpage, *newpage;
+ struct hstate *h;
+ int nid;
+
+ oldpage = container_of((struct address_space **)node,
+ struct page, mapping);
+ node = node->next;
+ oldpage->mapping = NULL;
+
+ h = page_hstate(oldpage);
+ nid = page_to_nid(oldpage);
+
+ newpage = alloc_replacement_page(h, nid);
+
+ spin_lock_irq(&hugetlb_lock);
+ /* finish freeing oldpage */
+ arch_clear_hugepage_flags(oldpage);
+ enqueue_huge_page(h, oldpage);
+ if (IS_ERR(newpage)) {
+ /* cannot dissolve so just leave free */
+ spin_unlock_irq(&hugetlb_lock);
+ goto next;
+ }
+
+ replace_hugepage(h, nid, oldpage, newpage);
+
+ /*
+ * Pages have been replaced, we can safely free the old one.
+ */
+ spin_unlock_irq(&hugetlb_lock);
+ __update_and_free_page(h, oldpage);
+next:
+ cond_resched();
+ }
+}
+
/*
* alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
* @h: struct hstate old page belongs to
@@ -2803,6 +2859,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
*/
spin_unlock_irq(&hugetlb_lock);
ret = isolate_hugetlb(old_page, list);
+ SetHPageDissolve(old_page);
spin_lock_irq(&hugetlb_lock);
goto free_new;
} else if (!HPageFreed(old_page)) {
@@ -2864,14 +2921,21 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
if (hstate_is_gigantic(h))
return -ENOMEM;
- if (page_count(head) && !isolate_hugetlb(head, list))
+ if (page_count(head) && !isolate_hugetlb(head, list)) {
+ SetHPageDissolve(head);
ret = 0;
- else if (!page_count(head))
+ } else if (!page_count(head)) {
ret = alloc_and_dissolve_huge_page(h, head, list);
-
+ }
return ret;
}
+void sync_hugetlb_dissolve(void)
+{
+ flush_work(&free_hpage_work);
+ flush_work(&dissolve_hpage_work);
+}
+
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{
diff --git a/mm/migrate.c b/mm/migrate.c
index 6a1597c92261..b6c6123e614c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -141,6 +141,7 @@ void putback_movable_pages(struct list_head *l)
list_for_each_entry_safe(page, page2, l, lru) {
if (unlikely(PageHuge(page))) {
+ ClearHPageDissolve(page);
putback_active_hugepage(page);
continue;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5486d47406e..6bf76bbc0308 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9235,6 +9235,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret && ret != -EBUSY)
goto done;
ret = 0;
+ sync_hugetlb_dissolve();
/*
* Pages from [start, end) are within a pageblock_nr_pages
--
2.25.1