* [PATCH v2 1/5] mm: memory_hotplug: remove head variable in do_migrate_range()
2024-08-16 9:04 [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
@ 2024-08-16 9:04 ` Kefeng Wang
2024-08-16 9:04 ` [PATCH v2 2/5] mm: memory-failure: add unmap_posioned_folio() Kefeng Wang
` (4 subsequent siblings)
5 siblings, 0 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-16 9:04 UTC (permalink / raw)
To: Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Miaohe Lin, Naoya Horiguchi,
linux-mm, Kefeng Wang
Directly use a folio for HugeTLB and THP when calculating the next pfn,
then remove the unused head variable.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
mm/memory_hotplug.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index df291f2e509d..7c674cec3c18 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1773,7 +1773,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
- struct page *page, *head;
+ struct page *page;
LIST_HEAD(source);
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
@@ -1786,14 +1786,20 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
continue;
page = pfn_to_page(pfn);
folio = page_folio(page);
- head = &folio->page;
- if (PageHuge(page)) {
- pfn = page_to_pfn(head) + compound_nr(head) - 1;
- isolate_hugetlb(folio, &source);
- continue;
- } else if (PageTransHuge(page))
- pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
+ /*
+ * We hold no reference or lock on the folio, it might be changed
+ * concurrently (e.g. split), folio_nr_pages() may read garbage,
+ * but our loop can handle that as it revisits the split
+ * folio later.
+ */
+ if (folio_test_large(folio)) {
+ pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
+ if (folio_test_hugetlb(folio)) {
+ isolate_hugetlb(folio, &source);
+ continue;
+ }
+ }
/*
* HWPoison pages have elevated reference counts so the migration would
--
2.27.0
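To see what the new pfn advance does, here is a small userspace model (a sketch only: struct toy_folio and lookup() are made-up stand-ins for page_folio()/folio_pfn()/folio_nr_pages(), none of this is kernel code). Skipping to the folio's last pfn lets the loop's pfn++ land on the first pfn after the folio; if the folio was split in the meantime, the later pfns are simply visited again.

/*
 * Userspace model of the pfn advance in do_migrate_range() after this
 * patch: for a large folio, jump to the folio's last pfn so the loop's
 * pfn++ continues at the first pfn after the folio.
 */
#include <stdio.h>

struct toy_folio {
        unsigned long pfn;      /* first pfn of the folio */
        unsigned long nr;       /* folio_nr_pages()       */
};

/* pretend pfns 100..103 form one 4-page folio, the rest are single pages */
static struct toy_folio lookup(unsigned long pfn)
{
        if (pfn >= 100 && pfn < 104)
                return (struct toy_folio){ .pfn = 100, .nr = 4 };
        return (struct toy_folio){ .pfn = pfn, .nr = 1 };
}

int main(void)
{
        for (unsigned long pfn = 100; pfn < 106; pfn++) {
                struct toy_folio f = lookup(pfn);

                printf("visit pfn %lu (folio starts at %lu)\n", pfn, f.pfn);
                if (f.nr > 1)                   /* folio_test_large() */
                        pfn = f.pfn + f.nr - 1; /* skip to the folio's last pfn */
        }
        return 0;       /* prints pfns 100, 104, 105 */
}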
* [PATCH v2 2/5] mm: memory-failure: add unmap_posioned_folio()
2024-08-16 9:04 [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
2024-08-16 9:04 ` [PATCH v2 1/5] mm: memory_hotplug: remove head variable in do_migrate_range() Kefeng Wang
@ 2024-08-16 9:04 ` Kefeng Wang
2024-08-16 9:04 ` [PATCH v2 3/5] mm: memory_hotplug: check hwpoisoned page firstly in do_migrate_range() Kefeng Wang
` (3 subsequent siblings)
5 siblings, 0 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-16 9:04 UTC (permalink / raw)
To: Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Miaohe Lin, Naoya Horiguchi,
linux-mm, Kefeng Wang
Add an unmap_posioned_folio() helper, which will be reused by
do_migrate_range() from memory hotplug soon.
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
mm/internal.h | 9 +++++++++
mm/memory-failure.c | 43 ++++++++++++++++++++++++++-----------------
2 files changed, 35 insertions(+), 17 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index adbf8c88c9df..5b80c65f82b6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1054,6 +1054,8 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
/*
* mm/memory-failure.c
*/
+#ifdef CONFIG_MEMORY_FAILURE
+int unmap_posioned_folio(struct folio *folio, enum ttu_flags ttu);
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);
@@ -1074,6 +1076,13 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
unsigned long ksm_addr);
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+#else
+static inline int unmap_posioned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+ return 0;
+}
+#endif
+
extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 353254537b54..93848330de1f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1554,6 +1554,30 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
return ret;
}
+int unmap_posioned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+ if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
+ struct address_space *mapping;
+ /*
+ * For hugetlb pages in shared mappings, try_to_unmap
+ * could potentially call huge_pmd_unshare. Because of
+ * this, take semaphore in write mode here and set
+ * TTU_RMAP_LOCKED to indicate we have taken the lock
+ * at this higher level.
+ */
+ mapping = hugetlb_folio_mapping_lock_write(folio);
+ if (!mapping)
+ return -EAGAIN;
+
+ try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
+ i_mmap_unlock_write(mapping);
+ } else {
+ try_to_unmap(folio, ttu);
+ }
+
+ return 0;
+}
+
/*
* Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty.
@@ -1615,23 +1639,8 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
*/
collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
- if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
- /*
- * For hugetlb pages in shared mappings, try_to_unmap
- * could potentially call huge_pmd_unshare. Because of
- * this, take semaphore in write mode here and set
- * TTU_RMAP_LOCKED to indicate we have taken the lock
- * at this higher level.
- */
- mapping = hugetlb_folio_mapping_lock_write(folio);
- if (mapping) {
- try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
- i_mmap_unlock_write(mapping);
- } else
- pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
- } else {
- try_to_unmap(folio, ttu);
- }
+ if (unmap_posioned_folio(folio, ttu))
+ pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
unmap_success = !folio_mapped(folio);
if (!unmap_success)
--
2.27.0
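Looking ahead to patch 3, the hotplug side is expected to call the new helper roughly as below (a sketch mirroring the hunk added there, not extra code to apply):

/* sketch of the caller added in patch 3 (do_migrate_range()) */
if (folio_test_hwpoison(folio) ||
    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
        if (WARN_ON(folio_test_lru(folio)))
                folio_isolate_lru(folio);
        if (folio_mapped(folio))
                unmap_posioned_folio(folio, TTU_IGNORE_MLOCK);
        continue;
}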
* [PATCH v2 3/5] mm: memory_hotplug: check hwpoisoned page firstly in do_migrate_range()
2024-08-16 9:04 [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
2024-08-16 9:04 ` [PATCH v2 1/5] mm: memory_hotplug: remove head variable in do_migrate_range() Kefeng Wang
2024-08-16 9:04 ` [PATCH v2 2/5] mm: memory-failure: add unmap_posioned_folio() Kefeng Wang
@ 2024-08-16 9:04 ` Kefeng Wang
2024-08-16 9:04 ` [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list() Kefeng Wang
` (2 subsequent siblings)
5 siblings, 0 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-16 9:04 UTC (permalink / raw)
To: Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Miaohe Lin, Naoya Horiguchi,
linux-mm, Kefeng Wang
Commit b15c87263a69 ("hwpoison, memory_hotplug: allow hwpoisoned
pages to be offlined") didn't handle hugetlb pages, so the endless
loop could still occur when offlining a hwpoisoned hugetlb page.
Luckily, with commit e591ef7d96d6 ("mm,hwpoison,hugetlb,memory_hotplug:
hotremove memory section with hwpoisoned hugepage"), HPageMigratable
is cleared for a hwpoisoned hugetlb page, so it is skipped in
scan_movable_pages() and the endless loop issue is fixed.
However, even if the HPageMigratable() check passes (it is done without
a reference or lock), the hugetlb page may still be hwpoisoned. That
won't cause a problem, since the hwpoisoned page will be handled
correctly in the next movable-pages scan loop, but it will be isolated
in do_migrate_range() and then fail to migrate. In order to avoid the
unnecessary isolation and to unify all hwpoisoned page handling, check
for hwpoison unconditionally first, and if it is a hwpoisoned hugetlb
page, try to unmap it as the catch-all safety net, like we do for
normal pages.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
mm/memory_hotplug.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7c674cec3c18..02a0d4fbc3fe 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1793,13 +1793,8 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* but our loop can handle that as it revisits the split
* folio later.
*/
- if (folio_test_large(folio)) {
+ if (folio_test_large(folio))
pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
- if (folio_test_hugetlb(folio)) {
- isolate_hugetlb(folio);
- continue;
- }
- }
/*
* HWPoison pages have elevated reference counts so the migration would
@@ -1808,11 +1803,17 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
* the unmap as the catch all safety net).
*/
- if (PageHWPoison(page)) {
+ if (folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
if (WARN_ON(folio_test_lru(folio)))
folio_isolate_lru(folio);
if (folio_mapped(folio))
- try_to_unmap(folio, TTU_IGNORE_MLOCK);
+ unmap_posioned_folio(folio, TTU_IGNORE_MLOCK);
+ continue;
+ }
+
+ if (folio_test_hugetlb(folio)) {
+ isolate_hugetlb(folio, &source);
continue;
}
--
2.27.0
* [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-16 9:04 [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
` (2 preceding siblings ...)
2024-08-16 9:04 ` [PATCH v2 3/5] mm: memory_hotplug: check hwpoisoned page firstly in do_migrate_range() Kefeng Wang
@ 2024-08-16 9:04 ` Kefeng Wang
2024-08-16 9:04 ` [PATCH v2 5/5] mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation Kefeng Wang
2024-08-17 8:43 ` [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
5 siblings, 0 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-16 9:04 UTC (permalink / raw)
To: Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Miaohe Lin, Naoya Horiguchi,
linux-mm, Kefeng Wang
Add an isolate_folio_to_list() helper to try to isolate HugeTLB,
non-LRU movable and LRU folios to a list, which will be reused by
do_migrate_range() from memory hotplug soon. Also drop
mf_isolate_folio(), since we can directly use the new helper in
soft_offline_in_use_page().
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
include/linux/migrate.h | 3 +++
mm/memory-failure.c | 46 ++++++++++-------------------------------
mm/migrate.c | 27 ++++++++++++++++++++++++
3 files changed, 41 insertions(+), 35 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 644be30b69c8..002e49b2ebd9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 93848330de1f..d8298017bd99 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2659,40 +2659,6 @@ EXPORT_SYMBOL(unpoison_memory);
#undef pr_fmt
#define pr_fmt(fmt) "Soft offline: " fmt
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
- bool isolated = false;
-
- if (folio_test_hugetlb(folio)) {
- isolated = isolate_hugetlb(folio, pagelist);
- } else {
- bool lru = !__folio_test_movable(folio);
-
- if (lru)
- isolated = folio_isolate_lru(folio);
- else
- isolated = isolate_movable_page(&folio->page,
- ISOLATE_UNEVICTABLE);
-
- if (isolated) {
- list_add(&folio->lru, pagelist);
- if (lru)
- node_stat_add_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio));
- }
- }
-
- /*
- * If we succeed to isolate the folio, we grabbed another refcount on
- * the folio, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the folio, it means that we cannot go further
- * and we will return an error, so drop the reference we got from
- * get_any_page() as well.
- */
- folio_put(folio);
- return isolated;
-}
-
/*
* soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
* If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2744,7 +2710,7 @@ static int soft_offline_in_use_page(struct page *page)
return 0;
}
- if (mf_isolate_folio(folio, &pagelist)) {
+ if (isolate_folio_to_list(folio, &pagelist)) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
@@ -2766,6 +2732,16 @@ static int soft_offline_in_use_page(struct page *page)
pfn, msg_page[huge], page_count(page), &page->flags);
ret = -EBUSY;
}
+
+ /*
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
+ * and we will return an error, so drop the reference we got from
+ * get_any_page() as well.
+ */
+ folio_put(folio);
+
return ret;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 6e32098ac2dc..7b7b5b16e610 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
}
}
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ bool isolated = false;
+
+ if (folio_test_hugetlb(folio)) {
+ isolated = isolate_hugetlb(folio, list);
+ } else {
+ bool lru = !__folio_test_movable(folio);
+
+ if (lru)
+ isolated = folio_isolate_lru(folio);
+ else
+ isolated = isolate_movable_page(&folio->page,
+ ISOLATE_UNEVICTABLE);
+
+ if (isolated) {
+ list_add(&folio->lru, list);
+ if (lru)
+ node_stat_add_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
+ }
+ }
+
+ return isolated;
+}
+
static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
struct folio *folio,
unsigned long idx)
--
2.27.0
* [PATCH v2 5/5] mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation
2024-08-16 9:04 [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
` (3 preceding siblings ...)
2024-08-16 9:04 ` [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list() Kefeng Wang
@ 2024-08-16 9:04 ` Kefeng Wang
2024-08-17 8:43 ` [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
5 siblings, 0 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-16 9:04 UTC (permalink / raw)
To: Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Miaohe Lin, Naoya Horiguchi,
linux-mm, Kefeng Wang
Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio
isolation, which cleans up the code a bit and saves a few calls
to compound_head().
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
mm/memory_hotplug.c | 45 +++++++++++++++++----------------------------
1 file changed, 17 insertions(+), 28 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 02a0d4fbc3fe..cc9c16db2f8c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1773,14 +1773,14 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
- struct page *page;
LIST_HEAD(source);
+ struct folio *folio;
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
- struct folio *folio;
- bool isolated;
+ struct page *page;
+ bool huge;
if (!pfn_valid(pfn))
continue;
@@ -1812,34 +1812,22 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
continue;
}
- if (folio_test_hugetlb(folio)) {
- isolate_hugetlb(folio, &source);
- continue;
+ huge = folio_test_hugetlb(folio);
+ if (!huge) {
+ folio = folio_get_nontail_page(page);
+ if (!folio)
+ continue;
}
- if (!get_page_unless_zero(page))
- continue;
- /*
- * We can skip free pages. And we can deal with pages on
- * LRU and non-lru movable pages.
- */
- if (PageLRU(page))
- isolated = isolate_lru_page(page);
- else
- isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
- if (isolated) {
- list_add_tail(&page->lru, &source);
- if (!__PageMovable(page))
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
-
- } else {
+ if (!isolate_folio_to_list(folio, &source)) {
if (__ratelimit(&migrate_rs)) {
pr_warn("failed to isolate pfn %lx\n", pfn);
dump_page(page, "isolation failed");
}
}
- put_page(page);
+
+ if (!huge)
+ folio_put(folio);
}
if (!list_empty(&source)) {
nodemask_t nmask = node_states[N_MEMORY];
@@ -1854,7 +1842,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* We have checked that migration range is on a single zone so
* we can use the nid of the first page to all the others.
*/
- mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+ mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
/*
* try to allocate from a different node but reuse this node
@@ -1867,11 +1855,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
ret = migrate_pages(&source, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
if (ret) {
- list_for_each_entry(page, &source, lru) {
+ list_for_each_entry(folio, &source, lru) {
if (__ratelimit(&migrate_rs)) {
pr_warn("migrating pfn %lx failed ret:%d\n",
- page_to_pfn(page), ret);
- dump_page(page, "migration failure");
+ folio_pfn(folio), ret);
+ dump_page(&folio->page,
+ "migration failure");
}
}
putback_movable_pages(&source);
--
2.27.0
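Taking the five patches together, the per-pfn body of do_migrate_range() ends up reading roughly as follows. This is a sketch assembled from the hunks above for readability, not a separate patch; it elides the initial pfn_valid() check and the migration/putback tail shown above.

        folio = page_folio(page);

        /* skip to the folio's last pfn; a racy split is simply re-scanned later */
        if (folio_test_large(folio))
                pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;

        /* hwpoisoned folios are unmapped as a safety net but never migrated */
        if (folio_test_hwpoison(folio) ||
            (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
                if (WARN_ON(folio_test_lru(folio)))
                        folio_isolate_lru(folio);
                if (folio_mapped(folio))
                        unmap_posioned_folio(folio, TTU_IGNORE_MLOCK);
                continue;
        }

        /* hugetlb needs no extra reference for isolation; everything else does */
        huge = folio_test_hugetlb(folio);
        if (!huge) {
                folio = folio_get_nontail_page(page);
                if (!folio)
                        continue;
        }

        if (!isolate_folio_to_list(folio, &source)) {
                if (__ratelimit(&migrate_rs)) {
                        pr_warn("failed to isolate pfn %lx\n", pfn);
                        dump_page(page, "isolation failed");
                }
        }

        if (!huge)
                folio_put(folio);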
* Re: [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range()
2024-08-16 9:04 [PATCH v2 0/5] mm: memory_hotplug: improve do_migrate_range() Kefeng Wang
` (4 preceding siblings ...)
2024-08-16 9:04 ` [PATCH v2 5/5] mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation Kefeng Wang
@ 2024-08-17 8:43 ` Kefeng Wang
5 siblings, 0 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-17 8:43 UTC (permalink / raw)
To: Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Miaohe Lin, Naoya Horiguchi,
linux-mm
Sorry for the noise, please ignore this one, will resend.
On 2024/8/16 17:04, Kefeng Wang wrote:
> Unify hwpoisoned page handling and isolation of HugeTLB/LRU/non-LRU
> movable page, also convert to use folios in do_migrate_range().
>
> v2:
> - address comments from David(eg, fix HWPoison check/use a folio
> for pfn calculation firstly)
> - fix lkp build errors for isolate_folio_to_list()
> - drop unnecessary comments and don't grab one more ref for hugetlb
>
> Kefeng Wang (5):
> mm: memory_hotplug: remove head variable in do_migrate_range()
> mm: memory-failure: add unmap_posioned_folio()
> mm: memory_hotplug: check hwpoisoned page firstly in
> do_migrate_range()
> mm: migrate: add isolate_folio_to_list()
> mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation
>
> include/linux/migrate.h | 3 ++
> mm/internal.h | 9 +++++
> mm/memory-failure.c | 89 +++++++++++++++++------------------------
> mm/memory_hotplug.c | 62 ++++++++++++++--------------
> mm/migrate.c | 27 +++++++++++++
> 5 files changed, 105 insertions(+), 85 deletions(-)
>
* [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-17 8:49 [PATCH resend " Kefeng Wang
@ 2024-08-17 8:49 ` Kefeng Wang
2024-08-20 9:32 ` Miaohe Lin
2024-08-26 14:50 ` David Hildenbrand
0 siblings, 2 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-17 8:49 UTC (permalink / raw)
To: Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Miaohe Lin, Naoya Horiguchi,
linux-mm, Kefeng Wang
Add an isolate_folio_to_list() helper to try to isolate HugeTLB,
non-LRU movable and LRU folios to a list, which will be reused by
do_migrate_range() from memory hotplug soon. Also drop
mf_isolate_folio(), since we can directly use the new helper in
soft_offline_in_use_page().
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
include/linux/migrate.h | 3 +++
mm/memory-failure.c | 46 ++++++++++-------------------------------
mm/migrate.c | 27 ++++++++++++++++++++++++
3 files changed, 41 insertions(+), 35 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 644be30b69c8..002e49b2ebd9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 93848330de1f..d8298017bd99 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2659,40 +2659,6 @@ EXPORT_SYMBOL(unpoison_memory);
#undef pr_fmt
#define pr_fmt(fmt) "Soft offline: " fmt
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
- bool isolated = false;
-
- if (folio_test_hugetlb(folio)) {
- isolated = isolate_hugetlb(folio, pagelist);
- } else {
- bool lru = !__folio_test_movable(folio);
-
- if (lru)
- isolated = folio_isolate_lru(folio);
- else
- isolated = isolate_movable_page(&folio->page,
- ISOLATE_UNEVICTABLE);
-
- if (isolated) {
- list_add(&folio->lru, pagelist);
- if (lru)
- node_stat_add_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio));
- }
- }
-
- /*
- * If we succeed to isolate the folio, we grabbed another refcount on
- * the folio, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the folio, it means that we cannot go further
- * and we will return an error, so drop the reference we got from
- * get_any_page() as well.
- */
- folio_put(folio);
- return isolated;
-}
-
/*
* soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
* If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2744,7 +2710,7 @@ static int soft_offline_in_use_page(struct page *page)
return 0;
}
- if (mf_isolate_folio(folio, &pagelist)) {
+ if (isolate_folio_to_list(folio, &pagelist)) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
@@ -2766,6 +2732,16 @@ static int soft_offline_in_use_page(struct page *page)
pfn, msg_page[huge], page_count(page), &page->flags);
ret = -EBUSY;
}
+
+ /*
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
+ * and we will return an error, so drop the reference we got from
+ * get_any_page() as well.
+ */
+ folio_put(folio);
+
return ret;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index dbfa910ec24b..53f8429a8ebe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
}
}
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ bool isolated = false;
+
+ if (folio_test_hugetlb(folio)) {
+ isolated = isolate_hugetlb(folio, list);
+ } else {
+ bool lru = !__folio_test_movable(folio);
+
+ if (lru)
+ isolated = folio_isolate_lru(folio);
+ else
+ isolated = isolate_movable_page(&folio->page,
+ ISOLATE_UNEVICTABLE);
+
+ if (isolated) {
+ list_add(&folio->lru, list);
+ if (lru)
+ node_stat_add_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
+ }
+ }
+
+ return isolated;
+}
+
static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
struct folio *folio,
unsigned long idx)
--
2.27.0
* Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-17 8:49 ` [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list() Kefeng Wang
@ 2024-08-20 9:32 ` Miaohe Lin
2024-08-20 9:46 ` Kefeng Wang
2024-08-26 14:50 ` David Hildenbrand
1 sibling, 1 reply; 15+ messages in thread
From: Miaohe Lin @ 2024-08-20 9:32 UTC (permalink / raw)
To: Kefeng Wang, Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Naoya Horiguchi, linux-mm
On 2024/8/17 16:49, Kefeng Wang wrote:
> Add isolate_folio_to_list() helper to try to isolate HugeTLB,
> no-LRU movable and LRU folios to a list, which will be reused by
> do_migrate_range() from memory hotplug soon, also drop the
> mf_isolate_folio() since we could directly use new helper in
> the soft_offline_in_use_page().
>
> Acked-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Thanks for your patch.
> ---
> include/linux/migrate.h | 3 +++
> mm/memory-failure.c | 46 ++++++++++-------------------------------
> mm/migrate.c | 27 ++++++++++++++++++++++++
> 3 files changed, 41 insertions(+), 35 deletions(-)
>
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index 644be30b69c8..002e49b2ebd9 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
> unsigned int *ret_succeeded);
> struct folio *alloc_migration_target(struct folio *src, unsigned long private);
> bool isolate_movable_page(struct page *page, isolate_mode_t mode);
> +bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
>
> int migrate_huge_page_move_mapping(struct address_space *mapping,
> struct folio *dst, struct folio *src);
> @@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
> { return NULL; }
> static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
> { return false; }
> +static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
> + { return false; }
>
> static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
> struct folio *dst, struct folio *src)
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 93848330de1f..d8298017bd99 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -2659,40 +2659,6 @@ EXPORT_SYMBOL(unpoison_memory);
> #undef pr_fmt
> #define pr_fmt(fmt) "Soft offline: " fmt
>
> -static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
> -{
> - bool isolated = false;
> -
> - if (folio_test_hugetlb(folio)) {
> - isolated = isolate_hugetlb(folio, pagelist);
> - } else {
> - bool lru = !__folio_test_movable(folio);
> -
> - if (lru)
> - isolated = folio_isolate_lru(folio);
> - else
> - isolated = isolate_movable_page(&folio->page,
> - ISOLATE_UNEVICTABLE);
> -
> - if (isolated) {
> - list_add(&folio->lru, pagelist);
> - if (lru)
> - node_stat_add_folio(folio, NR_ISOLATED_ANON +
> - folio_is_file_lru(folio));
> - }
> - }
> -
> - /*
> - * If we succeed to isolate the folio, we grabbed another refcount on
> - * the folio, so we can safely drop the one we got from get_any_page().
> - * If we failed to isolate the folio, it means that we cannot go further
> - * and we will return an error, so drop the reference we got from
> - * get_any_page() as well.
> - */
> - folio_put(folio);
> - return isolated;
> -}
> -
> /*
> * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
> * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
> @@ -2744,7 +2710,7 @@ static int soft_offline_in_use_page(struct page *page)
> return 0;
> }
>
> - if (mf_isolate_folio(folio, &pagelist)) {
> + if (isolate_folio_to_list(folio, &pagelist)) {
> ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
> (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
> if (!ret) {
> @@ -2766,6 +2732,16 @@ static int soft_offline_in_use_page(struct page *page)
> pfn, msg_page[huge], page_count(page), &page->flags);
> ret = -EBUSY;
> }
> +
> + /*
> + * If we succeed to isolate the folio, we grabbed another refcount on
> + * the folio, so we can safely drop the one we got from get_any_page().
> + * If we failed to isolate the folio, it means that we cannot go further
> + * and we will return an error, so drop the reference we got from
> + * get_any_page() as well.
> + */
> + folio_put(folio);
Why is folio_put() deferred here? With this change, the folio will have two extra refcounts when
calling migrate_pages() above: one from get_any_page() and another from folio_isolate_lru().
This would mean migrate_pages() can never succeed, and many of my testcases failed due to this
change.
Thanks.
.
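To make the refcount argument above concrete, here is a tiny userspace model of the ordering problem (toy counters only: take_ref()/drop_ref()/isolate()/migrate_one() are made-up stand-ins for get_any_page(), folio_put(), folio_isolate_lru() and migrate_pages(), and the model simply assumes migration fails while any reference other than the isolation one is still held):

#include <stdbool.h>
#include <stdio.h>

struct toy_folio { int refcount; };

static void take_ref(struct toy_folio *f) { f->refcount++; } /* get_any_page() */
static void drop_ref(struct toy_folio *f) { f->refcount--; } /* folio_put()    */

/* folio_isolate_lru(): isolation takes its own reference */
static bool isolate(struct toy_folio *f) { f->refcount++; return true; }

/* migration succeeds only when the isolation ref is the last one left */
static bool migrate_one(struct toy_folio *f) { return f->refcount == 1; }

int main(void)
{
        struct toy_folio f = { .refcount = 0 };
        bool isolated;

        take_ref(&f);              /* ref from get_any_page()          */
        isolated = isolate(&f);    /* ref from isolate_folio_to_list() */

        /* v2 as posted: the get_any_page() ref is still held, refcount == 2 */
        printf("migrate before folio_put(): %s\n",
               isolated && migrate_one(&f) ? "ok" : "fails");

        /* fixed ordering: drop the get_any_page() ref first, refcount == 1 */
        drop_ref(&f);
        printf("migrate after folio_put():  %s\n",
               isolated && migrate_one(&f) ? "ok" : "fails");
        return 0;
}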
* Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-20 9:32 ` Miaohe Lin
@ 2024-08-20 9:46 ` Kefeng Wang
2024-08-21 2:00 ` Miaohe Lin
0 siblings, 1 reply; 15+ messages in thread
From: Kefeng Wang @ 2024-08-20 9:46 UTC (permalink / raw)
To: Miaohe Lin, Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Naoya Horiguchi, linux-mm
On 2024/8/20 17:32, Miaohe Lin wrote:
> On 2024/8/17 16:49, Kefeng Wang wrote:
>> Add isolate_folio_to_list() helper to try to isolate HugeTLB,
>> no-LRU movable and LRU folios to a list, which will be reused by
>> do_migrate_range() from memory hotplug soon, also drop the
>> mf_isolate_folio() since we could directly use new helper in
>> the soft_offline_in_use_page().
>>
>> Acked-by: David Hildenbrand <david@redhat.com>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>
> Thanks for your patch.
>
>> ---
>> include/linux/migrate.h | 3 +++
>> mm/memory-failure.c | 46 ++++++++++-------------------------------
>> mm/migrate.c | 27 ++++++++++++++++++++++++
>> 3 files changed, 41 insertions(+), 35 deletions(-)
>>
>> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
>> index 644be30b69c8..002e49b2ebd9 100644
>> --- a/include/linux/migrate.h
>> +++ b/include/linux/migrate.h
>> @@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
>> unsigned int *ret_succeeded);
>> struct folio *alloc_migration_target(struct folio *src, unsigned long private);
>> bool isolate_movable_page(struct page *page, isolate_mode_t mode);
>> +bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
>>
>> int migrate_huge_page_move_mapping(struct address_space *mapping,
>> struct folio *dst, struct folio *src);
>> @@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
>> { return NULL; }
>> static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>> { return false; }
>> +static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
>> + { return false; }
>>
>> static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>> struct folio *dst, struct folio *src)
>> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
>> index 93848330de1f..d8298017bd99 100644
>> --- a/mm/memory-failure.c
>> +++ b/mm/memory-failure.c
>> @@ -2659,40 +2659,6 @@ EXPORT_SYMBOL(unpoison_memory);
>> #undef pr_fmt
>> #define pr_fmt(fmt) "Soft offline: " fmt
>>
>> -static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
>> -{
>> - bool isolated = false;
>> -
>> - if (folio_test_hugetlb(folio)) {
>> - isolated = isolate_hugetlb(folio, pagelist);
>> - } else {
>> - bool lru = !__folio_test_movable(folio);
>> -
>> - if (lru)
>> - isolated = folio_isolate_lru(folio);
>> - else
>> - isolated = isolate_movable_page(&folio->page,
>> - ISOLATE_UNEVICTABLE);
>> -
>> - if (isolated) {
>> - list_add(&folio->lru, pagelist);
>> - if (lru)
>> - node_stat_add_folio(folio, NR_ISOLATED_ANON +
>> - folio_is_file_lru(folio));
>> - }
>> - }
>> -
>> - /*
>> - * If we succeed to isolate the folio, we grabbed another refcount on
>> - * the folio, so we can safely drop the one we got from get_any_page().
>> - * If we failed to isolate the folio, it means that we cannot go further
>> - * and we will return an error, so drop the reference we got from
>> - * get_any_page() as well.
>> - */
>> - folio_put(folio);
>> - return isolated;
>> -}
>> -
>> /*
>> * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
>> * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
>> @@ -2744,7 +2710,7 @@ static int soft_offline_in_use_page(struct page *page)
>> return 0;
>> }
>>
>> - if (mf_isolate_folio(folio, &pagelist)) {
>> + if (isolate_folio_to_list(folio, &pagelist)) {
>> ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
>> (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
>> if (!ret) {
>> @@ -2766,6 +2732,16 @@ static int soft_offline_in_use_page(struct page *page)
>> pfn, msg_page[huge], page_count(page), &page->flags);
>> ret = -EBUSY;
>> }
>> +
>> + /*
>> + * If we succeed to isolate the folio, we grabbed another refcount on
>> + * the folio, so we can safely drop the one we got from get_any_page().
>> + * If we failed to isolate the folio, it means that we cannot go further
>> + * and we will return an error, so drop the reference we got from
>> + * get_any_page() as well.
>> + */
>> + folio_put(folio);
>
> Why folio_put() is deferred here? With this change, folio will have extra two refcnt when
> calling migrate_pages() above. One is from get_any_page() and another one from folio_isolate_lru().
> This would lead to migrate_pages() never success. And my many testcases failed due to this
> change.
Thanks for your review, I missed this and only tested memory hotplug. Could
you try this:
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d8298017bd99..64a145a0e29f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2671,6 +2671,7 @@ static int soft_offline_in_use_page(struct page *page)
struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
bool huge = folio_test_hugetlb(folio);
+ bool isolated;
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
@@ -2710,7 +2711,18 @@ static int soft_offline_in_use_page(struct page *page)
 return 0;
 }

- if (isolate_folio_to_list(folio, &pagelist)) {
+ isolated = isolate_folio_to_list(folio, &pagelist);
+
+ /*
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
+ * and we will return an error, so drop the reference we got from
+ * get_any_page() as well.
+ */
+ folio_put(folio);
+
+ if (isolated) {
 ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
 if (!ret) {
@@ -2733,15 +2745,6 @@ static int soft_offline_in_use_page(struct page *page)
 ret = -EBUSY;
 }

- /*
- * If we succeed to isolate the folio, we grabbed another refcount on
- * the folio, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the folio, it means that we cannot go further
- * and we will return an error, so drop the reference we got from
- * get_any_page() as well.
- */
- folio_put(folio);
-
 return ret;
 }
>
> Thanks.
> .
* Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-20 9:46 ` Kefeng Wang
@ 2024-08-21 2:00 ` Miaohe Lin
2024-08-21 2:14 ` Kefeng Wang
0 siblings, 1 reply; 15+ messages in thread
From: Miaohe Lin @ 2024-08-21 2:00 UTC (permalink / raw)
To: Kefeng Wang, Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Naoya Horiguchi, linux-mm
On 2024/8/20 17:46, Kefeng Wang wrote:
>
>
> On 2024/8/20 17:32, Miaohe Lin wrote:
>> On 2024/8/17 16:49, Kefeng Wang wrote:
>>> Add isolate_folio_to_list() helper to try to isolate HugeTLB,
>>> no-LRU movable and LRU folios to a list, which will be reused by
>>> do_migrate_range() from memory hotplug soon, also drop the
>>> mf_isolate_folio() since we could directly use new helper in
>>> the soft_offline_in_use_page().
>>>
>>> Acked-by: David Hildenbrand <david@redhat.com>
>>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>>
>> Thanks for your patch.
>>>
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index d8298017bd99..64a145a0e29f 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -2671,6 +2671,7 @@ static int soft_offline_in_use_page(struct page *page)
> struct folio *folio = page_folio(page);
> char const *msg_page[] = {"page", "hugepage"};
> bool huge = folio_test_hugetlb(folio);
> + bool isolated;
> LIST_HEAD(pagelist);
> struct migration_target_control mtc = {
> .nid = NUMA_NO_NODE,
> @@ -2710,7 +2711,18 @@ static int soft_offline_in_use_page(struct page *page)
> return 0;
> }
>
> - if (isolate_folio_to_list(folio, &pagelist)) {
> + isolated = isolate_folio_to_list(folio, &pagelist);
> +
> + /*
> + * If we succeed to isolate the folio, we grabbed another refcount on
> + * the folio, so we can safely drop the one we got from get_any_page().
> + * If we failed to isolate the folio, it means that we cannot go further
> + * and we will return an error, so drop the reference we got from
> + * get_any_page() as well.
> + */
> + folio_put(folio);
> +
> + if (isolated) {
> ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
> (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
> if (!ret) {
> @@ -2733,15 +2745,6 @@ static int soft_offline_in_use_page(struct page *page)
> ret = -EBUSY;
> }
>
> - /*
> - * If we succeed to isolate the folio, we grabbed another refcount on
> - * the folio, so we can safely drop the one we got from get_any_page().
> - * If we failed to isolate the folio, it means that we cannot go further
> - * and we will return an error, so drop the reference we got from
> - * get_any_page() as well.
> - */
> - folio_put(folio);
> -
> return ret;
> }
This works for me.
Thanks.
.
* Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-21 2:00 ` Miaohe Lin
@ 2024-08-21 2:14 ` Kefeng Wang
2024-08-22 6:56 ` Miaohe Lin
0 siblings, 1 reply; 15+ messages in thread
From: Kefeng Wang @ 2024-08-21 2:14 UTC (permalink / raw)
To: Miaohe Lin, Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Naoya Horiguchi, linux-mm
On 2024/8/21 10:00, Miaohe Lin wrote:
> On 2024/8/20 17:46, Kefeng Wang wrote:
>>
>>
>> On 2024/8/20 17:32, Miaohe Lin wrote:
>>> On 2024/8/17 16:49, Kefeng Wang wrote:
>>>> Add isolate_folio_to_list() helper to try to isolate HugeTLB,
>>>> no-LRU movable and LRU folios to a list, which will be reused by
>>>> do_migrate_range() from memory hotplug soon, also drop the
>>>> mf_isolate_folio() since we could directly use new helper in
>>>> the soft_offline_in_use_page().
>>>>
>>>> Acked-by: David Hildenbrand <david@redhat.com>
>>>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>>>
>>> Thanks for your patch.
>>>>
>> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
>> index d8298017bd99..64a145a0e29f 100644
>> --- a/mm/memory-failure.c
>> +++ b/mm/memory-failure.c
>> @@ -2671,6 +2671,7 @@ static int soft_offline_in_use_page(struct page *page)
>> struct folio *folio = page_folio(page);
>> char const *msg_page[] = {"page", "hugepage"};
>> bool huge = folio_test_hugetlb(folio);
>> + bool isolated;
>> LIST_HEAD(pagelist);
>> struct migration_target_control mtc = {
>> .nid = NUMA_NO_NODE,
>> @@ -2710,7 +2711,18 @@ static int soft_offline_in_use_page(struct page *page)
>> return 0;
>> }
>>
>> - if (isolate_folio_to_list(folio, &pagelist)) {
>> + isolated = isolate_folio_to_list(folio, &pagelist);
>> +
>> + /*
>> + * If we succeed to isolate the folio, we grabbed another refcount on
>> + * the folio, so we can safely drop the one we got from get_any_page().
>> + * If we failed to isolate the folio, it means that we cannot go further
>> + * and we will return an error, so drop the reference we got from
>> + * get_any_page() as well.
>> + */
>> + folio_put(folio);
>> +
>> + if (isolated) {
>> ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
>> (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
>> if (!ret) {
>> @@ -2733,15 +2745,6 @@ static int soft_offline_in_use_page(struct page *page)
>> ret = -EBUSY;
>> }
>>
>> - /*
>> - * If we succeed to isolate the folio, we grabbed another refcount on
>> - * the folio, so we can safely drop the one we got from get_any_page().
>> - * If we failed to isolate the folio, it means that we cannot go further
>> - * and we will return an error, so drop the reference we got from
>> - * get_any_page() as well.
>> - */
>> - folio_put(folio);
>> -
>> return ret;
>> }
>
> This works to me.
Good, my bad for the breakage.
Andrew, please help to squash the above changes, thanks.
>
> Thanks.
> .
* Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-21 2:14 ` Kefeng Wang
@ 2024-08-22 6:56 ` Miaohe Lin
0 siblings, 0 replies; 15+ messages in thread
From: Miaohe Lin @ 2024-08-22 6:56 UTC (permalink / raw)
To: Kefeng Wang, Andrew Morton
Cc: David Hildenbrand, Oscar Salvador, Naoya Horiguchi, linux-mm
On 2024/8/21 10:14, Kefeng Wang wrote:
>
>
> On 2024/8/21 10:00, Miaohe Lin wrote:
>> On 2024/8/20 17:46, Kefeng Wang wrote:
>>>
>>>
>>> On 2024/8/20 17:32, Miaohe Lin wrote:
>>>> On 2024/8/17 16:49, Kefeng Wang wrote:
>>>>> Add isolate_folio_to_list() helper to try to isolate HugeTLB,
>>>>> no-LRU movable and LRU folios to a list, which will be reused by
>>>>> do_migrate_range() from memory hotplug soon, also drop the
>>>>> mf_isolate_folio() since we could directly use new helper in
>>>>> the soft_offline_in_use_page().
>>>>>
>>>>> Acked-by: David Hildenbrand <david@redhat.com>
>>>>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>>>>
>>>> Thanks for your patch.
>>>>>
>>> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
>>> index d8298017bd99..64a145a0e29f 100644
>>> --- a/mm/memory-failure.c
>>> +++ b/mm/memory-failure.c
>>> @@ -2671,6 +2671,7 @@ static int soft_offline_in_use_page(struct page *page)
>>> struct folio *folio = page_folio(page);
>>> char const *msg_page[] = {"page", "hugepage"};
>>> bool huge = folio_test_hugetlb(folio);
>>> + bool isolated;
>>> LIST_HEAD(pagelist);
>>> struct migration_target_control mtc = {
>>> .nid = NUMA_NO_NODE,
>>> @@ -2710,7 +2711,18 @@ static int soft_offline_in_use_page(struct page *page)
>>> return 0;
>>> }
>>>
>>> - if (isolate_folio_to_list(folio, &pagelist)) {
>>> + isolated = isolate_folio_to_list(folio, &pagelist);
>>> +
>>> + /*
>>> + * If we succeed to isolate the folio, we grabbed another refcount on
>>> + * the folio, so we can safely drop the one we got from get_any_page().
>>> + * If we failed to isolate the folio, it means that we cannot go further
>>> + * and we will return an error, so drop the reference we got from
>>> + * get_any_page() as well.
>>> + */
>>> + folio_put(folio);
>>> +
>>> + if (isolated) {
>>> ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
>>> (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
>>> if (!ret) {
>>> @@ -2733,15 +2745,6 @@ static int soft_offline_in_use_page(struct page *page)
>>> ret = -EBUSY;
>>> }
>>>
>>> - /*
>>> - * If we succeed to isolate the folio, we grabbed another refcount on
>>> - * the folio, so we can safely drop the one we got from get_any_page().
>>> - * If we failed to isolate the folio, it means that we cannot go further
>>> - * and we will return an error, so drop the reference we got from
>>> - * get_any_page() as well.
>>> - */
>>> - folio_put(folio);
>>> -
>>> return ret;
>>> }
>>
>> This works to me.
>
> Good, my bad for break.
>
> Andrew,please help to squash above changes, thanks.
With the above changes added, this patch looks good to me.
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Tested-by: Miaohe Lin <linmiaohe@huawei.com>
Thanks.
.
* Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-17 8:49 ` [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list() Kefeng Wang
2024-08-20 9:32 ` Miaohe Lin
@ 2024-08-26 14:50 ` David Hildenbrand
2024-08-27 1:19 ` Kefeng Wang
1 sibling, 1 reply; 15+ messages in thread
From: David Hildenbrand @ 2024-08-26 14:50 UTC (permalink / raw)
To: Kefeng Wang, Andrew Morton
Cc: Oscar Salvador, Miaohe Lin, Naoya Horiguchi, linux-mm
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index dbfa910ec24b..53f8429a8ebe 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
> }
> }
>
> +/* Must be called with an elevated refcount on the non-hugetlb folio */
> +bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
> +{
> + bool isolated = false;
No need to initialize this to "false".
> +
> + if (folio_test_hugetlb(folio)) {
> + isolated = isolate_hugetlb(folio, list);
> + } else {
> + bool lru = !__folio_test_movable(folio);
> +
> + if (lru)
> + isolated = folio_isolate_lru(folio);
> + else
> + isolated = isolate_movable_page(&folio->page,
> + ISOLATE_UNEVICTABLE);
> +
> + if (isolated) {
> + list_add(&folio->lru, list);
> + if (lru)
> + node_stat_add_folio(folio, NR_ISOLATED_ANON +
> + folio_is_file_lru(folio));
> + }
> + }
> +
> + return isolated;
Revisiting this patch, we should likely do
bool isolated, lru;
if (folio_test_hugetlb(folio))
return isolate_hugetlb(folio, list);
lru = !__folio_test_movable(folio);
if (lru)
...
if (!isolated)
return false;
list_add(&folio->lru, list);
if (lru)
node_stat_add_folio(folio, NR_ISOLATED_ANON +
folio_is_file_lru(folio));
return true;
to avoid one indentation level and clean up the code flow a bit.
> +}
> +
> static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
> struct folio *folio,
> unsigned long idx)
--
Cheers,
David / dhildenb
* Re: [PATCH v2 4/5] mm: migrate: add isolate_folio_to_list()
2024-08-26 14:50 ` David Hildenbrand
@ 2024-08-27 1:19 ` Kefeng Wang
0 siblings, 0 replies; 15+ messages in thread
From: Kefeng Wang @ 2024-08-27 1:19 UTC (permalink / raw)
To: David Hildenbrand, Andrew Morton
Cc: Oscar Salvador, Miaohe Lin, Naoya Horiguchi, linux-mm
On 2024/8/26 22:50, David Hildenbrand wrote:
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index dbfa910ec24b..53f8429a8ebe 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
>> }
>> }
>> +/* Must be called with an elevated refcount on the non-hugetlb folio */
>> +bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
>> +{
>> + bool isolated = false;
>
> No need to initialize this to "false".
>
>> +
>> + if (folio_test_hugetlb(folio)) {
>> + isolated = isolate_hugetlb(folio, list);
>> + } else {
>> + bool lru = !__folio_test_movable(folio);
>> +
>> + if (lru)
>> + isolated = folio_isolate_lru(folio);
>> + else
>> + isolated = isolate_movable_page(&folio->page,
>> + ISOLATE_UNEVICTABLE);
>> +
>> + if (isolated) {
>> + list_add(&folio->lru, list);
>> + if (lru)
>> + node_stat_add_folio(folio, NR_ISOLATED_ANON +
>> + folio_is_file_lru(folio));
>> + }
>> + }
>> +
>> + return isolated;
>
> Revisiting this patch, we should likely do
>
> bool isolated, lru;
>
> if (folio_test_hugetlb(folio))
> return isolate_hugetlb(folio, list);
>
> lru = !__folio_test_movable(folio);
> if (lru)
> ...
>
> if (!isolated)
> return false;
>
> list_add(&folio->lru, list);
> if (lru)
> node_stat_add_folio(folio, NR_ISOLATED_ANON +
> folio_is_file_lru(folio));
> return true;
>
>
> to avoid one indentation level and clean up the code flow a bit.
Sure, will rewrite according to the above pattern.
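For reference, applying that pattern to the helper as posted would look roughly like the sketch below (a guess at the eventual v3, assembled from the code in this thread plus David's suggestion, not a tested patch):

/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
        bool isolated, lru;

        if (folio_test_hugetlb(folio))
                return isolate_hugetlb(folio, list);

        lru = !__folio_test_movable(folio);
        if (lru)
                isolated = folio_isolate_lru(folio);
        else
                isolated = isolate_movable_page(&folio->page,
                                                ISOLATE_UNEVICTABLE);

        if (!isolated)
                return false;

        list_add(&folio->lru, list);
        if (lru)
                node_stat_add_folio(folio, NR_ISOLATED_ANON +
                                    folio_is_file_lru(folio));
        return true;
}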
>
>> +}
>> +
>> static bool try_to_map_unused_to_zeropage(struct
>> page_vma_mapped_walk *pvmw,
>> struct folio *folio,
>> unsigned long idx)
>