* [PATCH 0/5] Convert various functions in memory.c to use folios
@ 2023-01-16 19:18 Matthew Wilcox (Oracle)
2023-01-16 19:18 ` [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio() Matthew Wilcox (Oracle)
` (5 more replies)
0 siblings, 6 replies; 14+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-01-16 19:18 UTC (permalink / raw)
To: linux-mm, Andrew Morton; +Cc: Matthew Wilcox (Oracle)
Patches 2 & 3 depend on patch 1, and patch 5 depends on patch 4, but I
think 4+5 are independent of 1-3. Tested by running xfstests, so the
coverage is not really MM-specific, but it does boot! Applies to next-20230116.
Matthew Wilcox (Oracle) (5):
mm: Add vma_alloc_zeroed_movable_folio()
mm: Convert do_anonymous_page() to use a folio
mm: Convert wp_page_copy() to use folios
mm: Use a folio in copy_pte_range()
mm: Use a folio in copy_present_pte()
arch/alpha/include/asm/page.h | 5 +-
arch/arm64/include/asm/page.h | 4 +-
arch/arm64/mm/fault.c | 4 +-
arch/ia64/include/asm/page.h | 14 ++-
arch/m68k/include/asm/page_no.h | 5 +-
arch/s390/include/asm/page.h | 5 +-
arch/x86/include/asm/page.h | 5 +-
include/linux/highmem.h | 33 ++++---
mm/memory.c | 159 ++++++++++++++++----------------
9 files changed, 115 insertions(+), 119 deletions(-)
--
2.35.1
* [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio()
2023-01-16 19:18 [PATCH 0/5] Convert various functions in memory.c to use folios Matthew Wilcox (Oracle)
@ 2023-01-16 19:18 ` Matthew Wilcox (Oracle)
2023-01-19 10:16 ` Mike Rapoport
2023-01-19 18:22 ` Zi Yan
2023-01-16 19:18 ` [PATCH 2/5] mm: Convert do_anonymous_page() to use a folio Matthew Wilcox (Oracle)
` (4 subsequent siblings)
5 siblings, 2 replies; 14+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-01-16 19:18 UTC (permalink / raw)
To: linux-mm, Andrew Morton; +Cc: Matthew Wilcox (Oracle)
Replace alloc_zeroed_user_highpage_movable(). The main difference is
returning a folio containing a single page instead of returning the
page, but take the opportunity to rename the function to match other
allocation functions a little better and rewrite the documentation
to place more emphasis on the zeroing rather than the highmem aspect.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
arch/alpha/include/asm/page.h | 5 ++---
arch/arm64/include/asm/page.h | 4 ++--
arch/arm64/mm/fault.c | 4 ++--
arch/ia64/include/asm/page.h | 14 ++++++--------
arch/m68k/include/asm/page_no.h | 5 ++---
arch/s390/include/asm/page.h | 5 ++---
arch/x86/include/asm/page.h | 5 ++---
include/linux/highmem.h | 33 ++++++++++++++++-----------------
mm/memory.c | 16 ++++++++++------
9 files changed, 44 insertions(+), 47 deletions(-)
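
As a quick caller-side illustration (a minimal sketch, not part of the
diff itself; it mirrors the mm/memory.c hunk at the end of this patch):

	/* before: the allocation returned a page */
	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	if (!page)
		goto oom;

	/* after: the allocation returns an order-0 folio */
	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
	if (!folio)
		goto oom;
	page = &folio->page;	/* only where the precise page is still needed */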
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 8f3f5eecba28..bc5256fba8f0 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -17,9 +17,8 @@
extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
extern void copy_page(void * _to, void * _from);
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 993a27ea6f54..2312e6ee595f 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -29,9 +29,9 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr);
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
void tag_clear_highpage(struct page *to);
#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 596f46dabe4e..f4cb0f85ccf4 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -925,7 +925,7 @@ NOKPROBE_SYMBOL(do_debug_exception);
/*
* Used during anonymous page fault handling.
*/
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr)
{
gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
@@ -938,7 +938,7 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
if (vma->vm_flags & VM_MTE)
flags |= __GFP_ZEROTAGS;
- return alloc_page_vma(flags, vma, vaddr);
+ return vma_alloc_folio(flags, 0, vma, vaddr, false);
}
void tag_clear_highpage(struct page *page)
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 1b990466d540..ba0b365cf2b2 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -82,17 +82,15 @@ do { \
} while (0)
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
({ \
- struct page *page = alloc_page_vma( \
- GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr); \
- if (page) \
- flush_dcache_page(page); \
- page; \
+ struct folio *folio = vma_alloc_folio( \
+ GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
+ if (folio) \
+ flush_dcache_folio(folio); \
+ folio; \
})
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
-
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index c9d0d84158a4..abd2c3aeb015 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -13,9 +13,8 @@ extern unsigned long memory_end;
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
#define __pa(vaddr) ((unsigned long)(vaddr))
#define __va(paddr) ((void *)((unsigned long)(paddr)))
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 61dea67bb9c7..8a2a3b5d1e29 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -73,9 +73,8 @@ static inline void copy_page(void *to, void *from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
/*
* These are used to make use of C type-checking..
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 9cc82f305f4b..d18e5c332cb9 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -34,9 +34,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
copy_page(to, from);
}
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
#ifndef __pa
#define __pa(x) __phys_addr((unsigned long)(x))
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 56703082f803..9fa462561e05 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -208,31 +208,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
}
#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#ifndef vma_alloc_zeroed_movable_folio
/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
*
- * Returns: The allocated and zeroed HIGHMEM page
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address. It may be allocated from highmem or
+ * the movable zone. An architecture may provide its own implementation.
*
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
- * implementation.
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
*/
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
+ struct folio *folio;
- if (page)
- clear_user_highpage(page, vaddr);
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+ if (folio)
+ clear_user_highpage(&folio->page, vaddr);
- return page;
+ return folio;
}
#endif
diff --git a/mm/memory.c b/mm/memory.c
index f7613650efd4..4cb2cd809b18 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3056,10 +3056,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
goto oom;
if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
- new_page = alloc_zeroed_user_highpage_movable(vma,
- vmf->address);
- if (!new_page)
+ struct folio *new_folio;
+
+ new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+ if (!new_folio)
goto oom;
+ new_page = &new_folio->page;
} else {
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
vmf->address);
@@ -3998,6 +4000,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page;
+ struct folio *folio;
vm_fault_t ret = 0;
pte_t entry;
@@ -4047,11 +4050,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
- if (!page)
+ folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+ if (!folio)
goto oom;
- if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+ page = &folio->page;
+ if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto oom_free_page;
cgroup_throttle_swaprate(page, GFP_KERNEL);
--
2.35.1
* [PATCH 2/5] mm: Convert do_anonymous_page() to use a folio
2023-01-16 19:18 [PATCH 0/5] Convert various functions in memory.c to use folios Matthew Wilcox (Oracle)
2023-01-16 19:18 ` [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio() Matthew Wilcox (Oracle)
@ 2023-01-16 19:18 ` Matthew Wilcox (Oracle)
2023-01-19 18:23 ` Zi Yan
2023-01-16 19:18 ` [PATCH 3/5] mm: Convert wp_page_copy() to use folios Matthew Wilcox (Oracle)
` (3 subsequent siblings)
5 siblings, 1 reply; 14+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-01-16 19:18 UTC (permalink / raw)
To: linux-mm, Andrew Morton; +Cc: Matthew Wilcox (Oracle)
Removes six calls to compound_head(); some inline and some external.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/memory.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
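
For illustration (a sketch, not part of the diff): put_page() is
essentially folio_put(page_folio(page)), so every page-based call
re-derives the head page, while the folio calls do that lookup once:

	/* page API: each call hides a head-page lookup */
	put_page(page);
	__SetPageUptodate(page);

	/* folio API: the lookup happens once, then is reused */
	struct folio *folio = page_folio(page);
	__folio_mark_uptodate(folio);
	folio_put(folio);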
diff --git a/mm/memory.c b/mm/memory.c
index 4cb2cd809b18..6099c786b5f9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3999,7 +3999,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *page;
struct folio *folio;
vm_fault_t ret = 0;
pte_t entry;
@@ -4054,19 +4053,18 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
if (!folio)
goto oom;
- page = &folio->page;
if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto oom_free_page;
- cgroup_throttle_swaprate(page, GFP_KERNEL);
+ cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);
/*
- * The memory barrier inside __SetPageUptodate makes sure that
+ * The memory barrier inside __folio_mark_uptodate makes sure that
* preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
- __SetPageUptodate(page);
+ __folio_mark_uptodate(folio);
- entry = mk_pte(page, vma->vm_page_prot);
+ entry = mk_pte(&folio->page, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4085,13 +4083,13 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
- put_page(page);
+ folio_put(folio);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, vmf->address);
- lru_cache_add_inactive_or_unevictable(page, vma);
+ folio_add_new_anon_rmap(folio, vma, vmf->address);
+ folio_add_lru_vma(folio, vma);
setpte:
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
@@ -4101,10 +4099,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
release:
- put_page(page);
+ folio_put(folio);
goto unlock;
oom_free_page:
- put_page(page);
+ folio_put(folio);
oom:
return VM_FAULT_OOM;
}
--
2.35.1
* [PATCH 3/5] mm: Convert wp_page_copy() to use folios
2023-01-16 19:18 [PATCH 0/5] Convert various functions in memory.c to use folios Matthew Wilcox (Oracle)
2023-01-16 19:18 ` [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio() Matthew Wilcox (Oracle)
2023-01-16 19:18 ` [PATCH 2/5] mm: Convert do_anonymous_page() to use a folio Matthew Wilcox (Oracle)
@ 2023-01-16 19:18 ` Matthew Wilcox (Oracle)
2023-01-19 18:29 ` Zi Yan
2023-01-16 19:18 ` [PATCH 4/5] mm: Use a folio in copy_pte_range() Matthew Wilcox (Oracle)
` (2 subsequent siblings)
5 siblings, 1 reply; 14+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-01-16 19:18 UTC (permalink / raw)
To: linux-mm, Andrew Morton; +Cc: Matthew Wilcox (Oracle)
Use new_folio instead of new_page throughout, because we allocated it
and know it's an order-0 folio. Most old_page uses become old_folio,
but use vmf->page where we need the precise page.
---
mm/memory.c | 65 ++++++++++++++++++++++++++---------------------------
1 file changed, 32 insertions(+), 33 deletions(-)
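
The shape of the split, condensed (illustration only, drawn from the
hunks below):

	struct folio *old_folio = NULL;

	if (vmf->page)
		old_folio = page_folio(vmf->page);

	/* folio operations: flag tests, refcounting, LRU */
	if (!folio_test_anon(old_folio))
		...
	folio_put(old_folio);

	/* the precise page is still used where it matters */
	__wp_page_copy_user(&new_folio->page, vmf->page, vmf);
	page_remove_rmap(vmf->page, vma, false);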
diff --git a/mm/memory.c b/mm/memory.c
index 6099c786b5f9..dc8a6fd45958 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3043,8 +3043,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
- struct page *old_page = vmf->page;
- struct page *new_page = NULL;
+ struct folio *old_folio = NULL;
+ struct folio *new_folio = NULL;
pte_t entry;
int page_copied = 0;
struct mmu_notifier_range range;
@@ -3052,23 +3052,22 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
delayacct_wpcopy_start();
+ if (vmf->page)
+ old_folio = page_folio(vmf->page);
if (unlikely(anon_vma_prepare(vma)))
goto oom;
if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
- struct folio *new_folio;
-
new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
if (!new_folio)
goto oom;
- new_page = &new_folio->page;
} else {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
- vmf->address);
- if (!new_page)
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+ vmf->address, false);
+ if (!new_folio)
goto oom;
- ret = __wp_page_copy_user(new_page, old_page, vmf);
+ ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
if (ret) {
/*
* COW failed, if the fault was solved by other,
@@ -3077,21 +3076,21 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* from the second attempt.
* The -EHWPOISON case will not be retried.
*/
- put_page(new_page);
- if (old_page)
- put_page(old_page);
+ folio_put(new_folio);
+ if (old_folio)
+ folio_put(old_folio);
delayacct_wpcopy_end();
return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
}
- kmsan_copy_page_meta(new_page, old_page);
+ kmsan_copy_page_meta(&new_folio->page, vmf->page);
}
- if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
+ if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
goto oom_free_new;
- cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+ cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
- __SetPageUptodate(new_page);
+ __folio_mark_uptodate(new_folio);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
vmf->address & PAGE_MASK,
@@ -3103,16 +3102,16 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
*/
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
- if (old_page) {
- if (!PageAnon(old_page)) {
- dec_mm_counter(mm, mm_counter_file(old_page));
+ if (old_folio) {
+ if (!folio_test_anon(old_folio)) {
+ dec_mm_counter(mm, mm_counter_file(&old_folio->page));
inc_mm_counter(mm, MM_ANONPAGES);
}
} else {
inc_mm_counter(mm, MM_ANONPAGES);
}
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
- entry = mk_pte(new_page, vma->vm_page_prot);
+ entry = mk_pte(&new_folio->page, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
if (unlikely(unshare)) {
if (pte_soft_dirty(vmf->orig_pte))
@@ -3131,8 +3130,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* some TLBs while the old PTE remains in others.
*/
ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
- page_add_new_anon_rmap(new_page, vma, vmf->address);
- lru_cache_add_inactive_or_unevictable(new_page, vma);
+ folio_add_new_anon_rmap(new_folio, vma, vmf->address);
+ folio_add_lru_vma(new_folio, vma);
/*
* We call the notify macro here because, when using secondary
* mmu page tables (such as kvm shadow page tables), we want the
@@ -3141,7 +3140,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
BUG_ON(unshare && pte_write(entry));
set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
update_mmu_cache(vma, vmf->address, vmf->pte);
- if (old_page) {
+ if (old_folio) {
/*
* Only after switching the pte to the new page may
* we remove the mapcount here. Otherwise another
@@ -3164,18 +3163,18 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
- page_remove_rmap(old_page, vma, false);
+ page_remove_rmap(vmf->page, vma, false);
}
/* Free the old page.. */
- new_page = old_page;
+ new_folio = old_folio;
page_copied = 1;
} else {
update_mmu_tlb(vma, vmf->address, vmf->pte);
}
- if (new_page)
- put_page(new_page);
+ if (new_folio)
+ folio_put(new_folio);
pte_unmap_unlock(vmf->pte, vmf->ptl);
/*
@@ -3183,19 +3182,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* the above ptep_clear_flush_notify() did already call it.
*/
mmu_notifier_invalidate_range_only_end(&range);
- if (old_page) {
+ if (old_folio) {
if (page_copied)
- free_swap_cache(old_page);
- put_page(old_page);
+ free_swap_cache(&old_folio->page);
+ folio_put(old_folio);
}
delayacct_wpcopy_end();
return 0;
oom_free_new:
- put_page(new_page);
+ folio_put(new_folio);
oom:
- if (old_page)
- put_page(old_page);
+ if (old_folio)
+ folio_put(old_folio);
delayacct_wpcopy_end();
return VM_FAULT_OOM;
--
2.35.1
* [PATCH 4/5] mm: Use a folio in copy_pte_range()
2023-01-16 19:18 [PATCH 0/5] Convert various functions in memory.c to use folios Matthew Wilcox (Oracle)
` (2 preceding siblings ...)
2023-01-16 19:18 ` [PATCH 3/5] mm: Convert wp_page_copy() to use folios Matthew Wilcox (Oracle)
@ 2023-01-16 19:18 ` Matthew Wilcox (Oracle)
2023-01-19 18:32 ` Zi Yan
2023-01-16 19:18 ` [PATCH 5/5] mm: Use a folio in copy_present_pte() Matthew Wilcox (Oracle)
2023-01-21 9:22 ` [PATCH 0/5] Convert various functions in memory.c to use folios Mike Rapoport
5 siblings, 1 reply; 14+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-01-16 19:18 UTC (permalink / raw)
To: linux-mm, Andrew Morton; +Cc: Matthew Wilcox (Oracle)
Allocate an order-0 folio instead of a page and pass it all the way
down the call chain. Removes dozens of calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/memory.c | 51 +++++++++++++++++++++++++--------------------------
1 file changed, 25 insertions(+), 26 deletions(-)
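
One detail worth spelling out (an illustrative sketch, not part of the
diff): mm_counter(page) re-checks PageAnon(page), which involves a head
page lookup, so once the surrounding branch already knows whether the
page is anonymous the counter can be named directly:

	/* before: re-derives the page type on every increment */
	rss[mm_counter(page)]++;

	/* after: the branch has already decided */
	rss[MM_ANONPAGES]++;		/* anonymous branch */
	rss[mm_counter_file(page)]++;	/* file-backed branch */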
diff --git a/mm/memory.c b/mm/memory.c
index dc8a6fd45958..7aa741a3cd9f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -863,13 +863,13 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
- struct page **prealloc, struct page *page)
+ struct folio **prealloc, struct page *page)
{
- struct page *new_page;
+ struct folio *new_folio;
pte_t pte;
- new_page = *prealloc;
- if (!new_page)
+ new_folio = *prealloc;
+ if (!new_folio)
return -EAGAIN;
/*
@@ -877,14 +877,14 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
* over and copy the page & arm it.
*/
*prealloc = NULL;
- copy_user_highpage(new_page, page, addr, src_vma);
- __SetPageUptodate(new_page);
- page_add_new_anon_rmap(new_page, dst_vma, addr);
- lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
- rss[mm_counter(new_page)]++;
+ copy_user_highpage(&new_folio->page, page, addr, src_vma);
+ __folio_mark_uptodate(new_folio);
+ folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+ folio_add_lru_vma(new_folio, dst_vma);
+ rss[MM_ANONPAGES]++;
/* All done, just insert the new page copy in the child */
- pte = mk_pte(new_page, dst_vma->vm_page_prot);
+ pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
if (userfaultfd_pte_wp(dst_vma, *src_pte))
/* Uffd-wp needs to be delivered to dest pte as well */
@@ -900,7 +900,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
- struct page **prealloc)
+ struct folio **prealloc)
{
struct mm_struct *src_mm = src_vma->vm_mm;
unsigned long vm_flags = src_vma->vm_flags;
@@ -922,11 +922,11 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
addr, rss, prealloc, page);
}
- rss[mm_counter(page)]++;
+ rss[MM_ANONPAGES]++;
} else if (page) {
get_page(page);
page_dup_file_rmap(page, false);
- rss[mm_counter(page)]++;
+ rss[mm_counter_file(page)]++;
}
/*
@@ -954,23 +954,22 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
return 0;
}
-static inline struct page *
-page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
- unsigned long addr)
+static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
+ struct vm_area_struct *vma, unsigned long addr)
{
- struct page *new_page;
+ struct folio *new_folio;
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
- if (!new_page)
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ if (!new_folio)
return NULL;
- if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
- put_page(new_page);
+ if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
+ folio_put(new_folio);
return NULL;
}
- cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+ cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
- return new_page;
+ return new_folio;
}
static int
@@ -986,7 +985,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
int progress, ret = 0;
int rss[NR_MM_COUNTERS];
swp_entry_t entry = (swp_entry_t){0};
- struct page *prealloc = NULL;
+ struct folio *prealloc = NULL;
again:
progress = 0;
@@ -1056,7 +1055,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
* will allocate page according to address). This
* could only happen if one pinned pte changed.
*/
- put_page(prealloc);
+ folio_put(prealloc);
prealloc = NULL;
}
progress += 8;
@@ -1093,7 +1092,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
goto again;
out:
if (unlikely(prealloc))
- put_page(prealloc);
+ folio_put(prealloc);
return ret;
}
--
2.35.1
* [PATCH 5/5] mm: Use a folio in copy_present_pte()
2023-01-16 19:18 [PATCH 0/5] Convert various functions in memory.c to use folios Matthew Wilcox (Oracle)
` (3 preceding siblings ...)
2023-01-16 19:18 ` [PATCH 4/5] mm: Use a folio in copy_pte_range() Matthew Wilcox (Oracle)
@ 2023-01-16 19:18 ` Matthew Wilcox (Oracle)
2023-01-19 18:33 ` Zi Yan
2023-01-21 9:22 ` [PATCH 0/5] Convert various functions in memory.c to use folios Mike Rapoport
5 siblings, 1 reply; 14+ messages in thread
From: Matthew Wilcox (Oracle) @ 2023-01-16 19:18 UTC (permalink / raw)
To: linux-mm, Andrew Morton; +Cc: Matthew Wilcox (Oracle)
We still have to keep the page around because we need to know which
page in the folio we're copying, but we can replace five implicit calls
to compound_head() with one.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/memory.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
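
Condensed shape of the result (illustration only; see the hunks below):

	page = vm_normal_page(src_vma, addr, pte);
	if (page)
		folio = page_folio(page);	/* the one remaining head lookup */

	folio_get(folio);			/* was get_page(page) */
	if (folio_test_anon(folio))		/* was PageAnon(page) */
		...

	/* still per-page state: */
	page_try_dup_anon_rmap(page, false, src_vma);
	PageAnonExclusive(page);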
diff --git a/mm/memory.c b/mm/memory.c
index 7aa741a3cd9f..ec833a2e0601 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -906,25 +906,28 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
unsigned long vm_flags = src_vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
+ struct folio *folio;
page = vm_normal_page(src_vma, addr, pte);
- if (page && PageAnon(page)) {
+ if (page)
+ folio = page_folio(page);
+ if (page && folio_test_anon(folio)) {
/*
* If this page may have been pinned by the parent process,
* copy the page immediately for the child so that we'll always
* guarantee the pinned page won't be randomly replaced in the
* future.
*/
- get_page(page);
+ folio_get(folio);
if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
- /* Page maybe pinned, we have to copy. */
- put_page(page);
+ /* Page may be pinned, we have to copy. */
+ folio_put(folio);
return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
addr, rss, prealloc, page);
}
rss[MM_ANONPAGES]++;
} else if (page) {
- get_page(page);
+ folio_get(folio);
page_dup_file_rmap(page, false);
rss[mm_counter_file(page)]++;
}
@@ -937,7 +940,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = pte_wrprotect(pte);
}
- VM_BUG_ON(page && PageAnon(page) && PageAnonExclusive(page));
+ VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
/*
* If it's a shared mapping, mark it clean in
--
2.35.1
* Re: [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio()
2023-01-16 19:18 ` [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio() Matthew Wilcox (Oracle)
@ 2023-01-19 10:16 ` Mike Rapoport
2023-01-19 13:57 ` Matthew Wilcox
2023-01-19 18:22 ` Zi Yan
1 sibling, 1 reply; 14+ messages in thread
From: Mike Rapoport @ 2023-01-19 10:16 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm, Andrew Morton
On Mon, Jan 16, 2023 at 07:18:09PM +0000, Matthew Wilcox (Oracle) wrote:
> Replace alloc_zeroed_user_highpage_movable(). The main difference is
> returning a folio containing a single page instead of returning the
> page, but take the opportunity to rename the function to match other
> allocation functions a little better and rewrite the documentation
> to place more emphasis on the zeroing rather than the highmem aspect.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> arch/alpha/include/asm/page.h | 5 ++---
> arch/arm64/include/asm/page.h | 4 ++--
> arch/arm64/mm/fault.c | 4 ++--
> arch/ia64/include/asm/page.h | 14 ++++++--------
> arch/m68k/include/asm/page_no.h | 5 ++---
> arch/s390/include/asm/page.h | 5 ++---
> arch/x86/include/asm/page.h | 5 ++---
> include/linux/highmem.h | 33 ++++++++++++++++-----------------
> mm/memory.c | 16 ++++++++++------
> 9 files changed, 44 insertions(+), 47 deletions(-)
...
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 56703082f803..9fa462561e05 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -208,31 +208,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
> }
> #endif
>
> -#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
> +#ifndef vma_alloc_zeroed_movable_folio
> /**
> - * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
> - * @vma: The VMA the page is to be allocated for
> - * @vaddr: The virtual address the page will be inserted into
> + * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
> + * @vma: The VMA the page is to be allocated for.
> + * @vaddr: The virtual address the page will be inserted into.
> *
> - * Returns: The allocated and zeroed HIGHMEM page
> + * This function will allocate a page suitable for inserting into this
> + * VMA at this virtual address. It may be allocated from highmem or
> + * the movable zone. An architecture may provide its own implementation.
> *
> - * This function will allocate a page for a VMA that the caller knows will
> - * be able to migrate in the future using move_pages() or reclaimed
> - *
> - * An architecture may override this function by defining
> - * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
> - * implementation.
> + * Return: A folio containing one allocated and zeroed page or NULL if
> + * we are out of memory.
> */
> -static inline struct page *
> -alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
> +static inline
> +struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
> unsigned long vaddr)
> {
> - struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
> + struct folio *folio;
>
> - if (page)
> - clear_user_highpage(page, vaddr);
> + folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
Add __GFP_ZERO and simply return vma_alloc_folio(...)?
> + if (folio)
> + clear_user_highpage(&folio->page, vaddr);
>
> - return page;
> + return folio;
> }
> #endif
--
Sincerely yours,
Mike.
* Re: [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio()
2023-01-19 10:16 ` Mike Rapoport
@ 2023-01-19 13:57 ` Matthew Wilcox
0 siblings, 0 replies; 14+ messages in thread
From: Matthew Wilcox @ 2023-01-19 13:57 UTC (permalink / raw)
To: Mike Rapoport; +Cc: linux-mm, Andrew Morton
On Thu, Jan 19, 2023 at 12:16:12PM +0200, Mike Rapoport wrote:
> > +static inline
> > +struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
> > unsigned long vaddr)
> > {
> > - struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
> > + struct folio *folio;
> >
> > - if (page)
> > - clear_user_highpage(page, vaddr);
> > + folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
>
> Add __GFP_ZERO and simply return vma_alloc_folio(...)?
>
> > + if (folio)
> > + clear_user_highpage(&folio->page, vaddr);
This page is about to be mapped into userspace; the kernel isn't going
to touch the data in it. So we don't care if the kernel's view of this
page contains zeroes, only that the userspace view of this page contains
zeroes. The architectures that override this do exactly what you
suggest, but they know they have a physical cache and so that works for
them. For virtually indexed caches, this is more efficient.
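
To spell out the two approaches being compared (condensed from patch 1,
illustration only):

	/* overriding architectures (alpha, s390, x86, ...): zero at
	 * allocation time, through the kernel's own mapping */
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0,
				vma, vaddr, false);

	/* generic fallback: zero through a mapping at the user virtual
	 * address, so the user-visible alias of a virtually indexed
	 * cache ends up holding the zeroes */
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);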
* Re: [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio()
2023-01-16 19:18 ` [PATCH 1/5] mm: Add vma_alloc_zeroed_movable_folio() Matthew Wilcox (Oracle)
2023-01-19 10:16 ` Mike Rapoport
@ 2023-01-19 18:22 ` Zi Yan
1 sibling, 0 replies; 14+ messages in thread
From: Zi Yan @ 2023-01-19 18:22 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm, Andrew Morton
On 16 Jan 2023, at 14:18, Matthew Wilcox (Oracle) wrote:
> Replace alloc_zeroed_user_highpage_movable(). The main difference is
> returning a folio containing a single page instead of returning the
> page, but take the opportunity to rename the function to match other
> allocation functions a little better and rewrite the documentation
> to place more emphasis on the zeroing rather than the highmem aspect.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> arch/alpha/include/asm/page.h | 5 ++---
> arch/arm64/include/asm/page.h | 4 ++--
> arch/arm64/mm/fault.c | 4 ++--
> arch/ia64/include/asm/page.h | 14 ++++++--------
> arch/m68k/include/asm/page_no.h | 5 ++---
> arch/s390/include/asm/page.h | 5 ++---
> arch/x86/include/asm/page.h | 5 ++---
> include/linux/highmem.h | 33 ++++++++++++++++-----------------
> mm/memory.c | 16 ++++++++++------
> 9 files changed, 44 insertions(+), 47 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan, Zi
* Re: [PATCH 2/5] mm: Convert do_anonymous_page() to use a folio
2023-01-16 19:18 ` [PATCH 2/5] mm: Convert do_anonymous_page() to use a folio Matthew Wilcox (Oracle)
@ 2023-01-19 18:23 ` Zi Yan
0 siblings, 0 replies; 14+ messages in thread
From: Zi Yan @ 2023-01-19 18:23 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm, Andrew Morton
On 16 Jan 2023, at 14:18, Matthew Wilcox (Oracle) wrote:
> Removes six calls to compound_head(); some inline and some external.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> mm/memory.c | 20 +++++++++-----------
> 1 file changed, 9 insertions(+), 11 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan, Zi
* Re: [PATCH 3/5] mm: Convert wp_page_copy() to use folios
2023-01-16 19:18 ` [PATCH 3/5] mm: Convert wp_page_copy() to use folios Matthew Wilcox (Oracle)
@ 2023-01-19 18:29 ` Zi Yan
0 siblings, 0 replies; 14+ messages in thread
From: Zi Yan @ 2023-01-19 18:29 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm, Andrew Morton
On 16 Jan 2023, at 14:18, Matthew Wilcox (Oracle) wrote:
> Use new_folio instead of new_page throughout, because we allocated it
> and know it's an order-0 folio. Most old_page uses become old_folio,
> but use vmf->page where we need the precise page.
> ---
> mm/memory.c | 65 ++++++++++++++++++++++++++---------------------------
> 1 file changed, 32 insertions(+), 33 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan, Zi
* Re: [PATCH 4/5] mm: Use a folio in copy_pte_range()
2023-01-16 19:18 ` [PATCH 4/5] mm: Use a folio in copy_pte_range() Matthew Wilcox (Oracle)
@ 2023-01-19 18:32 ` Zi Yan
0 siblings, 0 replies; 14+ messages in thread
From: Zi Yan @ 2023-01-19 18:32 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm, Andrew Morton
On 16 Jan 2023, at 14:18, Matthew Wilcox (Oracle) wrote:
> Allocate an order-0 folio instead of a page and pass it all the way
> down the call chain. Removes dozens of calls to compound_head().
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> mm/memory.c | 51 +++++++++++++++++++++++++--------------------------
> 1 file changed, 25 insertions(+), 26 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan, Zi
* Re: [PATCH 5/5] mm: Use a folio in copy_present_pte()
2023-01-16 19:18 ` [PATCH 5/5] mm: Use a folio in copy_present_pte() Matthew Wilcox (Oracle)
@ 2023-01-19 18:33 ` Zi Yan
0 siblings, 0 replies; 14+ messages in thread
From: Zi Yan @ 2023-01-19 18:33 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm, Andrew Morton
On 16 Jan 2023, at 14:18, Matthew Wilcox (Oracle) wrote:
> We still have to keep the page around because we need to know which
> page in the folio we're copying, but we can replace five implicit calls
> to compound_head() with one.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> mm/memory.c | 15 +++++++++------
> 1 file changed, 9 insertions(+), 6 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan, Zi
* Re: [PATCH 0/5] Convert various functions in memory.c to use folios
2023-01-16 19:18 [PATCH 0/5] Convert various functions in memory.c to use folios Matthew Wilcox (Oracle)
` (4 preceding siblings ...)
2023-01-16 19:18 ` [PATCH 5/5] mm: Use a folio in copy_present_pte() Matthew Wilcox (Oracle)
@ 2023-01-21 9:22 ` Mike Rapoport
5 siblings, 0 replies; 14+ messages in thread
From: Mike Rapoport @ 2023-01-21 9:22 UTC (permalink / raw)
To: Matthew Wilcox (Oracle); +Cc: linux-mm, Andrew Morton
On Mon, Jan 16, 2023 at 07:18:08PM +0000, Matthew Wilcox (Oracle) wrote:
> Patches 2 & 3 depend on patch 1, and patch 5 depends on patch 4, but I
> think 4+5 are independent of 1-3. Tested by running xfstests, so the
> coverage is not really MM-specific, but it does boot! Applies to next-20230116.
>
> Matthew Wilcox (Oracle) (5):
> mm: Add vma_alloc_zeroed_movable_folio()
> mm: Convert do_anonymous_page() to use a folio
> mm: Convert wp_page_copy() to use folios
> mm: Use a folio in copy_pte_range()
> mm: Use a folio in copy_present_pte()
>
> arch/alpha/include/asm/page.h | 5 +-
> arch/arm64/include/asm/page.h | 4 +-
> arch/arm64/mm/fault.c | 4 +-
> arch/ia64/include/asm/page.h | 14 ++-
> arch/m68k/include/asm/page_no.h | 5 +-
> arch/s390/include/asm/page.h | 5 +-
> arch/x86/include/asm/page.h | 5 +-
> include/linux/highmem.h | 33 ++++---
> mm/memory.c | 159 ++++++++++++++++----------------
> 9 files changed, 115 insertions(+), 119 deletions(-)
For the series
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
> --
> 2.35.1
>
>
--
Sincerely yours,
Mike.