linux-mm.kvack.org archive mirror
* [PATCH 0/4] mm: convert to folio_alloc_mpol()
From: Kefeng Wang @ 2024-05-15  7:07 UTC
  To: Andrew Morton; +Cc: Hugh Dickins, linux-mm, Kefeng Wang

Kefeng Wang (4):
  mm: add folio_alloc_mpol()
  mm: mempolicy: use folio_alloc_mpol_noprof() in
    vma_alloc_folio_noprof()
  mm: mempolicy: use folio_alloc_mpol() in
    alloc_migration_target_by_mpol()
  mm: shmem: use folio_alloc_mpol() in shmem_alloc_folio()

 include/linux/gfp.h |  8 ++++++++
 mm/mempolicy.c      | 18 +++++++++++-------
 mm/shmem.c          | 32 +++++++++-----------------------
 3 files changed, 28 insertions(+), 30 deletions(-)

-- 
2.41.0




* [PATCH 1/4] mm: add folio_alloc_mpol()
From: Kefeng Wang @ 2024-05-15  7:07 UTC
  To: Andrew Morton; +Cc: Hugh Dickins, linux-mm, Kefeng Wang

Add a new folio_alloc_mpol() helper, which behaves like folio_alloc()
but allocates the folio according to a NUMA mempolicy.
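
For illustration, a caller is expected to look something like the
following (hypothetical sketch, not part of this patch; the call-site
conversions later in this series follow the same pattern):

	/*
	 * Hypothetical caller: allocate an order-0 folio according to
	 * @mpol, with @ilx as the interleave index and the current
	 * node as the preferred node, as with alloc_pages_mpol().
	 */
	static struct folio *example_alloc(struct mempolicy *mpol, pgoff_t ilx)
	{
		return folio_alloc_mpol(GFP_HIGHUSER_MOVABLE, 0, mpol, ilx,
					numa_node_id());
	}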

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/gfp.h | 8 ++++++++
 mm/mempolicy.c      | 7 +++++++
 2 files changed, 15 insertions(+)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7f9691d375f0..f53f76e0b17e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -303,6 +303,8 @@ struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
 struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *mpol, pgoff_t ilx, int nid);
 struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *mpol, pgoff_t ilx, int nid);
 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage);
 #else
@@ -319,6 +321,11 @@ static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 {
 	return __folio_alloc_node(gfp, order, numa_node_id());
 }
+static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *mpol, pgoff_t ilx, int nid)
+{
+	return folio_alloc_noprof(gfp, order);
+}
 #define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage)		\
 	folio_alloc_noprof(gfp, order)
 #endif
@@ -326,6 +333,7 @@ static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 #define alloc_pages(...)			alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
 #define alloc_pages_mpol(...)			alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__))
 #define folio_alloc(...)			alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
+#define folio_alloc_mpol(...)			alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
 #define vma_alloc_folio(...)			alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
 
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index aec756ae5637..69c431ef15d5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2277,6 +2277,13 @@ struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 	return page;
 }
 
+struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *pol, pgoff_t ilx, int nid)
+{
+	return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP,
+							order, pol, ilx, nid));
+}
+
 /**
  * vma_alloc_folio - Allocate a folio for a VMA.
  * @gfp: GFP flags.
-- 
2.41.0




* [PATCH 2/4] mm: mempolicy: use folio_alloc_mpol_noprof() in vma_alloc_folio_noprof()
From: Kefeng Wang @ 2024-05-15  7:07 UTC
  To: Andrew Morton; +Cc: Hugh Dickins, linux-mm, Kefeng Wang

Convert vma_alloc_folio_noprof() to use folio_alloc_mpol_noprof(), so
that it uses folios throughout.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/mempolicy.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 69c431ef15d5..205d129c6744 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2305,13 +2305,12 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct
 {
 	struct mempolicy *pol;
 	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
 	pol = get_vma_policy(vma, addr, order, &ilx);
-	page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
-				       pol, ilx, numa_node_id());
+	folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
 	mpol_cond_put(pol);
-	return page_rmappable_folio(page);
+	return folio;
 }
 EXPORT_SYMBOL(vma_alloc_folio_noprof);
 
-- 
2.41.0




* [PATCH 3/4] mm: mempolicy: use folio_alloc_mpol() in alloc_migration_target_by_mpol()
From: Kefeng Wang @ 2024-05-15  7:07 UTC
  To: Andrew Morton; +Cc: Hugh Dickins, linux-mm, Kefeng Wang

Convert alloc_migration_target_by_mpol() to use folio_alloc_mpol(), so
that it uses folios throughout.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/mempolicy.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 205d129c6744..f73acb01ad45 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1211,7 +1211,6 @@ static struct folio *alloc_migration_target_by_mpol(struct folio *src,
 	struct migration_mpol *mmpol = (struct migration_mpol *)private;
 	struct mempolicy *pol = mmpol->pol;
 	pgoff_t ilx = mmpol->ilx;
-	struct page *page;
 	unsigned int order;
 	int nid = numa_node_id();
 	gfp_t gfp;
@@ -1235,8 +1234,7 @@ static struct folio *alloc_migration_target_by_mpol(struct folio *src,
 	else
 		gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
 
-	page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
-	return page_rmappable_folio(page);
+	return folio_alloc_mpol(gfp, order, pol, ilx, nid);
 }
 #else
 
-- 
2.41.0




* [PATCH 4/4] mm: shmem: use folio_alloc_mpol() in shmem_alloc_folio()
From: Kefeng Wang @ 2024-05-15  7:07 UTC
  To: Andrew Morton; +Cc: Hugh Dickins, linux-mm, Kefeng Wang

Let shmem_alloc_folio() take an order and use the folio_alloc_mpol()
helper, then call it directly for both normal and large folios, which
lets us drop the separate shmem_alloc_hugefolio() and clean up the code.
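
With the new order argument the huge and normal allocation paths
collapse into one helper; after this patch the callers reduce to
(illustrative summary of the hunks that follow):

	/* PMD-sized THP path */
	folio = shmem_alloc_folio(gfp, HPAGE_PMD_ORDER, info, index);

	/* order-0 path */
	folio = shmem_alloc_folio(gfp, 0, info, index);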

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/shmem.c | 32 +++++++++-----------------------
 1 file changed, 9 insertions(+), 23 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index f5d60436b604..c950d4b25338 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1603,32 +1603,18 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
 	return result;
 }
 
-static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
+static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
 		struct shmem_inode_info *info, pgoff_t index)
 {
 	struct mempolicy *mpol;
 	pgoff_t ilx;
-	struct page *page;
-
-	mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
-	page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id());
-	mpol_cond_put(mpol);
-
-	return page_rmappable_folio(page);
-}
-
-static struct folio *shmem_alloc_folio(gfp_t gfp,
-		struct shmem_inode_info *info, pgoff_t index)
-{
-	struct mempolicy *mpol;
-	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
-	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
-	page = alloc_pages_mpol(gfp, 0, mpol, ilx, numa_node_id());
+	mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
+	folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
 	mpol_cond_put(mpol);
 
-	return (struct folio *)page;
+	return folio;
 }
 
 static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
@@ -1660,12 +1646,12 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 				index + HPAGE_PMD_NR - 1, XA_PRESENT))
 			return ERR_PTR(-E2BIG);
 
-		folio = shmem_alloc_hugefolio(gfp, info, index);
+		folio = shmem_alloc_folio(gfp, HPAGE_PMD_ORDER, info, index);
 		if (!folio)
 			count_vm_event(THP_FILE_FALLBACK);
 	} else {
 		pages = 1;
-		folio = shmem_alloc_folio(gfp, info, index);
+		folio = shmem_alloc_folio(gfp, 0, info, index);
 	}
 	if (!folio)
 		return ERR_PTR(-ENOMEM);
@@ -1765,7 +1751,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
 	VM_BUG_ON_FOLIO(folio_test_large(old), old);
-	new = shmem_alloc_folio(gfp, info, index);
+	new = shmem_alloc_folio(gfp, 0, info, index);
 	if (!new)
 		return -ENOMEM;
 
@@ -2633,7 +2619,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 
 	if (!*foliop) {
 		ret = -ENOMEM;
-		folio = shmem_alloc_folio(gfp, info, pgoff);
+		folio = shmem_alloc_folio(gfp, 0, info, pgoff);
 		if (!folio)
 			goto out_unacct_blocks;
 
-- 
2.41.0




* [PATCH] mm: swap_state: use folio_alloc_mpol() in __read_swap_cache_async()
From: Kefeng Wang @ 2024-07-09 10:55 UTC
  To: Andrew Morton; +Cc: linux-mm, Hugh Dickins, Kefeng Wang

Convert __read_swap_cache_async() to use the folio_alloc_mpol() helper.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/swap_state.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index 994723cef821..a1726e49a5eb 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -470,8 +470,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
 		 * cause any racers to loop around until we add it to cache.
 		 */
-		folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
-						mpol, ilx, numa_node_id());
+		folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
 		if (!folio)
 			goto fail_put_swap;
 
-- 
2.27.0



