linux-mm.kvack.org archive mirror
* [PATCH v2] mm: use zonelist_zone() to get zone
@ 2024-07-09  3:19 Wei Yang
  2024-07-09 11:19 ` Garg, Shivank
  0 siblings, 1 reply; 7+ messages in thread
From: Wei Yang @ 2024-07-09  3:19 UTC
  To: akpm; +Cc: linux-mm, Wei Yang, Mike Rapoport, David Hildenbrand,
	Garg Shivank

Instead of accessing zoneref->zone directly, use the zonelist_zone()
helper, as other call sites already do, for consistency.

No functional change.
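For reference, zonelist_zone() is a trivial accessor. Roughly as defined
in include/linux/mmzone.h (a sketch shown for context, not part of the
diff below):

	/* include/linux/mmzone.h: return the zone a zoneref points at */
	static inline struct zone *zonelist_zone(struct zoneref *zoneref)
	{
		return zoneref->zone;
	}

So the conversion is purely cosmetic and the compiler emits the same code.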

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
CC: Mike Rapoport (IBM) <rppt@kernel.org>
CC: David Hildenbrand <david@redhat.com>
CC: Garg Shivank <shivankg@amd.com>

---
v2: cover more usage sites
---
 include/linux/mmzone.h     |  4 ++--
 include/trace/events/oom.h |  2 +-
 mm/mempolicy.c             |  4 ++--
 mm/page_alloc.c            | 14 +++++++-------
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cb7f265c2b96..51bce636373f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 			zone = zonelist_zone(z))
 
 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
-	for (zone = z->zone;	\
+	for (zone = zonelist_zone(z);	\
 		zone;							\
 		z = next_zones_zonelist(++z, highidx, nodemask),	\
 			zone = zonelist_zone(z))
@@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
 	nid = first_node(*nodes);
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 	z = first_zones_zonelist(zonelist, ZONE_NORMAL,	nodes);
-	return (!z->zone) ? true : false;
+	return (!zonelist_zone(z)) ? true : false;
 }
 
 
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..fe6997886b77 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,7 +55,7 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zoneref->zone);
+		__entry->node = zone_to_nid(zonelist_zone(zoneref));
 		__entry->zone_idx = zoneref->zone_idx;
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f73acb01ad45..83e26ded6278 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return z->zone ? zone_to_nid(z->zone) : node;
+		return zonelist_zone(z) ? zone_to_nid(zonelist_zone(z)) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2806,7 +2806,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(z->zone);
+		polnid = zone_to_nid(zonelist_zone(z));
 		break;
 
 	default:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 116ee33fd1ce..e2933885bb19 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4218,7 +4218,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
+	if (!zonelist_zone(ac->preferred_zoneref))
 		goto nopage;
 
 	/*
@@ -4230,7 +4230,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx,
 					&cpuset_current_mems_allowed);
-		if (!z->zone)
+		if (!zonelist_zone(z))
 			goto nopage;
 	}
 
@@ -4587,8 +4587,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+		    zone_to_nid(zone) != zone_to_nid(zonelist_zone(ac.preferred_zoneref))) {
 			goto failed;
 		}
 
@@ -4647,7 +4647,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
 
 out:
 	return nr_populated;
@@ -4705,7 +4705,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5310,7 +5310,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(z->zone);
+	return zone_to_nid(zonelist_zone(z));
 }
 #endif
 
-- 
2.34.1




* Re: [PATCH v2] mm: use zonelist_zone() to get zone
  2024-07-09  3:19 [PATCH v2] mm: use zonelist_zone() to get zone Wei Yang
@ 2024-07-09 11:19 ` Garg, Shivank
  2024-07-09 21:51   ` Andrew Morton
  0 siblings, 1 reply; 7+ messages in thread
From: Garg, Shivank @ 2024-07-09 11:19 UTC
  To: Wei Yang, akpm; +Cc: linux-mm, Mike Rapoport, David Hildenbrand

On 7/9/2024 8:49 AM, Wei Yang wrote:
> Instead of accessing zoneref->zone directly, use the zonelist_zone()
> helper, as other call sites already do, for consistency.
> 
> No functional change.
> 
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> CC: Mike Rapoport (IBM) <rppt@kernel.org>
> CC: David Hildenbrand <david@redhat.com>
> CC: Garg Shivank <shivankg@amd.com>
> 
> ---
> v2: cover more usage sites
> ---
>  include/linux/mmzone.h     |  4 ++--
>  include/trace/events/oom.h |  2 +-
>  mm/mempolicy.c             |  4 ++--
>  mm/page_alloc.c            | 14 +++++++-------
>  4 files changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index cb7f265c2b96..51bce636373f 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
>  			zone = zonelist_zone(z))
>  
>  #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
> -	for (zone = z->zone;	\
> +	for (zone = zonelist_zone(z);	\
>  		zone;							\
>  		z = next_zones_zonelist(++z, highidx, nodemask),	\
>  			zone = zonelist_zone(z))
> @@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
>  	nid = first_node(*nodes);
>  	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
>  	z = first_zones_zonelist(zonelist, ZONE_NORMAL,	nodes);
> -	return (!z->zone) ? true : false;
> +	return (!zonelist_zone(z)) ? true : false;
>  }
>  
>  
> diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
> index a42be4c8563b..fe6997886b77 100644
> --- a/include/trace/events/oom.h
> +++ b/include/trace/events/oom.h
> @@ -55,7 +55,7 @@ TRACE_EVENT(reclaim_retry_zone,
>  	),
>  
>  	TP_fast_assign(
> -		__entry->node = zone_to_nid(zoneref->zone);
> +		__entry->node = zone_to_nid(zonelist_zone(zoneref));
>  		__entry->zone_idx = zoneref->zone_idx;
>  		__entry->order = order;
>  		__entry->reclaimable = reclaimable;
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index f73acb01ad45..83e26ded6278 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
>  		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
>  		z = first_zones_zonelist(zonelist, highest_zoneidx,
>  							&policy->nodes);
> -		return z->zone ? zone_to_nid(z->zone) : node;
> +		return zonelist_zone(z) ? zone_to_nid(zonelist_zone(z)) : node;
>  	}
>  	case MPOL_LOCAL:
>  		return node;
> @@ -2806,7 +2806,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
>  				node_zonelist(thisnid, GFP_HIGHUSER),
>  				gfp_zone(GFP_HIGHUSER),
>  				&pol->nodes);
> -		polnid = zone_to_nid(z->zone);
> +		polnid = zone_to_nid(zonelist_zone(z));
>  		break;
>  
>  	default:
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 116ee33fd1ce..e2933885bb19 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4218,7 +4218,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  	 */
>  	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
>  					ac->highest_zoneidx, ac->nodemask);
> -	if (!ac->preferred_zoneref->zone)
> +	if (!zonelist_zone(ac->preferred_zoneref))
>  		goto nopage;
>  
>  	/*
> @@ -4230,7 +4230,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  		struct zoneref *z = first_zones_zonelist(ac->zonelist,
>  					ac->highest_zoneidx,
>  					&cpuset_current_mems_allowed);
> -		if (!z->zone)
> +		if (!zonelist_zone(z))
>  			goto nopage;
>  	}
>  
> @@ -4587,8 +4587,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>  			continue;
>  		}
>  
> -		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
> -		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
> +		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
> +		    zone_to_nid(zone) != zone_to_nid(zonelist_zone(ac.preferred_zoneref))) {
>  			goto failed;
>  		}
>  
> @@ -4647,7 +4647,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>  	pcp_trylock_finish(UP_flags);
>  
>  	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
> -	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
> +	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
>  
>  out:
>  	return nr_populated;
> @@ -4705,7 +4705,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
>  	 * Forbid the first pass from falling back to types that fragment
>  	 * memory until all local zones are considered.
>  	 */
> -	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
> +	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
>  
>  	/* First allocation attempt */
>  	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
> @@ -5310,7 +5310,7 @@ int local_memory_node(int node)
>  	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
>  				   gfp_zone(GFP_KERNEL),
>  				   NULL);
> -	return zone_to_nid(z->zone);
> +	return zone_to_nid(zonelist_zone(z));
>  }
>  #endif
>  

Hi Wei,

I identified some additional locations where using zonelist_zone() and
its related helpers (zonelist_node_idx() and zonelist_zone_idx()) would
improve code consistency.
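For reference, these are likewise thin zoneref accessors. Roughly as
defined in include/linux/mmzone.h (a sketch for context, not part of the
changes below):

	/* include/linux/mmzone.h: zone index / node id of a zoneref */
	static inline int zonelist_zone_idx(struct zoneref *zoneref)
	{
		return zoneref->zone_idx;
	}

	static inline int zonelist_node_idx(struct zoneref *zoneref)
	{
		return zone_to_nid(zoneref->zone);
	}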

If it's alright with you, please append the changes below to the patch with my tags.

Co-developed-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>

I have also tested this patch.

Thanks,
Shivank

---
 include/trace/events/oom.h |  4 ++--
 mm/mempolicy.c             |  4 ++--
 mm/mmzone.c                |  2 +-
 mm/page_alloc.c            | 12 ++++++------
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index fe6997886b77..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zonelist_zone(zoneref));
-		__entry->zone_idx = zoneref->zone_idx;
+		__entry->node = zonelist_node_idx(zoneref);
+		__entry->zone_idx = zonelist_zone_idx(zoneref);
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
 		__entry->available = available;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e000f19b3852..ec84a11df1cc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1953,7 +1953,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return zonelist_zone(z) ? zone_to_nid(zonelist_zone(z)) : node;
+		return zonelist_zone(z) ? zonelist_node_idx(z) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2802,7 +2802,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(zonelist_zone(z));
+		polnid = zonelist_node_idx(z);
 		break;
 
 	default:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index c01896eca736..f9baa8882fbf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
 			z++;
 	else
 		while (zonelist_zone_idx(z) > highest_zoneidx ||
-				(z->zone && !zref_in_nodemask(z, nodes)))
+				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
 			z++;
 
 	return z;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d841905fa260..e998ff6cbbff 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3336,7 +3336,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 		if (no_fallback && nr_online_nodes > 1 &&
-		    zone != ac->preferred_zoneref->zone) {
+		    zone != zonelist_zone(ac->preferred_zoneref)) {
 			int local_nid;
 
 			/*
@@ -3344,7 +3344,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			 * fragmenting fallbacks. Locality is more important
 			 * than fragmentation avoidance.
 			 */
-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+			local_nid = zonelist_node_idx(ac->preferred_zoneref);
 			if (zone_to_nid(zone) != local_nid) {
 				alloc_flags &= ~ALLOC_NOFRAGMENT;
 				goto retry;
@@ -3397,7 +3397,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				goto try_this_zone;
 
 			if (!node_reclaim_enabled() ||
-			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
 				continue;
 
 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3419,7 +3419,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 try_this_zone:
-		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -4560,7 +4560,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 		}
 
 		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
-		    zone_to_nid(zone) != zone_to_nid(zonelist_zone(ac.preferred_zoneref))) {
+		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
 			goto failed;
 		}
 
@@ -5282,7 +5282,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(zonelist_zone(z));
+	return zonelist_node_idx(z);
 }
 #endif
 
-- 
2.43.0



* Re: [PATCH v2] mm: use zonelist_zone() to get zone
  2024-07-09 11:19 ` Garg, Shivank
@ 2024-07-09 21:51   ` Andrew Morton
  2024-07-15  6:26     ` Garg, Shivank
  2024-07-29  9:17     ` [PATCH] mm: improve code consistency with zonelist_* helper functions Shivank Garg
  0 siblings, 2 replies; 7+ messages in thread
From: Andrew Morton @ 2024-07-09 21:51 UTC
  To: Garg, Shivank; +Cc: Wei Yang, linux-mm, Mike Rapoport, David Hildenbrand

On Tue, 9 Jul 2024 16:49:47 +0530 "Garg, Shivank" <shivankg@amd.com> wrote:

> I identified some additional locations where using zonelist_zone() and
> its related helpers (zonelist_node_idx() and zonelist_zone_idx()) would
> improve code consistency.
> 
> If it's alright with you, please append the changes below to the patch with my tags.

Thanks.  This patch is getting rather large.  Can we please revisit
this after 6.11-rc1 is released?




* Re: [PATCH v2] mm: use zonelist_zone() to get zone
  2024-07-09 21:51   ` Andrew Morton
@ 2024-07-15  6:26     ` Garg, Shivank
  2024-07-29  9:17     ` [PATCH] mm: improve code consistency with zonelist_* helper functions Shivank Garg
  1 sibling, 0 replies; 7+ messages in thread
From: Garg, Shivank @ 2024-07-15  6:26 UTC
  To: Andrew Morton; +Cc: Wei Yang, linux-mm, Mike Rapoport, David Hildenbrand

On 7/10/2024 3:21 AM, Andrew Morton wrote:
> On Tue, 9 Jul 2024 16:49:47 +0530 "Garg, Shivank" <shivankg@amd.com> wrote:
> 
>> I identified some additional locations where using zonelist_zone() and
>> its related helpers (zonelist_node_idx() and zonelist_zone_idx()) would
>> improve code consistency.
>>
>> If it's alright with you, please append the changes below to the patch with my tags.
> 
> Thanks.  This patch is getting rather large.  Can we please revisit
> this after 6.11-rc1 is released?
> 
> 

Sure, sounds good to me.

Thanks,
Shivank






* [PATCH] mm: improve code consistency with zonelist_* helper functions
  2024-07-09 21:51   ` Andrew Morton
  2024-07-15  6:26     ` Garg, Shivank
@ 2024-07-29  9:17     ` Shivank Garg
  2024-07-29  9:44       ` David Hildenbrand
  2024-07-31 23:58       ` Wei Yang
  1 sibling, 2 replies; 7+ messages in thread
From: Shivank Garg @ 2024-07-29  9:17 UTC
  To: akpm; +Cc: david, linux-mm, richard.weiyang, rppt, shivankg

From: Wei Yang <richard.weiyang@gmail.com>

Replace direct access to zoneref->zone, zoneref->zone_idx, or
zone_to_nid(zoneref->zone) with the corresponding zonelist_*
helper functions for consistency.

No functional change.

Co-developed-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>

CC: Mike Rapoport (IBM) <rppt@kernel.org>
CC: David Hildenbrand <david@redhat.com>
---

Hi Andrew,

I've rebased the patch on top of 6.11-rc1.

Thanks,
Shivank


 include/linux/mmzone.h     |  4 ++--
 include/trace/events/oom.h |  4 ++--
 mm/mempolicy.c             |  4 ++--
 mm/mmzone.c                |  2 +-
 mm/page_alloc.c            | 22 +++++++++++-----------
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41458892bc8a..9f389c76581f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 			zone = zonelist_zone(z))
 
 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
-	for (zone = z->zone;	\
+	for (zone = zonelist_zone(z);	\
 		zone;							\
 		z = next_zones_zonelist(++z, highidx, nodemask),	\
 			zone = zonelist_zone(z))
@@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
 	nid = first_node(*nodes);
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 	z = first_zones_zonelist(zonelist, ZONE_NORMAL,	nodes);
-	return (!z->zone) ? true : false;
+	return (!zonelist_zone(z)) ? true : false;
 }
 
 
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zoneref->zone);
-		__entry->zone_idx = zoneref->zone_idx;
+		__entry->node = zonelist_node_idx(zoneref);
+		__entry->zone_idx = zonelist_zone_idx(zoneref);
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
 		__entry->available = available;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b858e22b259d..b3b5f376471f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return z->zone ? zone_to_nid(z->zone) : node;
+		return zonelist_zone(z) ? zonelist_node_idx(z) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2809,7 +2809,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(z->zone);
+		polnid = zonelist_node_idx(z);
 		break;
 
 	default:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index c01896eca736..f9baa8882fbf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
 			z++;
 	else
 		while (zonelist_zone_idx(z) > highest_zoneidx ||
-				(z->zone && !zref_in_nodemask(z, nodes)))
+				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
 			z++;
 
 	return z;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 28f80daf5c04..94e3aa1e145d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3353,7 +3353,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 		if (no_fallback && nr_online_nodes > 1 &&
-		    zone != ac->preferred_zoneref->zone) {
+		    zone != zonelist_zone(ac->preferred_zoneref)) {
 			int local_nid;
 
 			/*
@@ -3361,7 +3361,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			 * fragmenting fallbacks. Locality is more important
 			 * than fragmentation avoidance.
 			 */
-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+			local_nid = zonelist_node_idx(ac->preferred_zoneref);
 			if (zone_to_nid(zone) != local_nid) {
 				alloc_flags &= ~ALLOC_NOFRAGMENT;
 				goto retry;
@@ -3414,7 +3414,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				goto try_this_zone;
 
 			if (!node_reclaim_enabled() ||
-			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
 				continue;
 
 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3436,7 +3436,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 try_this_zone:
-		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -4207,7 +4207,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
+	if (!zonelist_zone(ac->preferred_zoneref))
 		goto nopage;
 
 	/*
@@ -4219,7 +4219,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx,
 					&cpuset_current_mems_allowed);
-		if (!z->zone)
+		if (!zonelist_zone(z))
 			goto nopage;
 	}
 
@@ -4576,8 +4576,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
 			goto failed;
 		}
 
@@ -4636,7 +4636,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
 
 out:
 	return nr_populated;
@@ -4694,7 +4694,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5299,7 +5299,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(z->zone);
+	return zonelist_node_idx(z);
 }
 #endif
 
-- 
2.34.1




* Re: [PATCH] mm: improve code consistency with zonelist_* helper functions
  2024-07-29  9:17     ` [PATCH] mm: improve code consistency with zonelist_* helper functions Shivank Garg
@ 2024-07-29  9:44       ` David Hildenbrand
  2024-07-31 23:58       ` Wei Yang
  1 sibling, 0 replies; 7+ messages in thread
From: David Hildenbrand @ 2024-07-29  9:44 UTC
  To: Shivank Garg, akpm; +Cc: linux-mm, richard.weiyang, rppt

On 29.07.24 11:17, Shivank Garg wrote:
> From: Wei Yang <richard.weiyang@gmail.com>
> 
> Replace direct access to zoneref->zone, zoneref->zone_idx, or
> zone_to_nid(zoneref->zone) with the corresponding zonelist_*
> helper functions for consistency.
> 
> No functional change.
> 
> Co-developed-by: Shivank Garg <shivankg@amd.com>
> Signed-off-by: Shivank Garg <shivankg@amd.com>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> 
> CC: Mike Rapoport (IBM) <rppt@kernel.org>
> CC: David Hildenbrand <david@redhat.com>
> ---
> 
> Hi Andrew,
> 
> I've rebased the patch on top of 6.11-rc1.
> 
> Thanks,
> Shivank

Acked-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb




* Re: [PATCH] mm: improve code consistency with zonelist_* helper functions
  2024-07-29  9:17     ` [PATCH] mm: improve code consistency with zonelist_* helper functions Shivank Garg
  2024-07-29  9:44       ` David Hildenbrand
@ 2024-07-31 23:58       ` Wei Yang
  1 sibling, 0 replies; 7+ messages in thread
From: Wei Yang @ 2024-07-31 23:58 UTC
  To: Shivank Garg; +Cc: akpm, david, linux-mm, richard.weiyang, rppt

On Mon, Jul 29, 2024 at 02:47:17PM +0530, Shivank Garg wrote:
>From: Wei Yang <richard.weiyang@gmail.com>
>
>Replace direct access to zoneref->zone, zoneref->zone_idx, or
>zone_to_nid(zoneref->zone) with the corresponding zonelist_*
>helper functions for consistency.
>
>No functional change.
>
>Co-developed-by: Shivank Garg <shivankg@amd.com>
>Signed-off-by: Shivank Garg <shivankg@amd.com>
>Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>
>CC: Mike Rapoport (IBM) <rppt@kernel.org>
>CC: David Hildenbrand <david@redhat.com>
>---
>
>Hi Andrew,
>
>I've rebased the patch on top of 6.11-rc1.

Thanks, looks good to me.

>
>Thanks,
>Shivank
>
>
> include/linux/mmzone.h     |  4 ++--
> include/trace/events/oom.h |  4 ++--
> mm/mempolicy.c             |  4 ++--
> mm/mmzone.c                |  2 +-
> mm/page_alloc.c            | 22 +++++++++++-----------
> 5 files changed, 18 insertions(+), 18 deletions(-)
>
>diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
>index 41458892bc8a..9f389c76581f 100644
>--- a/include/linux/mmzone.h
>+++ b/include/linux/mmzone.h
>@@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
> 			zone = zonelist_zone(z))
> 
> #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
>-	for (zone = z->zone;	\
>+	for (zone = zonelist_zone(z);	\
> 		zone;							\
> 		z = next_zones_zonelist(++z, highidx, nodemask),	\
> 			zone = zonelist_zone(z))
>@@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
> 	nid = first_node(*nodes);
> 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
> 	z = first_zones_zonelist(zonelist, ZONE_NORMAL,	nodes);
>-	return (!z->zone) ? true : false;
>+	return (!zonelist_zone(z)) ? true : false;
> }
> 
> 
>diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
>index a42be4c8563b..9f0a5d1482c4 100644
>--- a/include/trace/events/oom.h
>+++ b/include/trace/events/oom.h
>@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
> 	),
> 
> 	TP_fast_assign(
>-		__entry->node = zone_to_nid(zoneref->zone);
>-		__entry->zone_idx = zoneref->zone_idx;
>+		__entry->node = zonelist_node_idx(zoneref);
>+		__entry->zone_idx = zonelist_zone_idx(zoneref);
> 		__entry->order = order;
> 		__entry->reclaimable = reclaimable;
> 		__entry->available = available;
>diff --git a/mm/mempolicy.c b/mm/mempolicy.c
>index b858e22b259d..b3b5f376471f 100644
>--- a/mm/mempolicy.c
>+++ b/mm/mempolicy.c
>@@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
> 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
> 		z = first_zones_zonelist(zonelist, highest_zoneidx,
> 							&policy->nodes);
>-		return z->zone ? zone_to_nid(z->zone) : node;
>+		return zonelist_zone(z) ? zonelist_node_idx(z) : node;
> 	}
> 	case MPOL_LOCAL:
> 		return node;
>@@ -2809,7 +2809,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
> 				node_zonelist(thisnid, GFP_HIGHUSER),
> 				gfp_zone(GFP_HIGHUSER),
> 				&pol->nodes);
>-		polnid = zone_to_nid(z->zone);
>+		polnid = zonelist_node_idx(z);
> 		break;
> 
> 	default:
>diff --git a/mm/mmzone.c b/mm/mmzone.c
>index c01896eca736..f9baa8882fbf 100644
>--- a/mm/mmzone.c
>+++ b/mm/mmzone.c
>@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
> 			z++;
> 	else
> 		while (zonelist_zone_idx(z) > highest_zoneidx ||
>-				(z->zone && !zref_in_nodemask(z, nodes)))
>+				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
> 			z++;
> 
> 	return z;
>diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>index 28f80daf5c04..94e3aa1e145d 100644
>--- a/mm/page_alloc.c
>+++ b/mm/page_alloc.c
>@@ -3353,7 +3353,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
> 		}
> 
> 		if (no_fallback && nr_online_nodes > 1 &&
>-		    zone != ac->preferred_zoneref->zone) {
>+		    zone != zonelist_zone(ac->preferred_zoneref)) {
> 			int local_nid;
> 
> 			/*
>@@ -3361,7 +3361,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
> 			 * fragmenting fallbacks. Locality is more important
> 			 * than fragmentation avoidance.
> 			 */
>-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
>+			local_nid = zonelist_node_idx(ac->preferred_zoneref);
> 			if (zone_to_nid(zone) != local_nid) {
> 				alloc_flags &= ~ALLOC_NOFRAGMENT;
> 				goto retry;
>@@ -3414,7 +3414,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
> 				goto try_this_zone;
> 
> 			if (!node_reclaim_enabled() ||
>-			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
>+			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
> 				continue;
> 
> 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
>@@ -3436,7 +3436,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
> 		}
> 
> try_this_zone:
>-		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
>+		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
> 				gfp_mask, alloc_flags, ac->migratetype);
> 		if (page) {
> 			prep_new_page(page, order, gfp_mask, alloc_flags);
>@@ -4207,7 +4207,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
> 	 */
> 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
> 					ac->highest_zoneidx, ac->nodemask);
>-	if (!ac->preferred_zoneref->zone)
>+	if (!zonelist_zone(ac->preferred_zoneref))
> 		goto nopage;
> 
> 	/*
>@@ -4219,7 +4219,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
> 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
> 					ac->highest_zoneidx,
> 					&cpuset_current_mems_allowed);
>-		if (!z->zone)
>+		if (!zonelist_zone(z))
> 			goto nopage;
> 	}
> 
>@@ -4576,8 +4576,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
> 			continue;
> 		}
> 
>-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
>-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
>+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
>+		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
> 			goto failed;
> 		}
> 
>@@ -4636,7 +4636,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
> 	pcp_trylock_finish(UP_flags);
> 
> 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
>-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
>+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
> 
> out:
> 	return nr_populated;
>@@ -4694,7 +4694,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
> 	 * Forbid the first pass from falling back to types that fragment
> 	 * memory until all local zones are considered.
> 	 */
>-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
>+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
> 
> 	/* First allocation attempt */
> 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
>@@ -5299,7 +5299,7 @@ int local_memory_node(int node)
> 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
> 				   gfp_zone(GFP_KERNEL),
> 				   NULL);
>-	return zone_to_nid(z->zone);
>+	return zonelist_node_idx(z);
> }
> #endif
> 
>-- 
>2.34.1

-- 
Wei Yang
Help you, Help me

