Subject: [PATCH V2] mm/hugetlb: remove unnecessary holding of hugetlb_lock
From: yangge1116
Date: 2025-05-27  3:36 UTC
To: akpm
Cc: linux-mm, linux-kernel, 21cnbao, david, baolin.wang, muchun.song,
	osalvador, liuzixing, Ge Yang

From: Ge Yang <yangge1116@126.com>

In isolate_or_dissolve_huge_folio(), the hugetlb_lock is acquired only to
obtain the correct hstate, which is then passed to
alloc_and_dissolve_hugetlb_folio().

alloc_and_dissolve_hugetlb_folio() takes the hugetlb_lock itself anyway, so
let it look up the hstate on its own; isolate_or_dissolve_huge_folio() then
no longer needs to acquire the lock at all. An early folio_test_hugetlb()
check is kept in isolate_or_dissolve_huge_folio() so that the normal path
is not disturbed by needlessly holding the hugetlb_lock.

replace_free_hugepage_folios() has the same issue and is fixed in the same
way.
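
For readability, this is roughly what isolate_or_dissolve_huge_folio()
looks like after the patch, assembled from the hunks below (the comments
here are only explanatory and are not part of the patch itself):

int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
{
	int ret = -EBUSY;

	/* Unlocked fast path: skip non-hugetlb folios without touching hugetlb_lock. */
	if (!folio_test_hugetlb(folio))
		return 0;

	/* Gigantic folios cannot be handled by alloc_contig_range(); bail out. */
	if (folio_order(folio) > MAX_PAGE_ORDER)
		return -ENOMEM;

	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
		ret = 0;
	else if (!folio_ref_count(folio))
		/* The hstate is now looked up under hugetlb_lock inside this helper. */
		ret = alloc_and_dissolve_hugetlb_folio(folio, list);

	return ret;
}

The unlocked folio_test_hugetlb() check is only an optimization; the
authoritative recheck still happens under hugetlb_lock inside
alloc_and_dissolve_hugetlb_folio(), so a folio dissolved in the meantime is
still handled correctly.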

Suggested-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Ge Yang <yangge1116@126.com>
---

V2:
- Add early folio_test_hugetlb() suggested by David
- Change the subject from "mm/hugetlb: remove redundant folio_test_hugetlb"
  to "mm/hugetlb: remove unnecessary holding of hugetlb_lock"

 mm/hugetlb.c | 54 +++++++++++++++++-------------------------------------
 1 file changed, 17 insertions(+), 37 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f0b1d53..b4bc2bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2787,20 +2787,24 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 /*
  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
  * the old one
- * @h: struct hstate old page belongs to
  * @old_folio: Old folio to dissolve
  * @list: List to isolate the page in case we need to
  * Returns 0 on success, otherwise negated error.
  */
-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
-			struct folio *old_folio, struct list_head *list)
+static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
+			struct list_head *list)
 {
-	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+	gfp_t gfp_mask;
+	struct hstate *h;
 	int nid = folio_nid(old_folio);
 	struct folio *new_folio = NULL;
 	int ret = 0;
 
 retry:
+	/*
+	 * The old_folio might have been dissolved from under our feet, so make sure
+	 * to carefully check the state under the lock.
+	 */
 	spin_lock_irq(&hugetlb_lock);
 	if (!folio_test_hugetlb(old_folio)) {
 		/*
@@ -2829,8 +2833,10 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 		cond_resched();
 		goto retry;
 	} else {
+		h = folio_hstate(old_folio);
 		if (!new_folio) {
 			spin_unlock_irq(&hugetlb_lock);
+			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 			new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
 							      NULL, NULL);
 			if (!new_folio)
@@ -2874,35 +2880,24 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 
 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 {
-	struct hstate *h;
 	int ret = -EBUSY;
 
-	/*
-	 * The page might have been dissolved from under our feet, so make sure
-	 * to carefully check the state under the lock.
-	 * Return success when racing as if we dissolved the page ourselves.
-	 */
-	spin_lock_irq(&hugetlb_lock);
-	if (folio_test_hugetlb(folio)) {
-		h = folio_hstate(folio);
-	} else {
-		spin_unlock_irq(&hugetlb_lock);
+	/* Not to disrupt normal path by vainly holding hugetlb_lock */
+	if (!folio_test_hugetlb(folio))
 		return 0;
-	}
-	spin_unlock_irq(&hugetlb_lock);
 
 	/*
 	 * Fence off gigantic pages as there is a cyclic dependency between
 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
 	 * of bailing out right away without further retrying.
 	 */
-	if (hstate_is_gigantic(h))
+	if (folio_order(folio) > MAX_PAGE_ORDER)
 		return -ENOMEM;
 
 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
 		ret = 0;
 	else if (!folio_ref_count(folio))
-		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
+		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
 
 	return ret;
 }
@@ -2916,7 +2911,6 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
  */
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct hstate *h;
 	struct folio *folio;
 	int ret = 0;
 
@@ -2925,23 +2919,9 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 	while (start_pfn < end_pfn) {
 		folio = pfn_folio(start_pfn);
 
-		/*
-		 * The folio might have been dissolved from under our feet, so make sure
-		 * to carefully check the state under the lock.
-		 */
-		spin_lock_irq(&hugetlb_lock);
-		if (folio_test_hugetlb(folio)) {
-			h = folio_hstate(folio);
-		} else {
-			spin_unlock_irq(&hugetlb_lock);
-			start_pfn++;
-			continue;
-		}
-		spin_unlock_irq(&hugetlb_lock);
-
-		if (!folio_ref_count(folio)) {
-			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
-							       &isolate_list);
+		/* Not to disrupt normal path by vainly holding hugetlb_lock */
+		if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
+			ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
 			if (ret)
 				break;
 
-- 
2.7.4


