linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/4] mm/compaction: use "spinlock_t *" to record held lock in compact [un]lock functions
       [not found] <20230719113001.2023703-1-shikemeng@huaweicloud.com>
@ 2023-07-19 11:29 ` Kemeng Shi
  2023-07-19 11:29 ` [PATCH 2/4] mm/compaction: use "spinlock_t *" to record held lock in isolate_migratepages_block Kemeng Shi
  2023-07-19 11:30 ` [PATCH 4/4] mm/compaction: add compact_unlock_irqrestore to remove repeat code Kemeng Shi
  2 siblings, 0 replies; 4+ messages in thread
From: Kemeng Shi @ 2023-07-19 11:29 UTC (permalink / raw)
  To: akpm, linux-mm, linux-kernel; +Cc: shikemeng

Make compact_lock_irqsave and compact_unlock_should_abort use
"spinlock_t *" to record held lock.
This is a preparation to use compact_unlock_should_abort in
isolate_migratepages_block to remove repeat code.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/compaction.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 9641e2131901..dfef14d3ef78 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -523,22 +523,22 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page)
  * abort when the current block is finished regardless of success rate.
  * Sync compaction acquires the lock.
  *
- * Always returns true which makes it easier to track lock state in callers.
+ * Always returns lock which makes it easier to track lock state in callers.
  */
-static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
+static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 						struct compact_control *cc)
 	__acquires(lock)
 {
 	/* Track if the lock is contended in async mode */
 	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
 		if (spin_trylock_irqsave(lock, *flags))
-			return true;
+			return lock;
 
 		cc->contended = true;
 	}
 
 	spin_lock_irqsave(lock, *flags);
-	return true;
+	return lock;
 }
 
 /*
@@ -553,12 +553,12 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
  * Returns true if compaction should abort due to fatal signal pending.
  * Returns false when compaction can continue.
  */
-static bool compact_unlock_should_abort(spinlock_t *lock,
-		unsigned long flags, bool *locked, struct compact_control *cc)
+static bool compact_unlock_should_abort(spinlock_t **locked,
+		unsigned long flags, struct compact_control *cc)
 {
 	if (*locked) {
-		spin_unlock_irqrestore(lock, flags);
-		*locked = false;
+		spin_unlock_irqrestore(*locked, flags);
+		*locked = NULL;
 	}
 
 	if (fatal_signal_pending(current)) {
@@ -586,7 +586,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 	unsigned long flags = 0;
-	bool locked = false;
+	spinlock_t *locked = NULL;
 	unsigned long blockpfn = *start_pfn;
 	unsigned int order;
 
@@ -607,8 +607,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		 * pending.
 		 */
 		if (!(blockpfn % COMPACT_CLUSTER_MAX)
-		    && compact_unlock_should_abort(&cc->zone->lock, flags,
-								&locked, cc))
+		    && compact_unlock_should_abort(&locked, flags, cc))
 			break;
 
 		nr_scanned++;
@@ -673,7 +672,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	}
 
 	if (locked)
-		spin_unlock_irqrestore(&cc->zone->lock, flags);
+		spin_unlock_irqrestore(locked, flags);
 
 	/*
 	 * There is a tiny chance that we have read bogus compound_order(),
-- 
2.30.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 2/4] mm/compaction: use "spinlock_t *" to record held lock in isolate_migratepages_block
       [not found] <20230719113001.2023703-1-shikemeng@huaweicloud.com>
  2023-07-19 11:29 ` [PATCH 1/4] mm/compaction: use "spinlock_t *" to record held lock in compact [un]lock functions Kemeng Shi
@ 2023-07-19 11:29 ` Kemeng Shi
  2023-07-19 11:30 ` [PATCH 4/4] mm/compaction: add compact_unlock_irqrestore to remove repeat code Kemeng Shi
  2 siblings, 0 replies; 4+ messages in thread
From: Kemeng Shi @ 2023-07-19 11:29 UTC (permalink / raw)
  To: akpm, linux-mm, linux-kernel; +Cc: shikemeng

Use "spinlock_t *" instead of "struct lruvec *" to record held lock in
isolate_migratepages_block.
This is a preparation to use compact_unlock_should_abort in
isolate_migratepages_block to remove repeat code.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/compaction.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index dfef14d3ef78..638146a49e89 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -840,7 +840,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct lruvec *lruvec;
 	unsigned long flags = 0;
-	struct lruvec *locked = NULL;
+	spinlock_t *locked = NULL;
 	struct folio *folio = NULL;
 	struct page *page = NULL, *valid_page = NULL;
 	struct address_space *mapping;
@@ -911,7 +911,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 */
 		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
 			if (locked) {
-				unlock_page_lruvec_irqrestore(locked, flags);
+				spin_unlock_irqrestore(locked, flags);
 				locked = NULL;
 			}
 
@@ -946,7 +946,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		if (PageHuge(page) && cc->alloc_contig) {
 			if (locked) {
-				unlock_page_lruvec_irqrestore(locked, flags);
+				spin_unlock_irqrestore(locked, flags);
 				locked = NULL;
 			}
 
@@ -1035,7 +1035,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			if (unlikely(__PageMovable(page)) &&
 					!PageIsolated(page)) {
 				if (locked) {
-					unlock_page_lruvec_irqrestore(locked, flags);
+					spin_unlock_irqrestore(locked, flags);
 					locked = NULL;
 				}
 
@@ -1120,12 +1120,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		lruvec = folio_lruvec(folio);
 
 		/* If we already hold the lock, we can skip some rechecking */
-		if (lruvec != locked) {
+		if (&lruvec->lru_lock != locked) {
 			if (locked)
-				unlock_page_lruvec_irqrestore(locked, flags);
+				spin_unlock_irqrestore(locked, flags);
 
-			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
-			locked = lruvec;
+			locked = compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
 
 			lruvec_memcg_debug(lruvec, folio);
 
@@ -1188,7 +1187,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 isolate_fail_put:
 		/* Avoid potential deadlock in freeing page under lru_lock */
 		if (locked) {
-			unlock_page_lruvec_irqrestore(locked, flags);
+			spin_unlock_irqrestore(locked, flags);
 			locked = NULL;
 		}
 		folio_put(folio);
@@ -1204,7 +1203,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 */
 		if (nr_isolated) {
 			if (locked) {
-				unlock_page_lruvec_irqrestore(locked, flags);
+				spin_unlock_irqrestore(locked, flags);
 				locked = NULL;
 			}
 			putback_movable_pages(&cc->migratepages);
@@ -1236,7 +1235,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 isolate_abort:
 	if (locked)
-		unlock_page_lruvec_irqrestore(locked, flags);
+		spin_unlock_irqrestore(locked, flags);
 	if (folio) {
 		folio_set_lru(folio);
 		folio_put(folio);
-- 
2.30.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 4/4] mm/compaction: add compact_unlock_irqrestore to remove repeat code
       [not found] <20230719113001.2023703-1-shikemeng@huaweicloud.com>
  2023-07-19 11:29 ` [PATCH 1/4] mm/compaction: use "spinlock_t *" to record held lock in compact [un]lock functions Kemeng Shi
  2023-07-19 11:29 ` [PATCH 2/4] mm/compaction: use "spinlock_t *" to record held lock in isolate_migratepages_block Kemeng Shi
@ 2023-07-19 11:30 ` Kemeng Shi
  2 siblings, 0 replies; 4+ messages in thread
From: Kemeng Shi @ 2023-07-19 11:30 UTC (permalink / raw)
  To: akpm, linux-mm, linux-kernel; +Cc: shikemeng

Add compact_unlock_irqrestore to remove repeat code. This also makes
the compact lock function series complete, as we can call
compact_lock_irqsave/compact_unlock_irqrestore in pairs.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/compaction.c | 43 ++++++++++++++++---------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index c1dc821ac6e1..eb1d3d9a422c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -541,6 +541,14 @@ static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 	return lock;
 }
 
+static inline void compact_unlock_irqrestore(spinlock_t **locked, unsigned long flags)
+{
+	if (*locked) {
+		spin_unlock_irqrestore(*locked, flags);
+		*locked = NULL;
+	}
+}
+
 /*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
@@ -556,10 +564,7 @@ static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 static bool compact_unlock_should_abort(spinlock_t **locked,
 		unsigned long flags, struct compact_control *cc)
 {
-	if (*locked) {
-		spin_unlock_irqrestore(*locked, flags);
-		*locked = NULL;
-	}
+	compact_unlock_irqrestore(locked, flags);
 
 	if (fatal_signal_pending(current)) {
 		cc->contended = true;
@@ -671,8 +676,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 	}
 
-	if (locked)
-		spin_unlock_irqrestore(locked, flags);
+	compact_unlock_irqrestore(&locked, flags);
 
 	/*
 	 * There is a tiny chance that we have read bogus compound_order(),
@@ -935,10 +939,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		}
 
 		if (PageHuge(page) && cc->alloc_contig) {
-			if (locked) {
-				spin_unlock_irqrestore(locked, flags);
-				locked = NULL;
-			}
+			compact_unlock_irqrestore(&locked, flags);
 
 			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
 
@@ -1024,10 +1025,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 */
 			if (unlikely(__PageMovable(page)) &&
 					!PageIsolated(page)) {
-				if (locked) {
-					spin_unlock_irqrestore(locked, flags);
-					locked = NULL;
-				}
+				compact_unlock_irqrestore(&locked, flags);
 
 				if (isolate_movable_page(page, mode)) {
 					folio = page_folio(page);
@@ -1111,9 +1109,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (&lruvec->lru_lock != locked) {
-			if (locked)
-				spin_unlock_irqrestore(locked, flags);
-
+			compact_unlock_irqrestore(&locked, flags);
 			locked = compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
 
 			lruvec_memcg_debug(lruvec, folio);
@@ -1176,10 +1172,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 isolate_fail_put:
 		/* Avoid potential deadlock in freeing page under lru_lock */
-		if (locked) {
-			spin_unlock_irqrestore(locked, flags);
-			locked = NULL;
-		}
+		compact_unlock_irqrestore(&locked, flags);
 		folio_put(folio);
 
 isolate_fail:
@@ -1192,10 +1185,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * page anyway.
 		 */
 		if (nr_isolated) {
-			if (locked) {
-				spin_unlock_irqrestore(locked, flags);
-				locked = NULL;
-			}
+			compact_unlock_irqrestore(&locked, flags);
 			putback_movable_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
 			nr_isolated = 0;
@@ -1224,8 +1214,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	folio = NULL;
 
 isolate_abort:
-	if (locked)
-		spin_unlock_irqrestore(locked, flags);
+	compact_unlock_irqrestore(&locked, flags);
 	if (folio) {
 		folio_set_lru(folio);
 		folio_put(folio);
-- 
2.30.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 4/4] mm/compaction: add compact_unlock_irqrestore to remove repeat code
       [not found] <20230725180456.2146626-1-shikemeng@huaweicloud.com>
@ 2023-07-25 18:04 ` Kemeng Shi
  0 siblings, 0 replies; 4+ messages in thread
From: Kemeng Shi @ 2023-07-25 18:04 UTC (permalink / raw)
  To: akpm, linux-mm, linux-kernel; +Cc: willy, baolin.wang, david, shikemeng

Add compact_unlock_irqrestore to remove repeat code. This also makes
the compact lock function series complete, as we can call
compact_lock_irqsave/compact_unlock_irqrestore in pairs.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/compaction.c | 43 ++++++++++++++++---------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index c1dc821ac6e1..eb1d3d9a422c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -541,6 +541,14 @@ static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 	return lock;
 }
 
+static inline void compact_unlock_irqrestore(spinlock_t **locked, unsigned long flags)
+{
+	if (*locked) {
+		spin_unlock_irqrestore(*locked, flags);
+		*locked = NULL;
+	}
+}
+
 /*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
@@ -556,10 +564,7 @@ static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 static bool compact_unlock_should_abort(spinlock_t **locked,
 		unsigned long flags, struct compact_control *cc)
 {
-	if (*locked) {
-		spin_unlock_irqrestore(*locked, flags);
-		*locked = NULL;
-	}
+	compact_unlock_irqrestore(locked, flags);
 
 	if (fatal_signal_pending(current)) {
 		cc->contended = true;
@@ -671,8 +676,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 	}
 
-	if (locked)
-		spin_unlock_irqrestore(locked, flags);
+	compact_unlock_irqrestore(&locked, flags);
 
 	/*
 	 * There is a tiny chance that we have read bogus compound_order(),
@@ -935,10 +939,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		}
 
 		if (PageHuge(page) && cc->alloc_contig) {
-			if (locked) {
-				spin_unlock_irqrestore(locked, flags);
-				locked = NULL;
-			}
+			compact_unlock_irqrestore(&locked, flags);
 
 			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
 
@@ -1024,10 +1025,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 */
 			if (unlikely(__PageMovable(page)) &&
 					!PageIsolated(page)) {
-				if (locked) {
-					spin_unlock_irqrestore(locked, flags);
-					locked = NULL;
-				}
+				compact_unlock_irqrestore(&locked, flags);
 
 				if (isolate_movable_page(page, mode)) {
 					folio = page_folio(page);
@@ -1111,9 +1109,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (&lruvec->lru_lock != locked) {
-			if (locked)
-				spin_unlock_irqrestore(locked, flags);
-
+			compact_unlock_irqrestore(&locked, flags);
 			locked = compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
 
 			lruvec_memcg_debug(lruvec, folio);
@@ -1176,10 +1172,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 isolate_fail_put:
 		/* Avoid potential deadlock in freeing page under lru_lock */
-		if (locked) {
-			spin_unlock_irqrestore(locked, flags);
-			locked = NULL;
-		}
+		compact_unlock_irqrestore(&locked, flags);
 		folio_put(folio);
 
 isolate_fail:
@@ -1192,10 +1185,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * page anyway.
 		 */
 		if (nr_isolated) {
-			if (locked) {
-				spin_unlock_irqrestore(locked, flags);
-				locked = NULL;
-			}
+			compact_unlock_irqrestore(&locked, flags);
 			putback_movable_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
 			nr_isolated = 0;
@@ -1224,8 +1214,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	folio = NULL;
 
 isolate_abort:
-	if (locked)
-		spin_unlock_irqrestore(locked, flags);
+	compact_unlock_irqrestore(&locked, flags);
 	if (folio) {
 		folio_set_lru(folio);
 		folio_put(folio);
-- 
2.30.0



^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2023-07-25 10:04 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
     [not found] <20230719113001.2023703-1-shikemeng@huaweicloud.com>
2023-07-19 11:29 ` [PATCH 1/4] mm/compaction: use "spinlock_t *" to record held lock in compact [un]lock functions Kemeng Shi
2023-07-19 11:29 ` [PATCH 2/4] mm/compaction: use "spinlock_t *" to record held lock in isolate_migratepages_block Kemeng Shi
2023-07-19 11:30 ` [PATCH 4/4] mm/compaction: add compact_unlock_irqrestore to remove repeat code Kemeng Shi
     [not found] <20230725180456.2146626-1-shikemeng@huaweicloud.com>
2023-07-25 18:04 ` Kemeng Shi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).