linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v4 0/4] Speed up f2fs truncate
@ 2024-12-23  8:10 Yi Sun
  2024-12-23  8:10 ` [PATCH v4 1/4] f2fs: introduce update_sit_entry_for_release/alloc() Yi Sun
                   ` (5 more replies)
  0 siblings, 6 replies; 12+ messages in thread
From: Yi Sun @ 2024-12-23  8:10 UTC (permalink / raw)
  To: chao, jaegeuk
  Cc: sunyibuaa, yi.sun, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

Deleting large files is time-consuming, and a large part
of the time is spent in f2fs_invalidate_blocks()
->down_write(sit_info->sentry_lock) and up_write().

If some blocks are continuous, we can process these blocks
at the same time. This can reduce the number of calls to
the down_write() and the up_write(), thereby improving the
overall speed of doing truncate.

Test steps:
Set the CPU and DDR frequencies to the maximum.
dd if=/dev/random of=./test.txt bs=1M count=100000
sync
rm test.txt

Time Comparison of rm:
original        optimization            ratio
7.17s           3.27s                   54.39%

----
v4:
- introduce update_sit_entry_for_alloc().
- [patch 2,3,4 / 4] have no changes compared to v3.

Yi Sun (4):
  f2fs: introduce update_sit_entry_for_release/alloc()
  f2fs: update_sit_entry_for_release() supports consecutive blocks.
  f2fs: add parameter @len to f2fs_invalidate_blocks()
  f2fs: Optimize f2fs_truncate_data_blocks_range()

 fs/f2fs/compress.c |   4 +-
 fs/f2fs/f2fs.h     |   3 +-
 fs/f2fs/file.c     |  78 +++++++++++++++++--
 fs/f2fs/node.c     |   4 +-
 fs/f2fs/segment.c  | 185 +++++++++++++++++++++++++++++----------------
 5 files changed, 198 insertions(+), 76 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v4 1/4] f2fs: introduce update_sit_entry_for_release/alloc()
  2024-12-23  8:10 [PATCH v4 0/4] Speed up f2fs truncate Yi Sun
@ 2024-12-23  8:10 ` Yi Sun
  2024-12-26 14:00   ` Chao Yu
  2024-12-23  8:10 ` [PATCH v4 2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks Yi Sun
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 12+ messages in thread
From: Yi Sun @ 2024-12-23  8:10 UTC (permalink / raw)
  To: chao, jaegeuk
  Cc: sunyibuaa, yi.sun, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

No logical changes, just for cleanliness.

Signed-off-by: Yi Sun <yi.sun@unisoc.com>
---
 fs/f2fs/segment.c | 162 ++++++++++++++++++++++++++--------------------
 1 file changed, 93 insertions(+), 69 deletions(-)

diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 86e547f008f9..bc761d9b889a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2426,16 +2426,103 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
 		SIT_I(sbi)->max_mtime = ctime;
 }
 
-static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
+				block_t blkaddr, unsigned int offset, int del)
+{
+	bool exist;
+#ifdef CONFIG_F2FS_CHECK_FS
+	bool mir_exist;
+#endif
+
+	exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+	mir_exist = f2fs_test_and_clear_bit(offset,
+					se->cur_valid_map_mir);
+	if (unlikely(exist != mir_exist)) {
+		f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
+			blkaddr, exist);
+		f2fs_bug_on(sbi, 1);
+	}
+#endif
+	if (unlikely(!exist)) {
+		f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr);
+		f2fs_bug_on(sbi, 1);
+		se->valid_blocks++;
+		del = 0;
+	} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+		/*
+		 * If checkpoints are off, we must not reuse data that
+		 * was used in the previous checkpoint. If it was used
+		 * before, we must track that to know how much space we
+		 * really have.
+		 */
+		if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+			spin_lock(&sbi->stat_lock);
+			sbi->unusable_block_count++;
+			spin_unlock(&sbi->stat_lock);
+		}
+	}
+
+	if (f2fs_block_unit_discard(sbi) &&
+			f2fs_test_and_clear_bit(offset, se->discard_map))
+		sbi->discard_blks++;
+
+	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
+		se->ckpt_valid_blocks += del;
+
+	return del;
+}
+
+static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
+				block_t blkaddr, unsigned int offset, int del)
 {
-	struct seg_entry *se;
-	unsigned int segno, offset;
-	long int new_vblocks;
 	bool exist;
 #ifdef CONFIG_F2FS_CHECK_FS
 	bool mir_exist;
 #endif
 
+	exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+	mir_exist = f2fs_test_and_set_bit(offset,
+					se->cur_valid_map_mir);
+	if (unlikely(exist != mir_exist)) {
+		f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
+			blkaddr, exist);
+		f2fs_bug_on(sbi, 1);
+	}
+#endif
+	if (unlikely(exist)) {
+		f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
+		f2fs_bug_on(sbi, 1);
+		se->valid_blocks--;
+		del = 0;
+	}
+
+	if (f2fs_block_unit_discard(sbi) &&
+			!f2fs_test_and_set_bit(offset, se->discard_map))
+		sbi->discard_blks--;
+
+	/*
+	 * SSR should never reuse block which is checkpointed
+	 * or newly invalidated.
+	 */
+	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+		if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
+			se->ckpt_valid_blocks++;
+	}
+
+	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
+		se->ckpt_valid_blocks += del;
+
+	return del;
+}
+
+static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+{
+	struct seg_entry *se;
+	unsigned int segno, offset;
+	long int new_vblocks;
+
 	segno = GET_SEGNO(sbi, blkaddr);
 	if (segno == NULL_SEGNO)
 		return;
@@ -2451,73 +2538,10 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 
 	/* Update valid block bitmap */
 	if (del > 0) {
-		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
-#ifdef CONFIG_F2FS_CHECK_FS
-		mir_exist = f2fs_test_and_set_bit(offset,
-						se->cur_valid_map_mir);
-		if (unlikely(exist != mir_exist)) {
-			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
-				 blkaddr, exist);
-			f2fs_bug_on(sbi, 1);
-		}
-#endif
-		if (unlikely(exist)) {
-			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
-				 blkaddr);
-			f2fs_bug_on(sbi, 1);
-			se->valid_blocks--;
-			del = 0;
-		}
-
-		if (f2fs_block_unit_discard(sbi) &&
-				!f2fs_test_and_set_bit(offset, se->discard_map))
-			sbi->discard_blks--;
-
-		/*
-		 * SSR should never reuse block which is checkpointed
-		 * or newly invalidated.
-		 */
-		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
-			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
-				se->ckpt_valid_blocks++;
-		}
+		del = update_sit_entry_for_alloc(sbi, se, blkaddr, offset, del);
 	} else {
-		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
-#ifdef CONFIG_F2FS_CHECK_FS
-		mir_exist = f2fs_test_and_clear_bit(offset,
-						se->cur_valid_map_mir);
-		if (unlikely(exist != mir_exist)) {
-			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
-				 blkaddr, exist);
-			f2fs_bug_on(sbi, 1);
-		}
-#endif
-		if (unlikely(!exist)) {
-			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
-				 blkaddr);
-			f2fs_bug_on(sbi, 1);
-			se->valid_blocks++;
-			del = 0;
-		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
-			/*
-			 * If checkpoints are off, we must not reuse data that
-			 * was used in the previous checkpoint. If it was used
-			 * before, we must track that to know how much space we
-			 * really have.
-			 */
-			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
-				spin_lock(&sbi->stat_lock);
-				sbi->unusable_block_count++;
-				spin_unlock(&sbi->stat_lock);
-			}
-		}
-
-		if (f2fs_block_unit_discard(sbi) &&
-			f2fs_test_and_clear_bit(offset, se->discard_map))
-			sbi->discard_blks++;
+		del = update_sit_entry_for_release(sbi, se, blkaddr, offset, del);
 	}
-	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
-		se->ckpt_valid_blocks += del;
 
 	__mark_sit_entry_dirty(sbi, segno);
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH v4 2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks.
  2024-12-23  8:10 [PATCH v4 0/4] Speed up f2fs truncate Yi Sun
  2024-12-23  8:10 ` [PATCH v4 1/4] f2fs: introduce update_sit_entry_for_release/alloc() Yi Sun
@ 2024-12-23  8:10 ` Yi Sun
  2024-12-26 14:16   ` Chao Yu
  2024-12-23  8:10 ` [PATCH v4 3/4] f2fs: add parameter @len to f2fs_invalidate_blocks() Yi Sun
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 12+ messages in thread
From: Yi Sun @ 2024-12-23  8:10 UTC (permalink / raw)
  To: chao, jaegeuk
  Cc: sunyibuaa, yi.sun, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

This function can process some consecutive blocks at a time.

When using update_sit_entry() to release consecutive blocks,
ensure that the consecutive blocks belong to the same segment.
Because after update_sit_entry_for_release(), @segno is still
in use in update_sit_entry().

Signed-off-by: Yi Sun <yi.sun@unisoc.com>
---
 fs/f2fs/segment.c | 75 ++++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 30 deletions(-)

diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index bc761d9b889a..67d2859831e4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2426,6 +2426,10 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
 		SIT_I(sbi)->max_mtime = ctime;
 }
 
+/*
+ * NOTE: when updating multiple blocks at the same time, please ensure
+ * that the consecutive input blocks belong to the same segment.
+ */
 static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
 				block_t blkaddr, unsigned int offset, int del)
 {
@@ -2433,42 +2437,48 @@ static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_ent
 #ifdef CONFIG_F2FS_CHECK_FS
 	bool mir_exist;
 #endif
+	int i;
+	int del_count = -del;
+
+	f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
 
-	exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
+	for (i = 0; i < del_count; i++) {
+		exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
 #ifdef CONFIG_F2FS_CHECK_FS
-	mir_exist = f2fs_test_and_clear_bit(offset,
-					se->cur_valid_map_mir);
-	if (unlikely(exist != mir_exist)) {
-		f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
-			blkaddr, exist);
-		f2fs_bug_on(sbi, 1);
-	}
+		mir_exist = f2fs_test_and_clear_bit(offset + i,
+						se->cur_valid_map_mir);
+		if (unlikely(exist != mir_exist)) {
+			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
+				blkaddr + i, exist);
+			f2fs_bug_on(sbi, 1);
+		}
 #endif
-	if (unlikely(!exist)) {
-		f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr);
-		f2fs_bug_on(sbi, 1);
-		se->valid_blocks++;
-		del = 0;
-	} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
-		/*
-		 * If checkpoints are off, we must not reuse data that
-		 * was used in the previous checkpoint. If it was used
-		 * before, we must track that to know how much space we
-		 * really have.
-		 */
-		if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
-			spin_lock(&sbi->stat_lock);
-			sbi->unusable_block_count++;
-			spin_unlock(&sbi->stat_lock);
+		if (unlikely(!exist)) {
+			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
+			f2fs_bug_on(sbi, 1);
+			se->valid_blocks++;
+			del += 1;
+		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+			/*
+			 * If checkpoints are off, we must not reuse data that
+			 * was used in the previous checkpoint. If it was used
+			 * before, we must track that to know how much space we
+			 * really have.
+			 */
+			if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
+				spin_lock(&sbi->stat_lock);
+				sbi->unusable_block_count++;
+				spin_unlock(&sbi->stat_lock);
+			}
 		}
-	}
 
-	if (f2fs_block_unit_discard(sbi) &&
-			f2fs_test_and_clear_bit(offset, se->discard_map))
-		sbi->discard_blks++;
+		if (f2fs_block_unit_discard(sbi) &&
+				f2fs_test_and_clear_bit(offset + i, se->discard_map))
+			sbi->discard_blks++;
 
-	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
-		se->ckpt_valid_blocks += del;
+		if (!f2fs_test_bit(offset + i, se->ckpt_valid_map))
+			se->ckpt_valid_blocks -= 1;
+	}
 
 	return del;
 }
@@ -2517,6 +2527,11 @@ static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry
 	return del;
 }
 
+/*
+ * If releasing blocks, this function supports updating multiple consecutive blocks
+ * at one time, but please note that these consecutive blocks need to belong to the
+ * same segment.
+ */
 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 {
 	struct seg_entry *se;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH v4 3/4] f2fs: add parameter @len to f2fs_invalidate_blocks()
  2024-12-23  8:10 [PATCH v4 0/4] Speed up f2fs truncate Yi Sun
  2024-12-23  8:10 ` [PATCH v4 1/4] f2fs: introduce update_sit_entry_for_release/alloc() Yi Sun
  2024-12-23  8:10 ` [PATCH v4 2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks Yi Sun
@ 2024-12-23  8:10 ` Yi Sun
  2025-01-13 13:49   ` Chao Yu
  2024-12-23  8:10 ` [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range() Yi Sun
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 12+ messages in thread
From: Yi Sun @ 2024-12-23  8:10 UTC (permalink / raw)
  To: chao, jaegeuk
  Cc: sunyibuaa, yi.sun, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

The new function can process some consecutive blocks at a time.

Function f2fs_invalidate_blocks()->down_write() and up_write()
are very time-consuming, so if f2fs_invalidate_blocks() can
process consecutive blocks at one time, it will save a lot of time.

Signed-off-by: Yi Sun <yi.sun@unisoc.com>
---
 fs/f2fs/compress.c |  4 ++--
 fs/f2fs/f2fs.h     |  3 ++-
 fs/f2fs/file.c     |  8 ++++----
 fs/f2fs/node.c     |  4 ++--
 fs/f2fs/segment.c  | 32 +++++++++++++++++++++++++-------
 5 files changed, 35 insertions(+), 16 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index be3e6f4d33a2..c5e42eec8ac9 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1380,7 +1380,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 			if (blkaddr == COMPRESS_ADDR)
 				fio.compr_blocks++;
 			if (__is_valid_data_blkaddr(blkaddr))
-				f2fs_invalidate_blocks(sbi, blkaddr);
+				f2fs_invalidate_blocks(sbi, blkaddr, 1);
 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
 			goto unlock_continue;
 		}
@@ -1390,7 +1390,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 
 		if (i > cc->valid_nr_cpages) {
 			if (__is_valid_data_blkaddr(blkaddr)) {
-				f2fs_invalidate_blocks(sbi, blkaddr);
+				f2fs_invalidate_blocks(sbi, blkaddr, 1);
 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
 			}
 			goto unlock_continue;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c537f6b5a413..b70fa0e32fe3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3723,7 +3723,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+						unsigned int len);
 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index aa9679b3d8e4..81764b10840b 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -652,7 +652,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 				valid_blocks++;
 		}
 
-		f2fs_invalidate_blocks(sbi, blkaddr);
+		f2fs_invalidate_blocks(sbi, blkaddr, 1);
 
 		if (!released || blkaddr != COMPRESS_ADDR)
 			nr_free++;
@@ -750,7 +750,7 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
 		unsigned int i;
 
 		for (i = 0; i < ei.len; i++)
-			f2fs_invalidate_blocks(sbi, ei.blk + i);
+			f2fs_invalidate_blocks(sbi, ei.blk + i, 1);
 
 		dec_valid_block_count(sbi, inode, ei.len);
 		f2fs_update_time(sbi, REQ_TIME);
@@ -1323,7 +1323,7 @@ static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
 		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
 		if (ret) {
 			dec_valid_block_count(sbi, inode, 1);
-			f2fs_invalidate_blocks(sbi, *blkaddr);
+			f2fs_invalidate_blocks(sbi, *blkaddr, 1);
 		} else {
 			f2fs_update_data_blkaddr(&dn, *blkaddr);
 		}
@@ -1575,7 +1575,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
 			break;
 		}
 
-		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+		f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1);
 		f2fs_set_data_blkaddr(dn, NEW_ADDR);
 	}
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index c04ee1a7ce57..b09a6d0ee791 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -917,7 +917,7 @@ static int truncate_node(struct dnode_of_data *dn)
 	}
 
 	/* Deallocate node address */
-	f2fs_invalidate_blocks(sbi, ni.blk_addr);
+	f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
 	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
 	set_node_addr(sbi, &ni, NULL_ADDR, false);
 
@@ -2764,7 +2764,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
 	if (err)
 		return err;
 
-	f2fs_invalidate_blocks(sbi, ni.blk_addr);
+	f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
 	dec_valid_node_count(sbi, inode, false);
 	set_node_addr(sbi, &ni, NULL_ADDR, false);
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 67d2859831e4..813254dcc00e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -245,7 +245,7 @@ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
 		if (!__is_valid_data_blkaddr(new_addr)) {
 			if (new_addr == NULL_ADDR)
 				dec_valid_block_count(sbi, inode, 1);
-			f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
+			f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
 			f2fs_update_data_blkaddr(&dn, new_addr);
 		} else {
 			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
@@ -2567,25 +2567,43 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 		get_sec_entry(sbi, segno)->valid_blocks += del;
 }
 
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+				unsigned int len)
 {
 	unsigned int segno = GET_SEGNO(sbi, addr);
 	struct sit_info *sit_i = SIT_I(sbi);
+	block_t addr_start = addr, addr_end = addr + len - 1;
+	unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
+	unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
 
 	f2fs_bug_on(sbi, addr == NULL_ADDR);
 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
 		return;
 
-	f2fs_invalidate_internal_cache(sbi, addr, 1);
+	f2fs_invalidate_internal_cache(sbi, addr, len);
 
 	/* add it into sit main buffer */
 	down_write(&sit_i->sentry_lock);
 
-	update_segment_mtime(sbi, addr, 0);
-	update_sit_entry(sbi, addr, -1);
+	if (seg_num == 1)
+		cnt = len;
+	else
+		cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
 
-	/* add it into dirty seglist */
-	locate_dirty_segment(sbi, segno);
+	do {
+		update_segment_mtime(sbi, addr_start, 0);
+		update_sit_entry(sbi, addr_start, -cnt);
+
+		/* add it into dirty seglist */
+		locate_dirty_segment(sbi, segno);
+
+		/* update @addr_start and @cnt and @segno */
+		addr_start = START_BLOCK(sbi, ++segno);
+		if (++i == seg_num)
+			cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
+		else
+			cnt = max_blocks;
+	} while (i <= seg_num);
 
 	up_write(&sit_i->sentry_lock);
 }
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()
  2024-12-23  8:10 [PATCH v4 0/4] Speed up f2fs truncate Yi Sun
                   ` (2 preceding siblings ...)
  2024-12-23  8:10 ` [PATCH v4 3/4] f2fs: add parameter @len to f2fs_invalidate_blocks() Yi Sun
@ 2024-12-23  8:10 ` Yi Sun
  2025-01-14  4:28   ` Chao Yu
  2025-01-08 18:40 ` [f2fs-dev] [PATCH v4 0/4] Speed up f2fs truncate patchwork-bot+f2fs
  2025-01-13 18:51 ` patchwork-bot+f2fs
  5 siblings, 1 reply; 12+ messages in thread
From: Yi Sun @ 2024-12-23  8:10 UTC (permalink / raw)
  To: chao, jaegeuk
  Cc: sunyibuaa, yi.sun, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

Function f2fs_invalidate_blocks() can process continuous
blocks at a time, so f2fs_truncate_data_blocks_range() is
optimized to use the new functionality of
f2fs_invalidate_blocks().

Signed-off-by: Yi Sun <yi.sun@unisoc.com>
---
 fs/f2fs/file.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 68 insertions(+), 4 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 81764b10840b..9980d17ef9f5 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -612,6 +612,15 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
 	return finish_preallocate_blocks(inode);
 }
 
+static bool check_curr_block_is_consecutive(struct f2fs_sb_info *sbi,
+					block_t curr, block_t end)
+{
+	if (curr - end == 1 || curr == end)
+		return true;
+	else
+		return false;
+}
+
 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
@@ -621,8 +630,27 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 	int cluster_index = 0, valid_blocks = 0;
 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
 	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+	/*
+	 * Temporary record location.
+	 * When the current @blkaddr and @blkaddr_end can be processed
+	 * together, update the value of @blkaddr_end.
+	 * When it is detected that the current @blkaddr is not continuous
+	 * with @blkaddr_end, it is necessary to process the continuous blocks
+	 * range [blkaddr_start, blkaddr_end].
+	 */
+	block_t blkaddr_start, blkaddr_end;
+	/*
+	 * To avoid processing various invalid data blocks.
+	 * Because @blkaddr_start and @blkaddr_end may be assigned
+	 * NULL_ADDR or invalid data blocks, @last_valid is used to
+	 * record this situation.
+	 */
+	bool last_valid = false;
+	/* Process the last @blkaddr separately? */
+	bool last_one = true;
 
 	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+	blkaddr_start = blkaddr_end = le32_to_cpu(*addr);
 
 	/* Assumption: truncation starts with cluster */
 	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -638,24 +666,60 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 		}
 
 		if (blkaddr == NULL_ADDR)
-			continue;
+			goto next;
 
 		f2fs_set_data_blkaddr(dn, NULL_ADDR);
 
 		if (__is_valid_data_blkaddr(blkaddr)) {
 			if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
-				continue;
+				goto next;
 			if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
 						DATA_GENERIC_ENHANCE))
-				continue;
+				goto next;
 			if (compressed_cluster)
 				valid_blocks++;
 		}
 
-		f2fs_invalidate_blocks(sbi, blkaddr, 1);
+
+		if (check_curr_block_is_consecutive(sbi, blkaddr, blkaddr_end)) {
+			/*
+			 * The current block @blkaddr is continuous with
+			 * @blkaddr_end, so @blkaddr_end is updated.
+			 * And the f2fs_invalidate_blocks() is skipped
+			 * until @blkaddr that cannot be processed
+			 * together is encountered.
+			 */
+			blkaddr_end = blkaddr;
+			if (count == 1)
+				last_one = false;
+			else
+				goto skip_invalid;
+		}
+
+		f2fs_invalidate_blocks(sbi, blkaddr_start,
+					blkaddr_end - blkaddr_start + 1);
+		blkaddr_start = blkaddr_end = blkaddr;
+
+		if (count == 1 && last_one)
+			f2fs_invalidate_blocks(sbi, blkaddr, 1);
+
+skip_invalid:
+		last_valid = true;
 
 		if (!released || blkaddr != COMPRESS_ADDR)
 			nr_free++;
+
+		continue;
+
+next:
+		/* If consecutive blocks have been recorded, we need to process them. */
+		if (last_valid == true)
+			f2fs_invalidate_blocks(sbi, blkaddr_start,
+					blkaddr_end - blkaddr_start + 1);
+
+		blkaddr_start = blkaddr_end = le32_to_cpu(*(addr + 1));
+		last_valid = false;
+
 	}
 
 	if (compressed_cluster)
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH v4 1/4] f2fs: introduce update_sit_entry_for_release/alloc()
  2024-12-23  8:10 ` [PATCH v4 1/4] f2fs: introduce update_sit_entry_for_release/alloc() Yi Sun
@ 2024-12-26 14:00   ` Chao Yu
  0 siblings, 0 replies; 12+ messages in thread
From: Chao Yu @ 2024-12-26 14:00 UTC (permalink / raw)
  To: Yi Sun, jaegeuk
  Cc: Chao Yu, sunyibuaa, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

On 2024/12/23 16:10, Yi Sun wrote:
> No logical changes, just for cleanliness.
> 
> Signed-off-by: Yi Sun <yi.sun@unisoc.com>

Reviewed-by: Chao Yu <chao@kernel.org>

Thanks,

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4 2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks.
  2024-12-23  8:10 ` [PATCH v4 2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks Yi Sun
@ 2024-12-26 14:16   ` Chao Yu
  0 siblings, 0 replies; 12+ messages in thread
From: Chao Yu @ 2024-12-26 14:16 UTC (permalink / raw)
  To: Yi Sun, jaegeuk
  Cc: Chao Yu, sunyibuaa, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

On 2024/12/23 16:10, Yi Sun wrote:
> This function can process some consecutive blocks at a time.
> 
> When using update_sit_entry() to release consecutive blocks,
> ensure that the consecutive blocks belong to the same segment.
> Because after update_sit_entry_for_realese(), @segno is still
> in use in update_sit_entry().
> 
> Signed-off-by: Yi Sun <yi.sun@unisoc.com>

Reviewed-by: Chao Yu <chao@kernel.org>

Thanks,

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [f2fs-dev] [PATCH v4 0/4] Speed up f2fs truncate
  2024-12-23  8:10 [PATCH v4 0/4] Speed up f2fs truncate Yi Sun
                   ` (3 preceding siblings ...)
  2024-12-23  8:10 ` [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range() Yi Sun
@ 2025-01-08 18:40 ` patchwork-bot+f2fs
  2025-01-13 18:51 ` patchwork-bot+f2fs
  5 siblings, 0 replies; 12+ messages in thread
From: patchwork-bot+f2fs @ 2025-01-08 18:40 UTC (permalink / raw)
  To: Yi Sun
  Cc: chao, jaegeuk, ke.wang, linux-kernel, linux-f2fs-devel, sunyibuaa,
	Hao_hao.Wang

Hello:

This series was applied to jaegeuk/f2fs.git (dev)
by Jaegeuk Kim <jaegeuk@kernel.org>:

On Mon, 23 Dec 2024 16:10:40 +0800 you wrote:
> Deleting large files is time-consuming, and a large part
> of the time is spent in f2fs_invalidate_blocks()
> ->down_write(sit_info->sentry_lock) and up_write().
> 
> If some blocks are continuous, we can process these blocks
> at the same time. This can reduce the number of calls to
> the down_write() and the up_write(), thereby improving the
> overall speed of doing truncate.
> 
> [...]

Here is the summary with links:
  - [f2fs-dev,v4,1/4] f2fs: introduce update_sit_entry_for_release/alloc()
    https://git.kernel.org/jaegeuk/f2fs/c/66baee2b886d
  - [f2fs-dev,v4,2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks.
    https://git.kernel.org/jaegeuk/f2fs/c/81ffbd224e5f
  - [f2fs-dev,v4,3/4] f2fs: add parameter @len to f2fs_invalidate_blocks()
    (no matching commit)
  - [f2fs-dev,v4,4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()
    (no matching commit)

You are awesome, thank you!
-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html



^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4 3/4] f2fs: add parameter @len to f2fs_invalidate_blocks()
  2024-12-23  8:10 ` [PATCH v4 3/4] f2fs: add parameter @len to f2fs_invalidate_blocks() Yi Sun
@ 2025-01-13 13:49   ` Chao Yu
  0 siblings, 0 replies; 12+ messages in thread
From: Chao Yu @ 2025-01-13 13:49 UTC (permalink / raw)
  To: Yi Sun, jaegeuk
  Cc: chao, sunyibuaa, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

On 2024/12/23 16:10, Yi Sun wrote:
> New function can process some consecutive blocks at a time.
> 
> Function f2fs_invalidate_blocks()->down_write() and up_write()
> are very time-consuming, so if f2fs_invalidate_blocks() can
> process consecutive blocks at one time, it will save a lot of time.
> 
> Signed-off-by: Yi Sun <yi.sun@unisoc.com>

Reviewed-by: Chao Yu <chao@kernel.org>

Thanks,

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [f2fs-dev] [PATCH v4 0/4] Speed up f2fs truncate
  2024-12-23  8:10 [PATCH v4 0/4] Speed up f2fs truncate Yi Sun
                   ` (4 preceding siblings ...)
  2025-01-08 18:40 ` [f2fs-dev] [PATCH v4 0/4] Speed up f2fs truncate patchwork-bot+f2fs
@ 2025-01-13 18:51 ` patchwork-bot+f2fs
  5 siblings, 0 replies; 12+ messages in thread
From: patchwork-bot+f2fs @ 2025-01-13 18:51 UTC (permalink / raw)
  To: Yi Sun
  Cc: chao, jaegeuk, ke.wang, linux-kernel, linux-f2fs-devel, sunyibuaa,
	Hao_hao.Wang

Hello:

This series was applied to jaegeuk/f2fs.git (dev)
by Jaegeuk Kim <jaegeuk@kernel.org>:

On Mon, 23 Dec 2024 16:10:40 +0800 you wrote:
> Deleting large files is time-consuming, and a large part
> of the time is spent in f2fs_invalidate_blocks()
> ->down_write(sit_info->sentry_lock) and up_write().
> 
> If some blocks are continuous, we can process these blocks
> at the same time. This can reduce the number of calls to
> the down_write() and the up_write(), thereby improving the
> overall speed of doing truncate.
> 
> [...]

Here is the summary with links:
  - [f2fs-dev,v4,1/4] f2fs: introduce update_sit_entry_for_release/alloc()
    (no matching commit)
  - [f2fs-dev,v4,2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks.
    (no matching commit)
  - [f2fs-dev,v4,3/4] f2fs: add parameter @len to f2fs_invalidate_blocks()
    https://git.kernel.org/jaegeuk/f2fs/c/e53c568f4603
  - [f2fs-dev,v4,4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()
    (no matching commit)

You are awesome, thank you!
-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html



^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()
  2024-12-23  8:10 ` [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range() Yi Sun
@ 2025-01-14  4:28   ` Chao Yu
  2025-01-15  5:09     ` 答复: " 孙毅 (Yi Sun)
  0 siblings, 1 reply; 12+ messages in thread
From: Chao Yu @ 2025-01-14  4:28 UTC (permalink / raw)
  To: Yi Sun, jaegeuk
  Cc: chao, sunyibuaa, linux-f2fs-devel, linux-kernel, niuzhiguo84,
	Hao_hao.Wang, ke.wang

On 12/23/24 16:10, Yi Sun wrote:
> Function f2fs_invalidate_blocks() can process continuous
> blocks at a time, so f2fs_truncate_data_blocks_range() is
> optimized to use the new functionality of
> f2fs_invalidate_blocks().
> 
> Signed-off-by: Yi Sun <yi.sun@unisoc.com>
> ---
>   fs/f2fs/file.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++---
>   1 file changed, 68 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index 81764b10840b..9980d17ef9f5 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -612,6 +612,15 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
>   	return finish_preallocate_blocks(inode);
>   }
>   
> +static bool check_curr_block_is_consecutive(struct f2fs_sb_info *sbi,
> +					block_t curr, block_t end)

static inline bool is_consecutive_blkaddrs(block_t cur, block_t end)
{
	return cur == end || cur == end + 1;
}

But maybe we don't need to add this function, see below comments.

> +{
> +	if (curr - end == 1 || curr == end)
> +		return true;
> +	else
> +		return false;
> +}
> +
>   void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
>   {
>   	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
> @@ -621,8 +630,27 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
>   	int cluster_index = 0, valid_blocks = 0;
>   	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
>   	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
> +	/*
> +	 * Temporary record location.
> +	 * When the current @blkaddr and @blkaddr_end can be processed
> +	 * together, update the value of @blkaddr_end.
> +	 * When it is detected that the current @blkaddr is not contiguous
> +	 * with @blkaddr_end, it is necessary to process the contiguous blocks

I prefer not adding these comments into function, what about describing the 
details in commit message, instead, thoughts?

> +	 * range [blkaddr_start, blkaddr_end].
> +	 */
> +	block_t blkaddr_start, blkaddr_end;
> +	/*
> +	 * To avoid processing various invalid data blocks.
> +	 * Because @blkaddr_start and @blkaddr_end may be assigned
> +	 * NULL_ADDR or invalid data blocks, @last_valid is used to
> +	 * record this situation.
> +	 */

Ditto,

What about using blkstart & blklen to record last consecutive block addresses, 
and using blklen to identify whether we need to call f2fs_invalidate_blocks() or
not?

> +	bool last_valid = false;
> +	/* Process the last @blkaddr separately? */
> +	bool last_one = true;
>   
>   	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
> +	blkaddr_start = blkaddr_end = le32_to_cpu(*addr);
>   
>   	/* Assumption: truncation starts with cluster */
>   	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
> @@ -638,24 +666,60 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
>   		}
>   
>   		if (blkaddr == NULL_ADDR)
> -			continue;
> +			goto next;
>   
>   		f2fs_set_data_blkaddr(dn, NULL_ADDR);
>   
>   		if (__is_valid_data_blkaddr(blkaddr)) {
>   			if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
> -				continue;
> +				goto next;
>   			if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
>   						DATA_GENERIC_ENHANCE))
> -				continue;
> +				goto next;
>   			if (compressed_cluster)
>   				valid_blocks++;
>   		}
>   
> -		f2fs_invalidate_blocks(sbi, blkaddr, 1);

How about this? Can you change based on it?

		if (blkstart + blklen == blkaddr) {
			blklen++;
		} else {
			f2fs_invalidate_blocks(sbi, blkstart, blklen);
			blkstart = blkaddr;
			blklen = 1;
		}

> +
> +		if (check_curr_block_is_consecutive(sbi, blkaddr, blkaddr_end)) {
> +			/*
> +			 * The current block @blkaddr is continuous with
> +			 * @blkaddr_end, so @blkaddr_end is updated.
> +			 * And the f2fs_invalidate_blocks() is skipped
> +			 * until @blkaddr that cannot be processed
> +			 * together is encountered.
> +			 */
> +			blkaddr_end = blkaddr;
> +			if (count == 1)
> +				last_one = false;
> +			else
> +				goto skip_invalid;
> +		}
> +
> +		f2fs_invalidate_blocks(sbi, blkaddr_start,
> +					blkaddr_end - blkaddr_start + 1);
> +		blkaddr_start = blkaddr_end = blkaddr;
> +
> +		if (count == 1 && last_one)
> +			f2fs_invalidate_blocks(sbi, blkaddr, 1);
> +
> +skip_invalid:
> +		last_valid = true;
>   
>   		if (!released || blkaddr != COMPRESS_ADDR)
>   			nr_free++;
> +
> +		continue;
> +
> +next:

next:

		if (blklen) {
			f2fs_invalidate_blocks(sbi, blkstart, blklen);
			blkstart = blkaddr;
			blklen = 1;
		}

> +		/* If consecutive blocks have been recorded, we need to process them. */
> +		if (last_valid == true)
> +			f2fs_invalidate_blocks(sbi, blkaddr_start,
> +					blkaddr_end - blkaddr_start + 1);
> +
> +		blkaddr_start = blkaddr_end = le32_to_cpu(*(addr + 1));
> +		last_valid = false;
> +
>   	}

	if (blklen)
		f2fs_invalidate_blocks(sbi, blkstart, blklen);

Thanks,

>   
>   	if (compressed_cluster)


^ permalink raw reply	[flat|nested] 12+ messages in thread

* 答复: [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()
  2025-01-14  4:28   ` Chao Yu
@ 2025-01-15  5:09     ` 孙毅 (Yi Sun)
  0 siblings, 0 replies; 12+ messages in thread
From: 孙毅 (Yi Sun) @ 2025-01-15  5:09 UTC (permalink / raw)
  To: Chao Yu, jaegeuk@kernel.org
  Cc: sunyibuaa@gmail.com, linux-f2fs-devel@lists.sourceforge.net,
	linux-kernel@vger.kernel.org, niuzhiguo84@gmail.com,
	王皓 (Hao_hao Wang), 王科 (Ke Wang),
	孙毅 (Yi Sun)



> -----邮件原件-----
> 发件人: Chao Yu <chao@kernel.org>
> 发送时间: 2025年1月14日 12:29
> 收件人: 孙毅 (Yi Sun) <Yi.Sun@unisoc.com>; jaegeuk@kernel.org
> 抄送: chao@kernel.org; sunyibuaa@gmail.com; linux-f2fs-
> devel@lists.sourceforge.net; linux-kernel@vger.kernel.org;
> niuzhiguo84@gmail.com; 王皓 (Hao_hao Wang) <Hao_hao.Wang@unisoc.com>;
> 王科 (Ke Wang) <Ke.Wang@unisoc.com>
> 主题: Re: [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()
> 
> On 12/23/24 16:10, Yi Sun wrote:
> > Function f2fs_invalidate_blocks() can process continuous
> > blocks at a time, so f2fs_truncate_data_blocks_range() is
> > optimized to use the new functionality of
> > f2fs_invalidate_blocks().
> >
> > Signed-off-by: Yi Sun <yi.sun@unisoc.com>
> > ---
> >   fs/f2fs/file.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++---
> >   1 file changed, 68 insertions(+), 4 deletions(-)
> >
> > diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> > index 81764b10840b..9980d17ef9f5 100644
> > --- a/fs/f2fs/file.c
> > +++ b/fs/f2fs/file.c
> > @@ -612,6 +612,15 @@ static int f2fs_file_open(struct inode *inode, struct file
> *filp)
> >       return finish_preallocate_blocks(inode);
> >   }
> >
> > +static bool check_curr_block_is_consecutive(struct f2fs_sb_info *sbi,
> > +                                     block_t curr, block_t end)
> 
> static inline bool is_consecutive_blkaddrs(block_t cur, block_t end)
> {
>         return cur == end || cur == end + 1;
> }
> 
> But maybe we don't need to add this function, see below comments.
> 
> > +{
> > +     if (curr - end == 1 || curr == end)
> > +             return true;
> > +     else
> > +             return false;
> > +}
> > +
> >   void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
> >   {
> >       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
> > @@ -621,8 +630,27 @@ void f2fs_truncate_data_blocks_range(struct
> dnode_of_data *dn, int count)
> >       int cluster_index = 0, valid_blocks = 0;
> >       int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
> >       bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
> > +     /*
> > +      * Temporary record location.
> > +      * When the current @blkaddr and @blkaddr_end can be processed
> > +      * together, update the value of @blkaddr_end.
> > +      * When it is detected that the current @blkaddr is not contiguous
> > +      * with @blkaddr_end, it is necessary to process the contiguous blocks
> 
> I prefer not adding these comments into function, what about describing the
> details in commit message, instead, thoughts?
> 
> > +      * range [blkaddr_start, blkaddr_end].
> > +      */
> > +     block_t blkaddr_start, blkaddr_end;
> > +     /*
> > +      * To avoid processing various invalid data blocks.
> > +      * Because @blkaddr_start and @blkaddr_end may be assigned
> > +      * NULL_ADDR or invalid data blocks, @last_valid is used to
> > +      * record this situation.
> > +      */
> 
> Ditto,
> 
> What about using blkstart & blklen to record last consecutive block addresses,
> and using blklen to identify whether we need to call f2fs_invalidate_blocks() or
> not?
> 
> > +     bool last_valid = false;
> > +     /* Process the last @blkaddr separately? */
> > +     bool last_one = true;
> >
> >       addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
> > +     blkaddr_start = blkaddr_end = le32_to_cpu(*addr);
> >
> >       /* Assumption: truncation starts with cluster */
> >       for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
> > @@ -638,24 +666,60 @@ void f2fs_truncate_data_blocks_range(struct
> dnode_of_data *dn, int count)
> >               }
> >
> >               if (blkaddr == NULL_ADDR)
> > -                     continue;
> > +                     goto next;
> >
> >               f2fs_set_data_blkaddr(dn, NULL_ADDR);
> >
> >               if (__is_valid_data_blkaddr(blkaddr)) {
> >                       if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
> > -                             continue;
> > +                             goto next;
> >                       if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
> >                                               DATA_GENERIC_ENHANCE))
> > -                             continue;
> > +                             goto next;
> >                       if (compressed_cluster)
> >                               valid_blocks++;
> >               }
> >
> > -             f2fs_invalidate_blocks(sbi, blkaddr, 1);
> 
> How about this? Can you change based on it?
> 
>                 if (blkstart + blklen == blkaddr) {
>                         blklen++;
>                 } else {
>                         f2fs_invalidate_blocks(sbi, blkstart, blklen);
>                         blkstart = blkaddr;
>                         blklen = 1;
>                 }
> 
> > +
> > +             if (check_curr_block_is_consecutive(sbi, blkaddr, blkaddr_end)) {
> > +                     /*
> > +                      * The current block @blkaddr is continuous with
> > +                      * @blkaddr_end, so @blkaddr_end is updated.
> > +                      * And the f2fs_invalidate_blocks() is skipped
> > +                      * until @blkaddr that cannot be processed
> > +                      * together is encountered.
> > +                      */
> > +                     blkaddr_end = blkaddr;
> > +                     if (count == 1)
> > +                             last_one = false;
> > +                     else
> > +                             goto skip_invalid;
> > +             }
> > +
> > +             f2fs_invalidate_blocks(sbi, blkaddr_start,
> > +                                     blkaddr_end - blkaddr_start + 1);
> > +             blkaddr_start = blkaddr_end = blkaddr;
> > +
> > +             if (count == 1 && last_one)
> > +                     f2fs_invalidate_blocks(sbi, blkaddr, 1);
> > +
> > +skip_invalid:
> > +             last_valid = true;
> >
> >               if (!released || blkaddr != COMPRESS_ADDR)
> >                       nr_free++;
> > +
> > +             continue;
> > +
> > +next:
> 
> next:
> 
>                 if (blklen) {
>                         f2fs_invalidate_blocks(sbi, blkstart, blklen);
>                         blkstart = blkaddr;
>                         blklen = 1;
>                 }
> 
> > +             /* If consecutive blocks have been recorded, we need to process them.
> */
> > +             if (last_valid == true)
> > +                     f2fs_invalidate_blocks(sbi, blkaddr_start,
> > +                                     blkaddr_end - blkaddr_start + 1);
> > +
> > +             blkaddr_start = blkaddr_end = le32_to_cpu(*(addr + 1));
> > +             last_valid = false;
> > +
> >       }
> 
>         if (blklen)
>                 f2fs_invalidate_blocks(sbi, blkstart, blklen);
> 
> Thanks,
> 

Hi Chao,
Your suggestion looks great, I will modify it accordingly.

> >
> >       if (compressed_cluster)


^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2025-01-15  5:11 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-12-23  8:10 [PATCH v4 0/4] Speed up f2fs truncate Yi Sun
2024-12-23  8:10 ` [PATCH v4 1/4] f2fs: introduce update_sit_entry_for_release/alloc() Yi Sun
2024-12-26 14:00   ` Chao Yu
2024-12-23  8:10 ` [PATCH v4 2/4] f2fs: update_sit_entry_for_release() supports consecutive blocks Yi Sun
2024-12-26 14:16   ` Chao Yu
2024-12-23  8:10 ` [PATCH v4 3/4] f2fs: add parameter @len to f2fs_invalidate_blocks() Yi Sun
2025-01-13 13:49   ` Chao Yu
2024-12-23  8:10 ` [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range() Yi Sun
2025-01-14  4:28   ` Chao Yu
2025-01-15  5:09     ` 答复: " 孙毅 (Yi Sun)
2025-01-08 18:40 ` [f2fs-dev] [PATCH v4 0/4] Speed up f2fs truncate patchwork-bot+f2fs
2025-01-13 18:51 ` patchwork-bot+f2fs

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).