[PATCH 01/12] nilfs2: convert segment buffer to be folio-based
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
In the segment buffer (log buffer) implementation, two parts of the
block buffer handling, namely CRC calculation and bio preparation, are
still page-based, so convert them to be folio-based.
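To illustrate, the mapping pattern in the data CRC calculation changes
roughly as follows (a condensed sketch of the hunks below; the
surrounding loop is omitted):

    /* Before: map the page and add the in-page offset by hand */
    kaddr = kmap_local_page(bh->b_page);
    crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
    kunmap_local(kaddr);

    /* After: map the buffer's folio directly at its byte offset */
    from = kmap_local_folio(bh->b_folio,
                            offset_in_folio(bh->b_folio, bh->b_data));
    crc = crc32_le(crc, from, bh->b_size);
    kunmap_local(from);

Likewise, bio_add_page() is replaced with bio_add_folio(), which
returns a boolean success flag rather than the number of bytes added,
so the explicit length check goes away.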
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/segbuf.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc431b4c34c9..e08cab03366b 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -205,7 +205,6 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
{
struct buffer_head *bh;
struct nilfs_segment_summary *raw_sum;
- void *kaddr;
u32 crc;
bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
@@ -220,9 +219,13 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
crc = crc32_le(crc, bh->b_data, bh->b_size);
}
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
- kaddr = kmap_local_page(bh->b_page);
- crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
- kunmap_local(kaddr);
+ size_t offset = offset_in_folio(bh->b_folio, bh->b_data);
+ unsigned char *from;
+
+ /* Do not support block sizes larger than PAGE_SIZE */
+ from = kmap_local_folio(bh->b_folio, offset);
+ crc = crc32_le(crc, from, bh->b_size);
+ kunmap_local(from);
}
raw_sum->ss_datasum = cpu_to_le32(crc);
}
@@ -374,7 +377,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi,
struct buffer_head *bh)
{
- int len, err;
+ int err;
BUG_ON(wi->nr_vecs <= 0);
repeat:
@@ -385,8 +388,8 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
(wi->nilfs->ns_blocksize_bits - 9);
}
- len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
- if (len == bh->b_size) {
+ if (bio_add_folio(wi->bio, bh->b_folio, bh->b_size,
+ offset_in_folio(bh->b_folio, bh->b_data))) {
wi->end++;
return 0;
}
--
2.43.0
[PATCH 02/12] nilfs2: convert common metadata file code to be folio-based
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
In the common routines for metadata files,
nilfs_mdt_insert_new_block(), which inserts a new block buffer into
the cache, is still page-based, and there are two places where
bh_offset() is used. Convert these to be folio-based.
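To illustrate, the block initialization in nilfs_mdt_insert_new_block()
changes roughly as follows (a condensed sketch of the hunk below):

    /* Before: page-based mapping and cache flush */
    kaddr = kmap_local_page(bh->b_page);
    memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
    ...
    flush_dcache_page(bh->b_page);
    kunmap_local(kaddr);

    /* After: folio-based equivalent */
    from = kmap_local_folio(folio, offset_in_folio(folio, bh->b_data));
    memset(from, 0, bh->b_size);
    ...
    kunmap_local(from);
    flush_dcache_folio(folio);

Unlike bh_offset(), which is relative to a single page,
offset_in_folio(folio, bh->b_data) stays correct even if the folio
spans multiple pages.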
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/alloc.c | 8 +++++---
fs/nilfs2/cpfile.c | 4 ++--
fs/nilfs2/mdt.c | 21 +++++++++++++--------
3 files changed, 20 insertions(+), 13 deletions(-)
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index ba50388ee4bf..d30dfed707b6 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -177,12 +177,14 @@ nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
* nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block
* @inode: inode of metadata file
* @bh: buffer head of the buffer to be initialized
- * @kaddr: kernel address mapped for the page including the buffer
+ * @from: kernel address mapped for a chunk of the block
+ *
+ * This function does not yet support the case where block size > PAGE_SIZE.
*/
static void nilfs_palloc_desc_block_init(struct inode *inode,
- struct buffer_head *bh, void *kaddr)
+ struct buffer_head *bh, void *from)
{
- struct nilfs_palloc_group_desc *desc = kaddr + bh_offset(bh);
+ struct nilfs_palloc_group_desc *desc = from;
unsigned long n = nilfs_palloc_groups_per_desc_block(inode);
__le32 nfrees;
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index f0ce37552446..a8046cbf2753 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -113,9 +113,9 @@ nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
static void nilfs_cpfile_block_init(struct inode *cpfile,
struct buffer_head *bh,
- void *kaddr)
+ void *from)
{
- struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
+ struct nilfs_checkpoint *cp = from;
size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
int n = nilfs_cpfile_checkpoints_per_block(cpfile);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index ceb7dc0b5bad..a4c1e00aaaac 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -33,7 +33,8 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
struct buffer_head *, void *))
{
struct nilfs_inode_info *ii = NILFS_I(inode);
- void *kaddr;
+ struct folio *folio = bh->b_folio;
+ void *from;
int ret;
/* Caller exclude read accesses using page lock */
@@ -47,12 +48,14 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
set_buffer_mapped(bh);
- kaddr = kmap_local_page(bh->b_page);
- memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
+ /* Initialize block (block size > PAGE_SIZE not yet supported) */
+ from = kmap_local_folio(folio, offset_in_folio(folio, bh->b_data));
+ memset(from, 0, bh->b_size);
if (init_block)
- init_block(inode, bh, kaddr);
- flush_dcache_page(bh->b_page);
- kunmap_local(kaddr);
+ init_block(inode, bh, from);
+ kunmap_local(from);
+
+ flush_dcache_folio(folio);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
@@ -571,7 +574,8 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
if (!bh_frozen)
bh_frozen = create_empty_buffers(folio, 1 << blkbits, 0);
- bh_frozen = get_nth_bh(bh_frozen, bh_offset(bh) >> blkbits);
+ bh_frozen = get_nth_bh(bh_frozen,
+ offset_in_folio(folio, bh->b_data) >> blkbits);
if (!buffer_uptodate(bh_frozen))
nilfs_copy_buffer(bh_frozen, bh);
@@ -601,7 +605,8 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
if (!IS_ERR(folio)) {
bh_frozen = folio_buffers(folio);
if (bh_frozen) {
- n = bh_offset(bh) >> inode->i_blkbits;
+ n = offset_in_folio(folio, bh->b_data) >>
+ inode->i_blkbits;
bh_frozen = get_nth_bh(bh_frozen, n);
}
folio_unlock(folio);
--
2.43.0
[PATCH 03/12] nilfs2: convert segment usage file to be folio-based
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
For the sufile, a metadata file that holds segment management
information, convert the page-based implementation to a folio-based
implementation.
kmap_local_page() is changed to kmap_local_folio(), and the places
where offsets within a page were calculated using bh_offset() are
replaced with calculations using offset_in_folio(), aided by a new
helper function, nilfs_sufile_segment_usage_offset().
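With the new helper, a typical accessor in this file now looks roughly
like this (condensed from the hunks below):

    size_t offset;
    struct nilfs_segment_usage *su;

    offset = nilfs_sufile_segment_usage_offset(sufile, segnum, su_bh);
    su = kmap_local_folio(su_bh->b_folio, offset);
    /* ... read or update the segment usage entry ... */
    kunmap_local(su);

Because kmap_local_folio() returns a pointer to the entry itself, the
separate kaddr variable can be dropped and the typed pointer is passed
straight to kunmap_local().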
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/sufile.c | 160 +++++++++++++++++++++++----------------------
1 file changed, 82 insertions(+), 78 deletions(-)
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index eea5a6a12f7b..d3ecc813d633 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -70,11 +70,20 @@ nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
max - curr + 1);
}
-static struct nilfs_segment_usage *
-nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
- struct buffer_head *bh, void *kaddr)
+/**
+ * nilfs_sufile_segment_usage_offset - calculate the byte offset of a segment
+ * usage entry in the folio containing it
+ * @sufile: segment usage file inode
+ * @segnum: number of segment usage
+ * @bh: buffer head of block containing segment usage indexed by @segnum
+ *
+ * Return: Byte offset in the folio of the segment usage entry.
+ */
+static size_t nilfs_sufile_segment_usage_offset(const struct inode *sufile,
+ __u64 segnum,
+ struct buffer_head *bh)
{
- return kaddr + bh_offset(bh) +
+ return offset_in_folio(bh->b_folio, bh->b_data) +
nilfs_sufile_get_offset(sufile, segnum) *
NILFS_MDT(sufile)->mi_entry_size;
}
@@ -112,13 +121,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
u64 ncleanadd, u64 ndirtyadd)
{
struct nilfs_sufile_header *header;
- void *kaddr;
- kaddr = kmap_local_page(header_bh->b_page);
- header = kaddr + bh_offset(header_bh);
+ header = kmap_local_folio(header_bh->b_folio, 0);
le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
- kunmap_local(kaddr);
+ kunmap_local(header);
mark_buffer_dirty(header_bh);
}
@@ -313,6 +320,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
size_t susz = NILFS_MDT(sufile)->mi_entry_size;
__u64 segnum, maxsegnum, last_alloc;
+ size_t offset;
void *kaddr;
unsigned long nsegments, nsus, cnt;
int ret, j;
@@ -322,10 +330,9 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
if (ret < 0)
goto out_sem;
- kaddr = kmap_local_page(header_bh->b_page);
- header = kaddr + bh_offset(header_bh);
+ header = kmap_local_folio(header_bh->b_folio, 0);
last_alloc = le64_to_cpu(header->sh_last_alloc);
- kunmap_local(kaddr);
+ kunmap_local(header);
nsegments = nilfs_sufile_get_nsegments(sufile);
maxsegnum = sui->allocmax;
@@ -359,9 +366,10 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
&su_bh);
if (ret < 0)
goto out_header;
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(
- sufile, segnum, su_bh, kaddr);
+
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum,
+ su_bh);
+ su = kaddr = kmap_local_folio(su_bh->b_folio, offset);
nsus = nilfs_sufile_segment_usages_in_block(
sufile, segnum, maxsegnum);
@@ -372,12 +380,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
nilfs_segment_usage_set_dirty(su);
kunmap_local(kaddr);
- kaddr = kmap_local_page(header_bh->b_page);
- header = kaddr + bh_offset(header_bh);
+ header = kmap_local_folio(header_bh->b_folio, 0);
le64_add_cpu(&header->sh_ncleansegs, -1);
le64_add_cpu(&header->sh_ndirtysegs, 1);
header->sh_last_alloc = cpu_to_le64(segnum);
- kunmap_local(kaddr);
+ kunmap_local(header);
sui->ncleansegs--;
mark_buffer_dirty(header_bh);
@@ -411,18 +418,18 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
struct buffer_head *su_bh)
{
struct nilfs_segment_usage *su;
- void *kaddr;
+ size_t offset;
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum, su_bh);
+ su = kmap_local_folio(su_bh->b_folio, offset);
if (unlikely(!nilfs_segment_usage_clean(su))) {
nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
__func__, (unsigned long long)segnum);
- kunmap_local(kaddr);
+ kunmap_local(su);
return;
}
nilfs_segment_usage_set_dirty(su);
- kunmap_local(kaddr);
+ kunmap_local(su);
nilfs_sufile_mod_counter(header_bh, -1, 1);
NILFS_SUI(sufile)->ncleansegs--;
@@ -436,14 +443,14 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
struct buffer_head *su_bh)
{
struct nilfs_segment_usage *su;
- void *kaddr;
+ size_t offset;
int clean, dirty;
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum, su_bh);
+ su = kmap_local_folio(su_bh->b_folio, offset);
if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
su->su_nblocks == cpu_to_le32(0)) {
- kunmap_local(kaddr);
+ kunmap_local(su);
return;
}
clean = nilfs_segment_usage_clean(su);
@@ -453,7 +460,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
su->su_lastmod = cpu_to_le64(0);
su->su_nblocks = cpu_to_le32(0);
su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
- kunmap_local(kaddr);
+ kunmap_local(su);
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -467,15 +474,15 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
struct buffer_head *su_bh)
{
struct nilfs_segment_usage *su;
- void *kaddr;
+ size_t offset;
int sudirty;
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum, su_bh);
+ su = kmap_local_folio(su_bh->b_folio, offset);
if (nilfs_segment_usage_clean(su)) {
nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
__func__, (unsigned long long)segnum);
- kunmap_local(kaddr);
+ kunmap_local(su);
return;
}
if (unlikely(nilfs_segment_usage_error(su)))
@@ -488,7 +495,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
(unsigned long long)segnum);
nilfs_segment_usage_set_clean(su);
- kunmap_local(kaddr);
+ kunmap_local(su);
mark_buffer_dirty(su_bh);
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -507,7 +514,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
struct buffer_head *bh;
- void *kaddr;
+ size_t offset;
struct nilfs_segment_usage *su;
int ret;
@@ -523,12 +530,12 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
goto out_sem;
}
- kaddr = kmap_local_page(bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum, bh);
+ su = kmap_local_folio(bh->b_folio, offset);
if (unlikely(nilfs_segment_usage_error(su))) {
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
- kunmap_local(kaddr);
+ kunmap_local(su);
brelse(bh);
if (nilfs_segment_is_active(nilfs, segnum)) {
nilfs_error(sufile->i_sb,
@@ -546,7 +553,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
ret = -EIO;
} else {
nilfs_segment_usage_set_dirty(su);
- kunmap_local(kaddr);
+ kunmap_local(su);
mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile);
brelse(bh);
@@ -568,7 +575,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
{
struct buffer_head *bh;
struct nilfs_segment_usage *su;
- void *kaddr;
+ size_t offset;
int ret;
down_write(&NILFS_MDT(sufile)->mi_sem);
@@ -576,8 +583,8 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
if (ret < 0)
goto out_sem;
- kaddr = kmap_local_page(bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum, bh);
+ su = kmap_local_folio(bh->b_folio, offset);
if (modtime) {
/*
* Check segusage error and set su_lastmod only when updating
@@ -587,7 +594,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
su->su_lastmod = cpu_to_le64(modtime);
}
su->su_nblocks = cpu_to_le32(nblocks);
- kunmap_local(kaddr);
+ kunmap_local(su);
mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile);
@@ -619,7 +626,6 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
struct buffer_head *header_bh;
struct nilfs_sufile_header *header;
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
- void *kaddr;
int ret;
down_read(&NILFS_MDT(sufile)->mi_sem);
@@ -628,8 +634,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
if (ret < 0)
goto out_sem;
- kaddr = kmap_local_page(header_bh->b_page);
- header = kaddr + bh_offset(header_bh);
+ header = kmap_local_folio(header_bh->b_folio, 0);
sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
@@ -638,7 +643,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
spin_lock(&nilfs->ns_last_segment_lock);
sustat->ss_prot_seq = nilfs->ns_prot_seq;
spin_unlock(&nilfs->ns_last_segment_lock);
- kunmap_local(kaddr);
+ kunmap_local(header);
brelse(header_bh);
out_sem:
@@ -651,18 +656,18 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
struct buffer_head *su_bh)
{
struct nilfs_segment_usage *su;
- void *kaddr;
+ size_t offset;
int suclean;
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum, su_bh);
+ su = kmap_local_folio(su_bh->b_folio, offset);
if (nilfs_segment_usage_error(su)) {
- kunmap_local(kaddr);
+ kunmap_local(su);
return;
}
suclean = nilfs_segment_usage_clean(su);
nilfs_segment_usage_set_error(su);
- kunmap_local(kaddr);
+ kunmap_local(su);
if (suclean) {
nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -700,7 +705,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
unsigned long segusages_per_block;
unsigned long nsegs, ncleaned;
__u64 segnum;
- void *kaddr;
+ size_t offset;
ssize_t n, nc;
int ret;
int j;
@@ -731,16 +736,16 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
/* hole */
continue;
}
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(
- sufile, segnum, su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum,
+ su_bh);
+ su = kmap_local_folio(su_bh->b_folio, offset);
su2 = su;
for (j = 0; j < n; j++, su = (void *)su + susz) {
if ((le32_to_cpu(su->su_flags) &
~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
nilfs_segment_is_active(nilfs, segnum + j)) {
ret = -EBUSY;
- kunmap_local(kaddr);
+ kunmap_local(su2);
brelse(su_bh);
goto out_header;
}
@@ -752,7 +757,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
nc++;
}
}
- kunmap_local(kaddr);
+ kunmap_local(su2);
if (nc > 0) {
mark_buffer_dirty(su_bh);
ncleaned += nc;
@@ -799,7 +804,6 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
struct buffer_head *header_bh;
struct nilfs_sufile_header *header;
struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
- void *kaddr;
unsigned long nsegs, nrsvsegs;
int ret = 0;
@@ -837,10 +841,9 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
sui->allocmin = 0;
}
- kaddr = kmap_local_page(header_bh->b_page);
- header = kaddr + bh_offset(header_bh);
+ header = kmap_local_folio(header_bh->b_folio, 0);
header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
- kunmap_local(kaddr);
+ kunmap_local(header);
mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(sufile);
@@ -874,6 +877,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
struct nilfs_suinfo *si = buf;
size_t susz = NILFS_MDT(sufile)->mi_entry_size;
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+ size_t offset;
void *kaddr;
unsigned long nsegs, segusages_per_block;
ssize_t n;
@@ -901,9 +905,9 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
continue;
}
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(
- sufile, segnum, su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum,
+ su_bh);
+ su = kaddr = kmap_local_folio(su_bh->b_folio, offset);
for (j = 0; j < n;
j++, su = (void *)su + susz, si = (void *)si + sisz) {
si->sui_lastmod = le64_to_cpu(su->su_lastmod);
@@ -951,7 +955,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
struct buffer_head *header_bh, *bh;
struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
struct nilfs_segment_usage *su;
- void *kaddr;
+ size_t offset;
unsigned long blkoff, prev_blkoff;
int cleansi, cleansu, dirtysi, dirtysu;
long ncleaned = 0, ndirtied = 0;
@@ -983,9 +987,9 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
goto out_header;
for (;;) {
- kaddr = kmap_local_page(bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(
- sufile, sup->sup_segnum, bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(
+ sufile, sup->sup_segnum, bh);
+ su = kmap_local_folio(bh->b_folio, offset);
if (nilfs_suinfo_update_lastmod(sup))
su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);
@@ -1020,7 +1024,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
}
- kunmap_local(kaddr);
+ kunmap_local(su);
sup = (void *)sup + supsz;
if (sup >= supend)
@@ -1076,6 +1080,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
struct buffer_head *su_bh;
struct nilfs_segment_usage *su;
+ size_t offset;
void *kaddr;
size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
sector_t seg_start, seg_end, start_block, end_block;
@@ -1125,9 +1130,9 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
continue;
}
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
- su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(sufile, segnum,
+ su_bh);
+ su = kaddr = kmap_local_folio(su_bh->b_folio, offset);
for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
if (!nilfs_segment_usage_clean(su))
continue;
@@ -1167,9 +1172,10 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
}
ndiscarded += nblocks;
- kaddr = kmap_local_page(su_bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(
- sufile, segnum, su_bh, kaddr);
+ offset = nilfs_sufile_segment_usage_offset(
+ sufile, segnum, su_bh);
+ su = kaddr = kmap_local_folio(su_bh->b_folio,
+ offset);
}
/* start new extent */
@@ -1221,7 +1227,6 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
struct nilfs_sufile_info *sui;
struct buffer_head *header_bh;
struct nilfs_sufile_header *header;
- void *kaddr;
int err;
if (susize > sb->s_blocksize) {
@@ -1262,10 +1267,9 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
}
sui = NILFS_SUI(sufile);
- kaddr = kmap_local_page(header_bh->b_page);
- header = kaddr + bh_offset(header_bh);
+ header = kmap_local_folio(header_bh->b_folio, 0);
sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
- kunmap_local(kaddr);
+ kunmap_local(header);
brelse(header_bh);
sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
--
2.43.0
[PATCH 04/12] nilfs2: convert persistent object allocator to be folio-based
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
Regarding the persistent object allocator, a common mechanism for
allocating objects in metadata files such as inodes and DAT entries,
convert the page-based implementation to a folio-based implementation.
In this conversion, helper functions nilfs_palloc_group_desc_offset()
and nilfs_palloc_bitmap_offset() are added and used to calculate the
byte offset within a folio of a group descriptor structure and a
bitmap, respectively, replacing kmap_local_page() with
kmap_local_folio().
In addition, a helper function called nilfs_palloc_entry_offset() is
provided to facilitate common calculation of the byte offset within a
folio of metadata file entries managed in the persistent object
allocator format.
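With these helpers, the common descriptor/bitmap access pattern becomes
roughly the following (a condensed sketch of the hunks below):

    size_t doff, boff;
    struct nilfs_palloc_group_desc *desc;
    unsigned char *bitmap;

    doff = nilfs_palloc_group_desc_offset(inode, group, desc_bh);
    desc = kmap_local_folio(desc_bh->b_folio, doff);

    boff = nilfs_palloc_bitmap_offset(bitmap_bh);
    bitmap = kmap_local_folio(bitmap_bh->b_folio, boff);
    /* ... manipulate the descriptor and bitmap ... */
    kunmap_local(bitmap);
    kunmap_local(desc);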
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/alloc.c | 137 +++++++++++++++++++++++++++++-----------------
fs/nilfs2/alloc.h | 2 +
2 files changed, 89 insertions(+), 50 deletions(-)
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index d30dfed707b6..5e0a6bd3e015 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -339,19 +339,55 @@ static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr)
}
/**
- * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor
+ * nilfs_palloc_group_desc_offset - calculate the byte offset of a group
+ * descriptor in the folio containing it
* @inode: inode of metadata file using this allocator
* @group: group number
- * @bh: buffer head of the buffer storing the group descriptor block
- * @kaddr: kernel address mapped for the page including the buffer
+ * @bh: buffer head of the group descriptor block
+ *
+ * Return: Byte offset in the folio of the group descriptor for @group.
*/
-static struct nilfs_palloc_group_desc *
-nilfs_palloc_block_get_group_desc(const struct inode *inode,
- unsigned long group,
- const struct buffer_head *bh, void *kaddr)
+static size_t nilfs_palloc_group_desc_offset(const struct inode *inode,
+ unsigned long group,
+ const struct buffer_head *bh)
{
- return (struct nilfs_palloc_group_desc *)(kaddr + bh_offset(bh)) +
- group % nilfs_palloc_groups_per_desc_block(inode);
+ return offset_in_folio(bh->b_folio, bh->b_data) +
+ sizeof(struct nilfs_palloc_group_desc) *
+ (group % nilfs_palloc_groups_per_desc_block(inode));
+}
+
+/**
+ * nilfs_palloc_bitmap_offset - calculate the byte offset of a bitmap block
+ * in the folio containing it
+ * @bh: buffer head of the bitmap block
+ *
+ * Return: Byte offset in the folio of the bitmap block for @bh.
+ */
+static size_t nilfs_palloc_bitmap_offset(const struct buffer_head *bh)
+{
+ return offset_in_folio(bh->b_folio, bh->b_data);
+}
+
+/**
+ * nilfs_palloc_entry_offset - calculate the byte offset of an entry in the
+ * folio containing it
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ * @bh: buffer head of the entry block
+ *
+ * Return: Byte offset in the folio of the entry @nr.
+ */
+size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr,
+ const struct buffer_head *bh)
+{
+ unsigned long entry_index_in_group, entry_index_in_block;
+
+ nilfs_palloc_group(inode, nr, &entry_index_in_group);
+ entry_index_in_block = entry_index_in_group %
+ NILFS_MDT(inode)->mi_entries_per_block;
+
+ return offset_in_folio(bh->b_folio, bh->b_data) +
+ entry_index_in_block * NILFS_MDT(inode)->mi_entry_size;
}
/**
@@ -508,7 +544,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
struct buffer_head *desc_bh, *bitmap_bh;
struct nilfs_palloc_group_desc *desc;
unsigned char *bitmap;
- void *desc_kaddr, *bitmap_kaddr;
+ size_t doff, boff;
unsigned long group, maxgroup, ngroups;
unsigned long group_offset, maxgroup_offset;
unsigned long n, entries_per_group;
@@ -531,17 +567,17 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
if (ret < 0)
return ret;
- desc_kaddr = kmap_local_page(desc_bh->b_page);
- desc = nilfs_palloc_block_get_group_desc(
- inode, group, desc_bh, desc_kaddr);
+
+ doff = nilfs_palloc_group_desc_offset(inode, group, desc_bh);
+ desc = kmap_local_folio(desc_bh->b_folio, doff);
n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
maxgroup);
- for (j = 0; j < n; j++, desc++, group++, group_offset = 0) {
+ for (j = 0; j < n; j++, group++, group_offset = 0) {
lock = nilfs_mdt_bgl_lock(inode, group);
- if (nilfs_palloc_group_desc_nfrees(desc, lock) == 0)
+ if (nilfs_palloc_group_desc_nfrees(&desc[j], lock) == 0)
continue;
- kunmap_local(desc_kaddr);
+ kunmap_local(desc);
ret = nilfs_palloc_get_bitmap_block(inode, group, 1,
&bitmap_bh);
if (unlikely(ret < 0)) {
@@ -549,12 +585,14 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
return ret;
}
- desc_kaddr = kmap_local_page(desc_bh->b_page);
- desc = nilfs_palloc_block_get_group_desc(
- inode, group, desc_bh, desc_kaddr);
+ /*
+ * Re-kmap the folio containing the first (and
+ * subsequent) group descriptors.
+ */
+ desc = kmap_local_folio(desc_bh->b_folio, doff);
- bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
- bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
+ boff = nilfs_palloc_bitmap_offset(bitmap_bh);
+ bitmap = kmap_local_folio(bitmap_bh->b_folio, boff);
pos = nilfs_palloc_find_available_slot(
bitmap, group_offset, entries_per_group, lock,
wrap);
@@ -564,14 +602,14 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
* beginning, the wrap flag only has an effect on the
* first search.
*/
- kunmap_local(bitmap_kaddr);
+ kunmap_local(bitmap);
if (pos >= 0)
goto found;
brelse(bitmap_bh);
}
- kunmap_local(desc_kaddr);
+ kunmap_local(desc);
brelse(desc_bh);
}
@@ -580,9 +618,9 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
found:
/* found a free entry */
- nilfs_palloc_group_desc_add_entries(desc, lock, -1);
+ nilfs_palloc_group_desc_add_entries(&desc[j], lock, -1);
req->pr_entry_nr = entries_per_group * group + pos;
- kunmap_local(desc_kaddr);
+ kunmap_local(desc);
req->pr_desc_bh = desc_bh;
req->pr_bitmap_bh = bitmap_bh;
@@ -613,18 +651,18 @@ void nilfs_palloc_commit_alloc_entry(struct inode *inode,
void nilfs_palloc_commit_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
- struct nilfs_palloc_group_desc *desc;
unsigned long group, group_offset;
+ size_t doff, boff;
+ struct nilfs_palloc_group_desc *desc;
unsigned char *bitmap;
- void *desc_kaddr, *bitmap_kaddr;
spinlock_t *lock;
group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
- desc_kaddr = kmap_local_page(req->pr_desc_bh->b_page);
- desc = nilfs_palloc_block_get_group_desc(inode, group,
- req->pr_desc_bh, desc_kaddr);
- bitmap_kaddr = kmap_local_page(req->pr_bitmap_bh->b_page);
- bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
+ doff = nilfs_palloc_group_desc_offset(inode, group, req->pr_desc_bh);
+ desc = kmap_local_folio(req->pr_desc_bh->b_folio, doff);
+
+ boff = nilfs_palloc_bitmap_offset(req->pr_bitmap_bh);
+ bitmap = kmap_local_folio(req->pr_bitmap_bh->b_folio, boff);
lock = nilfs_mdt_bgl_lock(inode, group);
if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
@@ -635,8 +673,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
else
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
- kunmap_local(bitmap_kaddr);
- kunmap_local(desc_kaddr);
+ kunmap_local(bitmap);
+ kunmap_local(desc);
mark_buffer_dirty(req->pr_desc_bh);
mark_buffer_dirty(req->pr_bitmap_bh);
@@ -655,17 +693,17 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
struct nilfs_palloc_group_desc *desc;
- void *desc_kaddr, *bitmap_kaddr;
+ size_t doff, boff;
unsigned char *bitmap;
unsigned long group, group_offset;
spinlock_t *lock;
group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
- desc_kaddr = kmap_local_page(req->pr_desc_bh->b_page);
- desc = nilfs_palloc_block_get_group_desc(inode, group,
- req->pr_desc_bh, desc_kaddr);
- bitmap_kaddr = kmap_local_page(req->pr_bitmap_bh->b_page);
- bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
+ doff = nilfs_palloc_group_desc_offset(inode, group, req->pr_desc_bh);
+ desc = kmap_local_folio(req->pr_desc_bh->b_folio, doff);
+
+ boff = nilfs_palloc_bitmap_offset(req->pr_bitmap_bh);
+ bitmap = kmap_local_folio(req->pr_bitmap_bh->b_folio, boff);
lock = nilfs_mdt_bgl_lock(inode, group);
if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
@@ -676,8 +714,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
else
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
- kunmap_local(bitmap_kaddr);
- kunmap_local(desc_kaddr);
+ kunmap_local(bitmap);
+ kunmap_local(desc);
brelse(req->pr_bitmap_bh);
brelse(req->pr_desc_bh);
@@ -741,7 +779,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
struct buffer_head *desc_bh, *bitmap_bh;
struct nilfs_palloc_group_desc *desc;
unsigned char *bitmap;
- void *desc_kaddr, *bitmap_kaddr;
+ size_t doff, boff;
unsigned long group, group_offset;
__u64 group_min_nr, last_nrs[8];
const unsigned long epg = nilfs_palloc_entries_per_group(inode);
@@ -769,8 +807,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
/* Get the first entry number of the group */
group_min_nr = (__u64)group * epg;
- bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
- bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
+ boff = nilfs_palloc_bitmap_offset(bitmap_bh);
+ bitmap = kmap_local_folio(bitmap_bh->b_folio, boff);
lock = nilfs_mdt_bgl_lock(inode, group);
j = i;
@@ -815,7 +853,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
entry_start = rounddown(group_offset, epb);
} while (true);
- kunmap_local(bitmap_kaddr);
+ kunmap_local(bitmap);
mark_buffer_dirty(bitmap_bh);
brelse(bitmap_bh);
@@ -829,11 +867,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
inode->i_ino);
}
- desc_kaddr = kmap_local_page(desc_bh->b_page);
- desc = nilfs_palloc_block_get_group_desc(
- inode, group, desc_bh, desc_kaddr);
+ doff = nilfs_palloc_group_desc_offset(inode, group, desc_bh);
+ desc = kmap_local_folio(desc_bh->b_folio, doff);
nfree = nilfs_palloc_group_desc_add_entries(desc, lock, n);
- kunmap_local(desc_kaddr);
+ kunmap_local(desc);
mark_buffer_dirty(desc_bh);
nilfs_mdt_mark_dirty(inode);
brelse(desc_bh);
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index e19d7eb10084..af8f882619d4 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -33,6 +33,8 @@ int nilfs_palloc_get_entry_block(struct inode *, __u64, int,
struct buffer_head **);
void *nilfs_palloc_block_get_entry(const struct inode *, __u64,
const struct buffer_head *, void *);
+size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr,
+ const struct buffer_head *bh);
int nilfs_palloc_count_max_entries(struct inode *, u64, u64 *);
--
2.43.0
[PATCH 05/12] nilfs2: convert inode file to be folio-based
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
Convert the page-based implementation of ifile, a metadata file that
manages inodes, to folio-based.
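After the conversion, nilfs_ifile_map_inode() computes the entry's byte
offset with nilfs_palloc_entry_offset() and maps it with
kmap_local_folio(), so a caller does roughly:

    raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
    /* ... access or update the on-disk inode fields ... */
    nilfs_ifile_unmap_inode(raw_inode);  /* releases the local mapping */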
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/ifile.c | 10 +++++-----
fs/nilfs2/ifile.h | 4 ++--
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 1e86b9303b7c..e7339eb3c08a 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -98,7 +98,7 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
.pr_entry_nr = ino, .pr_entry_bh = NULL
};
struct nilfs_inode *raw_inode;
- void *kaddr;
+ size_t offset;
int ret;
ret = nilfs_palloc_prepare_free_entry(ifile, &req);
@@ -113,11 +113,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
return ret;
}
- kaddr = kmap_local_page(req.pr_entry_bh->b_page);
- raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
- req.pr_entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(ifile, req.pr_entry_nr,
+ req.pr_entry_bh);
+ raw_inode = kmap_local_folio(req.pr_entry_bh->b_folio, offset);
raw_inode->i_flags = 0;
- kunmap_local(kaddr);
+ kunmap_local(raw_inode);
mark_buffer_dirty(req.pr_entry_bh);
brelse(req.pr_entry_bh);
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index 625545cc2a98..5d116a566d9e 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -21,9 +21,9 @@
static inline struct nilfs_inode *
nilfs_ifile_map_inode(struct inode *ifile, ino_t ino, struct buffer_head *ibh)
{
- void *kaddr = kmap_local_page(ibh->b_page);
+ size_t __offset_in_folio = nilfs_palloc_entry_offset(ifile, ino, ibh);
- return nilfs_palloc_block_get_entry(ifile, ino, ibh, kaddr);
+ return kmap_local_folio(ibh->b_folio, __offset_in_folio);
}
static inline void nilfs_ifile_unmap_inode(struct nilfs_inode *raw_inode)
--
2.43.0
[PATCH 06/12] nilfs2: convert DAT file to be folio-based
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
Regarding the DAT, a metadata file that manages virtual block
addresses, convert the page-based implementation to a folio-based
implementation.
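The conversion follows the same pattern as the other metadata files;
for example, nilfs_dat_commit_alloc() now reads roughly as follows
(condensed from the hunk below):

    offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
                                       req->pr_entry_bh);
    entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
    entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
    entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
    entry->de_blocknr = cpu_to_le64(0);
    kunmap_local(entry);

In nilfs_dat_get_vinfo(), only the first entry of each block is mapped,
and the remaining entries are reached from it by pointer arithmetic
using the entry size.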
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/dat.c | 98 ++++++++++++++++++++++++++-----------------------
1 file changed, 52 insertions(+), 46 deletions(-)
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 0bef662176a4..e220dcb08aa6 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -89,15 +89,15 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
- void *kaddr;
+ size_t offset;
- kaddr = kmap_local_page(req->pr_entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
- req->pr_entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+ req->pr_entry_bh);
+ entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
entry->de_blocknr = cpu_to_le64(0);
- kunmap_local(kaddr);
+ kunmap_local(entry);
nilfs_palloc_commit_alloc_entry(dat, req);
nilfs_dat_commit_entry(dat, req);
@@ -113,15 +113,15 @@ static void nilfs_dat_commit_free(struct inode *dat,
struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
- void *kaddr;
+ size_t offset;
- kaddr = kmap_local_page(req->pr_entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
- req->pr_entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+ req->pr_entry_bh);
+ entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
entry->de_blocknr = cpu_to_le64(0);
- kunmap_local(kaddr);
+ kunmap_local(entry);
nilfs_dat_commit_entry(dat, req);
@@ -143,14 +143,14 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
sector_t blocknr)
{
struct nilfs_dat_entry *entry;
- void *kaddr;
+ size_t offset;
- kaddr = kmap_local_page(req->pr_entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
- req->pr_entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+ req->pr_entry_bh);
+ entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
entry->de_blocknr = cpu_to_le64(blocknr);
- kunmap_local(kaddr);
+ kunmap_local(entry);
nilfs_dat_commit_entry(dat, req);
}
@@ -160,19 +160,19 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
struct nilfs_dat_entry *entry;
__u64 start;
sector_t blocknr;
- void *kaddr;
+ size_t offset;
int ret;
ret = nilfs_dat_prepare_entry(dat, req, 0);
if (ret < 0)
return ret;
- kaddr = kmap_local_page(req->pr_entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
- req->pr_entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+ req->pr_entry_bh);
+ entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
start = le64_to_cpu(entry->de_start);
blocknr = le64_to_cpu(entry->de_blocknr);
- kunmap_local(kaddr);
+ kunmap_local(entry);
if (blocknr == 0) {
ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -200,11 +200,11 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
struct nilfs_dat_entry *entry;
__u64 start, end;
sector_t blocknr;
- void *kaddr;
+ size_t offset;
- kaddr = kmap_local_page(req->pr_entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
- req->pr_entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+ req->pr_entry_bh);
+ entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
end = start = le64_to_cpu(entry->de_start);
if (!dead) {
end = nilfs_mdt_cno(dat);
@@ -212,7 +212,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
}
entry->de_end = cpu_to_le64(end);
blocknr = le64_to_cpu(entry->de_blocknr);
- kunmap_local(kaddr);
+ kunmap_local(entry);
if (blocknr == 0)
nilfs_dat_commit_free(dat, req);
@@ -225,14 +225,14 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
struct nilfs_dat_entry *entry;
__u64 start;
sector_t blocknr;
- void *kaddr;
+ size_t offset;
- kaddr = kmap_local_page(req->pr_entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
- req->pr_entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+ req->pr_entry_bh);
+ entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
start = le64_to_cpu(entry->de_start);
blocknr = le64_to_cpu(entry->de_blocknr);
- kunmap_local(kaddr);
+ kunmap_local(entry);
if (start == nilfs_mdt_cno(dat) && blocknr == 0)
nilfs_palloc_abort_free_entry(dat, req);
@@ -336,7 +336,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
struct buffer_head *entry_bh;
struct nilfs_dat_entry *entry;
- void *kaddr;
+ size_t offset;
int ret;
ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
@@ -359,21 +359,21 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
}
}
- kaddr = kmap_local_page(entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, vblocknr, entry_bh);
+ entry = kmap_local_folio(entry_bh->b_folio, offset);
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
nilfs_crit(dat->i_sb,
"%s: invalid vblocknr = %llu, [%llu, %llu)",
__func__, (unsigned long long)vblocknr,
(unsigned long long)le64_to_cpu(entry->de_start),
(unsigned long long)le64_to_cpu(entry->de_end));
- kunmap_local(kaddr);
+ kunmap_local(entry);
brelse(entry_bh);
return -EINVAL;
}
WARN_ON(blocknr == 0);
entry->de_blocknr = cpu_to_le64(blocknr);
- kunmap_local(kaddr);
+ kunmap_local(entry);
mark_buffer_dirty(entry_bh);
nilfs_mdt_mark_dirty(dat);
@@ -407,7 +407,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
struct buffer_head *entry_bh, *bh;
struct nilfs_dat_entry *entry;
sector_t blocknr;
- void *kaddr;
+ size_t offset;
int ret;
ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
@@ -423,8 +423,8 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
}
}
- kaddr = kmap_local_page(entry_bh->b_page);
- entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
+ offset = nilfs_palloc_entry_offset(dat, vblocknr, entry_bh);
+ entry = kmap_local_folio(entry_bh->b_folio, offset);
blocknr = le64_to_cpu(entry->de_blocknr);
if (blocknr == 0) {
ret = -ENOENT;
@@ -433,7 +433,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
*blocknrp = blocknr;
out:
- kunmap_local(kaddr);
+ kunmap_local(entry);
brelse(entry_bh);
return ret;
}
@@ -442,11 +442,12 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
size_t nvi)
{
struct buffer_head *entry_bh;
- struct nilfs_dat_entry *entry;
+ struct nilfs_dat_entry *entry, *first_entry;
struct nilfs_vinfo *vinfo = buf;
__u64 first, last;
- void *kaddr;
+ size_t offset;
unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
+ unsigned int entry_size = NILFS_MDT(dat)->mi_entry_size;
int i, j, n, ret;
for (i = 0; i < nvi; i += n) {
@@ -454,23 +455,28 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
0, &entry_bh);
if (ret < 0)
return ret;
- kaddr = kmap_local_page(entry_bh->b_page);
- /* last virtual block number in this block */
+
first = vinfo->vi_vblocknr;
first = div64_ul(first, entries_per_block);
first *= entries_per_block;
+ /* first virtual block number in this block */
+
last = first + entries_per_block - 1;
+ /* last virtual block number in this block */
+
+ offset = nilfs_palloc_entry_offset(dat, first, entry_bh);
+ first_entry = kmap_local_folio(entry_bh->b_folio, offset);
for (j = i, n = 0;
j < nvi && vinfo->vi_vblocknr >= first &&
vinfo->vi_vblocknr <= last;
j++, n++, vinfo = (void *)vinfo + visz) {
- entry = nilfs_palloc_block_get_entry(
- dat, vinfo->vi_vblocknr, entry_bh, kaddr);
+ entry = (void *)first_entry +
+ (vinfo->vi_vblocknr - first) * entry_size;
vinfo->vi_start = le64_to_cpu(entry->de_start);
vinfo->vi_end = le64_to_cpu(entry->de_end);
vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
}
- kunmap_local(kaddr);
+ kunmap_local(first_entry);
brelse(entry_bh);
}
--
2.43.0
[PATCH 07/12] nilfs2: remove nilfs_palloc_block_get_entry()
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
All calls to nilfs_palloc_block_get_entry() are now gone, so remove
it.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/alloc.c | 19 -------------------
fs/nilfs2/alloc.h | 2 --
2 files changed, 21 deletions(-)
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 5e0a6bd3e015..ba3e1f591f36 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -390,25 +390,6 @@ size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr,
entry_index_in_block * NILFS_MDT(inode)->mi_entry_size;
}
-/**
- * nilfs_palloc_block_get_entry - get kernel address of an entry
- * @inode: inode of metadata file using this allocator
- * @nr: serial number of the entry (e.g. inode number)
- * @bh: buffer head of the buffer storing the entry block
- * @kaddr: kernel address mapped for the page including the buffer
- */
-void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
- const struct buffer_head *bh, void *kaddr)
-{
- unsigned long entry_offset, group_offset;
-
- nilfs_palloc_group(inode, nr, &group_offset);
- entry_offset = group_offset % NILFS_MDT(inode)->mi_entries_per_block;
-
- return kaddr + bh_offset(bh) +
- entry_offset * NILFS_MDT(inode)->mi_entry_size;
-}
-
/**
* nilfs_palloc_find_available_slot - find available slot in a group
* @bitmap: bitmap of the group
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index af8f882619d4..3f115ab7e9a7 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -31,8 +31,6 @@ nilfs_palloc_entries_per_group(const struct inode *inode)
int nilfs_palloc_init_blockgroup(struct inode *, unsigned int);
int nilfs_palloc_get_entry_block(struct inode *, __u64, int,
struct buffer_head **);
-void *nilfs_palloc_block_get_entry(const struct inode *, __u64,
- const struct buffer_head *, void *);
size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr,
const struct buffer_head *bh);
--
2.43.0
[PATCH 08/12] nilfs2: convert checkpoint file to be folio-based
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
Regarding the cpfile, a metadata file that manages checkpoints, convert
the page-based implementation to a folio-based implementation.
This change involves adding some helper functions to calculate byte
offsets on folios and removing a few helper functions that are no
longer needed.
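For example, looking up a checkpoint with the new offset helper now
reads roughly as follows (condensed from the hunks below):

    offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
    cp = kmap_local_folio(cp_bh->b_folio, offset);
    if (nilfs_checkpoint_invalid(cp)) {
        /* ... handle the error ... */
    }
    /* ... examine or update the checkpoint ... */
    kunmap_local(cp);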
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/nilfs2/cpfile.c | 379 ++++++++++++++++++++++++---------------------
1 file changed, 204 insertions(+), 175 deletions(-)
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index a8046cbf2753..c20207d7a989 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -68,49 +68,36 @@ static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
struct buffer_head *bh,
- void *kaddr,
unsigned int n)
{
- struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
+ struct nilfs_checkpoint *cp;
unsigned int count;
+ cp = kmap_local_folio(bh->b_folio,
+ offset_in_folio(bh->b_folio, bh->b_data));
count = le32_to_cpu(cp->cp_checkpoints_count) + n;
cp->cp_checkpoints_count = cpu_to_le32(count);
+ kunmap_local(cp);
return count;
}
static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
struct buffer_head *bh,
- void *kaddr,
unsigned int n)
{
- struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
+ struct nilfs_checkpoint *cp;
unsigned int count;
+ cp = kmap_local_folio(bh->b_folio,
+ offset_in_folio(bh->b_folio, bh->b_data));
WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
count = le32_to_cpu(cp->cp_checkpoints_count) - n;
cp->cp_checkpoints_count = cpu_to_le32(count);
+ kunmap_local(cp);
return count;
}
-static inline struct nilfs_cpfile_header *
-nilfs_cpfile_block_get_header(const struct inode *cpfile,
- struct buffer_head *bh,
- void *kaddr)
-{
- return kaddr + bh_offset(bh);
-}
-
-static struct nilfs_checkpoint *
-nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
- struct buffer_head *bh,
- void *kaddr)
-{
- return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
- NILFS_MDT(cpfile)->mi_entry_size;
-}
-
static void nilfs_cpfile_block_init(struct inode *cpfile,
struct buffer_head *bh,
void *from)
@@ -125,6 +112,54 @@ static void nilfs_cpfile_block_init(struct inode *cpfile,
}
}
+/**
+ * nilfs_cpfile_checkpoint_offset - calculate the byte offset of a checkpoint
+ * entry in the folio containing it
+ * @cpfile: checkpoint file inode
+ * @cno: checkpoint number
+ * @bh: buffer head of block containing checkpoint indexed by @cno
+ *
+ * Return: Byte offset in the folio of the checkpoint specified by @cno.
+ */
+static size_t nilfs_cpfile_checkpoint_offset(const struct inode *cpfile,
+ __u64 cno,
+ struct buffer_head *bh)
+{
+ return offset_in_folio(bh->b_folio, bh->b_data) +
+ nilfs_cpfile_get_offset(cpfile, cno) *
+ NILFS_MDT(cpfile)->mi_entry_size;
+}
+
+/**
+ * nilfs_cpfile_cp_snapshot_list_offset - calculate the byte offset of a
+ * checkpoint snapshot list in the folio
+ * containing it
+ * @cpfile: checkpoint file inode
+ * @cno: checkpoint number
+ * @bh: buffer head of block containing checkpoint indexed by @cno
+ *
+ * Return: Byte offset in the folio of the checkpoint snapshot list specified
+ * by @cno.
+ */
+static size_t nilfs_cpfile_cp_snapshot_list_offset(const struct inode *cpfile,
+ __u64 cno,
+ struct buffer_head *bh)
+{
+ return nilfs_cpfile_checkpoint_offset(cpfile, cno, bh) +
+ offsetof(struct nilfs_checkpoint, cp_snapshot_list);
+}
+
+/**
+ * nilfs_cpfile_ch_snapshot_list_offset - calculate the byte offset of the
+ * snapshot list in the header
+ *
+ * Return: Byte offset in the folio of the checkpoint snapshot list
+ */
+static size_t nilfs_cpfile_ch_snapshot_list_offset(void)
+{
+ return offsetof(struct nilfs_cpfile_header, ch_snapshot_list);
+}
+
static int nilfs_cpfile_get_header_block(struct inode *cpfile,
struct buffer_head **bhp)
{
@@ -214,7 +249,7 @@ int nilfs_cpfile_read_checkpoint(struct inode *cpfile, __u64 cno,
{
struct buffer_head *cp_bh;
struct nilfs_checkpoint *cp;
- void *kaddr;
+ size_t offset;
int ret;
if (cno < 1 || cno > nilfs_mdt_cno(cpfile))
@@ -228,8 +263,8 @@ int nilfs_cpfile_read_checkpoint(struct inode *cpfile, __u64 cno,
goto out_sem;
}
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
+ cp = kmap_local_folio(cp_bh->b_folio, offset);
if (nilfs_checkpoint_invalid(cp)) {
ret = -EINVAL;
goto put_cp;
@@ -254,7 +289,7 @@ int nilfs_cpfile_read_checkpoint(struct inode *cpfile, __u64 cno,
root->ifile = ifile;
put_cp:
- kunmap_local(kaddr);
+ kunmap_local(cp);
brelse(cp_bh);
out_sem:
up_read(&NILFS_MDT(cpfile)->mi_sem);
@@ -282,7 +317,7 @@ int nilfs_cpfile_create_checkpoint(struct inode *cpfile, __u64 cno)
struct buffer_head *header_bh, *cp_bh;
struct nilfs_cpfile_header *header;
struct nilfs_checkpoint *cp;
- void *kaddr;
+ size_t offset;
int ret;
if (WARN_ON_ONCE(cno < 1))
@@ -297,24 +332,22 @@ int nilfs_cpfile_create_checkpoint(struct inode *cpfile, __u64 cno)
if (unlikely(ret < 0))
goto out_header;
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
+ cp = kmap_local_folio(cp_bh->b_folio, offset);
if (nilfs_checkpoint_invalid(cp)) {
/* a newly-created checkpoint */
nilfs_checkpoint_clear_invalid(cp);
+ kunmap_local(cp);
if (!nilfs_cpfile_is_in_first(cpfile, cno))
nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
- kaddr, 1);
- kunmap_local(kaddr);
+ 1);
- kaddr = kmap_local_page(header_bh->b_page);
- header = nilfs_cpfile_block_get_header(cpfile, header_bh,
- kaddr);
+ header = kmap_local_folio(header_bh->b_folio, 0);
le64_add_cpu(&header->ch_ncheckpoints, 1);
- kunmap_local(kaddr);
+ kunmap_local(header);
mark_buffer_dirty(header_bh);
} else {
- kunmap_local(kaddr);
+ kunmap_local(cp);
}
/* Force the buffer and the inode to become dirty */
@@ -353,7 +386,7 @@ int nilfs_cpfile_finalize_checkpoint(struct inode *cpfile, __u64 cno,
{
struct buffer_head *cp_bh;
struct nilfs_checkpoint *cp;
- void *kaddr;
+ size_t offset;
int ret;
if (WARN_ON_ONCE(cno < 1))
@@ -367,10 +400,10 @@ int nilfs_cpfile_finalize_checkpoint(struct inode *cpfile, __u64 cno,
goto out_sem;
}
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
+ cp = kmap_local_folio(cp_bh->b_folio, offset);
if (unlikely(nilfs_checkpoint_invalid(cp))) {
- kunmap_local(kaddr);
+ kunmap_local(cp);
brelse(cp_bh);
goto error;
}
@@ -391,7 +424,7 @@ int nilfs_cpfile_finalize_checkpoint(struct inode *cpfile, __u64 cno,
nilfs_write_inode_common(root->ifile, &cp->cp_ifile_inode);
nilfs_bmap_write(NILFS_I(root->ifile)->i_bmap, &cp->cp_ifile_inode);
- kunmap_local(kaddr);
+ kunmap_local(cp);
brelse(cp_bh);
out_sem:
up_write(&NILFS_MDT(cpfile)->mi_sem);
@@ -432,6 +465,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
struct nilfs_checkpoint *cp;
size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
__u64 cno;
+ size_t offset;
void *kaddr;
unsigned long tnicps;
int ret, ncps, nicps, nss, count, i;
@@ -462,9 +496,8 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
continue;
}
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(
- cpfile, cno, cp_bh, kaddr);
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
+ cp = kaddr = kmap_local_folio(cp_bh->b_folio, offset);
nicps = 0;
for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
if (nilfs_checkpoint_snapshot(cp)) {
@@ -474,43 +507,42 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
nicps++;
}
}
- if (nicps > 0) {
- tnicps += nicps;
- mark_buffer_dirty(cp_bh);
- nilfs_mdt_mark_dirty(cpfile);
- if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
- count =
- nilfs_cpfile_block_sub_valid_checkpoints(
- cpfile, cp_bh, kaddr, nicps);
- if (count == 0) {
- /* make hole */
- kunmap_local(kaddr);
- brelse(cp_bh);
- ret =
- nilfs_cpfile_delete_checkpoint_block(
- cpfile, cno);
- if (ret == 0)
- continue;
- nilfs_err(cpfile->i_sb,
- "error %d deleting checkpoint block",
- ret);
- break;
- }
- }
+ kunmap_local(kaddr);
+
+ if (nicps <= 0) {
+ brelse(cp_bh);
+ continue;
}
- kunmap_local(kaddr);
+ tnicps += nicps;
+ mark_buffer_dirty(cp_bh);
+ nilfs_mdt_mark_dirty(cpfile);
+ if (nilfs_cpfile_is_in_first(cpfile, cno)) {
+ brelse(cp_bh);
+ continue;
+ }
+
+ count = nilfs_cpfile_block_sub_valid_checkpoints(cpfile, cp_bh,
+ nicps);
brelse(cp_bh);
+ if (count)
+ continue;
+
+ /* Delete the block if there are no more valid checkpoints */
+ ret = nilfs_cpfile_delete_checkpoint_block(cpfile, cno);
+ if (unlikely(ret)) {
+ nilfs_err(cpfile->i_sb,
+ "error %d deleting checkpoint block", ret);
+ break;
+ }
}
if (tnicps > 0) {
- kaddr = kmap_local_page(header_bh->b_page);
- header = nilfs_cpfile_block_get_header(cpfile, header_bh,
- kaddr);
+ header = kmap_local_folio(header_bh->b_folio, 0);
le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(cpfile);
- kunmap_local(kaddr);
+ kunmap_local(header);
}
brelse(header_bh);
@@ -544,6 +576,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
struct buffer_head *bh;
size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
+ size_t offset;
void *kaddr;
int n, ret;
int ncps, i;
@@ -562,8 +595,8 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
}
ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
- kaddr = kmap_local_page(bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, bh);
+ cp = kaddr = kmap_local_folio(bh->b_folio, offset);
for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
if (!nilfs_checkpoint_invalid(cp)) {
nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
@@ -597,7 +630,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
struct nilfs_cpinfo *ci = buf;
__u64 curr = *cnop, next;
unsigned long curr_blkoff, next_blkoff;
- void *kaddr;
+ size_t offset;
int n = 0, ret;
down_read(&NILFS_MDT(cpfile)->mi_sem);
@@ -606,10 +639,9 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
ret = nilfs_cpfile_get_header_block(cpfile, &bh);
if (ret < 0)
goto out;
- kaddr = kmap_local_page(bh->b_page);
- header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
+ header = kmap_local_folio(bh->b_folio, 0);
curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
- kunmap_local(kaddr);
+ kunmap_local(header);
brelse(bh);
if (curr == 0) {
ret = 0;
@@ -627,9 +659,9 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
ret = 0; /* No snapshots (started from a hole block) */
goto out;
}
- kaddr = kmap_local_page(bh->b_page);
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, curr, bh);
+ cp = kmap_local_folio(bh->b_folio, offset);
while (n < nci) {
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
curr = ~(__u64)0; /* Terminator */
if (unlikely(nilfs_checkpoint_invalid(cp) ||
!nilfs_checkpoint_snapshot(cp)))
@@ -641,9 +673,9 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
if (next == 0)
break; /* reach end of the snapshot list */
+ kunmap_local(cp);
next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
if (curr_blkoff != next_blkoff) {
- kunmap_local(kaddr);
brelse(bh);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
0, &bh);
@@ -651,12 +683,13 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
WARN_ON(ret == -ENOENT);
goto out;
}
- kaddr = kmap_local_page(bh->b_page);
}
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, next, bh);
+ cp = kmap_local_folio(bh->b_folio, offset);
curr = next;
curr_blkoff = next_blkoff;
}
- kunmap_local(kaddr);
+ kunmap_local(cp);
brelse(bh);
*cnop = curr;
ret = n;
@@ -733,26 +766,6 @@ int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
-static struct nilfs_snapshot_list *
-nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
- __u64 cno,
- struct buffer_head *bh,
- void *kaddr)
-{
- struct nilfs_cpfile_header *header;
- struct nilfs_checkpoint *cp;
- struct nilfs_snapshot_list *list;
-
- if (cno != 0) {
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
- list = &cp->cp_snapshot_list;
- } else {
- header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
- list = &header->ch_snapshot_list;
- }
- return list;
-}
-
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
@@ -761,94 +774,103 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
struct nilfs_snapshot_list *list;
__u64 curr, prev;
unsigned long curr_blkoff, prev_blkoff;
- void *kaddr;
+ size_t offset, curr_list_offset, prev_list_offset;
int ret;
if (cno == 0)
return -ENOENT; /* checkpoint number 0 is invalid */
down_write(&NILFS_MDT(cpfile)->mi_sem);
+ ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
+ if (unlikely(ret < 0))
+ goto out_sem;
+
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
if (ret < 0)
- goto out_sem;
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
+ goto out_header;
+
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
+ cp = kmap_local_folio(cp_bh->b_folio, offset);
if (nilfs_checkpoint_invalid(cp)) {
ret = -ENOENT;
- kunmap_local(kaddr);
+ kunmap_local(cp);
goto out_cp;
}
if (nilfs_checkpoint_snapshot(cp)) {
ret = 0;
- kunmap_local(kaddr);
+ kunmap_local(cp);
goto out_cp;
}
- kunmap_local(kaddr);
+ kunmap_local(cp);
- ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
- if (ret < 0)
- goto out_cp;
- kaddr = kmap_local_page(header_bh->b_page);
- header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
+ /*
+ * Find the last snapshot before the checkpoint being changed to
+ * snapshot mode by going backwards through the snapshot list.
+ * Set "prev" to its checkpoint number, or 0 if not found.
+ */
+ header = kmap_local_folio(header_bh->b_folio, 0);
list = &header->ch_snapshot_list;
curr_bh = header_bh;
get_bh(curr_bh);
curr = 0;
curr_blkoff = 0;
+ curr_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
prev = le64_to_cpu(list->ssl_prev);
while (prev > cno) {
prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
curr = prev;
+ kunmap_local(list);
if (curr_blkoff != prev_blkoff) {
- kunmap_local(kaddr);
brelse(curr_bh);
ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
0, &curr_bh);
- if (ret < 0)
- goto out_header;
- kaddr = kmap_local_page(curr_bh->b_page);
+ if (unlikely(ret < 0))
+ goto out_cp;
}
+ curr_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
+ cpfile, curr, curr_bh);
+ list = kmap_local_folio(curr_bh->b_folio, curr_list_offset);
curr_blkoff = prev_blkoff;
- cp = nilfs_cpfile_block_get_checkpoint(
- cpfile, curr, curr_bh, kaddr);
- list = &cp->cp_snapshot_list;
prev = le64_to_cpu(list->ssl_prev);
}
- kunmap_local(kaddr);
+ kunmap_local(list);
if (prev != 0) {
ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
&prev_bh);
if (ret < 0)
goto out_curr;
+
+ prev_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
+ cpfile, prev, prev_bh);
} else {
prev_bh = header_bh;
get_bh(prev_bh);
+ prev_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
}
- kaddr = kmap_local_page(curr_bh->b_page);
- list = nilfs_cpfile_block_get_snapshot_list(
- cpfile, curr, curr_bh, kaddr);
+ /* Update the list entry for the next snapshot */
+ list = kmap_local_folio(curr_bh->b_folio, curr_list_offset);
list->ssl_prev = cpu_to_le64(cno);
- kunmap_local(kaddr);
+ kunmap_local(list);
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
+ /* Update the checkpoint being changed to a snapshot */
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
+ cp = kmap_local_folio(cp_bh->b_folio, offset);
cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
nilfs_checkpoint_set_snapshot(cp);
- kunmap_local(kaddr);
+ kunmap_local(cp);
- kaddr = kmap_local_page(prev_bh->b_page);
- list = nilfs_cpfile_block_get_snapshot_list(
- cpfile, prev, prev_bh, kaddr);
+ /* Update the list entry for the previous snapshot */
+ list = kmap_local_folio(prev_bh->b_folio, prev_list_offset);
list->ssl_next = cpu_to_le64(cno);
- kunmap_local(kaddr);
+ kunmap_local(list);
- kaddr = kmap_local_page(header_bh->b_page);
- header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
+ /* Update the statistics in the header */
+ header = kmap_local_folio(header_bh->b_folio, 0);
le64_add_cpu(&header->ch_nsnapshots, 1);
- kunmap_local(kaddr);
+ kunmap_local(header);
mark_buffer_dirty(prev_bh);
mark_buffer_dirty(curr_bh);
@@ -861,12 +883,12 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
out_curr:
brelse(curr_bh);
- out_header:
- brelse(header_bh);
-
out_cp:
brelse(cp_bh);
+ out_header:
+ brelse(header_bh);
+
out_sem:
up_write(&NILFS_MDT(cpfile)->mi_sem);
return ret;
@@ -879,79 +901,87 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
struct nilfs_checkpoint *cp;
struct nilfs_snapshot_list *list;
__u64 next, prev;
- void *kaddr;
+ size_t offset, next_list_offset, prev_list_offset;
int ret;
if (cno == 0)
return -ENOENT; /* checkpoint number 0 is invalid */
down_write(&NILFS_MDT(cpfile)->mi_sem);
+ ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
+ if (unlikely(ret < 0))
+ goto out_sem;
+
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
if (ret < 0)
- goto out_sem;
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
+ goto out_header;
+
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
+ cp = kmap_local_folio(cp_bh->b_folio, offset);
if (nilfs_checkpoint_invalid(cp)) {
ret = -ENOENT;
- kunmap_local(kaddr);
+ kunmap_local(cp);
goto out_cp;
}
if (!nilfs_checkpoint_snapshot(cp)) {
ret = 0;
- kunmap_local(kaddr);
+ kunmap_local(cp);
goto out_cp;
}
list = &cp->cp_snapshot_list;
next = le64_to_cpu(list->ssl_next);
prev = le64_to_cpu(list->ssl_prev);
- kunmap_local(kaddr);
+ kunmap_local(cp);
- ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
- if (ret < 0)
- goto out_cp;
if (next != 0) {
ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
&next_bh);
if (ret < 0)
- goto out_header;
+ goto out_cp;
+
+ next_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
+ cpfile, next, next_bh);
} else {
next_bh = header_bh;
get_bh(next_bh);
+ next_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
}
if (prev != 0) {
ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
&prev_bh);
if (ret < 0)
goto out_next;
+
+ prev_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
+ cpfile, prev, prev_bh);
} else {
prev_bh = header_bh;
get_bh(prev_bh);
+ prev_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
}
- kaddr = kmap_local_page(next_bh->b_page);
- list = nilfs_cpfile_block_get_snapshot_list(
- cpfile, next, next_bh, kaddr);
+ /* Update the list entry for the next snapshot */
+ list = kmap_local_folio(next_bh->b_folio, next_list_offset);
list->ssl_prev = cpu_to_le64(prev);
- kunmap_local(kaddr);
+ kunmap_local(list);
- kaddr = kmap_local_page(prev_bh->b_page);
- list = nilfs_cpfile_block_get_snapshot_list(
- cpfile, prev, prev_bh, kaddr);
+ /* Update the list entry for the previous snapshot */
+ list = kmap_local_folio(prev_bh->b_folio, prev_list_offset);
list->ssl_next = cpu_to_le64(next);
- kunmap_local(kaddr);
+ kunmap_local(list);
- kaddr = kmap_local_page(cp_bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
+ /* Update the snapshot being changed back to a plain checkpoint */
+ cp = kmap_local_folio(cp_bh->b_folio, offset);
cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
nilfs_checkpoint_clear_snapshot(cp);
- kunmap_local(kaddr);
+ kunmap_local(cp);
- kaddr = kmap_local_page(header_bh->b_page);
- header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
+ /* Update the statistics in the header */
+ header = kmap_local_folio(header_bh->b_folio, 0);
le64_add_cpu(&header->ch_nsnapshots, -1);
- kunmap_local(kaddr);
+ kunmap_local(header);
mark_buffer_dirty(next_bh);
mark_buffer_dirty(prev_bh);
@@ -964,12 +994,12 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
out_next:
brelse(next_bh);
- out_header:
- brelse(header_bh);
-
out_cp:
brelse(cp_bh);
+ out_header:
+ brelse(header_bh);
+
out_sem:
up_write(&NILFS_MDT(cpfile)->mi_sem);
return ret;
@@ -990,7 +1020,7 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
struct buffer_head *bh;
struct nilfs_checkpoint *cp;
- void *kaddr;
+ size_t offset;
int ret;
/*
@@ -1004,13 +1034,14 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
if (ret < 0)
goto out;
- kaddr = kmap_local_page(bh->b_page);
- cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
+
+ offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, bh);
+ cp = kmap_local_folio(bh->b_folio, offset);
if (nilfs_checkpoint_invalid(cp))
ret = -ENOENT;
else
ret = nilfs_checkpoint_snapshot(cp);
- kunmap_local(kaddr);
+ kunmap_local(cp);
brelse(bh);
out:
@@ -1079,7 +1110,6 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
struct buffer_head *bh;
struct nilfs_cpfile_header *header;
- void *kaddr;
int ret;
down_read(&NILFS_MDT(cpfile)->mi_sem);
@@ -1087,12 +1117,11 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
ret = nilfs_cpfile_get_header_block(cpfile, &bh);
if (ret < 0)
goto out_sem;
- kaddr = kmap_local_page(bh->b_page);
- header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
+ header = kmap_local_folio(bh->b_folio, 0);
cpstat->cs_cno = nilfs_mdt_cno(cpfile);
cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
- kunmap_local(kaddr);
+ kunmap_local(header);
brelse(bh);
out_sem:
--
2.43.0
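A note for readers following the conversion pattern in this patch: each
kmap_local_page() + bh_offset() pair becomes an offset_in_folio()
calculation followed by kmap_local_folio(). A minimal sketch of the
resulting access pattern, assuming a buffer head bh whose data lives in
bh->b_folio and a block size no larger than PAGE_SIZE (the variable
names here are illustrative, not taken from the patch):

	/*
	 * Map the metadata entry that bh points at, via its folio.
	 * kmap_local_folio() returns the address at @offset, and
	 * kunmap_local() takes that same address back.
	 */
	size_t offset = offset_in_folio(bh->b_folio, bh->b_data);
	struct nilfs_checkpoint *cp;

	cp = kmap_local_folio(bh->b_folio, offset);
	/* ... read or update *cp ... */
	kunmap_local(cp);

Because the offset is computed against the folio rather than a single
page, this keeps working when buffers sit in folios larger than one
page, as long as an individual block does not cross a page boundary.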
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 09/12] nilfs2: Remove nilfs_writepage
2024-10-24 9:25 [PATCH 00/12] nilfs2: Finish folio conversion Ryusuke Konishi
` (7 preceding siblings ...)
2024-10-24 9:25 ` [PATCH 08/12] nilfs2: convert checkpoint file to be folio-based Ryusuke Konishi
@ 2024-10-24 9:25 ` Ryusuke Konishi
2024-10-24 9:25 ` [PATCH 10/12] nilfs2: Convert nilfs_page_count_clean_buffers() to take a folio Ryusuke Konishi
` (2 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Since nilfs2 has a ->writepages operation already, ->writepage is only
called by the migration code. If we add a ->migrate_folio operation,
it won't even be used for that and so it can be deleted.
[ konishi.ryusuke: fixed panic by using buffer_migrate_folio_norefs ]
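For context, the migration-only path the message refers to: when an
address space provides no ->migrate_folio operation, the migration core
falls back to writing dirty folios out through ->writepage. Roughly,
paraphrasing the dispatch in mm/migrate.c of this era (a sketch, not
verbatim kernel code):

	if (mapping->a_ops->migrate_folio)
		rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode);
	else
		/* fallback path; writes dirty folios via a_ops->writepage */
		rc = fallback_migrate_folio(mapping, dst, src, mode);

With .migrate_folio wired up to buffer_migrate_folio_norefs below,
nilfs2 folios take the buffer-aware migration path instead, so the
->writepage fallback is never reached and the method can be removed.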
Link: https://lkml.kernel.org/r/20241002150036.1339475-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
---
fs/nilfs2/inode.c | 33 +--------------------------------
1 file changed, 1 insertion(+), 32 deletions(-)
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index be6acf6e2bfc..c24f06268010 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -170,37 +170,6 @@ static int nilfs_writepages(struct address_space *mapping,
return err;
}
-static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
- struct inode *inode = folio->mapping->host;
- int err;
-
- if (sb_rdonly(inode->i_sb)) {
- /*
- * It means that filesystem was remounted in read-only
- * mode because of error or metadata corruption. But we
- * have dirty pages that try to be flushed in background.
- * So, here we simply discard this dirty page.
- */
- nilfs_clear_folio_dirty(folio);
- folio_unlock(folio);
- return -EROFS;
- }
-
- folio_redirty_for_writepage(wbc, folio);
- folio_unlock(folio);
-
- if (wbc->sync_mode == WB_SYNC_ALL) {
- err = nilfs_construct_segment(inode->i_sb);
- if (unlikely(err))
- return err;
- } else if (wbc->for_reclaim)
- nilfs_flush_segment(inode->i_sb, inode->i_ino);
-
- return 0;
-}
-
static bool nilfs_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
@@ -295,7 +264,6 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
const struct address_space_operations nilfs_aops = {
- .writepage = nilfs_writepage,
.read_folio = nilfs_read_folio,
.writepages = nilfs_writepages,
.dirty_folio = nilfs_dirty_folio,
@@ -304,6 +272,7 @@ const struct address_space_operations nilfs_aops = {
.write_end = nilfs_write_end,
.invalidate_folio = block_invalidate_folio,
.direct_IO = nilfs_direct_IO,
+ .migrate_folio = buffer_migrate_folio_norefs,
.is_partially_uptodate = block_is_partially_uptodate,
};
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 10/12] nilfs2: Convert nilfs_page_count_clean_buffers() to take a folio
2024-10-24 9:25 [PATCH 00/12] nilfs2: Finish folio conversion Ryusuke Konishi
` (8 preceding siblings ...)
2024-10-24 9:25 ` [PATCH 09/12] nilfs2: Remove nilfs_writepage Ryusuke Konishi
@ 2024-10-24 9:25 ` Ryusuke Konishi
2024-10-24 9:25 ` [PATCH 11/12] nilfs2: Convert nilfs_recovery_copy_block() " Ryusuke Konishi
2024-10-24 9:25 ` [PATCH 12/12] nilfs2: Convert metadata aops from writepage to writepages Ryusuke Konishi
11 siblings, 0 replies; 13+ messages in thread
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Both callers have a folio, so pass it in and use it directly.
[ konishi.ryusuke: fixed a checkpatch warning about function declaration ]
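Schematically, the change at each call site (the actual hunks follow):
the caller stops unwrapping the folio just so the callee can work on
its page.

	/* before */
	nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
	/* after */
	nr_dirty = nilfs_page_count_clean_buffers(folio, from, to);

Inside the helper, page_buffers() correspondingly becomes
folio_buffers(); both walk the same buffer list attached to the folio,
so this is a type-level cleanup with no behavior change.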
Link: https://lkml.kernel.org/r/20241002150036.1339475-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
---
fs/nilfs2/dir.c | 2 +-
fs/nilfs2/inode.c | 2 +-
fs/nilfs2/page.c | 4 ++--
fs/nilfs2/page.h | 4 ++--
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index a8602729586a..14e8d82f8629 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -95,7 +95,7 @@ static void nilfs_commit_chunk(struct folio *folio,
unsigned int nr_dirty;
int err;
- nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
+ nr_dirty = nilfs_page_count_clean_buffers(folio, from, to);
copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index c24f06268010..cf9ba481ae37 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -242,7 +242,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
unsigned int nr_dirty;
int err;
- nr_dirty = nilfs_page_count_clean_buffers(&folio->page, start,
+ nr_dirty = nilfs_page_count_clean_buffers(folio, start,
start + copied);
copied = generic_write_end(file, mapping, pos, len, copied, folio,
fsdata);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 10def4b55995..e48079ebe939 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -422,14 +422,14 @@ void nilfs_clear_folio_dirty(struct folio *folio)
__nilfs_clear_folio_dirty(folio);
}
-unsigned int nilfs_page_count_clean_buffers(struct page *page,
+unsigned int nilfs_page_count_clean_buffers(struct folio *folio,
unsigned int from, unsigned int to)
{
unsigned int block_start, block_end;
struct buffer_head *bh, *head;
unsigned int nc = 0;
- for (bh = head = page_buffers(page), block_start = 0;
+ for (bh = head = folio_buffers(folio), block_start = 0;
bh != head || !block_start;
block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + bh->b_size;
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 64521a03a19e..136cd1c143c9 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -43,8 +43,8 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_folio_dirty(struct folio *folio);
void nilfs_clear_dirty_pages(struct address_space *mapping);
-unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
- unsigned int);
+unsigned int nilfs_page_count_clean_buffers(struct folio *folio,
+ unsigned int from, unsigned int to);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
sector_t *blkoff);
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 11/12] nilfs2: Convert nilfs_recovery_copy_block() to take a folio
2024-10-24 9:25 [PATCH 00/12] nilfs2: Finish folio conversion Ryusuke Konishi
` (9 preceding siblings ...)
2024-10-24 9:25 ` [PATCH 10/12] nilfs2: Convert nilfs_page_count_clean_buffers() to take a folio Ryusuke Konishi
@ 2024-10-24 9:25 ` Ryusuke Konishi
2024-10-24 9:25 ` [PATCH 12/12] nilfs2: Convert metadata aops from writepage to writepages Ryusuke Konishi
11 siblings, 0 replies; 13+ messages in thread
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Use memcpy_to_folio() instead of open-coding it, and use offset_in_folio()
in case anybody wants to use nilfs2 on a device with large blocks.
[ konishi.ryusuke: added label name change ]
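For reference, memcpy_to_folio() is roughly equivalent to the following
for a block that fits within one page (a sketch using the names from
the hunk below; the helper additionally chunks the copy page by page):

	void *kaddr = kmap_local_folio(folio, from);

	memcpy(kaddr, bh_org->b_data, bh_org->b_size);
	kunmap_local(kaddr);
	flush_dcache_folio(folio);

The page-by-page chunking is what covers blocks larger than PAGE_SIZE,
the "large blocks" case the message alludes to, which the removed
open-coded kmap_local_page() version could not handle.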
Link: https://lkml.kernel.org/r/20241002150036.1339475-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
---
fs/nilfs2/recovery.c | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 21d81097a89f..e43405bf521e 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -481,19 +481,16 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
struct nilfs_recovery_block *rb,
- loff_t pos, struct page *page)
+ loff_t pos, struct folio *folio)
{
struct buffer_head *bh_org;
- size_t from = pos & ~PAGE_MASK;
- void *kaddr;
+ size_t from = offset_in_folio(folio, pos);
bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
if (unlikely(!bh_org))
return -EIO;
- kaddr = kmap_local_page(page);
- memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
- kunmap_local(kaddr);
+ memcpy_to_folio(folio, from, bh_org->b_data, bh_org->b_size);
brelse(bh_org);
return 0;
}
@@ -531,13 +528,13 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_inode;
}
- err = nilfs_recovery_copy_block(nilfs, rb, pos, &folio->page);
+ err = nilfs_recovery_copy_block(nilfs, rb, pos, folio);
if (unlikely(err))
- goto failed_page;
+ goto failed_folio;
err = nilfs_set_file_dirty(inode, 1);
if (unlikely(err))
- goto failed_page;
+ goto failed_folio;
block_write_end(NULL, inode->i_mapping, pos, blocksize,
blocksize, folio, NULL);
@@ -548,7 +545,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
(*nr_salvaged_blocks)++;
goto next;
- failed_page:
+ failed_folio:
folio_unlock(folio);
folio_put(folio);
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 12/12] nilfs2: Convert metadata aops from writepage to writepages
2024-10-24 9:25 [PATCH 00/12] nilfs2: Finish folio conversion Ryusuke Konishi
` (10 preceding siblings ...)
2024-10-24 9:25 ` [PATCH 11/12] nilfs2: Convert nilfs_recovery_copy_block() " Ryusuke Konishi
@ 2024-10-24 9:25 ` Ryusuke Konishi
11 siblings, 0 replies; 13+ messages in thread
From: Ryusuke Konishi @ 2024-10-24 9:25 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox, linux-nilfs, linux-kernel, linux-fsdevel
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
By implementing ->writepages instead of ->writepage, we remove a
layer of indirect function calls from the writeback path and the
last use of struct page in nilfs2.
[ konishi.ryusuke: fixed panic by using buffer_migrate_folio_norefs ]
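For readers new to it, the writeback_iter() contract the hunk below
relies on: start with a NULL folio, the iterator hands back each dirty
folio locked, and passing the previous folio in again advances the walk
and folds its result into the writeback accounting. A minimal caller
(write_one() is a hypothetical per-folio writer, standing in for
nilfs_mdt_write_folio() below):

	struct folio *folio = NULL;
	int error = 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = write_one(folio, wbc);

This open-codes what the generic ->writepage-based writeback loop used
to do on the caller's behalf, which is the layer of indirection the
message says goes away.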
Link: https://lkml.kernel.org/r/20241002150036.1339475-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
---
fs/nilfs2/mdt.c | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index a4c1e00aaaac..432181cfb0b5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -399,10 +399,9 @@ int nilfs_mdt_fetch_dirty(struct inode *inode)
return test_bit(NILFS_I_DIRTY, &ii->i_state);
}
-static int
-nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
+static int nilfs_mdt_write_folio(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct folio *folio = page_folio(page);
struct inode *inode = folio->mapping->host;
struct super_block *sb;
int err = 0;
@@ -435,11 +434,23 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
return err;
}
+static int nilfs_mdt_writeback(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct folio *folio = NULL;
+ int error;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ error = nilfs_mdt_write_folio(folio, wbc);
+
+ return error;
+}
static const struct address_space_operations def_mdt_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .writepage = nilfs_mdt_write_page,
+ .writepages = nilfs_mdt_writeback,
+ .migrate_folio = buffer_migrate_folio_norefs,
};
static const struct inode_operations def_mdt_iops;
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread