* [f2fs-dev] [PATCH] f2fs: use round_up()/DIV_ROUND_UP()
From: Chao Yu @ 2020-03-30 10:03 UTC
To: jaegeuk; +Cc: linux-kernel, linux-f2fs-devel

.i_cluster_size should be a power of 2, so we can use round_up() instead
of roundup() to simplify the calculation.

In addition, use DIV_ROUND_UP() to clean up the code.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/data.c | 16 ++++++----------
 fs/f2fs/file.c | 17 +++++------------
 2 files changed, 11 insertions(+), 22 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0a829a89f596..8257d5e7aa3b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1969,8 +1969,6 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 					bool is_readahead)
 {
 	struct bio *bio = *bio_ret;
-	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocksize = 1 << blkbits;
 	sector_t block_in_file;
 	sector_t last_block;
 	sector_t last_block_in_file;
@@ -1979,8 +1977,8 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,

 	block_in_file = (sector_t)page_index(page);
 	last_block = block_in_file + nr_pages;
-	last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
-							blkbits;
+	last_block_in_file = DIV_ROUND_UP(f2fs_readpage_limit(inode),
+							PAGE_SIZE);
 	if (last_block > last_block_in_file)
 		last_block = last_block_in_file;

@@ -2062,7 +2060,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 	 */
 	f2fs_wait_on_block_writeback(inode, block_nr);

-	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
 		goto submit_and_realloc;

 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
@@ -2091,16 +2089,14 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	struct bio *bio = *bio_ret;
 	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
 	sector_t last_block_in_file;
-	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocksize = 1 << blkbits;
 	struct decompress_io_ctx *dic = NULL;
 	int i;
 	int ret = 0;

 	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));

-	last_block_in_file = (f2fs_readpage_limit(inode) +
-					blocksize - 1) >> blkbits;
+	last_block_in_file = DIV_ROUND_UP(f2fs_readpage_limit(inode),
+					PAGE_SIZE);

 	/* get rid of pages beyond EOF */
 	for (i = 0; i < cc->cluster_size; i++) {
@@ -2197,7 +2193,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,

 		f2fs_wait_on_block_writeback(inode, blkaddr);

-		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
 			goto submit_and_realloc;

 		inc_page_count(sbi, F2FS_RD_DATA);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c2d38a1c4972..0f8be076620c 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -736,16 +736,9 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
 	 * for compressed file, only support cluster size
 	 * aligned truncation.
 	 */
-	if (f2fs_compressed_file(inode)) {
-		size_t cluster_shift = PAGE_SHIFT +
-					F2FS_I(inode)->i_log_cluster_size;
-		size_t cluster_mask = (1 << cluster_shift) - 1;
-
-		free_from = from >> cluster_shift;
-		if (from & cluster_mask)
-			free_from++;
-		free_from <<= cluster_shift;
-	}
+	if (f2fs_compressed_file(inode))
+		free_from = round_up(from,
+				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
 #endif

 	err = f2fs_do_truncate_blocks(inode, free_from, lock);
@@ -3537,7 +3530,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)

 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = roundup(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, F2FS_I(inode)->i_cluster_size);

 		ret = release_compress_blocks(&dn, count);

@@ -3689,7 +3682,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)

 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = roundup(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, F2FS_I(inode)->i_cluster_size);

 		ret = reserve_compress_blocks(&dn, count);

--
2.18.0.rc1
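A note on the two macros at the heart of the cleanup: roundup() is the
generic kernel helper and rounds up using a division, while round_up()
assumes a power-of-2 alignment and reduces to mask arithmetic. The sketch
below illustrates the difference in plain userspace C; the macro bodies are
simplified stand-ins for the kernel definitions (in include/linux/kernel.h
at the time), not copies of them.

	#include <stdio.h>

	/* Simplified stand-ins for the kernel macros. */
	#define roundup(x, y)  ((((x) + (y) - 1) / (y)) * (y)) /* any y; costs a divide */
	#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))  /* y must be a power of 2 */

	int main(void)
	{
		unsigned long count = 13, cluster = 4;

		/* With a power-of-2 alignment the two macros agree... */
		printf("roundup(13, 4)  = %lu\n", roundup(count, cluster));  /* 16 */
		printf("round_up(13, 4) = %lu\n", round_up(count, cluster)); /* 16 */

		/* ...but round_up() silently miscomputes for other alignments,
		 * which is why the power-of-2 guarantee on i_cluster_size matters. */
		printf("round_up(8, 6)  = %lu\n", round_up(8UL, 6UL));       /* 8, not 12 */
		return 0;
	}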
* Re: [f2fs-dev] [PATCH] f2fs: use round_up()/DIV_ROUND_UP()
From: Jaegeuk Kim @ 2020-03-30 18:42 UTC
To: Chao Yu; +Cc: linux-kernel, linux-f2fs-devel

On 03/30, Chao Yu wrote:
> .i_cluster_size should be a power of 2, so we can use round_up() instead
> of roundup() to simplify the calculation.
>
> In addition, use DIV_ROUND_UP() to clean up the code.
>
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/data.c | 16 ++++++----------
>  fs/f2fs/file.c | 17 +++++------------
>  2 files changed, 11 insertions(+), 22 deletions(-)
>
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 0a829a89f596..8257d5e7aa3b 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -1979,8 +1977,8 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
>
>  	block_in_file = (sector_t)page_index(page);
>  	last_block = block_in_file + nr_pages;
> -	last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
> -							blkbits;
> +	last_block_in_file = DIV_ROUND_UP(f2fs_readpage_limit(inode),
> +							PAGE_SIZE);

What if PAGE_SIZE is bigger than 4KB?

> [...]
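Jaegeuk's question targets the one place where the patch is not a pure
cleanup: the removed expression rounded up by the inode's block size, while
DIV_ROUND_UP(..., PAGE_SIZE) rounds up by the page size, and the two agree
only when those sizes match. A standalone sketch with made-up numbers (not
f2fs code) shows where they diverge on a hypothetical 64KB-page system:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long long limit = 9000; /* stand-in for f2fs_readpage_limit() */
		unsigned blkbits = 12;           /* 4KB filesystem block */

		/* Removed expression: round up by the inode's block size. */
		unsigned long long old = (limit + (1ULL << blkbits) - 1) >> blkbits;

		/* New expression, evaluated for two page sizes. */
		unsigned long long new_4k  = DIV_ROUND_UP(limit, 4096ULL);  /* 3, == old */
		unsigned long long new_64k = DIV_ROUND_UP(limit, 65536ULL); /* 1, != old */

		printf("old=%llu new_4k=%llu new_64k=%llu\n", old, new_4k, new_64k);
		return 0;
	}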
* Re: [f2fs-dev] [PATCH] f2fs: use round_up()/DIV_ROUND_UP()
From: Chao Yu @ 2020-03-31 0:48 UTC
To: Jaegeuk Kim; +Cc: linux-kernel, linux-f2fs-devel

On 2020/3/31 2:42, Jaegeuk Kim wrote:
> On 03/30, Chao Yu wrote:
>> .i_cluster_size should be a power of 2, so we can use round_up() instead
>> of roundup() to simplify the calculation.
>>
>> In addition, use DIV_ROUND_UP() to clean up the code.
>>
>> [...]
>>
>>  	block_in_file = (sector_t)page_index(page);
>>  	last_block = block_in_file + nr_pages;
>> -	last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
>> -							blkbits;
>> +	last_block_in_file = DIV_ROUND_UP(f2fs_readpage_limit(inode),
>> +							PAGE_SIZE);
>
> What if PAGE_SIZE is bigger than 4KB?

We don't support 8KB+ page sizes, right?

static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

Thanks,

> [...]
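For the file.c hunk in the original patch, the equivalence between the
removed open-coded sequence and the round_up() one-liner is easy to verify
in isolation. Below is a standalone sketch with illustrative values (4KB
pages, 4-page clusters); round_up() here is a simplified stand-in for the
kernel macro, not the real definition:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	/* Simplified power-of-2 round_up(), standing in for the kernel macro. */
	#define round_up(x, y) (((x) + (y) - 1) & ~((unsigned long long)(y) - 1))

	int main(void)
	{
		unsigned long long from = 70000; /* arbitrary truncation offset */
		unsigned log_cluster_size = 2;   /* i_log_cluster_size: 4 pages/cluster */
		unsigned cluster_size = 1U << log_cluster_size;

		/* Removed open-coded version from f2fs_truncate_blocks(). */
		unsigned shift = PAGE_SHIFT + log_cluster_size;
		unsigned long long mask = (1ULL << shift) - 1;
		unsigned long long free_from = from >> shift;
		if (from & mask)
			free_from++;
		free_from <<= shift;

		/* Replacement one-liner from the patch. */
		unsigned long long free_from2 = round_up(from, cluster_size << PAGE_SHIFT);

		printf("%llu %llu\n", free_from, free_from2); /* both print 81920 */
		return 0;
	}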
* Re: [f2fs-dev] [PATCH] f2fs: use round_up()/DIV_ROUND_UP()
From: Jaegeuk Kim @ 2020-03-31 3:55 UTC
To: Chao Yu; +Cc: linux-kernel, linux-f2fs-devel

On 03/31, Chao Yu wrote:
> On 2020/3/31 2:42, Jaegeuk Kim wrote:
>> On 03/30, Chao Yu wrote:
>>> [...]
>>>
>>> -	last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
>>> -							blkbits;
>>> +	last_block_in_file = DIV_ROUND_UP(f2fs_readpage_limit(inode),
>>> +							PAGE_SIZE);
>>
>> What if PAGE_SIZE is bigger than 4KB?
>
> We don't support 8KB+ page sizes, right?

That's only an assumption enforced below. I don't think we can just
replace block with PAGE in every place.

> static int __init init_f2fs_fs(void)
> {
> 	int err;
>
> 	if (PAGE_SIZE != F2FS_BLKSIZE) {
> 		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
> 				PAGE_SIZE, F2FS_BLKSIZE);
> 		return -EINVAL;
> 	}
>
> Thanks,
>
> [...]
* Re: [f2fs-dev] [PATCH] f2fs: use round_up()/DIV_ROUND_UP()
From: Chao Yu @ 2020-03-31 7:51 UTC
To: Jaegeuk Kim; +Cc: linux-kernel, linux-f2fs-devel

On 2020/3/31 11:55, Jaegeuk Kim wrote:
> On 03/31, Chao Yu wrote:
>> On 2020/3/31 2:42, Jaegeuk Kim wrote:
>>> On 03/30, Chao Yu wrote:
>>>> [...]
>>>>
>>>> -	last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
>>>> -							blkbits;
>>>> +	last_block_in_file = DIV_ROUND_UP(f2fs_readpage_limit(inode),
>>>> +							PAGE_SIZE);
>>>
>>> What if PAGE_SIZE is bigger than 4KB?
>>
>> We don't support 8KB+ page sizes, right?
>
> That's only an assumption enforced below. I don't think we can just
> replace block with PAGE in every place.

Fixed. BTW, we never unified the size argument passed to bio_add_page()
between blocksize and PAGE_SIZE; should it be unified to use PAGE_SIZE?

f2fs_read_multi_pages()
	if (bio_add_page(bio, page, blocksize, 0) < blocksize)

f2fs_submit_page_write()
	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {

> [...]
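Finally, a worked check for the release/reserve hunks of the original
patch, where count is clamped to the current node page and then widened to
a whole number of clusters. The numbers below are made up for illustration;
the real code derives them from dnode bookkeeping:

	#include <stdio.h>

	/* Simplified power-of-2 round_up(), standing in for the kernel macro. */
	#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
	#define min(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned cluster_size = 4;  /* i_cluster_size, a power of 2 by design */
		unsigned end_offset = 923;  /* hypothetical ADDRS_PER_PAGE() result */
		unsigned ofs_in_node = 918; /* hypothetical position in the node page */
		unsigned remaining = 40;    /* blocks left until last_idx */

		/* Mirrors the loop body in f2fs_release_compress_blocks(): take
		 * what fits in this node page, then round up so each pass covers
		 * complete clusters only. */
		unsigned count = min(end_offset - ofs_in_node, remaining);
		count = round_up(count, cluster_size);

		printf("count = %u\n", count); /* min(5, 40) = 5, rounded up to 8 */
		return 0;
	}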