public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Jaegeuk Kim <jaegeuk@kernel.org>
To: Chao Yu <chao@kernel.org>
Cc: Yangtao Li <frank.li@vivo.com>,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH] f2fs: do some cleanup for f2fs module init
Date: Mon, 12 Dec 2022 14:53:02 -0800	[thread overview]
Message-ID: <Y5ewzsPuCd5UbCCJ@google.com> (raw)
In-Reply-To: <b8c54a6b-1f6d-9a86-b87c-e980902aa3a3@kernel.org>

On 12/11, Chao Yu wrote:
> On 2022/11/25 19:47, Yangtao Li wrote:
> > Just for cleanup, no functional changes.
> > 
> > Signed-off-by: Yangtao Li <frank.li@vivo.com>
> > ---
> >   fs/f2fs/compress.c | 46 ++++++----------------------------------------
> >   fs/f2fs/data.c     | 14 ++++----------
> >   fs/f2fs/gc.c       |  4 +---
> >   fs/f2fs/recovery.c |  4 +---
> >   fs/f2fs/super.c    |  8 ++------
> >   5 files changed, 14 insertions(+), 62 deletions(-)
> > 
> > diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> > index d315c2de136f..f920ba8e0e85 100644
> > --- a/fs/f2fs/compress.c
> > +++ b/fs/f2fs/compress.c
> > @@ -567,10 +567,7 @@ MODULE_PARM_DESC(num_compress_pages,
> >   int f2fs_init_compress_mempool(void)
> >   {
> >   	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
> > -	if (!compress_page_pool)
> > -		return -ENOMEM;
> > -
> > -	return 0;
> > +	return compress_page_pool ? 0 : -ENOMEM;
> 
> I don't think this needs cleanup, other part looks good to me.

What is the point here compared to the below? FYI, I picked this change.

> 
> Thanks,
> 
> >   }
> >   void f2fs_destroy_compress_mempool(void)
> > @@ -1983,9 +1980,7 @@ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
> >   	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
> >   					sbi->page_array_slab_size);
> > -	if (!sbi->page_array_slab)
> > -		return -ENOMEM;
> > -	return 0;
> > +	return sbi->page_array_slab ? 0 : -ENOMEM;
> >   }
> >   void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
> > @@ -1993,53 +1988,24 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
> >   	kmem_cache_destroy(sbi->page_array_slab);
> >   }
> > -static int __init f2fs_init_cic_cache(void)
> > +int __init f2fs_init_compress_cache(void)
> >   {
> >   	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
> >   					sizeof(struct compress_io_ctx));
> >   	if (!cic_entry_slab)
> >   		return -ENOMEM;
> > -	return 0;
> > -}
> > -
> > -static void f2fs_destroy_cic_cache(void)
> > -{
> > -	kmem_cache_destroy(cic_entry_slab);
> > -}
> > -
> > -static int __init f2fs_init_dic_cache(void)
> > -{
> >   	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
> >   					sizeof(struct decompress_io_ctx));
> >   	if (!dic_entry_slab)
> > -		return -ENOMEM;
> > -	return 0;
> > -}
> > -
> > -static void f2fs_destroy_dic_cache(void)
> > -{
> > -	kmem_cache_destroy(dic_entry_slab);
> > -}
> > -
> > -int __init f2fs_init_compress_cache(void)
> > -{
> > -	int err;
> > -
> > -	err = f2fs_init_cic_cache();
> > -	if (err)
> > -		goto out;
> > -	err = f2fs_init_dic_cache();
> > -	if (err)
> >   		goto free_cic;
> >   	return 0;
> >   free_cic:
> > -	f2fs_destroy_cic_cache();
> > -out:
> > +	kmem_cache_destroy(cic_entry_slab);
> >   	return -ENOMEM;
> >   }
> >   void f2fs_destroy_compress_cache(void)
> >   {
> > -	f2fs_destroy_dic_cache();
> > -	f2fs_destroy_cic_cache();
> > +	kmem_cache_destroy(dic_entry_slab);
> > +	kmem_cache_destroy(cic_entry_slab);
> >   }
> > diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > index 560fa80590e9..35c19248b1e2 100644
> > --- a/fs/f2fs/data.c
> > +++ b/fs/f2fs/data.c
> > @@ -39,10 +39,8 @@ static struct bio_set f2fs_bioset;
> >   int __init f2fs_init_bioset(void)
> >   {
> > -	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
> > -					0, BIOSET_NEED_BVECS))
> > -		return -ENOMEM;
> > -	return 0;
> > +	return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
> > +					0, BIOSET_NEED_BVECS);
> >   }
> >   void f2fs_destroy_bioset(void)
> > @@ -4090,9 +4088,7 @@ int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
> >   	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
> >   						 WQ_UNBOUND | WQ_HIGHPRI,
> >   						 num_online_cpus());
> > -	if (!sbi->post_read_wq)
> > -		return -ENOMEM;
> > -	return 0;
> > +	return sbi->post_read_wq ? 0 : -ENOMEM;
> >   }
> >   void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
> > @@ -4105,9 +4101,7 @@ int __init f2fs_init_bio_entry_cache(void)
> >   {
> >   	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
> >   			sizeof(struct bio_entry));
> > -	if (!bio_entry_slab)
> > -		return -ENOMEM;
> > -	return 0;
> > +	return bio_entry_slab ? 0 : -ENOMEM;
> >   }
> >   void f2fs_destroy_bio_entry_cache(void)
> > diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> > index 0f967b1e98f2..4b0d2fa3a769 100644
> > --- a/fs/f2fs/gc.c
> > +++ b/fs/f2fs/gc.c
> > @@ -1903,9 +1903,7 @@ int __init f2fs_create_garbage_collection_cache(void)
> >   {
> >   	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
> >   					sizeof(struct victim_entry));
> > -	if (!victim_entry_slab)
> > -		return -ENOMEM;
> > -	return 0;
> > +	return victim_entry_slab ? 0 : -ENOMEM;
> >   }
> >   void f2fs_destroy_garbage_collection_cache(void)
> > diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
> > index dea95b48b647..77fd453949b1 100644
> > --- a/fs/f2fs/recovery.c
> > +++ b/fs/f2fs/recovery.c
> > @@ -923,9 +923,7 @@ int __init f2fs_create_recovery_cache(void)
> >   {
> >   	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
> >   					sizeof(struct fsync_inode_entry));
> > -	if (!fsync_entry_slab)
> > -		return -ENOMEM;
> > -	return 0;
> > +	return fsync_entry_slab ? 0 : -ENOMEM;
> >   }
> >   void f2fs_destroy_recovery_cache(void)
> > diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> > index 31435c8645c8..1d56cba495a5 100644
> > --- a/fs/f2fs/super.c
> > +++ b/fs/f2fs/super.c
> > @@ -288,9 +288,7 @@ static int __init f2fs_create_casefold_cache(void)
> >   {
> >   	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
> >   							F2FS_NAME_LEN);
> > -	if (!f2fs_cf_name_slab)
> > -		return -ENOMEM;
> > -	return 0;
> > +	return f2fs_cf_name_slab ? 0 : -ENOMEM;
> >   }
> >   static void f2fs_destroy_casefold_cache(void)
> > @@ -4646,9 +4644,7 @@ static int __init init_inodecache(void)
> >   	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
> >   			sizeof(struct f2fs_inode_info), 0,
> >   			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
> > -	if (!f2fs_inode_cachep)
> > -		return -ENOMEM;
> > -	return 0;
> > +	return f2fs_inode_cachep ? 0 : -ENOMEM;
> >   }
> >   static void destroy_inodecache(void)

  reply	other threads:[~2022-12-12 22:53 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-11-25 11:47 [PATCH] f2fs: do some cleanup for f2fs module init Yangtao Li
2022-12-11  2:14 ` Chao Yu
2022-12-12 22:53   ` Jaegeuk Kim [this message]
2022-12-13  1:34     ` Chao Yu
2022-12-13  1:37       ` Jaegeuk Kim
2022-12-13  2:02         ` Chao Yu
2022-12-13 12:09           ` Yangtao Li

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=Y5ewzsPuCd5UbCCJ@google.com \
    --to=jaegeuk@kernel.org \
    --cc=chao@kernel.org \
    --cc=frank.li@vivo.com \
    --cc=linux-f2fs-devel@lists.sourceforge.net \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox