From: Zhu Yanjun <zyjzyj2000@gmail.com>
To: Bob Pearson <rpearsonhpe@gmail.com>,
leon@kernel.org, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearson@hpe.com>
Subject: Re: [PATCH for-next] rdma_rxe: address an issue with hardened user copy
Date: Thu, 27 Aug 2020 09:21:52 +0800 [thread overview]
Message-ID: <a63b12bf-322f-f3ef-271e-cbf12944301f@gmail.com> (raw)
In-Reply-To: <20200825165836.27477-1-rpearson@hpe.com>
On 8/26/2020 12:58 AM, Bob Pearson wrote:
> Change rxe pools to use kzalloc instead of kmem_cache to allocate
Why do you use kzalloc instead of kmem_cache? Is this for performance reasons, or does it fix some bugs?
Zhu Yanjun
> memory for rxe objects.
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
> drivers/infiniband/sw/rxe/rxe.c | 8 ----
> drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
> drivers/infiniband/sw/rxe/rxe_pool.h | 7 ----
> 3 files changed, 2 insertions(+), 73 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
> index cc395da13eff..a1ff70e0b1f8 100644
> --- a/drivers/infiniband/sw/rxe/rxe.c
> +++ b/drivers/infiniband/sw/rxe/rxe.c
> @@ -277,13 +277,6 @@ static int __init rxe_module_init(void)
> {
> int err;
>
> - /* initialize slab caches for managed objects */
> - err = rxe_cache_init();
> - if (err) {
> - pr_err("unable to init object pools\n");
> - return err;
> - }
> -
> err = rxe_net_init();
> if (err)
> return err;
> @@ -298,7 +291,6 @@ static void __exit rxe_module_exit(void)
> rdma_link_unregister(&rxe_link_ops);
> ib_unregister_driver(RDMA_DRIVER_RXE);
> rxe_net_exit();
> - rxe_cache_exit();
>
> pr_info("unloaded\n");
> }
> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
> index c0fab4a65f9e..70fc9f7a25b6 100644
> --- a/drivers/infiniband/sw/rxe/rxe_pool.c
> +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
> @@ -84,62 +84,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
> return rxe_type_info[pool->type].name;
> }
>
> -static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
> -{
> - return rxe_type_info[pool->type].cache;
> -}
> -
> -static void rxe_cache_clean(size_t cnt)
> -{
> - int i;
> - struct rxe_type_info *type;
> -
> - for (i = 0; i < cnt; i++) {
> - type = &rxe_type_info[i];
> - if (!(type->flags & RXE_POOL_NO_ALLOC)) {
> - kmem_cache_destroy(type->cache);
> - type->cache = NULL;
> - }
> - }
> -}
> -
> -int rxe_cache_init(void)
> -{
> - int err;
> - int i;
> - size_t size;
> - struct rxe_type_info *type;
> -
> - for (i = 0; i < RXE_NUM_TYPES; i++) {
> - type = &rxe_type_info[i];
> - size = ALIGN(type->size, RXE_POOL_ALIGN);
> - if (!(type->flags & RXE_POOL_NO_ALLOC)) {
> - type->cache =
> - kmem_cache_create(type->name, size,
> - RXE_POOL_ALIGN,
> - RXE_POOL_CACHE_FLAGS, NULL);
> - if (!type->cache) {
> - pr_err("Unable to init kmem cache for %s\n",
> - type->name);
> - err = -ENOMEM;
> - goto err1;
> - }
> - }
> - }
> -
> - return 0;
> -
> -err1:
> - rxe_cache_clean(i);
> -
> - return err;
> -}
> -
> -void rxe_cache_exit(void)
> -{
> - rxe_cache_clean(RXE_NUM_TYPES);
> -}
> -
> static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
> {
> int err = 0;
> @@ -381,7 +325,7 @@ void *rxe_alloc(struct rxe_pool *pool)
> if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
> goto out_cnt;
>
> - elem = kmem_cache_zalloc(pool_cache(pool),
> + elem = kzalloc(rxe_type_info[pool->type].size,
> (pool->flags & RXE_POOL_ATOMIC) ?
> GFP_ATOMIC : GFP_KERNEL);
> if (!elem)
> @@ -443,7 +387,7 @@ void rxe_elem_release(struct kref *kref)
> pool->cleanup(elem);
>
> if (!(pool->flags & RXE_POOL_NO_ALLOC))
> - kmem_cache_free(pool_cache(pool), elem);
> + kfree(elem);
> atomic_dec(&pool->num_elem);
> ib_device_put(&pool->rxe->ib_dev);
> rxe_pool_put(pool);
> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
> index 64d92be3f060..3d722aae5f15 100644
> --- a/drivers/infiniband/sw/rxe/rxe_pool.h
> +++ b/drivers/infiniband/sw/rxe/rxe_pool.h
> @@ -42,7 +42,6 @@ struct rxe_type_info {
> u32 min_index;
> size_t key_offset;
> size_t key_size;
> - struct kmem_cache *cache;
> };
>
> extern struct rxe_type_info rxe_type_info[];
> @@ -96,12 +95,6 @@ struct rxe_pool {
> } key;
> };
>
> -/* initialize slab caches for managed objects */
> -int rxe_cache_init(void);
> -
> -/* cleanup slab caches for managed objects */
> -void rxe_cache_exit(void);
> -
> /* initialize a pool of objects with given limit on
> * number of elements. gets parameters from rxe_type_info
> * pool elements will be allocated out of a slab cache
next prev parent reply other threads:[~2020-08-27 1:21 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-08-25 16:58 [PATCH for-next] rdma_rxe: address an issue with hardened user copy Bob Pearson
2020-08-27 1:21 ` Zhu Yanjun [this message]
2020-08-27 1:30 ` Bob Pearson
2020-08-27 7:02 ` Leon Romanovsky
2020-08-27 13:58 ` Jason Gunthorpe
2020-08-27 16:43 ` Bob Pearson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=a63b12bf-322f-f3ef-271e-cbf12944301f@gmail.com \
--to=zyjzyj2000@gmail.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=rpearson@hpe.com \
--cc=rpearsonhpe@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox