From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, zyjzyj2000@gmail.com, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearson@hpe.com>
Subject: [PATCH for-next 4/7] RDMA/rxe: Make pool lookup and alloc APIs type safe
Date: Wed, 16 Dec 2020 17:15:47 -0600
Message-ID: <20201216231550.27224-5-rpearson@hpe.com>
In-Reply-To: <20201216231550.27224-1-rpearson@hpe.com>
The allocate, lookup by index, lookup by key, and cleanup routines
in rxe_pool.c are currently not type safe against relocating the
pelem field within the objects. Planned changes to move allocation
of objects into rdma-core make addressing this a requirement.
Use the elem_offset field in rxe_type_info to make these APIs safe
against moving the pelem field.
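For illustration, the pattern used in the diff below converts between
an object and its embedded rxe_pool_entry via elem_offset instead of
assuming pelem sits at the start of the object; a minimal sketch, with
names taken from rxe_pool.c:

        struct rxe_type_info *info = &rxe_type_info[pool->type];

        /* object -> embedded pool entry */
        elem = (struct rxe_pool_entry *)((u8 *)obj + info->elem_offset);

        /* embedded pool entry -> object */
        obj = (u8 *)elem - info->elem_offset;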
Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
drivers/infiniband/sw/rxe/rxe_pool.c | 55 +++++++++++++++++++---------
1 file changed, 38 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 4d667b78af9b..2873ecfb84c2 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -315,7 +315,9 @@ void rxe_drop_index(void *arg)
void *rxe_alloc(struct rxe_pool *pool)
{
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rxe_pool_entry *elem;
+ u8 *obj;
unsigned long flags;
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
@@ -334,16 +336,17 @@ void *rxe_alloc(struct rxe_pool *pool)
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- elem = kzalloc(rxe_type_info[pool->type].size,
- (pool->flags & RXE_POOL_ATOMIC) ?
- GFP_ATOMIC : GFP_KERNEL);
- if (!elem)
+ obj = kzalloc(info->size, (pool->flags & RXE_POOL_ATOMIC) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (!obj)
goto out_cnt;
+ elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+
elem->pool = pool;
kref_init(&elem->ref_cnt);
- return elem;
+ return obj;
out_cnt:
atomic_dec(&pool->num_elem);
@@ -391,12 +394,17 @@ void rxe_elem_release(struct kref *kref)
struct rxe_pool_entry *elem =
container_of(kref, struct rxe_pool_entry, ref_cnt);
struct rxe_pool *pool = elem->pool;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ u8 *obj;
if (pool->cleanup)
pool->cleanup(elem);
- if (!(pool->flags & RXE_POOL_NO_ALLOC))
- kfree(elem);
+ if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
+ obj = (u8 *)elem - info->elem_offset;
+ kfree(obj);
+ }
+
atomic_dec(&pool->num_elem);
ib_device_put(&pool->rxe->ib_dev);
rxe_pool_put(pool);
@@ -404,8 +412,10 @@ void rxe_elem_release(struct kref *kref)
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- struct rb_node *node = NULL;
- struct rxe_pool_entry *elem = NULL;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rb_node *node;
+ struct rxe_pool_entry *elem;
+ u8 *obj = NULL;
unsigned long flags;
read_lock_irqsave(&pool->pool_lock, flags);
@@ -422,21 +432,28 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
node = node->rb_left;
else if (elem->index < index)
node = node->rb_right;
- else {
- kref_get(&elem->ref_cnt);
+ else
break;
- }
+ }
+
+ if (node) {
+ kref_get(&elem->ref_cnt);
+ obj = (u8 *)elem - info->elem_offset;
+ } else {
+ obj = NULL;
}
out:
read_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? elem : NULL;
+ return obj;
}
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
- struct rb_node *node = NULL;
- struct rxe_pool_entry *elem = NULL;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rb_node *node;
+ struct rxe_pool_entry *elem;
+ u8 *obj = NULL;
int cmp;
unsigned long flags;
@@ -461,10 +478,14 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
break;
}
- if (node)
+ if (node) {
kref_get(&elem->ref_cnt);
+ obj = (u8 *)elem - info->elem_offset;
+ } else {
+ obj = NULL;
+ }
out:
read_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? elem : NULL;
+ return obj;
}
--
2.27.0