public inbox for dev@dpdk.org
 help / color / mirror / Atom feed
* [PATCH] hash: fix overflow of 32-bit offsets
@ 2026-02-20 14:39 Bruce Richardson
  2026-02-20 15:09 ` Morten Brørup
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Bruce Richardson @ 2026-02-20 14:39 UTC (permalink / raw)
  To: dev; +Cc: vladimir.medvedkin, oleksandrn, Bruce Richardson, stable

When computing the offset inside the overall hash structure by adding an
offset to a base pointer, the offset was generally calculated by
multiplying two 32-bit values, which could then overflow. Prevent
overflow by using (uintptr_t) casts on the elements being multiplied to
ensure they are 64-bit on 64-bit systems.

Fixes: b26473ff8f4a ("hash: add reset function")
Fixes: 406da3dfb3b5 ("hash: move duplicated code into functions")
Fixes: 9eca8bd7a61c ("hash: separate lock-free and r/w lock lookup")
Fixes: 4d9ca3ed2133 ("hash: use ordered loads only if signature matches")
Fixes: 769b2de7fb52 ("hash: implement RCU resources reclamation")
Fixes: e605a1d36ca7 ("hash: add lock-free r/w concurrency")
Fixes: 6dc34e0afe7a ("hash: retrieve a key given its position")
Fixes: f9edbc9bb6bc ("hash: add iterate function")
Fixes: 75706568a7eb ("hash: add extendable bucket feature")
Cc: stable@dpdk.org

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/hash/rte_cuckoo_hash.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index da12825c6e..f2478d5286 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -705,7 +705,7 @@ rte_hash_reset(struct rte_hash *h)
 	}
 
 	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
-	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
+	memset(h->key_store, 0, (size_t)h->key_entry_size * (h->entries + 1));
 	*h->tbl_chng_cnt = 0;
 
 	/* reset the free ring */
@@ -774,7 +774,7 @@ search_and_update(const struct rte_hash *h, void *data, const void *key,
 	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
 		if (bkt->sig_current[i] == sig) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					bkt->key_idx[i] * h->key_entry_size);
+					(uintptr_t)bkt->key_idx[i] * h->key_entry_size);
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				/* The store to application data at *data
 				 * should not leak after the store to pdata
@@ -1140,7 +1140,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
 			return -ENOSPC;
 	}
 
-	new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
+	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
 	/* The store to application data (by the application) at *data should
 	 * not leak after the store of pdata in the key store. i.e. pdata is
 	 * the guard variable. Release the application data to the readers.
@@ -1329,7 +1329,7 @@ search_one_bucket_l(const struct rte_hash *h, const void *key,
 		if (bkt->sig_current[i] == sig &&
 				bkt->key_idx[i] != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					bkt->key_idx[i] * h->key_entry_size);
+					(uintptr_t)bkt->key_idx[i] * h->key_entry_size);
 
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				if (data != NULL)
@@ -1367,7 +1367,7 @@ search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
 					  rte_memory_order_acquire);
 			if (key_idx != EMPTY_SLOT) {
 				k = (struct rte_hash_key *) ((char *)keys +
-						key_idx * h->key_entry_size);
+						(uintptr_t)key_idx * h->key_entry_size);
 
 				if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 					if (data != NULL) {
@@ -1569,7 +1569,7 @@ __hash_rcu_qsbr_free_resource(void *p, void *e, unsigned int n)
 	keys = h->key_store;
 
 	k = (struct rte_hash_key *) ((char *)keys +
-				rcu_dq_entry.key_idx * h->key_entry_size);
+				(uintptr_t)rcu_dq_entry.key_idx * h->key_entry_size);
 	key_data = k->pdata;
 	if (h->hash_rcu_cfg->free_key_data_func)
 		h->hash_rcu_cfg->free_key_data_func(h->hash_rcu_cfg->key_data_ptr,
@@ -1757,7 +1757,7 @@ search_and_remove(const struct rte_hash *h, const void *key,
 					  rte_memory_order_acquire);
 		if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					key_idx * h->key_entry_size);
+					(uintptr_t)key_idx * h->key_entry_size);
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				bkt->sig_current[i] = NULL_SIGNATURE;
 				/* Free the key store index if
@@ -1912,7 +1912,7 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
 	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
 
 	struct rte_hash_key *k, *keys = h->key_store;
-	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
+	k = (struct rte_hash_key *) ((char *) keys + (uintptr_t)(position + 1) *
 				     h->key_entry_size);
 	*key = k->key;
 
@@ -2007,7 +2007,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				(uintptr_t)key_idx * h->key_entry_size);
 			rte_prefetch0(key_slot);
 			continue;
 		}
@@ -2021,7 +2021,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				(uintptr_t)key_idx * h->key_entry_size);
 			rte_prefetch0(key_slot);
 		}
 	}
@@ -2046,7 +2046,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				(uintptr_t)key_idx * h->key_entry_size);
 
 			/*
 			 * If key index is 0, do not compare key,
@@ -2074,7 +2074,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				(uintptr_t)key_idx * h->key_entry_size);
 
 			/*
 			 * If key index is 0, do not compare key,
@@ -2193,7 +2193,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					(uintptr_t)key_idx * h->key_entry_size);
 				rte_prefetch0(key_slot);
 				continue;
 			}
@@ -2207,7 +2207,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					(uintptr_t)key_idx * h->key_entry_size);
 				rte_prefetch0(key_slot);
 			}
 		}
@@ -2233,7 +2233,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					(uintptr_t)key_idx * h->key_entry_size);
 
 				/*
 				 * If key index is 0, do not compare key,
@@ -2265,7 +2265,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					(uintptr_t)key_idx * h->key_entry_size);
 
 				/*
 				 * If key index is 0, do not compare key,
@@ -2621,7 +2621,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 
 	__hash_rw_reader_lock(h);
 	next_key = (struct rte_hash_key *) ((char *)h->key_store +
-				position * h->key_entry_size);
+				(uintptr_t)position * h->key_entry_size);
 	/* Return key and data */
 	*key = next_key->key;
 	*data = next_key->pdata;
@@ -2652,7 +2652,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 	}
 	__hash_rw_reader_lock(h);
 	next_key = (struct rte_hash_key *) ((char *)h->key_store +
-				position * h->key_entry_size);
+				(uintptr_t)position * h->key_entry_size);
 	/* Return key and data */
 	*key = next_key->key;
 	*data = next_key->pdata;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* RE: [PATCH] hash: fix overflow of 32-bit offsets
  2026-02-20 14:39 [PATCH] hash: fix overflow of 32-bit offsets Bruce Richardson
@ 2026-02-20 15:09 ` Morten Brørup
  2026-02-20 16:11   ` Bruce Richardson
  2026-02-20 16:33 ` Bruce Richardson
  2026-02-27 13:01 ` [PATCH v3] " Bruce Richardson
  2 siblings, 1 reply; 6+ messages in thread
From: Morten Brørup @ 2026-02-20 15:09 UTC (permalink / raw)
  To: Bruce Richardson, dev; +Cc: vladimir.medvedkin, oleksandrn, stable

> From: Bruce Richardson [mailto:bruce.richardson@intel.com]
> Sent: Friday, 20 February 2026 15.40
> 
> When computing the offset inside the overall hash structure by adding
> an
> offset to a base pointer, the offset was generally calculated by
> multiplying two 32-bit values, which could then overflow. Prevent
> overflow by using (uintptr_t) casts on the elements being multiplied to
> ensure they are 64-bit on 64-bit systems.
> 
> Fixes: b26473ff8f4a ("hash: add reset function")
> Fixes: 406da3dfb3b5 ("hash: move duplicated code into functions")
> Fixes: 9eca8bd7a61c ("hash: separate lock-free and r/w lock lookup")
> Fixes: 4d9ca3ed2133 ("hash: use ordered loads only if signature
> matches")
> Fixes: 769b2de7fb52 ("hash: implement RCU resources reclamation")
> Fixes: e605a1d36ca7 ("hash: add lock-free r/w concurrency")
> Fixes: 6dc34e0afe7a ("hash: retrieve a key given its position")
> Fixes: f9edbc9bb6bc ("hash: add iterate function")
> Fixes: 75706568a7eb ("hash: add extendable bucket feature")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> ---
>  lib/hash/rte_cuckoo_hash.c | 36 ++++++++++++++++++------------------
>  1 file changed, 18 insertions(+), 18 deletions(-)
> 
> diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
> index da12825c6e..f2478d5286 100644
> --- a/lib/hash/rte_cuckoo_hash.c
> +++ b/lib/hash/rte_cuckoo_hash.c
> @@ -705,7 +705,7 @@ rte_hash_reset(struct rte_hash *h)
>  	}
> 
>  	memset(h->buckets, 0, h->num_buckets * sizeof(struct
> rte_hash_bucket));
> -	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
> +	memset(h->key_store, 0, (size_t)h->key_entry_size * (h->entries +
> 1));

Agree.

>  	*h->tbl_chng_cnt = 0;
> 
>  	/* reset the free ring */
> @@ -774,7 +774,7 @@ search_and_update(const struct rte_hash *h, void
> *data, const void *key,
>  	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
>  		if (bkt->sig_current[i] == sig) {
>  			k = (struct rte_hash_key *) ((char *)keys +
> -					bkt->key_idx[i] * h->key_entry_size);
> +					(uintptr_t)bkt->key_idx[i] * h-
> >key_entry_size);

The fix is technically correct.
However, for source code readability purposes:

Please don't cast bkt->key_idx[i] (and similar below) to uintptr_t; it's not a pointer type.
It might be reasonable casting it to ptrdiff_t, but I'm not sure about that.

The natural type for h->key_entry_size is size_t; it's only smaller to save memory or something.
So, please cast the type that naturally would be wider, i.e. use: (size_t)h->key_entry_size

PS: If you change the casting as suggested, remember to update the patch description accordingly.

With or without above suggestion for readability,
Acked-by: Morten Brørup <mb@smartsharesystems.com>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] hash: fix overflow of 32-bit offsets
  2026-02-20 15:09 ` Morten Brørup
@ 2026-02-20 16:11   ` Bruce Richardson
  0 siblings, 0 replies; 6+ messages in thread
From: Bruce Richardson @ 2026-02-20 16:11 UTC (permalink / raw)
  To: Morten Brørup; +Cc: dev, vladimir.medvedkin, oleksandrn, stable

On Fri, Feb 20, 2026 at 04:09:14PM +0100, Morten Brørup wrote:
> > From: Bruce Richardson [mailto:bruce.richardson@intel.com]
> > Sent: Friday, 20 February 2026 15.40
> > 
> > When computing the offset inside the overall hash structure by adding
> > an
> > offset to a base pointer, the offset was generally calculated by
> > multiplying two 32-bit values, which could then overflow. Prevent
> > overflow by using (uintptr_t) casts on the elements being multiplied to
> > ensure they are 64-bit on 64-bit systems.
> > 
> > Fixes: b26473ff8f4a ("hash: add reset function")
> > Fixes: 406da3dfb3b5 ("hash: move duplicated code into functions")
> > Fixes: 9eca8bd7a61c ("hash: separate lock-free and r/w lock lookup")
> > Fixes: 4d9ca3ed2133 ("hash: use ordered loads only if signature
> > matches")
> > Fixes: 769b2de7fb52 ("hash: implement RCU resources reclamation")
> > Fixes: e605a1d36ca7 ("hash: add lock-free r/w concurrency")
> > Fixes: 6dc34e0afe7a ("hash: retrieve a key given its position")
> > Fixes: f9edbc9bb6bc ("hash: add iterate function")
> > Fixes: 75706568a7eb ("hash: add extendable bucket feature")
> > Cc: stable@dpdk.org
> > 
> > Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> > ---
> >  lib/hash/rte_cuckoo_hash.c | 36 ++++++++++++++++++------------------
> >  1 file changed, 18 insertions(+), 18 deletions(-)
> > 
> > diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
> > index da12825c6e..f2478d5286 100644
> > --- a/lib/hash/rte_cuckoo_hash.c
> > +++ b/lib/hash/rte_cuckoo_hash.c
> > @@ -705,7 +705,7 @@ rte_hash_reset(struct rte_hash *h)
> >  	}
> > 
> >  	memset(h->buckets, 0, h->num_buckets * sizeof(struct
> > rte_hash_bucket));
> > -	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
> > +	memset(h->key_store, 0, (size_t)h->key_entry_size * (h->entries +
> > 1));
> 
> Agree.
> 
> >  	*h->tbl_chng_cnt = 0;
> > 
> >  	/* reset the free ring */
> > @@ -774,7 +774,7 @@ search_and_update(const struct rte_hash *h, void
> > *data, const void *key,
> >  	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
> >  		if (bkt->sig_current[i] == sig) {
> >  			k = (struct rte_hash_key *) ((char *)keys +
> > -					bkt->key_idx[i] * h->key_entry_size);
> > +					(uintptr_t)bkt->key_idx[i] * h-
> > >key_entry_size);
> 
> The fix is technically correct.
> However, for source code readability purposes:
> 
> Please don't cast bkt->key_idx[i] (and similar below) to uintptr_t; it's not a pointer type.
> It might be reasonable casting it to ptrdiff_t, but I'm not sure about that.
> 
> The natural type for h->key_entry_size is size_t; it's only smaller to save memory or something.
> So, please cast the type that naturally would be wider, i.e. use: (size_t)h->key_entry_size
> 
> PS: If you change the casting as suggested, remember to update the patch description accordingly.
> 
> With or without above suggestion for readability,
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>
Ok to use size_t instead. Let me respin.

/Bruce 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH] hash: fix overflow of 32-bit offsets
  2026-02-20 14:39 [PATCH] hash: fix overflow of 32-bit offsets Bruce Richardson
  2026-02-20 15:09 ` Morten Brørup
@ 2026-02-20 16:33 ` Bruce Richardson
  2026-02-27 13:01 ` [PATCH v3] " Bruce Richardson
  2 siblings, 0 replies; 6+ messages in thread
From: Bruce Richardson @ 2026-02-20 16:33 UTC (permalink / raw)
  To: dev
  Cc: oleksandrn, vladimir.medvedkin, Bruce Richardson, stable,
	Morten Brørup

When computing the offset inside the overall hash structure by adding an
offset to a base pointer, the offset was generally calculated by
multiplying two 32-bit values, which could then overflow. Prevent
overflow by using (size_t) casts on the elements being multiplied to
ensure they are 64-bit on 64-bit systems.

Fixes: b26473ff8f4a ("hash: add reset function")
Fixes: 406da3dfb3b5 ("hash: move duplicated code into functions")
Fixes: 9eca8bd7a61c ("hash: separate lock-free and r/w lock lookup")
Fixes: 4d9ca3ed2133 ("hash: use ordered loads only if signature matches")
Fixes: 769b2de7fb52 ("hash: implement RCU resources reclamation")
Fixes: e605a1d36ca7 ("hash: add lock-free r/w concurrency")
Fixes: 6dc34e0afe7a ("hash: retrieve a key given its position")
Fixes: f9edbc9bb6bc ("hash: add iterate function")
Fixes: 75706568a7eb ("hash: add extendable bucket feature")
Cc: stable@dpdk.org

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
V2: 
* use size_t rather than uintptr_t
* consistently apply size_t cast to the key_entry_size value rather than the position one.

---
 lib/hash/rte_cuckoo_hash.c | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index da12825c6e..3f382dd117 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -705,7 +705,7 @@ rte_hash_reset(struct rte_hash *h)
 	}
 
 	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
-	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
+	memset(h->key_store, 0, (size_t)h->key_entry_size * (h->entries + 1));
 	*h->tbl_chng_cnt = 0;
 
 	/* reset the free ring */
@@ -774,7 +774,7 @@ search_and_update(const struct rte_hash *h, void *data, const void *key,
 	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
 		if (bkt->sig_current[i] == sig) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					bkt->key_idx[i] * h->key_entry_size);
+					bkt->key_idx[i] * (size_t)h->key_entry_size);
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				/* The store to application data at *data
 				 * should not leak after the store to pdata
@@ -1140,7 +1140,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
 			return -ENOSPC;
 	}
 
-	new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
+	new_k = RTE_PTR_ADD(keys, slot_id * (size_t)h->key_entry_size);
 	/* The store to application data (by the application) at *data should
 	 * not leak after the store of pdata in the key store. i.e. pdata is
 	 * the guard variable. Release the application data to the readers.
@@ -1329,7 +1329,7 @@ search_one_bucket_l(const struct rte_hash *h, const void *key,
 		if (bkt->sig_current[i] == sig &&
 				bkt->key_idx[i] != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					bkt->key_idx[i] * h->key_entry_size);
+					bkt->key_idx[i] * (size_t)h->key_entry_size);
 
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				if (data != NULL)
@@ -1367,7 +1367,7 @@ search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
 					  rte_memory_order_acquire);
 			if (key_idx != EMPTY_SLOT) {
 				k = (struct rte_hash_key *) ((char *)keys +
-						key_idx * h->key_entry_size);
+						key_idx * (size_t)h->key_entry_size);
 
 				if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 					if (data != NULL) {
@@ -1569,7 +1569,7 @@ __hash_rcu_qsbr_free_resource(void *p, void *e, unsigned int n)
 	keys = h->key_store;
 
 	k = (struct rte_hash_key *) ((char *)keys +
-				rcu_dq_entry.key_idx * h->key_entry_size);
+				rcu_dq_entry.key_idx * (size_t)h->key_entry_size);
 	key_data = k->pdata;
 	if (h->hash_rcu_cfg->free_key_data_func)
 		h->hash_rcu_cfg->free_key_data_func(h->hash_rcu_cfg->key_data_ptr,
@@ -1757,7 +1757,7 @@ search_and_remove(const struct rte_hash *h, const void *key,
 					  rte_memory_order_acquire);
 		if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				bkt->sig_current[i] = NULL_SIGNATURE;
 				/* Free the key store index if
@@ -1912,8 +1912,7 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
 	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
 
 	struct rte_hash_key *k, *keys = h->key_store;
-	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
-				     h->key_entry_size);
+	k = (struct rte_hash_key *) ((char *) keys + (position + 1) * (size_t)h->key_entry_size);
 	*key = k->key;
 
 	if (position !=
@@ -2007,7 +2006,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 			rte_prefetch0(key_slot);
 			continue;
 		}
@@ -2021,7 +2020,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 			rte_prefetch0(key_slot);
 		}
 	}
@@ -2046,7 +2045,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 
 			/*
 			 * If key index is 0, do not compare key,
@@ -2074,7 +2073,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 
 			/*
 			 * If key index is 0, do not compare key,
@@ -2193,7 +2192,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 				rte_prefetch0(key_slot);
 				continue;
 			}
@@ -2207,7 +2206,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 				rte_prefetch0(key_slot);
 			}
 		}
@@ -2233,7 +2232,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 
 				/*
 				 * If key index is 0, do not compare key,
@@ -2265,7 +2264,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 
 				/*
 				 * If key index is 0, do not compare key,
@@ -2621,7 +2620,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 
 	__hash_rw_reader_lock(h);
 	next_key = (struct rte_hash_key *) ((char *)h->key_store +
-				position * h->key_entry_size);
+				position * (size_t)h->key_entry_size);
 	/* Return key and data */
 	*key = next_key->key;
 	*data = next_key->pdata;
@@ -2652,7 +2651,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 	}
 	__hash_rw_reader_lock(h);
 	next_key = (struct rte_hash_key *) ((char *)h->key_store +
-				position * h->key_entry_size);
+				position * (size_t)h->key_entry_size);
 	/* Return key and data */
 	*key = next_key->key;
 	*data = next_key->pdata;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3] hash: fix overflow of 32-bit offsets
  2026-02-20 14:39 [PATCH] hash: fix overflow of 32-bit offsets Bruce Richardson
  2026-02-20 15:09 ` Morten Brørup
  2026-02-20 16:33 ` Bruce Richardson
@ 2026-02-27 13:01 ` Bruce Richardson
  2026-03-05 12:19   ` David Marchand
  2 siblings, 1 reply; 6+ messages in thread
From: Bruce Richardson @ 2026-02-27 13:01 UTC (permalink / raw)
  To: dev; +Cc: Bruce Richardson, stable, Morten Brørup

When computing the offset inside the overall hash structure by adding an
offset to a base pointer, the offset was generally calculated by
multiplying two 32-bit values, which could then overflow. Prevent
overflow by using (size_t) casts on the elements being multiplied to
ensure they are 64-bit on 64-bit systems.

Fixes: b26473ff8f4a ("hash: add reset function")
Fixes: 406da3dfb3b5 ("hash: move duplicated code into functions")
Fixes: 9eca8bd7a61c ("hash: separate lock-free and r/w lock lookup")
Fixes: 4d9ca3ed2133 ("hash: use ordered loads only if signature matches")
Fixes: 769b2de7fb52 ("hash: implement RCU resources reclamation")
Fixes: e605a1d36ca7 ("hash: add lock-free r/w concurrency")
Fixes: 6dc34e0afe7a ("hash: retrieve a key given its position")
Fixes: f9edbc9bb6bc ("hash: add iterate function")
Fixes: 75706568a7eb ("hash: add extendable bucket feature")
Cc: stable@dpdk.org

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
V3:
* rebase on latest main and resubmit now that some CI issues have
   been (independently) fixed

V2:
* use size_t rather than uintptr_t
* consistently apply size_t cast to the key_entry_size value rather
  than the position one.
---
 lib/hash/rte_cuckoo_hash.c | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index da12825c6e..3f382dd117 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -705,7 +705,7 @@ rte_hash_reset(struct rte_hash *h)
 	}
 
 	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
-	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
+	memset(h->key_store, 0, (size_t)h->key_entry_size * (h->entries + 1));
 	*h->tbl_chng_cnt = 0;
 
 	/* reset the free ring */
@@ -774,7 +774,7 @@ search_and_update(const struct rte_hash *h, void *data, const void *key,
 	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
 		if (bkt->sig_current[i] == sig) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					bkt->key_idx[i] * h->key_entry_size);
+					bkt->key_idx[i] * (size_t)h->key_entry_size);
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				/* The store to application data at *data
 				 * should not leak after the store to pdata
@@ -1140,7 +1140,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
 			return -ENOSPC;
 	}
 
-	new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
+	new_k = RTE_PTR_ADD(keys, slot_id * (size_t)h->key_entry_size);
 	/* The store to application data (by the application) at *data should
 	 * not leak after the store of pdata in the key store. i.e. pdata is
 	 * the guard variable. Release the application data to the readers.
@@ -1329,7 +1329,7 @@ search_one_bucket_l(const struct rte_hash *h, const void *key,
 		if (bkt->sig_current[i] == sig &&
 				bkt->key_idx[i] != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					bkt->key_idx[i] * h->key_entry_size);
+					bkt->key_idx[i] * (size_t)h->key_entry_size);
 
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				if (data != NULL)
@@ -1367,7 +1367,7 @@ search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
 					  rte_memory_order_acquire);
 			if (key_idx != EMPTY_SLOT) {
 				k = (struct rte_hash_key *) ((char *)keys +
-						key_idx * h->key_entry_size);
+						key_idx * (size_t)h->key_entry_size);
 
 				if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 					if (data != NULL) {
@@ -1569,7 +1569,7 @@ __hash_rcu_qsbr_free_resource(void *p, void *e, unsigned int n)
 	keys = h->key_store;
 
 	k = (struct rte_hash_key *) ((char *)keys +
-				rcu_dq_entry.key_idx * h->key_entry_size);
+				rcu_dq_entry.key_idx * (size_t)h->key_entry_size);
 	key_data = k->pdata;
 	if (h->hash_rcu_cfg->free_key_data_func)
 		h->hash_rcu_cfg->free_key_data_func(h->hash_rcu_cfg->key_data_ptr,
@@ -1757,7 +1757,7 @@ search_and_remove(const struct rte_hash *h, const void *key,
 					  rte_memory_order_acquire);
 		if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				bkt->sig_current[i] = NULL_SIGNATURE;
 				/* Free the key store index if
@@ -1912,8 +1912,7 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
 	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
 
 	struct rte_hash_key *k, *keys = h->key_store;
-	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
-				     h->key_entry_size);
+	k = (struct rte_hash_key *) ((char *) keys + (position + 1) * (size_t)h->key_entry_size);
 	*key = k->key;
 
 	if (position !=
@@ -2007,7 +2006,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 			rte_prefetch0(key_slot);
 			continue;
 		}
@@ -2021,7 +2020,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 			rte_prefetch0(key_slot);
 		}
 	}
@@ -2046,7 +2045,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 
 			/*
 			 * If key index is 0, do not compare key,
@@ -2074,7 +2073,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
 			const struct rte_hash_key *key_slot =
 				(const struct rte_hash_key *)(
 				(const char *)h->key_store +
-				key_idx * h->key_entry_size);
+				key_idx * (size_t)h->key_entry_size);
 
 			/*
 			 * If key index is 0, do not compare key,
@@ -2193,7 +2192,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 				rte_prefetch0(key_slot);
 				continue;
 			}
@@ -2207,7 +2206,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 				rte_prefetch0(key_slot);
 			}
 		}
@@ -2233,7 +2232,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 
 				/*
 				 * If key index is 0, do not compare key,
@@ -2265,7 +2264,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+					key_idx * (size_t)h->key_entry_size);
 
 				/*
 				 * If key index is 0, do not compare key,
@@ -2621,7 +2620,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 
 	__hash_rw_reader_lock(h);
 	next_key = (struct rte_hash_key *) ((char *)h->key_store +
-				position * h->key_entry_size);
+				position * (size_t)h->key_entry_size);
 	/* Return key and data */
 	*key = next_key->key;
 	*data = next_key->pdata;
@@ -2652,7 +2651,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 	}
 	__hash_rw_reader_lock(h);
 	next_key = (struct rte_hash_key *) ((char *)h->key_store +
-				position * h->key_entry_size);
+				position * (size_t)h->key_entry_size);
 	/* Return key and data */
 	*key = next_key->key;
 	*data = next_key->pdata;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v3] hash: fix overflow of 32-bit offsets
  2026-02-27 13:01 ` [PATCH v3] " Bruce Richardson
@ 2026-03-05 12:19   ` David Marchand
  0 siblings, 0 replies; 6+ messages in thread
From: David Marchand @ 2026-03-05 12:19 UTC (permalink / raw)
  To: Bruce Richardson; +Cc: dev, stable, Morten Brørup

On Fri, 27 Feb 2026 at 14:03, Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> When computing the offset inside the overall hash structure by adding an
> offset to a base pointer, the offset was generally calculated by
> multiplying two 32-bit values, which could then overflow. Prevent
> overflow by using (size_t) casts on the elements being multiplied to
> ensure they are 64-bit on 64-bit systems.
>
> Fixes: b26473ff8f4a ("hash: add reset function")
> Fixes: 406da3dfb3b5 ("hash: move duplicated code into functions")
> Fixes: 9eca8bd7a61c ("hash: separate lock-free and r/w lock lookup")
> Fixes: 4d9ca3ed2133 ("hash: use ordered loads only if signature matches")
> Fixes: 769b2de7fb52 ("hash: implement RCU resources reclamation")
> Fixes: e605a1d36ca7 ("hash: add lock-free r/w concurrency")
> Fixes: 6dc34e0afe7a ("hash: retrieve a key given its position")
> Fixes: f9edbc9bb6bc ("hash: add iterate function")
> Fixes: 75706568a7eb ("hash: add extendable bucket feature")
> Cc: stable@dpdk.org
>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>

Applied, thanks.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2026-03-05 12:20 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-02-20 14:39 [PATCH] hash: fix overflow of 32-bit offsets Bruce Richardson
2026-02-20 15:09 ` Morten Brørup
2026-02-20 16:11   ` Bruce Richardson
2026-02-20 16:33 ` Bruce Richardson
2026-02-27 13:01 ` [PATCH v3] " Bruce Richardson
2026-03-05 12:19   ` David Marchand

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox