public inbox for dev@dpdk.org
 help / color / mirror / Atom feed
* [PATCH] mempool: introduce statistics reset function
@ 2026-02-23 10:20 Morten Brørup
  2026-02-23 17:21 ` Stephen Hemminger
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Morten Brørup @ 2026-02-23 10:20 UTC (permalink / raw)
  To: dev, Andrew Rybchenko; +Cc: Morten Brørup

Populating a mempool with objects is accounted for in the statistics.
When analyzing mempool cache statistics, this may distort the data.
In order to simplify mempool cache statistics analysis, a mempool
statistics reset function was added.

Furthermore, details about average burst sizes and mempool cache miss
rates were added to the statistics shown when dumping a mempool.

Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/mempool/mempool_trace.h        |  7 +++++
 lib/mempool/mempool_trace_points.c |  4 +++
 lib/mempool/rte_mempool.c          | 50 ++++++++++++++++++++++++++++--
 lib/mempool/rte_mempool.h          | 12 +++++++
 4 files changed, 71 insertions(+), 2 deletions(-)

diff --git a/lib/mempool/mempool_trace.h b/lib/mempool/mempool_trace.h
index c595a3116b..23cda1473c 100644
--- a/lib/mempool/mempool_trace.h
+++ b/lib/mempool/mempool_trace.h
@@ -104,6 +104,13 @@ RTE_TRACE_POINT(
 	rte_trace_point_emit_string(mempool->name);
 )
 
+RTE_TRACE_POINT(
+	rte_mempool_trace_stats_reset,
+	RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
+	rte_trace_point_emit_ptr(mempool);
+	rte_trace_point_emit_string(mempool->name);
+)
+
 RTE_TRACE_POINT(
 	rte_mempool_trace_cache_create,
 	RTE_TRACE_POINT_ARGS(uint32_t size, int socket_id,
diff --git a/lib/mempool/mempool_trace_points.c b/lib/mempool/mempool_trace_points.c
index ec465780f4..8249981502 100644
--- a/lib/mempool/mempool_trace_points.c
+++ b/lib/mempool/mempool_trace_points.c
@@ -60,6 +60,10 @@ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_default,
 RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_anon,
 	lib.mempool.populate.anon)
 
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(__rte_mempool_trace_stats_reset, 26.03)
+RTE_TRACE_POINT_REGISTER(rte_mempool_trace_stats_reset,
+	lib.mempool.stats_reset)
+
 RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_create,
 	lib.mempool.cache_create)
 
diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 3042d94c14..ff7d940f91 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -1049,6 +1049,28 @@ rte_mempool_in_use_count(const struct rte_mempool *mp)
 	return mp->size - rte_mempool_avail_count(mp);
 }
 
+/* Reset the statistics of a mempool. */
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_mempool_stats_reset, 26.03)
+void
+rte_mempool_stats_reset(struct rte_mempool *mp)
+{
+	RTE_ASSERT(mp != NULL);
+
+#ifdef RTE_LIBRTE_MEMPOOL_STATS
+	memset(&mp->stats, 0, sizeof(mp->stats));
+	if (mp->cache_size != 0) {
+		for (unsigned int lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+			memset(&mp->local_cache[lcore_id].stats, 0,
+					sizeof(mp->local_cache[lcore_id].stats));
+		}
+	}
+
+	rte_mempool_trace_stats_reset(mp);
+#else
+	RTE_SET_USED(mp);
+#endif
+}
+
 /* dump the cache status */
 static unsigned
 rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
@@ -1327,10 +1349,34 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
 	fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
 	if (info.contig_block_size > 0) {
-		fprintf(f, "    get_success_blks=%"PRIu64"\n",
-			sum.get_success_blks);
+		fprintf(f, "    get_success_blks=%"PRIu64"\n", sum.get_success_blks);
 		fprintf(f, "    get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
 	}
+	fprintf(f, "    avg objs/bulk put=%#Lf, get=%#Lf, get_fail=%#Lf\n",
+			sum.put_bulk != 0 ? (long double)sum.put_objs / sum.put_bulk : 0,
+			sum.get_success_bulk != 0 ?
+			(long double)sum.get_success_objs / sum.get_success_bulk : 0,
+			sum.get_fail_bulk != 0 ?
+			(long double)sum.get_fail_objs / sum.get_fail_bulk : 0);
+	fprintf(f, "    avg common_pool objs/bulk put=%#Lf, get=%#Lf\n",
+			sum.put_common_pool_bulk != 0 ?
+			(long double)sum.put_common_pool_objs / sum.put_common_pool_bulk : 0,
+			sum.get_common_pool_bulk != 0 ?
+			(long double)sum.get_common_pool_objs / sum.get_common_pool_bulk : 0);
+	fprintf(f, "    avg cache miss rate put_objs=%s%#Lf, get_objs=%s%#Lf\n",
+			sum.put_common_pool_objs != 0 ? "1/" : "",
+			sum.put_common_pool_objs != 0 ?
+			(long double)sum.put_objs / sum.put_common_pool_objs : 0,
+			sum.get_common_pool_objs != 0 ? "1/" : "",
+			sum.get_common_pool_objs != 0 ?
+			(long double)sum.get_success_objs / sum.get_common_pool_objs : 0);
+	fprintf(f, "    avg cache miss rate put_bulk=%s%#Lf, get_bulk=%s%#Lf\n",
+			sum.put_common_pool_bulk != 0 ? "1/" : "",
+			sum.put_common_pool_bulk != 0 ?
+			(long double)sum.put_bulk / sum.put_common_pool_bulk : 0,
+			sum.get_common_pool_bulk != 0 ? "1/" : "",
+			sum.get_common_pool_bulk != 0 ?
+			(long double)sum.get_success_bulk / sum.get_common_pool_bulk : 0);
 #else
 	fprintf(f, "  no statistics available\n");
 #endif
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index 1144dca58a..1aea1789e9 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -1288,6 +1288,18 @@ uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
 uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
 	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
+ *
+ * Reset the statistics of a mempool.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ */
+__rte_experimental
+void rte_mempool_stats_reset(struct rte_mempool *mp);
+
 /**
  * Dump the status of the mempool to a file.
  *
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH] mempool: introduce statistics reset function
  2026-02-23 10:20 [PATCH] mempool: introduce statistics reset function Morten Brørup
@ 2026-02-23 17:21 ` Stephen Hemminger
  2026-02-23 18:14   ` Morten Brørup
  2026-02-24  6:29 ` Andrew Rybchenko
  2026-02-24  9:28 ` [PATCH v2] " Morten Brørup
  2 siblings, 1 reply; 10+ messages in thread
From: Stephen Hemminger @ 2026-02-23 17:21 UTC (permalink / raw)
  To: Morten Brørup; +Cc: dev, Andrew Rybchenko

On Mon, 23 Feb 2026 10:20:54 +0000
Morten Brørup <mb@smartsharesystems.com> wrote:

> +	fprintf(f, "    avg objs/bulk put=%#Lf, get=%#Lf, get_fail=%#Lf\n",
> +			sum.put_bulk != 0 ? (long double)sum.put_objs / sum.put_bulk : 0,
> +			sum.get_success_bulk != 0 ?
> +			(long double)sum.get_success_objs / sum.get_success_bulk : 0,
> +			sum.get_fail_bulk != 0 ?
> +			(long double)sum.get_fail_objs / sum.get_fail_bulk : 0);
> +	fprintf(f, "    avg common_pool objs/bulk put=%#Lf, get=%#Lf\n",
> +			sum.put_common_pool_bulk != 0 ?
> +			(long double)sum.put_common_pool_objs / sum.put_common_pool_bulk : 0,
> +			sum.get_common_pool_bulk != 0 ?
> +			(long double)sum.get_common_pool_objs / sum.get_common_pool_bulk : 0);
> +	fprintf(f, "    avg cache miss rate put_objs=%s%#Lf, get_objs=%s%#Lf\n",
> +			sum.put_common_pool_objs != 0 ? "1/" : "",
> +			sum.put_common_pool_objs != 0 ?
> +			(long double)sum.put_objs / sum.put_common_pool_objs : 0,
> +			sum.get_common_pool_objs != 0 ? "1/" : "",
> +			sum.get_common_pool_objs != 0 ?
> +			(long double)sum.get_success_objs / sum.get_common_pool_objs : 0);
> +	fprintf(f, "    avg cache miss rate put_bulk=%s%#Lf, get_bulk=%s%#Lf\n",
> +			sum.put_common_pool_bulk != 0 ? "1/" : "",
> +			sum.put_common_pool_bulk != 0 ?
> +			(long double)sum.put_bulk / sum.put_common_pool_bulk : 0,
> +			sum.get_common_pool_bulk != 0 ? "1/" : "",
> +			sum.get_common_pool_bulk != 0 ?
> +			(long double)sum.get_success_bulk / sum.get_common_pool_bulk : 0);

This is getting verbose, would look better as function or better yet table driven.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* RE: [PATCH] mempool: introduce statistics reset function
  2026-02-23 17:21 ` Stephen Hemminger
@ 2026-02-23 18:14   ` Morten Brørup
  0 siblings, 0 replies; 10+ messages in thread
From: Morten Brørup @ 2026-02-23 18:14 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: dev, Andrew Rybchenko

> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Monday, 23 February 2026 18.21
> 
> On Mon, 23 Feb 2026 10:20:54 +0000
> Morten Brørup <mb@smartsharesystems.com> wrote:
> 
> > +	fprintf(f, "    avg objs/bulk put=%#Lf, get=%#Lf,
> get_fail=%#Lf\n",
> > +			sum.put_bulk != 0 ? (long double)sum.put_objs /
> sum.put_bulk : 0,
> > +			sum.get_success_bulk != 0 ?
> > +			(long double)sum.get_success_objs /
> sum.get_success_bulk : 0,
> > +			sum.get_fail_bulk != 0 ?
> > +			(long double)sum.get_fail_objs / sum.get_fail_bulk :
> 0);
> > +	fprintf(f, "    avg common_pool objs/bulk put=%#Lf, get=%#Lf\n",
> > +			sum.put_common_pool_bulk != 0 ?
> > +			(long double)sum.put_common_pool_objs /
> sum.put_common_pool_bulk : 0,
> > +			sum.get_common_pool_bulk != 0 ?
> > +			(long double)sum.get_common_pool_objs /
> sum.get_common_pool_bulk : 0);
> > +	fprintf(f, "    avg cache miss rate put_objs=%s%#Lf,
> get_objs=%s%#Lf\n",
> > +			sum.put_common_pool_objs != 0 ? "1/" : "",
> > +			sum.put_common_pool_objs != 0 ?
> > +			(long double)sum.put_objs / sum.put_common_pool_objs
> : 0,
> > +			sum.get_common_pool_objs != 0 ? "1/" : "",
> > +			sum.get_common_pool_objs != 0 ?
> > +			(long double)sum.get_success_objs /
> sum.get_common_pool_objs : 0);
> > +	fprintf(f, "    avg cache miss rate put_bulk=%s%#Lf,
> get_bulk=%s%#Lf\n",
> > +			sum.put_common_pool_bulk != 0 ? "1/" : "",
> > +			sum.put_common_pool_bulk != 0 ?
> > +			(long double)sum.put_bulk / sum.put_common_pool_bulk
> : 0,
> > +			sum.get_common_pool_bulk != 0 ? "1/" : "",
> > +			sum.get_common_pool_bulk != 0 ?
> > +			(long double)sum.get_success_bulk /
> sum.get_common_pool_bulk : 0);
> 
> This is getting verbose, would look better as function or better yet
> table driven.

Dump outputs are formatted as hierarchy, like this, so table is a no-go.
Only the last two are similar, and could share a macro/function, but I think it would make the code less readable.


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] mempool: introduce statistics reset function
  2026-02-23 10:20 [PATCH] mempool: introduce statistics reset function Morten Brørup
  2026-02-23 17:21 ` Stephen Hemminger
@ 2026-02-24  6:29 ` Andrew Rybchenko
  2026-02-24  6:38   ` Morten Brørup
  2026-02-24  9:28 ` [PATCH v2] " Morten Brørup
  2 siblings, 1 reply; 10+ messages in thread
From: Andrew Rybchenko @ 2026-02-24  6:29 UTC (permalink / raw)
  To: Morten Brørup, dev

On 2/23/26 1:20 PM, Morten Brørup wrote:
> Populating a mempool with objects is accounted for in the statistics.
> When analyzing mempool cache statistics, this may distort the data.
> In order to simplify mempool cache statistics analysis, a mempool
> statistics reset function was added.
> 
> Furthermore, details about average burst sizes and mempool cache miss
> rates were added to the statistics shown when dumping a mempool.
> 
> Signed-off-by: Morten Brørup <mb@smartsharesystems.com>

I'd like to see thoughts about consistency after reset taking into
account that reset will likely to be done from control core whereas
stats are updated from data cores. Results could be very interesting.
I guess it is not the reason to introduce locking or any other kind
of synchronization, but user should be warned as the bare minimum.


^ permalink raw reply	[flat|nested] 10+ messages in thread

* RE: [PATCH] mempool: introduce statistics reset function
  2026-02-24  6:29 ` Andrew Rybchenko
@ 2026-02-24  6:38   ` Morten Brørup
  2026-02-24  6:50     ` Andrew Rybchenko
  0 siblings, 1 reply; 10+ messages in thread
From: Morten Brørup @ 2026-02-24  6:38 UTC (permalink / raw)
  To: Andrew Rybchenko, dev

> From: Andrew Rybchenko [mailto:andrew.rybchenko@oktetlabs.ru]
> Sent: Tuesday, 24 February 2026 07.29
> 
> On 2/23/26 1:20 PM, Morten Brørup wrote:
> > Populating a mempool with objects is accounted for in the statistics.
> > When analyzing mempool cache statistics, this may distort the data.
> > In order to simplify mempool cache statistics analysis, a mempool
> > statistics reset function was added.
> >
> > Furthermore, details about average burst sizes and mempool cache miss
> > rates were added to the statistics shown when dumping a mempool.
> >
> > Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> 
> I'd like to see thoughts about consistency after reset taking into
> account that reset will likely to be done from control core whereas
> stats are updated from data cores. Results could be very interesting.
> I guess it is not the reason to introduce locking or any other kind
> of synchronization, but user should be warned as the bare minimum.

When used for cache statistics analysis, the reset function will be called once only: After everything has been initialized (mempool created and populated, and ethdev Rx queues preloaded with mbufs), but before the data plane is started.

I don't think the end user should be warned; this may be normal behavior for an application.
In addition to the trace event, I'll add a DEBUG level log message in the function.

And a warning about potential inconsistency in the function description.

Good feedback, Andrew!


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] mempool: introduce statistics reset function
  2026-02-24  6:38   ` Morten Brørup
@ 2026-02-24  6:50     ` Andrew Rybchenko
  0 siblings, 0 replies; 10+ messages in thread
From: Andrew Rybchenko @ 2026-02-24  6:50 UTC (permalink / raw)
  To: Morten Brørup, dev

On 2/24/26 9:38 AM, Morten Brørup wrote:
>> From: Andrew Rybchenko [mailto:andrew.rybchenko@oktetlabs.ru]
>> Sent: Tuesday, 24 February 2026 07.29
>>
>> On 2/23/26 1:20 PM, Morten Brørup wrote:
>>> Populating a mempool with objects is accounted for in the statistics.
>>> When analyzing mempool cache statistics, this may distort the data.
>>> In order to simplify mempool cache statistics analysis, a mempool
>>> statistics reset function was added.
>>>
>>> Furthermore, details about average burst sizes and mempool cache miss
>>> rates were added to the statistics shown when dumping a mempool.
>>>
>>> Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
>>
>> I'd like to see thoughts about consistency after reset taking into
>> account that reset will likely to be done from control core whereas
>> stats are updated from data cores. Results could be very interesting.
>> I guess it is not the reason to introduce locking or any other kind
>> of synchronization, but user should be warned as the bare minimum.
> 
> When used for cache statistics analysis, the reset function will be called once only: After everything has been initialized (mempool created and populated, and ethdev Rx queues preloaded with mbufs), but before the data plane is started.

Thanks I see now. May be these assumptions should be mentioned in the
function description? Or is it too much?

> I don't think the end user should be warned; this may be normal behavior for an application.
> In addition to the trace event, I'll add a DEBUG level log message in the function.
> And a warning about potential inconsistency in the function description.

Sounds good.

> Good feedback, Andrew!
> 


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH v2] mempool: introduce statistics reset function
  2026-02-23 10:20 [PATCH] mempool: introduce statistics reset function Morten Brørup
  2026-02-23 17:21 ` Stephen Hemminger
  2026-02-24  6:29 ` Andrew Rybchenko
@ 2026-02-24  9:28 ` Morten Brørup
  2026-02-24  9:57   ` Andrew Rybchenko
  2026-02-27  9:20   ` fengchengwen
  2 siblings, 2 replies; 10+ messages in thread
From: Morten Brørup @ 2026-02-24  9:28 UTC (permalink / raw)
  To: Andrew Rybchenko, Stephen Hemminger, dev; +Cc: Morten Brørup

Populating a mempool with objects is accounted for in the statistics.
When analyzing mempool cache statistics, this may distort the data.
In order to simplify mempool cache statistics analysis, a mempool
statistics reset function was added.

Furthermore, details about average burst sizes and mempool cache miss
rates were added to the statistics shown when dumping a mempool.

Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
---
v2:
* Added detailed usage instructions to the function description. (Andrew)
* Added DEBUG log message to the function. (Andrew)
---
 lib/mempool/mempool_trace.h        |  7 ++++
 lib/mempool/mempool_trace_points.c |  4 +++
 lib/mempool/rte_mempool.c          | 51 ++++++++++++++++++++++++++++--
 lib/mempool/rte_mempool.h          | 29 +++++++++++++++++
 4 files changed, 89 insertions(+), 2 deletions(-)

diff --git a/lib/mempool/mempool_trace.h b/lib/mempool/mempool_trace.h
index c595a3116b..23cda1473c 100644
--- a/lib/mempool/mempool_trace.h
+++ b/lib/mempool/mempool_trace.h
@@ -104,6 +104,13 @@ RTE_TRACE_POINT(
 	rte_trace_point_emit_string(mempool->name);
 )
 
+RTE_TRACE_POINT(
+	rte_mempool_trace_stats_reset,
+	RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
+	rte_trace_point_emit_ptr(mempool);
+	rte_trace_point_emit_string(mempool->name);
+)
+
 RTE_TRACE_POINT(
 	rte_mempool_trace_cache_create,
 	RTE_TRACE_POINT_ARGS(uint32_t size, int socket_id,
diff --git a/lib/mempool/mempool_trace_points.c b/lib/mempool/mempool_trace_points.c
index ec465780f4..8249981502 100644
--- a/lib/mempool/mempool_trace_points.c
+++ b/lib/mempool/mempool_trace_points.c
@@ -60,6 +60,10 @@ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_default,
 RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_anon,
 	lib.mempool.populate.anon)
 
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(__rte_mempool_trace_stats_reset, 26.03)
+RTE_TRACE_POINT_REGISTER(rte_mempool_trace_stats_reset,
+	lib.mempool.stats_reset)
+
 RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_create,
 	lib.mempool.cache_create)
 
diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 3042d94c14..d33ea15157 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -1049,6 +1049,29 @@ rte_mempool_in_use_count(const struct rte_mempool *mp)
 	return mp->size - rte_mempool_avail_count(mp);
 }
 
+/* Reset the statistics of a mempool. */
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_mempool_stats_reset, 26.03)
+void
+rte_mempool_stats_reset(struct rte_mempool *mp)
+{
+	RTE_ASSERT(mp != NULL);
+
+#ifdef RTE_LIBRTE_MEMPOOL_STATS
+	memset(&mp->stats, 0, sizeof(mp->stats));
+	if (mp->cache_size != 0) {
+		for (unsigned int lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+			memset(&mp->local_cache[lcore_id].stats, 0,
+					sizeof(mp->local_cache[lcore_id].stats));
+		}
+	}
+
+	RTE_MEMPOOL_LOG(DEBUG, "<%s>@%p: statistics reset", mp->name, mp);
+	rte_mempool_trace_stats_reset(mp);
+#else
+	RTE_SET_USED(mp);
+#endif
+}
+
 /* dump the cache status */
 static unsigned
 rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
@@ -1327,10 +1350,34 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
 	fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
 	if (info.contig_block_size > 0) {
-		fprintf(f, "    get_success_blks=%"PRIu64"\n",
-			sum.get_success_blks);
+		fprintf(f, "    get_success_blks=%"PRIu64"\n", sum.get_success_blks);
 		fprintf(f, "    get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
 	}
+	fprintf(f, "    avg objs/bulk put=%#Lf, get=%#Lf, get_fail=%#Lf\n",
+			sum.put_bulk != 0 ? (long double)sum.put_objs / sum.put_bulk : 0,
+			sum.get_success_bulk != 0 ?
+			(long double)sum.get_success_objs / sum.get_success_bulk : 0,
+			sum.get_fail_bulk != 0 ?
+			(long double)sum.get_fail_objs / sum.get_fail_bulk : 0);
+	fprintf(f, "    avg common_pool objs/bulk put=%#Lf, get=%#Lf\n",
+			sum.put_common_pool_bulk != 0 ?
+			(long double)sum.put_common_pool_objs / sum.put_common_pool_bulk : 0,
+			sum.get_common_pool_bulk != 0 ?
+			(long double)sum.get_common_pool_objs / sum.get_common_pool_bulk : 0);
+	fprintf(f, "    avg cache miss rate put_objs=%s%#Lf, get_objs=%s%#Lf\n",
+			sum.put_common_pool_objs != 0 ? "1/" : "",
+			sum.put_common_pool_objs != 0 ?
+			(long double)sum.put_objs / sum.put_common_pool_objs : 0,
+			sum.get_common_pool_objs != 0 ? "1/" : "",
+			sum.get_common_pool_objs != 0 ?
+			(long double)sum.get_success_objs / sum.get_common_pool_objs : 0);
+	fprintf(f, "    avg cache miss rate put_bulk=%s%#Lf, get_bulk=%s%#Lf\n",
+			sum.put_common_pool_bulk != 0 ? "1/" : "",
+			sum.put_common_pool_bulk != 0 ?
+			(long double)sum.put_bulk / sum.put_common_pool_bulk : 0,
+			sum.get_common_pool_bulk != 0 ? "1/" : "",
+			sum.get_common_pool_bulk != 0 ?
+			(long double)sum.get_success_bulk / sum.get_common_pool_bulk : 0);
 #else
 	fprintf(f, "  no statistics available\n");
 #endif
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index 1144dca58a..0b62837534 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -1288,6 +1288,35 @@ uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
 uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
 	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
+ *
+ * Reset the statistics of a mempool.
+ *
+ * This function is intended for use when analyzing mempool statistics
+ * without counting any mempool operations performed during application
+ * initialization.
+ * For example, populating the mempool counts as put operations into the
+ * common pool, and setting up ethdev Rx queues counts as get operations.
+ *
+ * This function should only be called after application initialization,
+ * before the data path is started; otherwise, the mempool statistics may
+ * become inconsistent.
+ *
+ * For a perfectly clean slate, the local caches of the mempools used
+ * during application initialization should be flushed before resetting
+ * the mempool statistics.
+ * For example, mbuf pools used by ethdev Rx queues.
+ *
+ * @see rte_mempool_cache_flush()
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ */
+__rte_experimental
+void rte_mempool_stats_reset(struct rte_mempool *mp);
+
 /**
  * Dump the status of the mempool to a file.
  *
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH v2] mempool: introduce statistics reset function
  2026-02-24  9:28 ` [PATCH v2] " Morten Brørup
@ 2026-02-24  9:57   ` Andrew Rybchenko
  2026-02-27  9:20   ` fengchengwen
  1 sibling, 0 replies; 10+ messages in thread
From: Andrew Rybchenko @ 2026-02-24  9:57 UTC (permalink / raw)
  To: Morten Brørup, Stephen Hemminger, dev

On 2/24/26 12:28 PM, Morten Brørup wrote:
> Populating a mempool with objects is accounted for in the statistics.
> When analyzing mempool cache statistics, this may distort the data.
> In order to simplify mempool cache statistics analysis, a mempool
> statistics reset function was added.
> 
> Furthermore, details about average burst sizes and mempool cache miss
> rates were added to the statistics shown when dumping a mempool.
> 
> Signed-off-by: Morten Brørup <mb@smartsharesystems.com>

Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2] mempool: introduce statistics reset function
  2026-02-24  9:28 ` [PATCH v2] " Morten Brørup
  2026-02-24  9:57   ` Andrew Rybchenko
@ 2026-02-27  9:20   ` fengchengwen
  2026-02-27 21:24     ` Morten Brørup
  1 sibling, 1 reply; 10+ messages in thread
From: fengchengwen @ 2026-02-27  9:20 UTC (permalink / raw)
  To: Morten Brørup, Andrew Rybchenko, Stephen Hemminger, dev

Acked-by: Chengwen Feng <fengchengwen@huawei.com>

Another question:
   What is the overhead of mempool stats? If the cost is low, can we
   remove the debug macro? As far as I know, some DPDK applications enable
   mempool statistics (maybe self-impl) in the production environment,
   which is mainly used to help locate problems.

On 2/24/2026 5:28 PM, Morten Brørup wrote:
> Populating a mempool with objects is accounted for in the statistics.
> When analyzing mempool cache statistics, this may distort the data.
> In order to simplify mempool cache statistics analysis, a mempool
> statistics reset function was added.
> 
> Furthermore, details about average burst sizes and mempool cache miss
> rates were added to the statistics shown when dumping a mempool.
> 
> Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> ---
> v2:
> * Added detailed usage instructions to the function description. (Andrew)
> * Added DEBUG log message to the function. (Andrew)
> ---
>  lib/mempool/mempool_trace.h        |  7 ++++
>  lib/mempool/mempool_trace_points.c |  4 +++
>  lib/mempool/rte_mempool.c          | 51 ++++++++++++++++++++++++++++--
>  lib/mempool/rte_mempool.h          | 29 +++++++++++++++++
>  4 files changed, 89 insertions(+), 2 deletions(-)
> 
> diff --git a/lib/mempool/mempool_trace.h b/lib/mempool/mempool_trace.h
> index c595a3116b..23cda1473c 100644
> --- a/lib/mempool/mempool_trace.h
> +++ b/lib/mempool/mempool_trace.h
> @@ -104,6 +104,13 @@ RTE_TRACE_POINT(
>  	rte_trace_point_emit_string(mempool->name);
>  )
>  
> +RTE_TRACE_POINT(
> +	rte_mempool_trace_stats_reset,
> +	RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
> +	rte_trace_point_emit_ptr(mempool);
> +	rte_trace_point_emit_string(mempool->name);
> +)
> +
>  RTE_TRACE_POINT(
>  	rte_mempool_trace_cache_create,
>  	RTE_TRACE_POINT_ARGS(uint32_t size, int socket_id,
> diff --git a/lib/mempool/mempool_trace_points.c b/lib/mempool/mempool_trace_points.c
> index ec465780f4..8249981502 100644
> --- a/lib/mempool/mempool_trace_points.c
> +++ b/lib/mempool/mempool_trace_points.c
> @@ -60,6 +60,10 @@ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_default,
>  RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_anon,
>  	lib.mempool.populate.anon)
>  
> +RTE_EXPORT_EXPERIMENTAL_SYMBOL(__rte_mempool_trace_stats_reset, 26.03)
> +RTE_TRACE_POINT_REGISTER(rte_mempool_trace_stats_reset,
> +	lib.mempool.stats_reset)
> +
>  RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_create,
>  	lib.mempool.cache_create)
>  
> diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
> index 3042d94c14..d33ea15157 100644
> --- a/lib/mempool/rte_mempool.c
> +++ b/lib/mempool/rte_mempool.c
> @@ -1049,6 +1049,29 @@ rte_mempool_in_use_count(const struct rte_mempool *mp)
>  	return mp->size - rte_mempool_avail_count(mp);
>  }
>  
> +/* Reset the statistics of a mempool. */
> +RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_mempool_stats_reset, 26.03)
> +void
> +rte_mempool_stats_reset(struct rte_mempool *mp)
> +{
> +	RTE_ASSERT(mp != NULL);
> +
> +#ifdef RTE_LIBRTE_MEMPOOL_STATS
> +	memset(&mp->stats, 0, sizeof(mp->stats));
> +	if (mp->cache_size != 0) {
> +		for (unsigned int lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +			memset(&mp->local_cache[lcore_id].stats, 0,
> +					sizeof(mp->local_cache[lcore_id].stats));
> +		}
> +	}
> +
> +	RTE_MEMPOOL_LOG(DEBUG, "<%s>@%p: statistics reset", mp->name, mp);
> +	rte_mempool_trace_stats_reset(mp);
> +#else
> +	RTE_SET_USED(mp);
> +#endif
> +}
> +
>  /* dump the cache status */
>  static unsigned
>  rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
> @@ -1327,10 +1350,34 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
>  	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
>  	fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
>  	if (info.contig_block_size > 0) {
> -		fprintf(f, "    get_success_blks=%"PRIu64"\n",
> -			sum.get_success_blks);
> +		fprintf(f, "    get_success_blks=%"PRIu64"\n", sum.get_success_blks);
>  		fprintf(f, "    get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
>  	}
> +	fprintf(f, "    avg objs/bulk put=%#Lf, get=%#Lf, get_fail=%#Lf\n",
> +			sum.put_bulk != 0 ? (long double)sum.put_objs / sum.put_bulk : 0,
> +			sum.get_success_bulk != 0 ?
> +			(long double)sum.get_success_objs / sum.get_success_bulk : 0,
> +			sum.get_fail_bulk != 0 ?
> +			(long double)sum.get_fail_objs / sum.get_fail_bulk : 0);
> +	fprintf(f, "    avg common_pool objs/bulk put=%#Lf, get=%#Lf\n",
> +			sum.put_common_pool_bulk != 0 ?
> +			(long double)sum.put_common_pool_objs / sum.put_common_pool_bulk : 0,
> +			sum.get_common_pool_bulk != 0 ?
> +			(long double)sum.get_common_pool_objs / sum.get_common_pool_bulk : 0);
> +	fprintf(f, "    avg cache miss rate put_objs=%s%#Lf, get_objs=%s%#Lf\n",
> +			sum.put_common_pool_objs != 0 ? "1/" : "",
> +			sum.put_common_pool_objs != 0 ?
> +			(long double)sum.put_objs / sum.put_common_pool_objs : 0,
> +			sum.get_common_pool_objs != 0 ? "1/" : "",
> +			sum.get_common_pool_objs != 0 ?
> +			(long double)sum.get_success_objs / sum.get_common_pool_objs : 0);
> +	fprintf(f, "    avg cache miss rate put_bulk=%s%#Lf, get_bulk=%s%#Lf\n",
> +			sum.put_common_pool_bulk != 0 ? "1/" : "",
> +			sum.put_common_pool_bulk != 0 ?
> +			(long double)sum.put_bulk / sum.put_common_pool_bulk : 0,
> +			sum.get_common_pool_bulk != 0 ? "1/" : "",
> +			sum.get_common_pool_bulk != 0 ?
> +			(long double)sum.get_success_bulk / sum.get_common_pool_bulk : 0);
>  #else
>  	fprintf(f, "  no statistics available\n");
>  #endif
> diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
> index 1144dca58a..0b62837534 100644
> --- a/lib/mempool/rte_mempool.h
> +++ b/lib/mempool/rte_mempool.h
> @@ -1288,6 +1288,35 @@ uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
>  uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
>  	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
>  
> +/**
> + * @warning
> + * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
> + *
> + * Reset the statistics of a mempool.
> + *
> + * This function is intended for use when analyzing mempool statistics
> + * without counting any mempool operations performed during application
> + * initialization.
> + * For example, populating the mempool counts as put operations into the
> + * common pool, and setting up ethdev Rx queues counts as get operations.
> + *
> + * This function should only be called after application initialization,
> + * before the data path is started; otherwise, the mempool statistics may
> + * become inconsistent.
> + *
> + * For a perfectly clean slate, the local caches of the mempools used
> + * during application initialization should be flushed before resetting
> + * the mempool statistics.
> + * For example, mbuf pools used by ethdev Rx queues.
> + *
> + * @see rte_mempool_cache_flush()
> + *
> + * @param mp
> + *   A pointer to the mempool structure.
> + */
> +__rte_experimental
> +void rte_mempool_stats_reset(struct rte_mempool *mp);
> +
>  /**
>   * Dump the status of the mempool to a file.
>   *


^ permalink raw reply	[flat|nested] 10+ messages in thread

* RE: [PATCH v2] mempool: introduce statistics reset function
  2026-02-27  9:20   ` fengchengwen
@ 2026-02-27 21:24     ` Morten Brørup
  0 siblings, 0 replies; 10+ messages in thread
From: Morten Brørup @ 2026-02-27 21:24 UTC (permalink / raw)
  To: fengchengwen, Andrew Rybchenko, Stephen Hemminger, dev; +Cc: techboard

> From: fengchengwen [mailto:fengchengwen@huawei.com]
> Sent: Friday, 27 February 2026 10.20
> 
> Acked-by: Chengwen Feng <fengchengwen@huawei.com>
> 
> Another question:
>    How about the overload of mempool stats, If the cost is low, can we
>    remove the debug macro? As far as I know, some DPDK applications
> enable
>    mempool statistics (maybe self-impl) in the production environment,
>    which is mainly used to help locate problems.

Mempool is a core library, so even though the overhead of the mempool statistics is relatively low cost, it is a debug-only facility, and the extra overhead should not be forced upon users.
IMO, we should keep it build time configurable.

You do raise a good point about the balance tradeoff!
Maybe enabling it for production is worth it for the majority of users. In SmartShare Systems, we enable it for production too. 

We should discuss changing the default from disabled to enabled.


^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2026-02-27 21:24 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-02-23 10:20 [PATCH] mempool: introduce statistics reset function Morten Brørup
2026-02-23 17:21 ` Stephen Hemminger
2026-02-23 18:14   ` Morten Brørup
2026-02-24  6:29 ` Andrew Rybchenko
2026-02-24  6:38   ` Morten Brørup
2026-02-24  6:50     ` Andrew Rybchenko
2026-02-24  9:28 ` [PATCH v2] " Morten Brørup
2026-02-24  9:57   ` Andrew Rybchenko
2026-02-27  9:20   ` fengchengwen
2026-02-27 21:24     ` Morten Brørup

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox