public inbox for dev@dpdk.org
 help / color / mirror / Atom feed
* [PATCH] test: add a test for null PMD
@ 2026-01-04 22:25 Stephen Hemminger
  2026-01-05 14:49 ` Marat Khalili
                   ` (10 more replies)
  0 siblings, 11 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-04 22:25 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD.
This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 827 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 828 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index efec42a6bf..967e20d187 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['ethdev', 'net'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..ebc2447c6d
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,827 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+static bool port_created;
+static bool port_started;
+
+/*
+ * Create a null vdev with the given devargs and look up its ethdev port id.
+ * If the port lookup fails, the vdev is destroyed again so nothing leaks.
+ * Returns 0 on success, or the failing rte_* return value.
+ */
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		/* undo the vdev creation above */
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure a port with a single RX and a single TX queue (both of
+ * RING_SIZE descriptors), using the shared test mempool 'mp' for RX,
+ * then start the port. Returns 0 on success.
+ */
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	struct rte_eth_dev_info dev_info;
+	int ret;
+
+	ret = rte_eth_dev_info_get(pid, &dev_info);
+	if (ret != 0) {
+		printf("Failed to get device info for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite setup: create the shared mbuf pool and the main null test port.
+ * port_created/port_started record how far setup got, so teardown only
+ * undoes the steps that actually succeeded.
+ */
+static int
+test_null_setup(void)
+{
+	int ret;
+
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	ret = create_null_port(NULL_DEV_NAME, NULL, &port_id);
+	if (ret != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+	port_created = true;
+
+	ret = configure_null_port(port_id);
+	if (ret != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+	port_started = true;
+
+	return 0;
+}
+
+/*
+ * Suite teardown: release resources in the reverse order of creation
+ * (stop port, close/uninit device, free mempool), guarded by the flags
+ * set in test_null_setup() so a partial setup tears down cleanly.
+ */
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	if (port_started) {
+		rte_eth_dev_stop(port_id);
+		port_started = false;
+	}
+
+	if (port_created) {
+		rte_eth_dev_close(port_id);
+		rte_vdev_uninit(NULL_DEV_NAME);
+		port_id = RTE_MAX_ETHPORTS;
+		port_created = false;
+	}
+
+	if (mp != NULL) {
+		rte_mempool_free(mp);
+		mp = NULL;
+	}
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ *
+ * The null PMD is expected to synthesize a full burst of mbufs from the
+ * configured pool; each mbuf should be stamped with the RX port id and
+ * the default packet length.
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* RX should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == 64,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == 64,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	for (i = 0; i < nb_rx; i++)
+		rte_pktmbuf_free(bufs[i]);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ *
+ * TX on the null PMD should consume (free) every mbuf handed to it;
+ * this is checked indirectly via the mempool available count.
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int i;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	for (i = 0; i < BURST_SIZE; i++) {
+		bufs[i] = rte_pktmbuf_alloc(mp);
+		TEST_ASSERT(bufs[i] != NULL, "Failed to allocate mbuf");
+		bufs[i]->data_len = 64;
+		bufs[i]->pkt_len = 64;
+	}
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	/* Give some time for async operations (if any) */
+	rte_delay_us_block(100);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count.
+	 * '>=' rather than '==' because per-lcore mempool caches can make
+	 * the available count an approximation.
+	 */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification
+ *
+ * Resets the counters, performs one RX and one TX burst, then checks
+ * that packet and byte counters match exactly. Byte counts assume the
+ * default 64-byte packet size of the null PMD.
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	unsigned int i;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX */
+	for (i = 0; i < BURST_SIZE; i++) {
+		tx_bufs[i] = rte_pktmbuf_alloc(mp);
+		TEST_ASSERT(tx_bufs[i] != NULL, "Failed to allocate mbuf");
+		tx_bufs[i]->data_len = 64;
+		tx_bufs[i]->pkt_len = 64;
+	}
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+	/* Default packet size is 64 bytes */
+	TEST_ASSERT(stats.ibytes == BURST_SIZE * 64,
+		    "Expected ibytes=%u, got %"PRIu64,
+		    BURST_SIZE * 64, stats.ibytes);
+	TEST_ASSERT(stats.obytes == BURST_SIZE * 64,
+		    "Expected obytes=%u, got %"PRIu64,
+		    BURST_SIZE * 64, stats.obytes);
+
+	/* Free RX mbufs (TX mbufs were consumed by the driver) */
+	for (i = 0; i < nb_rx; i++)
+		rte_pktmbuf_free(rx_bufs[i]);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size
+ *
+ * The null PMD takes the synthesized packet length from the "size="
+ * device argument; verify that RX mbufs carry that length.
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	char args[32];
+	int ret;
+
+	/* Build the devargs from custom_size so the argument string and
+	 * the value checked below cannot drift apart.
+	 */
+	snprintf(args, sizeof(args), "size=%u", custom_size);
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", args, &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	if (ret != 0) {
+		/* Do not leak the vdev when configuration fails */
+		rte_vdev_uninit("net_null_size_test");
+		TEST_ASSERT(ret == 0, "Failed to configure null port");
+	}
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+		rte_pktmbuf_free(bufs[i]);
+	}
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode
+ *
+ * Only verifies that the RX datapath still completes with "copy=1";
+ * packet contents are not inspected here.
+ */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
+
+	/* Free RX mbufs */
+	for (i = 0; i < nb_rx; i++)
+		rte_pktmbuf_free(rx_bufs[i]);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-RX mode
+ *
+ * With "no-rx=1" the RX path must yield zero packets while the TX path
+ * keeps consuming (freeing) mbufs as usual.
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port;
+	uint16_t nb_rx, nb_tx;
+	unsigned int i;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	for (i = 0; i < BURST_SIZE; i++) {
+		tx_bufs[i] = rte_pktmbuf_alloc(mp);
+		TEST_ASSERT(tx_bufs[i] != NULL, "Failed to allocate mbuf");
+		tx_bufs[i]->data_len = 64;
+		tx_bufs[i]->pkt_len = 64;
+	}
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status
+ *
+ * Uses the shared port: checks link UP after start, DOWN after stop,
+ * and restarts the port so later cases still see it running.
+ * NOTE(review): 10G/full-duplex match what the null PMD currently
+ * reports; they are implementation details rather than a contract.
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info
+ *
+ * NOTE(review): the exact values asserted below (max_mac_addrs,
+ * hash_key_size, offload bits) mirror the current null PMD
+ * implementation; confirm they are intended guarantees before relying
+ * on them.
+ */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+	TEST_ASSERT(dev_info.max_rx_pktlen == (uint32_t)-1,
+		    "Unexpected max_rx_pktlen");
+	TEST_ASSERT(dev_info.min_rx_bufsize == 0,
+		    "Expected min_rx_bufsize=0, got %u", dev_info.min_rx_bufsize);
+
+	/* Check TX offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple RX/TX bursts
+ *
+ * Runs num_bursts full bursts in each direction and checks that the
+ * packet counters accumulate to exactly num_bursts * BURST_SIZE.
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int i, burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple RX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		for (i = 0; i < nb_rx; i++)
+			rte_pktmbuf_free(bufs[i]);
+	}
+
+	/* Perform multiple TX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		for (i = 0; i < BURST_SIZE; i++) {
+			bufs[i] = rte_pktmbuf_alloc(mp);
+			TEST_ASSERT(bufs[i] != NULL, "Failed to allocate mbuf");
+			bufs[i]->data_len = 64;
+			bufs[i]->pkt_len = 64;
+		}
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ *
+ * Creates a dedicated 2-queue port, reads the current RSS hash config,
+ * installs an all-0x55 key, and reads it back to verify the update.
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	/* rss_key_len still holds sizeof(rss_key) from the update above */
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ *
+ * Creates a dedicated 2-queue port, writes an alternating RETA
+ * (entry j -> queue j % num_queues), reads it back and verifies it.
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+	/*
+	 * reta_conf[] only has room for RTE_ETH_RSS_RETA_SIZE_128 entries;
+	 * the loops below are bounded by dev_info.reta_size, so bail out
+	 * rather than overflow the stack buffer if the driver ever reports
+	 * a larger table.
+	 */
+	TEST_ASSERT(dev_info.reta_size <= RTE_ETH_RSS_RETA_SIZE_128,
+		    "RETA size %u exceeds test buffer", dev_info.reta_size);
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ *
+ * Generates traffic so counters are non-zero, resets them, then checks
+ * that all four basic counters read back as zero.
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+	for (i = 0; i < nb_rx; i++)
+		rte_pktmbuf_free(bufs[i]);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ *
+ * Reads the default MAC, sets a new one, then restores the original so
+ * the shared port is left in its default state for later tests.
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr orig_mac;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &orig_mac);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	/* Restore the original address so this test has no lasting
+	 * side effect on the shared port.
+	 */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &orig_mac);
+	TEST_ASSERT(ret == 0, "Failed to restore MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ *
+ * NOTE(review): asserts the defaults the null PMD currently reports
+ * (both modes enabled); confirm this is intended behavior rather than
+ * an implementation detail.
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test suite definition. Most cases share the port created in setup;
+ * test_null_link_status stops and restarts that port before returning,
+ * and test_null_mac_addr changes its MAC address.
+ */
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Suite entry point, registered with the fast-test runner below. */
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, true, true, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* RE: [PATCH] test: add a test for null PMD
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
@ 2026-01-05 14:49 ` Marat Khalili
  2026-01-05 17:38   ` Stephen Hemminger
  2026-01-06 16:47 ` [PATCH v2 0/2] net/null: add a test Stephen Hemminger
                   ` (9 subsequent siblings)
  10 siblings, 1 reply; 61+ messages in thread
From: Marat Khalili @ 2026-01-05 14:49 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: dev@dpdk.org

Thank you for doing this. Having tests for things like this is important both 
to make sure they work and as a form of documentation.

In my humble opinion we should not approach this mechanically though, which is 
what AI tools tend to do. It is usually said that tests should test desired 
behavior, not a specific implementation. In particular, some of the settings' 
values might not matter, or a whole range might be acceptable; in other cases 
the current implementation might be questionable, and by writing a test we just 
legitimize it.

Please see more comments inline.

> diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
> new file mode 100644
> index 0000000000..ebc2447c6d
> --- /dev/null
> +++ b/app/test/test_pmd_null.c
> @@ -0,0 +1,827 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2026 Stephen Hemminger
> + */
> +
> +#include <inttypes.h>
> +#include <stdbool.h>
> +#include <stdio.h>
> +#include <string.h>
> +
> +#include <rte_bus_vdev.h>
> +#include <rte_cycles.h>
> +#include <rte_ethdev.h>
> +#include <rte_ether.h>
> +#include <rte_lcore.h>
> +#include <rte_mbuf.h>
> +#include <rte_mempool.h>

I do not think it is a problem, but just curious: how does test_pmd_ring.c 
manage to use far fewer includes?

> +
> +#include "test.h"
> +
> +#define NUM_MBUFS 256
> +#define MBUF_CACHE_SIZE 32
> +#define BURST_SIZE 32
> +#define RING_SIZE 512
> +
> +/* Test device names */
> +#define NULL_DEV_NAME "net_null_test"
> +
> +static struct rte_mempool *mp;
> +static uint16_t port_id = RTE_MAX_ETHPORTS;
> +static bool port_created;
> +static bool port_started;

// snip setup and teardown functions, could find nothing to nitpick there

> +/*
> + * Test: Basic RX - should return empty packets
> + */
> +static int
> +test_null_rx_basic(void)
> +{
> +	struct rte_mbuf *bufs[BURST_SIZE];
> +	uint16_t nb_rx;
> +	unsigned int i;
> +
> +	/* RX should return requested number of empty packets */
> +	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_rx == BURST_SIZE,
> +		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
> +
> +	/* Verify packets have expected properties */
> +	for (i = 0; i < nb_rx; i++) {
> +		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
> +		TEST_ASSERT(bufs[i]->port == port_id,
> +			    "Unexpected port id in mbuf: %u", bufs[i]->port);
> +		/* Default packet size is 64 bytes */

Default packet size repeats a few times in this file, can we have a global
constant for it?

> +		TEST_ASSERT(bufs[i]->pkt_len == 64,
> +			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
> +		TEST_ASSERT(bufs[i]->data_len == 64,
> +			    "Unexpected data_len: %u", bufs[i]->data_len);
> +	}
> +
> +	/* Free received mbufs */
> +	for (i = 0; i < nb_rx; i++)
> +		rte_pktmbuf_free(bufs[i]);
> +
> +	return TEST_SUCCESS;
> +}
> +
> +/*
> + * Test: Basic TX - should free all packets
> + */
> +static int
> +test_null_tx_basic(void)
> +{
> +	struct rte_mbuf *bufs[BURST_SIZE];
> +	uint16_t nb_tx;
> +	unsigned int i;
> +	unsigned int pool_count_before, pool_count_after;
> +
> +	/* Allocate mbufs for TX */
> +	for (i = 0; i < BURST_SIZE; i++) {
> +		bufs[i] = rte_pktmbuf_alloc(mp);
> +		TEST_ASSERT(bufs[i] != NULL, "Failed to allocate mbuf");
> +		bufs[i]->data_len = 64;
> +		bufs[i]->pkt_len = 64;

It would help to test sending packets of various sizes, especially spanning 
multiple buffers or allocating them at the edge of buffer. May even uncover 
some bugs in the copy mode implementation ;)

> +	}
> +
> +	pool_count_before = rte_mempool_avail_count(mp);
> +
> +	/* TX should accept and free all packets */
> +	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_tx == BURST_SIZE,
> +		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
> +
> +	/* Give some time for async operations (if any) */
> +	rte_delay_us_block(100);
> +
> +	pool_count_after = rte_mempool_avail_count(mp);
> +
> +	/* Verify mbufs were freed - pool should have same count */
> +	TEST_ASSERT(pool_count_after >= pool_count_before,
> +		    "Mbufs not freed: before=%u, after=%u",
> +		    pool_count_before, pool_count_after);
> +
> +	return TEST_SUCCESS;
> +}
> +
> +/*
> + * Test: Statistics verification
> + */
> +static int
> +test_null_stats(void)
> +{
> +	struct rte_eth_stats stats;
> +	struct rte_mbuf *rx_bufs[BURST_SIZE];
> +	struct rte_mbuf *tx_bufs[BURST_SIZE];
> +	uint16_t nb_rx, nb_tx;
> +	unsigned int i;
> +	int ret;
> +
> +	/* Reset stats */
> +	ret = rte_eth_stats_reset(port_id);
> +	TEST_ASSERT(ret == 0, "Failed to reset stats");
> +
> +	/* Get initial stats */
> +	ret = rte_eth_stats_get(port_id, &stats);
> +	TEST_ASSERT(ret == 0, "Failed to get stats");
> +	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
> +	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
> +
> +	/* Perform RX */
> +	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
> +
> +	/* Allocate and perform TX */
> +	for (i = 0; i < BURST_SIZE; i++) {
> +		tx_bufs[i] = rte_pktmbuf_alloc(mp);
> +		TEST_ASSERT(tx_bufs[i] != NULL, "Failed to allocate mbuf");
> +		tx_bufs[i]->data_len = 64;
> +		tx_bufs[i]->pkt_len = 64;

Here and elsewhere, it would be interesting to test something non-default.

> +	}
> +	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
> +
> +	/* Get updated stats */
> +	ret = rte_eth_stats_get(port_id, &stats);
> +	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
> +
> +	/* Verify stats */
> +	TEST_ASSERT(stats.ipackets == BURST_SIZE,
> +		    "Expected ipackets=%u, got %"PRIu64,
> +		    BURST_SIZE, stats.ipackets);
> +	TEST_ASSERT(stats.opackets == BURST_SIZE,
> +		    "Expected opackets=%u, got %"PRIu64,
> +		    BURST_SIZE, stats.opackets);
> +	/* Default packet size is 64 bytes */
> +	TEST_ASSERT(stats.ibytes == BURST_SIZE * 64,
> +		    "Expected ibytes=%u, got %"PRIu64,
> +		    BURST_SIZE * 64, stats.ibytes);
> +	TEST_ASSERT(stats.obytes == BURST_SIZE * 64,
> +		    "Expected obytes=%u, got %"PRIu64,
> +		    BURST_SIZE * 64, stats.obytes);
> +
> +	/* Free RX mbufs */
> +	for (i = 0; i < nb_rx; i++)
> +		rte_pktmbuf_free(rx_bufs[i]);
> +
> +	return TEST_SUCCESS;
> +}
> +
> +/*
> + * Test: Custom packet size
> + */
> +static int
> +test_null_custom_size(void)
> +{
> +	struct rte_mbuf *bufs[BURST_SIZE];
> +	uint16_t custom_port;
> +	uint16_t nb_rx;
> +	unsigned int i;
> +	const unsigned int custom_size = 256;

What about bigger values, something exceeding RTE_MBUF_DEFAULT_BUF_SIZE? Will 
it be able to honour them or at least fail gracefully?

> +	int ret;
> +
> +	/* Create null device with custom size */
> +	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
> +	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
> +
> +	ret = configure_null_port(custom_port);
> +	TEST_ASSERT(ret == 0, "Failed to configure null port");
> +
> +	/* RX should return packets with custom size */
> +	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
> +
> +	/* Verify custom packet size */
> +	for (i = 0; i < nb_rx; i++) {
> +		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
> +			    "Expected pkt_len=%u, got %u",
> +			    custom_size, bufs[i]->pkt_len);
> +		TEST_ASSERT(bufs[i]->data_len == custom_size,
> +			    "Expected data_len=%u, got %u",
> +			    custom_size, bufs[i]->data_len);
> +		rte_pktmbuf_free(bufs[i]);
> +	}
> +
> +	/* Cleanup custom port */
> +	rte_eth_dev_stop(custom_port);
> +	rte_eth_dev_close(custom_port);
> +	rte_vdev_uninit("net_null_size_test");
> +
> +	return TEST_SUCCESS;
> +}
> +
> +/*
> + * Test: Copy mode
> + */
> +static int
> +test_null_copy_mode(void)
> +{
> +	struct rte_mbuf *rx_bufs[BURST_SIZE];
> +	uint16_t copy_port;
> +	uint16_t nb_rx;
> +	unsigned int i;
> +	int ret;
> +
> +	/* Create null device with copy enabled */
> +	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
> +	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
> +
> +	ret = configure_null_port(copy_port);
> +	TEST_ASSERT(ret == 0, "Failed to configure null port");
> +
> +	/* RX in copy mode should work */
> +	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
> +
> +	/* Free RX mbufs */
> +	for (i = 0; i < nb_rx; i++)
> +		rte_pktmbuf_free(rx_bufs[i]);
> +
> +	/* Cleanup */
> +	rte_eth_dev_stop(copy_port);
> +	rte_eth_dev_close(copy_port);
> +	rte_vdev_uninit("net_null_copy_test");
> +
> +	return TEST_SUCCESS;
> +}

As far as I understand, copy mode should not be different for the end user 
(apart from performance and bugs). The test for it is not as comprehensive as 
for the default mode. Can we save some LOCs and improve coverage by just 
running full tests twice, with a boolean parameter for the copy mode?

> +
> +/*
> + * Test: No-RX mode
> + */
> +static int
> +test_null_no_rx_mode(void)
> +{
> +	struct rte_mbuf *rx_bufs[BURST_SIZE];
> +	struct rte_mbuf *tx_bufs[BURST_SIZE];
> +	uint16_t norx_port;
> +	uint16_t nb_rx, nb_tx;
> +	unsigned int i;
> +	int ret;
> +
> +	/* Create null device with no-rx enabled */
> +	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
> +	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
> +
> +	ret = configure_null_port(norx_port);
> +	TEST_ASSERT(ret == 0, "Failed to configure null port");
> +
> +	/* RX in no-rx mode should return 0 packets */
> +	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_rx == 0,
> +		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
> +
> +	/* TX in no-rx mode should still work (frees packets) */
> +	for (i = 0; i < BURST_SIZE; i++) {
> +		tx_bufs[i] = rte_pktmbuf_alloc(mp);
> +		TEST_ASSERT(tx_bufs[i] != NULL, "Failed to allocate mbuf");
> +		tx_bufs[i]->data_len = 64;
> +		tx_bufs[i]->pkt_len = 64;
> +	}
> +
> +	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
> +	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
> +
> +	/* Cleanup */
> +	rte_eth_dev_stop(norx_port);
> +	rte_eth_dev_close(norx_port);
> +	rte_vdev_uninit("net_null_norx_test");
> +
> +	return TEST_SUCCESS;

Probably fine, but could also make it a mode for the rest of the tests to 
verify also statistics etc.

> +}
> +
> +/*
> + * Test: Link status
> + */
> +static int
> +test_null_link_status(void)
> +{
> +	struct rte_eth_link link;
> +	int ret;
> +
> +	ret = rte_eth_link_get_nowait(port_id, &link);
> +	TEST_ASSERT(ret == 0, "Failed to get link status");
> +
> +	/* After start, link should be UP */
> +	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
> +		    "Expected link UP after start");
> +	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
> +		    "Expected 10G link speed");

I am not sure it is important to test that the link speed is exactly 10G. Will 
it be a bug if it becomes 25G tomorrow? Probably any valid value would do.

> +	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
> +		    "Expected full duplex");
> +
> +	/* Stop the device */
> +	ret = rte_eth_dev_stop(port_id);
> +	TEST_ASSERT(ret == 0, "Failed to stop device");
> +
> +	ret = rte_eth_link_get_nowait(port_id, &link);
> +	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
> +
> +	/* After stop, link should be DOWN */
> +	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
> +		    "Expected link DOWN after stop");
> +
> +	/* Restart for subsequent tests */
> +	ret = rte_eth_dev_start(port_id);
> +	TEST_ASSERT(ret == 0, "Failed to restart device");
> +
> +	return TEST_SUCCESS;
> +}
> +
> +/*
> + * Test: Device info
> + */
> +static int
> +test_null_dev_info(void)
> +{
> +	struct rte_eth_dev_info dev_info;
> +	int ret;
> +
> +	ret = rte_eth_dev_info_get(port_id, &dev_info);
> +	TEST_ASSERT(ret == 0, "Failed to get device info");
> +
> +	/* Verify expected device info values */
> +	TEST_ASSERT(dev_info.max_mac_addrs == 1,
> +		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
> +	TEST_ASSERT(dev_info.max_rx_pktlen == (uint32_t)-1,
> +		    "Unexpected max_rx_pktlen");

Why is (uint32_t)-1 a valid packet length for this device? Can it actually 
accept them?

> +	TEST_ASSERT(dev_info.min_rx_bufsize == 0,
> +		    "Expected min_rx_bufsize=0, got %u", dev_info.min_rx_bufsize);
> +
> +	/* Check TX offload capabilities */
> +	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
> +		    "Expected MULTI_SEGS TX offload capability");

It sure sets these values, but are they correct values, and would anything else 
be incorrect?

> +	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
> +		    "Expected MT_LOCKFREE TX offload capability");
> +
> +	/* Check RSS capabilities */
> +	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
> +	TEST_ASSERT(dev_info.hash_key_size == 40,
> +		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
> +	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
> +		    "Expected RSS offloads to be set");
> +
> +	return TEST_SUCCESS;
> +}

// snip test_null_multiple_bursts, looks fine

> +/*
> + * Test: RSS configuration
> + * Note: RSS requires multi-queue configuration
> + */
> +static int
> +test_null_rss_config(void)
> +{
> +	struct rte_eth_dev_info dev_info;
> +	struct rte_eth_rss_conf rss_conf;
> +	struct rte_eth_conf port_conf = {0};
> +	uint8_t rss_key[40];
> +	uint16_t rss_port;
> +	const uint16_t num_queues = 2;
> +	uint16_t q;
> +	int ret;
> +
> +	/* Create a new null device for RSS testing with multiple queues */
> +	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
> +	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
> +
> +	ret = rte_eth_dev_info_get(rss_port, &dev_info);
> +	TEST_ASSERT(ret == 0, "Failed to get device info");
> +
> +	/* Configure with RSS enabled and multiple queues */
> +	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
> +	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
> +
> +	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
> +	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
> +
> +	for (q = 0; q < num_queues; q++) {
> +		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
> +					     rte_eth_dev_socket_id(rss_port),
> +					     NULL, mp);
> +		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
> +
> +		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
> +					     rte_eth_dev_socket_id(rss_port),
> +					     NULL);
> +		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
> +	}
> +
> +	ret = rte_eth_dev_start(rss_port);
> +	TEST_ASSERT(ret == 0, "Failed to start RSS port");
> +
> +	/* Get current RSS config */
> +	memset(&rss_conf, 0, sizeof(rss_conf));
> +	rss_conf.rss_key = rss_key;
> +	rss_conf.rss_key_len = sizeof(rss_key);
> +
> +	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
> +	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
> +
> +	/* Update RSS config with new key */
> +	memset(rss_key, 0x55, sizeof(rss_key));
> +	rss_conf.rss_key = rss_key;
> +	rss_conf.rss_key_len = sizeof(rss_key);
> +	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
> +
> +	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
> +	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
> +
> +	/* Verify the update */
> +	memset(rss_key, 0, sizeof(rss_key));
> +	rss_conf.rss_key = rss_key;
> +
> +	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
> +	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
> +
> +	/* Verify key was updated */
> +	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
> +		TEST_ASSERT(rss_key[i] == 0x55,
> +			    "RSS key not updated at byte %u", i);
> +	}
> +

Can we receive and send something from/to these queues and verify resulting 
statistics? Statistics is one of the most important use cases for the null 
device, and it does benefit from multi-queue.

> +	/* Cleanup */
> +	rte_eth_dev_stop(rss_port);
> +	rte_eth_dev_close(rss_port);
> +	rte_vdev_uninit("net_null_rss_test");
> +
> +	return TEST_SUCCESS;
> +}
> +
> +/*
> + * Test: RETA (Redirection Table) configuration
> + * Note: RETA requires multi-queue RSS configuration
> + */
> +static int
> +test_null_reta_config(void)
> +{

Not sure who actually handles reta for the null device, additional tests 
actually using it may or may not be necessary depending on the answer.

// snip the rest of the function

> +}

// snip test_null_stats_reset, looks fine.

> +/*
> + * Test: MAC address operations
> + */
> +static int
> +test_null_mac_addr(void)
> +{
> +	struct rte_ether_addr mac_addr;
> +	struct rte_ether_addr new_mac = {
> +		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
> +	};
> +	int ret;
> +
> +	/* Get current MAC address */
> +	ret = rte_eth_macaddr_get(port_id, &mac_addr);
> +	TEST_ASSERT(ret == 0, "Failed to get MAC address");
> +
> +	/* Set new MAC address */
> +	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
> +	TEST_ASSERT(ret == 0, "Failed to set MAC address");

Should we now check that it was actually set?

> +
> +	return TEST_SUCCESS;
> +}
> +
> +/*
> + * Test: Promiscuous and allmulticast modes
> + */
> +static int
> +test_null_promisc_allmulti(void)
> +{
> +	int ret;
> +
> +	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
> +	ret = rte_eth_promiscuous_get(port_id);
> +	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
> +
> +	/* Test allmulticast mode - null PMD starts with allmulti enabled */
> +	ret = rte_eth_allmulticast_get(port_id);
> +	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");

Similar concerns as with other settings, are these specific values important?

> +
> +	return TEST_SUCCESS;
> +}
> +
> +static struct unit_test_suite null_pmd_test_suite = {
> +	.suite_name = "Null PMD Unit Test Suite",
> +	.setup = test_null_setup,
> +	.teardown = test_null_teardown,
> +	.unit_test_cases = {
> +		TEST_CASE(test_null_rx_basic),
> +		TEST_CASE(test_null_tx_basic),
> +		TEST_CASE(test_null_stats),
> +		TEST_CASE(test_null_custom_size),
> +		TEST_CASE(test_null_copy_mode),
> +		TEST_CASE(test_null_no_rx_mode),
> +		TEST_CASE(test_null_link_status),
> +		TEST_CASE(test_null_dev_info),
> +		TEST_CASE(test_null_multiple_bursts),
> +		TEST_CASE(test_null_rss_config),
> +		TEST_CASE(test_null_reta_config),
> +		TEST_CASE(test_null_stats_reset),
> +		TEST_CASE(test_null_mac_addr),
> +		TEST_CASE(test_null_promisc_allmulti),

test_pmd_ring.c is also doing some command-line testing, although it requires 
some cooperation from the test framework. Are we not going to cover this part 
for now?

> +
> +		TEST_CASES_END() /**< NULL terminate unit test array */
> +	}
> +};
> +
> +static int
> +test_pmd_null(void)
> +{
> +	return unit_test_suite_runner(&null_pmd_test_suite);
> +}
> +
> +REGISTER_FAST_TEST(null_pmd_autotest, true, true, test_pmd_null);
> --
> 2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* Re: [PATCH] test: add a test for null PMD
  2026-01-05 14:49 ` Marat Khalili
@ 2026-01-05 17:38   ` Stephen Hemminger
  0 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-05 17:38 UTC (permalink / raw)
  To: Marat Khalili; +Cc: dev@dpdk.org

On Mon, 5 Jan 2026 14:49:41 +0000
Marat Khalili <marat.khalili@huawei.com> wrote:
> Thank you for doing this. Having tests for things like this is important both 
> to make sure they work and as a form of documentation.
> 
> In my humble opinion we should not approach this mechanically though, which is 
> what AI tools tend to do. It is usually said that tests should test desired 
> behavior, not specific implementation. Particularly in settings some of the 
> values might not matter, or the whole range might be acceptable, in other cases 
> current implementation might be questionable and by writing a test we just 
> legitimize it.
> 
> Please see more comments inline.

I did not see anything obviously wrong with the tests that it generated so
far. And coverage is more important on legacy code.

> > diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
> > new file mode 100644
> > index 0000000000..ebc2447c6d
> > --- /dev/null
> > +++ b/app/test/test_pmd_null.c
> > @@ -0,0 +1,827 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2026 Stephen Hemminger
> > + */
> > +
> > +#include <inttypes.h>
> > +#include <stdbool.h>
> > +#include <stdio.h>
> > +#include <string.h>
> > +
> > +#include <rte_bus_vdev.h>
> > +#include <rte_cycles.h>
> > +#include <rte_ethdev.h>
> > +#include <rte_ether.h>
> > +#include <rte_lcore.h>
> > +#include <rte_mbuf.h>
> > +#include <rte_mempool.h>  
> 
> I do not think it is a problem, but just curious: how does test_pmd_ring.c 
> manage to use much fewer includes?

I tend to run the tool iwyu on the generated code. This gives a more correct
set of includes without implied includes.

> > +/* Test device names */
> > +#define NULL_DEV_NAME "net_null_test"
> > +
> > +static struct rte_mempool *mp;
> > +static uint16_t port_id = RTE_MAX_ETHPORTS;
> > +static bool port_created;
> > +static bool port_started;  
> 
> // snip setup and teardown functions, could find nothing to nitpick there

The code is exercising the device in multiple ways, so it didn't seem to need them.
Would be better to avoid global variables though. Like the mempool should
be set in setup.

> Default packet size repeats a few times in this file, can we have a global
> constant for it?
> 
> > +		TEST_ASSERT(bufs[i]->pkt_len == 64,
> > +			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
> > +		TEST_ASSERT(bufs[i]->data_len == 64,
> > +			    "Unexpected data_len: %u", bufs[i]->data_len);
> > +	}
> > +

Right, the test should use RTE_ETHER_MIN_LEN

> > +	/* Allocate mbufs for TX */
> > +	for (i = 0; i < BURST_SIZE; i++) {
> > +		bufs[i] = rte_pktmbuf_alloc(mp);
> > +		TEST_ASSERT(bufs[i] != NULL, "Failed to allocate mbuf");
> > +		bufs[i]->data_len = 64;
> > +		bufs[i]->pkt_len = 64;  
> 
> It would help to test sending packets of various sizes, especially spanning 
> multiple buffers or allocating them at the edge of buffer. May even uncover 
> some bugs in the copy mode implementation ;)

Good point.
The copy mode is making several assumptions already and I see bugs there.
It is confusing dummy_packet as mbuf versus data.


The test and the driver should also test multi-segments.

> > +	/* After start, link should be UP */
> > +	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
> > +		    "Expected link UP after start");
> > +	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
> > +		    "Expected 10G link speed");  
> 
> I am not sure it is important to test that the link speed is exactly 10G. Will 
> it be a bug if it becomes 25G tomorrow? Probably any valid value would do.

It is a dummy driver, speed really doesn't matter and should probably be ignored.

> > +static int
> > +test_null_dev_info(void)
> > +{
> > +	struct rte_eth_dev_info dev_info;
> > +	int ret;
> > +
> > +	ret = rte_eth_dev_info_get(port_id, &dev_info);
> > +	TEST_ASSERT(ret == 0, "Failed to get device info");
> > +
> > +	/* Verify expected device info values */
> > +	TEST_ASSERT(dev_info.max_mac_addrs == 1,
> > +		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
> > +	TEST_ASSERT(dev_info.max_rx_pktlen == (uint32_t)-1,
> > +		    "Unexpected max_rx_pktlen");  
> 
> Why is (uint32_t)-1 a valid packet length for this device? Can it actually 
> accept them?

This is an awkward way of saying UINT32_MAX.
And it means the driver has no limit on what the receive buffer might be.
This is not really right; it does have limits based on the device flags.

> 
> > +	TEST_ASSERT(dev_info.min_rx_bufsize == 0,
> > +		    "Expected min_rx_bufsize=0, got %u", dev_info.min_rx_bufsize);
> > +
> > +	/* Check TX offload capabilities */
> > +	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
> > +		    "Expected MULTI_SEGS TX offload capability");  
> 
> It sure sets these values, but are they correct values, and would anything else 
> be incorrect?

I think test is ok testing for what it expects to work.

> > +/*
> > + * Test: RSS configuration
> > + * Note: RSS requires multi-queue configuration
> > + */
> > +static int
> > +test_null_rss_config(void)
> > +{
> > +	struct rte_eth_dev_info dev_info;
> > +	struct rte_eth_rss_conf rss_conf;
> > +	struct rte_eth_conf port_conf = {0};
> > +	uint8_t rss_key[40];
> > +	uint16_t rss_port;
> > +	const uint16_t num_queues = 2;
> > +	uint16_t q;
> > +	int ret;
> > +
> > +	/* Create a new null device for RSS testing with multiple queues */
> > +	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
> > +	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
> > +
> > +	ret = rte_eth_dev_info_get(rss_port, &dev_info);
> > +	TEST_ASSERT(ret == 0, "Failed to get device info");
> > +
> > +	/* Configure with RSS enabled and multiple queues */
> > +	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
> > +	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
> > +
> > +	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
> > +	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
> > +
> > +	for (q = 0; q < num_queues; q++) {
> > +		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
> > +					     rte_eth_dev_socket_id(rss_port),
> > +					     NULL, mp);
> > +		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
> > +
> > +		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
> > +					     rte_eth_dev_socket_id(rss_port),
> > +					     NULL);
> > +		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
> > +	}
> > +
> > +	ret = rte_eth_dev_start(rss_port);
> > +	TEST_ASSERT(ret == 0, "Failed to start RSS port");
> > +
> > +	/* Get current RSS config */
> > +	memset(&rss_conf, 0, sizeof(rss_conf));
> > +	rss_conf.rss_key = rss_key;
> > +	rss_conf.rss_key_len = sizeof(rss_key);
> > +
> > +	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
> > +	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
> > +
> > +	/* Update RSS config with new key */
> > +	memset(rss_key, 0x55, sizeof(rss_key));
> > +	rss_conf.rss_key = rss_key;
> > +	rss_conf.rss_key_len = sizeof(rss_key);
> > +	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
> > +
> > +	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
> > +	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
> > +
> > +	/* Verify the update */
> > +	memset(rss_key, 0, sizeof(rss_key));
> > +	rss_conf.rss_key = rss_key;
> > +
> > +	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
> > +	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
> > +
> > +	/* Verify key was updated */
> > +	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
> > +		TEST_ASSERT(rss_key[i] == 0x55,
> > +			    "RSS key not updated at byte %u", i);
> > +	}
> > +  
> 
> Can we receive and send something from/to these queues and verify resulting 
> statistics? Statistics is one of the most important use cases for the null 
> device, and it does benefit from multi-queue.

Use of RSS in Null PMD is odd. It really doesn't do RSS. It is lying about it.
The packets are never hashed on receive. Looks like it was just a way to exercise
the RSS API's.

My goal is to get some tests for vdev's where there are none now, and fix
some of the obvious wonky stuff in the drivers (like doing stats in odd and
slow ways).



^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v2 0/2] net/null: add a test
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
  2026-01-05 14:49 ` Marat Khalili
@ 2026-01-06 16:47 ` Stephen Hemminger
  2026-01-06 16:47   ` [PATCH v2 1/2] test: add a test for null PMD Stephen Hemminger
  2026-01-06 16:47   ` [PATCH v2 2/2] net/null: revise info_get Stephen Hemminger
  2026-01-08 20:40 ` [PATCH v3 0/3] test: new test for null PMD Stephen Hemminger
                   ` (8 subsequent siblings)
  10 siblings, 2 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-06 16:47 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test and small improvement for null PMD

v2 - review feedback and small improvement

Stephen Hemminger (2):
  test: add a test for null PMD
  net/null: revise info_get

 app/test/meson.build            |   1 +
 app/test/test_pmd_null.c        | 811 ++++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c |  11 +-
 3 files changed, 816 insertions(+), 7 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v2 1/2] test: add a test for null PMD
  2026-01-06 16:47 ` [PATCH v2 0/2] net/null: add a test Stephen Hemminger
@ 2026-01-06 16:47   ` Stephen Hemminger
  2026-01-06 17:40     ` Marat Khalili
  2026-01-06 16:47   ` [PATCH v2 2/2] net/null: revise info_get Stephen Hemminger
  1 sibling, 1 reply; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-06 16:47 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD.
This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 802 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 803 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index efec42a6bf..a45991ec34 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..c6a8bba701
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,802 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	struct rte_eth_dev_info dev_info;
+	int ret;
+
+	ret = rte_eth_dev_info_get(pid, &dev_info);
+	if (ret != 0) {
+		printf("Failed to get device info for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* RX should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Create random valid ethernet packets */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+		m->data_len = len;
+		m->buf_len = len;
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode
+ */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
+
+	/* Free RX mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-RX mode
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info
+ */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+	TEST_ASSERT(dev_info.max_rx_pktlen == UINT32_MAX,
+		    "Unexpected max_rx_pktlen");
+	TEST_ASSERT(dev_info.min_rx_bufsize == 0,
+		    "Expected min_rx_bufsize=0, got %u", dev_info.min_rx_bufsize);
+
+	/* Check TX offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple RX/TX bursts
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple RX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple TX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, true, true, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v2 2/2] net/null: revise info_get
  2026-01-06 16:47 ` [PATCH v2 0/2] net/null: add a test Stephen Hemminger
  2026-01-06 16:47   ` [PATCH v2 1/2] test: add a test for null PMD Stephen Hemminger
@ 2026-01-06 16:47   ` Stephen Hemminger
  1 sibling, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-06 16:47 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The dev_info_get callback does not need to check for invalid arguments
since that is already done at ethdev layer.

Make the min/max MTU values reflect the most an Ethernet type
PMD would allow.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/test_pmd_null.c        | 13 +++++++++++--
 drivers/net/null/rte_eth_null.c | 11 ++++-------
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
index c6a8bba701..ee62d7ecb5 100644
--- a/app/test/test_pmd_null.c
+++ b/app/test/test_pmd_null.c
@@ -430,6 +430,10 @@ static int
 test_null_dev_info(void)
 {
 	struct rte_eth_dev_info dev_info;
+	const uint16_t jumbo_mtu = RTE_ETHER_MAX_JUMBO_FRAME_LEN
+		- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
 	int ret;
 
 	ret = rte_eth_dev_info_get(port_id, &dev_info);
@@ -438,8 +442,13 @@ test_null_dev_info(void)
 	/* Verify expected device info values */
 	TEST_ASSERT(dev_info.max_mac_addrs == 1,
 		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
-	TEST_ASSERT(dev_info.max_rx_pktlen == UINT32_MAX,
-		    "Unexpected max_rx_pktlen");
+
+	TEST_ASSERT(dev_info.max_mtu == jumbo_mtu,
+		    "Unexpected max_mtu: %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.max_rx_pktlen == RTE_ETHER_MAX_JUMBO_FRAME_LEN,
+		    "Unexpected max_rx_pktlen: %u", dev_info.max_rx_pktlen);
 	TEST_ASSERT(dev_info.min_rx_bufsize == 0,
 		    "Expected min_rx_bufsize=0, got %u", dev_info.min_rx_bufsize);
 
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..2928f0d2d5 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,19 +298,16 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
+	dev_info->max_rx_pktlen = RTE_ETHER_MAX_JUMBO_FRAME_LEN;
+	dev_info->max_mtu = RTE_ETHER_MAX_JUMBO_FRAME_LEN
+		- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN;
 	dev_info->reta_size = internals->reta_size;
 	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
 	dev_info->hash_key_size = sizeof(internals->rss_key);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* RE: [PATCH v2 1/2] test: add a test for null PMD
  2026-01-06 16:47   ` [PATCH v2 1/2] test: add a test for null PMD Stephen Hemminger
@ 2026-01-06 17:40     ` Marat Khalili
  2026-01-06 18:01       ` Stephen Hemminger
  0 siblings, 1 reply; 61+ messages in thread
From: Marat Khalili @ 2026-01-06 17:40 UTC (permalink / raw)
  To: Stephen Hemminger, dev@dpdk.org

> +/* Create random valid ethernet packets */
> +static int
> +test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
> +{
> +	unsigned int i;
> +
> +	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
> +		return -1;
> +
> +	for (i = 0; i < burst_size; i++) {
> +		struct rte_mbuf *m = bufs[i];
> +		uint16_t len;
> +
> +		/* Choose random length between ether min and available space */
> +		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)

Nit: could technically use rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN + 1.

> +			+ RTE_ETHER_MIN_LEN;
> +		m->data_len = len;
> +		m->buf_len = len;

Not sure why we are changing buf_len here.

> +	}
> +	return 0;
> +}

Would still be cool to verify non-standard data_off.

// snip

> +/*
> + * Test: MAC address operations
> + */
> +static int
> +test_null_mac_addr(void)
> +{
> +	struct rte_ether_addr mac_addr;
> +	struct rte_ether_addr new_mac = {
> +		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
> +	};
> +	int ret;
> +
> +	/* Get current MAC address */
> +	ret = rte_eth_macaddr_get(port_id, &mac_addr);
> +	TEST_ASSERT(ret == 0, "Failed to get MAC address");
> +
> +	/* Set new MAC address */
> +	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
> +	TEST_ASSERT(ret == 0, "Failed to set MAC address");
> +

Still not checking that it actually does something.

> +	return TEST_SUCCESS;
> +}

I still wish the test was more behavior- and less implementation-based.
It almost feels like it works around existing bugs in the module now.
However I agree that no tests whatsoever is probably worse, therefore

Acked-by: Marat Khalili <marat.khalili@huawei.com>

^ permalink raw reply	[flat|nested] 61+ messages in thread

* Re: [PATCH v2 1/2] test: add a test for null PMD
  2026-01-06 17:40     ` Marat Khalili
@ 2026-01-06 18:01       ` Stephen Hemminger
  0 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-06 18:01 UTC (permalink / raw)
  To: Marat Khalili; +Cc: dev@dpdk.org

On Tue, 6 Jan 2026 17:40:43 +0000
Marat Khalili <marat.khalili@huawei.com> wrote:

> > +/* Create random valid ethernet packets */
> > +static int
> > +test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
> > +{
> > +	unsigned int i;
> > +
> > +	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
> > +		return -1;
> > +
> > +	for (i = 0; i < burst_size; i++) {
> > +		struct rte_mbuf *m = bufs[i];
> > +		uint16_t len;
> > +
> > +		/* Choose random length between ether min and available space */
> > +		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)  
> 
> Nit: could technically use rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN + 1.
> 
> > +			+ RTE_ETHER_MIN_LEN;
> > +		m->data_len = len;
> > +		m->buf_len = len;  
> 
> Not sure why we are changing buf_len here.
> 
> > +	}
> > +	return 0;
> > +}  

Next version is going to use packet generator stuff that is in test infra now.

^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v3 0/3] test: new test for null PMD
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
  2026-01-05 14:49 ` Marat Khalili
  2026-01-06 16:47 ` [PATCH v2 0/2] net/null: add a test Stephen Hemminger
@ 2026-01-08 20:40 ` Stephen Hemminger
  2026-01-08 20:40   ` [PATCH v3 1/3] net/null: cleanup info_get Stephen Hemminger
                     ` (2 more replies)
  2026-01-10 17:22 ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
                   ` (7 subsequent siblings)
  10 siblings, 3 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-08 20:40 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series adds a small optimization to net/null and
adds a unit test for the null PMD.

v3 - review feedback
   - generalize packet generator to handle bigger frames
   - add test for lockless Tx

Stephen Hemminger (3):
  net/null: cleanup info_get
  test: allow larger packet sizes
  test: add a test for null PMD

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  30 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 968 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   |   9 +-
 5 files changed, 993 insertions(+), 19 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v3 1/3] net/null: cleanup info_get
  2026-01-08 20:40 ` [PATCH v3 0/3] test: new test for null PMD Stephen Hemminger
@ 2026-01-08 20:40   ` Stephen Hemminger
  2026-01-08 20:40   ` [PATCH v3 2/3] test: allow larger packet sizes Stephen Hemminger
  2026-01-08 20:40   ` [PATCH v3 3/3] test: add a test for null PMD Stephen Hemminger
  2 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-08 20:40 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The info_get callback doesn't need to check its arguments
since that is already done by the ethdev layer. The maximum
packet size allowed by this dummy driver is limited only by
the maximum values of the mbuf fields.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..3e743a23a9 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,17 +298,12 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v3 2/3] test: allow larger packet sizes
  2026-01-08 20:40 ` [PATCH v3 0/3] test: new test for null PMD Stephen Hemminger
  2026-01-08 20:40   ` [PATCH v3 1/3] net/null: cleanup info_get Stephen Hemminger
@ 2026-01-08 20:40   ` Stephen Hemminger
  2026-01-09 15:00     ` Morten Brørup
  2026-01-08 20:40   ` [PATCH v3 3/3] test: add a test for null PMD Stephen Hemminger
  2 siblings, 1 reply; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-08 20:40 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

The packet length in the packet burst generator was uint8_t,
which limited its usefulness for testing larger packet sizes.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 30 ++++++++++++++++++++----------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..4f659e4d71 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -216,13 +216,18 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		int nb_pkt_per_burst, uint16_t pkt_len, uint8_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+	uint16_t last_seg_data_len;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	last_seg_data_len = pkt_seg_data_len + (pkt_len % nb_pkt_segs);
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -246,7 +251,7 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 			if (i != nb_pkt_segs - 1)
 				pkt_seg->data_len = pkt_seg_data_len;
 			else
-				pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs;
+				pkt_seg->data_len = last_seg_data_len;
 		}
 		pkt_seg->next = NULL; /* Last segment of packet. */
 
@@ -300,13 +305,18 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		int nb_pkt_per_burst, uint16_t pkt_len, uint8_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+	uint16_t last_seg_data_len;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	last_seg_data_len = pkt_seg_data_len + (pkt_len % nb_pkt_segs);
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -330,7 +340,7 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 			if (i != nb_pkt_segs - 1)
 				pkt_seg->data_len = pkt_seg_data_len;
 			else
-				pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs;
+				pkt_seg->data_len = last_seg_data_len;
 		}
 		pkt_seg->next = NULL; /* Last segment of packet. */
 
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..600addee3a 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		int nb_pkt_per_burst, uint16_t pkt_len, uint8_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		int nb_pkt_per_burst, uint16_t pkt_len, uint8_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v3 3/3] test: add a test for null PMD
  2026-01-08 20:40 ` [PATCH v3 0/3] test: new test for null PMD Stephen Hemminger
  2026-01-08 20:40   ` [PATCH v3 1/3] net/null: cleanup info_get Stephen Hemminger
  2026-01-08 20:40   ` [PATCH v3 2/3] test: allow larger packet sizes Stephen Hemminger
@ 2026-01-08 20:40   ` Stephen Hemminger
  2026-01-09  1:21     ` Stephen Hemminger
  2 siblings, 1 reply; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-08 20:40 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes
and lockless transmit.

The original version of the test was generated with Claude based off
of the existing test_pmd_ring.c, with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 968 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 969 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index efec42a6bf..a45991ec34 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..1790dc891c
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	struct rte_eth_dev_info dev_info;
+	int ret;
+
+	ret = rte_eth_dev_info_get(pid, &dev_info);
+	if (ret != 0) {
+		printf("Failed to get device info for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* RX should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Create random valid ethernet packets */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+		m->data_len = len;
+		m->buf_len = len;
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode
+ */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
+
+	/* Free RX mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-RX mode
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info
+ */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check TX offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple RX/TX bursts
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple RX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple TX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Multi-threaded TX test structures and worker function
+ */
+#define MT_TX_BURSTS_PER_LCORE 100
+#define MT_TX_BURST_SIZE 8
+
+struct mt_tx_args {
+	uint16_t port;
+	uint16_t queue;
+	struct rte_mempool *pool;
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	args->tx_count = 0;
+	args->error = 0;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (unsigned int i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded TX on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE];
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/* Create larger mempool for multi-threaded test */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Reset stats before test */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].queue = 0;  /* All use same queue */
+		worker_args[num_workers].pool = mt_pool;
+		worker_args[num_workers].tx_count = 0;
+		worker_args[num_workers].error = 0;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT TX test\n", num_workers);
+
+	/* Wait for all workers to complete */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up TX counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Verify stats match expected */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total TX from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Cleanup */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, true, true, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* Re: [PATCH v3 3/3] test: add a test for null PMD
  2026-01-08 20:40   ` [PATCH v3 3/3] test: add a test for null PMD Stephen Hemminger
@ 2026-01-09  1:21     ` Stephen Hemminger
  0 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-09  1:21 UTC (permalink / raw)
  To: dev

On Thu,  8 Jan 2026 12:40:48 -0800
Stephen Hemminger <stephen@networkplumber.org> wrote:

> Add a test for null PMD including different packet sizes
> and lockless transmit.
> 
> Original version of  test was generated with Claude based off
> of existing test_pmd_ring.c with some cleanup afterwards.
> 
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
>  app/test/meson.build     |   1 +
>  app/test/test_pmd_null.c | 968 +++++++++++++++++++++++++++++++++++++++
>  2 files changed, 969 insertions(+)
>  create mode 100644 app/test/test_pmd_null.c
> 
> diff --git a/app/test/meson.build b/app/test/meson.build
> index efec42a6bf..a45991ec34 100644
> --- a/app/test/meson.build
> +++ b/app/test/meson.build
> @@ -141,6 +141,7 @@ source_file_deps = {
>      'test_per_lcore.c': [],
>      'test_pflock.c': [],
>      'test_pie.c': ['sched'],
> +    'test_pmd_null.c': ['net_ring', 'ethdev', 'bus_vdev'],

Should be net_null here not net_ring

^ permalink raw reply	[flat|nested] 61+ messages in thread

* RE: [PATCH v3 2/3] test: allow larger packet sizes
  2026-01-08 20:40   ` [PATCH v3 2/3] test: allow larger packet sizes Stephen Hemminger
@ 2026-01-09 15:00     ` Morten Brørup
  2026-01-10 17:21       ` Stephen Hemminger
  0 siblings, 1 reply; 61+ messages in thread
From: Morten Brørup @ 2026-01-09 15:00 UTC (permalink / raw)
  To: Stephen Hemminger, dev

>  generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf
> **pkts_burst,
>  		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
>  		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
> -		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t
> nb_pkt_segs);
> +		int nb_pkt_per_burst, uint16_t pkt_len, uint8_t
> nb_pkt_segs);

The pkt_len field in rte_mbuf is uint32_t.
You could upgrade the packet length here to uint32_t too, instead of upgrading to only uint16_t.

<feature creep>
While you are at it, the nb_segs field in rte_mbuf is uint16_t, you could also upgrade the number of segments to uint16_t.
</feature creep>


^ permalink raw reply	[flat|nested] 61+ messages in thread

* Re: [PATCH v3 2/3] test: allow larger packet sizes
  2026-01-09 15:00     ` Morten Brørup
@ 2026-01-10 17:21       ` Stephen Hemminger
  0 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-10 17:21 UTC (permalink / raw)
  To: Morten Brørup; +Cc: dev

On Fri, 9 Jan 2026 16:00:00 +0100
Morten Brørup <mb@smartsharesystems.com> wrote:

> >  generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf
> > **pkts_burst,
> >  		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
> >  		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
> > -		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t
> > nb_pkt_segs);
> > +		int nb_pkt_per_burst, uint16_t pkt_len, uint8_t
> > nb_pkt_segs);  
> 
> The pkt_len field in rte_mbuf is uint32_t.
> You could upgrade the packet length here to uint32_t too, instead of upgrading to only uint16_t.

This code is generating UDP packets, and the maximum size allowed with UDP is UINT16_MAX.
IP packet length is also limited to 16 bits.

But you comment did motivate fixing to allow more packet segments.

^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v4 0/3] null pmd minor cleanup and add test
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (2 preceding siblings ...)
  2026-01-08 20:40 ` [PATCH v3 0/3] test: new test for null PMD Stephen Hemminger
@ 2026-01-10 17:22 ` Stephen Hemminger
  2026-01-10 17:22   ` [PATCH v4 1/3] net/null: cleanup info_get Stephen Hemminger
                     ` (3 more replies)
  2026-01-14 18:30 ` [PATCH v5 0/3] test: add null PMD test suite Stephen Hemminger
                   ` (6 subsequent siblings)
  10 siblings, 4 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-10 17:22 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series adds small optimization to net/null and
adds a unit test for null PMD.

v4 - change parameters to packet burst

Stephen Hemminger (3):
  net/null: cleanup info_get
  test: generate larger packet bursts
  test: add a test for null PMD

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  38 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 968 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   |   9 +-
 5 files changed, 994 insertions(+), 26 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v4 1/3] net/null: cleanup info_get
  2026-01-10 17:22 ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
@ 2026-01-10 17:22   ` Stephen Hemminger
  2026-01-10 17:22   ` [PATCH v4 2/3] test: generate larger packet bursts Stephen Hemminger
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-10 17:22 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The info_get callback doesn't need to check its args
since already done by ethdev.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..3e743a23a9 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,17 +298,12 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v4 2/3] test: generate larger packet bursts
  2026-01-10 17:22 ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
  2026-01-10 17:22   ` [PATCH v4 1/3] net/null: cleanup info_get Stephen Hemminger
@ 2026-01-10 17:22   ` Stephen Hemminger
  2026-01-10 17:22   ` [PATCH v4 3/3] test: add a test for null PMD Stephen Hemminger
  2026-01-12  0:56   ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
  3 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-10 17:22 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

The packet length in packet burst generator was uint8_t which
limited usefulness for testing larger packet sizes.

The number of packets segments per packet is currently limited
by mbuf nb_segs which is 16 bits. The comment is incorrect.

Change nb_pkt_per_burst to uint16_t since that is the limit
for tx_burst.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 38 +++++++++++++++++--------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..548296e33e 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,23 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+	uint16_t last_seg_data_len;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	last_seg_data_len = pkt_seg_data_len + (pkt_len % nb_pkt_segs);
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -243,10 +242,10 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 				goto nomore_mbuf;
 			}
 			pkt_seg = pkt_seg->next;
-			if (i != nb_pkt_segs - 1)
+			if (i != (int)(nb_pkt_segs - 1))
 				pkt_seg->data_len = pkt_seg_data_len;
 			else
-				pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs;
+				pkt_seg->data_len = last_seg_data_len;
 		}
 		pkt_seg->next = NULL; /* Last segment of packet. */
 
@@ -300,13 +299,18 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+	uint16_t last_seg_data_len;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	last_seg_data_len = pkt_seg_data_len + (pkt_len % nb_pkt_segs);
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -330,7 +334,7 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 			if (i != nb_pkt_segs - 1)
 				pkt_seg->data_len = pkt_seg_data_len;
 			else
-				pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs;
+				pkt_seg->data_len = last_seg_data_len;
 		}
 		pkt_seg->next = NULL; /* Last segment of packet. */
 
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v4 3/3] test: add a test for null PMD
  2026-01-10 17:22 ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
  2026-01-10 17:22   ` [PATCH v4 1/3] net/null: cleanup info_get Stephen Hemminger
  2026-01-10 17:22   ` [PATCH v4 2/3] test: generate larger packet bursts Stephen Hemminger
@ 2026-01-10 17:22   ` Stephen Hemminger
  2026-01-12  0:56   ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
  3 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-10 17:22 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 968 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 969 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index efec42a6bf..a45991ec34 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
> +    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..1790dc891c
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+/*
+ * Create a null vdev with the given devargs string (may be NULL) and
+ * return its port id via out_port_id.
+ * On failure the vdev is uninitialized here, so the caller does not
+ * need to clean up. Returns 0 on success, negative errno otherwise.
+ */
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure a port with one RX and one TX queue (default settings,
+ * mbufs taken from the global test mempool) and start it.
+ * Returns 0 on success, negative errno on the first failing step.
+ */
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	struct rte_eth_dev_info dev_info;
+	int ret;
+
+	/* info_get also validates that the port id is usable */
+	ret = rte_eth_dev_info_get(pid, &dev_info);
+	if (ret != 0) {
+		printf("Failed to get device info for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite setup: create the shared mbuf pool and the primary null port
+ * used by most test cases. Returns 0 on success, -1 on failure
+ * (teardown is still run by the suite runner and frees what exists).
+ */
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite teardown: stop/close the shared port, remove the vdev and
+ * free the mempool; resets globals so a re-run starts clean.
+ */
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ *
+ * The null PMD synthesizes packets on RX, so a burst request is
+ * expected to be fully satisfied with fixed-size mbufs.
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* RX should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Create a burst of random-length packets for transmit tests.
+ *
+ * Allocates burst_size mbufs from the shared mempool and assigns each
+ * a random length in [RTE_ETHER_MIN_LEN, tailroom). Both data_len and
+ * pkt_len are set so TX byte statistics are accounted correctly.
+ *
+ * Returns 0 on success, -1 if bulk allocation fails.
+ */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+		m->data_len = len;
+		/*
+		 * Set pkt_len (total frame length), not buf_len: buf_len is
+		 * the mbuf's buffer capacity and must never be overwritten;
+		 * clobbering it corrupts the mbuf when it is freed/reused.
+		 */
+		m->pkt_len = len;
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ *
+ * The null PMD consumes every transmitted mbuf, so the mempool's
+ * available count must be back to (at least) its pre-TX level.
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification
+ *
+ * After a reset, one RX burst and one TX burst must be reflected
+ * exactly in ipackets/opackets. TX mbufs are consumed by the PMD;
+ * only the RX mbufs need freeing here.
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size
+ *
+ * Creates a second null vdev with devargs "size=256" and checks that
+ * RX packets carry that length instead of the 64-byte default.
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode
+ *
+ * With devargs "copy=1" the PMD memcpy's packet data on RX/TX;
+ * this only checks that RX still delivers a full burst in that mode.
+ */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
+
+	/* Free RX mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-RX mode
+ *
+ * With devargs "no-rx=1" receive bursts return nothing, while
+ * transmit still consumes and frees packets.
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status
+ *
+ * Link must report UP/10G/full-duplex while started and DOWN after
+ * stop; the port is restarted at the end for subsequent test cases.
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info
+ *
+ * Checks the values the null PMD advertises via info_get: one MAC
+ * address, unrestricted MTU range, MULTI_SEGS/MT_LOCKFREE TX offloads
+ * and RSS parameters (40-byte hash key, non-zero RETA).
+ */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check TX offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple RX/TX bursts
+ *
+ * Runs num_bursts bursts in each direction and checks the aggregate
+ * packet counters match num_bursts * BURST_SIZE exactly.
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple RX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple TX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ *
+ * Uses a dedicated vdev configured with two RX/TX queues, then
+ * round-trips the RSS hash key: get, update with a 0x55 pattern,
+ * and get again to verify the update stuck.
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ *
+ * Uses a dedicated two-queue vdev, programs a queue-alternating
+ * redirection table, reads it back and verifies every entry.
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config: entry k maps to queue (k % num_queues) */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ *
+ * Receive one burst on the shared test port (port_id, set up by the
+ * suite setup), confirm ipackets moved, then reset and confirm the
+ * four basic counters all read back as zero.
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ *
+ * Read the port's MAC address, set a new one, and read it back to
+ * confirm the change actually took effect (set alone only proves the
+ * callback returned success).
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	/* Read it back and verify the update is visible */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address after set");
+	TEST_ASSERT(rte_is_same_ether_addr(&mac_addr, &new_mac),
+		    "MAC address was not updated");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ *
+ * The null PMD comes up with both modes already enabled; the ethdev
+ * getters return 1 when the corresponding mode is on.
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	/* Promiscuous mode must be reported as enabled after start */
+	TEST_ASSERT(rte_eth_promiscuous_get(port_id) == 1,
+		    "Expected promiscuous mode enabled");
+
+	/* Likewise for allmulticast */
+	TEST_ASSERT(rte_eth_allmulticast_get(port_id) == 1,
+		    "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Multi-threaded TX test structures and worker function
+ */
+#define MT_TX_BURSTS_PER_LCORE 100	/* bursts each worker transmits */
+#define MT_TX_BURST_SIZE 8		/* mbufs per burst */
+
+/* Per-worker arguments (in) and results (out) for mt_tx_worker() */
+struct mt_tx_args {
+	uint16_t port;		/* port to transmit on */
+	uint16_t queue;		/* TX queue id, shared by all workers */
+	struct rte_mempool *pool;	/* pool to allocate mbufs from */
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+/*
+ * Worker body for the MT_LOCKFREE TX test.  Repeatedly allocates a
+ * small burst from args->pool, gives each mbuf a minimal Ethernet
+ * frame length, and transmits on the queue shared with the other
+ * workers.  Per-worker results are reported back through *args.
+ *
+ * Returns 0 on success, -1 if mbuf allocation failed (args->error is
+ * set to -ENOMEM in that case).
+ */
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	args->tx_count = 0;
+	args->error = 0;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (unsigned int i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets so nothing leaks from the pool */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded TX on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ * Cross-checks the per-worker TX counts against the port's opackets.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE];
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/* Create larger mempool for multi-threaded test; sized so every
+	 * worker can hold two bursts' worth of mbufs in flight.
+	 */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* NOTE(review): any TEST_ASSERT failure from here on returns
+	 * without freeing mt_pool (and, once launched, without waiting
+	 * for workers); a leftover "mt_tx_pool" would make the next run
+	 * of this case fail to create the pool.  TODO confirm acceptable.
+	 */
+
+	/* Reset stats before test */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers; the hard-coded 4 must stay in sync with the
+	 * num_workers cap above.
+	 */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].queue = 0;  /* All use same queue */
+		worker_args[num_workers].pool = mt_pool;
+		worker_args[num_workers].tx_count = 0;
+		worker_args[num_workers].error = 0;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT TX test\n", num_workers);
+
+	/* Wait for all workers to complete.  This iterates every worker
+	 * lcore, including any that were not launched; presumably
+	 * rte_eal_wait_lcore() returns immediately for idle lcores --
+	 * TODO confirm against EAL docs.
+	 */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up TX counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Verify stats match expected */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total TX from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Cleanup */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+/* Registered test cases for the null PMD.  The suite setup/teardown
+ * run once around the whole list and manage the port and mempool the
+ * cases share.
+ */
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Entry point: run the null PMD suite under the unit test runner. */
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, true, true, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* Re: [PATCH v4 0/3] null pmd minor cleanup and add test
  2026-01-10 17:22 ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
                     ` (2 preceding siblings ...)
  2026-01-10 17:22   ` [PATCH v4 3/3] test: add a test for null PMD Stephen Hemminger
@ 2026-01-12  0:56   ` Stephen Hemminger
  3 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-12  0:56 UTC (permalink / raw)
  To: dev

On Sat, 10 Jan 2026 09:22:35 -0800
Stephen Hemminger <stephen@networkplumber.org> wrote:

> This series adds small optimization to net/null and
> adds a unit test for null PMD.
> 
> v4 - change parameters to packet burst
> 
> Stephen Hemminger (3):
>   net/null: cleanup info_get
>   test: generate larger packet bursts
>   test: add a test for null PMD
> 
>  app/test/meson.build              |   1 +
>  app/test/packet_burst_generator.c |  38 +-
>  app/test/packet_burst_generator.h |   4 +-
>  app/test/test_pmd_null.c          | 968 ++++++++++++++++++++++++++++++
>  drivers/net/null/rte_eth_null.c   |   9 +-
>  5 files changed, 994 insertions(+), 26 deletions(-)
>  create mode 100644 app/test/test_pmd_null.c
> 

AI code review feedback:

Now let me provide a comprehensive review of these three patches according to the DPDK guidelines.

---

## DPDK Patch Review: null PMD changes (v4 1-3/3)

### Patch 1/3: `net/null: cleanup info_get`

**Commit Message:**
- ✅ Subject line: 29 characters (≤60 limit)
- ✅ Format: `net/null: cleanup info_get` - correct prefix format
- ✅ Lowercase after colon
- ✅ No trailing period
- ✅ `Signed-off-by:` present with real name and email
- ✅ Body wrapped appropriately

**Code Review:**

| Severity | Issue |
|----------|-------|
| ✅ Good | Removing redundant NULL checks - ethdev validates arguments before calling driver callbacks |
| ✅ Good | `UINT32_MAX` is cleaner than `(uint32_t)-1` |
| ✅ Good | Removing `min_rx_bufsize = 0` since struct is zero-initialized |
| ✅ Good | Combining declaration with initialization |

**Verdict: LGTM** - Clean, straightforward improvement.

---

### Patch 2/3: `test: generate larger packet bursts`

**Commit Message:**

| Severity | Issue |
|----------|-------|
| ⚠️ Warning | Subject prefix `test:` should be `app/test:` per DPDK conventions |
| ✅ Good | Subject ≤60 characters |
| ✅ Good | Signed-off-by present |
| ✅ Good | Body explains the rationale clearly |

**Code Review:**

| Severity | Issue |
|----------|-------|
| ⚠️ Warning | Line 245: `if (i != (int)(nb_pkt_segs - 1))` - awkward cast. Consider changing loop variable `i` to `uint16_t` to avoid mixed signed/unsigned comparison |
| ℹ️ Info | The comment removal about `RTE_MAX_SEGS_PER_PKT` is good - the old comment was misleading (`nb_segs` is 16-bit, not 8-bit) |

**Suggested fix for the cast:**
```c
// Either change loop variable:
for (uint16_t seg = 1; seg < nb_pkt_segs; seg++) {
    ...
    if (seg != nb_pkt_segs - 1)

// Or keep int and compare differently:
if (i < nb_pkt_segs - 1)
```

---

### Patch 3/3: `test: add a test for null PMD`

**Commit Message:**

| Severity | Issue |
|----------|-------|
| ⚠️ Warning | Subject prefix `test:` should be `app/test:` per DPDK conventions |
| ✅ Good | Subject ≤60 characters |
| ✅ Good | Signed-off-by present |

**Code Review:**

| Severity | Issue | Location |
|----------|-------|----------|
| ❌ Error | **Wrong dependency in meson.build**: `'net_ring'` should be `'net_null'` - the test uses null PMD, not ring PMD | `app/test/meson.build` |
| ❌ Error | **Bug in `test_mbuf_setup_burst()`**: Setting `m->buf_len = len` is incorrect. `buf_len` is the buffer size, not packet length. This corrupts the mbuf metadata and could cause memory corruption. Should only set `data_len` and `pkt_len`. | Lines 200-201 |
| ⚠️ Warning | Missing return value checks in `test_null_teardown()` for `rte_eth_dev_stop()` and `rte_eth_dev_close()` | Lines 137-138 |
| ⚠️ Warning | `test_null_mac_addr()` sets MAC but doesn't verify the change took effect | Lines 571-586 |
| ℹ️ Info | Good use of `TEST_SKIPPED` for multi-threaded test when insufficient lcores available |
| ✅ Good | SPDX license identifier on first line |
| ✅ Good | Copyright immediately follows SPDX |
| ✅ Good | Proper include ordering (system → DPDK → local) |
| ✅ Good | Comprehensive test coverage: basic RX/TX, stats, custom size, copy mode, no-rx mode, link status, RSS, RETA, MT |

**Critical fix needed in `test_mbuf_setup_burst()`:**
```c
static int
test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
{
	unsigned int i;

	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
		return -1;

	for (i = 0; i < burst_size; i++) {
		struct rte_mbuf *m = bufs[i];
		uint16_t len;

		/* Choose random length between ether min and available space */
		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
			+ RTE_ETHER_MIN_LEN;
		m->data_len = len;
-		m->buf_len = len;    /* ERROR: buf_len should not be modified! */
+		m->pkt_len = len;    /* Set total packet length */
	}
	return 0;
}
```

**Fix for meson.build:**
```python
-    'test_pmd_null.c': ['net_ring', 'ethdev', 'bus_vdev'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
```

---

## Summary

| Patch | Status | Action Required |
|-------|--------|-----------------|
| 1/3 `net/null: cleanup info_get` | ✅ Ready | None - LGTM |
| 2/3 `test: generate larger packet bursts` | ⚠️ Minor issues | Consider prefix fix and cleaner loop variable |
| 3/3 `test: add a test for null PMD` | ❌ Needs v5 | Fix `buf_len` bug, fix meson dependency |

The critical issues in patch 3/3 (the `buf_len` corruption bug and wrong meson dependency) should be fixed before merging.

^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v5 0/3] test: add null PMD test suite
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (3 preceding siblings ...)
  2026-01-10 17:22 ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
@ 2026-01-14 18:30 ` Stephen Hemminger
  2026-01-14 18:30   ` [PATCH v5 1/3] net/null: cleanup info_get Stephen Hemminger
                     ` (2 more replies)
  2026-01-18 16:50 ` [PATCH v6 0/3] test: add null PMD test suite Stephen Hemminger
                   ` (5 subsequent siblings)
  10 siblings, 3 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-14 18:30 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series adds a comprehensive test suite for the null PMD driver.

The first patch does a minor cleanup of the null PMD info_get callback,
removing unnecessary argument validation that is already performed by
the ethdev layer.

The second patch extends the packet burst generator to support larger
packet sizes by changing the pkt_len parameter from uint8_t to uint16_t.
This also fixes an incorrect comment about the maximum number of
segments per packet.

The third patch adds a new test suite for the null PMD covering:
  - Basic RX/TX operations
  - Statistics tracking and reset
  - Custom packet sizes
  - Copy mode and no-rx mode
  - Link status
  - Device info validation
  - RSS and RETA configuration
  - MAC address operations
  - Promiscuous and allmulticast modes
  - Multi-threaded TX (MT_LOCKFREE capability)

v5 - fix issues found from AI code review

Stephen Hemminger (3):
  net/null: cleanup info_get
  test: generate larger packet bursts
  test: add a test for null PMD

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  34 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 964 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   |   9 +-
 5 files changed, 989 insertions(+), 23 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v5 1/3] net/null: cleanup info_get
  2026-01-14 18:30 ` [PATCH v5 0/3] test: add null PMD test suite Stephen Hemminger
@ 2026-01-14 18:30   ` Stephen Hemminger
  2026-01-14 18:30   ` [PATCH v5 2/3] test: generate larger packet bursts Stephen Hemminger
  2026-01-14 18:30   ` [PATCH v5 3/3] test: add a test for null PMD Stephen Hemminger
  2 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-14 18:30 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The info_get callback doesn't need to check its args
since already done by ethdev.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..3e743a23a9 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,17 +298,12 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v5 2/3] test: generate larger packet bursts
  2026-01-14 18:30 ` [PATCH v5 0/3] test: add null PMD test suite Stephen Hemminger
  2026-01-14 18:30   ` [PATCH v5 1/3] net/null: cleanup info_get Stephen Hemminger
@ 2026-01-14 18:30   ` Stephen Hemminger
  2026-01-14 18:30   ` [PATCH v5 3/3] test: add a test for null PMD Stephen Hemminger
  2 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-14 18:30 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

The packet length in packet burst generator was uint8_t which
limited usefulness for testing larger packet sizes.

The number of packet segments per packet is currently limited
by the mbuf nb_segs field, which is 16 bits wide; the removed
comment claiming an 8-bit limit was incorrect.

Change nb_pkt_per_burst to uint16_t since that is the limit
for tx_burst.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 34 ++++++++++++++++++-------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..346c69e6ce 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,24 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -300,13 +300,19 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v5 3/3] test: add a test for null PMD
  2026-01-14 18:30 ` [PATCH v5 0/3] test: add null PMD test suite Stephen Hemminger
  2026-01-14 18:30   ` [PATCH v5 1/3] net/null: cleanup info_get Stephen Hemminger
  2026-01-14 18:30   ` [PATCH v5 2/3] test: generate larger packet bursts Stephen Hemminger
@ 2026-01-14 18:30   ` Stephen Hemminger
  2 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-14 18:30 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 964 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 965 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index efec42a6bf..1d4cb16e08 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..e1751686af
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,964 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+/*
+ * Create a null vdev named @name with device args @args and return its
+ * ethdev port id via @out_port_id.  On lookup failure the vdev is torn
+ * down again so the caller has nothing to clean up.
+ * Returns 0 on success, non-zero error code otherwise.
+ */
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure port @pid with one RX queue (backed by the shared mempool
+ * mp) and one TX queue, then start it.  Returns 0 on success; on error
+ * the port may be left partially configured -- callers treat any
+ * failure as fatal for the test.
+ */
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	int ret;
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite setup: create the shared mbuf pool and the default null port
+ * (NULL_DEV_NAME), leaving the started port id in the file-scope
+ * port_id for the individual test cases.  Returns 0 on success.
+ */
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite teardown: stop, close and remove the shared null port, then
+ * release the mempool.  Teardown cannot fail the suite, so stop/close
+ * errors are only reported, not propagated.
+ */
+static void
+test_null_teardown(void)
+{
+	int ret;
+
+	/* Stop and close test port */
+	ret = rte_eth_dev_stop(port_id);
+	if (ret != 0)
+		printf("Failed to stop port %u: %d\n", port_id, ret);
+
+	ret = rte_eth_dev_close(port_id);
+	if (ret != 0)
+		printf("Failed to close port %u: %d\n", port_id, ret);
+
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ *
+ * The null PMD hands back a full burst of dummy mbufs; each one must
+ * be non-NULL, tagged with our port id, and carry the default 64-byte
+ * packet size.
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *pkts[BURST_SIZE];
+	uint16_t count;
+
+	/* RX must deliver exactly the requested burst */
+	count = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
+	TEST_ASSERT(count == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, count);
+
+	/* Check each received mbuf's metadata */
+	for (unsigned int idx = 0; idx < count; idx++) {
+		struct rte_mbuf *m = pkts[idx];
+
+		TEST_ASSERT(m != NULL, "Received NULL mbuf");
+		TEST_ASSERT(m->port == port_id,
+			    "Unexpected port id in mbuf: %u", m->port);
+		TEST_ASSERT(m->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", m->pkt_len);
+		TEST_ASSERT(m->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", m->data_len);
+	}
+
+	/* Return everything to the pool */
+	rte_pktmbuf_free_bulk(pkts, count);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Create random valid ethernet packets.
+ *
+ * Allocates @burst_size mbufs from the shared pool and appends a
+ * random payload length between RTE_ETHER_MIN_LEN and the mbuf's
+ * tailroom.  Returns 0 on success, -1 on allocation failure (all
+ * mbufs are freed in that case).
+ */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t room = rte_pktmbuf_tailroom(m);
+		uint16_t len;
+
+		/*
+		 * Choose random length between ether min and available
+		 * space.  Guard against room <= RTE_ETHER_MIN_LEN: the
+		 * subtraction is unsigned and would wrap, and
+		 * rte_rand_max() needs a non-zero upper bound.
+		 */
+		if (room <= RTE_ETHER_MIN_LEN)
+			len = room;
+		else
+			len = rte_rand_max(room - RTE_ETHER_MIN_LEN)
+				+ RTE_ETHER_MIN_LEN;
+
+		if (rte_pktmbuf_append(m, len) == NULL) {
+			rte_pktmbuf_free_bulk(bufs, burst_size);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ *
+ * The null PMD consumes every transmitted mbuf, so the pool's
+ * available count must not drop after the burst.
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count.
+	 * >= rather than == because mbufs may sit in the per-lcore
+	 * cache rather than the pool proper -- TODO confirm.
+	 */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification
+ *
+ * After a stats reset, one RX burst and one TX burst of BURST_SIZE
+ * packets each must show up exactly in ipackets/opackets.
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX; the driver frees the sent mbufs */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size
+ *
+ * Creates a second null device with the "size=256" devarg and checks
+ * that received packets carry that size instead of the default.
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;	/* must match "size=256" below */
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode
+ *
+ * Brings up a temporary null device with packet copying enabled
+ * ("copy=1") and verifies a full burst can still be received.
+ */
+static int
+test_null_copy_mode(void)
+{
+	uint16_t copy_port;
+	struct rte_mbuf *pkts[BURST_SIZE];
+	uint16_t count;
+
+	/* Create the copy-mode device and bring it up */
+	TEST_ASSERT(create_null_port("net_null_copy_test", "copy=1",
+				     &copy_port) == 0,
+		    "Failed to create null port with copy mode");
+	TEST_ASSERT(configure_null_port(copy_port) == 0,
+		    "Failed to configure null port");
+
+	/* A full burst must still be delivered when copying */
+	count = rte_eth_rx_burst(copy_port, 0, pkts, BURST_SIZE);
+	TEST_ASSERT(count == BURST_SIZE, "RX burst in copy mode failed");
+
+	rte_pktmbuf_free_bulk(pkts, count);
+
+	/* Tear the temporary device down again */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-RX mode
+ *
+ * With the no-rx=1 devarg the null PMD produces no packets on RX,
+ * while TX continues to accept (and free) transmitted mbufs.
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	/* The PMD consumes the whole burst; no manual free needed here */
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status
+ *
+ * This test expects the null PMD to report a 10G full-duplex link
+ * whose status follows the device state: UP after start, DOWN after
+ * stop. The port is restarted at the end so that subsequent test
+ * cases, which share the same port, still see a running device.
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info
+ *
+ * Query rte_eth_dev_info_get() and check the values this test expects
+ * the null PMD to report: one MAC address, an unrestricted MTU range,
+ * the MULTI_SEGS/MT_LOCKFREE TX offloads and RSS parameters.
+ */
+static int
+test_null_dev_info(void)
+{
+	const uint16_t expected_min_mtu = RTE_ETHER_MIN_LEN -
+		RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN;
+	struct rte_eth_dev_info info;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* A single default MAC address slot is expected */
+	TEST_ASSERT(info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", info.max_mtu);
+	TEST_ASSERT(info.min_mtu == expected_min_mtu,
+		    "Unexpected min_mtu: %u", info.min_mtu);
+
+	/* Both advertised TX offload capabilities must be present */
+	TEST_ASSERT(info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* RSS parameters: RETA present, 40-byte hash key, offloads set */
+	TEST_ASSERT(info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", info.hash_key_size);
+	TEST_ASSERT(info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple RX/TX bursts
+ *
+ * Run a series of RX bursts followed by a series of TX bursts and
+ * check that the accumulated packet counters equal the totals.
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	const unsigned int num_bursts = 10;
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int iter;
+	int ret;
+
+	/* Start from a clean counter state */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Receive num_bursts full bursts, freeing each one */
+	for (iter = 0; iter < num_bursts; iter++) {
+		struct rte_mbuf *pkts[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    iter, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(pkts, nb_rx);
+	}
+
+	/* Transmit num_bursts full bursts; the PMD consumes the mbufs */
+	for (iter = 0; iter < num_bursts; iter++) {
+		struct rte_mbuf *pkts[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(pkts, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    iter, BURST_SIZE, nb_tx);
+	}
+
+	/* Counters must reflect every burst in both directions */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ *
+ * Bring up a dedicated two-queue null device in RSS mode, read the
+ * current hash configuration, install a new 40-byte key, and read it
+ * back to confirm the update took effect. The key buffer is zeroed
+ * before the second read so a match proves the driver filled it in.
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	/* Queue setup must happen after configure and before start */
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key (a recognizable 0x55 pattern) */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update: clear the buffer so a match proves a re-read */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ *
+ * Program the redirection table with an alternating queue pattern,
+ * read it back and verify every entry round-tripped unchanged.
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/*
+	 * reta_conf[] only holds RTE_ETH_RSS_RETA_SIZE_128 entries; reject
+	 * a larger reported size rather than overrunning the array below.
+	 */
+	TEST_ASSERT(dev_info.reta_size <= RTE_ETH_RSS_RETA_SIZE_128,
+		    "RETA size %u exceeds test buffer", dev_info.reta_size);
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config: spread entries across the queues */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA, clearing the buffer first so stale data can't match */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values round-tripped */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ *
+ * Generate some RX traffic, confirm the counters are non-zero, then
+ * reset them and confirm all packet and byte counters read back zero.
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_mbuf *pkts[BURST_SIZE];
+	struct rte_eth_stats stats;
+	uint16_t rx_cnt;
+	int ret;
+
+	/* Generate some traffic so the counters move */
+	rx_cnt = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
+	TEST_ASSERT(rx_cnt > 0, "Failed to receive packets");
+	rte_pktmbuf_free_bulk(pkts, rx_cnt);
+
+	/* The RX burst above must be reflected in the stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Now clear everything */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* All four counters must read back as zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ *
+ * Read the current default MAC address, program a new one, and read
+ * it back to verify the device actually reports the new address.
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	/* Read back and verify the update actually took effect */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address after set");
+	TEST_ASSERT(rte_is_same_ether_addr(&mac_addr, &new_mac),
+		    "MAC address was not updated");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ *
+ * The null PMD is expected to come up with both promiscuous and
+ * all-multicast reception enabled; verify both query results.
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int state;
+
+	/* Promiscuous mode should already be on */
+	state = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(state == 1, "Expected promiscuous mode enabled");
+
+	/* Likewise for all-multicast */
+	state = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(state == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Multi-threaded TX test structures and worker function
+ */
+#define MT_TX_BURSTS_PER_LCORE 100	/* bursts each worker transmits */
+#define MT_TX_BURST_SIZE 8		/* mbufs per burst */
+
+/* Per-worker argument/result record, one instance per launched lcore */
+struct mt_tx_args {
+	uint16_t port;		/* port to transmit on */
+	uint16_t queue;		/* TX queue (shared across workers) */
+	struct rte_mempool *pool;	/* pool to allocate mbufs from */
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+/*
+ * Worker lcore entry point: repeatedly allocate a small burst of
+ * mbufs and transmit it on the queue given in args. Accumulates the
+ * number of packets the PMD accepted in tx_count and frees any that
+ * were not sent. Returns 0 on success, -1 on allocation failure
+ * (error is set to -ENOMEM in that case).
+ */
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	args->tx_count = 0;
+	args->error = 0;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (unsigned int i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded TX on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ *
+ * All failure results are collected first and asserted only after the
+ * workers have been joined and the mempool freed, so that a failing
+ * check cannot leak the pool or return while workers still run.
+ */
+static int
+test_null_mt_tx(void)
+{
+	const unsigned int max_workers = 4;	/* cap on lcores used */
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE];
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int launch_err = 0;
+	int worker_err = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > max_workers)
+		num_workers = max_workers;
+
+	/* Reset stats before allocating so a failure here leaks nothing */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Create larger mempool for multi-threaded test */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Launch workers; they all share TX queue 0 */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= max_workers)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].queue = 0;  /* All use same queue */
+		worker_args[num_workers].pool = mt_pool;
+		worker_args[num_workers].tx_count = 0;
+		worker_args[num_workers].error = 0;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		if (ret != 0) {
+			/* Remember the failure; assert after cleanup */
+			launch_err = ret;
+			break;
+		}
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT TX test\n", num_workers);
+
+	/* Wait for all workers to complete */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Record the first worker error and sum up TX counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		if (worker_args[i].error != 0 && worker_err == 0)
+			worker_err = worker_args[i].error;
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Fetch stats, then free the pool BEFORE any assert below so a
+	 * test failure cannot leak the mempool.
+	 */
+	ret = rte_eth_stats_get(port_id, &stats);
+	rte_mempool_free(mt_pool);
+
+	TEST_ASSERT(launch_err == 0,
+		    "Failed to launch worker: %d", launch_err);
+	TEST_ASSERT(worker_err == 0,
+		    "Worker encountered error: %d", worker_err);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total TX from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/* Suite definition: the individual cases share the port and mempool
+ * managed by the suite's setup/teardown callbacks.
+ */
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Entry point: run the whole null PMD suite through the test runner */
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+/* Register the suite under the command name "null_pmd_autotest" */
+REGISTER_FAST_TEST(null_pmd_autotest, true, true, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v6 0/3] test: add null PMD test suite
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (4 preceding siblings ...)
  2026-01-14 18:30 ` [PATCH v5 0/3] test: add null PMD test suite Stephen Hemminger
@ 2026-01-18 16:50 ` Stephen Hemminger
  2026-01-18 16:50   ` [PATCH v6 1/3] net/null: cleanup info response Stephen Hemminger
                     ` (2 more replies)
  2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
                   ` (4 subsequent siblings)
  10 siblings, 3 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-18 16:50 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series adds a comprehensive test suite for the null PMD driver.

The first patch does a minor cleanup of the null PMD info_get callback,
removing unnecessary argument validation that is already performed by
the ethdev layer.

The second patch extends the packet burst generator to support larger
packet sizes by changing the pkt_len parameter from uint8_t to uint16_t.
This also fixes an incorrect comment about the maximum number of
segments per packet.

The third patch adds a new test suite for the null PMD covering:
  - Basic RX/TX operations
  - Statistics tracking and reset
  - Custom packet sizes
  - Copy mode and no-rx mode
  - Link status
  - Device info validation
  - RSS and RETA configuration
  - MAC address operations
  - Promiscuous and allmulticast modes
  - Multi-threaded TX (MT_LOCKFREE capability)

v6 - rebase for recent REGISTER_TEST() change

Stephen Hemminger (3):
  net/null: cleanup info response
  test: generate larger packet bursts
  test: add a test for null PMD

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  34 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 964 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   |   9 +-
 5 files changed, 989 insertions(+), 23 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v6 1/3] net/null: cleanup info response
  2026-01-18 16:50 ` [PATCH v6 0/3] test: add null PMD test suite Stephen Hemminger
@ 2026-01-18 16:50   ` Stephen Hemminger
  2026-01-18 16:50   ` [PATCH v6 2/3] test: generate larger packet bursts Stephen Hemminger
  2026-01-18 16:50   ` [PATCH v6 3/3] test: add a test for null PMD Stephen Hemminger
  2 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-18 16:50 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The info_get callback does not need to validate its arguments,
since the ethdev layer has already checked them before the call.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..3e743a23a9 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,17 +298,12 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v6 2/3] test: generate larger packet bursts
  2026-01-18 16:50 ` [PATCH v6 0/3] test: add null PMD test suite Stephen Hemminger
  2026-01-18 16:50   ` [PATCH v6 1/3] net/null: cleanup info response Stephen Hemminger
@ 2026-01-18 16:50   ` Stephen Hemminger
  2026-01-18 16:50   ` [PATCH v6 3/3] test: add a test for null PMD Stephen Hemminger
  2 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-18 16:50 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

The packet length in packet burst generator was uint8_t which
limited usefulness for testing larger packet sizes.

The number of segments per packet is limited by the mbuf nb_segs
field, which is 16 bits wide, so the previous comment claiming an
8-bit limit was incorrect.
Change nb_pkt_per_burst to uint16_t since that is the limit
for tx_burst.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 34 ++++++++++++++++++-------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..346c69e6ce 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,24 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -300,13 +300,19 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v6 3/3] test: add a test for null PMD
  2026-01-18 16:50 ` [PATCH v6 0/3] test: add null PMD test suite Stephen Hemminger
  2026-01-18 16:50   ` [PATCH v6 1/3] net/null: cleanup info response Stephen Hemminger
  2026-01-18 16:50   ` [PATCH v6 2/3] test: generate larger packet bursts Stephen Hemminger
@ 2026-01-18 16:50   ` Stephen Hemminger
  2 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-18 16:50 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based on the existing
test_pmd_ring.c, with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 964 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 965 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index efec42a6bf..1d4cb16e08 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..3104f1ced0
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,964 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+/*
+ * Create a null vdev with the given devargs and look up its port id.
+ * On lookup failure the vdev is destroyed again so no device is leaked.
+ * Returns 0 on success, or the negative error from the failing call.
+ */
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure a port with one RX and one TX queue using default queue
+ * configuration (NULL conf) and the shared test mempool, then start it.
+ * Returns 0 on success, or the negative error from the failing step.
+ */
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	int ret;
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite setup: create the shared mbuf pool and the primary null port
+ * (stored in the file-scope 'mp' and 'port_id' globals).
+ *
+ * NOTE(review): if port creation/configuration fails after the mempool
+ * was created, cleanup relies on the suite teardown being invoked even
+ * when setup fails — confirm the unit test runner guarantees that.
+ */
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite teardown: stop/close the primary port, destroy its vdev and
+ * free the shared mempool.  Return values are deliberately ignored —
+ * this is best-effort cleanup at the end of the suite.
+ */
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ *
+ * The null PMD synthesizes packets on RX, so a burst request is
+ * expected to be filled completely, each mbuf stamped with the
+ * port id and the default 64-byte length.
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* RX should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Allocate a burst of mbufs and give each one a random payload length
+ * in [RTE_ETHER_MIN_LEN, tailroom].
+ *
+ * Returns 0 on success; on any failure the whole burst is freed and
+ * -1 is returned, so the caller never owns partially set-up mbufs.
+ */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t room = rte_pktmbuf_tailroom(m);
+		uint16_t len;
+
+		/*
+		 * Guard against unsigned underflow of the rte_rand_max()
+		 * argument (and rte_rand_max(0), which is invalid) if the
+		 * pool was created with less data room than the Ethernet
+		 * minimum frame size.
+		 */
+		if (room <= RTE_ETHER_MIN_LEN) {
+			len = room;
+		} else {
+			/* Random length between ether min and available space */
+			len = rte_rand_max(room - RTE_ETHER_MIN_LEN)
+				+ RTE_ETHER_MIN_LEN;
+		}
+
+		if (rte_pktmbuf_append(m, len) == NULL) {
+			rte_pktmbuf_free_bulk(bufs, burst_size);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ *
+ * The null PMD consumes (frees) everything handed to tx_burst, so
+ * the mempool available count must not shrink across the burst.
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/*
+	 * ">=" rather than "==" because freed mbufs may sit in a per-lcore
+	 * cache; we only require that nothing was lost.
+	 */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification
+ *
+ * Reset counters, do one RX burst and one TX burst, and check that
+ * ipackets/opackets match the burst size exactly.
+ *
+ * NOTE(review): if a TEST_ASSERT fires between RX and the final free,
+ * the rx_bufs burst is leaked — acceptable in a failing test run.
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size
+ *
+ * Creates a second null vdev with the "size=256" devarg and checks
+ * that RX'd mbufs carry that length instead of the default 64.
+ *
+ * NOTE(review): a failing TEST_ASSERT before the cleanup section
+ * leaks the temporary vdev/port — acceptable in a failing test run.
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode
+ *
+ * Creates a null vdev with "copy=1" and verifies that an RX burst
+ * still fills completely.  Only basic operation is checked here;
+ * payload contents are not inspected.
+ */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
+
+	/* Free RX mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-RX mode
+ *
+ * With the "no-rx=1" devarg the device must return zero packets on
+ * RX while TX keeps working (driver frees the transmitted mbufs).
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status
+ *
+ * Checks link is UP/10G/full-duplex after start, DOWN after stop,
+ * then restarts the shared port so later cases still have a running
+ * device.
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info
+ *
+ * Pins down the values the null PMD is expected to report in
+ * rte_eth_dev_info: single MAC slot, unbounded max MTU, the
+ * MULTI_SEGS/MT_LOCKFREE TX offloads and RSS parameters.
+ */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	/* Minimum MTU derived from minimum Ethernet frame size */
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check TX offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple RX/TX bursts
+ *
+ * Runs several back-to-back bursts in each direction and checks the
+ * aggregate counters equal num_bursts * BURST_SIZE.
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple RX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple TX bursts; transmitted mbufs are freed by the PMD */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ *
+ * Builds a dedicated 2-queue port in RTE_ETH_MQ_RX_RSS mode, reads the
+ * current hash config, writes a recognizable 40-byte key (all 0x55),
+ * then reads it back to verify the update was applied.
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];	/* matches hash_key_size reported by the PMD */
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Zero the local key first so the read-back cannot be a stale copy */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ *
+ * Builds a dedicated 2-queue RSS port, programs a round-robin
+ * redirection table (entry j -> queue j % num_queues), reads it
+ * back and verifies every entry survived the round trip.
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	/* One rte_eth_rss_reta_entry64 covers RTE_ETH_RETA_GROUP_SIZE entries */
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config: mask selects all 64 entries per group */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA (clear local copy first so stale data can't pass) */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ *
+ * Generates some RX traffic so counters are non-zero, resets them,
+ * and verifies all four packet/byte counters read back as zero.
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ *
+ * Only checks that get/set of the default MAC address succeed; the
+ * new address is not read back for comparison here.
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ *
+ * Only queries the current state; the modes are not toggled.
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Multi-threaded TX test structures and worker function
+ */
+#define MT_TX_BURSTS_PER_LCORE 100
+#define MT_TX_BURST_SIZE 8
+
+/* Per-worker arguments and results for the MT_LOCKFREE TX test */
+struct mt_tx_args {
+	uint16_t port;			/* port to transmit on */
+	uint16_t queue;			/* TX queue (shared by all workers) */
+	struct rte_mempool *pool;	/* pool to allocate TX mbufs from */
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+/*
+ * Worker lcore body: transmit MT_TX_BURSTS_PER_LCORE bursts of
+ * MT_TX_BURST_SIZE minimum-size packets on the shared queue,
+ * accumulating the per-worker TX count in args->tx_count.
+ * Returns 0 on success, -1 (with args->error set) on alloc failure.
+ */
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	args->tx_count = 0;
+	args->error = 0;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (unsigned int i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets so nothing leaks from the pool */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded TX on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ *
+ * Skipped (TEST_SKIPPED) when the capability is absent or fewer than
+ * two worker lcores are available; worker count is capped at 4.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE];
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/* Sized for every worker's full workload, doubled for headroom */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Reset stats before test */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers (re-counting with the same cap of 4) */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].queue = 0;  /* All use same queue */
+		worker_args[num_workers].pool = mt_pool;
+		worker_args[num_workers].tx_count = 0;
+		worker_args[num_workers].error = 0;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT TX test\n", num_workers);
+
+	/* Wait for all workers to complete (non-launched lcores return fast) */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up TX counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Verify stats match expected */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total TX from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Cleanup */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Suite definition: setup/teardown create and destroy the shared
+ * mempool and primary null port used by the test cases below.
+ */
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Entry point registered below: run the whole suite via the runner */
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, NOHUGE_OK, ASAN_OK, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v7 0/5] net/null: improvements and bug fixes
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (5 preceding siblings ...)
  2026-01-18 16:50 ` [PATCH v6 0/3] test: add null PMD test suite Stephen Hemminger
@ 2026-01-25 20:23 ` Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 1/5] net/null: cleanup info response Stephen Hemminger
                     ` (4 more replies)
  2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
                   ` (3 subsequent siblings)
  10 siblings, 5 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-25 20:23 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series makes several improvements to the null PMD driver and
adds comprehensive unit tests.

Changes in v7:
- Added release notes to patch 1
- Simplified test code by using single-line comments and struct
  initializers instead of explicit zeroing
- Added two new bug fix patches (patches 4 and 5) that validate
  packet size against mbuf data room constraints

The series:
1) Cleans up the info_get callback by removing redundant argument
   checks and using proper constants (UINT32_MAX instead of cast).

2) Extends the packet burst generator to support larger packets
   by changing type from uint8_t to uint16_t, enabling better
   testing of drivers with various packet sizes.

3) Adds a comprehensive unit test for the null PMD covering RX/TX,
   statistics, custom packet sizes, copy mode, no-RX mode, link
   status, device info, RSS, RETA, and multi-threaded TX.

4) Adds validation that the mbuf pool passed to rx_queue_setup
   has sufficient data room for the configured packet size.
   This prevents generating invalid mbufs in copy mode.

5) Adds validation that the packet size devarg doesn't exceed
   UINT16_MAX, which is the maximum size for a single mbuf
   data segment.

Patches 4 and 5 are marked for stable backport as they fix
potential issues where invalid mbufs could be generated.

Stephen Hemminger (5):
  net/null: cleanup info response
  test: generate larger packet bursts
  test: add a test for null PMD
  net/null: add check for pool vs packet size
  net/null: check packet size argument

 app/test/meson.build                   |   1 +
 app/test/packet_burst_generator.c      |  34 +-
 app/test/packet_burst_generator.h      |   4 +-
 app/test/test_pmd_null.c               | 916 +++++++++++++++++++++++++
 doc/guides/rel_notes/release_26_03.rst |  10 +
 drivers/net/null/rte_eth_null.c        |  21 +++---
 6 files changed, 959 insertions(+), 27 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0



Stephen Hemminger (5):
  net/null: cleanup info response
  test: generate larger packet bursts
  test: add a test for null PMD
  net/null: add check for pool vs packet size
  net/null: check packet size argument

 app/test/meson.build                   |   1 +
 app/test/packet_burst_generator.c      |  34 +-
 app/test/packet_burst_generator.h      |   4 +-
 app/test/test_pmd_null.c               | 938 +++++++++++++++++++++++++
 doc/guides/rel_notes/release_26_03.rst |   8 +
 drivers/net/null/rte_eth_null.c        |  21 +-
 6 files changed, 980 insertions(+), 26 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v7 1/5] net/null: cleanup info response
  2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
@ 2026-01-25 20:23   ` Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 2/5] test: generate larger packet bursts Stephen Hemminger
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-25 20:23 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The info_get callback doesn't need to check its arguments,
since that validation is already done by the ethdev layer.

The maximum receive packet length is limited by the mbuf
packet length argument which is 32 bits.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 doc/guides/rel_notes/release_26_03.rst | 4 ++++
 drivers/net/null/rte_eth_null.c        | 9 ++-------
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index 15dabee7a1..9f2f2ff58f 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated NULL ethernet driver.**
+
+  * Changed info response to match mbuf limits.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..3e743a23a9 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,17 +298,12 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v7 2/5] test: generate larger packet bursts
  2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 1/5] net/null: cleanup info response Stephen Hemminger
@ 2026-01-25 20:23   ` Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 3/5] test: add a test for null PMD Stephen Hemminger
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-25 20:23 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

The packet length in packet burst generator was uint8_t which
limited usefulness for testing larger packet sizes.

The number of packet segments per packet is currently limited
by the mbuf nb_segs field, which is 16 bits; the existing
comment claiming an 8-bit limit is incorrect.

Change nb_pkt_per_burst to uint16_t since that is the limit
for tx_burst.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 34 ++++++++++++++++++-------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..346c69e6ce 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,24 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -300,13 +300,19 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v7 3/5] test: add a test for null PMD
  2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 1/5] net/null: cleanup info response Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 2/5] test: generate larger packet bursts Stephen Hemminger
@ 2026-01-25 20:23   ` Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 4/5] net/null: add check for pool vs packet size Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 5/5] net/null: check packet size argument Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-25 20:23 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 964 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 965 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index f4d04a6e42..a1a0915c81 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..3104f1ced0
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,964 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	int ret;
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* RX should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Create random valid ethernet packets */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+
+		if (rte_pktmbuf_append(m, len) == NULL) {
+			rte_pktmbuf_free_bulk(bufs, burst_size);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode
+ */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
+
+	/* Free RX mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-RX mode
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info
+ */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check TX offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple RX/TX bursts
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple RX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple TX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Multi-threaded TX test structures and worker function
+ */
+#define MT_TX_BURSTS_PER_LCORE 100
+#define MT_TX_BURST_SIZE 8
+
+struct mt_tx_args {
+	uint16_t port;
+	uint16_t queue;
+	struct rte_mempool *pool;
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	args->tx_count = 0;
+	args->error = 0;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (unsigned int i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded TX on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE];
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/* Create larger mempool for multi-threaded test */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Reset stats before test */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].queue = 0;  /* All use same queue */
+		worker_args[num_workers].pool = mt_pool;
+		worker_args[num_workers].tx_count = 0;
+		worker_args[num_workers].error = 0;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT TX test\n", num_workers);
+
+	/* Wait for all workers to complete */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up TX counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Verify stats match expected */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total TX from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Cleanup */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, NOHUGE_OK, ASAN_OK, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v7 4/5] net/null: add check for pool vs packet size
  2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
                     ` (2 preceding siblings ...)
  2026-01-25 20:23   ` [PATCH v7 3/5] test: add a test for null PMD Stephen Hemminger
@ 2026-01-25 20:23   ` Stephen Hemminger
  2026-01-25 20:23   ` [PATCH v7 5/5] net/null: check packet size argument Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-25 20:23 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Bernard Iremonger

The mbuf pool passed when setting up a queue must have space
in the mbuf to hold the requested packet size; otherwise the
code in eth_null_copy_rx() would generate an invalid mbuf.

Fixes: c743e50c475f ("null: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/test_pmd_null.c               | 48 ++++++--------------------
 doc/guides/rel_notes/release_26_03.rst |  2 ++
 drivers/net/null/rte_eth_null.c        |  3 ++
 3 files changed, 16 insertions(+), 37 deletions(-)

diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
index 3104f1ced0..72503d64c7 100644
--- a/app/test/test_pmd_null.c
+++ b/app/test/test_pmd_null.c
@@ -221,9 +221,7 @@ test_null_tx_basic(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: Statistics verification
- */
+/* Test: Statistics verification */
 static int
 test_null_stats(void)
 {
@@ -271,9 +269,7 @@ test_null_stats(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: Custom packet size
- */
+/* Test: Custom packet size */
 static int
 test_null_custom_size(void)
 {
@@ -314,9 +310,7 @@ test_null_custom_size(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: Copy mode
- */
+/* Test: Copy mode */
 static int
 test_null_copy_mode(void)
 {
@@ -346,9 +340,7 @@ test_null_copy_mode(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: No-RX mode
- */
+/* Test: No-RX mode */
 static int
 test_null_no_rx_mode(void)
 {
@@ -384,9 +376,7 @@ test_null_no_rx_mode(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: Link status
- */
+/* Test: Link status */
 static int
 test_null_link_status(void)
 {
@@ -422,9 +412,7 @@ test_null_link_status(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: Device info
- */
+/* Test: Device info */
 static int
 test_null_dev_info(void)
 {
@@ -462,9 +450,7 @@ test_null_dev_info(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: Multiple RX/TX bursts
- */
+/* Test: Multiple RX/TX bursts */
 static int
 test_null_multiple_bursts(void)
 {
@@ -685,9 +671,7 @@ test_null_reta_config(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: Stats reset
- */
+/* Test: Stats reset */
 static int
 test_null_stats_reset(void)
 {
@@ -730,9 +714,7 @@ test_null_stats_reset(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Test: MAC address operations
- */
+/* Test: MAC address operations */
 static int
 test_null_mac_addr(void)
 {
@@ -772,9 +754,7 @@ test_null_promisc_allmulti(void)
 	return TEST_SUCCESS;
 }
 
-/*
- * Multi-threaded TX test structures and worker function
- */
+/* Multi-threaded TX test structures and worker function */
 #define MT_TX_BURSTS_PER_LCORE 100
 #define MT_TX_BURST_SIZE 8
 
@@ -794,9 +774,6 @@ mt_tx_worker(void *arg)
 	unsigned int burst;
 	uint16_t nb_tx;
 
-	args->tx_count = 0;
-	args->error = 0;
-
 	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
 		/* Allocate mbufs */
 		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
@@ -834,7 +811,7 @@ test_null_mt_tx(void)
 {
 	struct rte_eth_dev_info dev_info;
 	struct rte_mempool *mt_pool;
-	struct mt_tx_args worker_args[RTE_MAX_LCORE];
+	struct mt_tx_args worker_args[RTE_MAX_LCORE] = { 0 };
 	struct rte_eth_stats stats;
 	unsigned int lcore_id;
 	unsigned int num_workers = 0;
@@ -885,10 +862,7 @@ test_null_mt_tx(void)
 			break;
 
 		worker_args[num_workers].port = port_id;
-		worker_args[num_workers].queue = 0;  /* All use same queue */
 		worker_args[num_workers].pool = mt_pool;
-		worker_args[num_workers].tx_count = 0;
-		worker_args[num_workers].error = 0;
 
 		ret = rte_eal_remote_launch(mt_tx_worker,
 					    &worker_args[num_workers],
diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index 9f2f2ff58f..e59d839164 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -58,6 +58,8 @@ New Features
 * **Updated NULL ethernet driver.**
 
   * Changed info response to match mbuf limits.
+  * Added validation that mbuf pool data size is large enough for
+    packet size argument.
 
 
 Removed Items
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 3e743a23a9..81001d9326 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -241,6 +241,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 
 	packet_size = internals->packet_size;
 
+	if (rte_pktmbuf_data_room_size(mb_pool) < packet_size)
+		return -ENOSPC;
+
 	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
 	dev->data->rx_queues[rx_queue_id] =
 		&internals->rx_null_queues[rx_queue_id];
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v7 5/5] net/null: check packet size argument
  2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
                     ` (3 preceding siblings ...)
  2026-01-25 20:23   ` [PATCH v7 4/5] net/null: add check for pool vs packet size Stephen Hemminger
@ 2026-01-25 20:23   ` Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-25 20:23 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Ferruh Yigit

The size argument to the PMD cannot be larger than the largest
per-mbuf data segment size; otherwise the logic in eth_null_rx()
would generate an invalid mbuf.

Fixes: 4df90194f2a2 ("net/null: prefer unsigned int")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 doc/guides/rel_notes/release_26_03.rst | 2 ++
 drivers/net/null/rte_eth_null.c        | 9 ++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index e59d839164..04d2cfee8a 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -60,6 +60,8 @@ New Features
   * Changed info response to match mbuf limits.
   * Added validation that mbuf pool data size is large enough for
     packet size argument.
+  * Added check that packet size argument is not larger than largest
+    possible mbuf data segment.
 
 
 Removed Items
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 81001d9326..b5b83f1cf0 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -610,14 +610,17 @@ get_packet_size_arg(const char *key __rte_unused,
 {
 	const char *a = value;
 	unsigned int *packet_size = extra_args;
+	unsigned long sz;
 
 	if ((value == NULL) || (extra_args == NULL))
 		return -EINVAL;
 
-	*packet_size = (unsigned int)strtoul(a, NULL, 0);
-	if (*packet_size == UINT_MAX)
-		return -1;
+	errno = 0;
+	sz = strtoul(a, NULL, 0);
+	if (sz > UINT16_MAX || errno != 0)
+		return -EINVAL;
 
+	*packet_size = sz;
 	return 0;
 }
 
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v8 0/5] net/null: improvements and bug fixes
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (6 preceding siblings ...)
  2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
@ 2026-01-28 19:00 ` Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 1/5] net/null: cleanup info response Stephen Hemminger
                     ` (4 more replies)
  2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
                   ` (2 subsequent siblings)
  10 siblings, 5 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-28 19:00 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series makes several improvements to the null PMD driver and
adds comprehensive unit tests.

The series:
1) Cleans up the info_get callback by removing redundant argument
   checks and using proper constants (UINT32_MAX instead of cast).

2) Extends the packet burst generator to support larger packets
   by changing type from uint8_t to uint16_t, enabling better
   testing of drivers with various packet sizes.

3) Adds a comprehensive unit test for the null PMD covering RX/TX,
   statistics, custom packet sizes, copy mode, no-RX mode, link
   status, device info, RSS, RETA, and multi-threaded TX.

4) Adds validation that the mbuf pool passed to rx_queue_setup
   has sufficient data room for the configured packet size.
   This prevents generating invalid mbufs in copy mode.

5) Adds validation that the packet size devarg doesn't exceed
   UINT16_MAX, which is the maximum size for a single mbuf
   data segment.

Patches 4 and 5 are marked for stable backport as they fix
potential issues where invalid mbufs could be generated.

v8:
  - Rebased to latest main
  - Minor revision to avoid changes in later fixes

v7:
  - Added release notes to patch 1
  - Simplified test code by using single-line comments and struct
    initializers instead of explicit zeroing
  - Added two new bug fix patches (patches 4 and 5) that validate
    packet size against mbuf data room constraints

Stephen Hemminger (5):
  net/null: cleanup info response
  test: generate larger packet bursts
  test: add a test for null PMD
  net/null: add check for pool vs packet size
  net/null: check packet size argument

 app/test/meson.build                   |   1 +
 app/test/packet_burst_generator.c      |  34 +-
 app/test/packet_burst_generator.h      |   4 +-
 app/test/test_pmd_null.c               | 916 +++++++++++++++++++++++++
 doc/guides/rel_notes/release_26_03.rst |  10 +
 drivers/net/null/rte_eth_null.c        |  21 +++---
 6 files changed, 959 insertions(+), 27 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

Stephen Hemminger (5):
  net/null: cleanup info response
  test: generate larger packet bursts
  test: add a test for null PMD
  net/null: add check for pool vs packet size
  net/null: check packet size argument

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  34 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 936 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   |  20 +-
 5 files changed, 970 insertions(+), 25 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v8 1/5] net/null: cleanup info response
  2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
@ 2026-01-28 19:00   ` Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 2/5] test: generate larger packet bursts Stephen Hemminger
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-28 19:00 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The info_get callback doesn't need to check its args
since that is already done by ethdev.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..4496c093a7 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,14 +298,10 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
 	dev_info->min_rx_bufsize = 0;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v8 2/5] test: generate larger packet bursts
  2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 1/5] net/null: cleanup info response Stephen Hemminger
@ 2026-01-28 19:00   ` Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 3/5] test: add a test for null PMD Stephen Hemminger
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-28 19:00 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

The packet length in the packet burst generator was uint8_t, which
limited its usefulness for testing larger packet sizes.

The number of packet segments per packet is currently limited
by mbuf nb_segs which is 16 bits. The comment is incorrect.

Change nb_pkt_per_burst to uint16_t since that is the limit
for tx_burst.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 34 ++++++++++++++++++-------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..346c69e6ce 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,24 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -300,13 +300,19 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v8 3/5] test: add a test for null PMD
  2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 1/5] net/null: cleanup info response Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 2/5] test: generate larger packet bursts Stephen Hemminger
@ 2026-01-28 19:00   ` Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 4/5] net/null: add check for pool vs packet size Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 5/5] net/null: check packet size argument Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-28 19:00 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 936 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 937 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index f4d04a6e42..a1a0915c81 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..05d2dd7e6e
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,936 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	int ret;
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic RX - should return empty packets
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* RX should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Create random valid ethernet packets */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+
+		if (rte_pktmbuf_append(m, len) == NULL) {
+			rte_pktmbuf_free_bulk(bufs, burst_size);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic TX - should free all packets
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for TX */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* TX should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Statistics verification */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform RX */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Allocate and perform TX */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Custom packet size */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Copy mode */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
+
+	/* Free RX mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: No-RX mode */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* RX in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* TX in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Link status */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Device info */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check TX offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Multiple RX/TX bursts */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple RX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple TX bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to TX %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Stats reset */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: MAC address operations */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Promiscuous and allmulticast modes */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/* Multi-threaded TX test structures and worker function */
+#define MT_TX_BURSTS_PER_LCORE 100
+#define MT_TX_BURST_SIZE 8
+
+struct mt_tx_args {
+	uint16_t port;
+	uint16_t queue;
+	struct rte_mempool *pool;
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (unsigned int i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded TX on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE] = { 0 };
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/* Create larger mempool for multi-threaded test */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Reset stats before test */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].pool = mt_pool;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT TX test\n", num_workers);
+
+	/* Wait for all workers to complete */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up TX counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Verify stats match expected */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total TX from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Cleanup */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, NOHUGE_OK, ASAN_OK, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v8 4/5] net/null: add check for pool vs packet size
  2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
                     ` (2 preceding siblings ...)
  2026-01-28 19:00   ` [PATCH v8 3/5] test: add a test for null PMD Stephen Hemminger
@ 2026-01-28 19:00   ` Stephen Hemminger
  2026-01-28 19:00   ` [PATCH v8 5/5] net/null: check packet size argument Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-28 19:00 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Bernard Iremonger

The mbuf pool passed when setting up a queue must have enough
room in each mbuf to hold the requested packet size; otherwise the
code in eth_null_copy_rx() would generate an invalid mbuf.

Fixes: c743e50c475f ("null: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 4496c093a7..4b4d996686 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -241,6 +241,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 
 	packet_size = internals->packet_size;
 
+	if (rte_pktmbuf_data_room_size(mb_pool) < packet_size)
+		return -ENOSPC;
+
 	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
 	dev->data->rx_queues[rx_queue_id] =
 		&internals->rx_null_queues[rx_queue_id];
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v8 5/5] net/null: check packet size argument
  2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
                     ` (3 preceding siblings ...)
  2026-01-28 19:00   ` [PATCH v8 4/5] net/null: add check for pool vs packet size Stephen Hemminger
@ 2026-01-28 19:00   ` Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-28 19:00 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Ferruh Yigit

The size argument to the PMD cannot be larger than the largest
per-mbuf data segment size; otherwise the logic in eth_null_rx()
would generate an invalid mbuf.

Fixes: 4df90194f2a2 ("net/null: prefer unsigned int")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 4b4d996686..aee7754654 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -611,14 +611,17 @@ get_packet_size_arg(const char *key __rte_unused,
 {
 	const char *a = value;
 	unsigned int *packet_size = extra_args;
+	unsigned long sz;
 
 	if ((value == NULL) || (extra_args == NULL))
 		return -EINVAL;
 
-	*packet_size = (unsigned int)strtoul(a, NULL, 0);
-	if (*packet_size == UINT_MAX)
-		return -1;
+	errno = 0;
+	sz = strtoul(a, NULL, 0);
+	if (sz > UINT16_MAX || errno != 0)
+		return -EINVAL;
 
+	*packet_size = sz;
 	return 0;
 }
 
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v9 0/5] net/null: improvements and bug fixes
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (7 preceding siblings ...)
  2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
@ 2026-01-29 20:25 ` Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 1/5] net/null: cleanup info response Stephen Hemminger
                     ` (4 more replies)
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
  10 siblings, 5 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-29 20:25 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series makes several improvements to the null PMD driver and
adds comprehensive unit tests.

The series:
1) Cleans up the info_get callback by removing redundant argument
   checks and using proper constants. Also sets min_rx_bufsize to
   the configured packet_size, enabling the ethdev layer to reject
   undersized mbuf pools.

2) Adds proper validation for numeric devargs (packet_size,
   packet_copy, no_rx) using a common helper function that rejects
   empty strings, trailing non-numeric characters, and out-of-range
   values.

3) Removes redundant argument validation in dev_ops callbacks that
   is already performed by the ethdev layer.

4) Extends the packet burst generator to support larger packets
   by widening parameter types from uint8_t to uint16_t.

5) Adds a comprehensive unit test for the null PMD covering RX/TX,
   statistics, custom packet sizes, copy mode, no-RX mode, link
   status, device info, RSS, RETA, and multi-threaded TX.

Patches 1 and 2 are marked for stable backport as they fix issues
where invalid mbufs could be generated or invalid arguments silently
accepted.

v9:
  - Consolidated argument validation into a single patch with
    common helper function get_unsigned_arg()
  - Merged pool vs packet size check into the cleanup patch
  - Removed redundant argument validation in a separate patch
  - Reordered patches to put fixes before tests

v8:
  - Rebased to latest main
  - Minor revision to avoid changes in later fixes

v7:
  - Added release notes to patch 1
  - Simplified test code by using single-line comments and struct
    initializers instead of explicit zeroing
  - Added two new bug fix patches (patches 4 and 5) that validate
    packet size against mbuf data room constraints

Stephen Hemminger (5):
  net/null: cleanup info response
  net/null: validate the numeric devargs
  net/null: remove redundant argument validation
  test: support larger packet sizes in burst generator
  test: add a test for null PMD

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  34 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 935 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   | 113 ++--
 5 files changed, 997 insertions(+), 90 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v9 1/5] net/null: cleanup info response
  2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
@ 2026-01-29 20:25   ` Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 2/5] net/null: validate the numeric devargs Stephen Hemminger
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-29 20:25 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Bernard Iremonger

The info_get callback doesn't need to check its arguments
since that validation is already done by the ethdev layer.

The null driver generates fake packets of configured packet_size.
The mbuf pool passed when setting up Rx queue must have space
to hold the requested packet size; otherwise eth_null_copy_rx()
would generate invalid mbufs.

Setting min_rx_bufsize to packet_size enables the existing
validation in rte_eth_rx_queue_setup() to reject undersized pools.

Fixes: c743e50c475f ("null: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..ba87c82a73 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -298,17 +298,13 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
+	dev_info->min_rx_bufsize = internals->packet_size;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v9 2/5] net/null: validate the numeric devargs
  2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 1/5] net/null: cleanup info response Stephen Hemminger
@ 2026-01-29 20:25   ` Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 3/5] net/null: remove redundant argument validation Stephen Hemminger
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-29 20:25 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Ferruh Yigit

The driver was not correctly validating the arguments for packet_size,
packet_copy, and no_rx. The original parsing had several issues:
- Empty strings were not rejected
- Trailing non-numeric characters were silently ignored
- Large values could wrap around causing unexpected behavior

Add a common helper function get_unsigned_arg() that properly validates
numeric arguments by checking for empty input, ensuring the entire
string is consumed, and enforcing appropriate maximum values for each
parameter.

Fixes: 4df90194f2a2 ("net/null: prefer unsigned int")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 59 +++++++++++++++------------------
 1 file changed, 26 insertions(+), 33 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index ba87c82a73..cd7a29bce9 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -602,56 +602,49 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	return 0;
 }
 
-static inline int
-get_packet_size_arg(const char *key __rte_unused,
-		const char *value, void *extra_args)
+static int
+get_unsigned_arg(const char *str, unsigned int *retval,
+		 unsigned int maxval)
 {
-	const char *a = value;
-	unsigned int *packet_size = extra_args;
+	char *endp = NULL;
+	unsigned long val;
 
-	if ((value == NULL) || (extra_args == NULL))
+	if (str == NULL || retval == NULL)
 		return -EINVAL;
 
-	*packet_size = (unsigned int)strtoul(a, NULL, 0);
-	if (*packet_size == UINT_MAX)
-		return -1;
+	if (*str == '\0')
+		return -EINVAL; /* empty string */
 
+	val = strtoul(str, &endp, 0);
+	if (*endp != '\0')
+		return -EINVAL; /* non-numeric character */
+
+	if (val > maxval)
+		return -ERANGE;
+
+	*retval = val;
 	return 0;
 }
 
-static inline int
-get_packet_copy_arg(const char *key __rte_unused,
+static int
+get_packet_size_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
-	const char *a = value;
-	unsigned int *packet_copy = extra_args;
-
-	if ((value == NULL) || (extra_args == NULL))
-		return -EINVAL;
-
-	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
-	if (*packet_copy == UINT_MAX)
-		return -1;
+	return get_unsigned_arg(value, extra_args, UINT16_MAX);
+}
 
-	return 0;
+static int
+get_packet_copy_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	return get_unsigned_arg(value, extra_args, UINT32_MAX);
 }
 
 static int
 get_packet_no_rx_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
-	const char *a = value;
-	unsigned int no_rx;
-
-	if (value == NULL || extra_args == NULL)
-		return -EINVAL;
-
-	no_rx = (unsigned int)strtoul(a, NULL, 0);
-	if (no_rx != 0 && no_rx != 1)
-		return -1;
-
-	*(unsigned int *)extra_args = no_rx;
-	return 0;
+	return get_unsigned_arg(value, extra_args, 1);
 }
 
 static int
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v9 3/5] net/null: remove redundant argument validation
  2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 1/5] net/null: cleanup info response Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 2/5] net/null: validate the numeric devargs Stephen Hemminger
@ 2026-01-29 20:25   ` Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 4/5] test: support larger packet sizes in burst generator Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 5/5] test: add a test for null PMD Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-29 20:25 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The dev_ops callbacks for stop, rx_queue_setup, and tx_queue_setup
are only called from the ethdev layer, which already validates the
device pointer and queue index arguments.

Remove the redundant checks for:
 - dev == NULL in eth_dev_stop, eth_rx_queue_setup, eth_tx_queue_setup
 - queue_id >= nb_queues in eth_rx_queue_setup, eth_tx_queue_setup

Also consolidate variable declarations with their initializations
and remove unnecessary NULL initialization in rte_pmd_null_remove.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 44 ++++++++-------------------------
 1 file changed, 10 insertions(+), 34 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index cd7a29bce9..d5c419ec15 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -207,9 +207,6 @@ eth_dev_stop(struct rte_eth_dev *dev)
 {
 	uint16_t i;
 
-	if (dev == NULL)
-		return 0;
-
 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -227,25 +224,16 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		const struct rte_eth_rxconf *rx_conf __rte_unused,
 		struct rte_mempool *mb_pool)
 {
+	struct pmd_internals *internals = dev->data->dev_private;
+	unsigned int packet_size = internals->packet_size;
 	struct rte_mbuf *dummy_packet;
-	struct pmd_internals *internals;
-	unsigned int packet_size;
 
-	if ((dev == NULL) || (mb_pool == NULL))
+	if (mb_pool == NULL)
 		return -EINVAL;
 
-	internals = dev->data->dev_private;
-
-	if (rx_queue_id >= dev->data->nb_rx_queues)
-		return -ENODEV;
-
-	packet_size = internals->packet_size;
-
 	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
-	dev->data->rx_queues[rx_queue_id] =
-		&internals->rx_null_queues[rx_queue_id];
-	dummy_packet = rte_zmalloc_socket(NULL,
-			packet_size, 0, dev->data->numa_node);
+	dev->data->rx_queues[rx_queue_id] = &internals->rx_null_queues[rx_queue_id];
+	dummy_packet = rte_zmalloc_socket(NULL, packet_size, 0, dev->data->numa_node);
 	if (dummy_packet == NULL)
 		return -ENOMEM;
 
@@ -261,24 +249,12 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		unsigned int socket_id __rte_unused,
 		const struct rte_eth_txconf *tx_conf __rte_unused)
 {
+	struct pmd_internals *internals = dev->data->dev_private;
+	unsigned int packet_size = internals->packet_size;
 	struct rte_mbuf *dummy_packet;
-	struct pmd_internals *internals;
-	unsigned int packet_size;
-
-	if (dev == NULL)
-		return -EINVAL;
 
-	internals = dev->data->dev_private;
-
-	if (tx_queue_id >= dev->data->nb_tx_queues)
-		return -ENODEV;
-
-	packet_size = internals->packet_size;
-
-	dev->data->tx_queues[tx_queue_id] =
-		&internals->tx_null_queues[tx_queue_id];
-	dummy_packet = rte_zmalloc_socket(NULL,
-			packet_size, 0, dev->data->numa_node);
+	dev->data->tx_queues[tx_queue_id] = &internals->tx_null_queues[tx_queue_id];
+	dummy_packet = rte_zmalloc_socket(NULL, packet_size, 0, dev->data->numa_node);
 	if (dummy_packet == NULL)
 		return -ENOMEM;
 
@@ -739,7 +715,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
 static int
 rte_pmd_null_remove(struct rte_vdev_device *dev)
 {
-	struct rte_eth_dev *eth_dev = NULL;
+	struct rte_eth_dev *eth_dev;
 
 	if (!dev)
 		return -EINVAL;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v9 4/5] test: support larger packet sizes in burst generator
  2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
                     ` (2 preceding siblings ...)
  2026-01-29 20:25   ` [PATCH v9 3/5] net/null: remove redundant argument validation Stephen Hemminger
@ 2026-01-29 20:25   ` Stephen Hemminger
  2026-01-29 20:25   ` [PATCH v9 5/5] test: add a test for null PMD Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-29 20:25 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Widen the parameter types in the packet burst generator functions
to support larger values:
- pkt_len: uint8_t to uint16_t for larger packet sizes
- nb_pkt_segs: uint8_t to uint16_t to match mbuf nb_segs
- nb_pkt_per_burst: int to uint16_t to match tx_burst limit

Remove the incorrect RTE_MAX_SEGS_PER_PKT macro that claimed
nb_segs is 8-bit when it is actually 16-bit.

Add validation to prevent division by zero when nb_pkt_segs is 0.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 34 ++++++++++++++++++-------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..346c69e6ce 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,24 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -300,13 +300,19 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v9 5/5] test: add a test for null PMD
  2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
                     ` (3 preceding siblings ...)
  2026-01-29 20:25   ` [PATCH v9 4/5] test: support larger packet sizes in burst generator Stephen Hemminger
@ 2026-01-29 20:25   ` Stephen Hemminger
  4 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-01-29 20:25 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 935 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 936 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index f4d04a6e42..a1a0915c81 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..bd520af64f
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,935 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	int ret;
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup Rx queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup Tx queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic Rx - should return empty packets
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* Rx should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Create random valid ethernet packets */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+
+		if (rte_pktmbuf_append(m, len) == NULL) {
+			rte_pktmbuf_free_bulk(bufs, burst_size);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic Tx - should free all packets
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for Tx */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* Tx should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to Tx %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Statistics verification */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform Rx */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+
+	/* Allocate and perform Tx */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after Rx/Tx");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Custom packet size */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Copy mode */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst in copy mode failed");
+
+	/* Free Rx mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: No-Rx mode */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* Tx in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Link status */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Device info */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check Tx offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Multiple Rx/Tx bursts */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple Rx bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple Tx bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to Tx %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Stats reset */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: MAC address operations */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Promiscuous and allmulticast modes */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/* Multi-threaded Tx test structures and worker function */
+#define MT_TX_BURSTS_PER_LCORE 100
+#define MT_TX_BURST_SIZE 8u
+
+struct mt_tx_args {
+	uint16_t port;
+	uint16_t queue;
+	struct rte_mempool *pool;
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (uint16_t i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded Tx on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE] = { 0 };
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/* Create larger mempool for multi-threaded test */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Reset stats before test */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].pool = mt_pool;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT Tx test\n", num_workers);
+
+	/* Wait for all workers to complete */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up Tx counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Verify stats match expected */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total Tx from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Cleanup */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, NOHUGE_OK, ASAN_OK, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v10 0/6] net/null: bug fixes and improvements
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (8 preceding siblings ...)
  2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
@ 2026-02-01 17:17 ` Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 1/6] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
                     ` (5 more replies)
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
  10 siblings, 6 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-01 17:17 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series makes several improvements to the null PMD driver and
adds comprehensive unit tests.

The series:
1) Cleans up the info_get callback by removing redundant argument
   checks and using proper constants. Also sets min_rx_bufsize to
   the configured packet_size, enabling the ethdev layer to reject
   undersized mbuf pools.

2) Adds proper validation for numeric devargs (packet_size,
   packet_copy, no_rx) using a common helper function that rejects
   empty strings, trailing non-numeric characters, and out-of-range
   values.

3) Removes redundant argument validation in dev_ops callbacks that
   is already performed by the ethdev layer.

4) Extends the packet burst generator to support larger packets
   by widening parameter types from uint8_t to uint16_t.

5) Adds a comprehensive unit test for the null PMD covering RX/TX,
   statistics, custom packet sizes, copy mode, no-RX mode, link
   status, device info, RSS, RETA, and multi-threaded TX.

Patches 1, 2, and 3 are marked for stable backport as they fix issues
where invalid mbufs could be generated or invalid arguments silently
accepted.

v10:
  - Fix mbuf leak in eth_null_copy_tx

v9:
  - Consolidated argument validation into a single patch with
    common helper function get_unsigned_arg()
  - Merged pool vs packet size check into the cleanup patch
  - Removed redundant argument validation in a separate patch
  - Reordered patches to put fixes before tests

Stephen Hemminger (6):
  net/null: fix missing mbuf leakage in the copy transmit
  net/null: cleanup info response
  net/null: validate the numeric devargs
  net/null: remove redundant argument validation
  test: support larger packet sizes in burst generator
  test: add a test for null PMD

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  34 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 935 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   | 114 ++--
 5 files changed, 998 insertions(+), 90 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v10 1/6] net/null: fix missing mbuf leakage in the copy transmit
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
@ 2026-02-01 17:17   ` Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 2/6] net/null: cleanup info response Stephen Hemminger
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-01 17:17 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa

The transmit variant that copies the data into the dummy packet
never frees the transmitted mbufs. This regression was introduced
when the transmit logic was optimized to do bulk free.

Fixes: 9946ba548bf8 ("net/null: optimize Tx")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..0931dac12e 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -172,6 +172,7 @@ eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(m, void *), len);
 		bytes += m->pkt_len;
 	}
+	rte_pktmbuf_free_bulk(bufs, nb_bufs);
 
 	rte_atomic_fetch_add_explicit(&h->tx_pkts, nb_bufs, rte_memory_order_relaxed);
 	rte_atomic_fetch_add_explicit(&h->tx_bytes, bytes, rte_memory_order_relaxed);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v10 2/6] net/null: cleanup info response
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 1/6] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
@ 2026-02-01 17:17   ` Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 3/6] net/null: validate the numeric devargs Stephen Hemminger
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-01 17:17 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Bernard Iremonger

The info_get callback doesn't need to check its arguments,
since that is already done by the ethdev layer.

The null driver generates fake packets of configured packet_size.
The mbuf pool passed when setting up Rx queue must have space
to hold the requested packet size; otherwise eth_null_copy_rx()
would generate invalid mbufs.

Setting min_rx_bufsize to packet_size enables the existing
validation in rte_eth_rx_queue_setup() to reject undersized pools.

Fixes: c743e50c475f ("null: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 0931dac12e..f2ffdc2b2b 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -299,17 +299,13 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
+	dev_info->min_rx_bufsize = internals->packet_size;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v10 3/6] net/null: validate the numeric devargs
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 1/6] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 2/6] net/null: cleanup info response Stephen Hemminger
@ 2026-02-01 17:17   ` Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 4/6] net/null: remove redundant argument validation Stephen Hemminger
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-01 17:17 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Ferruh Yigit

The driver was not correctly validating the arguments for packet_size,
packet_copy, and no_rx. The original parsing had several issues:
- Empty strings were not rejected
- Trailing non-numeric characters were silently ignored
- Large values could wrap around causing unexpected behavior

Add a common helper function get_unsigned_arg() that properly validates
numeric arguments by checking for empty input, ensuring the entire
string is consumed, and enforcing appropriate maximum values for each
parameter.

Fixes: 4df90194f2a2 ("net/null: prefer unsigned int")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 59 +++++++++++++++------------------
 1 file changed, 26 insertions(+), 33 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index f2ffdc2b2b..0b6c9577cd 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -603,56 +603,49 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	return 0;
 }
 
-static inline int
-get_packet_size_arg(const char *key __rte_unused,
-		const char *value, void *extra_args)
+/*
+ * Parse an unsigned numeric devarg string (decimal, hex or octal).
+ *
+ * Rejects NULL input, empty strings, negative numbers, trailing
+ * non-numeric characters, overflowed values and values above maxval.
+ *
+ * Returns 0 on success with *retval set, negative errno otherwise.
+ */
+static int
+get_unsigned_arg(const char *str, unsigned int *retval,
+		 unsigned int maxval)
+{
+	char *endp = NULL;
+	unsigned long val;
+
+	if (str == NULL || retval == NULL)
+		return -EINVAL;
+
+	if (*str == '\0')
+		return -EINVAL; /* empty string */
+
+	if (*str == '-')
+		return -EINVAL; /* strtoul would silently wrap negatives */
+
+	errno = 0;
+	val = strtoul(str, &endp, 0);
+	if (*endp != '\0')
+		return -EINVAL; /* non-numeric character */
+
+	/*
+	 * On 32-bit platforms ULONG_MAX == UINT32_MAX, so an overflowed
+	 * input clamps to a value that can pass the range check below;
+	 * errno is the only reliable overflow indication.
+	 */
+	if (errno == ERANGE || val > maxval)
+		return -ERANGE;
+
+	*retval = val;
+	return 0;
+}
 
-static inline int
-get_packet_copy_arg(const char *key __rte_unused,
+static int
+get_packet_size_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
-	const char *a = value;
-	unsigned int *packet_copy = extra_args;
-
-	if ((value == NULL) || (extra_args == NULL))
-		return -EINVAL;
-
-	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
-	if (*packet_copy == UINT_MAX)
-		return -1;
+	return get_unsigned_arg(value, extra_args, UINT16_MAX);
+}
 
-	return 0;
+static int
+get_packet_copy_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	return get_unsigned_arg(value, extra_args, UINT32_MAX);
 }
 
 static int
 get_packet_no_rx_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
-	const char *a = value;
-	unsigned int no_rx;
-
-	if (value == NULL || extra_args == NULL)
-		return -EINVAL;
-
-	no_rx = (unsigned int)strtoul(a, NULL, 0);
-	if (no_rx != 0 && no_rx != 1)
-		return -1;
-
-	*(unsigned int *)extra_args = no_rx;
-	return 0;
+	return get_unsigned_arg(value, extra_args, 1);
 }
 
 static int
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v10 4/6] net/null: remove redundant argument validation
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
                     ` (2 preceding siblings ...)
  2026-02-01 17:17   ` [PATCH v10 3/6] net/null: validate the numeric devargs Stephen Hemminger
@ 2026-02-01 17:17   ` Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 5/6] test: support larger packet sizes in burst generator Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 6/6] test: add a test for null PMD Stephen Hemminger
  5 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-01 17:17 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The dev_ops callbacks for stop, rx_queue_setup, and tx_queue_setup
are only called from the ethdev layer which already validates the
device pointer and queue index arguments.

Remove the redundant checks for:
 - dev == NULL in eth_dev_stop, eth_rx_queue_setup, eth_tx_queue_setup
 - queue_id >= nb_queues in eth_rx_queue_setup, eth_tx_queue_setup

Also consolidate variable declarations with their initializations
and remove unnecessary NULL initialization in rte_pmd_null_remove.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 44 ++++++++-------------------------
 1 file changed, 10 insertions(+), 34 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 0b6c9577cd..672459fe05 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -208,9 +208,6 @@ eth_dev_stop(struct rte_eth_dev *dev)
 {
 	uint16_t i;
 
-	if (dev == NULL)
-		return 0;
-
 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -228,25 +225,16 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		const struct rte_eth_rxconf *rx_conf __rte_unused,
 		struct rte_mempool *mb_pool)
 {
+	struct pmd_internals *internals = dev->data->dev_private;
+	unsigned int packet_size = internals->packet_size;
 	struct rte_mbuf *dummy_packet;
-	struct pmd_internals *internals;
-	unsigned int packet_size;
 
-	if ((dev == NULL) || (mb_pool == NULL))
+	if (mb_pool == NULL)
 		return -EINVAL;
 
-	internals = dev->data->dev_private;
-
-	if (rx_queue_id >= dev->data->nb_rx_queues)
-		return -ENODEV;
-
-	packet_size = internals->packet_size;
-
 	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
-	dev->data->rx_queues[rx_queue_id] =
-		&internals->rx_null_queues[rx_queue_id];
-	dummy_packet = rte_zmalloc_socket(NULL,
-			packet_size, 0, dev->data->numa_node);
+	dev->data->rx_queues[rx_queue_id] = &internals->rx_null_queues[rx_queue_id];
+	dummy_packet = rte_zmalloc_socket(NULL, packet_size, 0, dev->data->numa_node);
 	if (dummy_packet == NULL)
 		return -ENOMEM;
 
@@ -262,24 +250,12 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		unsigned int socket_id __rte_unused,
 		const struct rte_eth_txconf *tx_conf __rte_unused)
 {
+	struct pmd_internals *internals = dev->data->dev_private;
+	unsigned int packet_size = internals->packet_size;
 	struct rte_mbuf *dummy_packet;
-	struct pmd_internals *internals;
-	unsigned int packet_size;
-
-	if (dev == NULL)
-		return -EINVAL;
 
-	internals = dev->data->dev_private;
-
-	if (tx_queue_id >= dev->data->nb_tx_queues)
-		return -ENODEV;
-
-	packet_size = internals->packet_size;
-
-	dev->data->tx_queues[tx_queue_id] =
-		&internals->tx_null_queues[tx_queue_id];
-	dummy_packet = rte_zmalloc_socket(NULL,
-			packet_size, 0, dev->data->numa_node);
+	dev->data->tx_queues[tx_queue_id] = &internals->tx_null_queues[tx_queue_id];
+	dummy_packet = rte_zmalloc_socket(NULL, packet_size, 0, dev->data->numa_node);
 	if (dummy_packet == NULL)
 		return -ENOMEM;
 
@@ -740,7 +716,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
 static int
 rte_pmd_null_remove(struct rte_vdev_device *dev)
 {
-	struct rte_eth_dev *eth_dev = NULL;
+	struct rte_eth_dev *eth_dev;
 
 	if (!dev)
 		return -EINVAL;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v10 5/6] test: support larger packet sizes in burst generator
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
                     ` (3 preceding siblings ...)
  2026-02-01 17:17   ` [PATCH v10 4/6] net/null: remove redundant argument validation Stephen Hemminger
@ 2026-02-01 17:17   ` Stephen Hemminger
  2026-02-01 17:17   ` [PATCH v10 6/6] test: add a test for null PMD Stephen Hemminger
  5 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-01 17:17 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Widen the parameter types in the packet burst generator functions
to support larger values:
- pkt_len: uint8_t to uint16_t for larger packet sizes
- nb_pkt_segs: uint8_t to uint16_t to match mbuf nb_segs
- nb_pkt_per_burst: int to uint16_t to match tx_burst limit

Remove the incorrect RTE_MAX_SEGS_PER_PKT macro that claimed
nb_segs is 8-bit when it is actually 16-bit.

Add validation to prevent division by zero when nb_pkt_segs is 0.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 34 ++++++++++++++++++-------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..346c69e6ce 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,24 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -300,13 +300,19 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v10 6/6] test: add a test for null PMD
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
                     ` (4 preceding siblings ...)
  2026-02-01 17:17   ` [PATCH v10 5/6] test: support larger packet sizes in burst generator Stephen Hemminger
@ 2026-02-01 17:17   ` Stephen Hemminger
  5 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-01 17:17 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based off of existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 935 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 936 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index f4d04a6e42..a1a0915c81 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..bd520af64f
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,935 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+/*
+ * Create a null vdev with the given devargs and look up its port id.
+ * On lookup failure the vdev is destroyed again so no device leaks.
+ * Returns 0 on success, negative value on failure.
+ */
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure a null port with one Rx and one Tx queue and start it.
+ * The Rx queue uses the file-scope mempool 'mp', so test_null_setup()
+ * must have run first.
+ * Returns 0 on success, negative value on failure.
+ */
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	int ret;
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup Rx queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup Tx queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite setup: create the shared mbuf pool and the default null port
+ * used by most tests below (stored in file-scope 'mp' and 'port_id').
+ */
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Suite teardown: stop/close the default port, destroy the vdev and
+ * release the mempool. Return values are ignored; this is best-effort
+ * cleanup.
+ */
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic Rx - should return empty packets.
+ * The null PMD is expected to deliver a full burst of dummy packets of
+ * the default size (PACKET_SIZE = 64 bytes), stamped with the receiving
+ * port id.
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* Rx should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Allocate a burst of mbufs and give each a random payload length
+ * between RTE_ETHER_MIN_LEN and the available tailroom.
+ * On any failure all mbufs of the burst are freed and -1 is returned.
+ *
+ * NOTE(review): assumes tailroom >= RTE_ETHER_MIN_LEN, otherwise the
+ * rte_rand_max() argument underflows -- this holds for mbufs from a
+ * pool created with RTE_MBUF_DEFAULT_BUF_SIZE, as done in setup.
+ */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+
+		if (rte_pktmbuf_append(m, len) == NULL) {
+			rte_pktmbuf_free_bulk(bufs, burst_size);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic Tx - should free all packets.
+ * After the burst the mempool available count must not be lower than
+ * before it (the transmitted mbufs are expected to be returned to the
+ * pool by the PMD; '>=' tolerates per-lcore cache effects).
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for Tx */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* Tx should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to Tx %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Statistics verification.
+ * Resets the counters, performs one Rx and one Tx burst, then checks
+ * that ipackets/opackets match the burst size exactly.
+ */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform Rx */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+
+	/* Allocate and perform Tx; tx mbufs are consumed by the PMD */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after Rx/Tx");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Custom packet size.
+ * Creates a second null port with the "size=256" devarg and verifies
+ * that received dummy packets carry that size.
+ * NOTE: the devargs string and 'custom_size' must be kept in sync.
+ */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Copy mode.
+ * Creates a port with the "copy=1" devarg and checks that Rx still
+ * delivers a full burst. Only the burst count is verified here.
+ */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst in copy mode failed");
+
+	/* Free Rx mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: No-Rx mode.
+ * With the "no-rx=1" devarg the port must receive nothing while Tx
+ * keeps working. The tx mbufs are not freed here because the PMD is
+ * expected to consume them (see test_null_tx_basic).
+ */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* Tx in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Link status.
+ * Link must report UP/10G/full-duplex after start and DOWN after stop.
+ * The port is restarted at the end so subsequent tests see a running
+ * device.
+ */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Device info.
+ * Checks values reported by rte_eth_dev_info_get() against what the
+ * null PMD advertises: single MAC address, unrestricted MTU range,
+ * MULTI_SEGS/MT_LOCKFREE Tx offloads and RSS capabilities.
+ */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check Tx offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Multiple Rx/Tx bursts.
+ * Runs several back-to-back bursts in each direction and verifies the
+ * aggregate packet counters equal num_bursts * BURST_SIZE.
+ */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple Rx bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple Tx bursts; tx mbufs are consumed by the PMD */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to Tx %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration, so this test creates
+ * its own two-queue null vdev rather than reusing the shared test port.
+ * Exercises rss_hash_conf_get/update round-trip with a 40-byte key.
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];	/* standard Toeplitz-style 40-byte key buffer */
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	/* request exactly the hash types the driver advertises */
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with a recognizable all-0x55 key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Clear the local buffer first so the read-back below is meaningful */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify every byte of the key came back as the 0x55 pattern */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup: stop, close, and remove the vdev created for this test */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration, so this test
+ * creates its own two-queue null vdev, round-trips a redirection
+ * table through update/query, and verifies the contents.
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/*
+	 * reta_conf[] above is a fixed stack buffer sized for up to 128
+	 * RETA entries; guard against a driver reporting a larger table,
+	 * which would overflow the buffer in the loops below.
+	 */
+	TEST_ASSERT(dev_info.reta_size <= RTE_ETH_RSS_RETA_SIZE_128,
+		    "RETA size %u larger than test buffer",
+		    dev_info.reta_size);
+
+	/*
+	 * The loops below process whole groups of RTE_ETH_RETA_GROUP_SIZE
+	 * entries; a partial trailing group would be silently skipped, so
+	 * require the size to be an exact multiple.
+	 */
+	TEST_ASSERT(dev_info.reta_size % RTE_ETH_RETA_GROUP_SIZE == 0,
+		    "RETA size %u not a multiple of group size",
+		    dev_info.reta_size);
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Fill RETA config: spread entries round-robin across the queues */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA back into a zeroed buffer (mask selects all entries) */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify the queried table matches the round-robin pattern written */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup: stop, close, and remove the vdev created for this test */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Stats reset
+ * Generate some Rx traffic, confirm the counters are non-zero, then
+ * reset and confirm all packet/byte counters read back as zero.
+ */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_mbuf *pkts[BURST_SIZE];
+	struct rte_eth_stats stats;
+	uint16_t received;
+	int ret;
+
+	/* Pull one burst so the counters are guaranteed non-zero */
+	received = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
+	TEST_ASSERT(received > 0, "Failed to receive packets");
+	rte_pktmbuf_free_bulk(pkts, received);
+
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Clear the counters */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Every counter must now read back as zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: MAC address operations
+ * Reads the current default MAC, programs a new one, verifies the
+ * change by reading it back, and then restores the original address
+ * so later test cases see the shared port unchanged.
+ */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr orig_mac, check_mac;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address so it can be restored afterwards */
+	ret = rte_eth_macaddr_get(port_id, &orig_mac);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	/* Read back and verify the update actually took effect */
+	ret = rte_eth_macaddr_get(port_id, &check_mac);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address after set");
+	TEST_ASSERT(rte_is_same_ether_addr(&check_mac, &new_mac),
+		    "MAC address was not updated");
+
+	/* Restore the original address to keep later tests unaffected */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &orig_mac);
+	TEST_ASSERT(ret == 0, "Failed to restore MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: Promiscuous and allmulticast modes
+ * The null PMD comes up with both modes enabled; verify the getters
+ * report that state.
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int mode;
+
+	/* Promiscuous mode is on by default for the null PMD */
+	mode = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(mode == 1, "Expected promiscuous mode enabled");
+
+	/* Likewise allmulticast starts out enabled */
+	mode = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(mode == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/* Multi-threaded Tx test structures and worker function */
+#define MT_TX_BURSTS_PER_LCORE 100	/* bursts each worker transmits */
+#define MT_TX_BURST_SIZE 8u		/* mbufs per Tx burst */
+
+/* Per-worker arguments and results; one instance per launched lcore */
+struct mt_tx_args {
+	uint16_t port;
+	uint16_t queue;
+	struct rte_mempool *pool;
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *pkts[MT_TX_BURST_SIZE];
+	unsigned int iter;
+	uint16_t i, sent;
+
+	for (iter = 0; iter < MT_TX_BURSTS_PER_LCORE; iter++) {
+		/* Grab a fresh burst of mbufs from the shared pool */
+		if (rte_pktmbuf_alloc_bulk(args->pool, pkts,
+					   MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Give each mbuf a minimal Ethernet frame length */
+		for (i = 0; i < MT_TX_BURST_SIZE; i++) {
+			pkts[i]->data_len = RTE_ETHER_MIN_LEN;
+			pkts[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* All workers transmit on the same queue (MT_LOCKFREE) */
+		sent = rte_eth_tx_burst(args->port, args->queue,
+					pkts, MT_TX_BURST_SIZE);
+		args->tx_count += sent;
+
+		/* Anything the driver did not accept must be freed here */
+		if (sent < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&pkts[sent],
+					      MT_TX_BURST_SIZE - sent);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded Tx on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ * Launches up to 4 worker lcores that all transmit on queue 0 and
+ * verifies the port Tx counter equals the sum reported by the workers.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE] = { 0 };
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Skip rather than fail if the device lacks MT_LOCKFREE */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	/* Concurrency needs at least two threads to be meaningful */
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/*
+	 * Size the pool for the whole run with 2x headroom; the cap to 4
+	 * workers above keeps this bounded.
+	 */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Reset stats so opackets reflects only this test's traffic */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers; re-count from zero, capped at 4 as above.
+	 * Note: args->queue is left 0 so all workers share Tx queue 0. */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].pool = mt_pool;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT Tx test\n", num_workers);
+
+	/* Join all workers (waiting on a never-launched lcore is harmless) */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up Tx counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Port counter must equal the per-worker totals exactly */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total Tx from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Workers have joined; mbufs should be back in the pool by now
+	 * (assumes the driver frees tx mbufs at burst time - TODO confirm) */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+/* Test suite definition; setup/teardown manage the shared port and pool */
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Entry point: run the full null PMD test suite */
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, NOHUGE_OK, ASAN_OK, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v11 0/7] net/null: bug fixes and improvements
  2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
                   ` (9 preceding siblings ...)
  2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
@ 2026-02-02 22:16 ` Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 1/7] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
                     ` (6 more replies)
  10 siblings, 7 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

This series makes several improvements to the null PMD driver and
adds comprehensive unit tests.

The series:
1) Cleans up the info_get callback by removing redundant argument
   checks and using proper constants. Also sets min_rx_bufsize to
   the configured packet_size, enabling the ethdev layer to reject
   undersized mbuf pools.

2) Adds proper validation for numeric devargs (packet_size,
   packet_copy, no_rx) using a common helper function that rejects
   empty strings, trailing non-numeric characters, and out-of-range
   values.

3) Removes redundant argument validation in dev_ops callbacks that
   is already performed by the ethdev layer.

4) Adds VLAN insert and strip offload support. The driver advertises
   RX VLAN strip and TX VLAN insert capabilities. When VLAN strip is
   enabled, received mbufs are marked with the appropriate flags and
   a dummy vlan_tci. The vlan_offload_set dev_op is implemented for
   runtime toggling via rte_eth_dev_set_vlan_offload().

5) Extends the packet burst generator to support larger packets
   by widening parameter types from uint8_t to uint16_t.

6) Adds a comprehensive unit test for the null PMD covering RX/TX,
   statistics, custom packet sizes, copy mode, no-RX mode, link
   status, device info, RSS, RETA, VLAN strip, VLAN insert, and
   multi-threaded TX.

Patches 1, 2, and 3 are marked for stable backport as they fix issues
where invalid mbufs could be generated or invalid arguments silently
accepted.

v11:
  - Add VLAN insert and strip offload support to the driver
  - Rebase to main

v10:
  - Fix mbuf leak in eth_null_copy_tx

v9:
  - Consolidated argument validation into a single patch with
    common helper function get_unsigned_arg()
  - Merged pool vs packet size check into the cleanup patch
  - Removed redundant argument validation in a separate patch
  - Reordered patches to put fixes before tests

Stephen Hemminger (7):
  net/null: fix missing mbuf leakage in the copy transmit
  net/null: cleanup info response
  net/null: validate the numeric devargs
  net/null: remove redundant argument validation
  test: support larger packet sizes in burst generator
  test: add a test for null PMD
  net/null: add VLAN insert and strip offload support

 app/test/meson.build              |   1 +
 app/test/packet_burst_generator.c |  34 +-
 app/test/packet_burst_generator.h |   4 +-
 app/test/test_pmd_null.c          | 947 ++++++++++++++++++++++++++++++
 drivers/net/null/rte_eth_null.c   | 156 ++---
 5 files changed, 1050 insertions(+), 92 deletions(-)
 create mode 100644 app/test/test_pmd_null.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 61+ messages in thread

* [PATCH v11 1/7] net/null: fix missing mbuf leakage in the copy transmit
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
@ 2026-02-02 22:16   ` Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 2/7] net/null: cleanup info response Stephen Hemminger
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa

The transmit variant that copies packet data into the dummy packet
never frees the transmitted mbufs, leaking them. This looks like a
regression introduced when the transmit logic was optimized to do
bulk free.

Fixes: 9946ba548bf8 ("net/null: optimize Tx")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..0931dac12e 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -172,6 +172,7 @@ eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(m, void *), len);
 		bytes += m->pkt_len;
 	}
+	rte_pktmbuf_free_bulk(bufs, nb_bufs);
 
 	rte_atomic_fetch_add_explicit(&h->tx_pkts, nb_bufs, rte_memory_order_relaxed);
 	rte_atomic_fetch_add_explicit(&h->tx_bytes, bytes, rte_memory_order_relaxed);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v11 2/7] net/null: cleanup info response
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 1/7] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
@ 2026-02-02 22:16   ` Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 3/7] net/null: validate the numeric devargs Stephen Hemminger
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Bernard Iremonger

The info_get callback doesn't need to check its arguments
since that is already done by the ethdev layer.

The null driver generates fake packets of configured packet_size.
The mbuf pool passed when setting up Rx queue must have space
to hold the requested packet size; otherwise eth_null_copy_rx()
would generate invalid mbufs.

Setting min_rx_bufsize to packet_size enables the existing
validation in rte_eth_rx_queue_setup() to reject undersized pools.

Fixes: c743e50c475f ("null: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 0931dac12e..f2ffdc2b2b 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -299,17 +299,13 @@ static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals;
-
-	if ((dev == NULL) || (dev_info == NULL))
-		return -EINVAL;
+	struct pmd_internals *internals = dev->data->dev_private;
 
-	internals = dev->data->dev_private;
 	dev_info->max_mac_addrs = 1;
-	dev_info->max_rx_pktlen = (uint32_t)-1;
+	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
-	dev_info->min_rx_bufsize = 0;
+	dev_info->min_rx_bufsize = internals->packet_size;
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v11 3/7] net/null: validate the numeric devargs
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 1/7] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 2/7] net/null: cleanup info response Stephen Hemminger
@ 2026-02-02 22:16   ` Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 4/7] net/null: remove redundant argument validation Stephen Hemminger
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, stable, Tetsuya Mukawa, Ferruh Yigit

The driver was not correctly validating the arguments for packet_size,
packet_copy, and no_rx. The original parsing had several issues:
- Empty strings were not rejected
- Trailing non-numeric characters were silently ignored
- Large values could wrap around causing unexpected behavior

Add a common helper function get_unsigned_arg() that properly validates
numeric arguments by checking for empty input, ensuring the entire
string is consumed, and enforcing appropriate maximum values for each
parameter.

Fixes: 4df90194f2a2 ("net/null: prefer unsigned int")
Cc: stable@dpdk.org

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 59 +++++++++++++++------------------
 1 file changed, 26 insertions(+), 33 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index f2ffdc2b2b..0b6c9577cd 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -603,56 +603,49 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	return 0;
 }
 
-static inline int
-get_packet_size_arg(const char *key __rte_unused,
-		const char *value, void *extra_args)
+static int
+get_unsigned_arg(const char *str, unsigned int *retval,
+		 unsigned int maxval)
 {
-	const char *a = value;
-	unsigned int *packet_size = extra_args;
+	char *endp = NULL;
+	unsigned long val;
 
-	if ((value == NULL) || (extra_args == NULL))
+	if (str == NULL || retval == NULL)
 		return -EINVAL;
 
-	*packet_size = (unsigned int)strtoul(a, NULL, 0);
-	if (*packet_size == UINT_MAX)
-		return -1;
+	if (*str == '\0')
+		return -EINVAL; /* empty string */
 
+	val = strtoul(str, &endp, 0);
+	if (*endp != '\0')
+		return -EINVAL; /* non-numeric character */
+
+	if (val > maxval)
+		return -ERANGE;
+
+	*retval = val;
 	return 0;
 }
 
-static inline int
-get_packet_copy_arg(const char *key __rte_unused,
+static int
+get_packet_size_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
-	const char *a = value;
-	unsigned int *packet_copy = extra_args;
-
-	if ((value == NULL) || (extra_args == NULL))
-		return -EINVAL;
-
-	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
-	if (*packet_copy == UINT_MAX)
-		return -1;
+	return get_unsigned_arg(value, extra_args, UINT16_MAX);
+}
 
-	return 0;
+static int
+get_packet_copy_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	return get_unsigned_arg(value, extra_args, UINT32_MAX);
 }
 
 static int
 get_packet_no_rx_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
-	const char *a = value;
-	unsigned int no_rx;
-
-	if (value == NULL || extra_args == NULL)
-		return -EINVAL;
-
-	no_rx = (unsigned int)strtoul(a, NULL, 0);
-	if (no_rx != 0 && no_rx != 1)
-		return -1;
-
-	*(unsigned int *)extra_args = no_rx;
-	return 0;
+	return get_unsigned_arg(value, extra_args, 1);
 }
 
 static int
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v11 4/7] net/null: remove redundant argument validation
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
                     ` (2 preceding siblings ...)
  2026-02-02 22:16   ` [PATCH v11 3/7] net/null: validate the numeric devargs Stephen Hemminger
@ 2026-02-02 22:16   ` Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 5/7] test: support larger packet sizes in burst generator Stephen Hemminger
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

The dev_ops callbacks for stop, rx_queue_setup, and tx_queue_setup
are only called from the ethdev layer which already validates the
device pointer and queue index arguments.

Remove the redundant checks for:
 - dev == NULL in eth_dev_stop, eth_rx_queue_setup, eth_tx_queue_setup
 - queue_id >= nb_queues in eth_rx_queue_setup, eth_tx_queue_setup

Also consolidate variable declarations with their initializations
and remove unnecessary NULL initialization in rte_pmd_null_remove.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 44 ++++++++-------------------------
 1 file changed, 10 insertions(+), 34 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 0b6c9577cd..672459fe05 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -208,9 +208,6 @@ eth_dev_stop(struct rte_eth_dev *dev)
 {
 	uint16_t i;
 
-	if (dev == NULL)
-		return 0;
-
 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -228,25 +225,16 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		const struct rte_eth_rxconf *rx_conf __rte_unused,
 		struct rte_mempool *mb_pool)
 {
+	struct pmd_internals *internals = dev->data->dev_private;
+	unsigned int packet_size = internals->packet_size;
 	struct rte_mbuf *dummy_packet;
-	struct pmd_internals *internals;
-	unsigned int packet_size;
 
-	if ((dev == NULL) || (mb_pool == NULL))
+	if (mb_pool == NULL)
 		return -EINVAL;
 
-	internals = dev->data->dev_private;
-
-	if (rx_queue_id >= dev->data->nb_rx_queues)
-		return -ENODEV;
-
-	packet_size = internals->packet_size;
-
 	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
-	dev->data->rx_queues[rx_queue_id] =
-		&internals->rx_null_queues[rx_queue_id];
-	dummy_packet = rte_zmalloc_socket(NULL,
-			packet_size, 0, dev->data->numa_node);
+	dev->data->rx_queues[rx_queue_id] = &internals->rx_null_queues[rx_queue_id];
+	dummy_packet = rte_zmalloc_socket(NULL, packet_size, 0, dev->data->numa_node);
 	if (dummy_packet == NULL)
 		return -ENOMEM;
 
@@ -262,24 +250,12 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		unsigned int socket_id __rte_unused,
 		const struct rte_eth_txconf *tx_conf __rte_unused)
 {
+	struct pmd_internals *internals = dev->data->dev_private;
+	unsigned int packet_size = internals->packet_size;
 	struct rte_mbuf *dummy_packet;
-	struct pmd_internals *internals;
-	unsigned int packet_size;
-
-	if (dev == NULL)
-		return -EINVAL;
 
-	internals = dev->data->dev_private;
-
-	if (tx_queue_id >= dev->data->nb_tx_queues)
-		return -ENODEV;
-
-	packet_size = internals->packet_size;
-
-	dev->data->tx_queues[tx_queue_id] =
-		&internals->tx_null_queues[tx_queue_id];
-	dummy_packet = rte_zmalloc_socket(NULL,
-			packet_size, 0, dev->data->numa_node);
+	dev->data->tx_queues[tx_queue_id] = &internals->tx_null_queues[tx_queue_id];
+	dummy_packet = rte_zmalloc_socket(NULL, packet_size, 0, dev->data->numa_node);
 	if (dummy_packet == NULL)
 		return -ENOMEM;
 
@@ -740,7 +716,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
 static int
 rte_pmd_null_remove(struct rte_vdev_device *dev)
 {
-	struct rte_eth_dev *eth_dev = NULL;
+	struct rte_eth_dev *eth_dev;
 
 	if (!dev)
 		return -EINVAL;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v11 5/7] test: support larger packet sizes in burst generator
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
                     ` (3 preceding siblings ...)
  2026-02-02 22:16   ` [PATCH v11 4/7] net/null: remove redundant argument validation Stephen Hemminger
@ 2026-02-02 22:16   ` Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 6/7] test: add a test for null PMD Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 7/7] net/null: add VLAN insert and strip offload support Stephen Hemminger
  6 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Widen the parameter types in the packet burst generator functions
to support larger values:
- pkt_len: uint8_t to uint16_t for larger packet sizes
- nb_pkt_segs: uint8_t to uint16_t to match mbuf nb_segs
- nb_pkt_per_burst: int to uint16_t to match tx_burst limit

Remove the incorrect RTE_MAX_SEGS_PER_PKT macro that claimed
nb_segs is 8-bit when it is actually 16-bit.

Add validation to prevent division by zero when nb_pkt_segs is 0.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/packet_burst_generator.c | 34 ++++++++++++++++++-------------
 app/test/packet_burst_generator.h |  4 ++--
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 4c17737739..346c69e6ce 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -205,24 +205,24 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
 	return pkt_len;
 }
 
-/*
- * The maximum number of segments per packet is used when creating
- * scattered transmit packets composed of a list of mbufs.
- */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
-
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -300,13 +300,19 @@ generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs)
 {
-	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
+	int i, nb_pkt = 0;
+	size_t eth_hdr_size;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
-	size_t eth_hdr_size;
-	int i, nb_pkt = 0;
+	uint16_t pkt_seg_data_len;
+
+	if (nb_pkt_segs == 0)
+		return -1;
+
+	/* Calculate per-segment data length */
+	pkt_seg_data_len = pkt_len / nb_pkt_segs;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index cce41bcd0f..bc16764e77 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -62,14 +62,14 @@ int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
 		struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 int
 generate_packet_burst_proto(struct rte_mempool *mp,
 		struct rte_mbuf **pkts_burst, struct rte_ether_hdr *eth_hdr,
 		uint8_t vlan_enabled, void *ip_hdr,
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+		uint16_t nb_pkt_per_burst, uint16_t pkt_len, uint16_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v11 6/7] test: add a test for null PMD
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
                     ` (4 preceding siblings ...)
  2026-02-02 22:16   ` [PATCH v11 5/7] test: support larger packet sizes in burst generator Stephen Hemminger
@ 2026-02-02 22:16   ` Stephen Hemminger
  2026-02-02 22:16   ` [PATCH v11 7/7] net/null: add VLAN insert and strip offload support Stephen Hemminger
  6 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger

Add a test for null PMD including different packet sizes.

This test was generated with Claude AI based on the existing
test_pmd_ring.c with some cleanup afterwards.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/meson.build     |   1 +
 app/test/test_pmd_null.c | 935 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 936 insertions(+)
 create mode 100644 app/test/test_pmd_null.c

diff --git a/app/test/meson.build b/app/test/meson.build
index f4d04a6e42..a1a0915c81 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -141,6 +141,7 @@ source_file_deps = {
     'test_per_lcore.c': [],
     'test_pflock.c': [],
     'test_pie.c': ['sched'],
+    'test_pmd_null.c': ['net_null', 'ethdev', 'bus_vdev'],
     'test_pmd_perf.c': ['ethdev', 'net'] + packet_burst_generator_deps,
     'test_pmd_ring.c': ['net_ring', 'ethdev', 'bus_vdev'],
     'test_pmd_ring_perf.c': ['ethdev', 'net_ring', 'bus_vdev'],
diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
new file mode 100644
index 0000000000..bd520af64f
--- /dev/null
+++ b/app/test/test_pmd_null.c
@@ -0,0 +1,935 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_MBUFS 256
+#define MBUF_CACHE_SIZE 32
+#define BURST_SIZE 32
+#define RING_SIZE 512
+#define PACKET_SIZE 64
+
+/* Test device names */
+#define NULL_DEV_NAME "net_null_test"
+
+static struct rte_mempool *mp;
+static uint16_t port_id = RTE_MAX_ETHPORTS;
+
+static int
+create_null_port(const char *name, const char *args, uint16_t *out_port_id)
+{
+	int ret;
+
+	ret = rte_vdev_init(name, args);
+	if (ret != 0) {
+		printf("Failed to create null device '%s': %d\n", name, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_get_port_by_name(name, out_port_id);
+	if (ret != 0) {
+		printf("Failed to get port id for '%s': %d\n", name, ret);
+		rte_vdev_uninit(name);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+configure_null_port(uint16_t pid)
+{
+	struct rte_eth_conf port_conf = {0};
+	int ret;
+
+	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
+	if (ret != 0) {
+		printf("Failed to configure port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_rx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL, mp);
+	if (ret != 0) {
+		printf("Failed to setup Rx queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_tx_queue_setup(pid, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(pid),
+				     NULL);
+	if (ret != 0) {
+		printf("Failed to setup Tx queue for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	ret = rte_eth_dev_start(pid);
+	if (ret != 0) {
+		printf("Failed to start port %u: %d\n", pid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+test_null_setup(void)
+{
+	/* Create mempool for mbufs */
+	mp = rte_pktmbuf_pool_create("null_test_pool", NUM_MBUFS,
+				     MBUF_CACHE_SIZE, 0,
+				     RTE_MBUF_DEFAULT_BUF_SIZE,
+				     rte_socket_id());
+	if (mp == NULL) {
+		printf("Failed to create mempool\n");
+		return -1;
+	}
+
+	/* Create and configure null port */
+	if (create_null_port(NULL_DEV_NAME, NULL, &port_id) != 0) {
+		printf("Failed to create null port\n");
+		return -1;
+	}
+
+	if (configure_null_port(port_id) != 0) {
+		printf("Failed to configure null port\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+test_null_teardown(void)
+{
+	/* Stop and close test port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+	rte_vdev_uninit(NULL_DEV_NAME);
+	port_id = RTE_MAX_ETHPORTS;
+
+	rte_mempool_free(mp);
+	mp = NULL;
+}
+
+/*
+ * Test: Basic Rx - should return empty packets
+ */
+static int
+test_null_rx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	unsigned int i;
+
+	/* Rx should return requested number of empty packets */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE,
+		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
+
+	/* Verify packets have expected properties */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i] != NULL, "Received NULL mbuf");
+		TEST_ASSERT(bufs[i]->port == port_id,
+			    "Unexpected port id in mbuf: %u", bufs[i]->port);
+
+		/* Default packet size is 64 bytes */
+		TEST_ASSERT(bufs[i]->pkt_len == PACKET_SIZE,
+			    "Unexpected pkt_len: %u", bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == PACKET_SIZE,
+			    "Unexpected data_len: %u", bufs[i]->data_len);
+	}
+
+	/* Free received mbufs */
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Create random valid ethernet packets */
+static int
+test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
+{
+	unsigned int i;
+
+	if (rte_pktmbuf_alloc_bulk(mp, bufs, burst_size) != 0)
+		return -1;
+
+	for (i = 0; i < burst_size; i++) {
+		struct rte_mbuf *m = bufs[i];
+		uint16_t len;
+
+		/* Choose random length between ether min and available space */
+		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
+			+ RTE_ETHER_MIN_LEN;
+
+		if (rte_pktmbuf_append(m, len) == NULL) {
+			rte_pktmbuf_free_bulk(bufs, burst_size);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Test: Basic Tx - should free all packets
+ */
+static int
+test_null_tx_basic(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_tx;
+	unsigned int pool_count_before, pool_count_after;
+
+	/* Allocate mbufs for Tx */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
+
+	pool_count_before = rte_mempool_avail_count(mp);
+
+	/* Tx should accept and free all packets */
+	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to Tx %u packets, but sent %u", BURST_SIZE, nb_tx);
+
+	pool_count_after = rte_mempool_avail_count(mp);
+
+	/* Verify mbufs were freed - pool should have same count */
+	TEST_ASSERT(pool_count_after >= pool_count_before,
+		    "Mbufs not freed: before=%u, after=%u",
+		    pool_count_before, pool_count_after);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Statistics verification */
+static int
+test_null_stats(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t nb_rx, nb_tx;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Get initial stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
+	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
+
+	/* Perform Rx */
+	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+
+	/* Allocate and perform Tx */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst failed");
+
+	/* Get updated stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after Rx/Tx");
+
+	/* Verify stats */
+	TEST_ASSERT(stats.ipackets == BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    BURST_SIZE, stats.opackets);
+
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Custom packet size */
+static int
+test_null_custom_size(void)
+{
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t custom_port;
+	uint16_t nb_rx;
+	unsigned int i;
+	const unsigned int custom_size = 256;
+	int ret;
+
+	/* Create null device with custom size */
+	ret = create_null_port("net_null_size_test", "size=256", &custom_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with custom size");
+
+	ret = configure_null_port(custom_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx should return packets with custom size */
+	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+
+	/* Verify custom packet size */
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT(bufs[i]->pkt_len == custom_size,
+			    "Expected pkt_len=%u, got %u",
+			    custom_size, bufs[i]->pkt_len);
+		TEST_ASSERT(bufs[i]->data_len == custom_size,
+			    "Expected data_len=%u, got %u",
+			    custom_size, bufs[i]->data_len);
+	}
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Cleanup custom port */
+	rte_eth_dev_stop(custom_port);
+	rte_eth_dev_close(custom_port);
+	rte_vdev_uninit("net_null_size_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Copy mode */
+static int
+test_null_copy_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	uint16_t copy_port, nb_rx;
+	int ret;
+
+	/* Create null device with copy enabled */
+	ret = create_null_port("net_null_copy_test", "copy=1", &copy_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with copy mode");
+
+	ret = configure_null_port(copy_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx in copy mode should work */
+	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst in copy mode failed");
+
+	/* Free Rx mbufs */
+	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
+
+	/* Cleanup */
+	rte_eth_dev_stop(copy_port);
+	rte_eth_dev_close(copy_port);
+	rte_vdev_uninit("net_null_copy_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: No-Rx mode */
+static int
+test_null_no_rx_mode(void)
+{
+	struct rte_mbuf *rx_bufs[BURST_SIZE];
+	struct rte_mbuf *tx_bufs[BURST_SIZE];
+	uint16_t norx_port, nb_rx, nb_tx;
+	int ret;
+
+	/* Create null device with no-rx enabled */
+	ret = create_null_port("net_null_norx_test", "no-rx=1", &norx_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port with no-rx mode");
+
+	ret = configure_null_port(norx_port);
+	TEST_ASSERT(ret == 0, "Failed to configure null port");
+
+	/* Rx in no-rx mode should return 0 packets */
+	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == 0,
+		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
+
+	/* Tx in no-rx mode should still work (frees packets) */
+	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
+		    "Could not allocate tx mbufs");
+
+	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst in no-rx mode failed");
+
+	/* Cleanup */
+	rte_eth_dev_stop(norx_port);
+	rte_eth_dev_close(norx_port);
+	rte_vdev_uninit("net_null_norx_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Link status */
+static int
+test_null_link_status(void)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status");
+
+	/* After start, link should be UP */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_UP,
+		    "Expected link UP after start");
+	TEST_ASSERT(link.link_speed == RTE_ETH_SPEED_NUM_10G,
+		    "Expected 10G link speed");
+	TEST_ASSERT(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX,
+		    "Expected full duplex");
+
+	/* Stop the device */
+	ret = rte_eth_dev_stop(port_id);
+	TEST_ASSERT(ret == 0, "Failed to stop device");
+
+	ret = rte_eth_link_get_nowait(port_id, &link);
+	TEST_ASSERT(ret == 0, "Failed to get link status after stop");
+
+	/* After stop, link should be DOWN */
+	TEST_ASSERT(link.link_status == RTE_ETH_LINK_DOWN,
+		    "Expected link DOWN after stop");
+
+	/* Restart for subsequent tests */
+	ret = rte_eth_dev_start(port_id);
+	TEST_ASSERT(ret == 0, "Failed to restart device");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Device info */
+static int
+test_null_dev_info(void)
+{
+	struct rte_eth_dev_info dev_info;
+	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Verify expected device info values */
+	TEST_ASSERT(dev_info.max_mac_addrs == 1,
+		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
+
+	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
+		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
+	TEST_ASSERT(dev_info.min_mtu == min_mtu,
+		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+
+	/* Check Tx offload capabilities */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
+		    "Expected MULTI_SEGS TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
+		    "Expected MT_LOCKFREE TX offload capability");
+
+	/* Check RSS capabilities */
+	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
+	TEST_ASSERT(dev_info.hash_key_size == 40,
+		    "Expected hash_key_size=40, got %u", dev_info.hash_key_size);
+	TEST_ASSERT(dev_info.flow_type_rss_offloads != 0,
+		    "Expected RSS offloads to be set");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Multiple Rx/Tx bursts */
+static int
+test_null_multiple_bursts(void)
+{
+	struct rte_eth_stats stats;
+	uint16_t nb_rx, nb_tx;
+	unsigned int burst;
+	const unsigned int num_bursts = 10;
+	int ret;
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Perform multiple Rx bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_rx == BURST_SIZE,
+			    "Burst %u: Expected %u packets, got %u",
+			    burst, BURST_SIZE, nb_rx);
+
+		rte_pktmbuf_free_bulk(bufs, nb_rx);
+	}
+
+	/* Perform multiple Tx bursts */
+	for (burst = 0; burst < num_bursts; burst++) {
+		struct rte_mbuf *bufs[BURST_SIZE];
+
+		TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+			    "Could not allocate tx mbufs");
+
+		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
+		TEST_ASSERT(nb_tx == BURST_SIZE,
+			    "Burst %u: Expected to Tx %u, sent %u",
+			    burst, BURST_SIZE, nb_tx);
+	}
+
+	/* Verify total stats */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	TEST_ASSERT(stats.ipackets == num_bursts * BURST_SIZE,
+		    "Expected ipackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.ipackets);
+	TEST_ASSERT(stats.opackets == num_bursts * BURST_SIZE,
+		    "Expected opackets=%u, got %"PRIu64,
+		    num_bursts * BURST_SIZE, stats.opackets);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RSS configuration
+ * Note: RSS requires multi-queue configuration
+ */
+static int
+test_null_rss_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_conf rss_conf;
+	struct rte_eth_conf port_conf = {0};
+	uint8_t rss_key[40];
+	uint16_t rss_port;
+	const uint16_t num_queues = 2;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RSS testing with multiple queues */
+	ret = create_null_port("net_null_rss_test", NULL, &rss_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RSS test");
+
+	ret = rte_eth_dev_info_get(rss_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(rss_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RSS port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(rss_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(rss_port);
+	TEST_ASSERT(ret == 0, "Failed to start RSS port");
+
+	/* Get current RSS config */
+	memset(&rss_conf, 0, sizeof(rss_conf));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config");
+
+	/* Update RSS config with new key */
+	memset(rss_key, 0x55, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+	rss_conf.rss_key_len = sizeof(rss_key);
+	rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_rss_hash_update(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to update RSS hash config");
+
+	/* Verify the update */
+	memset(rss_key, 0, sizeof(rss_key));
+	rss_conf.rss_key = rss_key;
+
+	ret = rte_eth_dev_rss_hash_conf_get(rss_port, &rss_conf);
+	TEST_ASSERT(ret == 0, "Failed to get RSS hash config after update");
+
+	/* Verify key was updated */
+	for (unsigned int i = 0; i < sizeof(rss_key); i++) {
+		TEST_ASSERT(rss_key[i] == 0x55,
+			    "RSS key not updated at byte %u", i);
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(rss_port);
+	rte_eth_dev_close(rss_port);
+	rte_vdev_uninit("net_null_rss_test");
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Test: RETA (Redirection Table) configuration
+ * Note: RETA requires multi-queue RSS configuration
+ */
+static int
+test_null_reta_config(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
+						 RTE_ETH_RETA_GROUP_SIZE];
+	struct rte_eth_conf port_conf = {0};
+	uint16_t reta_port;
+	const uint16_t num_queues = 2;
+	unsigned int i, j, nreta;
+	uint16_t q;
+	int ret;
+
+	/* Create a new null device for RETA testing with multiple queues */
+	ret = create_null_port("net_null_reta_test", NULL, &reta_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for RETA test");
+
+	ret = rte_eth_dev_info_get(reta_port, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	TEST_ASSERT(dev_info.reta_size > 0, "RETA size is zero");
+
+	/* Configure with RSS enabled and multiple queues */
+	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+	port_conf.rx_adv_conf.rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+	ret = rte_eth_dev_configure(reta_port, num_queues, num_queues, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure RETA port");
+
+	for (q = 0; q < num_queues; q++) {
+		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL, mp);
+		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+
+		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
+					     rte_eth_dev_socket_id(reta_port),
+					     NULL);
+		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+	}
+
+	ret = rte_eth_dev_start(reta_port);
+	TEST_ASSERT(ret == 0, "Failed to start RETA port");
+
+	/* Initialize RETA config */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	nreta = dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
+	for (i = 0; i < nreta; i++) {
+		reta_conf[i].mask = UINT64_MAX;
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
+			reta_conf[i].reta[j] = j % num_queues;
+	}
+
+	/* Update RETA */
+	ret = rte_eth_dev_rss_reta_update(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to update RETA");
+
+	/* Query RETA */
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < nreta; i++)
+		reta_conf[i].mask = UINT64_MAX;
+
+	ret = rte_eth_dev_rss_reta_query(reta_port, reta_conf, dev_info.reta_size);
+	TEST_ASSERT(ret == 0, "Failed to query RETA");
+
+	/* Verify RETA values */
+	for (i = 0; i < nreta; i++) {
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+			TEST_ASSERT(reta_conf[i].reta[j] == j % num_queues,
+				    "RETA mismatch at [%u][%u]", i, j);
+		}
+	}
+
+	/* Cleanup */
+	rte_eth_dev_stop(reta_port);
+	rte_eth_dev_close(reta_port);
+	rte_vdev_uninit("net_null_reta_test");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Stats reset */
+static int
+test_null_stats_reset(void)
+{
+	struct rte_eth_stats stats;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t nb_rx;
+	int ret;
+
+	/* Generate some traffic */
+	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx > 0, "Failed to receive packets");
+
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
+
+	/* Verify stats are non-zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+	TEST_ASSERT(stats.ipackets > 0, "Expected non-zero ipackets");
+
+	/* Reset stats */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Verify stats are zero */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats after reset");
+	TEST_ASSERT(stats.ipackets == 0,
+		    "Expected ipackets=0 after reset, got %"PRIu64,
+		    stats.ipackets);
+	TEST_ASSERT(stats.opackets == 0,
+		    "Expected opackets=0 after reset, got %"PRIu64,
+		    stats.opackets);
+	TEST_ASSERT(stats.ibytes == 0,
+		    "Expected ibytes=0 after reset, got %"PRIu64,
+		    stats.ibytes);
+	TEST_ASSERT(stats.obytes == 0,
+		    "Expected obytes=0 after reset, got %"PRIu64,
+		    stats.obytes);
+
+	return TEST_SUCCESS;
+}
+
+/* Test: MAC address operations */
+static int
+test_null_mac_addr(void)
+{
+	struct rte_ether_addr mac_addr;
+	struct rte_ether_addr new_mac = {
+		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+	};
+	int ret;
+
+	/* Get current MAC address */
+	ret = rte_eth_macaddr_get(port_id, &mac_addr);
+	TEST_ASSERT(ret == 0, "Failed to get MAC address");
+
+	/* Set new MAC address */
+	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+	TEST_ASSERT(ret == 0, "Failed to set MAC address");
+
+	return TEST_SUCCESS;
+}
+
+/* Test: Promiscuous and allmulticast modes */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
+
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+
+	return TEST_SUCCESS;
+}
+
+/* Multi-threaded Tx test structures and worker function */
+#define MT_TX_BURSTS_PER_LCORE 100
+#define MT_TX_BURST_SIZE 8u
+
+struct mt_tx_args {
+	uint16_t port;
+	uint16_t queue;
+	struct rte_mempool *pool;
+	uint64_t tx_count;	/* packets successfully transmitted */
+	int error;		/* non-zero if worker encountered error */
+};
+
+static int
+mt_tx_worker(void *arg)
+{
+	struct mt_tx_args *args = arg;
+	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
+	unsigned int burst;
+	uint16_t nb_tx;
+
+	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
+		/* Allocate mbufs */
+		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
+			args->error = -ENOMEM;
+			return -1;
+		}
+
+		/* Set minimal packet size */
+		for (uint16_t i = 0; i < MT_TX_BURST_SIZE; i++) {
+			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
+			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
+		}
+
+		/* Transmit on shared queue */
+		nb_tx = rte_eth_tx_burst(args->port, args->queue,
+					 bufs, MT_TX_BURST_SIZE);
+		args->tx_count += nb_tx;
+
+		/* Free any unsent packets */
+		if (nb_tx < MT_TX_BURST_SIZE)
+			rte_pktmbuf_free_bulk(&bufs[nb_tx],
+					      MT_TX_BURST_SIZE - nb_tx);
+	}
+
+	return 0;
+}
+
+/*
+ * Test: Multi-threaded Tx on same queue (MT_LOCKFREE)
+ * The null PMD advertises MT_LOCKFREE capability, meaning multiple
+ * threads can transmit on the same queue without external locking.
+ */
+static int
+test_null_mt_tx(void)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_mempool *mt_pool;
+	struct mt_tx_args worker_args[RTE_MAX_LCORE] = { 0 };
+	struct rte_eth_stats stats;
+	unsigned int lcore_id;
+	unsigned int num_workers = 0;
+	uint64_t total_expected = 0;
+	int ret;
+
+	/* Check MT_LOCKFREE capability */
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	TEST_ASSERT(ret == 0, "Failed to get device info");
+
+	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+		printf("  MT_LOCKFREE not supported, skipping test\n");
+		return TEST_SKIPPED;
+	}
+
+	/* Count available worker lcores */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		num_workers++;
+	}
+
+	if (num_workers < 2) {
+		printf("  Need at least 2 worker lcores for MT test, "
+		       "have %u, skipping\n", num_workers);
+		return TEST_SKIPPED;
+	}
+
+	/* Limit to reasonable number of workers */
+	if (num_workers > 4)
+		num_workers = 4;
+
+	/* Create larger mempool for multi-threaded test */
+	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
+					  num_workers * MT_TX_BURSTS_PER_LCORE *
+					  MT_TX_BURST_SIZE * 2,
+					  MBUF_CACHE_SIZE, 0,
+					  RTE_MBUF_DEFAULT_BUF_SIZE,
+					  rte_socket_id());
+	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+
+	/* Reset stats before test */
+	ret = rte_eth_stats_reset(port_id);
+	TEST_ASSERT(ret == 0, "Failed to reset stats");
+
+	/* Launch workers */
+	num_workers = 0;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (num_workers >= 4)
+			break;
+
+		worker_args[num_workers].port = port_id;
+		worker_args[num_workers].pool = mt_pool;
+
+		ret = rte_eal_remote_launch(mt_tx_worker,
+					    &worker_args[num_workers],
+					    lcore_id);
+		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
+			    lcore_id);
+		num_workers++;
+	}
+
+	printf("  Launched %u workers for MT Tx test\n", num_workers);
+
+	/* Wait for all workers to complete */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		rte_eal_wait_lcore(lcore_id);
+	}
+
+	/* Check for errors and sum up Tx counts */
+	for (unsigned int i = 0; i < num_workers; i++) {
+		TEST_ASSERT(worker_args[i].error == 0,
+			    "Worker %u encountered error: %d",
+			    i, worker_args[i].error);
+		total_expected += worker_args[i].tx_count;
+	}
+
+	/* Verify stats match expected */
+	ret = rte_eth_stats_get(port_id, &stats);
+	TEST_ASSERT(ret == 0, "Failed to get stats");
+
+	printf("  Total Tx from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
+	       total_expected, stats.opackets);
+
+	TEST_ASSERT(stats.opackets == total_expected,
+		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
+		    total_expected, stats.opackets);
+
+	/* Cleanup */
+	rte_mempool_free(mt_pool);
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite null_pmd_test_suite = {
+	.suite_name = "Null PMD Unit Test Suite",
+	.setup = test_null_setup,
+	.teardown = test_null_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_null_rx_basic),
+		TEST_CASE(test_null_tx_basic),
+		TEST_CASE(test_null_stats),
+		TEST_CASE(test_null_custom_size),
+		TEST_CASE(test_null_copy_mode),
+		TEST_CASE(test_null_no_rx_mode),
+		TEST_CASE(test_null_link_status),
+		TEST_CASE(test_null_dev_info),
+		TEST_CASE(test_null_multiple_bursts),
+		TEST_CASE(test_null_mt_tx),
+		TEST_CASE(test_null_rss_config),
+		TEST_CASE(test_null_reta_config),
+		TEST_CASE(test_null_stats_reset),
+		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_promisc_allmulti),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+static int
+test_pmd_null(void)
+{
+	return unit_test_suite_runner(&null_pmd_test_suite);
+}
+
+REGISTER_FAST_TEST(null_pmd_autotest, NOHUGE_OK, ASAN_OK, test_pmd_null);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

* [PATCH v11 7/7] net/null: add VLAN insert and strip offload support
  2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
                     ` (5 preceding siblings ...)
  2026-02-02 22:16   ` [PATCH v11 6/7] test: add a test for null PMD Stephen Hemminger
@ 2026-02-02 22:16   ` Stephen Hemminger
  6 siblings, 0 replies; 61+ messages in thread
From: Stephen Hemminger @ 2026-02-02 22:16 UTC (permalink / raw)
  To: dev; +Cc: Stephen Hemminger, Tetsuya Mukawa

Add support for VLAN strip (RX) and VLAN insert (TX) offloads to the
null PMD. This makes the driver more useful for testing VLAN-aware
application logic without requiring physical hardware.

Advertise RTE_ETH_RX_OFFLOAD_VLAN_STRIP and
RTE_ETH_TX_OFFLOAD_VLAN_INSERT in device capabilities. Implement the
vlan_offload_set dev_op to track strip enable/disable at runtime.
When VLAN strip is enabled, the RX path sets RTE_MBUF_F_RX_VLAN and
RTE_MBUF_F_RX_VLAN_STRIPPED on received mbufs and populates vlan_tci
with a fixed dummy value. TX VLAN insert requires no datapath changes
since the null TX path unconditionally accepts and frees all packets.

Add corresponding test cases for both offloads and extend the existing
dev_info test to verify the new capability flags.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test/test_pmd_null.c        | 366 +++++++++++++++++---------------
 drivers/net/null/rte_eth_null.c |  42 +++-
 2 files changed, 229 insertions(+), 179 deletions(-)

diff --git a/app/test/test_pmd_null.c b/app/test/test_pmd_null.c
index bd520af64f..4fc3073930 100644
--- a/app/test/test_pmd_null.c
+++ b/app/test/test_pmd_null.c
@@ -2,17 +2,15 @@
  * Copyright(c) 2026 Stephen Hemminger
  */
 
+#include <stdbool.h>
 #include <stdio.h>
 #include <string.h>
 #include <inttypes.h>
-#include <errno.h>
 
 #include <rte_bus_vdev.h>
-#include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_ethdev.h>
 #include <rte_ether.h>
-#include <rte_launch.h>
 #include <rte_lcore.h>
 #include <rte_mbuf.h>
 #include <rte_mempool.h>
@@ -57,8 +55,15 @@ static int
 configure_null_port(uint16_t pid)
 {
 	struct rte_eth_conf port_conf = {0};
+	struct rte_eth_dev_info dev_info;
 	int ret;
 
+	ret = rte_eth_dev_info_get(pid, &dev_info);
+	if (ret != 0) {
+		printf("Failed to get device info for port %u: %d\n", pid, ret);
+		return ret;
+	}
+
 	ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
 	if (ret != 0) {
 		printf("Failed to configure port %u: %d\n", pid, ret);
@@ -69,7 +74,7 @@ configure_null_port(uint16_t pid)
 				     rte_eth_dev_socket_id(pid),
 				     NULL, mp);
 	if (ret != 0) {
-		printf("Failed to setup Rx queue for port %u: %d\n", pid, ret);
+		printf("Failed to setup RX queue for port %u: %d\n", pid, ret);
 		return ret;
 	}
 
@@ -77,7 +82,7 @@ configure_null_port(uint16_t pid)
 				     rte_eth_dev_socket_id(pid),
 				     NULL);
 	if (ret != 0) {
-		printf("Failed to setup Tx queue for port %u: %d\n", pid, ret);
+		printf("Failed to setup TX queue for port %u: %d\n", pid, ret);
 		return ret;
 	}
 
@@ -131,7 +136,7 @@ test_null_teardown(void)
 }
 
 /*
- * Test: Basic Rx - should return empty packets
+ * Test: Basic RX - should return empty packets
  */
 static int
 test_null_rx_basic(void)
@@ -140,7 +145,7 @@ test_null_rx_basic(void)
 	uint16_t nb_rx;
 	unsigned int i;
 
-	/* Rx should return requested number of empty packets */
+	/* RX should return requested number of empty packets */
 	nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);
 	TEST_ASSERT(nb_rx == BURST_SIZE,
 		    "Expected %u packets, got %u", BURST_SIZE, nb_rx);
@@ -180,17 +185,14 @@ test_mbuf_setup_burst(struct rte_mbuf **bufs, unsigned int burst_size)
 		/* Choose random length between ether min and available space */
 		len = rte_rand_max(rte_pktmbuf_tailroom(m) - RTE_ETHER_MIN_LEN)
 			+ RTE_ETHER_MIN_LEN;
-
-		if (rte_pktmbuf_append(m, len) == NULL) {
-			rte_pktmbuf_free_bulk(bufs, burst_size);
-			return -1;
-		}
+		m->data_len = len;
+		m->buf_len = len;
 	}
 	return 0;
 }
 
 /*
- * Test: Basic Tx - should free all packets
+ * Test: Basic TX - should free all packets
  */
 static int
 test_null_tx_basic(void)
@@ -199,16 +201,16 @@ test_null_tx_basic(void)
 	uint16_t nb_tx;
 	unsigned int pool_count_before, pool_count_after;
 
-	/* Allocate mbufs for Tx */
+	/* Allocate mbufs for TX */
 	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
 		    "Could not allocate mbufs");
 
 	pool_count_before = rte_mempool_avail_count(mp);
 
-	/* Tx should accept and free all packets */
+	/* TX should accept and free all packets */
 	nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
 	TEST_ASSERT(nb_tx == BURST_SIZE,
-		    "Expected to Tx %u packets, but sent %u", BURST_SIZE, nb_tx);
+		    "Expected to TX %u packets, but sent %u", BURST_SIZE, nb_tx);
 
 	pool_count_after = rte_mempool_avail_count(mp);
 
@@ -220,7 +222,9 @@ test_null_tx_basic(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Statistics verification */
+/*
+ * Test: Statistics verification
+ */
 static int
 test_null_stats(void)
 {
@@ -240,20 +244,20 @@ test_null_stats(void)
 	TEST_ASSERT(stats.ipackets == 0, "Initial ipackets not zero");
 	TEST_ASSERT(stats.opackets == 0, "Initial opackets not zero");
 
-	/* Perform Rx */
+	/* Perform RX */
 	nb_rx = rte_eth_rx_burst(port_id, 0, rx_bufs, BURST_SIZE);
-	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
 
-	/* Allocate and perform Tx */
+	/* Allocate and perform TX */
 	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
 		    "Could not allocate tx mbufs");
 
 	nb_tx = rte_eth_tx_burst(port_id, 0, tx_bufs, BURST_SIZE);
-	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst failed");
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst failed");
 
 	/* Get updated stats */
 	ret = rte_eth_stats_get(port_id, &stats);
-	TEST_ASSERT(ret == 0, "Failed to get stats after Rx/Tx");
+	TEST_ASSERT(ret == 0, "Failed to get stats after RX/TX");
 
 	/* Verify stats */
 	TEST_ASSERT(stats.ipackets == BURST_SIZE,
@@ -268,7 +272,9 @@ test_null_stats(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Custom packet size */
+/*
+ * Test: Custom packet size
+ */
 static int
 test_null_custom_size(void)
 {
@@ -286,9 +292,9 @@ test_null_custom_size(void)
 	ret = configure_null_port(custom_port);
 	TEST_ASSERT(ret == 0, "Failed to configure null port");
 
-	/* Rx should return packets with custom size */
+	/* RX should return packets with custom size */
 	nb_rx = rte_eth_rx_burst(custom_port, 0, bufs, BURST_SIZE);
-	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst failed");
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
 
 	/* Verify custom packet size */
 	for (i = 0; i < nb_rx; i++) {
@@ -309,7 +315,9 @@ test_null_custom_size(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Copy mode */
+/*
+ * Test: Copy mode
+ */
 static int
 test_null_copy_mode(void)
 {
@@ -324,11 +332,11 @@ test_null_copy_mode(void)
 	ret = configure_null_port(copy_port);
 	TEST_ASSERT(ret == 0, "Failed to configure null port");
 
-	/* Rx in copy mode should work */
+	/* RX in copy mode should work */
 	nb_rx = rte_eth_rx_burst(copy_port, 0, rx_bufs, BURST_SIZE);
-	TEST_ASSERT(nb_rx == BURST_SIZE, "Rx burst in copy mode failed");
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst in copy mode failed");
 
-	/* Free Rx mbufs */
+	/* Free RX mbufs */
 	rte_pktmbuf_free_bulk(rx_bufs, nb_rx);
 
 	/* Cleanup */
@@ -339,7 +347,9 @@ test_null_copy_mode(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: No-Rx mode */
+/*
+ * Test: No-RX mode
+ */
 static int
 test_null_no_rx_mode(void)
 {
@@ -355,17 +365,17 @@ test_null_no_rx_mode(void)
 	ret = configure_null_port(norx_port);
 	TEST_ASSERT(ret == 0, "Failed to configure null port");
 
-	/* Rx in no-rx mode should return 0 packets */
+	/* RX in no-rx mode should return 0 packets */
 	nb_rx = rte_eth_rx_burst(norx_port, 0, rx_bufs, BURST_SIZE);
 	TEST_ASSERT(nb_rx == 0,
 		    "Expected 0 packets in no-rx mode, got %u", nb_rx);
 
-	/* Tx in no-rx mode should still work (frees packets) */
+	/* TX in no-rx mode should still work (frees packets) */
 	TEST_ASSERT(test_mbuf_setup_burst(tx_bufs, BURST_SIZE) == 0,
 		    "Could not allocate tx mbufs");
 
 	nb_tx = rte_eth_tx_burst(norx_port, 0, tx_bufs, BURST_SIZE);
-	TEST_ASSERT(nb_tx == BURST_SIZE, "Tx burst in no-rx mode failed");
+	TEST_ASSERT(nb_tx == BURST_SIZE, "TX burst in no-rx mode failed");
 
 	/* Cleanup */
 	rte_eth_dev_stop(norx_port);
@@ -375,7 +385,9 @@ test_null_no_rx_mode(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Link status */
+/*
+ * Test: Link status
+ */
 static int
 test_null_link_status(void)
 {
@@ -411,13 +423,13 @@ test_null_link_status(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Device info */
+/*
+ * Test: Device info
+ */
 static int
 test_null_dev_info(void)
 {
 	struct rte_eth_dev_info dev_info;
-	const uint16_t min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
-		RTE_ETHER_CRC_LEN;
 	int ret;
 
 	ret = rte_eth_dev_info_get(port_id, &dev_info);
@@ -427,17 +439,24 @@ test_null_dev_info(void)
 	TEST_ASSERT(dev_info.max_mac_addrs == 1,
 		    "Expected max_mac_addrs=1, got %u", dev_info.max_mac_addrs);
 
-	/* Null PMD has no MTU restrictions, so max_mtu is UINT16_MAX */
+	/*
+	 * The null PMD reports max_rx_pktlen=UINT32_MAX, so the ethdev layer
+	 * derives max_mtu=UINT16_MAX and min_mtu from RTE_ETHER_MIN_LEN.
+	 */
 	TEST_ASSERT(dev_info.max_mtu == UINT16_MAX,
-		    "Expected max_mtu=UINT16_MAX, got %u", dev_info.max_mtu);
-	TEST_ASSERT(dev_info.min_mtu == min_mtu,
-		    "Unexpected min_mtu: %u", dev_info.min_mtu);
+		    "Unexpected max_mtu: %u", dev_info.max_mtu);
 
-	/* Check Tx offload capabilities */
+	/* Check TX offload capabilities */
 	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 		    "Expected MULTI_SEGS TX offload capability");
 	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE,
 		    "Expected MT_LOCKFREE TX offload capability");
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT,
+		    "Expected VLAN_INSERT TX offload capability");
+
+	/* Check RX offload capabilities */
+	TEST_ASSERT(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
+		    "Expected VLAN_STRIP RX offload capability");
 
 	/* Check RSS capabilities */
 	TEST_ASSERT(dev_info.reta_size > 0, "Expected non-zero reta_size");
@@ -449,7 +468,9 @@ test_null_dev_info(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Multiple Rx/Tx bursts */
+/*
+ * Test: Multiple RX/TX bursts
+ */
 static int
 test_null_multiple_bursts(void)
 {
@@ -463,7 +484,7 @@ test_null_multiple_bursts(void)
 	ret = rte_eth_stats_reset(port_id);
 	TEST_ASSERT(ret == 0, "Failed to reset stats");
 
-	/* Perform multiple Rx bursts */
+	/* Perform multiple RX bursts */
 	for (burst = 0; burst < num_bursts; burst++) {
 		struct rte_mbuf *bufs[BURST_SIZE];
 
@@ -475,7 +496,7 @@ test_null_multiple_bursts(void)
 		rte_pktmbuf_free_bulk(bufs, nb_rx);
 	}
 
-	/* Perform multiple Tx bursts */
+	/* Perform multiple TX bursts */
 	for (burst = 0; burst < num_bursts; burst++) {
 		struct rte_mbuf *bufs[BURST_SIZE];
 
@@ -484,7 +505,7 @@ test_null_multiple_bursts(void)
 
 		nb_tx = rte_eth_tx_burst(port_id, 0, bufs, BURST_SIZE);
 		TEST_ASSERT(nb_tx == BURST_SIZE,
-			    "Burst %u: Expected to Tx %u, sent %u",
+			    "Burst %u: Expected to TX %u, sent %u",
 			    burst, BURST_SIZE, nb_tx);
 	}
 
@@ -536,12 +557,12 @@ test_null_rss_config(void)
 		ret = rte_eth_rx_queue_setup(rss_port, q, RING_SIZE,
 					     rte_eth_dev_socket_id(rss_port),
 					     NULL, mp);
-		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
 
 		ret = rte_eth_tx_queue_setup(rss_port, q, RING_SIZE,
 					     rte_eth_dev_socket_id(rss_port),
 					     NULL);
-		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
 	}
 
 	ret = rte_eth_dev_start(rss_port);
@@ -622,12 +643,12 @@ test_null_reta_config(void)
 		ret = rte_eth_rx_queue_setup(reta_port, q, RING_SIZE,
 					     rte_eth_dev_socket_id(reta_port),
 					     NULL, mp);
-		TEST_ASSERT(ret == 0, "Failed to setup Rx queue %u", q);
+		TEST_ASSERT(ret == 0, "Failed to setup RX queue %u", q);
 
 		ret = rte_eth_tx_queue_setup(reta_port, q, RING_SIZE,
 					     rte_eth_dev_socket_id(reta_port),
 					     NULL);
-		TEST_ASSERT(ret == 0, "Failed to setup Tx queue %u", q);
+		TEST_ASSERT(ret == 0, "Failed to setup TX queue %u", q);
 	}
 
 	ret = rte_eth_dev_start(reta_port);
@@ -670,7 +691,9 @@ test_null_reta_config(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Stats reset */
+/*
+ * Test: Stats reset
+ */
 static int
 test_null_stats_reset(void)
 {
@@ -713,7 +736,9 @@ test_null_stats_reset(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: MAC address operations */
+/*
+ * Test: MAC address operations
+ */
 static int
 test_null_mac_addr(void)
 {
@@ -734,169 +759,155 @@ test_null_mac_addr(void)
 	return TEST_SUCCESS;
 }
 
-/* Test: Promiscuous and allmulticast modes */
+/*
+ * Test: VLAN strip offload
+ * Verify that when RX VLAN strip is enabled, received packets have
+ * the VLAN stripped flags set and a valid vlan_tci.
+ */
 static int
-test_null_promisc_allmulti(void)
+test_null_vlan_strip(void)
 {
+	struct rte_eth_conf port_conf = {0};
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t vlan_port;
+	uint16_t nb_rx;
+	unsigned int i;
 	int ret;
 
-	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
-	ret = rte_eth_promiscuous_get(port_id);
-	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+	/* Create a null device for VLAN strip testing */
+	ret = create_null_port("net_null_vlan_strip", NULL, &vlan_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for VLAN strip");
 
-	/* Test allmulticast mode - null PMD starts with allmulti enabled */
-	ret = rte_eth_allmulticast_get(port_id);
-	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
+	/* Configure with VLAN strip enabled */
+	port_conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
-	return TEST_SUCCESS;
-}
+	ret = rte_eth_dev_configure(vlan_port, 1, 1, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure VLAN strip port");
 
-/* Multi-threaded Tx test structures and worker function */
-#define MT_TX_BURSTS_PER_LCORE 100
-#define MT_TX_BURST_SIZE 8u
+	ret = rte_eth_rx_queue_setup(vlan_port, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(vlan_port),
+				     NULL, mp);
+	TEST_ASSERT(ret == 0, "Failed to setup RX queue");
 
-struct mt_tx_args {
-	uint16_t port;
-	uint16_t queue;
-	struct rte_mempool *pool;
-	uint64_t tx_count;	/* packets successfully transmitted */
-	int error;		/* non-zero if worker encountered error */
-};
+	ret = rte_eth_tx_queue_setup(vlan_port, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(vlan_port),
+				     NULL);
+	TEST_ASSERT(ret == 0, "Failed to setup TX queue");
 
-static int
-mt_tx_worker(void *arg)
-{
-	struct mt_tx_args *args = arg;
-	struct rte_mbuf *bufs[MT_TX_BURST_SIZE];
-	unsigned int burst;
-	uint16_t nb_tx;
+	ret = rte_eth_dev_start(vlan_port);
+	TEST_ASSERT(ret == 0, "Failed to start VLAN strip port");
 
-	for (burst = 0; burst < MT_TX_BURSTS_PER_LCORE; burst++) {
-		/* Allocate mbufs */
-		if (rte_pktmbuf_alloc_bulk(args->pool, bufs, MT_TX_BURST_SIZE) != 0) {
-			args->error = -ENOMEM;
-			return -1;
-		}
+	/* RX should return packets with VLAN stripped flags */
+	nb_rx = rte_eth_rx_burst(vlan_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_rx == BURST_SIZE, "RX burst failed");
 
-		/* Set minimal packet size */
-		for (uint16_t i = 0; i < MT_TX_BURST_SIZE; i++) {
-			bufs[i]->data_len = RTE_ETHER_MIN_LEN;
-			bufs[i]->pkt_len = RTE_ETHER_MIN_LEN;
-		}
+	for (i = 0; i < nb_rx; i++) {
+		TEST_ASSERT((bufs[i]->ol_flags & RTE_MBUF_F_RX_VLAN) != 0,
+			    "Packet %u missing RTE_MBUF_F_RX_VLAN flag", i);
+		TEST_ASSERT((bufs[i]->ol_flags & RTE_MBUF_F_RX_VLAN_STRIPPED) != 0,
+			    "Packet %u missing RTE_MBUF_F_RX_VLAN_STRIPPED flag", i);
+		TEST_ASSERT(bufs[i]->vlan_tci != 0,
+			    "Packet %u has zero vlan_tci", i);
+	}
 
-		/* Transmit on shared queue */
-		nb_tx = rte_eth_tx_burst(args->port, args->queue,
-					 bufs, MT_TX_BURST_SIZE);
-		args->tx_count += nb_tx;
+	rte_pktmbuf_free_bulk(bufs, nb_rx);
 
-		/* Free any unsent packets */
-		if (nb_tx < MT_TX_BURST_SIZE)
-			rte_pktmbuf_free_bulk(&bufs[nb_tx],
-					      MT_TX_BURST_SIZE - nb_tx);
-	}
+	/* Cleanup */
+	rte_eth_dev_stop(vlan_port);
+	rte_eth_dev_close(vlan_port);
+	rte_vdev_uninit("net_null_vlan_strip");
 
-	return 0;
+	return TEST_SUCCESS;
 }
 
 /*
- * Test: Multi-threaded Tx on same queue (MT_LOCKFREE)
- * The null PMD advertises MT_LOCKFREE capability, meaning multiple
- * threads can transmit on the same queue without external locking.
+ * Test: VLAN insert TX offload
+ * Verify that the null PMD accepts packets with the VLAN insert flag
+ * and that the offload capability is properly advertised.
  */
 static int
-test_null_mt_tx(void)
+test_null_vlan_insert(void)
 {
+	struct rte_eth_conf port_conf = {0};
 	struct rte_eth_dev_info dev_info;
-	struct rte_mempool *mt_pool;
-	struct mt_tx_args worker_args[RTE_MAX_LCORE] = { 0 };
-	struct rte_eth_stats stats;
-	unsigned int lcore_id;
-	unsigned int num_workers = 0;
-	uint64_t total_expected = 0;
+	struct rte_mbuf *bufs[BURST_SIZE];
+	uint16_t vlan_port;
+	uint16_t nb_tx;
+	unsigned int i;
 	int ret;
 
-	/* Check MT_LOCKFREE capability */
-	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	/* Create a null device for VLAN insert testing */
+	ret = create_null_port("net_null_vlan_insert", NULL, &vlan_port);
+	TEST_ASSERT(ret == 0, "Failed to create null port for VLAN insert");
+
+	ret = rte_eth_dev_info_get(vlan_port, &dev_info);
 	TEST_ASSERT(ret == 0, "Failed to get device info");
 
-	if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
-		printf("  MT_LOCKFREE not supported, skipping test\n");
-		return TEST_SKIPPED;
-	}
+	/* Verify VLAN insert is advertised */
+	TEST_ASSERT(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT,
+		    "TX VLAN insert offload not advertised");
 
-	/* Count available worker lcores */
-	RTE_LCORE_FOREACH_WORKER(lcore_id) {
-		num_workers++;
-	}
+	/* Also verify VLAN strip RX capability */
+	TEST_ASSERT(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
+		    "RX VLAN strip offload not advertised");
 
-	if (num_workers < 2) {
-		printf("  Need at least 2 worker lcores for MT test, "
-		       "have %u, skipping\n", num_workers);
-		return TEST_SKIPPED;
-	}
+	/* Configure with VLAN insert TX offload */
+	port_conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
-	/* Limit to reasonable number of workers */
-	if (num_workers > 4)
-		num_workers = 4;
+	ret = rte_eth_dev_configure(vlan_port, 1, 1, &port_conf);
+	TEST_ASSERT(ret == 0, "Failed to configure VLAN insert port");
 
-	/* Create larger mempool for multi-threaded test */
-	mt_pool = rte_pktmbuf_pool_create("mt_tx_pool",
-					  num_workers * MT_TX_BURSTS_PER_LCORE *
-					  MT_TX_BURST_SIZE * 2,
-					  MBUF_CACHE_SIZE, 0,
-					  RTE_MBUF_DEFAULT_BUF_SIZE,
-					  rte_socket_id());
-	TEST_ASSERT(mt_pool != NULL, "Failed to create MT test mempool");
+	ret = rte_eth_rx_queue_setup(vlan_port, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(vlan_port),
+				     NULL, mp);
+	TEST_ASSERT(ret == 0, "Failed to setup RX queue");
 
-	/* Reset stats before test */
-	ret = rte_eth_stats_reset(port_id);
-	TEST_ASSERT(ret == 0, "Failed to reset stats");
+	ret = rte_eth_tx_queue_setup(vlan_port, 0, RING_SIZE,
+				     rte_eth_dev_socket_id(vlan_port),
+				     NULL);
+	TEST_ASSERT(ret == 0, "Failed to setup TX queue");
 
-	/* Launch workers */
-	num_workers = 0;
-	RTE_LCORE_FOREACH_WORKER(lcore_id) {
-		if (num_workers >= 4)
-			break;
-
-		worker_args[num_workers].port = port_id;
-		worker_args[num_workers].pool = mt_pool;
-
-		ret = rte_eal_remote_launch(mt_tx_worker,
-					    &worker_args[num_workers],
-					    lcore_id);
-		TEST_ASSERT(ret == 0, "Failed to launch worker on lcore %u",
-			    lcore_id);
-		num_workers++;
-	}
+	ret = rte_eth_dev_start(vlan_port);
+	TEST_ASSERT(ret == 0, "Failed to start VLAN insert port");
 
-	printf("  Launched %u workers for MT Tx test\n", num_workers);
+	/* Allocate packets and set VLAN insert flag with a TCI */
+	TEST_ASSERT(test_mbuf_setup_burst(bufs, BURST_SIZE) == 0,
+		    "Could not allocate mbufs");
 
-	/* Wait for all workers to complete */
-	RTE_LCORE_FOREACH_WORKER(lcore_id) {
-		rte_eal_wait_lcore(lcore_id);
+	for (i = 0; i < BURST_SIZE; i++) {
+		bufs[i]->ol_flags |= RTE_MBUF_F_TX_VLAN;
+		bufs[i]->vlan_tci = 200;
 	}
 
-	/* Check for errors and sum up Tx counts */
-	for (unsigned int i = 0; i < num_workers; i++) {
-		TEST_ASSERT(worker_args[i].error == 0,
-			    "Worker %u encountered error: %d",
-			    i, worker_args[i].error);
-		total_expected += worker_args[i].tx_count;
-	}
+	/* TX should accept all VLAN-tagged packets */
+	nb_tx = rte_eth_tx_burst(vlan_port, 0, bufs, BURST_SIZE);
+	TEST_ASSERT(nb_tx == BURST_SIZE,
+		    "Expected to TX %u VLAN packets, sent %u",
+		    BURST_SIZE, nb_tx);
 
-	/* Verify stats match expected */
-	ret = rte_eth_stats_get(port_id, &stats);
-	TEST_ASSERT(ret == 0, "Failed to get stats");
+	/* Cleanup */
+	rte_eth_dev_stop(vlan_port);
+	rte_eth_dev_close(vlan_port);
+	rte_vdev_uninit("net_null_vlan_insert");
 
-	printf("  Total Tx from workers: %"PRIu64", stats.opackets: %"PRIu64"\n",
-	       total_expected, stats.opackets);
+	return TEST_SUCCESS;
+}
 
-	TEST_ASSERT(stats.opackets == total_expected,
-		    "Stats mismatch: expected %"PRIu64" opackets, got %"PRIu64,
-		    total_expected, stats.opackets);
+/*
+ * Test: Promiscuous and allmulticast modes
+ */
+static int
+test_null_promisc_allmulti(void)
+{
+	int ret;
 
-	/* Cleanup */
-	rte_mempool_free(mt_pool);
+	/* Test promiscuous mode - null PMD starts with promiscuous enabled */
+	ret = rte_eth_promiscuous_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected promiscuous mode enabled");
+
+	/* Test allmulticast mode - null PMD starts with allmulti enabled */
+	ret = rte_eth_allmulticast_get(port_id);
+	TEST_ASSERT(ret == 1, "Expected allmulticast mode enabled");
 
 	return TEST_SUCCESS;
 }
@@ -915,11 +926,12 @@ static struct unit_test_suite null_pmd_test_suite = {
 		TEST_CASE(test_null_link_status),
 		TEST_CASE(test_null_dev_info),
 		TEST_CASE(test_null_multiple_bursts),
-		TEST_CASE(test_null_mt_tx),
 		TEST_CASE(test_null_rss_config),
 		TEST_CASE(test_null_reta_config),
 		TEST_CASE(test_null_stats_reset),
 		TEST_CASE(test_null_mac_addr),
+		TEST_CASE(test_null_vlan_strip),
+		TEST_CASE(test_null_vlan_insert),
 		TEST_CASE(test_null_promisc_allmulti),
 
 		TEST_CASES_END() /**< NULL terminate unit test array */
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 672459fe05..c371c55059 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -22,6 +22,8 @@ static unsigned int default_packet_size = 64;
 static unsigned int default_packet_copy;
 static unsigned int default_no_rx;
 
+#define ETH_NULL_VLAN_STRIP_TCI	100	/**< Dummy VLAN TCI for simulated strip. */
+
 static const char *valid_arguments[] = {
 	ETH_NULL_PACKET_SIZE_ARG,
 	ETH_NULL_PACKET_COPY_ARG,
@@ -54,6 +56,7 @@ struct pmd_internals {
 	unsigned int packet_size;
 	unsigned int packet_copy;
 	unsigned int no_rx;
+	unsigned int vlan_strip;
 	uint16_t port_id;
 
 	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
@@ -101,6 +104,11 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		bufs[i]->pkt_len = packet_size;
 		bytes += packet_size;
 		bufs[i]->port = h->internals->port_id;
+		if (h->internals->vlan_strip) {
+			bufs[i]->ol_flags |= RTE_MBUF_F_RX_VLAN |
+					     RTE_MBUF_F_RX_VLAN_STRIPPED;
+			bufs[i]->vlan_tci = ETH_NULL_VLAN_STRIP_TCI;
+		}
 	}
 
 	h->rx_pkts += nb_bufs;
@@ -127,6 +135,11 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		bufs[i]->pkt_len = packet_size;
 		bytes += packet_size;
 		bufs[i]->port = h->internals->port_id;
+		if (h->internals->vlan_strip) {
+			bufs[i]->ol_flags |= RTE_MBUF_F_RX_VLAN |
+					     RTE_MBUF_F_RX_VLAN_STRIPPED;
+			bufs[i]->vlan_tci = ETH_NULL_VLAN_STRIP_TCI;
+		}
 	}
 
 	h->rx_pkts += nb_bufs;
@@ -180,8 +193,13 @@ eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 }
 
 static int
-eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+eth_dev_configure(struct rte_eth_dev *dev)
 {
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	internals->vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+				   RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
+
 	return 0;
 }
 
@@ -271,6 +289,22 @@ eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
 	return 0;
 }
 
+static int
+eth_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (dev->data->dev_conf.rxmode.offloads &
+		    RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+			internals->vlan_strip = 1;
+		else
+			internals->vlan_strip = 0;
+	}
+
+	return 0;
+}
+
 static int
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
@@ -282,7 +316,10 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
 	dev_info->min_rx_bufsize = internals->packet_size;
-	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				    RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	dev_info->reta_size = internals->reta_size;
 	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
@@ -495,6 +532,7 @@ static const struct eth_dev_ops ops = {
 	.rx_queue_release = eth_rx_queue_release,
 	.tx_queue_release = eth_tx_queue_release,
 	.mtu_set = eth_mtu_set,
+	.vlan_offload_set = eth_vlan_offload_set,
 	.link_update = eth_link_update,
 	.mac_addr_set = eth_mac_address_set,
 	.stats_get = eth_stats_get,
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 61+ messages in thread

end of thread, other threads:[~2026-02-02 22:19 UTC | newest]

Thread overview: 61+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-01-04 22:25 [PATCH] test: add a test for null PMD Stephen Hemminger
2026-01-05 14:49 ` Marat Khalili
2026-01-05 17:38   ` Stephen Hemminger
2026-01-06 16:47 ` [PATCH v2 0/2] net/null: add a test Stephen Hemminger
2026-01-06 16:47   ` [PATCH v2 1/2] test: add a test for null PMD Stephen Hemminger
2026-01-06 17:40     ` Marat Khalili
2026-01-06 18:01       ` Stephen Hemminger
2026-01-06 16:47   ` [PATCH v2 2/2] net/null: revise info_get Stephen Hemminger
2026-01-08 20:40 ` [PATCH v3 0/3] test: new test for null PMD Stephen Hemminger
2026-01-08 20:40   ` [PATCH v3 1/3] net/null: cleanup info_get Stephen Hemminger
2026-01-08 20:40   ` [PATCH v3 2/3] test: allow larger packet sizes Stephen Hemminger
2026-01-09 15:00     ` Morten Brørup
2026-01-10 17:21       ` Stephen Hemminger
2026-01-08 20:40   ` [PATCH v3 3/3] test: add a test for null PMD Stephen Hemminger
2026-01-09  1:21     ` Stephen Hemminger
2026-01-10 17:22 ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
2026-01-10 17:22   ` [PATCH v4 1/3] net/null: cleanup info_get Stephen Hemminger
2026-01-10 17:22   ` [PATCH v4 2/3] test: generate larger packet bursts Stephen Hemminger
2026-01-10 17:22   ` [PATCH v4 3/3] test: add a test for null PMD Stephen Hemminger
2026-01-12  0:56   ` [PATCH v4 0/3] null pmd minor cleanup and add test Stephen Hemminger
2026-01-14 18:30 ` [PATCH v5 0/3] test: add null PMD test suite Stephen Hemminger
2026-01-14 18:30   ` [PATCH v5 1/3] net/null: cleanup info_get Stephen Hemminger
2026-01-14 18:30   ` [PATCH v5 2/3] test: generate larger packet bursts Stephen Hemminger
2026-01-14 18:30   ` [PATCH v5 3/3] test: add a test for null PMD Stephen Hemminger
2026-01-18 16:50 ` [PATCH v6 0/3] test: add null PMD test suite Stephen Hemminger
2026-01-18 16:50   ` [PATCH v6 1/3] net/null: cleanup info response Stephen Hemminger
2026-01-18 16:50   ` [PATCH v6 2/3] test: generate larger packet bursts Stephen Hemminger
2026-01-18 16:50   ` [PATCH v6 3/3] test: add a test for null PMD Stephen Hemminger
2026-01-25 20:23 ` [PATCH v7 0/5] net/null: improvements and bug fixes Stephen Hemminger
2026-01-25 20:23   ` [PATCH v7 1/5] net/null: cleanup info response Stephen Hemminger
2026-01-25 20:23   ` [PATCH v7 2/5] test: generate larger packet bursts Stephen Hemminger
2026-01-25 20:23   ` [PATCH v7 3/5] test: add a test for null PMD Stephen Hemminger
2026-01-25 20:23   ` [PATCH v7 4/5] net/null: add check for pool vs packet size Stephen Hemminger
2026-01-25 20:23   ` [PATCH v7 5/5] net/null: check packet size argument Stephen Hemminger
2026-01-28 19:00 ` [PATCH v8 0/5] net/null: improvements and bug fixes Stephen Hemminger
2026-01-28 19:00   ` [PATCH v8 1/5] net/null: cleanup info response Stephen Hemminger
2026-01-28 19:00   ` [PATCH v8 2/5] test: generate larger packet bursts Stephen Hemminger
2026-01-28 19:00   ` [PATCH v8 3/5] test: add a test for null PMD Stephen Hemminger
2026-01-28 19:00   ` [PATCH v8 4/5] net/null: add check for pool vs packet size Stephen Hemminger
2026-01-28 19:00   ` [PATCH v8 5/5] net/null: check packet size argument Stephen Hemminger
2026-01-29 20:25 ` [PATCH v9 0/5] net/null: improvements and bug fixes Stephen Hemminger
2026-01-29 20:25   ` [PATCH v9 1/5] net/null: cleanup info response Stephen Hemminger
2026-01-29 20:25   ` [PATCH v9 2/5] net/null: validate the numeric devargs Stephen Hemminger
2026-01-29 20:25   ` [PATCH v9 3/5] net/null: remove redundant argument validation Stephen Hemminger
2026-01-29 20:25   ` [PATCH v9 4/5] test: support larger packet sizes in burst generator Stephen Hemminger
2026-01-29 20:25   ` [PATCH v9 5/5] test: add a test for null PMD Stephen Hemminger
2026-02-01 17:17 ` [PATCH v10 0/6] net/null: bug fixes and improvements Stephen Hemminger
2026-02-01 17:17   ` [PATCH v10 1/6] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
2026-02-01 17:17   ` [PATCH v10 2/6] net/null: cleanup info response Stephen Hemminger
2026-02-01 17:17   ` [PATCH v10 3/6] net/null: validate the numeric devargs Stephen Hemminger
2026-02-01 17:17   ` [PATCH v10 4/6] net/null: remove redundant argument validation Stephen Hemminger
2026-02-01 17:17   ` [PATCH v10 5/6] test: support larger packet sizes in burst generator Stephen Hemminger
2026-02-01 17:17   ` [PATCH v10 6/6] test: add a test for null PMD Stephen Hemminger
2026-02-02 22:16 ` [PATCH v11 0/7] net/null: bug fixes and improvements Stephen Hemminger
2026-02-02 22:16   ` [PATCH v11 1/7] net/null: fix missing mbuf leakage in the copy transmit Stephen Hemminger
2026-02-02 22:16   ` [PATCH v11 2/7] net/null: cleanup info response Stephen Hemminger
2026-02-02 22:16   ` [PATCH v11 3/7] net/null: validate the numeric devargs Stephen Hemminger
2026-02-02 22:16   ` [PATCH v11 4/7] net/null: remove redundant argument validation Stephen Hemminger
2026-02-02 22:16   ` [PATCH v11 5/7] test: support larger packet sizes in burst generator Stephen Hemminger
2026-02-02 22:16   ` [PATCH v11 6/7] test: add a test for null PMD Stephen Hemminger
2026-02-02 22:16   ` [PATCH v11 7/7] net/null: add VLAN insert and strip offload support Stephen Hemminger

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox