* [PATCH net-next v2 0/2] net: ibmveth: Make ibmveth use WARN_ON instead of BUG_ON and added KUnit tests
From: Dave Marquardt @ 2025-04-14 19:40 UTC
  To: netdev; +Cc: michal.swiatkowski, Dave Marquardt, linuxppc-dev

- Made the ibmveth driver use WARN_ON with recovery rather than BUG_ON.
  Some recovery paths schedule a device reset through the new function
  ibmveth_reset (see the sketch below). Also removed a conflicting and
  unneeded forward declaration.
- Added KUnit tests for some of the code changed by the WARN_ON
  conversion.
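
The conversion follows one pattern throughout; a simplified sketch
(the exact condition, label, and return value vary per call site):

	/* was: BUG_ON(bad_condition); */
	if (WARN_ON(bad_condition)) {
		/* defer a device reset to the new ibmveth_reset() */
		schedule_work(&adapter->work);
		return -EINVAL;	/* or goto an error label */
	}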

Changes:
v2: Addressed Michal Swiatkowski's comments
- Split into multiple patches
- Used a more descriptive label

Dave Marquardt (2):
  net: ibmveth: make ibmveth use WARN_ON instead of BUG_ON
  net: ibmveth: added KUnit tests for some buffer pool functions

 drivers/net/ethernet/ibm/Kconfig   |  13 ++
 drivers/net/ethernet/ibm/ibmveth.c | 241 ++++++++++++++++++++++++++---
 drivers/net/ethernet/ibm/ibmveth.h |  65 ++++----
 3 files changed, 268 insertions(+), 51 deletions(-)

-- 
2.49.0



* [PATCH net-next v2 1/2] net: ibmveth: make ibmveth use WARN_ON instead of BUG_ON
From: Dave Marquardt @ 2025-04-14 19:40 UTC
  To: netdev; +Cc: michal.swiatkowski, Dave Marquardt, linuxppc-dev

Replaced BUG_ON calls with WARN_ON calls plus error handling, including
calls to a new ibmveth_reset routine, which resets the device. Removed a
conflicting and unneeded forward declaration.

Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
---
 drivers/net/ethernet/ibm/ibmveth.c | 116 ++++++++++++++++++++++++-----
 drivers/net/ethernet/ibm/ibmveth.h |  65 ++++++++--------
 2 files changed, 130 insertions(+), 51 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 04192190beba..e005caf5874d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,8 +39,6 @@
 #include "ibmveth.h"
 
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
-				       bool reuse);
 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 
 static struct kobj_type ktype_veth_pool;
@@ -231,7 +229,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 		index = pool->free_map[free_index];
 		skb = NULL;
 
-		BUG_ON(index == IBM_VETH_INVALID_MAP);
+		if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+			schedule_work(&adapter->work);
+			goto bad_index_failure;
+		}
 
 		/* are we allocating a new buffer or recycling an old one */
 		if (pool->skbuff[index])
@@ -300,6 +301,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 		                 DMA_FROM_DEVICE);
 	dev_kfree_skb_any(pool->skbuff[index]);
 	pool->skbuff[index] = NULL;
+bad_index_failure:
 	adapter->replenish_add_buff_failure++;
 
 	mb();
@@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
 	}
 }
 
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
-					    u64 correlator, bool reuse)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0       - success
+ * * %-EINVAL - correlator maps to pool or index out of range
+ * * %-EFAULT - pool and index map to null skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+					   u64 correlator, bool reuse)
 {
 	unsigned int pool  = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 	unsigned int free_index;
 	struct sk_buff *skb;
 
-	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
-	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+		schedule_work(&adapter->work);
+		return -EINVAL;
+	}
 
 	skb = adapter->rx_buff_pool[pool].skbuff[index];
-	BUG_ON(skb == NULL);
+	if (WARN_ON(!skb)) {
+		schedule_work(&adapter->work);
+		return -EFAULT;
+	}
 
 	/* if we are going to reuse the buffer then keep the pointers around
 	 * but mark index as available. replenish will see the skb pointer and
@@ -411,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
 	mb();
 
 	atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+	return 0;
 }
 
 /* get the current buffer on the rx queue */
@@ -420,24 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
 	unsigned int pool = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 
-	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
-	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+		schedule_work(&adapter->work);
+		return NULL;
+	}
 
 	return adapter->rx_buff_pool[pool].skbuff[index];
 }
 
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
-				       bool reuse)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter - pointer to adapter
+ * @reuse   - whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0    - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+				      bool reuse)
 {
 	u64 cor;
+	int rc;
 
 	cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
-	ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+	rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+	if (unlikely(rc))
+		return rc;
 
 	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
+
+	return 0;
 }
 
 static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@@ -709,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev)
 	return 0;
 }
 
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w - pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ *          ibmveth_close. It can't be called directly in a context that has
+ *          already acquired rtnl_mutex or disabled its NAPI, or directly from
+ *          a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+	struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+	struct net_device *netdev = adapter->netdev;
+
+	netdev_dbg(netdev, "reset starting\n");
+
+	rtnl_lock();
+
+	dev_close(adapter->netdev);
+	dev_open(adapter->netdev, NULL);
+
+	rtnl_unlock();
+
+	netdev_dbg(netdev, "reset complete\n");
+}
+
 static int ibmveth_set_link_ksettings(struct net_device *dev,
 				      const struct ethtool_link_ksettings *cmd)
 {
@@ -1324,7 +1393,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			wmb(); /* suggested by larson1 */
 			adapter->rx_invalid_buffer++;
 			netdev_dbg(netdev, "recycling invalid buffer\n");
-			ibmveth_rxq_harvest_buffer(adapter, true);
+			if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+				break;
 		} else {
 			struct sk_buff *skb, *new_skb;
 			int length = ibmveth_rxq_frame_length(adapter);
@@ -1334,6 +1404,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			__sum16 iph_check = 0;
 
 			skb = ibmveth_rxq_get_buffer(adapter);
+			if (unlikely(!skb))
+				break;
 
 			/* if the large packet bit is set in the rx queue
 			 * descriptor, the mss will be written by PHYP eight
@@ -1357,10 +1429,12 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 				if (rx_flush)
 					ibmveth_flush_buffer(skb->data,
 						length + offset);
-				ibmveth_rxq_harvest_buffer(adapter, true);
+				if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+					break;
 				skb = new_skb;
 			} else {
-				ibmveth_rxq_harvest_buffer(adapter, false);
+				if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+					break;
 				skb_reserve(skb, offset);
 			}
 
@@ -1407,7 +1481,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	 * then check once more to make sure we are done.
 	 */
 	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
-	BUG_ON(lpar_rc != H_SUCCESS);
+	if (WARN_ON(lpar_rc != H_SUCCESS)) {
+		schedule_work(&adapter->work);
+		goto out;
+	}
 
 	if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
 		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1428,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
 	if (napi_schedule_prep(&adapter->napi)) {
 		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 				       VIO_IRQ_DISABLE);
-		BUG_ON(lpar_rc != H_SUCCESS);
+		WARN_ON(lpar_rc != H_SUCCESS);
 		__napi_schedule(&adapter->napi);
 	}
 	return IRQ_HANDLED;
@@ -1670,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
+	INIT_WORK(&adapter->work, ibmveth_reset);
 	adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
 	ibmveth_init_link_settings(netdev);
 
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a..b0a2460ec9f9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -134,38 +134,39 @@ struct ibmveth_rx_q {
 };
 
 struct ibmveth_adapter {
-    struct vio_dev *vdev;
-    struct net_device *netdev;
-    struct napi_struct napi;
-    unsigned int mcastFilterSize;
-    void * buffer_list_addr;
-    void * filter_list_addr;
-    void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
-    unsigned int tx_ltb_size;
-    dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
-    dma_addr_t buffer_list_dma;
-    dma_addr_t filter_list_dma;
-    struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
-    struct ibmveth_rx_q rx_queue;
-    int rx_csum;
-    int large_send;
-    bool is_active_trunk;
-
-    u64 fw_ipv6_csum_support;
-    u64 fw_ipv4_csum_support;
-    u64 fw_large_send_support;
-    /* adapter specific stats */
-    u64 replenish_task_cycles;
-    u64 replenish_no_mem;
-    u64 replenish_add_buff_failure;
-    u64 replenish_add_buff_success;
-    u64 rx_invalid_buffer;
-    u64 rx_no_buffer;
-    u64 tx_map_failed;
-    u64 tx_send_failed;
-    u64 tx_large_packets;
-    u64 rx_large_packets;
-    /* Ethtool settings */
+	struct vio_dev *vdev;
+	struct net_device *netdev;
+	struct napi_struct napi;
+	struct work_struct work;
+	unsigned int mcastFilterSize;
+	void *buffer_list_addr;
+	void *filter_list_addr;
+	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+	unsigned int tx_ltb_size;
+	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+	dma_addr_t buffer_list_dma;
+	dma_addr_t filter_list_dma;
+	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+	struct ibmveth_rx_q rx_queue;
+	int rx_csum;
+	int large_send;
+	bool is_active_trunk;
+
+	u64 fw_ipv6_csum_support;
+	u64 fw_ipv4_csum_support;
+	u64 fw_large_send_support;
+	/* adapter specific stats */
+	u64 replenish_task_cycles;
+	u64 replenish_no_mem;
+	u64 replenish_add_buff_failure;
+	u64 replenish_add_buff_success;
+	u64 rx_invalid_buffer;
+	u64 rx_no_buffer;
+	u64 tx_map_failed;
+	u64 tx_send_failed;
+	u64 tx_large_packets;
+	u64 rx_large_packets;
+	/* Ethtool settings */
 	u8 duplex;
 	u32 speed;
 };
-- 
2.49.0



* [PATCH net-next v2 2/2] net: ibmveth: added KUnit tests for some buffer pool functions
From: Dave Marquardt @ 2025-04-14 19:40 UTC
  To: netdev; +Cc: michal.swiatkowski, Dave Marquardt, linuxppc-dev

Added KUnit tests for ibmveth_remove_buffer_from_pool and
ibmveth_rxq_get_buffer under a new IBMVETH_KUNIT_TEST config option.
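
Since IBMVETH depends on PPC_PSERIES, a powerpc build is assumed; the
cross-compiler prefix below is a typical choice, not part of this
patch. With that, the tests can be run via the standard kunit.py
wrapper once the option is enabled, e.g.:

	./tools/testing/kunit/kunit.py run \
		--arch=powerpc --cross_compile=powerpc64-linux-gnu- \
		--kconfig_add CONFIG_IBMVETH=y \
		--kconfig_add CONFIG_IBMVETH_KUNIT_TEST=y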

Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
---
 drivers/net/ethernet/ibm/Kconfig   |  13 +++
 drivers/net/ethernet/ibm/ibmveth.c | 125 +++++++++++++++++++++++++++++
 2 files changed, 138 insertions(+)

diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..4f4b23465c47 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -27,6 +27,19 @@ config IBMVETH
 	  To compile this driver as a module, choose M here. The module will
 	  be called ibmveth.
 
+config IBMVETH_KUNIT_TEST
+	bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	depends on KUNIT=y && IBMVETH=y
+	default KUNIT_ALL_TESTS
+	help
+	  This builds unit tests for the IBM LAN Virtual Ethernet driver.
+
+	  For more information on KUnit and unit tests in general, please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
+
 source "drivers/net/ethernet/ibm/emac/Kconfig"
 
 config EHEA
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index e005caf5874d..2c81ede563e9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -2040,3 +2040,128 @@ static void __exit ibmveth_module_exit(void)
 
 module_init(ibmveth_module_init);
 module_exit(ibmveth_module_exit);
+
+#ifdef CONFIG_IBMVETH_KUNIT_TEST
+#include <kunit/test.h>
+
+/**
+ * ibmveth_reset_kunit - reset routine for running in KUnit environment
+ *
+ * @w - pointer to work_struct embedded in adapter structure
+ *
+ * Context: Called in the KUnit environment. Does nothing.
+ *
+ * Return: void
+ */
+static void ibmveth_reset_kunit(struct work_struct *w)
+{
+	netdev_dbg(NULL, "reset_kunit starting\n");
+	netdev_dbg(NULL, "reset_kunit complete\n");
+}
+
+/**
+ * ibmveth_remove_buffer_from_pool_test - unit test for some of
+ *                                        ibmveth_remove_buffer_from_pool
+ * @test - pointer to kunit structure
+ *
+ * Tests the error returns from ibmveth_remove_buffer_from_pool.
+ * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
+ * checked to see that these warnings happened.
+ *
+ * Return: void
+ */
+static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
+{
+	struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+	struct ibmveth_buff_pool *pool;
+	u64 correlator;
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+
+	INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+	/* Set sane values for buffer pools */
+	for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+					 pool_count[i], pool_size[i],
+					 pool_active[i]);
+
+	pool = &adapter->rx_buff_pool[0];
+	pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+	correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+	correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+	correlator = (u64)0 | 0;
+	pool->skbuff[0] = NULL;
+	KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+	KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+}
+
+/**
+ * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
+ * @test - pointer to kunit structure
+ *
+ * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
+ * the NULL returns, so dmesg should be checked to see that these warnings
+ * happened.
+ *
+ * Return: void
+ */
+static void ibmveth_rxq_get_buffer_test(struct kunit *test)
+{
+	struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+	struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
+	struct ibmveth_buff_pool *pool;
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+	INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+	adapter->rx_queue.queue_len = 1;
+	adapter->rx_queue.index = 0;
+	adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
+						     GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);
+
+	/* Set sane values for buffer pools */
+	for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+					 pool_count[i], pool_size[i],
+					 pool_active[i]);
+
+	pool = &adapter->rx_buff_pool[0];
+	pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+	adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
+	KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+	adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
+	KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+	pool->skbuff[0] = skb;
+	adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
+	KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));
+}
+
+static struct kunit_case ibmveth_test_cases[] = {
+	KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
+	KUNIT_CASE(ibmveth_rxq_get_buffer_test),
+	{}
+};
+
+static struct kunit_suite ibmveth_test_suite = {
+	.name = "ibmveth-kunit-test",
+	.test_cases = ibmveth_test_cases,
+};
+
+kunit_test_suite(ibmveth_test_suite);
+#endif
-- 
2.49.0



* Re: [PATCH net-next v2 1/2] net: ibmveth: make ibmveth use WARN_ON instead of BUG_ON
From: Simon Horman @ 2025-04-16 12:34 UTC
  To: Dave Marquardt; +Cc: netdev, michal.swiatkowski, linuxppc-dev

On Mon, Apr 14, 2025 at 02:40:15PM -0500, Dave Marquardt wrote:
> Replaced BUG_ON calls with WARN_ON calls plus error handling, including
> calls to a new ibmveth_reset routine, which resets the device. Removed a
> conflicting and unneeded forward declaration.

To me the most important change here is the addition of ibmveth_reset.
So I would report that in the subject (rather than the WARN_ON change).
But perhaps that is just me.

> 
> Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
> ---
>  drivers/net/ethernet/ibm/ibmveth.c | 116 ++++++++++++++++++++++++-----
>  drivers/net/ethernet/ibm/ibmveth.h |  65 ++++++++--------
>  2 files changed, 130 insertions(+), 51 deletions(-)
> 
> diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c

...

> @@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
>  	}
>  }
>  
> -/* remove a buffer from a pool */
> -static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
> -					    u64 correlator, bool reuse)
> +/**
> + * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
> + * @adapter: adapter instance
> + * @correlator: identifies pool and index
> + * @reuse: whether to reuse buffer

The above is the correct way to document function parameters in a kernel-doc comment.

> + *
> + * Return:
> + * * %0       - success
> + * * %-EINVAL - correlator maps to pool or index out of range
> + * * %-EFAULT - pool and index map to null skb
> + */
> +static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
> +					   u64 correlator, bool reuse)

...

> +/**
> + * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
> + *
> + * @adapter - pointer to adapter
> + * @reuse   - whether to reuse buffer

But this is not correct. IOW, the tooling expects
e.g. @adapter: ...  rather than @adapter - ...

This is flagged by W=1 builds and by ./scripts/kernel-doc -none.
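
That is, the header above would be written as, e.g.:

 * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
 * @adapter: pointer to adapter
 * @reuse: whether to reuse buffer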

> + *
> + * Context: called from ibmveth_poll
> + *
> + * Return:
> + * * %0    - success
> + * * other - non-zero return from ibmveth_remove_buffer_from_pool
> + */
> +static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
> +				      bool reuse)

...

> diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
> index 8468e2c59d7a..b0a2460ec9f9 100644
> --- a/drivers/net/ethernet/ibm/ibmveth.h
> +++ b/drivers/net/ethernet/ibm/ibmveth.h
> @@ -134,38 +134,39 @@ struct ibmveth_rx_q {
>  };
>  
>  struct ibmveth_adapter {
> -    struct vio_dev *vdev;
> -    struct net_device *netdev;
> -    struct napi_struct napi;
> -    unsigned int mcastFilterSize;
> -    void * buffer_list_addr;
> -    void * filter_list_addr;
> -    void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
> -    unsigned int tx_ltb_size;
> -    dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
> -    dma_addr_t buffer_list_dma;
> -    dma_addr_t filter_list_dma;
> -    struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
> -    struct ibmveth_rx_q rx_queue;
> -    int rx_csum;
> -    int large_send;
> -    bool is_active_trunk;
> -
> -    u64 fw_ipv6_csum_support;
> -    u64 fw_ipv4_csum_support;
> -    u64 fw_large_send_support;
> -    /* adapter specific stats */
> -    u64 replenish_task_cycles;
> -    u64 replenish_no_mem;
> -    u64 replenish_add_buff_failure;
> -    u64 replenish_add_buff_success;
> -    u64 rx_invalid_buffer;
> -    u64 rx_no_buffer;
> -    u64 tx_map_failed;
> -    u64 tx_send_failed;
> -    u64 tx_large_packets;
> -    u64 rx_large_packets;
> -    /* Ethtool settings */
> +	struct vio_dev *vdev;
> +	struct net_device *netdev;
> +	struct napi_struct napi;
> +	struct work_struct work;
> +	unsigned int mcastFilterSize;
> +	void *buffer_list_addr;
> +	void *filter_list_addr;
> +	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
> +	unsigned int tx_ltb_size;
> +	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
> +	dma_addr_t buffer_list_dma;
> +	dma_addr_t filter_list_dma;
> +	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
> +	struct ibmveth_rx_q rx_queue;
> +	int rx_csum;
> +	int large_send;
> +	bool is_active_trunk;
> +
> +	u64 fw_ipv6_csum_support;
> +	u64 fw_ipv4_csum_support;
> +	u64 fw_large_send_support;
> +	/* adapter specific stats */
> +	u64 replenish_task_cycles;
> +	u64 replenish_no_mem;
> +	u64 replenish_add_buff_failure;
> +	u64 replenish_add_buff_success;
> +	u64 rx_invalid_buffer;
> +	u64 rx_no_buffer;
> +	u64 tx_map_failed;
> +	u64 tx_send_failed;
> +	u64 tx_large_packets;
> +	u64 rx_large_packets;
> +	/* Ethtool settings */
>  	u8 duplex;
>  	u32 speed;
>  };

If you would like to update the indentation of this structure
then please do so in a separate patch which precedes
adding/removing/changing fields of the structure.

As is, it's very hard to see the non-formatting changes in this hunk.

-- 
pw-bot: changes-requested


* Re: [PATCH net-next v2 1/2] net: ibmveth: make ibmveth use WARN_ON instead of BUG_ON
From: Dave Marquardt @ 2025-04-16 14:13 UTC
  To: Simon Horman; +Cc: netdev, michal.swiatkowski, linuxppc-dev

Simon Horman <horms@kernel.org> writes:

> On Mon, Apr 14, 2025 at 02:40:15PM -0500, Dave Marquardt wrote:
>> Replaced BUG_ON calls with WARN_ON calls plus error handling, including
>> calls to a new ibmveth_reset routine, which resets the device. Removed a
>> conflicting and unneeded forward declaration.
>
> To me the most important change here is the addition of ibmveth_reset.
> So I would report that in the subject (rather than the WARN_ON change).
> But perhaps that is just me.

Thanks, I'll consider that.

>> 
>> Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
>> ---
>>  drivers/net/ethernet/ibm/ibmveth.c | 116 ++++++++++++++++++++++++-----
>>  drivers/net/ethernet/ibm/ibmveth.h |  65 ++++++++--------
>>  2 files changed, 130 insertions(+), 51 deletions(-)
>> 
>> diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
>
> ...
>
>> @@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
>>  	}
>>  }
>>  
>> -/* remove a buffer from a pool */
>> -static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
>> -					    u64 correlator, bool reuse)
>> +/**
>> + * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
>> + * @adapter: adapter instance
>> + * @correlator: identifies pool and index
>> + * @reuse: whether to reuse buffer
>
> The above is the correct way to document function parameters in a kernel-doc comment.
>
>> + *
>> + * Return:
>> + * * %0       - success
>> + * * %-EINVAL - correlator maps to pool or index out of range
>> + * * %-EFAULT - pool and index map to null skb
>> + */
>> +static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
>> +					   u64 correlator, bool reuse)
>
> ...
>
>> +/**
>> + * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
>> + *
>> + * @adapter - pointer to adapter
>> + * @reuse   - whether to reuse buffer
>
> But this is not correct. IOW, the tooling expects
> e.g. @adapter: ...  rather than @adapter - ...
>
> This is flagged by W=1 builds and by ./scripts/kernel-doc -none.

Thanks, I'll start using this in my work.
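
For reference, these checks are typically invoked on this file as
(exact output depends on the tree and config):

	make W=1 drivers/net/ethernet/ibm/ibmveth.o
	./scripts/kernel-doc -none drivers/net/ethernet/ibm/ibmveth.c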

>> + *
>> + * Context: called from ibmveth_poll
>> + *
>> + * Return:
>> + * * %0    - success
>> + * * other - non-zero return from ibmveth_remove_buffer_from_pool
>> + */
>> +static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
>> +				      bool reuse)
>
> ...
>
>> diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
>> index 8468e2c59d7a..b0a2460ec9f9 100644
>> --- a/drivers/net/ethernet/ibm/ibmveth.h
>> +++ b/drivers/net/ethernet/ibm/ibmveth.h
>> @@ -134,38 +134,39 @@ struct ibmveth_rx_q {
>>  };
>>  
>>  struct ibmveth_adapter {
>> -    struct vio_dev *vdev;
>> -    struct net_device *netdev;
>> -    struct napi_struct napi;
>> -    unsigned int mcastFilterSize;
>> -    void * buffer_list_addr;
>> -    void * filter_list_addr;
>> -    void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
>> -    unsigned int tx_ltb_size;
>> -    dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
>> -    dma_addr_t buffer_list_dma;
>> -    dma_addr_t filter_list_dma;
>> -    struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
>> -    struct ibmveth_rx_q rx_queue;
>> -    int rx_csum;
>> -    int large_send;
>> -    bool is_active_trunk;
>> -
>> -    u64 fw_ipv6_csum_support;
>> -    u64 fw_ipv4_csum_support;
>> -    u64 fw_large_send_support;
>> -    /* adapter specific stats */
>> -    u64 replenish_task_cycles;
>> -    u64 replenish_no_mem;
>> -    u64 replenish_add_buff_failure;
>> -    u64 replenish_add_buff_success;
>> -    u64 rx_invalid_buffer;
>> -    u64 rx_no_buffer;
>> -    u64 tx_map_failed;
>> -    u64 tx_send_failed;
>> -    u64 tx_large_packets;
>> -    u64 rx_large_packets;
>> -    /* Ethtool settings */
>> +	struct vio_dev *vdev;
>> +	struct net_device *netdev;
>> +	struct napi_struct napi;
>> +	struct work_struct work;
>> +	unsigned int mcastFilterSize;
>> +	void *buffer_list_addr;
>> +	void *filter_list_addr;
>> +	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
>> +	unsigned int tx_ltb_size;
>> +	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
>> +	dma_addr_t buffer_list_dma;
>> +	dma_addr_t filter_list_dma;
>> +	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
>> +	struct ibmveth_rx_q rx_queue;
>> +	int rx_csum;
>> +	int large_send;
>> +	bool is_active_trunk;
>> +
>> +	u64 fw_ipv6_csum_support;
>> +	u64 fw_ipv4_csum_support;
>> +	u64 fw_large_send_support;
>> +	/* adapter specific stats */
>> +	u64 replenish_task_cycles;
>> +	u64 replenish_no_mem;
>> +	u64 replenish_add_buff_failure;
>> +	u64 replenish_add_buff_success;
>> +	u64 rx_invalid_buffer;
>> +	u64 rx_no_buffer;
>> +	u64 tx_map_failed;
>> +	u64 tx_send_failed;
>> +	u64 tx_large_packets;
>> +	u64 rx_large_packets;
>> +	/* Ethtool settings */
>>  	u8 duplex;
>>  	u32 speed;
>>  };
>
> If you would like to update the indentation of this structure
> then please do so in a separate patch which precedes
> adding/removing/changing fields of the structure.
>
> As is, it's very hard to see the non-formatting changes in this hunk.

I agree. Thanks for the suggestion.

-Dave

