* [PATCH net-next] net: ibmveth: Refactored veth_pool_store for better maintainability
@ 2025-05-06 16:00 Dave Marquardt
2025-05-06 18:15 ` Jacob Keller
2025-05-08 2:50 ` patchwork-bot+netdevbpf
From: Dave Marquardt @ 2025-05-06 16:00 UTC
To: netdev; +Cc: linuxppc-dev, Dave Marquardt
Make veth_pool_store detect requested pool changes, close device if
necessary, update pool, and reopen device.
Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
---
drivers/net/ethernet/ibm/ibmveth.c | 111 +++++++++++++++++------------
1 file changed, 67 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 45143467286e..24046fe16634 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1871,6 +1871,26 @@ static ssize_t veth_pool_show(struct kobject *kobj,
return 0;
}
+/**
+ * veth_pool_store - sysfs store handler for pool attributes
+ * @kobj: kobject embedded in pool
+ * @attr: attribute being changed
+ * @buf: value being stored
+ * @count: length of @buf in bytes
+ *
+ * Stores new value in pool attribute. Verifies the range of the new value for
+ * size and buff_size. Verifies that at least one pool remains available to
+ * receive MTU-sized packets.
+ *
+ * Context: Process context.
+ * Takes and releases rtnl_mutex to ensure correct ordering of close
+ * and open calls.
+ * Return:
+ * %-EPERM - Not allowed to disable all MTU-sized buffer pools
+ * * %-EINVAL - New pool size or buffer size is out of range
+ * * count - Return count for success
+ * * other - Return value from a failed ibmveth_open call
+ */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
@@ -1880,28 +1900,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
+ bool change = false;
+ u32 newbuff_size;
+ u32 oldbuff_size;
+ int newactive;
+ int oldactive;
+ u32 newsize;
+ u32 oldsize;
long rc;
rtnl_lock();
+ oldbuff_size = pool->buff_size;
+ oldactive = pool->active;
+ oldsize = pool->size;
+
+ newbuff_size = oldbuff_size;
+ newactive = oldactive;
+ newsize = oldsize;
+
if (attr == &veth_active_attr) {
- if (value && !pool->active) {
- if (netif_running(netdev)) {
- if (ibmveth_alloc_buffer_pool(pool)) {
- netdev_err(netdev,
- "unable to alloc pool\n");
- rc = -ENOMEM;
- goto unlock_err;
- }
- pool->active = 1;
- ibmveth_close(netdev);
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->active = 1;
- }
- } else if (!value && pool->active) {
+ if (value && !oldactive) {
+ newactive = 1;
+ change = true;
+ } else if (!value && oldactive) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
@@ -1921,43 +1943,44 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
goto unlock_err;
}
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->active = 0;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- }
- pool->active = 0;
+ newactive = 0;
+ change = true;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
rc = -EINVAL;
goto unlock_err;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->size = value;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->size = value;
- }
+ }
+ if (value != oldsize) {
+ newsize = value;
+ change = true;
}
} else if (attr == &veth_size_attr) {
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
rc = -EINVAL;
goto unlock_err;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->buff_size = value;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->buff_size = value;
+ }
+ if (value != oldbuff_size) {
+ newbuff_size = value;
+ change = true;
+ }
+ }
+
+ if (change) {
+ if (netif_running(netdev))
+ ibmveth_close(netdev);
+
+ pool->active = newactive;
+ pool->buff_size = newbuff_size;
+ pool->size = newsize;
+
+ if (netif_running(netdev)) {
+ rc = ibmveth_open(netdev);
+ if (rc) {
+ pool->active = oldactive;
+ pool->buff_size = oldbuff_size;
+ pool->size = oldsize;
+ goto unlock_err;
}
}
}
--
2.49.0
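
The net effect of the patch is easier to see outside diff form: veth_pool_store
now snapshots the old pool settings, decides whether the write actually changes
anything, and only then does a single close/apply/reopen, rolling the pool back
if the reopen fails, instead of repeating the close/open pair inside each
attribute branch. Below is a minimal, self-contained userspace sketch of that
flow, assuming nothing beyond the diff above; pool_store, reopen_device and
struct pool here are illustrative stand-ins for veth_pool_store, ibmveth_open
and the driver's buffer pool, not driver code.

	/*
	 * Simplified sketch of the flow veth_pool_store now follows:
	 * snapshot the old settings, work out whether anything changed,
	 * and only then close, apply, and reopen, restoring the old
	 * values if the reopen fails. All names are illustrative.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct pool {
		unsigned int size;      /* number of buffers */
		unsigned int buff_size; /* bytes per buffer  */
		int active;
	};

	/* Stand-in for ibmveth_open(); returns 0 on success. */
	static int reopen_device(void)
	{
		return 0;
	}

	static int pool_store(struct pool *pool, unsigned int new_size,
			      bool running)
	{
		struct pool old = *pool; /* snapshot for rollback */
		int rc;

		if (new_size == pool->size)
			return 0; /* no change: leave the device alone */

		/* if (running) close_device(); */
		pool->size = new_size; /* apply while the device is down */

		if (running) {
			rc = reopen_device();
			if (rc) {
				*pool = old; /* reopen failed: restore */
				return rc;
			}
		}
		return 0;
	}

	int main(void)
	{
		struct pool p = { .size = 256, .buff_size = 2048, .active = 1 };
		int rc = pool_store(&p, 512, true);

		printf("rc=%d, size=%u\n", rc, p.size);
		return 0;
	}

In the driver itself this path is reached through sysfs: each buffer pool is
registered as a poolN kobject under the vio device, and writes to its active,
buffer-count and buffer-size attributes land in veth_pool_store (the exact
attribute file names and device path are defined in ibmveth.c and vary by
system).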
* Re: [PATCH net-next] net: ibmveth: Refactored veth_pool_store for better maintainability
From: Jacob Keller @ 2025-05-06 18:15 UTC
To: Dave Marquardt, netdev; +Cc: linuxppc-dev
On 5/6/2025 9:00 AM, Dave Marquardt wrote:
> Make veth_pool_store detect requested pool changes, close device if
> necessary, update pool, and reopen device.
>
> Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
> ---
The diff itself is quite ugly, but the end result was much easier to follow.
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
* Re: [PATCH net-next] net: ibmveth: Refactored veth_pool_store for better maintainability
From: patchwork-bot+netdevbpf @ 2025-05-08 2:50 UTC
To: Dave Marquardt; +Cc: netdev, linuxppc-dev
Hello:
This patch was applied to netdev/net-next.git (main)
by Jakub Kicinski <kuba@kernel.org>:
On Tue, 6 May 2025 11:00:04 -0500 you wrote:
> Make veth_pool_store detect requested pool changes, close device if
> necessary, update pool, and reopen device.
>
> Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
> ---
> drivers/net/ethernet/ibm/ibmveth.c | 111 +++++++++++++++++------------
> 1 file changed, 67 insertions(+), 44 deletions(-)
Here is the summary with links:
- [net-next] net: ibmveth: Refactored veth_pool_store for better maintainability
https://git.kernel.org/netdev/net-next/c/46431fd5224f
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html