* [PATCH net-next] ibmvnic: Split counters for scrq/pools/napi
From: Nathan Fontenot @ 2018-02-22 3:33 UTC
To: netdev; +Cc: jallen, tlfalcon
The approach of using one counter to rule them all when tracking the
number of active sub-crqs, pools, and napi instances has problems
handling some failover scenarios. This is because the sub-crqs, pools,
and napi instances are initialized in different places, and the points
at which the active counts are updated do not line up with those
initializations.
This patch simplifies the tracking by keeping separate counters for the
tx and rx sub-crqs, pools, and napi instances.
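To illustrate the pattern (a simplified sketch with stand-in names and
bodies, not the driver source itself; the real init_rx_pools() takes a
net_device, for instance): each resource type records its own active
count at the moment its array is allocated, the release path walks
exactly that count, and the count is zeroed once the array is freed.
static int init_rx_pools(struct ibmvnic_adapter *adapter, u32 n)
{
	adapter->rx_pool = kcalloc(n, sizeof(*adapter->rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool)
		return -1;
	/* Record the count as soon as the array exists, so a later
	 * release sees exactly what was allocated here.
	 */
	adapter->num_active_rx_pools = n;
	return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	u32 i;
	if (!adapter->rx_pool)
		return;
	/* Walk only what init actually set up. */
	for (i = 0; i < adapter->num_active_rx_pools; i++)
		;	/* free per-pool buffers for rx_pool[i] */
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}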
Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
---
drivers/net/ethernet/ibm/ibmvnic.c | 38 ++++++++++++++++--------------------
drivers/net/ethernet/ibm/ibmvnic.h | 7 +++++--
2 files changed, 22 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1703b881252f..8ca88f7cc661 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -461,7 +461,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->rx_pool)
return;
- for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -484,6 +484,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
+ adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
@@ -508,6 +509,8 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
+ adapter->num_active_rx_pools = rxadd_subcrqs;
+
for (i = 0; i < rxadd_subcrqs; i++) {
rx_pool = &adapter->rx_pool[i];
@@ -608,7 +611,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool)
return;
- for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_tx_pools; i++) {
netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
tx_pool = &adapter->tx_pool[i];
kfree(tx_pool->tx_buff);
@@ -619,6 +622,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
+ adapter->num_active_tx_pools = 0;
}
static int init_tx_pools(struct net_device *netdev)
@@ -635,6 +639,8 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tx_pool)
return -1;
+ adapter->num_active_tx_pools = tx_subcrqs;
+
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
@@ -745,6 +751,7 @@ static int init_napi(struct ibmvnic_adapter *adapter)
ibmvnic_poll, NAPI_POLL_WEIGHT);
}
+ adapter->num_active_rx_napi = adapter->req_rx_queues;
return 0;
}
@@ -755,7 +762,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
if (!adapter->napi)
return;
- for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_rx_napi; i++) {
if (&adapter->napi[i]) {
netdev_dbg(adapter->netdev,
"Releasing napi[%d]\n", i);
@@ -765,6 +772,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
kfree(adapter->napi);
adapter->napi = NULL;
+ adapter->num_active_rx_napi = 0;
}
static int ibmvnic_login(struct net_device *netdev)
@@ -998,10 +1006,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
return rc;
rc = init_tx_pools(netdev);
-
- adapter->num_active_tx_scrqs = adapter->req_tx_queues;
- adapter->num_active_rx_scrqs = adapter->req_rx_queues;
-
return rc;
}
@@ -1706,9 +1710,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
release_napi(adapter);
init_napi(adapter);
-
- adapter->num_active_tx_scrqs = adapter->req_tx_queues;
- adapter->num_active_rx_scrqs = adapter->req_rx_queues;
} else {
rc = reset_tx_pools(adapter);
if (rc)
@@ -2398,19 +2399,10 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
- u64 num_tx_scrqs, num_rx_scrqs;
int i;
- if (adapter->state == VNIC_PROBED) {
- num_tx_scrqs = adapter->req_tx_queues;
- num_rx_scrqs = adapter->req_rx_queues;
- } else {
- num_tx_scrqs = adapter->num_active_tx_scrqs;
- num_rx_scrqs = adapter->num_active_rx_scrqs;
- }
-
if (adapter->tx_scrq) {
- for (i = 0; i < num_tx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
if (!adapter->tx_scrq[i])
continue;
@@ -2429,10 +2421,11 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
+ adapter->num_active_tx_scrqs = 0;
}
if (adapter->rx_scrq) {
- for (i = 0; i < num_rx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
continue;
@@ -2451,6 +2444,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
+ adapter->num_active_rx_scrqs = 0;
}
}
@@ -2718,6 +2712,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
+ adapter->num_active_tx_scrqs++;
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -2728,6 +2723,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
+ adapter->num_active_rx_scrqs++;
}
kfree(allqueues);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 68e712c69211..099c89d49945 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1092,8 +1092,11 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
- u64 num_active_rx_scrqs;
- u64 num_active_tx_scrqs;
+ u32 num_active_rx_scrqs;
+ u32 num_active_rx_pools;
+ u32 num_active_rx_napi;
+ u32 num_active_tx_scrqs;
+ u32 num_active_tx_pools;
struct tasklet_struct tasklet;
enum vnic_state state;
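The failover case the patch targets can be seen in self-contained form
below (plain userspace C with hypothetical names, not the driver code):
if setup is interrupted after the rx sub-crqs exist but before any rx
pool does, release paths driven by per-resource counters free exactly
what was allocated and nothing else.
#include <stdio.h>
#include <stdlib.h>

struct adapter {
	void **rx_scrq;
	void **rx_pool;
	unsigned int num_active_rx_scrqs;
	unsigned int num_active_rx_pools;
};

static void release_rx_pools(struct adapter *a)
{
	unsigned int i;

	if (!a->rx_pool)
		return;
	for (i = 0; i < a->num_active_rx_pools; i++)
		free(a->rx_pool[i]);
	free(a->rx_pool);
	a->rx_pool = NULL;
	a->num_active_rx_pools = 0;
}

static void release_rx_scrqs(struct adapter *a)
{
	unsigned int i;

	if (!a->rx_scrq)
		return;
	for (i = 0; i < a->num_active_rx_scrqs; i++)
		free(a->rx_scrq[i]);
	free(a->rx_scrq);
	a->rx_scrq = NULL;
	a->num_active_rx_scrqs = 0;
}

int main(void)
{
	struct adapter a = { 0 };
	unsigned int i, req_rx_queues = 4;

	/* Sub-CRQs come up first; each one bumps its own counter. */
	a.rx_scrq = calloc(req_rx_queues, sizeof(*a.rx_scrq));
	for (i = 0; i < req_rx_queues; i++) {
		a.rx_scrq[i] = malloc(16);	/* error checks elided */
		a.num_active_rx_scrqs++;
	}

	/* Simulated failover before any rx pool was initialized:
	 * num_active_rx_pools is still 0, so the pool release loop
	 * runs zero times instead of walking req_rx_queues entries
	 * of a missing array, and the scrq release frees exactly
	 * the entries that exist.
	 */
	release_rx_pools(&a);
	release_rx_scrqs(&a);

	printf("clean teardown after partial init\n");
	return 0;
}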
* Re: [PATCH net-next] ibmvnic: Split counters for scrq/pools/napi
From: David Miller @ 2018-02-22 20:04 UTC
To: nfont; +Cc: netdev, jallen, tlfalcon
From: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Date: Wed, 21 Feb 2018 21:33:56 -0600
> The approach of using one counter to rule them all when tracking the
> number of active sub-crqs, pools, and napi instances has problems
> handling some failover scenarios. This is because the sub-crqs,
> pools, and napi instances are initialized in different places, and
> the points at which the active counts are updated do not line up
> with those initializations.
>
> This patch simplifies the tracking by keeping separate counters for
> the tx and rx sub-crqs, pools, and napi instances.
>
> Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Applied, thanks Nathan.