From: Jakub Kicinski <jakub.kicinski@netronome.com>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, Jakub Kicinski <jakub.kicinski@netronome.com>
Subject: [PATCHv3 net-next 02/14] nfp: break up nfp_net_{alloc|free}_rings
Date: Thu, 18 Feb 2016 20:38:17 +0000 [thread overview]
Message-ID: <1455827909-26443-3-git-send-email-jakub.kicinski@netronome.com> (raw)
In-Reply-To: <1455827909-26443-1-git-send-email-jakub.kicinski@netronome.com>
nfp_net_{alloc|free}_rings contained a strange mix of ring allocations
and per-vector initialization.  Remove them: make the vector init and
cleanup separate functions and handle the ring allocations explicitly
in the callers.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
---
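Note for reviewers: a condensed sketch of the control flow this moves into
.ndo_open/.ndo_stop, derived from the diff below (error-unwinding labels,
ring flushes and unrelated setup omitted for brevity):

    /* open: IRQ/NAPI setup, then TX and RX ring allocation, per vector */
    for (r = 0; r < nn->num_r_vecs; r++) {
            err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
            ...
            err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring);
            ...
            err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring);
            ...
    }

    /* close: free the rings, then tear down the vector (NAPI del + free_irq) */
    for (r = 0; r < nn->num_r_vecs; r++) {
            nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
            nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
            nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
    }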
.../net/ethernet/netronome/nfp/nfp_net_common.c | 126 ++++++++-------------
1 file changed, 47 insertions(+), 79 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index bebdae80ccda..d39ac3553e1e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1486,91 +1486,40 @@ err_alloc:
return -ENOMEM;
}
-static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free)
+static int
+nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ int idx)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
+ int err;
- while (n_free--) {
- r_vec = &nn->r_vecs[n_free];
- entry = &nn->irq_entries[r_vec->irq_idx];
+ snprintf(r_vec->name, sizeof(r_vec->name),
+ "%s-rxtx-%d", nn->netdev->name, idx);
+ err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+ if (err) {
+ nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+ return err;
+ }
- nfp_net_rx_ring_free(r_vec->rx_ring);
- nfp_net_tx_ring_free(r_vec->tx_ring);
+ /* Setup NAPI */
+ netif_napi_add(nn->netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);
- irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
+ irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
- netif_napi_del(&r_vec->napi);
- }
-}
+ nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
-/**
- * nfp_net_free_rings() - Free all ring resources
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_free_rings(struct nfp_net *nn)
-{
- __nfp_net_free_rings(nn, nn->num_r_vecs);
+ return 0;
}
-/**
- * nfp_net_alloc_rings() - Allocate resources for RX and TX rings
- * @nn: NFP Net device to reconfigure
- *
- * Return: 0 on success or negative errno on error.
- */
-static int nfp_net_alloc_rings(struct nfp_net *nn)
+static void
+nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
- int err;
- int r;
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
- for (r = 0; r < nn->num_r_vecs; r++) {
- r_vec = &nn->r_vecs[r];
- entry = &nn->irq_entries[r_vec->irq_idx];
-
- /* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
-
- snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->netdev->name, r);
- err = request_irq(entry->vector, r_vec->handler, 0,
- r_vec->name, r_vec);
- if (err) {
- nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector);
- goto err_napi_del;
- }
-
- irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
-
- nn_dbg(nn, "RV%02d: irq=%03d/%03d\n",
- r, entry->vector, entry->entry);
-
- /* Allocate TX ring resources */
- err = nfp_net_tx_ring_alloc(r_vec->tx_ring);
- if (err)
- goto err_free_irq;
-
- /* Allocate RX ring resources */
- err = nfp_net_rx_ring_alloc(r_vec->rx_ring);
- if (err)
- goto err_free_tx;
- }
-
- return 0;
-
-err_free_tx:
- nfp_net_tx_ring_free(r_vec->tx_ring);
-err_free_irq:
irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
-err_napi_del:
netif_napi_del(&r_vec->napi);
- __nfp_net_free_rings(nn, r);
- return err;
+ free_irq(entry->vector, r_vec);
}
/**
@@ -1734,9 +1683,19 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
- err = nfp_net_alloc_rings(nn);
- if (err)
- goto err_free_lsc;
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+ if (err)
+ goto err_free_prev_vecs;
+
+ err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring);
+ if (err)
+ goto err_cleanup_vec_p;
+
+ err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring);
+ if (err)
+ goto err_free_tx_ring_p;
+ }
err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
if (err)
@@ -1829,8 +1788,15 @@ err_disable_napi:
err_clear_config:
nfp_net_clear_config_and_disable(nn);
err_free_rings:
- nfp_net_free_rings(nn);
-err_free_lsc:
+ r = nn->num_r_vecs;
+err_free_prev_vecs:
+ while (r--) {
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+err_free_tx_ring_p:
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+err_cleanup_vec_p:
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ }
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -1871,9 +1837,11 @@ static int nfp_net_netdev_close(struct net_device *netdev)
for (r = 0; r < nn->num_r_vecs; r++) {
nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
}
- nfp_net_free_rings(nn);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
--
1.9.1