From: Sathya Perla <sathya.perla@emulex.com>
To: <netdev@vger.kernel.org>
Subject: [PATCH next 6/6] be2net: implement ethtool set/get_channel hooks
Date: Tue, 27 Aug 2013 16:57:35 +0530
Message-ID: <1377602855-13920-7-git-send-email-sathya.perla@emulex.com>
In-Reply-To: <1377602855-13920-1-git-send-email-sathya.perla@emulex.com>
Support is provided only for combined channels. When SR-IOV is not
enabled, BE3 supports up to 16 channels and Lancer-R/SH-R support up to
32 channels.
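The set_channels() hook stores the requested combined count in
adapter->cfg_num_qs and calls be_update_queues() to tear down and
re-create the queues with the new count. Once the driver is loaded, the
channel counts can be queried and changed from userspace with the
standard ethtool commands, for example (the interface name below is
illustrative):

  ethtool -l eth0              # show current and maximum combined channels
  ethtool -L eth0 combined 8   # request 8 combined channels

Requests that specify rx/tx/other channels, a zero combined count, or a
combined count above the reported maximum are rejected with -EINVAL.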
Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
---
drivers/net/ethernet/emulex/benet/be.h | 17 +++--
drivers/net/ethernet/emulex/benet/be_ethtool.c | 25 ++++++
drivers/net/ethernet/emulex/benet/be_main.c | 94 +++++++++++++++++------
3 files changed, 104 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8b41635..ace5050 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -99,16 +99,17 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
-#define BE3_MAX_RSS_QS 8
#define BE2_MAX_RSS_QS 4
-#define BE3_MAX_TX_QS 8
-#define MAX_RSS_QS BE3_MAX_RSS_QS
-#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
-#define MAX_EVT_QS MAX_RSS_QS
+#define BE3_MAX_RSS_QS 16
+#define BE3_MAX_TX_QS 16
+#define BE3_MAX_EVT_QS 16
+
+#define MAX_RX_QS 32
+#define MAX_EVT_QS 32
+#define MAX_TX_QS 32
-#define MAX_TX_QS 8
#define MAX_ROCE_EQS 5
-#define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
+#define MAX_MSIX_VECTORS 32
#define MIN_MSIX_VECTORS 1
#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64
@@ -701,6 +702,8 @@ extern int be_load_fw(struct be_adapter *adapter, u8 *func);
extern bool be_is_wol_supported(struct be_adapter *adapter);
extern bool be_pause_supported(struct be_adapter *adapter);
extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+int be_update_queues(struct be_adapter *adapter);
+int be_poll(struct napi_struct *napi, int budget);
/*
* internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 4f8c941..b440a1f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1119,6 +1119,29 @@ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return status;
}
+static void be_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ ch->combined_count = adapter->num_evt_qs;
+ ch->max_combined = be_max_qs(adapter);
+}
+
+static int be_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (ch->rx_count || ch->tx_count || ch->other_count ||
+ !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+ return -EINVAL;
+
+ adapter->cfg_num_qs = ch->combined_count;
+
+ return be_update_queues(adapter);
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1145,4 +1168,6 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_channels = be_get_channels,
+ .set_channels = be_set_channels
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b08459d..50116f8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1913,6 +1913,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ netif_napi_del(&eqo->napi);
}
be_queue_free(adapter, &eqo->q);
}
@@ -1928,6 +1929,8 @@ static int be_evt_queues_create(struct be_adapter *adapter)
adapter->cfg_num_qs);
for_all_evt_queues(adapter, eqo, i) {
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
@@ -2021,12 +2024,6 @@ static int be_tx_qs_create(struct be_adapter *adapter)
int status, i;
adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
- if (adapter->num_tx_qs != MAX_TX_QS) {
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_qs);
- rtnl_unlock();
- }
for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq;
@@ -2087,13 +2084,6 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
if (adapter->num_rx_qs > 1)
adapter->num_rx_qs++;
- if (adapter->num_rx_qs != MAX_RX_QS) {
- rtnl_lock();
- netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_qs);
- rtnl_unlock();
- }
-
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
rxo->adapter = adapter;
@@ -2244,7 +2234,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
return (work_done < budget); /* Done */
}
-static int be_poll(struct napi_struct *napi, int budget)
+int be_poll(struct napi_struct *napi, int budget)
{
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter = eqo->adapter;
@@ -2356,6 +2346,7 @@ static void be_msix_disable(struct be_adapter *adapter)
if (msix_enabled(adapter)) {
pci_disable_msix(adapter->pdev);
adapter->num_msix_vec = 0;
+ adapter->num_msix_roce_vec = 0;
}
}
@@ -2771,14 +2762,19 @@ static void be_clear_queues(struct be_adapter *adapter)
be_evt_queues_destroy(adapter);
}
-static int be_clear(struct be_adapter *adapter)
+static void be_cancel_worker(struct be_adapter *adapter)
{
- int i;
-
if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
cancel_delayed_work_sync(&adapter->work);
adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
}
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+ int i;
+
+ be_cancel_worker(adapter);
if (sriov_enabled(adapter))
be_vf_clear(adapter);
@@ -2982,7 +2978,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
res->max_rx_qs = res->max_rss_qs + 1;
- res->max_evt_qs = be_physfn(adapter) ? MAX_EVT_QS : 1;
+ res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -3108,8 +3104,15 @@ static int be_mac_setup(struct be_adapter *adapter)
return 0;
}
+static void be_schedule_worker(struct be_adapter *adapter)
+{
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+}
+
static int be_setup_queues(struct be_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
int status;
status = be_evt_queues_create(adapter);
@@ -3128,12 +3131,56 @@ static int be_setup_queues(struct be_adapter *adapter)
if (status)
goto err;
+ status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
+ if (status)
+ goto err;
+
+ status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
+ if (status)
+ goto err;
+
return 0;
err:
dev_err(&adapter->pdev->dev, "queue_setup failed\n");
return status;
}
+int be_update_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (netif_running(netdev))
+ be_close(netdev);
+
+ be_cancel_worker(adapter);
+
+ /* If any vectors have been shared with RoCE we cannot re-program
+ * the MSIx table.
+ */
+ if (!adapter->num_msix_roce_vec)
+ be_msix_disable(adapter);
+
+ be_clear_queues(adapter);
+
+ if (!msix_enabled(adapter)) {
+ status = be_msix_enable(adapter);
+ if (status)
+ return status;
+ }
+
+ status = be_setup_queues(adapter);
+ if (status)
+ return status;
+
+ be_schedule_worker(adapter);
+
+ if (netif_running(netdev))
+ status = be_open(netdev);
+
+ return status;
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -3163,7 +3210,10 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
+ /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+ rtnl_lock();
status = be_setup_queues(adapter);
+ rtnl_unlock();
if (status)
goto err;
@@ -3202,8 +3252,7 @@ static int be_setup(struct be_adapter *adapter)
if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
- adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+ be_schedule_worker(adapter);
return 0;
err:
be_clear(adapter);
@@ -3769,8 +3818,6 @@ static const struct net_device_ops be_netdev_ops = {
static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *eqo;
- int i;
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
@@ -3793,9 +3840,6 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
-
- for_all_evt_queues(adapter, eqo, i)
- netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
--
1.7.1