From: Michael Chan <michael.chan@broadcom.com>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org
Subject: [PATCH net-next RFC 2/3] bnxt_en: Store min/max tx/rx rings for individual VFs.
Date: Wed, 9 May 2018 07:21:42 -0400 [thread overview]
Message-ID: <1525864903-32619-3-git-send-email-michael.chan@broadcom.com> (raw)
In-Reply-To: <1525864903-32619-1-git-send-email-michael.chan@broadcom.com>
With the new infrastructure to configure queues differently for each VF,
we need to store each VF's current min/max rx/tx rings.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 +++++
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 23 +++++++++++++++++++----
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9b14eb6..2f5a23c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -837,6 +837,10 @@ struct bnxt_vf_info {
u32 func_flags; /* func cfg flags */
u32 min_tx_rate;
u32 max_tx_rate;
+ u16 min_tx_rings;
+ u16 max_tx_rings;
+ u16 min_rx_rings;
+ u16 max_rx_rings;
void *hwrm_cmd_req_addr;
dma_addr_t hwrm_cmd_req_dma_addr;
};
@@ -1351,6 +1355,7 @@ struct bnxt {
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
struct bnxt_vf_info vf;
+ struct hwrm_func_vf_resource_cfg_input vf_resc_cfg_input;
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a649108..489e534 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -171,6 +171,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->min_tx_queues = vf->min_tx_rings;
+ ivi->max_tx_queues = vf->max_tx_rings;
+ ivi->min_rx_queues = vf->min_rx_rings;
+ ivi->max_rx_queues = vf->max_rx_rings;
return 0;
}
@@ -498,6 +502,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
+ struct bnxt_vf_info *vf = &pf->vf[i];
+
req.vf_id = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
@@ -506,7 +512,11 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
break;
}
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = pf->first_vf_id + i;
+ vf->fw_fid = pf->first_vf_id + i;
+ vf->min_tx_rings = le16_to_cpu(req.min_tx_rings);
+ vf->max_tx_rings = vf_tx_rings;
+ vf->min_rx_rings = le16_to_cpu(req.min_rx_rings);
+ vf->max_rx_rings = vf_rx_rings;
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (pf->active_vfs) {
@@ -521,6 +531,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
+ memcpy(&bp->vf_resc_cfg_input, &req, sizeof(req));
rc = pf->active_vfs;
}
return rc;
@@ -585,6 +596,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
+ struct bnxt_vf_info *vf = &pf->vf[i];
int vf_tx_rsvd = vf_tx_rings;
req.fid = cpu_to_le16(pf->first_vf_id + i);
@@ -593,12 +605,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
if (rc)
break;
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = le16_to_cpu(req.fid);
- rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
- &vf_tx_rsvd);
+ vf->fw_fid = le16_to_cpu(req.fid);
+ rc = __bnxt_hwrm_get_tx_rings(bp, vf->fw_fid, &vf_tx_rsvd);
if (rc)
break;
total_vf_tx_rings += vf_tx_rsvd;
+ vf->min_tx_rings = vf_tx_rsvd;
+ vf->max_tx_rings = vf_tx_rsvd;
+ vf->min_rx_rings = vf_rx_rings;
+ vf->max_rx_rings = vf_rx_rings;
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
--
1.8.3.1
next prev parent reply other threads:[~2018-05-09 11:21 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-05-09 11:21 [PATCH net-next RFC 0/3] net: Add support to configure SR-IOV VF queues Michael Chan
2018-05-09 11:21 ` [PATCH net-next RFC 1/3] net: Add support to configure SR-IOV VF minimum and maximum queues Michael Chan
2018-05-09 23:15 ` Jakub Kicinski
2018-05-10 0:22 ` Michael Chan
2018-05-10 1:10 ` Jakub Kicinski
2018-05-10 2:32 ` Michael Chan
2018-05-09 11:21 ` Michael Chan [this message]
2018-05-09 11:21 ` [PATCH net-next RFC 3/3] bnxt_en: Implement .ndo_set_vf_queues() Michael Chan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1525864903-32619-3-git-send-email-michael.chan@broadcom.com \
--to=michael.chan@broadcom.com \
--cc=davem@davemloft.net \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).