From: Michael Chan <michael.chan@broadcom.com>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org
Subject: [PATCH net-next 08/23] bnxt_en: Configure context memory on new devices.
Date: Sun, 14 Oct 2018 07:02:44 -0400
Message-ID: <1539514979-2859-9-git-send-email-michael.chan@broadcom.com>
In-Reply-To: <1539514979-2859-1-git-send-email-michael.chan@broadcom.com>
Call firmware with the new HWRM_FUNC_BACKING_STORE_CFG command to configure
the DMA addresses of all context memory pages on new devices that require
context memory.  The call is made from bnxt_alloc_ctx_mem() once all backing
store regions have been allocated.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
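Note for reviewers (not part of the change): bnxt_hwrm_func_backing_store_cfg()
is called once from bnxt_alloc_ctx_mem() after all backing store regions have
been allocated, with an enables bitmap covering QP/SRQ/CQ/VNIC/STAT plus one
TQM bit per ring.  Each region is described to firmware by a page attribute
byte and a page directory address filled in by bnxt_hwrm_set_pg_attr().  The
standalone userspace sketch below only mirrors that encoding so it can be
eyeballed in isolation; it is not kernel code, the demo_* names are made up,
and my reading of the attribute byte (page size selector in the upper nibble,
level-1 indirection bit in the low bits) should be checked against the
firmware spec.

/*
 * Reviewer aid only -- not kernel code.  Mirrors the pg_attr/pg_dir
 * encoding done by bnxt_hwrm_set_pg_attr() in this patch.
 *
 * Build: gcc -Wall -o pg_attr_demo pg_attr_demo.c
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ring_mem {
	int nr_pages;		/* number of context pages in the region */
	uint64_t pg_tbl_map;	/* DMA address of the page directory */
	uint64_t first_page;	/* DMA address of page 0 (dma_arr[0]) */
};

static void demo_set_pg_attr(const struct demo_ring_mem *rmem, int page_shift,
			     uint8_t *pg_attr, uint64_t *pg_dir)
{
	uint8_t pg_size = 0;		/* default: 4K pages */

	if (page_shift == 13)		/* 8K host pages */
		pg_size = 1 << 4;
	else if (page_shift == 16)	/* 64K host pages */
		pg_size = 2 << 4;

	*pg_attr = pg_size;
	if (rmem->nr_pages > 1) {
		/* Multiple pages: one level of indirection via a page table. */
		*pg_attr |= 1;
		*pg_dir = rmem->pg_tbl_map;
	} else {
		/* Single page: point firmware straight at it. */
		*pg_dir = rmem->first_page;
	}
}

int main(void)
{
	struct demo_ring_mem single = { 1, 0, 0x1000 };
	struct demo_ring_mem multi = { 4, 0x2000, 0x3000 };
	uint64_t dir;
	uint8_t attr;

	demo_set_pg_attr(&single, 12, &attr, &dir);
	printf("4K pages,  1 page : pg_attr=0x%02x pg_dir=0x%llx\n",
	       (unsigned int)attr, (unsigned long long)dir);

	demo_set_pg_attr(&multi, 16, &attr, &dir);
	printf("64K pages, 4 pages: pg_attr=0x%02x pg_dir=0x%llx\n",
	       (unsigned int)attr, (unsigned long long)dir);
	return 0;
}

With those inputs the sketch prints pg_attr=0x00/pg_dir=0x1000 for the
single-page case and pg_attr=0x21/pg_dir=0x2000 for the multi-page 64K case.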
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 123 +++++++++++++++++++++++++++++-
1 file changed, 120 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 83427da..b0e2416 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5325,6 +5325,114 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	return rc;
 }
 
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
+				  __le64 *pg_dir)
+{
+	u8 pg_size = 0;
+
+	if (BNXT_PAGE_SHIFT == 13)
+		pg_size = 1 << 4;
+	else if (BNXT_PAGE_SHIFT == 16)
+		pg_size = 2 << 4;
+
+	*pg_attr = pg_size;
+	if (rmem->nr_pages > 1) {
+		*pg_attr |= 1;
+		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
+	} else {
+		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+	}
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES		\
+	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
+	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
+	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
+	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
+	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
+{
+	struct hwrm_func_backing_store_cfg_input req = {0};
+	struct bnxt_ctx_mem_info *ctx = bp->ctx;
+	struct bnxt_ctx_pg_info *ctx_pg;
+	__le32 *num_entries;
+	__le64 *pg_dir;
+	u8 *pg_attr;
+	int i, rc;
+	u32 ena;
+
+	if (!ctx)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
+	req.enables = cpu_to_le32(enables);
+
+	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
+		ctx_pg = &ctx->qp_mem;
+		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
+		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
+		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
+		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req.qpc_pg_size_qpc_lvl,
+				      &req.qpc_page_dir);
+	}
+	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
+		ctx_pg = &ctx->srq_mem;
+		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
+		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
+		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req.srq_pg_size_srq_lvl,
+				      &req.srq_page_dir);
+	}
+	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
+		ctx_pg = &ctx->cq_mem;
+		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
+		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
+		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
+				      &req.cq_page_dir);
+	}
+	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
+		ctx_pg = &ctx->vnic_mem;
+		req.vnic_num_vnic_entries =
+			cpu_to_le16(ctx->vnic_max_vnic_entries);
+		req.vnic_num_ring_table_entries =
+			cpu_to_le16(ctx->vnic_max_ring_table_entries);
+		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req.vnic_pg_size_vnic_lvl,
+				      &req.vnic_page_dir);
+	}
+	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
+		ctx_pg = &ctx->stat_mem;
+		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
+		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req.stat_pg_size_stat_lvl,
+				      &req.stat_page_dir);
+	}
+	for (i = 0, num_entries = &req.tqm_sp_num_entries,
+	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
+	     pg_dir = &req.tqm_sp_page_dir,
+	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+	     i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+		if (!(enables & ena))
+			continue;
+
+		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
+		ctx_pg = ctx->tqm_mem[i];
+		*num_entries = cpu_to_le32(ctx_pg->entries);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+	}
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		rc = -EIO;
+	return rc;
+}
+
 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
 {
@@ -5341,6 +5449,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 	rmem->page_size = BNXT_PAGE_SIZE;
 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
+	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
 	return bnxt_alloc_ring(bp, rmem);
 }
 
@@ -5371,7 +5480,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 {
 	struct bnxt_ctx_pg_info *ctx_pg;
 	struct bnxt_ctx_mem_info *ctx;
-	u32 mem_size, entries;
+	u32 mem_size, ena, entries;
 	int i, rc;
 
 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
@@ -5424,15 +5533,23 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	entries = roundup(entries, ctx->tqm_entries_multiple);
 	entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
 			  ctx->tqm_max_entries_per_ring);
-	for (i = 0; i < bp->max_q + 1; i++) {
+	for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
 		ctx_pg = ctx->tqm_mem[i];
 		ctx_pg->entries = entries;
 		mem_size = ctx->tqm_entry_size * entries;
 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
 		if (rc)
 			return rc;
+		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
 	}
-	ctx->flags |= BNXT_CTX_FLAG_INITED;
+	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+	if (rc)
+		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
+			   rc);
+	else
+		ctx->flags |= BNXT_CTX_FLAG_INITED;
+
 	return 0;
 }
 
--
2.5.1