From: sunil.kovvuri@gmail.com
To: netdev@vger.kernel.org, davem@davemloft.net
Cc: arnd@arndb.de, linux-soc@vger.kernel.org,
	Sunil Goutham <sgoutham@marvell.com>
Subject: [PATCH 07/17] octeontx2-af: Broadcast packet replication support
Date: Fri, 19 Oct 2018 19:07:28 +0530
Message-ID: <1539956258-29377-8-git-send-email-sunil.kovvuri@gmail.com>
In-Reply-To: <1539956258-29377-1-git-send-email-sunil.kovvuri@gmail.com>

From: Sunil Goutham <sgoutham@marvell.com>

Allocate memory for mcast/bcast/mirror replication entry (MCE)
contexts and for the replication buffers used by HW, and configure
HW with the corresponding memory bases. Also add support for
installing MCEs via the NIX AQ mbox interface.
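
To make the flow concrete, here is a minimal sketch of a single MCE
install through the AQ mbox using the structures this patch adds.
The pcifunc/mce_idx/next_idx/is_last variables are placeholders for
whatever the caller supplies; the real helper is nix_setup_mce() in
the diff below.

	struct nix_aq_enq_req aq_req;
	int err;

	/* Sketch only; mirrors nix_setup_mce() below */
	memset(&aq_req, 0, sizeof(aq_req));
	aq_req.hdr.pcifunc = pcifunc;		/* PF/VF this entry delivers to */
	aq_req.ctype = NIX_AQ_CTYPE_MCE;	/* new AQ context type */
	aq_req.op = NIX_AQ_INSTOP_INIT;
	aq_req.qidx = mce_idx;			/* slot in the MCE table */

	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next_idx;		/* next entry in replication list */
	aq_req.mce.eol = is_last;		/* end-of-list marker */

	/* Declare all context fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);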

For now support is restricted to broadcast pkt replication, hence
the MCE table size and the number of replication buffers allocated
are kept small. Each CGX LMAC mapped RVU PF is assigned an MCE list
of length 'number of VFs of that PF + 1 for the PF itself'.
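
As a concrete (hypothetical) example, a CGX mapped PF with 8 VFs is
assigned a contiguous list of nine entries from the global MCE table:
entry 0 for the PF itself, entries 1-8 for its VFs. The carving is a
simple bump allocation, as nix_alloc_mce_list() in the diff does:

	/* Sketch of per-PF list carving; base indexes this PF's list */
	base = mcast->next_free_mce;
	mcast->next_free_mce += numvfs + 1;	/* VFs + the PF itself */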

Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
---
 drivers/net/ethernet/marvell/octeontx2/af/rvu.h    |  19 ++
 .../net/ethernet/marvell/octeontx2/af/rvu_nix.c    | 201 +++++++++++++++++++++
 2 files changed, 220 insertions(+)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b3dbbd6..b39400d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -59,6 +59,20 @@ struct rvu_block {
 	unsigned char name[NAME_SIZE];
 };
 
+struct nix_mcast {
+	struct qmem	*mce_ctx;
+	struct qmem	*mcast_buf;
+	int		replay_pkind;
+	int		next_free_mce;
+	spinlock_t	mce_lock; /* Serialize MCE updates */
+};
+
+struct nix_mce_list {
+	struct hlist_head	head;
+	int			count;
+	int			max;
+};
+
 /* Structure for per RVU func info ie PF/VF */
 struct rvu_pfvf {
 	bool		npalf; /* Only one NPALF per RVU_FUNC */
@@ -93,6 +107,10 @@ struct rvu_pfvf {
 	unsigned long	*cq_bmap;
 
 	u8		mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+
+	/* Broadcast pkt replication info */
+	u16			bcast_mce_idx;
+	struct nix_mce_list	bcast_mce_list;
 };
 
 struct nix_txsch {
@@ -108,6 +126,7 @@ struct npc_pkind {
 
 struct nix_hw {
 	struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
+	struct nix_mcast mcast;
 };
 
 struct rvu_hwinfo {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4f2528e..947424a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -16,6 +16,61 @@
 #include "rvu.h"
 #include "cgx.h"
 
+enum mc_tbl_sz {
+	MC_TBL_SZ_256,
+	MC_TBL_SZ_512,
+	MC_TBL_SZ_1K,
+	MC_TBL_SZ_2K,
+	MC_TBL_SZ_4K,
+	MC_TBL_SZ_8K,
+	MC_TBL_SZ_16K,
+	MC_TBL_SZ_32K,
+	MC_TBL_SZ_64K,
+};
+
+enum mc_buf_cnt {
+	MC_BUF_CNT_8,
+	MC_BUF_CNT_16,
+	MC_BUF_CNT_32,
+	MC_BUF_CNT_64,
+	MC_BUF_CNT_128,
+	MC_BUF_CNT_256,
+	MC_BUF_CNT_512,
+	MC_BUF_CNT_1024,
+	MC_BUF_CNT_2048,
+};
+
+/* For now considering MC resources needed for broadcast
+ * pkt replication only, i.e. 256 HWVFs + 12 PFs.
+ */
+#define MC_TBL_SIZE	MC_TBL_SZ_512
+#define MC_BUF_CNT	MC_BUF_CNT_128
+
+struct mce {
+	struct hlist_node	node;
+	u16			idx;
+	u16			pcifunc;
+};
+
+static void nix_mce_list_init(struct nix_mce_list *list, int max)
+{
+	INIT_HLIST_HEAD(&list->head);
+	list->count = 0;
+	list->max = max;
+}
+
+static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+{
+	int idx;
+
+	if (!mcast)
+		return 0;
+
+	idx = mcast->next_free_mce;
+	mcast->next_free_mce += count;
+	return idx;
+}
+
 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
 {
 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
@@ -315,6 +370,19 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 		    (req->qidx >= (256UL << (cfg & 0xF))))
 			rc = NIX_AF_ERR_AQ_ENQUEUE;
 		break;
+	case NIX_AQ_CTYPE_MCE:
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+		/* Check if index exceeds MCE list length */
+		if (!hw->nix0->mcast.mce_ctx ||
+		    (req->qidx >= (256UL << (cfg & 0xF))))
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+
+		/* Adding multicast lists for requests from PF/VFs is not
+		 * yet supported, so ignore this.
+		 */
+		if (rsp)
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+		break;
 	default:
 		rc = NIX_AF_ERR_AQ_ENQUEUE;
 	}
@@ -361,6 +429,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
 			memcpy(mask, &req->rss_mask,
 			       sizeof(struct nix_rsse_s));
+		else if (req->ctype == NIX_AQ_CTYPE_MCE)
+			memcpy(mask, &req->mce_mask,
+			       sizeof(struct nix_rx_mce_s));
 		/* Fall through */
 	case NIX_AQ_INSTOP_INIT:
 		if (req->ctype == NIX_AQ_CTYPE_RQ)
@@ -371,6 +442,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+		else if (req->ctype == NIX_AQ_CTYPE_MCE)
+			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
 		break;
 	case NIX_AQ_INSTOP_NOP:
 	case NIX_AQ_INSTOP_READ:
@@ -446,6 +519,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
 				memcpy(&rsp->rss, ctx,
 				       sizeof(struct nix_cq_ctx_s));
+			else if (req->ctype == NIX_AQ_CTYPE_MCE)
+				memcpy(&rsp->mce, ctx,
+				       sizeof(struct nix_rx_mce_s));
 		}
 	}
 
@@ -1041,6 +1117,122 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
 	return 0;
 }
 
+static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
+			 u16 pcifunc, int next, bool eol)
+{
+	struct nix_aq_enq_req aq_req;
+	int err;
+
+	aq_req.hdr.pcifunc = pcifunc;
+	aq_req.ctype = NIX_AQ_CTYPE_MCE;
+	aq_req.op = op;
+	aq_req.qidx = mce;
+
+	/* Forward bcast pkts to RQ0, RSS not needed */
+	aq_req.mce.op = 0;
+	aq_req.mce.index = 0;
+	aq_req.mce.eol = eol;
+	aq_req.mce.pf_func = pcifunc;
+	aq_req.mce.next = next;
+
+	/* All fields valid */
+	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
+
+	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+	if (err) {
+		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
+			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+		return err;
+	}
+	return 0;
+}
+
+static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+	struct nix_mcast *mcast = &nix_hw->mcast;
+	int err, pf, numvfs, idx;
+	struct rvu_pfvf *pfvf;
+	u16 pcifunc;
+	u64 cfg;
+
+	/* Skip PF0 (i.e. the AF) */
+	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		/* If PF is not enabled, nothing to do */
+		if (!((cfg >> 20) & 0x01))
+			continue;
+		/* Get numVFs attached to this PF */
+		numvfs = (cfg >> 12) & 0xFF;
+
+		pfvf = &rvu->pf[pf];
+		/* Save the start MCE */
+		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+
+		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+
+		for (idx = 0; idx < (numvfs + 1); idx++) {
+			/* idx-0 is for PF, followed by VFs */
+			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+			pcifunc |= idx;
+			/* Add dummy entries now, so that we don't have to check
+			 * whether AQ_OP should be INIT or WRITE later on.
+			 * These will be updated when a NIXLF is attached to or
+			 * detached from these PF/VFs.
+			 */
+			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
+					    NIX_AQ_INSTOP_INIT,
+					    pcifunc, 0, true);
+			if (err)
+				return err;
+		}
+	}
+	return 0;
+}
+
+static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+	struct nix_mcast *mcast = &nix_hw->mcast;
+	struct rvu_hwinfo *hw = rvu->hw;
+	int err, size;
+
+	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
+	size = (1ULL << size);
+
+	/* Alloc memory for multicast/mirror replication entries */
+	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
+			 (256UL << MC_TBL_SIZE), size);
+	if (err)
+		return -ENOMEM;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
+		    (u64)mcast->mce_ctx->iova);
+
+	/* Set max list length to max number of VFs per PF + the PF itself */
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
+		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
+
+	/* Alloc memory for multicast replication buffers */
+	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
+	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
+			 (8UL << MC_BUF_CNT), size);
+	if (err)
+		return -ENOMEM;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
+		    (u64)mcast->mcast_buf->iova);
+
+	/* Alloc pkind for NIX internal RX multicast/mirror replay */
+	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
+
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
+		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
+		    BIT_ULL(20) | MC_BUF_CNT);
+
+	spin_lock_init(&mcast->mce_lock);
+
+	return nix_setup_bcast_tables(rvu, nix_hw);
+}
+
 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 {
 	struct nix_txsch *txsch;
@@ -1242,6 +1434,10 @@ int rvu_nix_init(struct rvu *rvu)
 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
 		if (err)
 			return err;
+
+		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+		if (err)
+			return err;
 	}
 	return 0;
 }
@@ -1251,6 +1447,7 @@ void rvu_nix_freemem(struct rvu *rvu)
 	struct rvu_hwinfo *hw = rvu->hw;
 	struct rvu_block *block;
 	struct nix_txsch *txsch;
+	struct nix_mcast *mcast;
 	struct nix_hw *nix_hw;
 	int blkaddr, lvl;
 
@@ -1270,5 +1467,9 @@ void rvu_nix_freemem(struct rvu *rvu)
 			txsch = &nix_hw->txsch[lvl];
 			kfree(txsch->schq.bmap);
 		}
+
+		mcast = &nix_hw->mcast;
+		qmem_free(rvu->dev, mcast->mce_ctx);
+		qmem_free(rvu->dev, mcast->mcast_buf);
 	}
 }
-- 
2.7.4
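
A note on the size encodings used in nix_setup_mcast() above: the
mc_tbl_sz/mc_buf_cnt enums carry no explicit values, so
MC_TBL_SZ_512 == 1 and MC_BUF_CNT_128 == 4, and the allocations
work out to (this is just the arithmetic implied by the diff, not
taken from the HW spec):

	256UL << MC_TBL_SIZE = 256 << 1 = 512 MCE contexts
	8UL << MC_BUF_CNT    = 8 << 4   = 128 replication buffers

512 entries comfortably cover the '256 HWVFs + 12 PFs' mentioned in
the sizing comment.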
