public inbox for netdev@vger.kernel.org
 help / color / mirror / Atom feed
From: Paolo Abeni <pabeni@redhat.com>
To: "illusion.wang" <illusion.wang@nebula-matrix.com>,
	dimon.zhao@nebula-matrix.com, alvin.wang@nebula-matrix.com,
	sam.chen@nebula-matrix.com, netdev@vger.kernel.org
Cc: andrew+netdev@lunn.ch, corbet@lwn.net, kuba@kernel.org,
	linux-doc@vger.kernel.org, lorenzo@kernel.org, horms@kernel.org,
	vadim.fedorenko@linux.dev, lukas.bulwahn@redhat.com,
	edumazet@google.com, open list <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH v7 net-next 08/11] net/nebula-matrix: add vsi resource implementation
Date: Thu, 12 Mar 2026 13:04:25 +0100	[thread overview]
Message-ID: <1b835f08-20c8-4c40-bf17-ebc300e849a9@redhat.com> (raw)
In-Reply-To: <20260310120959.22015-9-illusion.wang@nebula-matrix.com>

On 3/10/26 1:09 PM, illusion.wang wrote:
> +static int nbl_dped_init(struct nbl_hw_mgt *hw_mgt)
> +{
> +	nbl_hw_wr32(hw_mgt, NBL_DPED_VLAN_OFFSET, 0xC);
> +	nbl_hw_wr32(hw_mgt, NBL_DPED_DSCP_OFFSET_0, 0x8);
> +	nbl_hw_wr32(hw_mgt, NBL_DPED_DSCP_OFFSET_1, 0x4);
> +
> +	// dped checksum offload

Minor nit: use /* */ for comments.

> +	nbl_configure_dped_checksum(hw_mgt);
> +
> +	return 0;
> +}
> +
> +static int nbl_uped_init(struct nbl_hw_mgt *hw_mgt)
> +{
> +	struct ped_hw_edit_profile hw_edit;
> +
> +	nbl_hw_rd_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u32 *)&hw_edit,
> +		       sizeof(hw_edit));
> +	hw_edit.l3_len = 0;
> +	nbl_hw_wr_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u32 *)&hw_edit,
> +		       sizeof(hw_edit));
> +
> +	nbl_hw_rd_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u32 *)&hw_edit,
> +		       sizeof(hw_edit));
> +	hw_edit.l3_len = 1;
> +	nbl_hw_wr_regs(hw_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u32 *)&hw_edit,
> +		       sizeof(hw_edit));
> +
> +	return 0;
> +}
> +
> +static void nbl_shaping_eth_init(struct nbl_hw_mgt *hw_mgt, u8 eth_id, u8 speed)
> +{
> +	struct nbl_shaping_dvn_dport dvn_dport = { 0 };
> +	struct nbl_shaping_dport dport = { 0 };
> +	u32 rate, half_rate;
> +
> +	if (speed == NBL_FW_PORT_SPEED_100G) {
> +		rate = NBL_SHAPING_DPORT_100G_RATE;
> +		half_rate = NBL_SHAPING_DPORT_HALF_100G_RATE;
> +	} else {
> +		rate = NBL_SHAPING_DPORT_25G_RATE;
> +		half_rate = NBL_SHAPING_DPORT_HALF_25G_RATE;
> +	}
> +
> +	dport.cir = rate;
> +	dport.pir = rate;
> +	dport.depth = max(dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH);
> +	dport.cbs = dport.depth;
> +	dport.pbs = dport.depth;
> +	dport.valid = 1;
> +
> +	dvn_dport.cir = half_rate;
> +	dvn_dport.pir = rate;
> +	dvn_dport.depth = dport.depth;
> +	dvn_dport.cbs = dvn_dport.depth;
> +	dvn_dport.pbs = dvn_dport.depth;
> +	dvn_dport.valid = 1;
> +
> +	nbl_hw_wr_regs(hw_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u32 *)&dport,
> +		       sizeof(dport));
> +	nbl_hw_wr_regs(hw_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id),
> +		       (u32 *)&dvn_dport, sizeof(dvn_dport));
> +}
> +
> +static int nbl_shaping_init(struct nbl_hw_mgt *hw_mgt, u8 speed)
> +{
> +#define NBL_SHAPING_FLUSH_INTERVAL 128
> +	struct nbl_shaping_net net_shaping = { 0 };
> +	struct dsch_psha_en psha_en = { 0 };
> +	int i;
> +
> +	for (i = 0; i < NBL_MAX_ETHERNET; i++)
> +		nbl_shaping_eth_init(hw_mgt, i, speed);
> +
> +	psha_en.en = 0xF;
> +	nbl_hw_wr_regs(hw_mgt, NBL_DSCH_PSHA_EN_ADDR, (u32 *)&psha_en,
> +		       sizeof(psha_en));
> +
> +	for (i = 0; i < NBL_MAX_FUNC; i++) {
> +		nbl_hw_wr_regs(hw_mgt, NBL_SHAPING_NET_REG(i),
> +			       (u32 *)&net_shaping, sizeof(net_shaping));
> +		if ((i % NBL_SHAPING_FLUSH_INTERVAL) == 0)
> +			nbl_flush_writes(hw_mgt);
> +	}
> +	nbl_flush_writes(hw_mgt);
> +	return 0;
> +}
> +
> +static int nbl_dsch_qid_max_init(struct nbl_hw_mgt *hw_mgt)
> +{
> +	struct dsch_vn_quanta quanta = { 0 };
> +
> +	quanta.h_qua = NBL_HOST_QUANTA;
> +	quanta.e_qua = NBL_ECPU_QUANTA;
> +	nbl_hw_wr_regs(hw_mgt, NBL_DSCH_VN_QUANTA_ADDR, (u32 *)&quanta,
> +		       sizeof(quanta));
> +	nbl_hw_wr32(hw_mgt, NBL_DSCH_HOST_QID_MAX, NBL_MAX_QUEUE_ID);
> +
> +	nbl_hw_wr32(hw_mgt, NBL_DVN_ECPU_QUEUE_NUM, 0);
> +	nbl_hw_wr32(hw_mgt, NBL_UVN_ECPU_QUEUE_NUM, 0);
> +
> +	return 0;
> +}
> +
> +static int nbl_ustore_init(struct nbl_hw_mgt *hw_mgt, u8 eth_num)
> +{
> +	struct nbl_ustore_port_drop_th drop_th = { 0 };
> +	struct ustore_pkt_len pkt_len;
> +	int i;
> +
> +	nbl_hw_rd_regs(hw_mgt, NBL_USTORE_PKT_LEN_ADDR, (u32 *)&pkt_len,
> +		       sizeof(pkt_len));
> +	/* min arp packet length 42 (14 + 28) */
> +	pkt_len.min = 42;
> +	nbl_hw_wr_regs(hw_mgt, NBL_USTORE_PKT_LEN_ADDR, (u32 *)&pkt_len,
> +		       sizeof(pkt_len));
> +
> +	drop_th.en = 1;
> +	if (eth_num == 1)
> +		drop_th.disc_th = NBL_USTORE_SIGNLE_ETH_DROP_TH;
> +	else if (eth_num == 2)
> +		drop_th.disc_th = NBL_USTORE_DUAL_ETH_DROP_TH;
> +	else
> +		drop_th.disc_th = NBL_USTORE_QUAD_ETH_DROP_TH;
> +
> +	for (i = 0; i < 4; i++)
> +		nbl_hw_wr_regs(hw_mgt, NBL_USTORE_PORT_DROP_TH_REG_ARR(i),
> +			       (u32 *)&drop_th, sizeof(drop_th));
> +
> +	for (i = 0; i < NBL_MAX_ETHERNET; i++) {
> +		nbl_hw_rd32(hw_mgt, NBL_USTORE_BUF_PORT_DROP_PKT(i));
> +		nbl_hw_rd32(hw_mgt, NBL_USTORE_BUF_PORT_TRUN_PKT(i));
> +	}
> +
> +	return 0;
> +}
> +
> +static int nbl_dstore_init(struct nbl_hw_mgt *hw_mgt, u8 speed)
> +{
> +	struct dstore_port_drop_th drop_th;
> +	struct dstore_d_dport_fc_th fc_th;
> +	struct dstore_disc_bp_th bp_th;
> +	int i;
> +
> +	for (i = 0; i < 6; i++) {
> +		nbl_hw_rd_regs(hw_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i),
> +			       (u32 *)&drop_th, sizeof(drop_th));
> +		drop_th.en = 0;
> +		nbl_hw_wr_regs(hw_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i),
> +			       (u32 *)&drop_th, sizeof(drop_th));
> +	}
> +
> +	nbl_hw_rd_regs(hw_mgt, NBL_DSTORE_DISC_BP_TH, (u32 *)&bp_th,
> +		       sizeof(bp_th));
> +	bp_th.en = 1;
> +	nbl_hw_wr_regs(hw_mgt, NBL_DSTORE_DISC_BP_TH, (u32 *)&bp_th,
> +		       sizeof(bp_th));
> +
> +	for (i = 0; i < 4; i++) {
> +		nbl_hw_rd_regs(hw_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i),
> +			       (u32 *)&fc_th, sizeof(fc_th));
> +		if (speed == NBL_FW_PORT_SPEED_100G) {
> +			fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G;
> +			fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G;
> +		} else {
> +			fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH;
> +			fc_th.xon_th = NBL_DSTORE_DROP_XON_TH;
> +		}
> +
> +		fc_th.fc_en = 1;
> +		nbl_hw_wr_regs(hw_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i),
> +			       (u32 *)&fc_th, sizeof(fc_th));
> +	}
> +
> +	return 0;
> +}
> +
> +static void nbl_dvn_descreq_num_cfg(struct nbl_hw_mgt *hw_mgt, u32 descreq_num)
> +{
> +	u32 split_ring_prefect_num = (descreq_num >> 16) & 0xffff;
> +	u32 packet_ring_prefect_num = descreq_num & 0xffff;
> +	struct nbl_dvn_descreq_num_cfg num_cfg = { 0 };
> +
> +	packet_ring_prefect_num =
> +		packet_ring_prefect_num > 32 ? 32 : packet_ring_prefect_num;
> +	packet_ring_prefect_num =
> +		packet_ring_prefect_num < 8 ? 8 : packet_ring_prefect_num;
> +	num_cfg.packed_l1_num = (packet_ring_prefect_num - 8) / 4;
> +
> +	split_ring_prefect_num =
> +		split_ring_prefect_num > 16 ? 16 : split_ring_prefect_num;
> +	split_ring_prefect_num =
> +		split_ring_prefect_num < 8 ? 8 : split_ring_prefect_num;
> +	num_cfg.avring_cfg_num = split_ring_prefect_num > 8 ? 1 : 0;

Minor nit: prefer human readable macro names to magic numbers (8, 16, 32
above).

/P


  reply	other threads:[~2026-03-12 12:04 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-10 12:09 [PATCH v7 net-next 00/11] nbl driver for Nebulamatrix NICs illusion.wang
2026-03-10 12:09 ` [PATCH v7 net-next 01/11] net/nebula-matrix: add minimum nbl build framework illusion.wang
2026-03-10 12:09 ` [PATCH v7 net-next 02/11] net/nebula-matrix: add our driver architecture illusion.wang
2026-03-10 12:09 ` [PATCH v7 net-next 03/11] net/nebula-matrix: add chip related definitions illusion.wang
2026-03-12 11:58   ` Paolo Abeni
2026-03-13  8:05     ` 回复:[PATCH " Illusion Wang
2026-03-13  9:36       ` Paolo Abeni
2026-03-10 12:09 ` [PATCH v7 net-next 04/11] net/nebula-matrix: channel msg value and msg struct illusion.wang
2026-03-10 12:09 ` [PATCH v7 net-next 05/11] net/nebula-matrix: add channel layer illusion.wang
2026-03-10 12:09 ` [PATCH v7 net-next 06/11] net/nebula-matrix: add common resource implementation illusion.wang
2026-03-12 11:59   ` Paolo Abeni
2026-03-10 12:09 ` [PATCH v7 net-next 07/11] net/nebula-matrix: add intr " illusion.wang
2026-03-10 12:09 ` [PATCH v7 net-next 08/11] net/nebula-matrix: add vsi " illusion.wang
2026-03-12 12:04   ` Paolo Abeni [this message]
2026-03-10 12:09 ` [PATCH v7 net-next 09/11] net/nebula-matrix: add Dispatch layer implementation illusion.wang
2026-03-12 12:10   ` Paolo Abeni
2026-03-10 12:09 ` [PATCH v7 net-next 10/11] net/nebula-matrix: add common/ctrl dev init/reinit operation illusion.wang
2026-03-12 12:14   ` Paolo Abeni
2026-03-10 12:09 ` [PATCH v7 net-next 11/11] net/nebula-matrix: add common dev start/stop operation illusion.wang
2026-03-12 12:15   ` Paolo Abeni

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1b835f08-20c8-4c40-bf17-ebc300e849a9@redhat.com \
    --to=pabeni@redhat.com \
    --cc=alvin.wang@nebula-matrix.com \
    --cc=andrew+netdev@lunn.ch \
    --cc=corbet@lwn.net \
    --cc=dimon.zhao@nebula-matrix.com \
    --cc=edumazet@google.com \
    --cc=horms@kernel.org \
    --cc=illusion.wang@nebula-matrix.com \
    --cc=kuba@kernel.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lorenzo@kernel.org \
    --cc=lukas.bulwahn@redhat.com \
    --cc=netdev@vger.kernel.org \
    --cc=sam.chen@nebula-matrix.com \
    --cc=vadim.fedorenko@linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox