From: Wei Fang <wei.fang@nxp.com>
To: claudiu.manoil@nxp.com, vladimir.oltean@nxp.com,
xiaoning.wang@nxp.com, andrew+netdev@lunn.ch,
davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
pabeni@redhat.com, robh@kernel.org, krzk+dt@kernel.org,
conor+dt@kernel.org, f.fainelli@gmail.com, frank.li@nxp.com,
chleroy@kernel.org, horms@kernel.org, linux@armlinux.org.uk
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
devicetree@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
linux-arm-kernel@lists.infradead.org, imx@lists.linux.dev
Subject: [PATCH v5 net-next 12/15] net: dsa: netc: add FDB, STP, MTU, port setup and host flooding support
Date: Thu, 30 Apr 2026 10:49:42 +0800 [thread overview]
Message-ID: <20260430024945.3413973-13-wei.fang@nxp.com> (raw)
In-Reply-To: <20260430024945.3413973-1-wei.fang@nxp.com>
Expand the NETC switch driver with several foundational features:
- FDB and MDB management
- STP state handling
- MTU configuration
- Port setup/teardown
- Host flooding support
At this stage, the driver operates only in standalone port mode. Each
port uses VLAN 0 as its PVID, meaning ingress frames are internally
assigned VID 0 regardless of whether they arrive tagged or untagged.
Note that this does not inject a VLAN 0 header into the frame; the VID
is used purely for subsequent VLAN processing within the switch.
Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
drivers/net/dsa/netc/netc_main.c | 573 ++++++++++++++++++++++++++
drivers/net/dsa/netc/netc_switch.h | 36 ++
drivers/net/dsa/netc/netc_switch_hw.h | 14 +
3 files changed, 623 insertions(+)
diff --git a/drivers/net/dsa/netc/netc_main.c b/drivers/net/dsa/netc/netc_main.c
index edf50cb32cb6..7f1ab1fbb6fb 100644
--- a/drivers/net/dsa/netc/netc_main.c
+++ b/drivers/net/dsa/netc/netc_main.c
@@ -7,11 +7,36 @@
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/fsl/enetc_mdio.h>
+#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include "netc_switch.h"
+static struct netc_fdb_entry *
+netc_lookup_fdb_entry(struct netc_switch *priv,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct netc_fdb_entry *entry;
+
+ hlist_for_each_entry(entry, &priv->fdb_list, node)
+ if (ether_addr_equal(entry->keye.mac_addr, addr) &&
+ le16_to_cpu(entry->keye.fid) == vid)
+ return entry;
+
+ return NULL;
+}
+
+static void netc_destroy_fdb_list(struct netc_switch *priv)
+{
+ struct netc_fdb_entry *entry;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(entry, tmp, &priv->fdb_list, node)
+ netc_del_fdb_entry(entry);
+}
+
static enum dsa_tag_protocol
netc_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot)
@@ -176,6 +201,15 @@ static int netc_init_switch_id(struct netc_switch *priv)
return 0;
}
+static void netc_get_switch_capabilities(struct netc_switch *priv)
+{
+ struct netc_switch_regs *regs = &priv->regs;
+ u32 val;
+
+ val = netc_base_rd(regs, NETC_FDBHTCAPR);
+ priv->num_fdb_gmac = FIELD_GET(FDBHTCAPR_NUM_GMAC, val);
+}
+
static int netc_init_all_ports(struct netc_switch *priv)
{
struct device *dev = priv->dev;
@@ -383,6 +417,206 @@ static void netc_port_default_config(struct netc_port *np)
netc_port_set_all_tc_msdu(np);
}
+static u32 netc_available_port_bitmap(struct netc_switch *priv)
+{
+ struct dsa_port *dp;
+ u32 bitmap = 0;
+
+ dsa_switch_for_each_available_port(dp, priv->ds)
+ bitmap |= BIT(dp->index);
+
+ return bitmap;
+}
+
+static int netc_add_standalone_vlan_entry(struct netc_switch *priv)
+{
+ u32 bitmap_stg = VFT_STG_ID(0) | netc_available_port_bitmap(priv);
+ struct vft_cfge_data *cfge;
+ u16 cfg;
+ int err;
+
+ cfge = kzalloc_obj(*cfge);
+ if (!cfge)
+ return -ENOMEM;
+
+ cfge->bitmap_stg = cpu_to_le32(bitmap_stg);
+ cfge->et_eid = cpu_to_le32(NTMP_NULL_ENTRY_ID);
+ cfge->fid = cpu_to_le16(NETC_STANDALONE_PVID);
+
+ /* For standalone ports, MAC learning needs to be disabled, so frames
+ * from other user ports will not be forwarded to the standalone ports,
+ * because there are no FDB entries on the standalone ports. Also, the
+ * frames received by the standalone ports cannot be flooded to other
+ * ports, so MAC forwarding option needs to be set to
+ * MFO_NO_MATCH_DISCARD, so the frames will be discarded rather than
+ * flooding to other ports.
+ */
+ cfg = FIELD_PREP(VFT_MLO, MLO_DISABLE) |
+ FIELD_PREP(VFT_MFO, MFO_NO_MATCH_DISCARD);
+ cfge->cfg = cpu_to_le16(cfg);
+
+ err = ntmp_vft_add_entry(&priv->ntmp, NETC_STANDALONE_PVID, cfge);
+ if (err)
+ dev_err(priv->dev,
+ "Failed to add standalone VLAN entry\n");
+
+ kfree(cfge);
+
+ return err;
+}
+
+static int netc_port_add_fdb_entry(struct netc_port *np,
+ const unsigned char *addr, u16 vid)
+{
+ struct netc_switch *priv = np->switch_priv;
+ struct netc_fdb_entry *entry;
+ struct fdbt_keye_data *keye;
+ struct fdbt_cfge_data *cfge;
+ int port = np->dp->index;
+ u32 cfg = 0;
+ int err;
+
+ entry = kzalloc_obj(*entry);
+ if (!entry)
+ return -ENOMEM;
+
+ keye = &entry->keye;
+ cfge = &entry->cfge;
+ ether_addr_copy(keye->mac_addr, addr);
+ keye->fid = cpu_to_le16(vid);
+
+ cfge->port_bitmap = cpu_to_le32(BIT(port));
+ cfge->cfg = cpu_to_le32(cfg);
+ cfge->et_eid = cpu_to_le32(NTMP_NULL_ENTRY_ID);
+
+ err = ntmp_fdbt_add_entry(&priv->ntmp, &entry->entry_id, keye, cfge);
+ if (err) {
+ kfree(entry);
+
+ return err;
+ }
+
+ netc_add_fdb_entry(priv, entry);
+
+ return 0;
+}
+
+static int netc_port_set_fdb_entry(struct netc_port *np,
+ const unsigned char *addr, u16 vid)
+{
+ struct netc_switch *priv = np->switch_priv;
+ struct netc_fdb_entry *entry;
+ struct fdbt_cfge_data *cfge;
+ int port = np->dp->index;
+ int err = 0;
+
+ mutex_lock(&priv->fdbt_lock);
+
+ entry = netc_lookup_fdb_entry(priv, addr, vid);
+ if (!entry) {
+ err = netc_port_add_fdb_entry(np, addr, vid);
+ if (err)
+ dev_err(priv->dev,
+ "Failed to add FDB entry on port %d\n",
+ port);
+
+ goto unlock_fdbt;
+ }
+
+ cfge = &entry->cfge;
+ /* If the entry already exists on the port, return 0 directly */
+ if (unlikely(cfge->port_bitmap & cpu_to_le32(BIT(port))))
+ goto unlock_fdbt;
+
+ /* If the entry already exists, but not on this port, we need to
+ * update the port bitmap. In general, it should only be valid
+ * for multicast or broadcast address.
+ */
+ cfge->port_bitmap |= cpu_to_le32(BIT(port));
+ err = ntmp_fdbt_update_entry(&priv->ntmp, entry->entry_id, cfge);
+ if (err) {
+ cfge->port_bitmap &= cpu_to_le32(~BIT(port));
+ dev_err(priv->dev, "Failed to set FDB entry on port %d\n",
+ port);
+ }
+
+unlock_fdbt:
+ mutex_unlock(&priv->fdbt_lock);
+
+ return err;
+}
+
+static int netc_port_del_fdb_entry(struct netc_port *np,
+ const unsigned char *addr, u16 vid)
+{
+ struct netc_switch *priv = np->switch_priv;
+ struct ntmp_user *ntmp = &priv->ntmp;
+ struct netc_fdb_entry *entry;
+ struct fdbt_cfge_data *cfge;
+ int port = np->dp->index;
+ int err = 0;
+
+ mutex_lock(&priv->fdbt_lock);
+
+ entry = netc_lookup_fdb_entry(priv, addr, vid);
+ if (unlikely(!entry))
+ goto unlock_fdbt;
+
+ cfge = &entry->cfge;
+ if (unlikely(!(cfge->port_bitmap & cpu_to_le32(BIT(port)))))
+ goto unlock_fdbt;
+
+ if (cfge->port_bitmap != cpu_to_le32(BIT(port))) {
+ /* If the entry also exists on other ports, we need to
+ * update the entry in the FDB table.
+ */
+ cfge->port_bitmap &= cpu_to_le32(~BIT(port));
+ err = ntmp_fdbt_update_entry(ntmp, entry->entry_id, cfge);
+ if (err) {
+ cfge->port_bitmap |= cpu_to_le32(BIT(port));
+ goto unlock_fdbt;
+ }
+ } else {
+ /* If the entry only exists on this port, just delete
+ * it from the FDB table.
+ */
+ err = ntmp_fdbt_delete_entry(ntmp, entry->entry_id);
+ if (err)
+ goto unlock_fdbt;
+
+ netc_del_fdb_entry(entry);
+ }
+
+unlock_fdbt:
+ mutex_unlock(&priv->fdbt_lock);
+
+ return err;
+}
+
+static int netc_add_standalone_fdb_bcast_entry(struct netc_switch *priv)
+{
+ const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct dsa_port *dp, *cpu_dp = NULL;
+
+ dsa_switch_for_each_cpu_port(dp, priv->ds) {
+ cpu_dp = dp;
+ break;
+ }
+
+ if (!cpu_dp)
+ return -ENODEV;
+
+ /* If the user port acts as a standalone port, then its PVID is 0,
+ * MLO is set to "disable MAC learning" and MFO is set to "discard
+ * frames if no matching entry found in FDB table". Therefore, we
+ * need to add a broadcast FDB entry on the CPU port so that the
+ * broadcast frames received on the user port can be forwarded to
+ * the CPU port.
+ */
+ return netc_port_set_fdb_entry(NETC_PORT(priv->ds, cpu_dp->index),
+ bcast, NETC_STANDALONE_PVID);
+}
+
static int netc_setup(struct dsa_switch *ds)
{
struct netc_switch *priv = ds->priv;
@@ -393,6 +627,8 @@ static int netc_setup(struct dsa_switch *ds)
if (err)
return err;
+ netc_get_switch_capabilities(priv);
+
err = netc_init_all_ports(priv);
if (err)
return err;
@@ -401,19 +637,61 @@ static int netc_setup(struct dsa_switch *ds)
if (err)
return err;
+ INIT_HLIST_HEAD(&priv->fdb_list);
+ mutex_init(&priv->fdbt_lock);
+
netc_switch_fixed_config(priv);
/* default setting for ports */
dsa_switch_for_each_available_port(dp, ds)
netc_port_default_config(priv->ports[dp->index]);
+ err = netc_add_standalone_vlan_entry(priv);
+ if (err)
+ goto free_lock_and_ntmp_user;
+
+ err = netc_add_standalone_fdb_bcast_entry(priv);
+ if (err)
+ goto free_lock_and_ntmp_user;
+
return 0;
+
+free_lock_and_ntmp_user:
+ mutex_destroy(&priv->fdbt_lock);
+ netc_free_ntmp_user(priv);
+
+ return err;
+}
+
+static void netc_destroy_all_lists(struct netc_switch *priv)
+{
+ netc_destroy_fdb_list(priv);
+ mutex_destroy(&priv->fdbt_lock);
+}
+
+static void netc_free_host_flood_rules(struct netc_switch *priv)
+{
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_user_port(dp, priv->ds) {
+ struct netc_port *np = priv->ports[dp->index];
+
+ /* No need to clear the hardware IPFT entry. Because PCIe
+ * FLR will be performed when the switch is re-registered,
+ * it will reset hardware state. So only need to free the
+ * memory to avoid memory leak.
+ */
+ kfree(np->host_flood);
+ np->host_flood = NULL;
+ }
}
static void netc_teardown(struct dsa_switch *ds)
{
struct netc_switch *priv = ds->priv;
+ netc_destroy_all_lists(priv);
+ netc_free_host_flood_rules(priv);
netc_free_ntmp_user(priv);
}
@@ -542,6 +820,289 @@ static void netc_switch_get_ip_revision(struct netc_switch *priv)
priv->revision = FIELD_GET(IPBRR0_IP_REV, val);
}
+static int netc_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct netc_port *np = NETC_PORT(ds, port);
+ int err;
+
+ if (np->enable)
+ return 0;
+
+ err = clk_prepare_enable(np->ref_clk);
+ if (err) {
+ dev_err(ds->dev,
+ "Failed to enable enet_ref_clk of port %d\n", port);
+ return err;
+ }
+
+ np->enable = true;
+
+ return 0;
+}
+
+static void netc_port_disable(struct dsa_switch *ds, int port)
+{
+ struct netc_port *np = NETC_PORT(ds, port);
+
+ /* When .port_disable() is called, .port_enable() may not have been
+ * called. In this case, both the prepare_count and enable_count of
+ * clock are 0. Calling clk_disable_unprepare() at this time will
+ * cause warnings.
+ */
+ if (!np->enable)
+ return;
+
+ clk_disable_unprepare(np->ref_clk);
+ np->enable = false;
+}
+
+static void netc_port_stp_state_set(struct dsa_switch *ds,
+ int port, u8 state)
+{
+ struct netc_port *np = NETC_PORT(ds, port);
+ u32 val;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ val = NETC_STG_STATE_DISABLED;
+ break;
+ case BR_STATE_LEARNING:
+ val = NETC_STG_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = NETC_STG_STATE_FORWARDING;
+ break;
+ default:
+ return;
+ }
+
+ netc_port_wr(np, NETC_BPSTGSR, val);
+}
+
+static int netc_port_change_mtu(struct dsa_switch *ds,
+ int port, int mtu)
+{
+ u32 max_frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ struct netc_port *np = NETC_PORT(ds, port);
+
+ /* dsa_user_change_mtu() does not add the switch tag overhead
+ * for the CPU port, so we need to add this overhead for the
+ * CPU port here.
+ */
+ if (dsa_is_cpu_port(ds, port)) {
+ max_frame_size += NETC_TAG_MAX_LEN;
+ if (max_frame_size > NETC_MAX_FRAME_LEN)
+ max_frame_size = NETC_MAX_FRAME_LEN;
+ }
+
+ netc_port_set_max_frame_size(np, max_frame_size);
+
+ return 0;
+}
+
+static int netc_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return NETC_MAX_FRAME_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int netc_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct netc_port *np = NETC_PORT(ds, port);
+
+ /* Currently, only support standalone port mode, so only
+ * NETC_STANDALONE_PVID (= 0) is supported here.
+ */
+ if (vid != NETC_STANDALONE_PVID)
+ return -EOPNOTSUPP;
+
+ return netc_port_set_fdb_entry(np, addr, vid);
+}
+
+static int netc_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct netc_port *np = NETC_PORT(ds, port);
+
+ if (vid != NETC_STANDALONE_PVID)
+ return -EOPNOTSUPP;
+
+ return netc_port_del_fdb_entry(np, addr, vid);
+}
+
+static int netc_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct netc_switch *priv = ds->priv;
+ u32 resume_eid = NTMP_NULL_ENTRY_ID;
+ struct fdbt_entry_data *entry;
+ struct fdbt_keye_data *keye;
+ struct fdbt_cfge_data *cfge;
+ u32 cfg, cnt = 0;
+ bool is_static;
+ int err;
+ u16 vid;
+
+ entry = kmalloc_obj(*entry);
+ if (!entry)
+ return -ENOMEM;
+
+ keye = &entry->keye;
+ cfge = &entry->cfge;
+ mutex_lock(&priv->fdbt_lock);
+
+ do {
+ memset(entry, 0, sizeof(*entry));
+ err = ntmp_fdbt_search_port_entry(&priv->ntmp, port,
+ &resume_eid, entry);
+ if (err || entry->entry_id == NTMP_NULL_ENTRY_ID)
+ break;
+
+ cfg = le32_to_cpu(cfge->cfg);
+ is_static = (cfg & FDBT_DYNAMIC) ? false : true;
+ vid = le16_to_cpu(keye->fid);
+
+ err = cb(keye->mac_addr, vid, is_static, data);
+ if (err)
+ break;
+
+ /* To prevent hardware malfunctions from causing an
+ * infinite loop.
+ */
+ if (++cnt >= priv->num_fdb_gmac)
+ break;
+ } while (resume_eid != NTMP_NULL_ENTRY_ID);
+
+ mutex_unlock(&priv->fdbt_lock);
+ kfree(entry);
+
+ return err;
+}
+
+static int netc_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ return netc_port_fdb_add(ds, port, mdb->addr, mdb->vid, db);
+}
+
+static int netc_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ return netc_port_fdb_del(ds, port, mdb->addr, mdb->vid, db);
+}
+
+static int netc_port_add_host_flood_rule(struct netc_port *np,
+ bool uc, bool mc)
+{
+ const u8 dmac_mask[ETH_ALEN] = {0x1, 0, 0, 0, 0, 0};
+ struct netc_switch *priv = np->switch_priv;
+ struct ipft_entry_data *host_flood;
+ struct ipft_keye_data *keye;
+ struct ipft_cfge_data *cfge;
+ u16 src_port;
+ u32 cfg;
+ int err;
+
+ if (!uc && !mc) {
+ /* Disable ingress port filter table lookup */
+ netc_port_wr(np, NETC_PIPFCR, 0);
+ np->uc = false;
+ np->mc = false;
+
+ return 0;
+ }
+
+ host_flood = kzalloc_obj(*host_flood);
+ if (!host_flood)
+ return -ENOMEM;
+
+ keye = &host_flood->keye;
+ cfge = &host_flood->cfge;
+
+ src_port = FIELD_PREP(IPFT_SRC_PORT, np->dp->index);
+ src_port |= IPFT_SRC_PORT_MASK;
+ keye->src_port = cpu_to_le16(src_port);
+
+ /* If either only unicast or only multicast need to be flooded
+ * to the host, we always set the mask that tests the first MAC
+ * DA octet. The value should be 0 for the first bit (if unicast
+ * has to be flooded) or 1 (if multicast). If both unicast and
+ * multicast have to be flooded, we leave the key mask empty, so
+ * it matches everything.
+ */
+ if (uc && !mc)
+ ether_addr_copy(keye->dmac_mask, dmac_mask);
+
+ if (!uc && mc) {
+ ether_addr_copy(keye->dmac, dmac_mask);
+ ether_addr_copy(keye->dmac_mask, dmac_mask);
+ }
+
+ cfg = FIELD_PREP(IPFT_FLTFA, IPFT_FLTFA_REDIRECT);
+ cfg |= FIELD_PREP(IPFT_HR, NETC_HR_HOST_FLOOD);
+ cfge->cfg = cpu_to_le32(cfg);
+
+ err = ntmp_ipft_add_entry(&priv->ntmp, host_flood);
+ if (err) {
+ kfree(host_flood);
+ return err;
+ }
+
+ np->uc = uc;
+ np->mc = mc;
+ np->host_flood = host_flood;
+ /* Enable ingress port filter table lookup */
+ netc_port_wr(np, NETC_PIPFCR, PIPFCR_EN);
+
+ return 0;
+}
+
+static void netc_port_remove_host_flood(struct netc_port *np,
+ struct ipft_entry_data *host_flood)
+{
+ struct netc_switch *priv = np->switch_priv;
+
+ if (!host_flood)
+ return;
+
+ ntmp_ipft_delete_entry(&priv->ntmp, host_flood->entry_id);
+ kfree(host_flood);
+}
+
+static void netc_port_set_host_flood(struct dsa_switch *ds, int port,
+ bool uc, bool mc)
+{
+ struct netc_port *np = NETC_PORT(ds, port);
+ struct ipft_entry_data *old_host_flood;
+
+ if (np->uc == uc && np->mc == mc)
+ return;
+
+ /* IPFT does not support in-place updates to the KEYE element,
+ * we need to add a new entry and then delete the old one. So
+ * save the old entry first.
+ */
+ old_host_flood = np->host_flood;
+ np->host_flood = NULL;
+
+ if (netc_port_add_host_flood_rule(np, uc, mc)) {
+ np->host_flood = old_host_flood;
+ dev_err(ds->dev, "Failed to add host flood rule on port %d\n",
+ port);
+ return;
+ }
+
+ /* Remove the old host flood entry */
+ netc_port_remove_host_flood(np, old_host_flood);
+}
+
static void netc_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -768,6 +1329,17 @@ static const struct dsa_switch_ops netc_switch_ops = {
.setup = netc_setup,
.teardown = netc_teardown,
.phylink_get_caps = netc_phylink_get_caps,
+ .port_enable = netc_port_enable,
+ .port_disable = netc_port_disable,
+ .port_stp_state_set = netc_port_stp_state_set,
+ .port_change_mtu = netc_port_change_mtu,
+ .port_max_mtu = netc_port_max_mtu,
+ .port_fdb_add = netc_port_fdb_add,
+ .port_fdb_del = netc_port_fdb_del,
+ .port_fdb_dump = netc_port_fdb_dump,
+ .port_mdb_add = netc_port_mdb_add,
+ .port_mdb_del = netc_port_mdb_del,
+ .port_set_host_flood = netc_port_set_host_flood,
};
static int netc_switch_probe(struct pci_dev *pdev,
@@ -807,6 +1379,7 @@ static int netc_switch_probe(struct pci_dev *pdev,
ds->num_tx_queues = NETC_TC_NUM;
ds->ops = &netc_switch_ops;
ds->phylink_mac_ops = &netc_phylink_mac_ops;
+ ds->fdb_isolation = true;
ds->priv = priv;
priv->ds = ds;
diff --git a/drivers/net/dsa/netc/netc_switch.h b/drivers/net/dsa/netc/netc_switch.h
index eb65c36ecead..3efe37fca390 100644
--- a/drivers/net/dsa/netc/netc_switch.h
+++ b/drivers/net/dsa/netc/netc_switch.h
@@ -30,6 +30,8 @@
#define NETC_MAX_FRAME_LEN 9600
+#define NETC_STANDALONE_PVID 0
+
struct netc_switch;
struct netc_switch_info {
@@ -43,6 +45,11 @@ struct netc_port_caps {
u32 pseudo_link:1;
};
+enum netc_host_reason {
+ /* Software defined host reasons */
+ NETC_HR_HOST_FLOOD = 8,
+};
+
struct netc_port {
void __iomem *iobase;
struct netc_switch *switch_priv;
@@ -50,6 +57,11 @@ struct netc_port {
struct dsa_port *dp;
struct clk *ref_clk; /* RGMII/RMII reference clock */
struct mii_bus *emdio;
+
+ u16 enable:1;
+ u16 uc:1;
+ u16 mc:1;
+ struct ipft_entry_data *host_flood;
};
struct netc_switch_regs {
@@ -58,6 +70,13 @@ struct netc_switch_regs {
void __iomem *global;
};
+struct netc_fdb_entry {
+ u32 entry_id;
+ struct fdbt_cfge_data cfge;
+ struct fdbt_keye_data keye;
+ struct hlist_node node;
+};
+
struct netc_switch {
struct pci_dev *pdev;
struct device *dev;
@@ -69,6 +88,11 @@ struct netc_switch {
struct netc_port **ports;
struct ntmp_user ntmp;
+ struct hlist_head fdb_list;
+ struct mutex fdbt_lock; /* FDB table lock */
+
+ /* Switch hardware capabilities */
+ u32 num_fdb_gmac;
};
#define NETC_PRIV(ds) ((struct netc_switch *)((ds)->priv))
@@ -91,6 +115,18 @@ static inline bool is_netc_pseudo_port(struct netc_port *np)
return np->caps.pseudo_link;
}
+static inline void netc_add_fdb_entry(struct netc_switch *priv,
+ struct netc_fdb_entry *entry)
+{
+ hlist_add_head(&entry->node, &priv->fdb_list);
+}
+
+static inline void netc_del_fdb_entry(struct netc_fdb_entry *entry)
+{
+ hlist_del(&entry->node);
+ kfree(entry);
+}
+
int netc_switch_platform_probe(struct netc_switch *priv);
#endif
diff --git a/drivers/net/dsa/netc/netc_switch_hw.h b/drivers/net/dsa/netc/netc_switch_hw.h
index 7d9afb493053..b04e9866d72a 100644
--- a/drivers/net/dsa/netc/netc_switch_hw.h
+++ b/drivers/net/dsa/netc/netc_switch_hw.h
@@ -36,6 +36,9 @@
#define VFHTDECR2_MLO GENMASK(26, 24)
#define VFHTDECR2_MFO GENMASK(28, 27)
+#define NETC_FDBHTCAPR 0x2020
+#define FDBHTCAPR_NUM_GMAC GENMASK(8, 0)
+
/* Definition of Switch port registers */
#define NETC_PCAPR 0x0000
#define PCAPR_LINK_TYPE BIT(4)
@@ -67,6 +70,9 @@
#define PQOSMR_VQMP GENMASK(19, 16)
#define PQOSMR_QVMP GENMASK(23, 20)
+#define NETC_PIPFCR 0x0084
+#define PIPFCR_EN BIT(0)
+
#define NETC_POR 0x100
#define POR_TXDIS BIT(0)
#define POR_RXDIS BIT(1)
@@ -122,6 +128,14 @@ enum netc_mfo {
#define BPDVR_RXVAM BIT(24)
#define BPDVR_TXTAGA GENMASK(26, 25)
+#define NETC_BPSTGSR 0x520
+
+enum netc_stg_stage {
+ NETC_STG_STATE_DISABLED = 0,
+ NETC_STG_STATE_LEARNING,
+ NETC_STG_STATE_FORWARDING,
+};
+
/* Definition of Switch ethernet MAC port registers */
#define NETC_PMAC_OFFSET 0x400
#define NETC_PM_CMD_CFG(a) (0x1008 + (a) * 0x400)
--
2.34.1
next prev parent reply other threads:[~2026-04-30 2:48 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-30 2:49 [PATCH v5 net-next 00/15] Add preliminary NETC switch support for i.MX94 Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 01/15] dt-bindings: net: dsa: update the description of 'dsa,member' property Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 02/15] dt-bindings: net: dsa: add NETC switch Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 03/15] net: enetc: add pre-boot initialization for i.MX94 switch Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 04/15] net: enetc: add basic operations to the FDB table Wei Fang
2026-05-05 8:59 ` Paolo Abeni
2026-05-06 6:37 ` Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 05/15] net: enetc: add support for the "Add" operation to VLAN filter table Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 06/15] net: enetc: add support for the "Update" operation to buffer pool table Wei Fang
2026-05-06 7:21 ` Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 07/15] net: enetc: add support for "Add" and "Delete" operations to IPFT Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 08/15] net: enetc: add multiple command BD rings support Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 09/15] net: dsa: add NETC switch tag support Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 10/15] net: dsa: netc: introduce NXP NETC switch driver for i.MX94 Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 11/15] net: dsa: netc: add phylink MAC operations Wei Fang
2026-04-30 2:49 ` Wei Fang [this message]
2026-04-30 2:49 ` [PATCH v5 net-next 13/15] net: dsa: netc: initialize buffer pool table and implement flow-control Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 14/15] net: dsa: netc: add support for the standardized counters Wei Fang
2026-04-30 2:49 ` [PATCH v5 net-next 15/15] net: dsa: netc: add support for ethtool private statistics Wei Fang
2026-05-05 9:43 ` Paolo Abeni
2026-05-06 7:06 ` Wei Fang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260430024945.3413973-13-wei.fang@nxp.com \
--to=wei.fang@nxp.com \
--cc=andrew+netdev@lunn.ch \
--cc=chleroy@kernel.org \
--cc=claudiu.manoil@nxp.com \
--cc=conor+dt@kernel.org \
--cc=davem@davemloft.net \
--cc=devicetree@vger.kernel.org \
--cc=edumazet@google.com \
--cc=f.fainelli@gmail.com \
--cc=frank.li@nxp.com \
--cc=horms@kernel.org \
--cc=imx@lists.linux.dev \
--cc=krzk+dt@kernel.org \
--cc=kuba@kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux@armlinux.org.uk \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=robh@kernel.org \
--cc=vladimir.oltean@nxp.com \
--cc=xiaoning.wang@nxp.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox