* [PATCH net-next v4 1/6] net: libwx: Add mailbox api for wangxun pf drivers
[not found] <20240604155850.51983-1-mengyuanlou@net-swift.com>
@ 2024-06-04 15:57 ` Mengyuan Lou
2024-06-05 5:03 ` Przemek Kitszel
2024-06-05 7:31 ` Wojciech Drewek
2024-06-04 15:57 ` [PATCH net-next v4 2/6] net: libwx: Add sriov api for wangxun nics Mengyuan Lou
` (4 subsequent siblings)
5 siblings, 2 replies; 14+ messages in thread
From: Mengyuan Lou @ 2024-06-04 15:57 UTC (permalink / raw)
To: netdev; +Cc: jiawenwu, duanqiangwen, Mengyuan Lou
Implement the mailbox interfaces for the Wangxun PF drivers
ngbe and txgbe.
Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
---
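Not part of the patch, just an illustration of how a PF driver could consume
these helpers. The service loop, the opcode handling and the BIT(31) "ack"
flag below are hypothetical at this point in the series; only the
wx_check_for_msg_pf()/wx_read_mbx_pf()/wx_write_mbx_pf() calls and
WX_VXMAILBOX_SIZE come from this patch.

	static void example_service_vf_mbx(struct wx *wx, u16 vf)
	{
		u32 msg[WX_VXMAILBOX_SIZE];

		/* bail out unless this VF has raised a request */
		if (wx_check_for_msg_pf(wx, vf))
			return;

		/* pull the request out of the per-VF mailbox memory */
		if (wx_read_mbx_pf(wx, msg, WX_VXMAILBOX_SIZE, vf))
			return;

		/* ... decode msg[0] and act on it ... */

		/* reply with a single word; BIT(31) as an "ack" flag is
		 * only illustrative, real reply flags arrive later in
		 * the series
		 */
		msg[0] |= BIT(31);
		wx_write_mbx_pf(wx, msg, 1, vf);
	}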
drivers/net/ethernet/wangxun/libwx/Makefile | 2 +-
drivers/net/ethernet/wangxun/libwx/wx_mbx.c | 189 +++++++++++++++++++
drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 32 ++++
drivers/net/ethernet/wangxun/libwx/wx_type.h | 5 +
4 files changed, 227 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_mbx.c
create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_mbx.h
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 42ccd6e4052e..913a978c9032 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_mbx.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
new file mode 100644
index 000000000000..e7d7178a1f13
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+#include <linux/pci.h>
+#include "wx_type.h"
+#include "wx_mbx.h"
+
+/**
+ * wx_obtain_mbx_lock_pf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * return: 0 if the mailbox lock was obtained, otherwise -EBUSY
+ **/
+static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf)
+{
+ int ret = -EBUSY;
+ int count = 5;
+ u32 mailbox;
+
+ while (count--) {
+ /* Take ownership of the buffer */
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ mailbox = rd32(wx, WX_PXMAILBOX(vf));
+ if (mailbox & WX_PXMAILBOX_PFU) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (ret)
+ wx_err(wx, "Failed to obtain mailbox lock for VF%d", vf);
+
+ return ret;
+}
+
+static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
+{
+ u32 mbvficr = rd32(wx, WX_MBVFICR(index));
+ int ret = -EBUSY;
+
+ if (mbvficr & mask) {
+ ret = 0;
+ wr32(wx, WX_MBVFICR(index), mask);
+ }
+
+ return ret;
+}
+
+/**
+ * wx_check_for_ack_pf - checks to see if the VF has ACKed
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * return: 0 if the VF has set the ACK bit, otherwise -EBUSY
+ **/
+int wx_check_for_ack_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFACK_MASK, BIT(vf_bit)),
+ index);
+}
+EXPORT_SYMBOL(wx_check_for_ack_pf);
+
+/**
+ * wx_check_for_msg_pf - checks to see if the VF has sent mail
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * return: 0 if the VF has sent a message (request bit set), otherwise -EBUSY
+ **/
+int wx_check_for_msg_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFREQ_MASK, BIT(vf_bit)),
+ index);
+}
+EXPORT_SYMBOL(wx_check_for_msg_pf);
+
+/**
+ * wx_write_mbx_pf - Places a message in the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * return: 0 if the message was copied into the mailbox buffer
+ **/
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ if (size > mbx->size) {
+ wx_err(wx, "Invalid mailbox message size %d", size);
+ ret = -EINVAL;
+ goto out_no_write;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_pf(wx, vf);
+ wx_check_for_ack_pf(wx, vf);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);
+
+out_no_write:
+ return ret;
+}
+EXPORT_SYMBOL(wx_write_mbx_pf);
+
+/**
+ * wx_read_mbx_pf - Read a message from the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * return: 0 if a message was copied from the mailbox buffer
+ **/
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret;
+ u16 i;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_PXMBMEM(vf), i);
+
+ /* Acknowledge the message and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK);
+out_no_read:
+ return ret;
+}
+EXPORT_SYMBOL(wx_read_mbx_pf);
+
+/**
+ * wx_check_for_rst_pf - checks to see if the VF has reset
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * return: 0 if a VFLR (VF reset) was detected, otherwise -EBUSY
+ **/
+int wx_check_for_rst_pf(struct wx *wx, u16 vf)
+{
+ u32 reg_offset = vf / 32;
+ u32 vf_shift = vf % 32;
+ int ret = -EBUSY;
+ u32 vflre = 0;
+
+ vflre = rd32(wx, WX_VFLRE(reg_offset));
+
+ if (vflre & BIT(vf_shift)) {
+ ret = 0;
+ wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(wx_check_for_rst_pf);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
new file mode 100644
index 000000000000..1579096fb6ad
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+#ifndef _WX_MBX_H_
+#define _WX_MBX_H_
+
+#define WX_VXMAILBOX_SIZE 15
+
+/* PF Registers */
+#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */
+#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */
+#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
+#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
+
+#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
+#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */
+
+/* SR-IOV specific macros */
+#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */
+#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
+#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
+
+#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
+
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
+
+#endif /* _WX_MBX_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 5aaf7b1fa2db..caa2f4157834 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -674,6 +674,10 @@ struct wx_bus_info {
u16 device;
};
+struct wx_mbx_info {
+ u16 size;
+};
+
struct wx_thermal_sensor_data {
s16 temp;
s16 alarm_thresh;
@@ -995,6 +999,7 @@ struct wx {
struct pci_dev *pdev;
struct net_device *netdev;
struct wx_bus_info bus;
+ struct wx_mbx_info mbx;
struct wx_mac_info mac;
enum em_mac_type mac_type;
enum sp_media_type media_type;
--
2.44.0
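For reference only, not part of the patch: the WX_MBVFICR arithmetic in
wx_check_for_msg_pf()/wx_check_for_ack_pf() packs 16 VFs per register, with
request flags in the low half and ack flags in the high half. A worked
example for vf = 21, using only the definitions added above:

	u16 vf = 21;
	u32 index  = vf / 16;	/* -> WX_MBVFICR(1)        */
	u32 vf_bit = vf % 16;	/* -> bit 5 of that reg    */
	u32 req = FIELD_PREP(WX_MBVFICR_VFREQ_MASK, BIT(vf_bit));	/* BIT(5)  */
	u32 ack = FIELD_PREP(WX_MBVFICR_VFACK_MASK, BIT(vf_bit));	/* BIT(21) */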
* [PATCH net-next v4 2/6] net: libwx: Add sriov api for wangxun nics
[not found] <20240604155850.51983-1-mengyuanlou@net-swift.com>
2024-06-04 15:57 ` [PATCH net-next v4 1/6] net: libwx: Add mailbox api for wangxun pf drivers Mengyuan Lou
@ 2024-06-04 15:57 ` Mengyuan Lou
2024-06-05 7:42 ` Wojciech Drewek
2024-06-04 15:57 ` [PATCH net-next v4 3/6] net: libwx: Redesign flow when sriov is enabled Mengyuan Lou
` (3 subsequent siblings)
5 siblings, 1 reply; 14+ messages in thread
From: Mengyuan Lou @ 2024-06-04 15:57 UTC (permalink / raw)
To: netdev; +Cc: jiawenwu, duanqiangwen, Mengyuan Lou
Implement the sriov_configure interface for Wangxun NICs in libwx.
Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
---
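Not part of the patch, just an illustration: wx_pci_sriov_configure() has the
prototype the PCI core expects for .sriov_configure, so a PF driver can hook
it up directly. A minimal sketch, with every field other than
.sriov_configure being a placeholder:

	static struct pci_driver example_wx_driver = {
		.name            = "example_wx",
		/* .id_table, .probe, .remove omitted from this sketch */
		.sriov_configure = wx_pci_sriov_configure,
	};

Writing a VF count to the device's sriov_numvfs sysfs attribute
(e.g. echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs) then lands in
wx_pci_sriov_configure() with num_vfs = 4, and echoing 0 tears the VFs
down via wx_pci_sriov_disable().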
drivers/net/ethernet/wangxun/libwx/Makefile | 2 +-
drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 4 +
drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 221 ++++++++++++++++++
drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 10 +
drivers/net/ethernet/wangxun/libwx/wx_type.h | 38 +++
5 files changed, 274 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_sriov.c
create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_sriov.h
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 913a978c9032..5b996d973d29 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_mbx.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_mbx.o wx_sriov.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
index 1579096fb6ad..3c70654a8b14 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -23,6 +23,10 @@
#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
+enum wxvf_xcast_modes {
+ WXVF_XCAST_MODE_NONE = 0,
+};
+
int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
new file mode 100644
index 000000000000..032b75f23460
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_mbx.h"
+#include "wx_sriov.h"
+
+static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
+{
+ unsigned int vfn = (event_mask & GENMASK(5, 0));
+ struct wx *wx = pci_get_drvdata(pdev);
+
+ bool enable = ((event_mask & BIT(31)) != 0);
+
+ if (enable)
+ eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
+}
+
+static void wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
+{
+ struct vf_macvlans *mv_list;
+ int num_vf_macvlans, i;
+
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&wx->vf_mvs.l);
+
+ num_vf_macvlans = wx->mac.num_rar_entries -
+ (WX_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return;
+
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (mv_list) {
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].l, &wx->vf_mvs.l);
+ }
+ wx->mv_list = mv_list;
+ }
+}
+
+static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
+{
+ u32 value = 0;
+ int i;
+
+ set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ wx_err(wx, "SR-IOV enabled with %d VFs\n", num_vfs);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+ if (!wx->ring_feature[RING_F_VMDQ].limit)
+ wx->ring_feature[RING_F_VMDQ].limit = 1;
+ wx->ring_feature[RING_F_VMDQ].offset = num_vfs;
+
+ wx_alloc_vf_macvlans(wx, num_vfs);
+ /* Initialize default switching mode VEB */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);
+
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ /* set per-VF defaults */
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ wx->vfinfo[i].spoofchk_enabled = true;
+ wx->vfinfo[i].link_enable = true;
+ /* Untrust all VFs */
+ wx->vfinfo[i].trusted = false;
+ /* set the default xcast mode */
+ wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
+ }
+
+ if (wx->mac.type == wx_mac_sp) {
+ if (num_vfs < 32)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ } else {
+ value = WX_CFG_PORT_CTL_NUM_VT_8;
+ }
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK,
+ value);
+
+ return 0;
+}
+
+static void wx_sriov_reinit(struct wx *wx)
+{
+ rtnl_lock();
+ wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
+ rtnl_unlock();
+}
+
+int wx_disable_sriov(struct wx *wx)
+{
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(wx->pdev)) {
+ wx_err(wx, "Unloading driver while VFs are assigned.\n");
+ return -EPERM;
+ }
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(wx->pdev);
+
+ /* set num VFs to 0 to prevent access to vfinfo */
+ wx->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(wx->vfinfo);
+ wx->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(wx->mv_list);
+ wx->mv_list = NULL;
+
+ /* set default pool back to 0 */
+ wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
+ wx->ring_feature[RING_F_VMDQ].offset = 0;
+
+ clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ /* Disable VMDq flag so device will be set in VM mode */
+ if (wx->ring_feature[RING_F_VMDQ].limit == 1)
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_disable_sriov);
+
+static int wx_pci_sriov_enable(struct pci_dev *dev,
+ int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+ int err = 0, i;
+
+ err = __wx_enable_sriov(wx, num_vfs);
+ if (err)
+ goto err_out;
+
+ wx->num_vfs = num_vfs;
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_vf_configuration(dev, (i | BIT(31)));
+
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ wx_sriov_reinit(wx);
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+
+ return num_vfs;
+err_out:
+ return err;
+}
+
+static int wx_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+ int err;
+
+ err = wx_disable_sriov(wx);
+
+ /* reinit queues after disabling SRIOV to avoid mailbox issues */
+ if (!err)
+ wx_sriov_reinit(wx);
+
+ return err;
+}
+
+static int wx_check_sriov_allowed(struct wx *wx, int num_vfs)
+{
+ u16 max_vfs;
+
+ max_vfs = (wx->mac.type == wx_mac_sp) ? 63 : 7;
+
+ if (num_vfs > max_vfs)
+ return -EPERM;
+
+ return 0;
+}
+
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ int err;
+
+ err = wx_check_sriov_allowed(wx, num_vfs);
+ if (err)
+ return err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ wx_pci_sriov_disable(pdev);
+ return 0;
+ }
+
+ wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = wx_pci_sriov_enable(pdev, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
+}
+EXPORT_SYMBOL(wx_pci_sriov_configure);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
new file mode 100644
index 000000000000..17b547ae8862
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_SRIOV_H_
+#define _WX_SRIOV_H_
+
+int wx_disable_sriov(struct wx *wx);
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+
+#endif /* _WX_SRIOV_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index caa2f4157834..7dad022e01e9 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -18,6 +18,7 @@
/* MSI-X capability fields masks */
#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
#define WX_PCI_LINK_STATUS 0xB2
+#define WX_MAX_PF_MACVLANS 15
/**************** Global Registers ****************************/
/* chip control Registers */
@@ -88,6 +89,9 @@
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
+#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
+#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
+#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
/* GPIO Registers */
#define WX_GPIO_DR 0x14800
@@ -161,6 +165,7 @@
/******************************* PSR Registers *******************************/
/* psr control */
#define WX_PSR_CTL 0x15000
+#define WX_PSR_VM_CTL 0x151B0
/* Header split receive */
#define WX_PSR_CTL_SW_EN BIT(18)
#define WX_PSR_CTL_RSC_ACK BIT(17)
@@ -181,6 +186,7 @@
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
/* VM L2 contorl */
#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
@@ -943,6 +949,7 @@ struct wx_ring_feature {
enum wx_ring_f_enum {
RING_F_NONE = 0,
RING_F_RSS,
+ RING_F_VMDQ,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
@@ -990,9 +997,34 @@ enum wx_state {
WX_STATE_RESETTING,
WX_STATE_NBITS, /* must be last */
};
+
+struct vf_data_storage {
+ struct pci_dev *vfdev;
+ unsigned char vf_mac_addr[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool link_enable;
+ bool trusted;
+ int xcast_mode;
+};
+
+struct vf_macvlans {
+ struct list_head l;
+ int vf;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
+};
+
+enum wx_pf_flags {
+ WX_FLAG_VMDQ_ENABLED,
+ WX_FLAG_SRIOV_ENABLED,
+ WX_PF_FLAGS_NBITS /* must be last */
+};
+
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_BITMAP(state, WX_STATE_NBITS);
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
void *priv;
u8 __iomem *hw_addr;
@@ -1082,6 +1114,12 @@ struct wx {
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+
+ int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
};
--
2.44.0
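For reference only, not part of the patch: __wx_enable_sriov() programs the
WX_CFG_PORT_CTL_NUM_VT field from the MAC type and VF count, which is also
what bounds num_vfs in wx_check_sriov_allowed(). An illustrative restatement
of that selection, as read from the code above:

	static u32 example_num_vt_field(struct wx *wx, u8 num_vfs)
	{
		if (wx->mac.type != wx_mac_sp)
			return WX_CFG_PORT_CTL_NUM_VT_8;	/* em: at most 7 VFs  */
		if (num_vfs < 32)
			return WX_CFG_PORT_CTL_NUM_VT_32;
		return WX_CFG_PORT_CTL_NUM_VT_64;		/* sp: at most 63 VFs */
	}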
* [PATCH net-next v4 3/6] net: libwx: Redesign flow when sriov is enabled
[not found] <20240604155850.51983-1-mengyuanlou@net-swift.com>
2024-06-04 15:57 ` [PATCH net-next v4 1/6] net: libwx: Add mailbox api for wangxun pf drivers Mengyuan Lou
2024-06-04 15:57 ` [PATCH net-next v4 2/6] net: libwx: Add sriov api for wangxun nics Mengyuan Lou
@ 2024-06-04 15:57 ` Mengyuan Lou
2024-06-05 8:54 ` Wojciech Drewek
2024-06-04 15:57 ` [PATCH net-next v4 4/6] net: libwx: Add msg task func Mengyuan Lou
` (2 subsequent siblings)
5 siblings, 1 reply; 14+ messages in thread
From: Mengyuan Lou @ 2024-06-04 15:57 UTC (permalink / raw)
To: netdev; +Cc: jiawenwu, duanqiangwen, Mengyuan Lou
Reallocate queue and interrupt resources when SR-IOV is enabled.
Redefine the VMDQ_P() macro so that it works in VT mode.
Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
---
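Not part of the patch, just a worked example of the new VMDq masks.
__ALIGN_MASK(x, m) expands to ((x + m) & ~m), so __ALIGN_MASK(1, ~mask)
recovers the queue count per pool that wx_cache_ring_vmdq() indexes by:

	u16 vmdq_mask  = WX_VMDQ_4Q_MASK;		/* 0x7C */
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq_mask);	/* = 4  */

	/* 32-pool/4-queue layout on the sp MAC: ring q of pool p sits at
	 * register index VMDQ_P(p) * 4 + q; the low bits
	 * (reg_idx & ~vmdq_mask) select the queue inside the pool.
	 * WX_VMDQ_2Q_MASK (0x7E) gives 2 queues per pool the same way.
	 */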
drivers/net/ethernet/wangxun/libwx/wx_hw.c | 293 ++++++++++++++++++-
drivers/net/ethernet/wangxun/libwx/wx_lib.c | 129 +++++++-
drivers/net/ethernet/wangxun/libwx/wx_type.h | 37 ++-
3 files changed, 442 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 7c4b6881a93f..8affcb9f7dbb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -10,6 +10,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_sriov.h"
#include "wx_hw.h"
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
@@ -804,11 +805,28 @@ static void wx_sync_mac_table(struct wx *wx)
}
}
+static void wx_full_sync_mac_table(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ wx_set_rar(wx, i,
+ wx->mac_table[i].addr,
+ wx->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wx, i);
+ }
+ wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
+ }
+}
+
/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
- wx->mac_table[0].pools = 1ULL;
+ wx->mac_table[0].pools = BIT(VMDQ_P(0));
wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
wx_set_rar(wx, 0, wx->mac_table[0].addr,
wx->mac_table[0].pools,
@@ -1046,6 +1064,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
wx_dbg(wx, "Update mc addr list Complete\n");
}
+static void wx_restore_vf_multicasts(struct wx *wx)
+{
+ u32 i, j, vector_bit, vector_reg;
+ struct vf_data_storage *vfinfo;
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i));
+
+ vfinfo = &wx->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ wx->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & GENMASK(6, 0);
+ vector_bit = vfinfo->vf_mc_hashes[j] & GENMASK(4, 0);
+ wr32m(wx, WX_PSR_MC_TBL(vector_reg),
+ BIT(vector_bit), BIT(vector_bit));
+ /* errata 5: maintain a copy of the reg table conf */
+ wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
+ }
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(i), vmolr);
+ }
+
+ /* Restore any VF macvlans */
+ wx_full_sync_mac_table(wx);
+}
+
/**
* wx_write_mc_addr_list - write multicast addresses to MTA
* @netdev: network interface device structure
@@ -1063,6 +1110,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev)
wx_update_mc_addr_list(wx, netdev);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ wx_restore_vf_multicasts(wx);
+
return netdev_mc_count(netdev);
}
@@ -1083,7 +1133,7 @@ int wx_set_mac(struct net_device *netdev, void *p)
if (retval)
return retval;
- wx_del_mac_filter(wx, wx->mac.addr, 0);
+ wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0));
eth_hw_addr_set(netdev, addr->sa_data);
memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
@@ -1178,6 +1228,10 @@ static int wx_hpbthresh(struct wx *wx)
/* Calculate delay value for device */
dv_id = WX_DV(link, tc);
+ /* Loopback switch introduces additional latency */
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ dv_id += WX_B2BT(tc);
+
/* Delay value is calculated in bit times convert to KB */
kb = WX_BT2KB(dv_id);
rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
@@ -1233,12 +1287,106 @@ static void wx_pbthresh_setup(struct wx *wx)
wx->fc.low_water = 0;
}
+static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 pfvfspoof, reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset));
+ if (enable)
+ pfvfspoof |= BIT(vf_shift);
+ else
+ pfvfspoof &= ~BIT(vf_shift);
+ wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
+}
+
+static int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ u32 index = vf / 32, vf_bit = vf % 32;
+ struct wx *wx = netdev_priv(netdev);
+ u32 regval;
+
+ if (vf >= wx->num_vfs)
+ return -EINVAL;
+
+ wx->vfinfo[vf].spoofchk_enabled = setting;
+
+ regval = (setting << vf_bit);
+ wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval);
+
+ if (wx->vfinfo[vf].vlan_count)
+ wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval);
+
+ return 0;
+}
+
+static void wx_configure_virtualization(struct wx *wx)
+{
+ u16 pool = wx->num_rx_pools;
+ u32 reg_offset, vf_shift;
+ u32 i;
+
+ if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return;
+
+ wr32m(wx, WX_PSR_VM_CTL,
+ WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN,
+ FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) |
+ WX_PSR_VM_CTL_REPLEN);
+ while (pool--)
+ wr32m(wx, WX_PSR_VM_L2CTL(pool), WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
+
+ if (wx->mac.type == wx_mac_sp) {
+ vf_shift = VMDQ_P(0) % 32;
+ reg_offset = VMDQ_P(0) / 32;
+
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1);
+ wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1);
+ } else {
+ vf_shift = BIT(VMDQ_P(0));
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(0), vf_shift);
+ wr32(wx, WX_TDM_VF_TE(0), vf_shift);
+ }
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ clear_bit(WX_FLAG2_VLAN_PROMISC, wx->flags);
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (!wx->vfinfo[i].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, i, false);
+ /* enable ethertype anti spoofing if hw supports it */
+ wx_set_ethertype_anti_spoofing(wx, true, i);
+ }
+}
+
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
- value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ if (wx->mac.type == wx_mac_em) {
+ value = (wx->num_vfs == 0) ?
+ WX_CFG_PORT_CTL_NUM_VT_NONE :
+ WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ if (wx->ring_feature[RING_F_RSS].indices == 4)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ } else {
+ value = 0;
+ }
+ }
+
+ value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK |
WX_CFG_PORT_CTL_D_VLAN |
WX_CFG_PORT_CTL_QINQ,
value);
@@ -1297,6 +1445,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable)
}
}
+static void wx_vlan_promisc_enable(struct wx *wx)
+{
+ u32 vlnctrl, i, vind, bits, reg_idx;
+
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ /* we need to keep the VLAN filter on in SRIOV */
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ } else {
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ return;
+ }
+ /* We are already in VLAN promisc, nothing to do */
+ if (test_bit(WX_FLAG2_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ set_bit(WX_FLAG2_VLAN_PROMISC, wx->flags);
+ /* Add PF to all active pools */
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ reg_idx = VMDQ_P(0) / 32;
+ vind = VMDQ_P(0) % 32;
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits |= BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* Set all bits in the VLAN filter table array */
+ for (i = 0; i < wx->mac.vft_size; i++)
+ wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX);
+}
+
+static void wx_scrub_vfta(struct wx *wx)
+{
+ u32 i, vid, bits, vfta, vind, vlvf, reg_idx;
+
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX);
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN;
+ if (vlvf & WX_PSR_VLAN_SWC_VIEN) {
+ /* if PF is part of this then continue */
+ if (test_bit(vid, wx->active_vlans))
+ continue;
+ }
+ /* remove PF from the pool */
+ reg_idx = VMDQ_P(0) / 32;
+ vind = VMDQ_P(0) % 32;
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits &= ~BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* extract values from vft_shadow and write back to VFTA */
+ for (i = 0; i < wx->mac.vft_size; i++) {
+ vfta = wx->mac.vft_shadow[i];
+ wr32(wx, WX_PSR_VLAN_TBL(i), vfta);
+ }
+}
+
+static void wx_vlan_promisc_disable(struct wx *wx)
+{
+ u32 vlnctrl;
+
+ /* configure vlan filtering */
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ /* We are not in VLAN promisc, nothing to do */
+ if (!test_bit(WX_FLAG2_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ clear_bit(WX_FLAG2_VLAN_PROMISC, wx->flags);
+ wx_scrub_vfta(wx);
+}
+
void wx_set_rx_mode(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -1309,7 +1534,7 @@ void wx_set_rx_mode(struct net_device *netdev)
/* Check for Promiscuous and All Multicast modes */
fctrl = rd32(wx, WX_PSR_CTL);
fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
- vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));
vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
WX_PSR_VM_L2CTL_MPE |
WX_PSR_VM_L2CTL_ROPE |
@@ -1330,7 +1555,10 @@ void wx_set_rx_mode(struct net_device *netdev)
fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
/* pf don't want packets routing to vf, so clear UPE */
vmolr |= WX_PSR_VM_L2CTL_MPE;
- vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
if (netdev->flags & IFF_ALLMULTI) {
@@ -1353,7 +1581,7 @@ void wx_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = wx_write_uc_addr_list(netdev, 0);
+ count = wx_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
vmolr |= WX_PSR_VM_L2CTL_UPE;
@@ -1371,7 +1599,7 @@ void wx_set_rx_mode(struct net_device *netdev)
wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
wr32(wx, WX_PSR_CTL, fctrl);
- wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+ wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr);
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_STAG_RX))
@@ -1379,6 +1607,10 @@ void wx_set_rx_mode(struct net_device *netdev)
else
wx_vlan_strip_control(wx, false);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ wx_vlan_promisc_disable(wx);
+ else
+ wx_vlan_promisc_enable(wx);
}
EXPORT_SYMBOL(wx_set_rx_mode);
@@ -1621,6 +1853,13 @@ static void wx_setup_reta(struct wx *wx)
u32 random_key_size = WX_RSS_KEY_SIZE / 4;
u32 i, j;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
+ if (wx->mac.type == wx_mac_sp)
+ rss_i = rss_i < 4 ? 4 : rss_i;
+ else if (wx->mac.type == wx_mac_em)
+ rss_i = 1;
+ }
+
/* Fill out hash function seeds */
for (i = 0; i < random_key_size; i++)
wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
@@ -1638,10 +1877,40 @@ static void wx_setup_reta(struct wx *wx)
wx_store_reta(wx);
}
+static void wx_setup_psrtype(struct wx *wx)
+{
+ int rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 psrtype;
+ int pool;
+
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_OUTL2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+
+ if (wx->mac.type == wx_mac_sp) {
+ if (rss_i > 3)
+ psrtype |= FIELD_PREP(GENMASK(31, 29), 2);
+ else if (rss_i > 1)
+ psrtype |= FIELD_PREP(GENMASK(31, 29), 1);
+
+ for_each_set_bit(pool, &wx->fwd_bitmask, 32)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ } else {
+ for_each_set_bit(pool, &wx->fwd_bitmask, 8)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ }
+}
+
static void wx_setup_mrqc(struct wx *wx)
{
u32 rss_field = 0;
+ /* VT, and RSS do not coexist at the same time */
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return;
+
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
@@ -1671,16 +1940,11 @@ static void wx_setup_mrqc(struct wx *wx)
**/
void wx_configure_rx(struct wx *wx)
{
- u32 psrtype, i;
int ret;
+ u32 i;
wx_disable_rx(wx);
-
- psrtype = WX_RDB_PL_CFG_L4HDR |
- WX_RDB_PL_CFG_L3HDR |
- WX_RDB_PL_CFG_L2HDR |
- WX_RDB_PL_CFG_TUN_TUNHDR;
- wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+ wx_setup_psrtype(wx);
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
@@ -1728,6 +1992,7 @@ void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
wx_pbthresh_setup(wx);
+ wx_configure_virtualization(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 68bde91b67a0..8e4c0e24a4a3 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1558,6 +1558,65 @@ void wx_napi_disable_all(struct wx *wx)
}
EXPORT_SYMBOL(wx_napi_disable_all);
+static bool wx_set_vmdq_queues(struct wx *wx)
+{
+ u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit;
+ u16 rss_i = wx->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = WX_RSS_DISABLED_MASK;
+ u16 vmdq_m = 0;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+ /* Add starting offset to total pool count */
+ vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
+
+ if (wx->mac.type == wx_mac_sp) {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 64, vmdq_i);
+
+ /* 64 pool mode with 2 queues per pool, or
+ * 16/32/64 pool mode with 1 queue per pool
+ */
+ if (vmdq_i > 32 || rss_i < 4) {
+ vmdq_m = WX_VMDQ_2Q_MASK;
+ rss_m = WX_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = WX_VMDQ_4Q_MASK;
+ rss_m = WX_RSS_4Q_MASK;
+ rss_i = 4;
+ }
+ } else {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 8, vmdq_i);
+
+ /* when VMDQ on, disable RSS */
+ rss_i = 1;
+ }
+
+ /* remove the starting offset from the pool count */
+ vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ wx->ring_feature[RING_F_RSS].indices = rss_i;
+ wx->ring_feature[RING_F_RSS].mask = rss_m;
+
+ wx->queues_per_pool = rss_i; /* may duplicate num_rx_queues_per_pool */
+ wx->num_rx_pools = vmdq_i;
+ wx->num_rx_queues_per_pool = rss_i;
+
+ wx->num_rx_queues = vmdq_i * rss_i;
+ wx->num_tx_queues = vmdq_i * rss_i;
+
+ return true;
+}
+
/**
* wx_set_rss_queues: Allocate queues for RSS
* @wx: board private structure to initialize
@@ -1574,6 +1633,11 @@ static void wx_set_rss_queues(struct wx *wx)
f = &wx->ring_feature[RING_F_RSS];
f->indices = f->limit;
+ if (wx->mac.type == wx_mac_sp)
+ f->mask = WX_RSS_64Q_MASK;
+ else
+ f->mask = WX_RSS_8Q_MASK;
+
wx->num_rx_queues = f->limit;
wx->num_tx_queues = f->limit;
}
@@ -1585,6 +1649,9 @@ static void wx_set_num_queues(struct wx *wx)
wx->num_tx_queues = 1;
wx->queues_per_pool = 1;
+ if (wx_set_vmdq_queues(wx))
+ return;
+
wx_set_rss_queues(wx);
}
@@ -1665,6 +1732,10 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM))
return ret;
+ /* Disable VMDq support */
+ dev_warn(&wx->pdev->dev, "Disabling VMDq support\n");
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+
/* Disable RSS */
dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
wx->ring_feature[RING_F_RSS].limit = 1;
@@ -1690,6 +1761,49 @@ static int wx_set_interrupt_capability(struct wx *wx)
return 0;
}
+static bool wx_cache_ring_vmdq(struct wx *wx)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS];
+ u16 reg_idx;
+ int i;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+
+ if (wx->mac.type == wx_mac_sp) {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->rx_ring[i]->reg_idx = reg_idx;
+ }
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->tx_ring[i]->reg_idx = reg_idx;
+ }
+ } else {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* one queue per pool: ring i maps to pool (offset + i) */
+ wx->rx_ring[i]->reg_idx = reg_idx + i;
+
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_tx_queues; i++)
+ /* one queue per pool: ring i maps to pool (offset + i) */
+ wx->tx_ring[i]->reg_idx = reg_idx + i;
+ }
+
+ return true;
+}
+
/**
* wx_cache_ring_rss - Descriptor ring to register mapping for RSS
* @wx: board private structure to initialize
@@ -1701,6 +1815,9 @@ static void wx_cache_ring_rss(struct wx *wx)
{
u16 i;
+ if (wx_cache_ring_vmdq(wx))
+ return;
+
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->reg_idx = i;
@@ -2089,7 +2206,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
- msix_vector += 1; /* offset for queue vectors */
+ if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7))
+ msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2134,10 +2252,17 @@ void wx_configure_vectors(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
u32 eitrsel = 0;
- u16 v_idx;
+ u16 v_idx, i;
if (pdev->msix_enabled) {
/* Populate MSIX to EITR Select */
+ if (wx->mac.type == wx_mac_sp) {
+ if (wx->num_vfs >= 32)
+ eitrsel = BIT(wx->num_vfs % 32) - 1;
+ } else if (wx->mac.type == wx_mac_em) {
+ for (i = 0; i < wx->num_vfs; i++)
+ eitrsel |= BIT(i);
+ }
wr32(wx, WX_PX_ITRSEL, eitrsel);
/* use EIAM to auto-mask when MSI-X interrupt is asserted
* this saves a register write for every interrupt
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 7dad022e01e9..126416534181 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -19,6 +19,7 @@
#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
#define WX_PCI_LINK_STATUS 0xB2
#define WX_MAX_PF_MACVLANS 15
+#define WX_MAX_VF_MC_ENTRIES 30
/**************** Global Registers ****************************/
/* chip control Registers */
@@ -75,6 +76,7 @@
#define WX_MAC_LXONOFFRXC 0x11E0C
/*********************** Receive DMA registers **************************/
+#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
#define WX_RDM_DRP_PKT 0x12500
#define WX_RDM_PKT_CNT 0x12504
#define WX_RDM_BYTE_CNT_LSB 0x12508
@@ -89,6 +91,7 @@
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
+#define WX_CFG_PORT_CTL_NUM_VT_NONE 0
#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
@@ -114,6 +117,10 @@
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
+#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
+#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
+#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
+
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
@@ -186,6 +193,7 @@
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */
#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
/* VM L2 contorl */
@@ -230,6 +238,7 @@
#define WX_PSR_VLAN_SWC 0x16220
#define WX_PSR_VLAN_SWC_VM_L 0x16224
#define WX_PSR_VLAN_SWC_VM_H 0x16228
+#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4))
#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
/* VLAN pool filtering masks */
#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
@@ -244,6 +253,10 @@
#define WX_RSC_ST 0x17004
#define WX_RSC_ST_RSEC_RDY BIT(0)
+/*********************** Transmit DMA registers **************************/
+/* transmit global control */
+#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4))
+
/****************************** TDB ******************************************/
#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
@@ -371,6 +384,15 @@ enum WX_MSCA_CMD_value {
/* Number of 80 microseconds we wait for PCI Express master disable */
#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000
+#define WX_RSS_64Q_MASK 0x3F
+#define WX_RSS_8Q_MASK 0x7
+#define WX_RSS_4Q_MASK 0x3
+#define WX_RSS_2Q_MASK 0x1
+#define WX_RSS_DISABLED_MASK 0x0
+
+#define WX_VMDQ_4Q_MASK 0x7C
+#define WX_VMDQ_2Q_MASK 0x7E
+
/****************** Manageablility Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
@@ -435,7 +457,12 @@ enum WX_MSCA_CMD_value {
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
-#define VMDQ_P(p) p
+/* must account for pools assigned to VFs. */
+#ifdef CONFIG_PCI_IOV
+#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset)
+#else
+#define VMDQ_P(p) (p)
+#endif
/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256 256 /* Used for skb receive header */
@@ -1005,6 +1032,10 @@ struct vf_data_storage {
bool link_enable;
bool trusted;
int xcast_mode;
+
+ u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 vlan_count;
};
struct vf_macvlans {
@@ -1017,6 +1048,7 @@ struct vf_macvlans {
enum wx_pf_flags {
WX_FLAG_VMDQ_ENABLED,
+ WX_FLAG2_VLAN_PROMISC,
WX_FLAG_SRIOV_ENABLED,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1085,6 +1117,8 @@ struct wx {
struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
struct wx_ring *rx_ring[64];
struct wx_q_vector *q_vector[64];
+ int num_rx_pools; /* does not include pools assigned to VFs */
+ int num_rx_queues_per_pool;
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
@@ -1118,6 +1152,7 @@ struct wx {
struct vf_data_storage *vfinfo;
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
+ unsigned long fwd_bitmask; /* bitmask indicating in use pools */
int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
--
2.44.0
* [PATCH net-next v4 4/6] net: libwx: Add msg task func
[not found] <20240604155850.51983-1-mengyuanlou@net-swift.com>
` (2 preceding siblings ...)
2024-06-04 15:57 ` [PATCH net-next v4 3/6] net: libwx: Redesign flow when sriov is enabled Mengyuan Lou
@ 2024-06-04 15:57 ` Mengyuan Lou
2024-06-05 9:41 ` Wojciech Drewek
2024-06-05 18:44 ` Simon Horman
2024-06-04 15:57 ` [PATCH net-next v4 5/6] net: ngbe: add sriov function support Mengyuan Lou
2024-06-04 15:57 ` [PATCH net-next v4 6/6] net: txgbe: " Mengyuan Lou
5 siblings, 2 replies; 14+ messages in thread
From: Mengyuan Lou @ 2024-06-04 15:57 UTC (permalink / raw)
To: netdev; +Cc: jiawenwu, duanqiangwen, Mengyuan Lou
Implement wx_msg_task(), which is used to process mailbox
messages sent by VFs.
Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
---
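Not part of the patch, just a sketch of how one VF message is laid out with
the definitions added here. Assumptions of the sketch: the opcode lives in
the low 16 bits of word 0, and error handling is elided; only the
WX_VT_MSGINFO/ACK/NACK definitions, WX_VF_SET_MULTICAST and
wx_write_mbx_pf() come from the series.

	static void example_handle_vf_msg(struct wx *wx, u32 *msgbuf, u16 vf)
	{
		u32 cmd = msgbuf[0] & GENMASK(15, 0);	/* assumed opcode field */

		switch (cmd) {
		case WX_VF_SET_MULTICAST: {
			/* WX_VT_MSGINFO carries the number of hash entries */
			u16 entries = FIELD_GET(WX_VT_MSGINFO_MASK, msgbuf[0]);

			/* ... walk 'entries' u16 hashes starting at msgbuf[1] ... */
			break;
		}
		default:
			break;
		}

		/* echo the command back with ACK (or NACK on failure) set */
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;
		wx_write_mbx_pf(wx, msgbuf, 1, vf);
	}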
drivers/net/ethernet/wangxun/libwx/wx_hw.c | 12 +-
drivers/net/ethernet/wangxun/libwx/wx_hw.h | 4 +
drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 50 ++
drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 725 ++++++++++++++++++
drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 1 +
drivers/net/ethernet/wangxun/libwx/wx_type.h | 17 +
6 files changed, 805 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 8affcb9f7dbb..cd86b2508db4 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -851,7 +851,7 @@ void wx_flush_sw_mac_table(struct wx *wx)
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);
-static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -881,8 +881,9 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
}
return -ENOMEM;
}
+EXPORT_SYMBOL(wx_add_mac_filter);
-static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -905,6 +906,7 @@ static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
}
return -ENOMEM;
}
+EXPORT_SYMBOL(wx_del_mac_filter);
static int wx_available_rars(struct wx *wx)
{
@@ -1302,7 +1304,7 @@ static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
}
-static int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
u32 index = vf / 32, vf_bit = vf % 32;
struct wx *wx = netdev_priv(netdev);
@@ -1321,6 +1323,7 @@ static int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
return 0;
}
+EXPORT_SYMBOL(wx_set_vf_spoofchk);
static void wx_configure_virtualization(struct wx *wx)
{
@@ -2347,7 +2350,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
u32 bitindex, vfta, targetbit;
bool vfta_changed = false;
@@ -2393,6 +2396,7 @@ static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
return 0;
}
+EXPORT_SYMBOL(wx_set_vfta);
/**
* wx_clear_vfta - Clear VLAN filter table
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 9e219fa717a2..ea2b7c932274 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -25,9 +25,12 @@ void wx_init_eeprom_params(struct wx *wx);
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
void wx_init_rx_addrs(struct wx *wx);
void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool);
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
@@ -39,6 +42,7 @@ int wx_stop_adapter(struct wx *wx);
void wx_reset_misc(struct wx *wx);
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
index 3c70654a8b14..00a9dda8365c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -21,10 +21,60 @@
#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
+#define WX_VT_MSGTYPE_ACK BIT(31)
+#define WX_VT_MSGTYPE_NACK BIT(30)
+#define WX_VT_MSGTYPE_CTS BIT(29)
+#define WX_VT_MSGINFO_SHIFT 16
#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
+enum wx_pfvf_api_rev {
+ wx_mbox_api_null,
+ wx_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ wx_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ wx_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ wx_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ wx_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ wx_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
+#define WX_VF_RESET 0x01 /* VF requests reset */
+#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* mailbox API, version 1.2 VF requests */
+#define WX_VF_GET_RETA 0x0a /* VF request for RETA */
+#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define WX_VF_UPDATE_XCAST_MODE 0x0c
+#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */
+#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */
+#define WX_VF_BACKUP 0x8001 /* VF requests backup */
+
+#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */
+#define WX_PF_NOFITY_VF_LINK_STATUS 0x1
+#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31)
+
+#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define WX_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define WX_VF_DEF_QUEUE 4 /* Default queue offset */
+
+#define WX_VF_PERMADDR_MSG_LEN 4
+
enum wxvf_xcast_modes {
WXVF_XCAST_MODE_NONE = 0,
+ WXVF_XCAST_MODE_MULTI,
+ WXVF_XCAST_MODE_ALLMULTI,
+ WXVF_XCAST_MODE_PROMISC,
};
int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index 032b75f23460..315d51961449 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include "wx_type.h"
+#include "wx_hw.h"
#include "wx_mbx.h"
#include "wx_sriov.h"
@@ -219,3 +220,727 @@ int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
EXPORT_SYMBOL(wx_pci_sriov_configure);
+
+static int wx_set_vf_mac(struct wx *wx, u16 vf, unsigned char *mac_addr)
+{
+ int ret = 0;
+
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ ret = wx_add_mac_filter(wx, mac_addr, vf);
+ if (ret >= 0)
+ memcpy(wx->vfinfo[vf].vf_mac_addr, mac_addr, ETH_ALEN);
+ else
+ memset(wx->vfinfo[vf].vf_mac_addr, 0, ETH_ALEN);
+
+ return ret;
+}
+
+static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
+{
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+
+ vmolr |= WX_PSR_VM_L2CTL_BAM;
+ if (aupe)
+ vmolr |= WX_PSR_VM_L2CTL_AUPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
+{
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
+ WX_TDM_VLAN_INS_VLANA_DEFAULT;
+
+ wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
+}
+
+static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
+{
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ return wx_set_vfta(wx, vid, vf, (bool)add);
+}
+
+static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 index = vf / 32, vf_bit = vf % 32;
+ u32 pfvfspoof;
+
+ pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
+ if (enable)
+ pfvfspoof |= BIT(vf_bit);
+ else
+ pfvfspoof &= ~BIT(vf_bit);
+ wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
+}
+
+static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ u32 reg = 0, n = vf * q_per_pool / 32;
+ u32 i = vf * q_per_pool;
+
+ reg = rd32(wx, WX_RDM_PF_QDE(n));
+ for (i = (vf * q_per_pool - n * 32);
+ i < ((vf + 1) * q_per_pool - n * 32);
+ i++) {
+ if (qde == 1)
+ reg |= qde << i;
+ else
+ reg &= qde << i;
+ }
+
+ wr32(wx, WX_RDM_PF_QDE(n), reg);
+}
+
+static void wx_clear_vmvir(struct wx *wx, u32 vf)
+{
+ wr32(wx, WX_TDM_VLAN_INS(vf), 0);
+}
+
+static void wx_set_vf_rx_tx(struct wx *wx, int vf)
+{
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+ u32 index, vf_bit;
+
+ vf_bit = vf % 32;
+ index = vf / 32;
+
+ reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
+ reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));
+
+ if (wx->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | BIT(vf_bit);
+ reg_req_rx = reg_cur_rx | BIT(vf_bit);
+ /* Enable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
+ } else {
+ reg_req_tx = BIT(vf_bit);
+ reg_req_rx = BIT(vf_bit);
+ /* Disable particular VF */
+ if (reg_cur_tx & reg_req_tx)
+ wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
+ if (reg_cur_rx & reg_req_rx)
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
+ }
+}
+
+static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+
+ /* verify the PF is supporting the correct APIs */
+ switch (wx->vfinfo[vf].vf_api) {
+ case wx_mbox_api_11 ... wx_mbox_api_20:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* only allow 1 Tx queue for bandwidth limiting */
+ msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
+ msgbuf[WX_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[WX_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[WX_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
+}
+
+static void wx_vf_reset_event(struct wx *wx, u16 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u8 num_tcs = netdev_get_num_tc(wx->netdev);
+
+ /* add PF assigned VLAN or VLAN 0 */
+ wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);
+
+ /* reset offloads to defaults */
+ wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ wx_clear_vmvir(wx, vf);
+ } else {
+ if (vfinfo->pf_qos || !num_tcs)
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ wx->default_up, vf);
+ }
+
+ /* reset multicast table array for vf */
+ wx->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ wx_set_rx_mode(wx->netdev);
+
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+
+ /* reset VF api back to unknown */
+ wx->vfinfo[vf].vf_api = wx_mbox_api_10;
+}
+
+static void wx_vf_reset_msg(struct wx *wx, u16 vf)
+{
+ unsigned char *vf_mac = wx->vfinfo[vf].vf_mac_addr;
+ struct net_device *dev = wx->netdev;
+ u32 msgbuf[5] = {0, 0, 0, 0, 0};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 reg = 0, index, vf_bit;
+ int pf_max_frame;
+
+ /* reset the filters for the device */
+ wx_vf_reset_event(wx, vf);
+
+ /* set vf mac address */
+ if (!is_zero_ether_addr(vf_mac))
+ wx_set_vf_mac(wx, vf, vf_mac);
+
+ vf_bit = vf % 32;
+ index = vf / 32;
+
+ /* force drop enable for all VF Rx queues */
+ wx_write_qde(wx, vf, 1);
+
+ /* set transmit and receive for vf */
+ wx_set_vf_rx_tx(wx, vf);
+
+ pf_max_frame = dev->mtu + ETH_HLEN;
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg = BIT(vf_bit);
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg);
+
+ /* enable VF mailbox for further messages */
+ wx->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = WX_VF_RESET;
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_err(wx, "VF %d has no MAC address assigned", vf);
+ }
+
+ /* Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = wx->mac.mc_filter_type;
+ wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
+}
+
+static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+
+ if (wx->vfinfo[vf].pf_set_mac &&
+ memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
+ wx_err(wx,
+ "VF %d attempted to set a MAC address but it already had a MAC address.",
+ vf);
+ return -EBUSY;
+ }
+ return wx_set_vf_mac(wx, vf, new_mac) < 0;
+}
+
+static int wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
+ >> WX_VT_MSGINFO_SHIFT;
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ u32 vector_bit, vector_reg, mta_reg, i;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+
+ /* only so many hash values supported */
+ entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
+ /* salt away the number of multicast addresses assigned
+ * to this VF for later use to restore when the PF multicast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /* VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++)
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ /* errata 5: maintain a copy of the register table conf */
+ mta_reg = wx->mac.mta_shadow[vector_reg];
+ mta_reg |= (1 << vector_bit);
+ wx->mac.mta_shadow[vector_reg] = mta_reg;
+ wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
+ }
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+
+ return 0;
+}
+
+static int wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
+{
+ struct net_device *netdev = wx->netdev;
+ u32 index, vf_bit, vfre;
+ u32 max_frs, reg_val;
+ int pf_max_frame;
+ int err = 0;
+
+ pf_max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ switch (wx->vfinfo[vf].vf_api) {
+ case wx_mbox_api_11 ... wx_mbox_api_13:
+ /* Version 1.1 supports jumbo frames on VFs if PF has
+ * jumbo frames enabled which means legacy VFs are
+ * disabled
+ */
+ if (pf_max_frame > ETH_FRAME_LEN)
+ break;
+ fallthrough;
+ default:
+ /* If the PF or VF are running w/ jumbo frames enabled
+ * we need to shut down the VF Rx path as we cannot
+ * support jumbo frames on legacy VFs
+ */
+ if (pf_max_frame > ETH_FRAME_LEN ||
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)))
+ err = -EINVAL;
+ break;
+ }
+
+ /* determine VF receive enable location */
+ vf_bit = vf % 32;
+ index = vf / 32;
+
+ /* enable or disable receive depending on error */
+ vfre = rd32(wx, WX_RDM_VF_RE(index));
+ if (err)
+ vfre &= ~BIT(vf_bit);
+ else
+ vfre |= BIT(vf_bit);
+ wr32(wx, WX_RDM_VF_RE(index), vfre);
+
+ if (err) {
+ wx_err(wx, "VF max_frame %d out of range\n", max_frame);
+ return err;
+ }
+ /* pull current max frame size from hardware */
+ max_frs = DIV_ROUND_UP(max_frame, 1024);
+ reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
+ if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
+ wr32(wx, WX_MAC_WDG_TIMEOUT, max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
+
+ return 0;
+}
+
+static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
+{
+ int regindex;
+ u32 vlvf;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
+ regindex = -EINVAL;
+
+ return regindex;
+}
+
+static int wx_set_vf_macvlan(struct wx *wx,
+ u16 vf, int index, unsigned char *mac_addr)
+{
+ struct vf_macvlans *entry;
+ struct list_head *pos;
+ int retval = 0;
+
+ if (index <= 1) {
+ list_for_each(pos, &wx->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ wx_del_mac_filter(wx, entry->vf_macvlan, vf);
+ }
+ }
+ }
+
+ /* If index was zero then we were asked to clear the uc list
+ * for the VF. We're done.
+ */
+ if (!index)
+ return 0;
+
+ entry = NULL;
+
+ list_for_each(pos, &wx->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->free)
+ break;
+ }
+
+ /* If we traversed the entire list and didn't find a free entry
+ * then we're out of space on the RAR table. Also entry may
+ * be NULL because the original memory allocation for the list
+ * failed, which is not fatal but does mean we can't support
+ * VF requests for MACVLAN because we couldn't allocate
+ * memory for the list management required.
+ */
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ retval = wx_add_mac_filter(wx, mac_addr, vf);
+ if (retval >= 0) {
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+ }
+
+ return retval;
+}
+
+static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
+ int err;
+
+ if (add)
+ wx->vfinfo[vf].vlan_count++;
+ else if (wx->vfinfo[vf].vlan_count)
+ wx->vfinfo[vf].vlan_count--;
+
+ /* in case of promiscuous mode any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && wx->netdev->flags & IFF_PROMISC)
+ err = wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+
+ err = wx_set_vf_vlan(wx, add, vid, vf);
+ if (!err && wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vlan_anti_spoofing(wx, true, vf);
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && wx->netdev->flags & IFF_PROMISC) {
+ u32 bits = 0, vlvf;
+ int reg_ndx;
+
+ reg_ndx = wx_find_vlvf_entry(wx, vid);
+ if (reg_ndx < 0)
+ goto out;
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ bits &= ~BIT(VMDQ_P(0));
+ if (wx->mac.type == wx_mac_sp)
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ } else {
+ if (wx->mac.type == wx_mac_sp)
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ bits &= ~BIT(VMDQ_P(0) % 32);
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ }
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid && !bits)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+ }
+
+out:
+ return err;
+}
+
+static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
+ WX_VT_MSGINFO_SHIFT;
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int err;
+
+ if (wx->vfinfo[vf].pf_set_mac && index > 0) {
+ wx_err(wx, "VF %d requested MACVLAN filter but is administratively denied\n", vf);
+ return -EINVAL;
+ }
+
+ /* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+ /* If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, vf, false);
+ }
+
+ err = wx_set_vf_macvlan(wx, vf, index, new_mac);
+ if (err == -ENOSPC)
+ wx_err(wx,
+ "VF %d has requested a MACVLAN filter but there is no space for it\n",
+ vf);
+
+ return err < 0;
+}
+
+static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case wx_mbox_api_10 ... wx_mbox_api_13:
+ wx->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
+ return -EINVAL;
+ }
+}
+
+static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ /* verify the PF is supporting the correct API */
+ switch (wx->vfinfo[vf].vf_api) {
+ case wx_mbox_api_12 ... wx_mbox_api_13:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[1] = wx->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
+static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ unsigned long fw_version = 0ULL;
+ int ret = 0;
+
+ /* verify the PF is supporting the correct API */
+ switch (wx->vfinfo[vf].vf_api) {
+ case wx_mbox_api_12 ... wx_mbox_api_13:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
+ if (ret)
+ return -EOPNOTSUPP;
+ msgbuf[1] = fw_version;
+
+ return 0;
+}
+
+static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ /* verify the PF is supporting the correct APIs */
+ switch (wx->vfinfo[vf].vf_api) {
+ case wx_mbox_api_12:
+ /* promisc introduced in 1.3 version */
+ if (xcast_mode == WXVF_XCAST_MODE_PROMISC)
+ return -EOPNOTSUPP;
+ fallthrough;
+ case wx_mbox_api_13:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (wx->vfinfo[vf].xcast_mode == xcast_mode)
+ goto out;
+
+ switch (xcast_mode) {
+ case WXVF_XCAST_MODE_NONE:
+ disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ enable = 0;
+ break;
+ case WXVF_XCAST_MODE_MULTI:
+ disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
+ break;
+ case WXVF_XCAST_MODE_ALLMULTI:
+ disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | WX_PSR_VM_L2CTL_MPE;
+ break;
+ case WXVF_XCAST_MODE_PROMISC:
+ disable = 0;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+
+ wx->vfinfo[vf].xcast_mode = xcast_mode;
+out:
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
+static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
+{
+ u16 mbx_size = WX_VXMAILBOX_SIZE;
+ u32 msgbuf[WX_VXMAILBOX_SIZE];
+ int retval;
+
+ retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
+ if (retval) {
+ wx_err(wx, "Error receiving message from VF\n");
+ return;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
+ return;
+
+ if (msgbuf[0] == WX_VF_RESET) {
+ wx_vf_reset_msg(wx, vf);
+ return;
+ }
+
+ /* until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+ if (!wx->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_write_mbx_pf(wx, msgbuf, 1, vf);
+ return;
+ }
+
+ switch ((msgbuf[0] & U16_MAX)) {
+ case WX_VF_SET_MAC_ADDR:
+ retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_MULTICAST:
+ retval = wx_set_vf_multicasts(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_VLAN:
+ retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_LPE:
+ if (msgbuf[1] > WX_MAX_JUMBO_FRAME_SIZE) {
+ wx_err(wx, "VF max_frame %d out of range\n", msgbuf[1]);
+ return;
+ }
+ retval = wx_set_vf_lpe(wx, msgbuf[1], vf);
+ break;
+ case WX_VF_SET_MACVLAN:
+ retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_API_NEGOTIATE:
+ retval = wx_negotiate_vf_api(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_QUEUES:
+ retval = wx_get_vf_queues(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_LINK_STATE:
+ retval = wx_get_vf_link_state(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_FW_VERSION:
+ retval = wx_get_fw_version(wx, msgbuf, vf);
+ break;
+ case WX_VF_UPDATE_XCAST_MODE:
+ retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
+ break;
+ case WX_VF_BACKUP:
+ break;
+ default:
+ wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+
+ wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
+}
+
+static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
+{
+ u32 msg = WX_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!wx->vfinfo[vf].clear_to_send)
+ wx_write_mbx_pf(wx, &msg, 1, vf);
+}
+
+void wx_msg_task(struct wx *wx)
+{
+ u16 vf;
+
+ for (vf = 0; vf < wx->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!wx_check_for_rst_pf(wx, vf))
+ wx_vf_reset_event(wx, vf);
+
+ /* process any messages pending */
+ if (!wx_check_for_msg_pf(wx, vf))
+ wx_rcv_msg_from_vf(wx, vf);
+
+ /* process any acks */
+ if (!wx_check_for_ack_pf(wx, vf))
+ wx_rcv_ack_from_vf(wx, vf);
+ }
+}
+EXPORT_SYMBOL(wx_msg_task);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
index 17b547ae8862..f311774a2a18 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -6,5 +6,6 @@
int wx_disable_sriov(struct wx *wx);
int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void wx_msg_task(struct wx *wx);
#endif /* _WX_SRIOV_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 126416534181..3a7931c2e4bc 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -77,6 +77,8 @@
/*********************** Receive DMA registers **************************/
#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
+#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4))
+#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4))
#define WX_RDM_DRP_PKT 0x12500
#define WX_RDM_PKT_CNT 0x12504
#define WX_RDM_BYTE_CNT_LSB 0x12508
@@ -120,6 +122,7 @@
#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
+#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4))
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
@@ -200,6 +203,7 @@
#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */
+#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */
#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
@@ -243,6 +247,7 @@
/* VLAN pool filtering masks */
#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
#define WX_PSR_VLAN_SWC_ENTRIES 64
+#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0)
/********************************* RSEC **************************************/
/* general rsec */
@@ -256,6 +261,9 @@
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4))
+#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4))
+/* Per VF Port VLAN insertion rules */
+#define WX_TDM_VLAN_INS_VLANA_DEFAULT BIT(30) /* Always use default VLAN*/
/****************************** TDB ******************************************/
#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
@@ -296,6 +304,9 @@
#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
+
+#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0)
+#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2
/* MDIO Registers */
#define WX_MSCA 0x11200
#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
@@ -1032,6 +1043,11 @@ struct vf_data_storage {
bool link_enable;
bool trusted;
int xcast_mode;
+ unsigned int vf_api;
+ bool clear_to_send;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ bool pf_set_mac;
u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
@@ -1139,6 +1155,7 @@ struct wx {
u32 wol;
u16 bd_number;
+ bool default_up;
struct wx_hw_stats stats;
u64 tx_busy;
--
2.44.0
* [PATCH net-next v4 5/6] net: ngbe: add sriov function support
[not found] <20240604155850.51983-1-mengyuanlou@net-swift.com>
` (3 preceding siblings ...)
2024-06-04 15:57 ` [PATCH net-next v4 4/6] net: libwx: Add msg task func Mengyuan Lou
@ 2024-06-04 15:57 ` Mengyuan Lou
2024-06-05 9:44 ` Wojciech Drewek
2024-06-04 15:57 ` [PATCH net-next v4 6/6] net: txgbe: " Mengyuan Lou
5 siblings, 1 reply; 14+ messages in thread
From: Mengyuan Lou @ 2024-06-04 15:57 UTC (permalink / raw)
To: netdev; +Cc: jiawenwu, duanqiangwen, Mengyuan Lou
Add sriov_configure for driver ops.
Add mailbox handler wx_msg_task for ngbe in
the interrupt handler.
Add the notification flow when VFs exist.
Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
---
drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 31 ++++++++++
drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 2 +
drivers/net/ethernet/wangxun/libwx/wx_type.h | 2 +
drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 58 +++++++++++++++++--
drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 10 ++++
drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 2 +
6 files changed, 101 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index 315d51961449..6d470cd0f317 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -944,3 +944,34 @@ void wx_msg_task(struct wx *wx)
}
}
EXPORT_SYMBOL(wx_msg_task);
+
+void wx_disable_vf_rx_tx(struct wx *wx)
+{
+ wr32(wx, WX_TDM_VFTE_CLR(0), 0);
+ wr32(wx, WX_RDM_VFRE_CLR(0), 0);
+ if (wx->mac.type == wx_mac_sp) {
+ wr32(wx, WX_TDM_VFTE_CLR(1), 0);
+ wr32(wx, WX_RDM_VFRE_CLR(1), 0);
+ }
+}
+EXPORT_SYMBOL(wx_disable_vf_rx_tx);
+
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
+{
+ u32 msgbuf[2] = {0, 0};
+ u16 i;
+
+ if (!wx->num_vfs)
+ return;
+ msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
+ if (link_up)
+ msgbuf[1] = (wx->speed << 1) | link_up;
+ if (wx->notify_not_runnning)
+ msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
+ for (i = 0 ; i < wx->num_vfs; i++) {
+ if (wx->vfinfo[i].clear_to_send)
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, msgbuf, 2, i);
+ }
+}
+EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
index f311774a2a18..7e45b3f71a7b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -7,5 +7,7 @@
int wx_disable_sriov(struct wx *wx);
int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
void wx_msg_task(struct wx *wx);
+void wx_disable_vf_rx_tx(struct wx *wx);
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
#endif /* _WX_SRIOV_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 3a7931c2e4bc..b8f0bf93a0fb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -87,6 +87,7 @@
/************************* Port Registers ************************************/
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
+#define WX_CFG_PORT_CTL_PFRSTD BIT(14)
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
@@ -1102,6 +1103,7 @@ struct wx {
enum wx_reset_type reset_type;
/* PHY stuff */
+ bool notify_not_runnning;
unsigned int link;
int speed;
int duplex;
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index e894e01d030d..583e8e882f17 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -14,6 +14,8 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -128,6 +130,10 @@ static int ngbe_sw_init(struct wx *wx)
wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->setup_tc = ngbe_setup_tc;
+ set_bit(0, &wx->fwd_bitmask);
+
return 0;
}
@@ -197,11 +203,25 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
{
- struct wx *wx = data;
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ u32 eicr;
- /* re-enable the original interrupt state, no lsc, no queues */
- if (netif_running(wx->netdev))
- ngbe_irq_enable(wx, false);
+ q_vector = wx->q_vector[0];
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
+ wx_msg_task(wx);
+
+ if (wx->num_vfs == 7) {
+ napi_schedule_irqoff(&q_vector->napi);
+ ngbe_irq_enable(wx, true);
+ } else {
+ /* re-enable the original interrupt state, no lsc, no queues */
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, false);
+ }
return IRQ_HANDLED;
}
@@ -291,6 +311,22 @@ static void ngbe_disable_device(struct wx *wx)
struct net_device *netdev = wx->netdev;
u32 i;
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ wx->notify_not_runnning = true;
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+ wx->notify_not_runnning = false;
+
+ /* Disable all VFTE/VFRE TX/RX */
+ wx_disable_vf_rx_tx(wx);
+ }
+
/* disable all enabled rx queues */
for (i = 0; i < wx->num_rx_queues; i++)
/* this call also flushes the previous write */
@@ -313,10 +349,17 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
+static void ngbe_reset(struct wx *wx)
+{
+ wx_flush_sw_mac_table(wx);
+ wx_mac_set_default_filter(wx, wx->mac.addr);
+}
+
void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
+ ngbe_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -339,6 +382,11 @@ void ngbe_up(struct wx *wx)
ngbe_sfp_modules_txrx_powerctl(wx, true);
phylink_start(wx->phylink);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD);
+ if (wx->num_vfs)
+ wx_ping_all_vfs_with_link_status(wx, false);
}
/**
@@ -723,6 +771,7 @@ static void ngbe_remove(struct pci_dev *pdev)
struct net_device *netdev;
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
@@ -782,6 +831,7 @@ static struct pci_driver ngbe_driver = {
.suspend = ngbe_suspend,
.resume = ngbe_resume,
.shutdown = ngbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(ngbe_driver);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index ec54b18c5fe7..dd01aec87b02 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -8,6 +8,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -64,6 +65,11 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
+ struct wx *wx = phylink_to_wx(config);
+
+ wx->speed = 0;
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -103,6 +109,10 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+
+ wx->speed = speed;
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index f48ed7fc1805..bb70af035c39 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -72,11 +72,13 @@
#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IC_VF_MBOX | \
NGBE_PX_MISC_IEN_GPIO)
#define NGBE_INTR_ALL 0x1FF
--
2.44.0
* [PATCH net-next v4 6/6] net: txgbe: add sriov function support
[not found] <20240604155850.51983-1-mengyuanlou@net-swift.com>
` (4 preceding siblings ...)
2024-06-04 15:57 ` [PATCH net-next v4 5/6] net: ngbe: add sriov function support Mengyuan Lou
@ 2024-06-04 15:57 ` Mengyuan Lou
2024-06-05 9:48 ` Wojciech Drewek
5 siblings, 1 reply; 14+ messages in thread
From: Mengyuan Lou @ 2024-06-04 15:57 UTC (permalink / raw)
To: netdev; +Cc: jiawenwu, duanqiangwen, Mengyuan Lou
Add sriov_configure for driver ops.
Add ndo_vf_ops for txgbe netdev ops.
Add mailbox handler wx_msg_task for txgbe.
Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
---
drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 42 +++++++++++++++++++
drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 1 +
drivers/net/ethernet/wangxun/libwx/wx_type.h | 1 +
.../net/ethernet/wangxun/txgbe/txgbe_irq.c | 25 +++++++++--
.../net/ethernet/wangxun/txgbe/txgbe_main.c | 23 ++++++++++
.../net/ethernet/wangxun/txgbe/txgbe_phy.c | 8 ++++
.../net/ethernet/wangxun/txgbe/txgbe_type.h | 4 +-
7 files changed, 100 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index 6d470cd0f317..375295578cff 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -302,6 +302,15 @@ static void wx_clear_vmvir(struct wx *wx, u32 vf)
wr32(wx, WX_TDM_VLAN_INS(vf), 0);
}
+static void wx_ping_vf(struct wx *wx, int vf)
+{
+ u32 ping = WX_PF_CONTROL_MSG;
+
+ if (wx->vfinfo[vf].clear_to_send)
+ ping |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, &ping, 1, vf);
+}
+
static void wx_set_vf_rx_tx(struct wx *wx, int vf)
{
u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
@@ -975,3 +984,36 @@ void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
}
}
EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
+
+static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
+{
+ wx->vfinfo[vf].link_state = state;
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (netif_running(wx->netdev))
+ wx->vfinfo[vf].link_enable = true;
+ else
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ wx->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ }
+ /* restart the VF */
+ wx->vfinfo[vf].clear_to_send = false;
+ wx_ping_vf(wx, vf);
+
+ wx_set_vf_rx_tx(wx, vf);
+}
+
+void wx_set_all_vfs(struct wx *wx)
+{
+ int i;
+
+ for (i = 0 ; i < wx->num_vfs; i++)
+ wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
+}
+EXPORT_SYMBOL(wx_set_all_vfs);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
index 7e45b3f71a7b..122d9c561ff5 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -9,5 +9,6 @@ int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
void wx_msg_task(struct wx *wx);
void wx_disable_vf_rx_tx(struct wx *wx);
void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
+void wx_set_all_vfs(struct wx *wx);
#endif /* _WX_SRIOV_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b8f0bf93a0fb..1a4830eab763 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -1053,6 +1053,7 @@ struct vf_data_storage {
u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
u16 vlan_count;
+ int link_state;
};
struct vf_macvlans {
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index b3e3605d1edb..e6be98865c2d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -7,6 +7,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
@@ -176,6 +177,24 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
.map = txgbe_misc_irq_domain_map,
};
+static irqreturn_t txgbe_irq_handler(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC) & TXGBE_PX_MISC_IEN_MASK;
+ if (!eicr)
+ return IRQ_NONE;
+ txgbe->eicr = eicr;
+ if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
+ wx_msg_task(txgbe->wx);
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
{
struct txgbe *txgbe = data;
@@ -184,7 +203,7 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
unsigned int sub_irq;
u32 eicr;
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ eicr = txgbe->eicr;
if (eicr & TXGBE_PX_MISC_GPIO) {
sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
handle_nested_irq(sub_irq);
@@ -226,7 +245,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- txgbe->misc.nirqs = 2;
+ txgbe->misc.nirqs = TXGBE_IRQ_MAX;
txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
&txgbe_misc_irq_domain_ops, txgbe);
if (!txgbe->misc.domain)
@@ -241,7 +260,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
else
txgbe->misc.irq = wx->pdev->irq;
- err = request_threaded_irq(txgbe->misc.irq, NULL,
+ err = request_threaded_irq(txgbe->misc.irq, txgbe_irq_handler,
txgbe_misc_irq_handle,
IRQF_ONESHOT,
wx->netdev->name, txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 8c7a74981b90..fbfd281f7e8b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -14,6 +14,8 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
@@ -99,6 +101,12 @@ static void txgbe_up_complete(struct wx *wx)
/* enable transmits */
netif_tx_start_all_queues(netdev);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
+ WX_CFG_PORT_CTL_PFRSTD);
+ /* update setting rx tx for all active vfs */
+ wx_set_all_vfs(wx);
}
static void txgbe_reset(struct wx *wx)
@@ -144,6 +152,16 @@ static void txgbe_disable_device(struct wx *wx)
wx_err(wx, "%s: invalid bus lan id %d\n",
__func__, wx->bus.func);
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ /* update setting rx tx for all active vfs */
+ wx_set_all_vfs(wx);
+ }
+
if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac transmiter */
@@ -268,8 +286,11 @@ static int txgbe_sw_init(struct wx *wx)
/* set default work limits */
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->setup_tc = txgbe_setup_tc;
wx->do_reset = txgbe_do_reset;
+ set_bit(0, &wx->fwd_bitmask);
return 0;
}
@@ -725,6 +746,7 @@ static void txgbe_remove(struct pci_dev *pdev)
struct net_device *netdev;
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
@@ -746,6 +768,7 @@ static struct pci_driver txgbe_driver = {
.probe = txgbe_probe,
.remove = txgbe_remove,
.shutdown = txgbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(txgbe_driver);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 5f502265f0a6..76635d4366e4 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -16,6 +16,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_hw.h"
@@ -179,6 +180,9 @@ static void txgbe_mac_link_down(struct phylink_config *config,
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+ wx->speed = 0;
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -215,6 +219,10 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+
+ wx->speed = speed;
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index f434a7865cb7..e84d10adf4c1 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -71,12 +71,13 @@
#define TXGBE_PX_MISC_ETH_LK BIT(18)
#define TXGBE_PX_MISC_ETH_AN BIT(19)
#define TXGBE_PX_MISC_INT_ERR BIT(20)
+#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define TXGBE_PX_MISC_GPIO BIT(26)
#define TXGBE_PX_MISC_IEN_MASK \
(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
- TXGBE_PX_MISC_GPIO)
+ TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_GPIO)
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
@@ -196,6 +197,7 @@ struct txgbe {
struct gpio_chip *gpio;
unsigned int gpio_irq;
unsigned int link_irq;
+ u32 eicr;
};
#endif /* _TXGBE_TYPE_H_ */
--
2.44.0
* Re: [PATCH net-next v4 1/6] net: libwx: Add malibox api for wangxun pf drivers
2024-06-04 15:57 ` [PATCH net-next v4 1/6] net: libwx: Add malibox api for wangxun pf drivers Mengyuan Lou
@ 2024-06-05 5:03 ` Przemek Kitszel
2024-06-05 7:31 ` Wojciech Drewek
1 sibling, 0 replies; 14+ messages in thread
From: Przemek Kitszel @ 2024-06-05 5:03 UTC (permalink / raw)
To: Mengyuan Lou, netdev; +Cc: jiawenwu, duanqiangwen
On 6/4/24 17:57, Mengyuan Lou wrote:
> Implements the mailbox interfaces for wangxun pf drivers
> ngbe and txgbe.
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
> ---
> drivers/net/ethernet/wangxun/libwx/Makefile | 2 +-
> drivers/net/ethernet/wangxun/libwx/wx_mbx.c | 189 +++++++++++++++++++
> drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 32 ++++
> drivers/net/ethernet/wangxun/libwx/wx_type.h | 5 +
> 4 files changed, 227 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_mbx.c
> create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_mbx.h
>
Please change your future submissions so that the cover letter's message-id
is linked with the actual patches, instead of two separate threads. Please
also post URLs to the previous versions.
> diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
> index 42ccd6e4052e..913a978c9032 100644
> --- a/drivers/net/ethernet/wangxun/libwx/Makefile
> +++ b/drivers/net/ethernet/wangxun/libwx/Makefile
> @@ -4,4 +4,4 @@
>
> obj-$(CONFIG_LIBWX) += libwx.o
>
> -libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
> +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_mbx.o
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
> new file mode 100644
> index 000000000000..e7d7178a1f13
> --- /dev/null
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
> @@ -0,0 +1,189 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
> +#include <linux/pci.h>
> +#include "wx_type.h"
> +#include "wx_mbx.h"
> +
> +/**
> + * wx_obtain_mbx_lock_pf - obtain mailbox lock
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if we obtained the mailbox lock
> + **/
> +static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf)
> +{
> + int ret = -EBUSY;
> + int count = 5;
> + u32 mailbox;
> +
> + while (count--) {
> + /* Take ownership of the buffer */
> + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);
> +
> + /* reserve mailbox for vf use */
> + mailbox = rd32(wx, WX_PXMAILBOX(vf));
> + if (mailbox & WX_PXMAILBOX_PFU) {
> + ret = 0;
> + break;
> + }
> + udelay(10);
the delay is not needed on the last loop iteration
> + }
> +
> + if (ret)
> + wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf);
> +
> + return ret;
> +}
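e.g. return as soon as the lock is taken and only sleep between retries
(untested, just to illustrate; error message kept as is):

        while (count--) {
                /* Take ownership of the buffer */
                wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);

                /* reserve mailbox for vf use */
                mailbox = rd32(wx, WX_PXMAILBOX(vf));
                if (mailbox & WX_PXMAILBOX_PFU)
                        return 0;

                if (count)
                        udelay(10);
        }

        wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf);

        return -EBUSY;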
> +
> +static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
> +{
> + u32 mbvficr = rd32(wx, WX_MBVFICR(index));
> + int ret = -EBUSY;
please invert the flow (and use this as a general rule), so you will have:

        if (err) {
                error_handling();
                return -ESTH;
        }

        normal_code();
        return 0;
> +
> + if (mbvficr & mask) {
> + ret = 0;
> + wr32(wx, WX_MBVFICR(index), mask);
you are checking if any bit of the mask is set, then writing the whole
mask back into the register; not an error per se, but also not an
obvious thing
> + }
> +
> + return ret;
> +}
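A one-line comment would probably be enough to make that explicit, e.g.
(assuming the req/ack bits really are write-1-to-clear, which is what the
code appears to rely on):

        if (mbvficr & mask) {
                /* req/ack bits are write-1-to-clear: ack what we just read */
                wr32(wx, WX_MBVFICR(index), mask);
                ret = 0;
        }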
> +
> +/**
> + * wx_check_for_ack_pf - checks to see if the VF has ACKed
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if the VF has set the Status bit or else -EBUSY
> + **/
> +int wx_check_for_ack_pf(struct wx *wx, u16 vf)
> +{
> + u32 index = vf / 16, vf_bit = vf % 16;
> +
> + return wx_check_for_bit_pf(wx,
> + FIELD_PREP(WX_MBVFICR_VFACK_MASK, BIT(vf_bit)),
> + index);
> +}
> +EXPORT_SYMBOL(wx_check_for_ack_pf);
> +
> +/**
> + * wx_check_for_msg_pf - checks to see if the VF has sent mail
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if the VF has set the Status bit or else -EBUSY
> + **/
> +int wx_check_for_msg_pf(struct wx *wx, u16 vf)
> +{
> + u32 index = vf / 16, vf_bit = vf % 16;
> +
> + return wx_check_for_bit_pf(wx,
> + FIELD_PREP(WX_MBVFICR_VFREQ_MASK, BIT(vf_bit)),
> + index);
> +}
> +EXPORT_SYMBOL(wx_check_for_msg_pf);
> +
> +/**
> + * wx_write_mbx_pf - Places a message in the mailbox
> + * @wx: pointer to the HW structure
> + * @msg: The message buffer
> + * @size: Length of buffer
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if it successfully copied message into the buffer
s/return:/Return:/
s/SUCCESS/0/ ;)
> + **/
> +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
> +{
> + struct wx_mbx_info *mbx = &wx->mbx;
> + int ret, i;
> +
> + if (size > mbx->size) {
> + wx_err(wx, "Invalid mailbox message size %d", size);
> + ret = -EINVAL;
> + goto out_no_write;
> + }
> +
> + /* lock the mailbox to prevent pf/vf race condition */
> + ret = wx_obtain_mbx_lock_pf(wx, vf);
> + if (ret)
> + goto out_no_write;
> +
> + /* flush msg and acks as we are overwriting the message buffer */
how many messages fit into the mbox?
> + wx_check_for_msg_pf(wx, vf);
> + wx_check_for_ack_pf(wx, vf);
> +
> + /* copy the caller specified message to the mailbox memory buffer */
> + for (i = 0; i < size; i++)
> + wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);
> +
> + /* Interrupt VF to tell it a message has been sent and release buffer*/
> + /* set mirrored mailbox flags */
> + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
> + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);
> +
> +out_no_write:
> + return ret;
> +}
> +EXPORT_SYMBOL(wx_write_mbx_pf);
> +
> +/**
> + * wx_read_mbx_pf - Read a message from the mailbox
> + * @wx: pointer to the HW structure
> + * @msg: The message buffer
> + * @size: Length of buffer
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if VF copy a message from the mailbox buffer.
> + **/
> +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
> +{
> + struct wx_mbx_info *mbx = &wx->mbx;
> + int ret;
> + u16 i;
> +
> + /* limit read to size of mailbox */
> + if (size > mbx->size)
> + size = mbx->size;
> +
> + /* lock the mailbox to prevent pf/vf race condition */
> + ret = wx_obtain_mbx_lock_pf(wx, vf);
> + if (ret)
> + goto out_no_read;
> +
> + /* copy the message to the mailbox memory buffer */
> + for (i = 0; i < size; i++)
> + msg[i] = rd32a(wx, WX_PXMBMEM(vf), i);
> +
> + /* Acknowledge the message and release buffer */
> + /* set mirrored mailbox flags */
> + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK);
> + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK);
> +out_no_read:
> + return ret;
> +}
> +EXPORT_SYMBOL(wx_read_mbx_pf);
> +
> +/**
> + * wx_check_for_rst_pf - checks to see if the VF has reset
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if the VF has set the Status bit or else -EBUSY
> + **/
> +int wx_check_for_rst_pf(struct wx *wx, u16 vf)
> +{
> + u32 reg_offset = vf / 32;
> + u32 vf_shift = vf % 32;
> + int ret = -EBUSY;
> + u32 vflre = 0;
> +
> + vflre = rd32(wx, WX_VFLRE(reg_offset));
> +
> + if (vflre & BIT(vf_shift)) {
ditto error vs normal flow, please apply to the whole series
> + ret = 0;
> + wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift));
> + }
> +
> + return ret;
> +}
> +EXPORT_SYMBOL(wx_check_for_rst_pf);
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
> new file mode 100644
> index 000000000000..1579096fb6ad
> --- /dev/null
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
> @@ -0,0 +1,32 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
> +#ifndef _WX_MBX_H_
> +#define _WX_MBX_H_
> +
> +#define WX_VXMAILBOX_SIZE 15
> +
> +/* PF Registers */
> +#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */
> +#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */
> +#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
> +#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
> +
> +#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
> +
> +#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
> +#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */
> +
> +/* SR-IOV specific macros */
> +#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */
> +#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
> +#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
> +
> +#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
> +
> +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
> +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
> +int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
> +int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
> +int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
> +
> +#endif /* _WX_MBX_H_ */
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> index 5aaf7b1fa2db..caa2f4157834 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> @@ -674,6 +674,10 @@ struct wx_bus_info {
> u16 device;
> };
>
> +struct wx_mbx_info {
> + u16 size;
> +};
> +
> struct wx_thermal_sensor_data {
> s16 temp;
> s16 alarm_thresh;
> @@ -995,6 +999,7 @@ struct wx {
> struct pci_dev *pdev;
> struct net_device *netdev;
> struct wx_bus_info bus;
> + struct wx_mbx_info mbx;
> struct wx_mac_info mac;
> enum em_mac_type mac_type;
> enum sp_media_type media_type;
* Re: [PATCH net-next v4 1/6] net: libwx: Add malibox api for wangxun pf drivers
2024-06-04 15:57 ` [PATCH net-next v4 1/6] net: libwx: Add malibox api for wangxun pf drivers Mengyuan Lou
2024-06-05 5:03 ` Przemek Kitszel
@ 2024-06-05 7:31 ` Wojciech Drewek
1 sibling, 0 replies; 14+ messages in thread
From: Wojciech Drewek @ 2024-06-05 7:31 UTC (permalink / raw)
To: Mengyuan Lou, netdev; +Cc: jiawenwu, duanqiangwen
On 04.06.2024 17:57, Mengyuan Lou wrote:
> Implements the mailbox interfaces for wangxun pf drivers
> ngbe and txgbe.
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
> ---
> drivers/net/ethernet/wangxun/libwx/Makefile | 2 +-
> drivers/net/ethernet/wangxun/libwx/wx_mbx.c | 189 +++++++++++++++++++
> drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 32 ++++
> drivers/net/ethernet/wangxun/libwx/wx_type.h | 5 +
> 4 files changed, 227 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_mbx.c
> create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_mbx.h
>
> diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
> index 42ccd6e4052e..913a978c9032 100644
> --- a/drivers/net/ethernet/wangxun/libwx/Makefile
> +++ b/drivers/net/ethernet/wangxun/libwx/Makefile
> @@ -4,4 +4,4 @@
>
> obj-$(CONFIG_LIBWX) += libwx.o
>
> -libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
> +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_mbx.o
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
> new file mode 100644
> index 000000000000..e7d7178a1f13
> --- /dev/null
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
> @@ -0,0 +1,189 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
> +#include <linux/pci.h>
> +#include "wx_type.h"
> +#include "wx_mbx.h"
> +
> +/**
> + * wx_obtain_mbx_lock_pf - obtain mailbox lock
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if we obtained the mailbox lock
> + **/
> +static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf)
> +{
> + int ret = -EBUSY;
> + int count = 5;
> + u32 mailbox;
> +
> + while (count--) {
> + /* Take ownership of the buffer */
> + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);
> +
> + /* reserve mailbox for vf use */
> + mailbox = rd32(wx, WX_PXMAILBOX(vf));
> + if (mailbox & WX_PXMAILBOX_PFU) {
> + ret = 0;
> + break;
> + }
> + udelay(10);
> + }
> +
> + if (ret)
> + wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf);
> +
> + return ret;
> +}
> +
> +static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
> +{
> + u32 mbvficr = rd32(wx, WX_MBVFICR(index));
> + int ret = -EBUSY;
@ret is unnecessary...
> +
> + if (mbvficr & mask) {
> + ret = 0;
> + wr32(wx, WX_MBVFICR(index), mask);
return 0 here...
> + }
> +
> + return ret;
and return -EBUSY here
> +}
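i.e. something along these lines (just a sketch of the suggested flow,
behaviour unchanged):

        static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
        {
                u32 mbvficr = rd32(wx, WX_MBVFICR(index));

                if (mbvficr & mask) {
                        /* clear the bit we just consumed */
                        wr32(wx, WX_MBVFICR(index), mask);
                        return 0;
                }

                return -EBUSY;
        }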
> +
> +/**
> + * wx_check_for_ack_pf - checks to see if the VF has ACKed
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if the VF has set the Status bit or else -EBUSY
> + **/
> +int wx_check_for_ack_pf(struct wx *wx, u16 vf)
> +{
> + u32 index = vf / 16, vf_bit = vf % 16;
> +
> + return wx_check_for_bit_pf(wx,
> + FIELD_PREP(WX_MBVFICR_VFACK_MASK, BIT(vf_bit)),
> + index);
> +}
> +EXPORT_SYMBOL(wx_check_for_ack_pf);
> +
> +/**
> + * wx_check_for_msg_pf - checks to see if the VF has sent mail
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if the VF has set the Status bit or else -EBUSY
> + **/
> +int wx_check_for_msg_pf(struct wx *wx, u16 vf)
> +{
> + u32 index = vf / 16, vf_bit = vf % 16;
> +
> + return wx_check_for_bit_pf(wx,
> + FIELD_PREP(WX_MBVFICR_VFREQ_MASK, BIT(vf_bit)),
> + index);
> +}
> +EXPORT_SYMBOL(wx_check_for_msg_pf);
> +
> +/**
> + * wx_write_mbx_pf - Places a message in the mailbox
> + * @wx: pointer to the HW structure
> + * @msg: The message buffer
> + * @size: Length of buffer
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if it successfully copied message into the buffer
> + **/
> +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
> +{
> + struct wx_mbx_info *mbx = &wx->mbx;
> + int ret, i;
> +
> + if (size > mbx->size) {
> + wx_err(wx, "Invalid mailbox message size %d", size);
> + ret = -EINVAL;
> + goto out_no_write;
you don't need goto. just return -EINVAL here...
> + }
> +
> + /* lock the mailbox to prevent pf/vf race condition */
> + ret = wx_obtain_mbx_lock_pf(wx, vf);
> + if (ret)
> + goto out_no_write;
return @ret here...
> +
> + /* flush msg and acks as we are overwriting the message buffer */
> + wx_check_for_msg_pf(wx, vf);
> + wx_check_for_ack_pf(wx, vf);
> +
> + /* copy the caller specified message to the mailbox memory buffer */
> + for (i = 0; i < size; i++)
> + wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);
> +
> + /* Interrupt VF to tell it a message has been sent and release buffer*/
> + /* set mirrored mailbox flags */
> + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
> + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);
> +
> +out_no_write:
> + return ret;
and return 0 here
> +}
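With the label dropped the whole function could read roughly like this
(sketch only, same logic otherwise):

        int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
        {
                struct wx_mbx_info *mbx = &wx->mbx;
                int ret, i;

                if (size > mbx->size) {
                        wx_err(wx, "Invalid mailbox message size %d", size);
                        return -EINVAL;
                }

                /* lock the mailbox to prevent pf/vf race condition */
                ret = wx_obtain_mbx_lock_pf(wx, vf);
                if (ret)
                        return ret;

                /* flush msg and acks as we are overwriting the message buffer */
                wx_check_for_msg_pf(wx, vf);
                wx_check_for_ack_pf(wx, vf);

                /* copy the caller specified message to the mailbox memory buffer */
                for (i = 0; i < size; i++)
                        wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);

                /* set mirrored mailbox flags and interrupt the VF */
                wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
                wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);

                return 0;
        }

(same idea for wx_read_mbx_pf below)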
> +EXPORT_SYMBOL(wx_write_mbx_pf);
> +
> +/**
> + * wx_read_mbx_pf - Read a message from the mailbox
> + * @wx: pointer to the HW structure
> + * @msg: The message buffer
> + * @size: Length of buffer
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if VF copy a message from the mailbox buffer.
> + **/
> +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
> +{
> + struct wx_mbx_info *mbx = &wx->mbx;
> + int ret;
> + u16 i;
> +
> + /* limit read to size of mailbox */
> + if (size > mbx->size)
> + size = mbx->size;
> +
> + /* lock the mailbox to prevent pf/vf race condition */
> + ret = wx_obtain_mbx_lock_pf(wx, vf);
> + if (ret)
> + goto out_no_read;
just return @ret...
> +
> + /* copy the message to the mailbox memory buffer */
> + for (i = 0; i < size; i++)
> + msg[i] = rd32a(wx, WX_PXMBMEM(vf), i);
> +
> + /* Acknowledge the message and release buffer */
> + /* set mirrored mailbox flags */
> + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK);
> + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK);
> +out_no_read:
> + return ret;
and return 0
> +}
> +EXPORT_SYMBOL(wx_read_mbx_pf);
> +
> +/**
> + * wx_check_for_rst_pf - checks to see if the VF has reset
> + * @wx: pointer to the HW structure
> + * @vf: the VF index
> + *
> + * return: return SUCCESS if the VF has set the Status bit or else -EBUSY
> + **/
> +int wx_check_for_rst_pf(struct wx *wx, u16 vf)
> +{
> + u32 reg_offset = vf / 32;
> + u32 vf_shift = vf % 32;
> + int ret = -EBUSY;
Again @ret is not needed
> + u32 vflre = 0;
> +
> + vflre = rd32(wx, WX_VFLRE(reg_offset));
> +
> + if (vflre & BIT(vf_shift)) {
> + ret = 0;
> + wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift));
> + }
> +
> + return ret;
> +}
> +EXPORT_SYMBOL(wx_check_for_rst_pf);
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
> new file mode 100644
> index 000000000000..1579096fb6ad
> --- /dev/null
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
> @@ -0,0 +1,32 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
> +#ifndef _WX_MBX_H_
> +#define _WX_MBX_H_
> +
> +#define WX_VXMAILBOX_SIZE 15
> +
> +/* PF Registers */
> +#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */
> +#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */
> +#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
> +#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
> +
> +#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
> +
> +#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
> +#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */
> +
> +/* SR-IOV specific macros */
> +#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */
> +#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
> +#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
> +
> +#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
> +
> +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
> +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
> +int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
> +int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
> +int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
> +
> +#endif /* _WX_MBX_H_ */
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> index 5aaf7b1fa2db..caa2f4157834 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> @@ -674,6 +674,10 @@ struct wx_bus_info {
> u16 device;
> };
>
> +struct wx_mbx_info {
> + u16 size;
> +};
> +
> struct wx_thermal_sensor_data {
> s16 temp;
> s16 alarm_thresh;
> @@ -995,6 +999,7 @@ struct wx {
> struct pci_dev *pdev;
> struct net_device *netdev;
> struct wx_bus_info bus;
> + struct wx_mbx_info mbx;
> struct wx_mac_info mac;
> enum em_mac_type mac_type;
> enum sp_media_type media_type;
* Re: [PATCH net-next v4 2/6] net: libwx: Add sriov api for wangxun nics
2024-06-04 15:57 ` [PATCH net-next v4 2/6] net: libwx: Add sriov api for wangxun nics Mengyuan Lou
@ 2024-06-05 7:42 ` Wojciech Drewek
0 siblings, 0 replies; 14+ messages in thread
From: Wojciech Drewek @ 2024-06-05 7:42 UTC (permalink / raw)
To: Mengyuan Lou, netdev; +Cc: jiawenwu, duanqiangwen
On 04.06.2024 17:57, Mengyuan Lou wrote:
> Implement sriov_configure interface for wangxun nics in libwx.
you could elaborate a bit more
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
> ---
> drivers/net/ethernet/wangxun/libwx/Makefile | 2 +-
> drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 4 +
> drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 221 ++++++++++++++++++
> drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 10 +
> drivers/net/ethernet/wangxun/libwx/wx_type.h | 38 +++
> 5 files changed, 274 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> create mode 100644 drivers/net/ethernet/wangxun/libwx/wx_sriov.h
>
> diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
> index 913a978c9032..5b996d973d29 100644
> --- a/drivers/net/ethernet/wangxun/libwx/Makefile
> +++ b/drivers/net/ethernet/wangxun/libwx/Makefile
> @@ -4,4 +4,4 @@
>
> obj-$(CONFIG_LIBWX) += libwx.o
>
> -libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_mbx.o
> +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_mbx.o wx_sriov.o
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
> index 1579096fb6ad..3c70654a8b14 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
> @@ -23,6 +23,10 @@
>
> #define WX_VT_MSGINFO_MASK GENMASK(23, 16)
>
> +enum wxvf_xcast_modes {
> + WXVF_XCAST_MODE_NONE = 0,
> +};
> +
> int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
> int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
> int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> new file mode 100644
> index 000000000000..032b75f23460
> --- /dev/null
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> @@ -0,0 +1,221 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
> +
> +#include <linux/etherdevice.h>
> +#include <linux/pci.h>
> +
> +#include "wx_type.h"
> +#include "wx_mbx.h"
> +#include "wx_sriov.h"
> +
> +static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
> +{
> + unsigned int vfn = (event_mask & GENMASK(5, 0));
> + struct wx *wx = pci_get_drvdata(pdev);
> +
> + bool enable = ((event_mask & BIT(31)) != 0);
> +
> + if (enable)
> + eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
> +}
> +
> +static void wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
> +{
> + struct vf_macvlans *mv_list;
> + int num_vf_macvlans, i;
> +
> + /* Initialize list of VF macvlans */
> + INIT_LIST_HEAD(&wx->vf_mvs.l);
> +
> + num_vf_macvlans = wx->mac.num_rar_entries -
> + (WX_MAX_PF_MACVLANS + 1 + num_vfs);
> + if (!num_vf_macvlans)
> + return;
> +
> + mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
> + GFP_KERNEL);
> + if (mv_list) {
> + for (i = 0; i < num_vf_macvlans; i++) {
> + mv_list[i].vf = -1;
> + mv_list[i].free = true;
> + list_add(&mv_list[i].l, &wx->vf_mvs.l);
> + }
> + wx->mv_list = mv_list;
> + }
> +}
> +
> +static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
> +{
> + u32 value = 0;
> + int i;
> +
> + set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
> + wx_err(wx, "SR-IOV enabled with %d VFs\n", num_vfs);
> +
> + /* Enable VMDq flag so device will be set in VM mode */
> + set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
> + if (!wx->ring_feature[RING_F_VMDQ].limit)
> + wx->ring_feature[RING_F_VMDQ].limit = 1;
> + wx->ring_feature[RING_F_VMDQ].offset = num_vfs;
> +
> + wx_alloc_vf_macvlans(wx, num_vfs);
> + /* Initialize default switching mode VEB */
> + wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);
> +
> + /* If call to enable VFs succeeded then allocate memory
> + * for per VF control structures.
> + */
> + wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), GFP_KERNEL);
> + if (!wx->vfinfo)
> + return -ENOMEM;
> +
> + /* enable spoof checking for all VFs */
> + for (i = 0; i < num_vfs; i++) {
> + /* enable spoof checking for all VFs */
> + wx->vfinfo[i].spoofchk_enabled = true;
> + wx->vfinfo[i].link_enable = true;
> + /* Untrust all VFs */
> + wx->vfinfo[i].trusted = false;
> + /* set the default xcast mode */
> + wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
> + }
> +
> + if (wx->mac.type == wx_mac_sp) {
> + if (num_vfs < 32)
> + value = WX_CFG_PORT_CTL_NUM_VT_32;
> + else
> + value = WX_CFG_PORT_CTL_NUM_VT_64;
> + } else {
> + value = WX_CFG_PORT_CTL_NUM_VT_8;
> + }
> + wr32m(wx, WX_CFG_PORT_CTL,
> + WX_CFG_PORT_CTL_NUM_VT_MASK,
> + value);
> +
> + return 0;
> +}
> +
> +static void wx_sriov_reinit(struct wx *wx)
> +{
> + rtnl_lock();
> + wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
> + rtnl_unlock();
> +}
> +
> +int wx_disable_sriov(struct wx *wx)
> +{
> + /* If our VFs are assigned we cannot shut down SR-IOV
> + * without causing issues, so just leave the hardware
> + * available but disabled
> + */
> + if (pci_vfs_assigned(wx->pdev)) {
> + wx_err(wx, "Unloading driver while VFs are assigned.\n");
> + return -EPERM;
> + }
> + /* disable iov and allow time for transactions to clear */
> + pci_disable_sriov(wx->pdev);
> +
> + /* set num VFs to 0 to prevent access to vfinfo */
> + wx->num_vfs = 0;
> +
> + /* free VF control structures */
> + kfree(wx->vfinfo);
> + wx->vfinfo = NULL;
> +
> + /* free macvlan list */
> + kfree(wx->mv_list);
> + wx->mv_list = NULL;
> +
> + /* set default pool back to 0 */
> + wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
> + wx->ring_feature[RING_F_VMDQ].offset = 0;
> +
> + clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
> + /* Disable VMDq flag so device will be set in VM mode */
> + if (wx->ring_feature[RING_F_VMDQ].limit == 1)
> + clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
> +
> + return 0;
> +}
> +EXPORT_SYMBOL(wx_disable_sriov);
> +
> +static int wx_pci_sriov_enable(struct pci_dev *dev,
> + int num_vfs)
> +{
> + struct wx *wx = pci_get_drvdata(dev);
> + int err = 0, i;
> +
> + err = __wx_enable_sriov(wx, num_vfs);
> + if (err)
> + goto err_out;
> +
> + wx->num_vfs = num_vfs;
> + for (i = 0; i < wx->num_vfs; i++)
> + wx_vf_configuration(dev, (i | BIT(31)));
> +
> + /* reset before enabling SRIOV to avoid mailbox issues */
> + wx_sriov_reinit(wx);
> +
> + err = pci_enable_sriov(dev, num_vfs);
> + if (err) {
Shouldn't you unroll previous configuration if pci_enable_sriov fails?
> + wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
> + goto err_out;
> + }
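For illustration, the unwind asked about above could look roughly like this, reusing only helpers this patch already adds (wx_disable_sriov() frees vfinfo/mv_list and clears the flags, wx_sriov_reinit() restores the queue layout); a sketch only, not tested:

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
		/* roll back the software state set up earlier in this function */
		wx_disable_sriov(wx);
		wx_sriov_reinit(wx);
		return err;
	}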
> +
> + return num_vfs;
> +err_out:
> + return err;
> +}
> +
> +static int wx_pci_sriov_disable(struct pci_dev *dev)
> +{
> + struct wx *wx = pci_get_drvdata(dev);
> + int err;
> +
> + err = wx_disable_sriov(wx);
> +
> + /* reset after disabling SR-IOV to avoid mailbox issues */
> + if (!err)
> + wx_sriov_reinit(wx);
> +
> + return err;
> +}
> +
> +static int wx_check_sriov_allowed(struct wx *wx, int num_vfs)
> +{
> + u16 max_vfs;
> +
> + max_vfs = (wx->mac.type == wx_mac_sp) ? 63 : 7;
> +
> + if (num_vfs > max_vfs)
> + return -EPERM;
> +
> + return 0;
> +}
> +
> +int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
> +{
> + struct wx *wx = pci_get_drvdata(pdev);
> + int err;
> +
> + err = wx_check_sriov_allowed(wx, num_vfs);
> + if (err)
> + return err;
> +
> + if (!num_vfs) {
> + if (!pci_vfs_assigned(pdev)) {
> + wx_pci_sriov_disable(pdev);
> + return 0;
> + }
> +
> + wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
> + return -EBUSY;
> + }
> +
> + err = wx_pci_sriov_enable(pdev, num_vfs);
> + if (err)
> + return err;
> +
> + return num_vfs;
> +}
> +EXPORT_SYMBOL(wx_pci_sriov_configure);
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> new file mode 100644
> index 000000000000..17b547ae8862
> --- /dev/null
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> @@ -0,0 +1,10 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
> +
> +#ifndef _WX_SRIOV_H_
> +#define _WX_SRIOV_H_
> +
> +int wx_disable_sriov(struct wx *wx);
> +int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
> +
> +#endif /* _WX_SRIOV_H_ */
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> index caa2f4157834..7dad022e01e9 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> @@ -18,6 +18,7 @@
> /* MSI-X capability fields masks */
> #define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
> #define WX_PCI_LINK_STATUS 0xB2
> +#define WX_MAX_PF_MACVLANS 15
>
> /**************** Global Registers ****************************/
> /* chip control Registers */
> @@ -88,6 +89,9 @@
> #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
> #define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
>
> +#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
> +#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
> +#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
>
> /* GPIO Registers */
> #define WX_GPIO_DR 0x14800
> @@ -161,6 +165,7 @@
> /******************************* PSR Registers *******************************/
> /* psr control */
> #define WX_PSR_CTL 0x15000
> +#define WX_PSR_VM_CTL 0x151B0
> /* Header split receive */
> #define WX_PSR_CTL_SW_EN BIT(18)
> #define WX_PSR_CTL_RSC_ACK BIT(17)
> @@ -181,6 +186,7 @@
> /* mcasst/ucast overflow tbl */
> #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
> #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
> +#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
>
> /* VM L2 contorl */
> #define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
> @@ -943,6 +949,7 @@ struct wx_ring_feature {
> enum wx_ring_f_enum {
> RING_F_NONE = 0,
> RING_F_RSS,
> + RING_F_VMDQ,
> RING_F_ARRAY_SIZE /* must be last in enum set */
> };
>
> @@ -990,9 +997,34 @@ enum wx_state {
> WX_STATE_RESETTING,
> WX_STATE_NBITS, /* must be last */
> };
> +
> +struct vf_data_storage {
> + struct pci_dev *vfdev;
> + unsigned char vf_mac_addr[ETH_ALEN];
> + bool spoofchk_enabled;
> + bool link_enable;
> + bool trusted;
> + int xcast_mode;
> +};
> +
> +struct vf_macvlans {
> + struct list_head l;
'l' is too short a name, IMO.
> + int vf;
> + bool free;
> + bool is_macvlan;
> + u8 vf_macvlan[ETH_ALEN];
> +};
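Following up on the naming remark above, purely as a sketch:

struct vf_macvlans {
	struct list_head list;	/* rather than 'l' */
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};

with the users updated accordingly, e.g. INIT_LIST_HEAD(&wx->vf_mvs.list) and list_add(&mv_list[i].list, &wx->vf_mvs.list).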
> +
> +enum wx_pf_flags {
> + WX_FLAG_VMDQ_ENABLED,
> + WX_FLAG_SRIOV_ENABLED,
> + WX_PF_FLAGS_NBITS /* must be last */
> +};
> +
> struct wx {
> unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
> DECLARE_BITMAP(state, WX_STATE_NBITS);
> + DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
>
> void *priv;
> u8 __iomem *hw_addr;
> @@ -1082,6 +1114,12 @@ struct wx {
> u64 hw_csum_rx_error;
> u64 alloc_rx_buff_failed;
>
> + unsigned int num_vfs;
> + struct vf_data_storage *vfinfo;
> + struct vf_macvlans vf_mvs;
> + struct vf_macvlans *mv_list;
> +
> + int (*setup_tc)(struct net_device *netdev, u8 tc);
> void (*do_reset)(struct net_device *netdev);
> };
>
* Re: [PATCH net-next v4 3/6] net: libwx: Redesign flow when sriov is enabled
2024-06-04 15:57 ` [PATCH net-next v4 3/6] net: libwx: Redesign flow when sriov is enabled Mengyuan Lou
@ 2024-06-05 8:54 ` Wojciech Drewek
0 siblings, 0 replies; 14+ messages in thread
From: Wojciech Drewek @ 2024-06-05 8:54 UTC (permalink / raw)
To: Mengyuan Lou, netdev; +Cc: jiawenwu, duanqiangwen
On 04.06.2024 17:57, Mengyuan Lou wrote:
> Reallocate queue and int resources when sriov is enabled.
> Redefine macro VMDQ to make it work in VT mode.
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
> ---
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
> drivers/net/ethernet/wangxun/libwx/wx_hw.c | 293 ++++++++++++++++++-
> drivers/net/ethernet/wangxun/libwx/wx_lib.c | 129 +++++++-
> drivers/net/ethernet/wangxun/libwx/wx_type.h | 37 ++-
> 3 files changed, 442 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
> index 7c4b6881a93f..8affcb9f7dbb 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
> @@ -10,6 +10,7 @@
>
> #include "wx_type.h"
> #include "wx_lib.h"
> +#include "wx_sriov.h"
> #include "wx_hw.h"
>
> static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
> @@ -804,11 +805,28 @@ static void wx_sync_mac_table(struct wx *wx)
> }
> }
>
> +static void wx_full_sync_mac_table(struct wx *wx)
> +{
> + int i;
> +
> + for (i = 0; i < wx->mac.num_rar_entries; i++) {
> + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
> + wx_set_rar(wx, i,
> + wx->mac_table[i].addr,
> + wx->mac_table[i].pools,
> + WX_PSR_MAC_SWC_AD_H_AV);
> + } else {
> + wx_clear_rar(wx, i);
> + }
> + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
> + }
> +}
> +
> /* this function destroys the first RAR entry */
> void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
> {
> memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
> - wx->mac_table[0].pools = 1ULL;
> + wx->mac_table[0].pools = BIT(VMDQ_P(0));
> wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
> wx_set_rar(wx, 0, wx->mac_table[0].addr,
> wx->mac_table[0].pools,
> @@ -1046,6 +1064,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
> wx_dbg(wx, "Update mc addr list Complete\n");
> }
>
> +static void wx_restore_vf_multicasts(struct wx *wx)
> +{
> + u32 i, j, vector_bit, vector_reg;
> + struct vf_data_storage *vfinfo;
> +
> + for (i = 0; i < wx->num_vfs; i++) {
> + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i));
> +
> + vfinfo = &wx->vfinfo[i];
> + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
> + wx->addr_ctrl.mta_in_use++;
> + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & GENMASK(6, 0);
> + vector_bit = vfinfo->vf_mc_hashes[j] & GENMASK(4, 0);
> + wr32m(wx, WX_PSR_MC_TBL(vector_reg),
> + BIT(vector_bit), BIT(vector_bit));
> + /* errata 5: maintain a copy of the reg table conf */
> + wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
> + }
> + if (vfinfo->num_vf_mc_hashes)
> + vmolr |= WX_PSR_VM_L2CTL_ROMPE;
> + else
> + vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
> + wr32(wx, WX_PSR_VM_L2CTL(i), vmolr);
> + }
> +
> + /* Restore any VF macvlans */
> + wx_full_sync_mac_table(wx);
> +}
> +
> /**
> * wx_write_mc_addr_list - write multicast addresses to MTA
> * @netdev: network interface device structure
> @@ -1063,6 +1110,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev)
>
> wx_update_mc_addr_list(wx, netdev);
>
> + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
> + wx_restore_vf_multicasts(wx);
> +
> return netdev_mc_count(netdev);
> }
>
> @@ -1083,7 +1133,7 @@ int wx_set_mac(struct net_device *netdev, void *p)
> if (retval)
> return retval;
>
> - wx_del_mac_filter(wx, wx->mac.addr, 0);
> + wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0));
> eth_hw_addr_set(netdev, addr->sa_data);
> memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
>
> @@ -1178,6 +1228,10 @@ static int wx_hpbthresh(struct wx *wx)
> /* Calculate delay value for device */
> dv_id = WX_DV(link, tc);
>
> + /* Loopback switch introduces additional latency */
> + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
> + dv_id += WX_B2BT(tc);
> +
> /* Delay value is calculated in bit times convert to KB */
> kb = WX_BT2KB(dv_id);
> rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
> @@ -1233,12 +1287,106 @@ static void wx_pbthresh_setup(struct wx *wx)
> wx->fc.low_water = 0;
> }
>
> +static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
> +{
> + u32 pfvfspoof, reg_offset, vf_shift;
> +
> + vf_shift = vf % 32;
> + reg_offset = vf / 32;
> +
> + pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset));
> + if (enable)
> + pfvfspoof |= BIT(vf_shift);
> + else
> + pfvfspoof &= ~BIT(vf_shift);
> + wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
> +}
> +
> +static int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
> +{
> + u32 index = vf / 32, vf_bit = vf % 32;
> + struct wx *wx = netdev_priv(netdev);
> + u32 regval;
> +
> + if (vf >= wx->num_vfs)
> + return -EINVAL;
> +
> + wx->vfinfo[vf].spoofchk_enabled = setting;
> +
> + regval = (setting << vf_bit);
> + wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval);
> +
> + if (wx->vfinfo[vf].vlan_count)
> + wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval);
> +
> + return 0;
> +}
> +
> +static void wx_configure_virtualization(struct wx *wx)
> +{
> + u16 pool = wx->num_rx_pools;
> + u32 reg_offset, vf_shift;
> + u32 i;
> +
> + if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
> + return;
> +
> + wr32m(wx, WX_PSR_VM_CTL,
> + WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN,
> + FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) |
> + WX_PSR_VM_CTL_REPLEN);
> + while (pool--)
> + wr32m(wx, WX_PSR_VM_L2CTL(pool), WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
> +
> + if (wx->mac.type == wx_mac_sp) {
> + vf_shift = VMDQ_P(0) % 32;
> + reg_offset = VMDQ_P(0) / 32;
> +
> + /* Enable only the PF pools for Tx/Rx */
> + wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift));
> + wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1);
> + wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift));
> + wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1);
> + } else {
> + vf_shift = BIT(VMDQ_P(0));
> + /* Enable only the PF pools for Tx/Rx */
> + wr32(wx, WX_RDM_VF_RE(0), vf_shift);
> + wr32(wx, WX_TDM_VF_TE(0), vf_shift);
> + }
> +
> + /* clear VLAN promisc flag so VFTA will be updated if necessary */
> + clear_bit(WX_FLAG2_VLAN_PROMISC, wx->flags);
> +
> + for (i = 0; i < wx->num_vfs; i++) {
> + if (!wx->vfinfo[i].spoofchk_enabled)
> + wx_set_vf_spoofchk(wx->netdev, i, false);
> + /* enable ethertype anti spoofing if hw supports it */
> + wx_set_ethertype_anti_spoofing(wx, true, i);
> + }
> +}
> +
> static void wx_configure_port(struct wx *wx)
> {
> u32 value, i;
>
> - value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
> + if (wx->mac.type == wx_mac_em) {
> + value = (wx->num_vfs == 0) ?
> + WX_CFG_PORT_CTL_NUM_VT_NONE :
> + WX_CFG_PORT_CTL_NUM_VT_8;
> + } else {
> + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
> + if (wx->ring_feature[RING_F_RSS].indices == 4)
> + value = WX_CFG_PORT_CTL_NUM_VT_32;
> + else
> + value = WX_CFG_PORT_CTL_NUM_VT_64;
> + } else {
> + value = 0;
> + }
> + }
> +
> + value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
> wr32m(wx, WX_CFG_PORT_CTL,
> + WX_CFG_PORT_CTL_NUM_VT_MASK |
> WX_CFG_PORT_CTL_D_VLAN |
> WX_CFG_PORT_CTL_QINQ,
> value);
> @@ -1297,6 +1445,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable)
> }
> }
>
> +static void wx_vlan_promisc_enable(struct wx *wx)
> +{
> + u32 vlnctrl, i, vind, bits, reg_idx;
> +
> + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
> + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
> + /* we need to keep the VLAN filter on in SRIOV */
> + vlnctrl |= WX_PSR_VLAN_CTL_VFE;
> + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
> + } else {
> + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
> + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
> + return;
> + }
> + /* We are already in VLAN promisc, nothing to do */
> + if (test_bit(WX_FLAG2_VLAN_PROMISC, wx->flags))
> + return;
> + /* Set flag so we don't redo unnecessary work */
> + set_bit(WX_FLAG2_VLAN_PROMISC, wx->flags);
> + /* Add PF to all active pools */
> + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
> + wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
> + reg_idx = VMDQ_P(0) / 32;
> + vind = VMDQ_P(0) % 32;
> + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
> + bits |= BIT(vind);
> + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
> + }
> + /* Set all bits in the VLAN filter table array */
> + for (i = 0; i < wx->mac.vft_size; i++)
> + wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX);
> +}
> +
> +static void wx_scrub_vfta(struct wx *wx)
> +{
> + u32 i, vid, bits, vfta, vind, vlvf, reg_idx;
> +
> + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
> + wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
> + vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX);
> + /* pull VLAN ID from VLVF */
> + vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN;
> + if (vlvf & WX_PSR_VLAN_SWC_VIEN) {
> + /* if PF is part of this then continue */
> + if (test_bit(vid, wx->active_vlans))
> + continue;
> + }
> + /* remove PF from the pool */
> + reg_idx = VMDQ_P(0) / 32;
> + vind = VMDQ_P(0) % 32;
> + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
> + bits &= ~BIT(vind);
> + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
> + }
> + /* extract values from vft_shadow and write back to VFTA */
> + for (i = 0; i < wx->mac.vft_size; i++) {
> + vfta = wx->mac.vft_shadow[i];
> + wr32(wx, WX_PSR_VLAN_TBL(i), vfta);
> + }
> +}
> +
> +static void wx_vlan_promisc_disable(struct wx *wx)
> +{
> + u32 vlnctrl;
> +
> + /* configure vlan filtering */
> + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
> + vlnctrl |= WX_PSR_VLAN_CTL_VFE;
> + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
> + /* We are not in VLAN promisc, nothing to do */
> + if (!test_bit(WX_FLAG2_VLAN_PROMISC, wx->flags))
> + return;
> + /* Set flag so we don't redo unnecessary work */
> + clear_bit(WX_FLAG2_VLAN_PROMISC, wx->flags);
> + wx_scrub_vfta(wx);
> +}
> +
> void wx_set_rx_mode(struct net_device *netdev)
> {
> struct wx *wx = netdev_priv(netdev);
> @@ -1309,7 +1534,7 @@ void wx_set_rx_mode(struct net_device *netdev)
> /* Check for Promiscuous and All Multicast modes */
> fctrl = rd32(wx, WX_PSR_CTL);
> fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
> - vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
> + vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));
> vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
> WX_PSR_VM_L2CTL_MPE |
> WX_PSR_VM_L2CTL_ROPE |
> @@ -1330,7 +1555,10 @@ void wx_set_rx_mode(struct net_device *netdev)
> fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
> /* pf don't want packets routing to vf, so clear UPE */
> vmolr |= WX_PSR_VM_L2CTL_MPE;
> - vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
> + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) &&
> + test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
> + vlnctrl |= WX_PSR_VLAN_CTL_VFE;
> + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
> }
>
> if (netdev->flags & IFF_ALLMULTI) {
> @@ -1353,7 +1581,7 @@ void wx_set_rx_mode(struct net_device *netdev)
> * sufficient space to store all the addresses then enable
> * unicast promiscuous mode
> */
> - count = wx_write_uc_addr_list(netdev, 0);
> + count = wx_write_uc_addr_list(netdev, VMDQ_P(0));
> if (count < 0) {
> vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
> vmolr |= WX_PSR_VM_L2CTL_UPE;
> @@ -1371,7 +1599,7 @@ void wx_set_rx_mode(struct net_device *netdev)
>
> wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
> wr32(wx, WX_PSR_CTL, fctrl);
> - wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
> + wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr);
>
> if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
> (features & NETIF_F_HW_VLAN_STAG_RX))
> @@ -1379,6 +1607,10 @@ void wx_set_rx_mode(struct net_device *netdev)
> else
> wx_vlan_strip_control(wx, false);
>
> + if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
> + wx_vlan_promisc_disable(wx);
> + else
> + wx_vlan_promisc_enable(wx);
> }
> EXPORT_SYMBOL(wx_set_rx_mode);
>
> @@ -1621,6 +1853,13 @@ static void wx_setup_reta(struct wx *wx)
> u32 random_key_size = WX_RSS_KEY_SIZE / 4;
> u32 i, j;
>
> + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
> + if (wx->mac.type == wx_mac_sp)
> + rss_i = rss_i < 4 ? 4 : rss_i;
> + else if (wx->mac.type == wx_mac_em)
> + rss_i = 1;
> + }
> +
> /* Fill out hash function seeds */
> for (i = 0; i < random_key_size; i++)
> wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
> @@ -1638,10 +1877,40 @@ static void wx_setup_reta(struct wx *wx)
> wx_store_reta(wx);
> }
>
> +static void wx_setup_psrtype(struct wx *wx)
> +{
> + int rss_i = wx->ring_feature[RING_F_RSS].indices;
> + u32 psrtype;
> + int pool;
> +
> + psrtype = WX_RDB_PL_CFG_L4HDR |
> + WX_RDB_PL_CFG_L3HDR |
> + WX_RDB_PL_CFG_L2HDR |
> + WX_RDB_PL_CFG_TUN_OUTL2HDR |
> + WX_RDB_PL_CFG_TUN_TUNHDR;
> +
> + if (wx->mac.type == wx_mac_sp) {
> + if (rss_i > 3)
> + psrtype |= FIELD_PREP(GENMASK(31, 29), 2);
> + else if (rss_i > 1)
> + psrtype |= FIELD_PREP(GENMASK(31, 29), 1);
> +
> + for_each_set_bit(pool, &wx->fwd_bitmask, 32)
> + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
> + } else {
> + for_each_set_bit(pool, &wx->fwd_bitmask, 8)
> + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
> + }
> +}
> +
> static void wx_setup_mrqc(struct wx *wx)
> {
> u32 rss_field = 0;
>
> + /* VT, and RSS do not coexist at the same time */
> + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
> + return;
> +
> /* Disable indicating checksum in descriptor, enables RSS hash */
> wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
>
> @@ -1671,16 +1940,11 @@ static void wx_setup_mrqc(struct wx *wx)
> **/
> void wx_configure_rx(struct wx *wx)
> {
> - u32 psrtype, i;
> int ret;
> + u32 i;
>
> wx_disable_rx(wx);
> -
> - psrtype = WX_RDB_PL_CFG_L4HDR |
> - WX_RDB_PL_CFG_L3HDR |
> - WX_RDB_PL_CFG_L2HDR |
> - WX_RDB_PL_CFG_TUN_TUNHDR;
> - wr32(wx, WX_RDB_PL_CFG(0), psrtype);
> + wx_setup_psrtype(wx);
>
> /* enable hw crc stripping */
> wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
> @@ -1728,6 +1992,7 @@ void wx_configure(struct wx *wx)
> {
> wx_set_rxpba(wx);
> wx_pbthresh_setup(wx);
> + wx_configure_virtualization(wx);
> wx_configure_port(wx);
>
> wx_set_rx_mode(wx->netdev);
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
> index 68bde91b67a0..8e4c0e24a4a3 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
> @@ -1558,6 +1558,65 @@ void wx_napi_disable_all(struct wx *wx)
> }
> EXPORT_SYMBOL(wx_napi_disable_all);
>
> +static bool wx_set_vmdq_queues(struct wx *wx)
> +{
> + u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit;
> + u16 rss_i = wx->ring_feature[RING_F_RSS].limit;
> + u16 rss_m = WX_RSS_DISABLED_MASK;
> + u16 vmdq_m = 0;
> +
> + /* only proceed if VMDq is enabled */
> + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
> + return false;
> + /* Add starting offset to total pool count */
> + vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
> +
> + if (wx->mac.type == wx_mac_sp) {
> + /* double check we are limited to maximum pools */
> + vmdq_i = min_t(u16, 64, vmdq_i);
> +
> + /* 64 pool mode with 2 queues per pool, or
> + * 16/32/64 pool mode with 1 queue per pool
> + */
> + if (vmdq_i > 32 || rss_i < 4) {
> + vmdq_m = WX_VMDQ_2Q_MASK;
> + rss_m = WX_RSS_2Q_MASK;
> + rss_i = min_t(u16, rss_i, 2);
> + /* 32 pool mode with 4 queues per pool */
> + } else {
> + vmdq_m = WX_VMDQ_4Q_MASK;
> + rss_m = WX_RSS_4Q_MASK;
> + rss_i = 4;
> + }
> + } else {
> + /* double check we are limited to maximum pools */
> + vmdq_i = min_t(u16, 8, vmdq_i);
> +
> + /* when VMDQ on, disable RSS */
> + rss_i = 1;
> + }
> +
> + /* remove the starting offset from the pool count */
> + vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
> +
> + /* save features for later use */
> + wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
> + wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
> +
> + /* limit RSS based on user input and save for later use */
> + wx->ring_feature[RING_F_RSS].indices = rss_i;
> + wx->ring_feature[RING_F_RSS].mask = rss_m;
> +
> + wx->queues_per_pool = rss_i; /* maybe same as num_rx_queues_per_pool */
> + wx->num_rx_pools = vmdq_i;
> + wx->num_rx_queues_per_pool = rss_i;
> +
> + wx->num_rx_queues = vmdq_i * rss_i;
> + wx->num_tx_queues = vmdq_i * rss_i;
> +
> + return true;
> +}
> +
> /**
> * wx_set_rss_queues: Allocate queues for RSS
> * @wx: board private structure to initialize
> @@ -1574,6 +1633,11 @@ static void wx_set_rss_queues(struct wx *wx)
> f = &wx->ring_feature[RING_F_RSS];
> f->indices = f->limit;
>
> + if (wx->mac.type == wx_mac_sp)
> + f->mask = WX_RSS_64Q_MASK;
> + else
> + f->mask = WX_RSS_8Q_MASK;
> +
> wx->num_rx_queues = f->limit;
> wx->num_tx_queues = f->limit;
> }
> @@ -1585,6 +1649,9 @@ static void wx_set_num_queues(struct wx *wx)
> wx->num_tx_queues = 1;
> wx->queues_per_pool = 1;
>
> + if (wx_set_vmdq_queues(wx))
> + return;
> +
> wx_set_rss_queues(wx);
> }
>
> @@ -1665,6 +1732,10 @@ static int wx_set_interrupt_capability(struct wx *wx)
> if (ret == 0 || (ret == -ENOMEM))
> return ret;
>
> + /* Disable VMDq support */
> + dev_warn(&wx->pdev->dev, "Disabling VMDq support\n");
> + clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
> +
> /* Disable RSS */
> dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
> wx->ring_feature[RING_F_RSS].limit = 1;
> @@ -1690,6 +1761,49 @@ static int wx_set_interrupt_capability(struct wx *wx)
> return 0;
> }
>
> +static bool wx_cache_ring_vmdq(struct wx *wx)
> +{
> + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
> + struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS];
> + u16 reg_idx;
> + int i;
> +
> + /* only proceed if VMDq is enabled */
> + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
> + return false;
> +
> + if (wx->mac.type == wx_mac_sp) {
> + /* start at VMDq register offset for SR-IOV enabled setups */
> + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
> + for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
> + /* If we are greater than indices move to next pool */
> + if ((reg_idx & ~vmdq->mask) >= rss->indices)
> + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
> + wx->rx_ring[i]->reg_idx = reg_idx;
> + }
> + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
> + for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) {
> + /* If we are greater than indices move to next pool */
> + if ((reg_idx & rss->mask) >= rss->indices)
> + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
> + wx->tx_ring[i]->reg_idx = reg_idx;
> + }
> + } else {
> + /* start at VMDq register offset for SR-IOV enabled setups */
> + reg_idx = vmdq->offset;
> + for (i = 0; i < wx->num_rx_queues; i++)
> + /* If we are greater than indices move to next pool */
> + wx->rx_ring[i]->reg_idx = reg_idx + i;
> +
> + reg_idx = vmdq->offset;
> + for (i = 0; i < wx->num_tx_queues; i++)
> + /* If we are greater than indices move to next pool */
> + wx->tx_ring[i]->reg_idx = reg_idx + i;
> + }
> +
> + return true;
> +}
> +
> /**
> * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
> * @wx: board private structure to initialize
> @@ -1701,6 +1815,9 @@ static void wx_cache_ring_rss(struct wx *wx)
> {
> u16 i;
>
> + if (wx_cache_ring_vmdq(wx))
> + return;
> +
> for (i = 0; i < wx->num_rx_queues; i++)
> wx->rx_ring[i]->reg_idx = i;
>
> @@ -2089,7 +2206,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
> wr32(wx, WX_PX_MISC_IVAR, ivar);
> } else {
> /* tx or rx causes */
> - msix_vector += 1; /* offset for queue vectors */
> + if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7))
> + msix_vector += 1; /* offset for queue vectors */
> msix_vector |= WX_PX_IVAR_ALLOC_VAL;
> index = ((16 * (queue & 1)) + (8 * direction));
> ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
> @@ -2134,10 +2252,17 @@ void wx_configure_vectors(struct wx *wx)
> {
> struct pci_dev *pdev = wx->pdev;
> u32 eitrsel = 0;
> - u16 v_idx;
> + u16 v_idx, i;
>
> if (pdev->msix_enabled) {
> /* Populate MSIX to EITR Select */
> + if (wx->mac.type == wx_mac_sp) {
> + if (wx->num_vfs >= 32)
> + eitrsel = BIT(wx->num_vfs % 32) - 1;
> + } else if (wx->mac.type == wx_mac_em) {
> + for (i = 0; i < wx->num_vfs; i++)
> + eitrsel |= BIT(i);
> + }
> wr32(wx, WX_PX_ITRSEL, eitrsel);
> /* use EIAM to auto-mask when MSI-X interrupt is asserted
> * this saves a register write for every interrupt
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> index 7dad022e01e9..126416534181 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> @@ -19,6 +19,7 @@
> #define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
> #define WX_PCI_LINK_STATUS 0xB2
> #define WX_MAX_PF_MACVLANS 15
> +#define WX_MAX_VF_MC_ENTRIES 30
>
> /**************** Global Registers ****************************/
> /* chip control Registers */
> @@ -75,6 +76,7 @@
> #define WX_MAC_LXONOFFRXC 0x11E0C
>
> /*********************** Receive DMA registers **************************/
> +#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
> #define WX_RDM_DRP_PKT 0x12500
> #define WX_RDM_PKT_CNT 0x12504
> #define WX_RDM_BYTE_CNT_LSB 0x12508
> @@ -89,6 +91,7 @@
> #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
> #define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
>
> +#define WX_CFG_PORT_CTL_NUM_VT_NONE 0
> #define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
> #define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
> #define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
> @@ -114,6 +117,10 @@
> /*********************** Transmit DMA registers **************************/
> /* transmit global control */
> #define WX_TDM_CTL 0x18000
> +#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
> +#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
> +#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
> +
> /* TDM CTL BIT */
> #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
> #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
> @@ -186,6 +193,7 @@
> /* mcasst/ucast overflow tbl */
> #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
> #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
> +#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */
> #define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
>
> /* VM L2 contorl */
> @@ -230,6 +238,7 @@
> #define WX_PSR_VLAN_SWC 0x16220
> #define WX_PSR_VLAN_SWC_VM_L 0x16224
> #define WX_PSR_VLAN_SWC_VM_H 0x16228
> +#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4))
> #define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
> /* VLAN pool filtering masks */
> #define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
> @@ -244,6 +253,10 @@
> #define WX_RSC_ST 0x17004
> #define WX_RSC_ST_RSEC_RDY BIT(0)
>
> +/*********************** Transmit DMA registers **************************/
> +/* transmit global control */
> +#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4))
> +
> /****************************** TDB ******************************************/
> #define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
> #define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
> @@ -371,6 +384,15 @@ enum WX_MSCA_CMD_value {
> /* Number of 80 microseconds we wait for PCI Express master disable */
> #define WX_PCI_MASTER_DISABLE_TIMEOUT 80000
>
> +#define WX_RSS_64Q_MASK 0x3F
> +#define WX_RSS_8Q_MASK 0x7
> +#define WX_RSS_4Q_MASK 0x3
> +#define WX_RSS_2Q_MASK 0x1
> +#define WX_RSS_DISABLED_MASK 0x0
> +
> +#define WX_VMDQ_4Q_MASK 0x7C
> +#define WX_VMDQ_2Q_MASK 0x7E
> +
> /****************** Manageablility Host Interface defines ********************/
> #define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
> #define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
> @@ -435,7 +457,12 @@ enum WX_MSCA_CMD_value {
> #define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
>
> #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
> -#define VMDQ_P(p) p
> +/* must account for pools assigned to VFs. */
> +#ifdef CONFIG_PCI_IOV
> +#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset)
> +#else
> +#define VMDQ_P(p) (p)
> +#endif
>
> /* Supported Rx Buffer Sizes */
> #define WX_RXBUFFER_256 256 /* Used for skb receive header */
> @@ -1005,6 +1032,10 @@ struct vf_data_storage {
> bool link_enable;
> bool trusted;
> int xcast_mode;
> +
> + u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
> + u16 num_vf_mc_hashes;
> + u16 vlan_count;
> };
>
> struct vf_macvlans {
> @@ -1017,6 +1048,7 @@ struct vf_macvlans {
>
> enum wx_pf_flags {
> WX_FLAG_VMDQ_ENABLED,
> + WX_FLAG2_VLAN_PROMISC,
> WX_FLAG_SRIOV_ENABLED,
> WX_PF_FLAGS_NBITS /* must be last */
> };
> @@ -1085,6 +1117,8 @@ struct wx {
> struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
> struct wx_ring *rx_ring[64];
> struct wx_q_vector *q_vector[64];
> + int num_rx_pools; /* does not include pools assigned to VFs */
> + int num_rx_queues_per_pool;
>
> unsigned int queues_per_pool;
> struct msix_entry *msix_q_entries;
> @@ -1118,6 +1152,7 @@ struct wx {
> struct vf_data_storage *vfinfo;
> struct vf_macvlans vf_mvs;
> struct vf_macvlans *mv_list;
> + unsigned long fwd_bitmask; /* bitmask indicating in use pools */
>
> int (*setup_tc)(struct net_device *netdev, u8 tc);
> void (*do_reset)(struct net_device *netdev);
* Re: [PATCH net-next v4 4/6] net: libwx: Add msg task func
2024-06-04 15:57 ` [PATCH net-next v4 4/6] net: libwx: Add msg task func Mengyuan Lou
@ 2024-06-05 9:41 ` Wojciech Drewek
2024-06-05 18:44 ` Simon Horman
1 sibling, 0 replies; 14+ messages in thread
From: Wojciech Drewek @ 2024-06-05 9:41 UTC (permalink / raw)
To: Mengyuan Lou, netdev; +Cc: jiawenwu, duanqiangwen
On 04.06.2024 17:57, Mengyuan Lou wrote:
> Implement wx_msg_task which is used to process mailbox
> messages sent by vf.
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
> ---
> drivers/net/ethernet/wangxun/libwx/wx_hw.c | 12 +-
> drivers/net/ethernet/wangxun/libwx/wx_hw.h | 4 +
> drivers/net/ethernet/wangxun/libwx/wx_mbx.h | 50 ++
> drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 725 ++++++++++++++++++
> drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 1 +
> drivers/net/ethernet/wangxun/libwx/wx_type.h | 17 +
> 6 files changed, 805 insertions(+), 4 deletions(-)
>
<...>
> +
> +static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
> +{
> + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
> + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
> + u32 reg = 0, n = vf * q_per_pool / 32;
> + u32 i = vf * q_per_pool;
> +
> + reg = rd32(wx, WX_RDM_PF_QDE(n));
> + for (i = (vf * q_per_pool - n * 32);
> + i < ((vf + 1) * q_per_pool - n * 32);
> + i++) {
> + if (qde == 1)
> + reg |= qde << i;
> + else
> + reg &= qde << i;
> + }
> +
> + wr32(wx, WX_RDM_PF_QDE(n), reg);
> +}
> +
> +static void wx_clear_vmvir(struct wx *wx, u32 vf)
> +{
> + wr32(wx, WX_TDM_VLAN_INS(vf), 0);
> +}
> +
> +static void wx_set_vf_rx_tx(struct wx *wx, int vf)
> +{
> + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
> + u32 index, vf_bit;
> +
> + vf_bit = vf % 32;
> + index = vf / 32;
I've seen these calculations a few times; you could define a macro or helper for them:
wx_get_vf_index
Then you could leave a comment explaining them in one place.
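Something along these lines, for instance (the name is just the one suggested above, so this is only a sketch):

static inline void wx_get_vf_index(u32 vf, u32 *index, u32 *vf_bit)
{
	/* the per-VF enable/anti-spoof registers pack 32 VFs per 32-bit
	 * register: 'index' selects the register, 'vf_bit' the bit in it
	 */
	*index = vf / 32;
	*vf_bit = vf % 32;
}

and callers would then simply do:

	wx_get_vf_index(vf, &index, &vf_bit);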
> +
> + reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
> + reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));
> +
> + if (wx->vfinfo[vf].link_enable) {
> + reg_req_tx = reg_cur_tx | BIT(vf_bit);
> + reg_req_rx = reg_cur_rx | BIT(vf_bit);
> + /* Enable particular VF */
> + if (reg_cur_tx != reg_req_tx)
> + wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
> + if (reg_cur_rx != reg_req_rx)
> + wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
> + } else {
> + reg_req_tx = BIT(vf_bit);
> + reg_req_rx = BIT(vf_bit);
> + /* Disable particular VF */
> + if (reg_cur_tx & reg_req_tx)
> + wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
> + if (reg_cur_rx & reg_req_rx)
> + wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
> + }
> +}
> +
> +static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
> + unsigned int default_tc = 0;
> +
> + /* verify the PF is supporting the correct APIs */
> + switch (wx->vfinfo[vf].vf_api) {
> + case wx_mbox_api_11 ... wx_mbox_api_20:
> + break;
> + default:
> + return -EINVAL;
> + }
> +
> + /* only allow 1 Tx queue for bandwidth limiting */
> + msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
> + msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
> +
> + if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
> + msgbuf[WX_VF_TRANS_VLAN] = 1;
> + else
> + msgbuf[WX_VF_TRANS_VLAN] = 0;
> +
> + /* notify VF of default queue */
> + msgbuf[WX_VF_DEF_QUEUE] = default_tc;
> +
> + return 0;
> +}
> +
> +static void wx_vf_reset_event(struct wx *wx, u16 vf)
> +{
> + struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
> + u8 num_tcs = netdev_get_num_tc(wx->netdev);
> +
> + /* add PF assigned VLAN or VLAN 0 */
> + wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);
> +
> + /* reset offloads to defaults */
> + wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);
> +
> + /* set outgoing tags for VFs */
> + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
> + wx_clear_vmvir(wx, vf);
> + } else {
> + if (vfinfo->pf_qos || !num_tcs)
> + wx_set_vmvir(wx, vfinfo->pf_vlan,
> + vfinfo->pf_qos, vf);
> + else
> + wx_set_vmvir(wx, vfinfo->pf_vlan,
> + wx->default_up, vf);
> + }
> +
> + /* reset multicast table array for vf */
> + wx->vfinfo[vf].num_vf_mc_hashes = 0;
> +
> + /* Flush and reset the mta with the new values */
> + wx_set_rx_mode(wx->netdev);
> +
> + wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
> +
> + /* reset VF api back to unknown */
> + wx->vfinfo[vf].vf_api = wx_mbox_api_10;
> +}
> +
> +static void wx_vf_reset_msg(struct wx *wx, u16 vf)
> +{
> + unsigned char *vf_mac = wx->vfinfo[vf].vf_mac_addr;
> + struct net_device *dev = wx->netdev;
> + u32 msgbuf[5] = {0, 0, 0, 0, 0};
> + u8 *addr = (u8 *)(&msgbuf[1]);
> + u32 reg = 0, index, vf_bit;
> + int pf_max_frame;
> +
> + /* reset the filters for the device */
> + wx_vf_reset_event(wx, vf);
> +
> + /* set vf mac address */
> + if (!is_zero_ether_addr(vf_mac))
> + wx_set_vf_mac(wx, vf, vf_mac);
> +
> + vf_bit = vf % 32;
> + index = vf / 32;
> +
> + /* force drop enable for all VF Rx queues */
> + wx_write_qde(wx, vf, 1);
> +
> + /* set transmit and receive for vf */
> + wx_set_vf_rx_tx(wx, vf);
> +
> + pf_max_frame = dev->mtu + ETH_HLEN;
> +
> + if (pf_max_frame > ETH_FRAME_LEN)
> + reg = BIT(vf_bit);
> + wr32(wx, WX_RDM_VFRE_CLR(index), reg);
> +
> + /* enable VF mailbox for further messages */
> + wx->vfinfo[vf].clear_to_send = true;
> +
> + /* reply to reset with ack and vf mac address */
> + msgbuf[0] = WX_VF_RESET;
> + if (!is_zero_ether_addr(vf_mac)) {
> + msgbuf[0] |= WX_VT_MSGTYPE_ACK;
> + memcpy(addr, vf_mac, ETH_ALEN);
> + } else {
> + msgbuf[0] |= WX_VT_MSGTYPE_NACK;
> + wx_err(wx, "VF %d has no MAC address assigned", vf);
> + }
> +
> + /* Piggyback the multicast filter type so VF can compute the
> + * correct vectors
> + */
> + msgbuf[3] = wx->mac.mc_filter_type;
> + wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
> +}
> +
> +static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> + u8 *new_mac = ((u8 *)(&msgbuf[1]));
> +
> + if (!is_valid_ether_addr(new_mac)) {
> + wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
> + return -EINVAL;
> + }
> +
> + if (wx->vfinfo[vf].pf_set_mac &&
> + memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
> + wx_err(wx,
> + "VF %d attempted to set a MAC address but it already had a MAC address.",
> + vf);
> + return -EBUSY;
> + }
nit: blank line before the return
> + return wx_set_vf_mac(wx, vf, new_mac) < 0;
> +}
> +
> +static int wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
this function can't fail, so there is no need to return a value
> +{
> + u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
> + >> WX_VT_MSGINFO_SHIFT;
> + struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
> + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
> + u32 vector_bit, vector_reg, mta_reg, i;
> + u16 *hash_list = (u16 *)&msgbuf[1];
> +
> + /* only so many hash values supported */
> + entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
> + /* salt away the number of multi cast addresses assigned
> + * to this VF for later use to restore when the PF multi cast
> + * list changes
> + */
> + vfinfo->num_vf_mc_hashes = entries;
> +
> + /* VFs are limited to using the MTA hash table for their multicast
> + * addresses
> + */
> + for (i = 0; i < entries; i++)
> + vfinfo->vf_mc_hashes[i] = hash_list[i];
> +
> + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
> + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
> + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
> + /* errata 5: maintain a copy of the register table conf */
> + mta_reg = wx->mac.mta_shadow[vector_reg];
> + mta_reg |= (1 << vector_bit);
> + wx->mac.mta_shadow[vector_reg] = mta_reg;
> + wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
> + }
> + vmolr |= WX_PSR_VM_L2CTL_ROMPE;
> + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
> +
> + return 0;
> +}
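In other words (sketch only), the prototype could become

static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf);

with the trailing 'return 0;' dropped and any use of the return value at the call site (not visible in this excerpt) adjusted to match.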
> +
> +static int wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
> +{
> + struct net_device *netdev = wx->netdev;
> + u32 index, vf_bit, vfre;
> + u32 max_frs, reg_val;
> + int pf_max_frame;
> + int err = 0;
> +
> + pf_max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
> + switch (wx->vfinfo[vf].vf_api) {
> + case wx_mbox_api_11 ... wx_mbox_api_13:
> + /* Version 1.1 supports jumbo frames on VFs if PF has
> + * jumbo frames enabled which means legacy VFs are
> + * disabled
> + */
> + if (pf_max_frame > ETH_FRAME_LEN)
> + break;
> + fallthrough;
> + default:
> + /* If the PF or VF are running w/ jumbo frames enabled
> + * we need to shut down the VF Rx path as we cannot
> + * support jumbo frames on legacy VFs
> + */
> + if (pf_max_frame > ETH_FRAME_LEN ||
> + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)))
> + err = -EINVAL;
return -EINVAL here?
> + break;
> + }
> +
> + /* determine VF receive enable location */
> + vf_bit = vf % 32;
> + index = vf / 32;
> +
> + /* enable or disable receive depending on error */
> + vfre = rd32(wx, WX_RDM_VF_RE(index));
> + if (err)
> + vfre &= ~BIT(vf_bit);
> + else
> + vfre |= BIT(vf_bit);
> + wr32(wx, WX_RDM_VF_RE(index), vfre);
> +
> + if (err) {
> + wx_err(wx, "VF max_frame %d out of range\n", max_frame);
> + return err;
> + }
> + /* pull current max frame size from hardware */
> + max_frs = DIV_ROUND_UP(max_frame, 1024);
> + reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
> + if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
> + wr32(wx, WX_MAC_WDG_TIMEOUT, max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
> +
> + return 0;
> +}
> +
> +static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
> +{
> + int regindex;
> + u32 vlvf;
> +
> + /* short cut the special case */
> + if (vlan == 0)
> + return 0;
> +
> + /* Search for the vlan id in the VLVF entries */
> + for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
> + wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
> + vlvf = rd32(wx, WX_PSR_VLAN_SWC);
> + if ((vlvf & VLAN_VID_MASK) == vlan)
> + break;
> + }
> +
> + /* Return a negative value if not found */
> + if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
> + regindex = -EINVAL;
> +
> + return regindex;
> +}
> +
> +static int wx_set_vf_macvlan(struct wx *wx,
> + u16 vf, int index, unsigned char *mac_addr)
> +{
> + struct vf_macvlans *entry;
> + struct list_head *pos;
> + int retval = 0;
> +
> + if (index <= 1) {
> + list_for_each(pos, &wx->vf_mvs.l) {
> + entry = list_entry(pos, struct vf_macvlans, l);
> + if (entry->vf == vf) {
> + entry->vf = -1;
> + entry->free = true;
> + entry->is_macvlan = false;
> + wx_del_mac_filter(wx, entry->vf_macvlan, vf);
> + }
> + }
> + }
> +
> + /* If index was zero then we were asked to clear the uc list
> + * for the VF. We're done.
> + */
> + if (!index)
> + return 0;
> +
> + entry = NULL;
> +
> + list_for_each(pos, &wx->vf_mvs.l) {
> + entry = list_entry(pos, struct vf_macvlans, l);
> + if (entry->free)
> + break;
> + }
> +
> + /* If we traversed the entire list and didn't find a free entry
> + * then we're out of space on the RAR table. Also entry may
> + * be NULL because the original memory allocation for the list
> + * failed, which is not fatal but does mean we can't support
> + * VF requests for MACVLAN because we couldn't allocate
> + * memory for the list management required.
> + */
> + if (!entry || !entry->free)
> + return -ENOSPC;
> +
> + retval = wx_add_mac_filter(wx, mac_addr, vf);
> + if (retval >= 0) {
> + entry->free = false;
> + entry->is_macvlan = true;
> + entry->vf = vf;
> + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
> + }
> +
> + return retval;
> +}
> +
> +static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> + int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
> + int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
> + int err;
> +
> + if (add)
> + wx->vfinfo[vf].vlan_count++;
> + else if (wx->vfinfo[vf].vlan_count)
> + wx->vfinfo[vf].vlan_count--;
> +
> + /* in case of promiscuous mode any VLAN filter set for a VF must
> + * also have the PF pool added to it.
> + */
> + if (add && wx->netdev->flags & IFF_PROMISC)
> + err = wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
The err returned here is immediately overwritten; should we check it and return on failure?
> +
> + err = wx_set_vf_vlan(wx, add, vid, vf);
> + if (!err && wx->vfinfo[vf].spoofchk_enabled)
> + wx_set_vlan_anti_spoofing(wx, true, vf);
> +
> + /* Go through all the checks to see if the VLAN filter should
> + * be wiped completely.
> + */
> + if (!add && wx->netdev->flags & IFF_PROMISC) {
> + u32 bits = 0, vlvf;
> + int reg_ndx;
> +
> + reg_ndx = wx_find_vlvf_entry(wx, vid);
> + if (reg_ndx < 0)
> + goto out;
It would be simpler to just return here, no need for goto
> + wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
> + vlvf = rd32(wx, WX_PSR_VLAN_SWC);
> + /* See if any other pools are set for this VLAN filter
> + * entry other than the PF.
> + */
> + if (VMDQ_P(0) < 32) {
> + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
> + bits &= ~BIT(VMDQ_P(0));
> + if (wx->mac.type == wx_mac_sp)
> + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
> + } else {
> + if (wx->mac.type == wx_mac_sp)
> + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
> + bits &= ~BIT(VMDQ_P(0) % 32);
> + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
> + }
> + /* If the filter was removed then ensure PF pool bit
> + * is cleared if the PF only added itself to the pool
> + * because the PF is in promiscuous mode.
> + */
> + if ((vlvf & VLAN_VID_MASK) == vid && !bits)
> + wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
> + }
> +
> +out:
> + return err;
> +}
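Taking the two remarks above together, the relevant spots could read roughly like this (sketch only):

	/* in case of promiscuous mode any VLAN filter set for a VF must
	 * also have the PF pool added to it.
	 */
	if (add && wx->netdev->flags & IFF_PROMISC) {
		err = wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
		if (err)
			return err;
	}

and, with the goto gone:

		reg_ndx = wx_find_vlvf_entry(wx, vid);
		if (reg_ndx < 0)
			return err;

so the 'out:' label disappears and the function ends with a plain 'return err;'.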
> +
> +static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> + int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
> + WX_VT_MSGINFO_SHIFT;
> + u8 *new_mac = ((u8 *)(&msgbuf[1]));
> + int err;
> +
> + if (wx->vfinfo[vf].pf_set_mac && index > 0) {
> + wx_err(wx, "VF %d requested MACVLAN filter but is administratively denied\n", vf);
> + return -EINVAL;
> + }
> +
> + /* An non-zero index indicates the VF is setting a filter */
> + if (index) {
> + if (!is_valid_ether_addr(new_mac)) {
> + wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
> + return -EINVAL;
> + }
> + /* If the VF is allowed to set MAC filters then turn off
> + * anti-spoofing to avoid false positives.
> + */
> + if (wx->vfinfo[vf].spoofchk_enabled)
> + wx_set_vf_spoofchk(wx->netdev, vf, false);
> + }
> +
> + err = wx_set_vf_macvlan(wx, vf, index, new_mac);
> + if (err == -ENOSPC)
> + wx_err(wx,
> + "VF %d has requested a MACVLAN filter but there is no space for it\n",
> + vf);
> +
> + return err < 0;
> +}
> +
> +static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> + int api = msgbuf[1];
> +
> + switch (api) {
> + case wx_mbox_api_10 ... wx_mbox_api_13:
> + wx->vfinfo[vf].vf_api = api;
> + return 0;
> + default:
> + wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
> + return -EINVAL;
> + }
> +}
> +
> +static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> + /* verify the PF is supporting the correct API */
> + switch (wx->vfinfo[vf].vf_api) {
> + case wx_mbox_api_12 ... wx_mbox_api_13:
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
> +
> + msgbuf[1] = wx->vfinfo[vf].link_enable;
> +
> + return 0;
> +}
> +
> +static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> + unsigned long fw_version = 0ULL;
> + int ret = 0;
> +
> + /* verify the PF is supporting the correct API */
> + switch (wx->vfinfo[vf].vf_api) {
> + case wx_mbox_api_12 ... wx_mbox_api_13:
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
> +
> + ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
> + if (ret)
> + return -EOPNOTSUPP;
> + msgbuf[1] = fw_version;
> +
> + return 0;
> +}
> +
> +static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> + int xcast_mode = msgbuf[1];
> + u32 vmolr, disable, enable;
> +
> + /* verify the PF is supporting the correct APIs */
> + switch (wx->vfinfo[vf].vf_api) {
> + case wx_mbox_api_12:
> + /* promisc introduced in 1.3 version */
> + if (xcast_mode == WXVF_XCAST_MODE_PROMISC)
> + return -EOPNOTSUPP;
> + fallthrough;
> + case wx_mbox_api_13:
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
nit: blank line here
> + if (wx->vfinfo[vf].xcast_mode == xcast_mode)
> + goto out;
> +
> + switch (xcast_mode) {
> + case WXVF_XCAST_MODE_NONE:
> + disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
> + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> + enable = 0;
> + break;
> + case WXVF_XCAST_MODE_MULTI:
> + disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
> + break;
> + case WXVF_XCAST_MODE_ALLMULTI:
> + disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | WX_PSR_VM_L2CTL_MPE;
> + break;
> + case WXVF_XCAST_MODE_PROMISC:
> + disable = 0;
> + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
> + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
> +
> + vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
> + vmolr &= ~disable;
> + vmolr |= enable;
> + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
> +
> + wx->vfinfo[vf].xcast_mode = xcast_mode;
> +out:
> + msgbuf[1] = xcast_mode;
> +
> + return 0;
> +}
> +
<...>
* Re: [PATCH net-next v4 5/6] net: ngbe: add sriov function support
2024-06-04 15:57 ` [PATCH net-next v4 5/6] net: ngbe: add sriov function support Mengyuan Lou
@ 2024-06-05 9:44 ` Wojciech Drewek
0 siblings, 0 replies; 14+ messages in thread
From: Wojciech Drewek @ 2024-06-05 9:44 UTC (permalink / raw)
To: Mengyuan Lou, netdev; +Cc: jiawenwu, duanqiangwen
On 04.06.2024 17:57, Mengyuan Lou wrote:
> Add sriov_configure for driver ops.
> Add mailbox handler wx_msg_task for ngbe in
> the interrupt handler.
> Add the notification flow when the vfs exist.
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
> ---
Only one nit
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
> drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 31 ++++++++++
> drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 2 +
> drivers/net/ethernet/wangxun/libwx/wx_type.h | 2 +
> drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 58 +++++++++++++++++--
> drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 10 ++++
> drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 2 +
> 6 files changed, 101 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> index 315d51961449..6d470cd0f317 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> @@ -944,3 +944,34 @@ void wx_msg_task(struct wx *wx)
> }
> }
> EXPORT_SYMBOL(wx_msg_task);
> +
> +void wx_disable_vf_rx_tx(struct wx *wx)
> +{
> + wr32(wx, WX_TDM_VFTE_CLR(0), 0);
> + wr32(wx, WX_RDM_VFRE_CLR(0), 0);
> + if (wx->mac.type == wx_mac_sp) {
> + wr32(wx, WX_TDM_VFTE_CLR(1), 0);
> + wr32(wx, WX_RDM_VFRE_CLR(1), 0);
> + }
> +}
> +EXPORT_SYMBOL(wx_disable_vf_rx_tx);
> +
> +void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
> +{
> + u32 msgbuf[2] = {0, 0};
> + u16 i;
> +
> + if (!wx->num_vfs)
> + return;
> + msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
> + if (link_up)
> + msgbuf[1] = (wx->speed << 1) | link_up;
> + if (wx->notify_not_runnning)
> + msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
> + for (i = 0 ; i < wx->num_vfs; i++) {
> + if (wx->vfinfo[i].clear_to_send)
> + msgbuf[0] |= WX_VT_MSGTYPE_CTS;
> + wx_write_mbx_pf(wx, msgbuf, 2, i);
> + }
> +}
> +EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> index f311774a2a18..7e45b3f71a7b 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> @@ -7,5 +7,7 @@
> int wx_disable_sriov(struct wx *wx);
> int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
> void wx_msg_task(struct wx *wx);
> +void wx_disable_vf_rx_tx(struct wx *wx);
> +void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
>
> #endif /* _WX_SRIOV_H_ */
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> index 3a7931c2e4bc..b8f0bf93a0fb 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> @@ -87,6 +87,7 @@
> /************************* Port Registers ************************************/
> /* port cfg Registers */
> #define WX_CFG_PORT_CTL 0x14400
> +#define WX_CFG_PORT_CTL_PFRSTD BIT(14)
> #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
> #define WX_CFG_PORT_CTL_QINQ BIT(2)
> #define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
> @@ -1102,6 +1103,7 @@ struct wx {
> enum wx_reset_type reset_type;
>
> /* PHY stuff */
> + bool notify_not_runnning;
maybe notify_down?
> unsigned int link;
> int speed;
> int duplex;
> diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
> index e894e01d030d..583e8e882f17 100644
> --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
> +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
> @@ -14,6 +14,8 @@
> #include "../libwx/wx_type.h"
> #include "../libwx/wx_hw.h"
> #include "../libwx/wx_lib.h"
> +#include "../libwx/wx_mbx.h"
> +#include "../libwx/wx_sriov.h"
> #include "ngbe_type.h"
> #include "ngbe_mdio.h"
> #include "ngbe_hw.h"
> @@ -128,6 +130,10 @@ static int ngbe_sw_init(struct wx *wx)
> wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
> wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
>
> + wx->mbx.size = WX_VXMAILBOX_SIZE;
> + wx->setup_tc = ngbe_setup_tc;
> + set_bit(0, &wx->fwd_bitmask);
> +
> return 0;
> }
>
> @@ -197,11 +203,25 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
>
> static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
> {
> - struct wx *wx = data;
> + struct wx_q_vector *q_vector;
> + struct wx *wx = data;
> + u32 eicr;
>
> - /* re-enable the original interrupt state, no lsc, no queues */
> - if (netif_running(wx->netdev))
> - ngbe_irq_enable(wx, false);
> + q_vector = wx->q_vector[0];
> +
> + eicr = wx_misc_isb(wx, WX_ISB_MISC);
> +
> + if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
> + wx_msg_task(wx);
> +
> + if (wx->num_vfs == 7) {
> + napi_schedule_irqoff(&q_vector->napi);
> + ngbe_irq_enable(wx, true);
> + } else {
> + /* re-enable the original interrupt state, no lsc, no queues */
> + if (netif_running(wx->netdev))
> + ngbe_irq_enable(wx, false);
> + }
>
> return IRQ_HANDLED;
> }
> @@ -291,6 +311,22 @@ static void ngbe_disable_device(struct wx *wx)
> struct net_device *netdev = wx->netdev;
> u32 i;
>
> + if (wx->num_vfs) {
> + /* Clear EITR Select mapping */
> + wr32(wx, WX_PX_ITRSEL, 0);
> +
> + /* Mark all the VFs as inactive */
> + for (i = 0 ; i < wx->num_vfs; i++)
> + wx->vfinfo[i].clear_to_send = 0;
> + wx->notify_not_runnning = true;
> + /* ping all the active vfs to let them know we are going down */
> + wx_ping_all_vfs_with_link_status(wx, false);
> + wx->notify_not_runnning = false;
> +
> + /* Disable all VFTE/VFRE TX/RX */
> + wx_disable_vf_rx_tx(wx);
> + }
> +
> /* disable all enabled rx queues */
> for (i = 0; i < wx->num_rx_queues; i++)
> /* this call also flushes the previous write */
> @@ -313,10 +349,17 @@ static void ngbe_disable_device(struct wx *wx)
> wx_update_stats(wx);
> }
>
> +static void ngbe_reset(struct wx *wx)
> +{
> + wx_flush_sw_mac_table(wx);
> + wx_mac_set_default_filter(wx, wx->mac.addr);
> +}
> +
> void ngbe_down(struct wx *wx)
> {
> phylink_stop(wx->phylink);
> ngbe_disable_device(wx);
> + ngbe_reset(wx);
> wx_clean_all_tx_rings(wx);
> wx_clean_all_rx_rings(wx);
> }
> @@ -339,6 +382,11 @@ void ngbe_up(struct wx *wx)
> ngbe_sfp_modules_txrx_powerctl(wx, true);
>
> phylink_start(wx->phylink);
> + /* Set PF Reset Done bit so PF/VF Mail Ops can work */
> + wr32m(wx, WX_CFG_PORT_CTL,
> + WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD);
> + if (wx->num_vfs)
> + wx_ping_all_vfs_with_link_status(wx, false);
> }
>
> /**
> @@ -723,6 +771,7 @@ static void ngbe_remove(struct pci_dev *pdev)
> struct net_device *netdev;
>
> netdev = wx->netdev;
> + wx_disable_sriov(wx);
> unregister_netdev(netdev);
> phylink_destroy(wx->phylink);
> pci_release_selected_regions(pdev,
> @@ -782,6 +831,7 @@ static struct pci_driver ngbe_driver = {
> .suspend = ngbe_suspend,
> .resume = ngbe_resume,
> .shutdown = ngbe_shutdown,
> + .sriov_configure = wx_pci_sriov_configure,
> };
>
> module_pci_driver(ngbe_driver);
> diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
> index ec54b18c5fe7..dd01aec87b02 100644
> --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
> +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
> @@ -8,6 +8,7 @@
>
> #include "../libwx/wx_type.h"
> #include "../libwx/wx_hw.h"
> +#include "../libwx/wx_sriov.h"
> #include "ngbe_type.h"
> #include "ngbe_mdio.h"
>
> @@ -64,6 +65,11 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
> static void ngbe_mac_link_down(struct phylink_config *config,
> unsigned int mode, phy_interface_t interface)
> {
> + struct wx *wx = phylink_to_wx(config);
> +
> + wx->speed = 0;
> + /* ping all the active vfs to let them know we are going down */
> + wx_ping_all_vfs_with_link_status(wx, false);
> }
>
> static void ngbe_mac_link_up(struct phylink_config *config,
> @@ -103,6 +109,10 @@ static void ngbe_mac_link_up(struct phylink_config *config,
> wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
> reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
> wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
> +
> + wx->speed = speed;
> + /* ping all the active vfs to let them know we are going up */
> + wx_ping_all_vfs_with_link_status(wx, true);
> }
>
> static const struct phylink_mac_ops ngbe_mac_ops = {
> diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
> index f48ed7fc1805..bb70af035c39 100644
> --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
> +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
> @@ -72,11 +72,13 @@
> #define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
> #define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
> #define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
> +#define NGBE_PX_MISC_IC_VF_MBOX BIT(23)
> #define NGBE_PX_MISC_IEN_GPIO BIT(26)
> #define NGBE_PX_MISC_IEN_MASK ( \
> NGBE_PX_MISC_IEN_DEV_RST | \
> NGBE_PX_MISC_IEN_ETH_LK | \
> NGBE_PX_MISC_IEN_INT_ERR | \
> + NGBE_PX_MISC_IC_VF_MBOX | \
> NGBE_PX_MISC_IEN_GPIO)
>
> #define NGBE_INTR_ALL 0x1FF
* Re: [PATCH net-next v4 6/6] net: txgbe: add sriov function support
2024-06-04 15:57 ` [PATCH net-next v4 6/6] net: txgbe: " Mengyuan Lou
@ 2024-06-05 9:48 ` Wojciech Drewek
0 siblings, 0 replies; 14+ messages in thread
From: Wojciech Drewek @ 2024-06-05 9:48 UTC (permalink / raw)
To: Mengyuan Lou, netdev; +Cc: jiawenwu, duanqiangwen
On 04.06.2024 17:57, Mengyuan Lou wrote:
> Add sriov_configure for driver ops.
> Add ndo_vf_ops for txgbe netdev ops.
> Add mailbox handler wx_msg_task for txgbe.
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
> ---
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
> drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 42 +++++++++++++++++++
> drivers/net/ethernet/wangxun/libwx/wx_sriov.h | 1 +
> drivers/net/ethernet/wangxun/libwx/wx_type.h | 1 +
> .../net/ethernet/wangxun/txgbe/txgbe_irq.c | 25 +++++++++--
> .../net/ethernet/wangxun/txgbe/txgbe_main.c | 23 ++++++++++
> .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 8 ++++
> .../net/ethernet/wangxun/txgbe/txgbe_type.h | 4 +-
> 7 files changed, 100 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> index 6d470cd0f317..375295578cff 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
> @@ -302,6 +302,15 @@ static void wx_clear_vmvir(struct wx *wx, u32 vf)
> wr32(wx, WX_TDM_VLAN_INS(vf), 0);
> }
>
> +static void wx_ping_vf(struct wx *wx, int vf)
> +{
> + u32 ping = WX_PF_CONTROL_MSG;
> +
> + if (wx->vfinfo[vf].clear_to_send)
> + ping |= WX_VT_MSGTYPE_CTS;
> + wx_write_mbx_pf(wx, &ping, 1, vf);
> +}
> +
> static void wx_set_vf_rx_tx(struct wx *wx, int vf)
> {
> u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
> @@ -975,3 +984,36 @@ void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
> }
> }
> EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
> +
> +static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
> +{
> + wx->vfinfo[vf].link_state = state;
> + switch (state) {
> + case IFLA_VF_LINK_STATE_AUTO:
> + if (netif_running(wx->netdev))
> + wx->vfinfo[vf].link_enable = true;
> + else
> + wx->vfinfo[vf].link_enable = false;
> + break;
> + case IFLA_VF_LINK_STATE_ENABLE:
> + wx->vfinfo[vf].link_enable = true;
> + break;
> + case IFLA_VF_LINK_STATE_DISABLE:
> + wx->vfinfo[vf].link_enable = false;
> + break;
> + }
> + /* restart the VF */
> + wx->vfinfo[vf].clear_to_send = false;
> + wx_ping_vf(wx, vf);
> +
> + wx_set_vf_rx_tx(wx, vf);
> +}
> +
> +void wx_set_all_vfs(struct wx *wx)
> +{
> + int i;
> +
> + for (i = 0 ; i < wx->num_vfs; i++)
> + wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
> +}
> +EXPORT_SYMBOL(wx_set_all_vfs);
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> index 7e45b3f71a7b..122d9c561ff5 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
> @@ -9,5 +9,6 @@ int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
> void wx_msg_task(struct wx *wx);
> void wx_disable_vf_rx_tx(struct wx *wx);
> void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
> +void wx_set_all_vfs(struct wx *wx);
>
> #endif /* _WX_SRIOV_H_ */
> diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> index b8f0bf93a0fb..1a4830eab763 100644
> --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
> +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
> @@ -1053,6 +1053,7 @@ struct vf_data_storage {
> u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
> u16 num_vf_mc_hashes;
> u16 vlan_count;
> + int link_state;
> };
>
> struct vf_macvlans {
> diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
> index b3e3605d1edb..e6be98865c2d 100644
> --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
> +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
> @@ -7,6 +7,7 @@
> #include "../libwx/wx_type.h"
> #include "../libwx/wx_lib.h"
> #include "../libwx/wx_hw.h"
> +#include "../libwx/wx_sriov.h"
> #include "txgbe_type.h"
> #include "txgbe_phy.h"
> #include "txgbe_irq.h"
> @@ -176,6 +177,24 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
> .map = txgbe_misc_irq_domain_map,
> };
>
> +static irqreturn_t txgbe_irq_handler(int irq, void *data)
> +{
> + struct txgbe *txgbe = data;
> + struct wx *wx = txgbe->wx;
> + u32 eicr;
> +
> + eicr = wx_misc_isb(wx, WX_ISB_MISC) & TXGBE_PX_MISC_IEN_MASK;
> + if (!eicr)
> + return IRQ_NONE;
> + txgbe->eicr = eicr;
> + if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
> + wx_msg_task(txgbe->wx);
> + wx_intr_enable(wx, TXGBE_INTR_MISC);
> + }
> +
> + return IRQ_WAKE_THREAD;
> +}
> +
> static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
> {
> struct txgbe *txgbe = data;
> @@ -184,7 +203,7 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
> unsigned int sub_irq;
> u32 eicr;
>
> - eicr = wx_misc_isb(wx, WX_ISB_MISC);
> + eicr = txgbe->eicr;
> if (eicr & TXGBE_PX_MISC_GPIO) {
> sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
> handle_nested_irq(sub_irq);
> @@ -226,7 +245,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
> struct wx *wx = txgbe->wx;
> int hwirq, err;
>
> - txgbe->misc.nirqs = 2;
> + txgbe->misc.nirqs = TXGBE_IRQ_MAX;
> txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
> &txgbe_misc_irq_domain_ops, txgbe);
> if (!txgbe->misc.domain)
> @@ -241,7 +260,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
> else
> txgbe->misc.irq = wx->pdev->irq;
>
> - err = request_threaded_irq(txgbe->misc.irq, NULL,
> + err = request_threaded_irq(txgbe->misc.irq, txgbe_irq_handler,
> txgbe_misc_irq_handle,
> IRQF_ONESHOT,
> wx->netdev->name, txgbe);
> diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
> index 8c7a74981b90..fbfd281f7e8b 100644
> --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
> +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
> @@ -14,6 +14,8 @@
> #include "../libwx/wx_type.h"
> #include "../libwx/wx_lib.h"
> #include "../libwx/wx_hw.h"
> +#include "../libwx/wx_mbx.h"
> +#include "../libwx/wx_sriov.h"
> #include "txgbe_type.h"
> #include "txgbe_hw.h"
> #include "txgbe_phy.h"
> @@ -99,6 +101,12 @@ static void txgbe_up_complete(struct wx *wx)
>
> /* enable transmits */
> netif_tx_start_all_queues(netdev);
> +
> + /* Set PF Reset Done bit so PF/VF Mail Ops can work */
> + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
> + WX_CFG_PORT_CTL_PFRSTD);
> + /* update setting rx tx for all active vfs */
> + wx_set_all_vfs(wx);
> }
>
> static void txgbe_reset(struct wx *wx)
> @@ -144,6 +152,16 @@ static void txgbe_disable_device(struct wx *wx)
> wx_err(wx, "%s: invalid bus lan id %d\n",
> __func__, wx->bus.func);
>
> + if (wx->num_vfs) {
> + /* Clear EITR Select mapping */
> + wr32(wx, WX_PX_ITRSEL, 0);
> + /* Mark all the VFs as inactive */
> + for (i = 0 ; i < wx->num_vfs; i++)
> + wx->vfinfo[i].clear_to_send = 0;
> + /* update setting rx tx for all active vfs */
> + wx_set_all_vfs(wx);
> + }
> +
> if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
> ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
> /* disable mac transmiter */
> @@ -268,8 +286,11 @@ static int txgbe_sw_init(struct wx *wx)
> /* set default work limits */
> wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
> wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
> + wx->mbx.size = WX_VXMAILBOX_SIZE;
>
> + wx->setup_tc = txgbe_setup_tc;
> wx->do_reset = txgbe_do_reset;
> + set_bit(0, &wx->fwd_bitmask);
>
> return 0;
> }
> @@ -725,6 +746,7 @@ static void txgbe_remove(struct pci_dev *pdev)
> struct net_device *netdev;
>
> netdev = wx->netdev;
> + wx_disable_sriov(wx);
> unregister_netdev(netdev);
>
> txgbe_remove_phy(txgbe);
> @@ -746,6 +768,7 @@ static struct pci_driver txgbe_driver = {
> .probe = txgbe_probe,
> .remove = txgbe_remove,
> .shutdown = txgbe_shutdown,
> + .sriov_configure = wx_pci_sriov_configure,
> };
>
> module_pci_driver(txgbe_driver);
> diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
> index 5f502265f0a6..76635d4366e4 100644
> --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
> +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
> @@ -16,6 +16,7 @@
> #include "../libwx/wx_type.h"
> #include "../libwx/wx_lib.h"
> #include "../libwx/wx_hw.h"
> +#include "../libwx/wx_sriov.h"
> #include "txgbe_type.h"
> #include "txgbe_phy.h"
> #include "txgbe_hw.h"
> @@ -179,6 +180,9 @@ static void txgbe_mac_link_down(struct phylink_config *config,
> struct wx *wx = phylink_to_wx(config);
>
> wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
> + wx->speed = 0;
> + /* ping all the active vfs to let them know we are going down */
> + wx_ping_all_vfs_with_link_status(wx, false);
> }
>
> static void txgbe_mac_link_up(struct phylink_config *config,
> @@ -215,6 +219,10 @@ static void txgbe_mac_link_up(struct phylink_config *config,
> wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
> wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
> wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
> +
> + wx->speed = speed;
> + /* ping all the active vfs to let them know we are going up */
> + wx_ping_all_vfs_with_link_status(wx, true);
> }
>
> static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
> diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
> index f434a7865cb7..e84d10adf4c1 100644
> --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
> +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
> @@ -71,12 +71,13 @@
> #define TXGBE_PX_MISC_ETH_LK BIT(18)
> #define TXGBE_PX_MISC_ETH_AN BIT(19)
> #define TXGBE_PX_MISC_INT_ERR BIT(20)
> +#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23)
> #define TXGBE_PX_MISC_GPIO BIT(26)
> #define TXGBE_PX_MISC_IEN_MASK \
> (TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
> TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
> TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
> - TXGBE_PX_MISC_GPIO)
> + TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_GPIO)
>
> /* Port cfg registers */
> #define TXGBE_CFG_PORT_ST 0x14404
> @@ -196,6 +197,7 @@ struct txgbe {
> struct gpio_chip *gpio;
> unsigned int gpio_irq;
> unsigned int link_irq;
> + u32 eicr;
> };
>
> #endif /* _TXGBE_TYPE_H_ */
* Re: [PATCH net-next v4 4/6] net: libwx: Add msg task func
2024-06-04 15:57 ` [PATCH net-next v4 4/6] net: libwx: Add msg task func Mengyuan Lou
2024-06-05 9:41 ` Wojciech Drewek
@ 2024-06-05 18:44 ` Simon Horman
1 sibling, 0 replies; 14+ messages in thread
From: Simon Horman @ 2024-06-05 18:44 UTC (permalink / raw)
To: Mengyuan Lou; +Cc: netdev, jiawenwu, duanqiangwen
On Tue, Jun 04, 2024 at 11:57:33PM +0800, Mengyuan Lou wrote:
> Implement wx_msg_task which is used to process mailbox
> messages sent by vf.
>
> Signed-off-by: Mengyuan Lou <mengyuanlou@net-swift.com>
Hi Mengyuan Lou,
Some minor comments from my side.
...
> +static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> + u8 *new_mac = ((u8 *)(&msgbuf[1]));
> +
> + if (!is_valid_ether_addr(new_mac)) {
> + wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
> + return -EINVAL;
> + }
> +
> + if (wx->vfinfo[vf].pf_set_mac &&
> + memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
> + wx_err(wx,
> + "VF %d attempted to set a MAC address but it already had a MAC address.",
> + vf);
> + return -EBUSY;
> + }
> + return wx_set_vf_mac(wx, vf, new_mac) < 0;
This effectively returns a bool - 1 on error, 0 otherwise.
But I think it would be more natural to consistently return
a negative error value on failure - as is done above - and 0 on success.

So perhaps something like this (completely untested!):

	err = wx_set_vf_mac(wx, vf, new_mac);
	if (err)
		return err;

	return 0;
> +}
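
For clarity, the whole helper would then end up roughly as below - again a
completely untested sketch, and it assumes wx_set_vf_mac() itself returns 0
on success or a negative errno:

static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
{
	u8 *new_mac = ((u8 *)(&msgbuf[1]));

	if (!is_valid_ether_addr(new_mac)) {
		wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
		return -EINVAL;
	}

	if (wx->vfinfo[vf].pf_set_mac &&
	    memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
		wx_err(wx,
		       "VF %d attempted to set a MAC address but it already had a MAC address.",
		       vf);
		return -EBUSY;
	}

	/* pass wx_set_vf_mac()'s return value (0 or -errno) straight through */
	return wx_set_vf_mac(wx, vf, new_mac);
}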
...
> +static int wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
> +{
> + struct net_device *netdev = wx->netdev;
> + u32 index, vf_bit, vfre;
> + u32 max_frs, reg_val;
> + int pf_max_frame;
> + int err = 0;
> +
> + pf_max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
> + switch (wx->vfinfo[vf].vf_api) {
> + case wx_mbox_api_11 ... wx_mbox_api_13:
> + /* Version 1.1 supports jumbo frames on VFs if PF has
> + * jumbo frames enabled which means legacy VFs are
> + * disabled
> + */
> + if (pf_max_frame > ETH_FRAME_LEN)
> + break;
> + fallthrough;
> + default:
> + /* If the PF or VF are running w/ jumbo frames enabled
> + * we need to shut down the VF Rx path as we cannot
> + * support jumbo frames on legacy VFs
> + */
> + if (pf_max_frame > ETH_FRAME_LEN ||
> + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)))
> + err = -EINVAL;
> + break;
> + }
> +
> + /* determine VF receive enable location */
> + vf_bit = vf % 32;
> + index = vf / 32;
> +
> + /* enable or disable receive depending on error */
> + vfre = rd32(wx, WX_RDM_VF_RE(index));
> + if (err)
> + vfre &= ~BIT(vf_bit);
> + else
> + vfre |= BIT(vf_bit);
> + wr32(wx, WX_RDM_VF_RE(index), vfre);
> +
> + if (err) {
> + wx_err(wx, "VF max_frame %d out of range\n", max_frame);
> + return err;
> + }
> + /* pull current max frame size from hardware */
> + max_frs = DIV_ROUND_UP(max_frame, 1024);
> + reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
> + if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
> + wr32(wx, WX_MAC_WDG_TIMEOUT, max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
nit: This could trivially be line-wrapped to <= 80 columns wide:

		wr32(wx, WX_MAC_WDG_TIMEOUT,
		     max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
Flagged by checkpatch.pl --max-line-length=80
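
(For reference, that is just something along the lines of:

	./scripts/checkpatch.pl --max-line-length=80 <this-patch.patch>

run from the top of the kernel tree, with the real patch file name in place
of the placeholder.)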
> +
> + return 0;
> +}
...
> +static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> + int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
> + WX_VT_MSGINFO_SHIFT;
> + u8 *new_mac = ((u8 *)(&msgbuf[1]));
> + int err;
> +
> + if (wx->vfinfo[vf].pf_set_mac && index > 0) {
> + wx_err(wx, "VF %d requested MACVLAN filter but is administratively denied\n", vf);
> + return -EINVAL;
> + }
> +
> + /* An non-zero index indicates the VF is setting a filter */
> + if (index) {
> + if (!is_valid_ether_addr(new_mac)) {
> + wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
> + return -EINVAL;
> + }
> + /* If the VF is allowed to set MAC filters then turn off
> + * anti-spoofing to avoid false positives.
> + */
> + if (wx->vfinfo[vf].spoofchk_enabled)
> + wx_set_vf_spoofchk(wx->netdev, vf, false);
> + }
> +
> + err = wx_set_vf_macvlan(wx, vf, index, new_mac);
> + if (err == -ENOSPC)
> + wx_err(wx,
> + "VF %d has requested a MACVLAN filter but there is no space for it\n",
> + vf);
> +
> + return err < 0;
As per my comment on wx_set_vf_mac_addr(),
this return scheme seems a little unorthodox.
I'd suggest something like this (completely untested!):
err = wx_set_vf_macvlan(wx, vf, index, new_mac);
if (err == -ENOSPC)
wx_err(...)
if (err)
return err;
return 0;
> +}
...
> +static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> + int xcast_mode = msgbuf[1];
> + u32 vmolr, disable, enable;
> +
> + /* verify the PF is supporting the correct APIs */
> + switch (wx->vfinfo[vf].vf_api) {
> + case wx_mbox_api_12:
> + /* promisc introduced in 1.3 version */
> + if (xcast_mode == WXVF_XCAST_MODE_PROMISC)
> + return -EOPNOTSUPP;
> + fallthrough;
> + case wx_mbox_api_13:
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
> + if (wx->vfinfo[vf].xcast_mode == xcast_mode)
> + goto out;
> +
> + switch (xcast_mode) {
> + case WXVF_XCAST_MODE_NONE:
> + disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
> + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
nit: This could also be trivially line-wrapped to less than 80 columns wide.
Likewise a few times below.
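e.g. the WXVF_XCAST_MODE_NONE case above could become something like:

		disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			  WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;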
> + enable = 0;
> + break;
> + case WXVF_XCAST_MODE_MULTI:
> + disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
> + break;
> + case WXVF_XCAST_MODE_ALLMULTI:
> + disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | WX_PSR_VM_L2CTL_MPE;
> + break;
> + case WXVF_XCAST_MODE_PROMISC:
> + disable = 0;
> + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
> + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
> +
> + vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
> + vmolr &= ~disable;
> + vmolr |= enable;
> + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
> +
> + wx->vfinfo[vf].xcast_mode = xcast_mode;
> +out:
> + msgbuf[1] = xcast_mode;
> +
> + return 0;
> +}
...