public inbox for netdev@vger.kernel.org
 help / color / mirror / Atom feed
From: Junyang Han <han.junyang@zte.com.cn>
To: andrew+netdev@lunn.ch
Cc: netdev@vger.kernel.org, vadim.fedorenko@linux.dev,
	davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
	pabeni@redhat.com, han.junyang@zte.com.cn, ran.ming@zte.com.cn,
	han.chengfei@zte.com.cn, zhang.yanze@zte.com.cn
Subject: [PATCH net-next v3 3/3] net/ethernet/zte/dinghai: add hardware register access and PCI capability scanning
Date: Thu, 30 Apr 2026 23:11:38 +0800	[thread overview]
Message-ID: <20260430151138.2813381-4-han.junyang@zte.com.cn> (raw)
In-Reply-To: <20260430151138.2813381-1-han.junyang@zte.com.cn>


[-- Attachment #1.1.1: Type: text/plain, Size: 19448 bytes --]

Implement PCI configuration space access, BAR mapping, capability
scanning (common/notify/device), and hardware queue register
definitions for DingHai PF device.

Signed-off-by: Junyang Han <han.junyang@zte.com.cn>
---
 drivers/net/ethernet/zte/dinghai/dh_queue.h |  71 ++++
 drivers/net/ethernet/zte/dinghai/en_pf.c    | 446 +++++++++++++++++++-
 drivers/net/ethernet/zte/dinghai/en_pf.h    |  38 ++
 3 files changed, 552 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/ethernet/zte/dinghai/dh_queue.h

diff --git a/drivers/net/ethernet/zte/dinghai/dh_queue.h b/drivers/net/ethernet/zte/dinghai/dh_queue.h
new file mode 100644
index 000000000000..5067c73fed33
--- /dev/null
+++ b/drivers/net/ethernet/zte/dinghai/dh_queue.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ZTE DingHai Ethernet driver - PCI capability definitions
+ * Copyright (c) 2022-2026, ZTE Corporation.
+ */
+
+#ifndef __DH_QUEUE_H__
+#define __DH_QUEUE_H__
+
+/* Vector value used to disable MSI for queue.
+ * NOTE(review): queue_msix_vector below is a 16-bit (__le16) register and
+ * virtio-style devices use 0xffff as the no-vector sentinel — confirm the
+ * hardware really expects 0xff here.
+ */
+#define ZXDH_MSI_NO_VECTOR      0xff
+
+/* Status byte for guest to report progress, and synchronize features.
+ * Written to / read from the device_status register in the common config.
+ */
+/* We have seen device and processed generic fields */
+#define ZXDH_CONFIG_S_ACKNOWLEDGE 1
+/* We have found a driver for the device. */
+#define ZXDH_CONFIG_S_DRIVER      2
+/* Driver has used its parts of the config, and is happy */
+#define ZXDH_CONFIG_S_DRIVER_OK   4
+/* Driver has finished configuring features */
+#define ZXDH_CONFIG_S_FEATURES_OK 8
+/* Device entered invalid state, driver must reset it */
+#define ZXDH_CONFIG_S_NEEDS_RESET 0x40
+/* We've given up on this device */
+#define ZXDH_CONFIG_S_FAILED      0x80
+
+/* This is the PCI capability header, as laid out in PCI configuration
+ * space; fields are read individually with pci_read_config_* at the
+ * capability position, so layout must not change.
+ */
+struct zxdh_pf_pci_cap {
+	__u8 cap_vndr;		/* Generic PCI field: PCI_CAP_ID_VNDR */
+	__u8 cap_next;		/* Generic PCI field: next ptr. */
+	__u8 cap_len;		/* Generic PCI field: capability length */
+	__u8 cfg_type;		/* Identifies the structure. */
+	__u8 bar;		/* Where to find it. */
+	__u8 id;		/* Multiple capabilities of the same type */
+	__u8 padding[2];		/* Pad to full dword. */
+	__le32 offset;		/* Offset within bar. */
+	__le32 length;		/* Length of the structure, in bytes. */
+};
+
+/* Fields in ZXDH_PF_PCI_CAP_COMMON_CFG.
+ * This structure is accessed through an __iomem mapping of the device's
+ * common-config BAR region, so field order and widths must match the
+ * hardware register layout exactly — do not reorder or repack.
+ */
+struct zxdh_pf_pci_common_cfg {
+	/* About the whole device. */
+	__le32 device_feature_select; /* read-write */
+	__le32 device_feature;	/* read-only */
+	__le32 guest_feature_select; /* read-write */
+	__le32 guest_feature;		/* read-write */
+	__le16 msix_config;		/* read-write */
+	__le16 num_queues;		/* read-only */
+	__u8 device_status;		/* read-write */
+	__u8 config_generation;	/* read-only */
+
+	/* About a specific virtqueue, banked by queue_select. */
+	__le16 queue_select;		/* read-write */
+	__le16 queue_size;		/* read-write, power of 2. */
+	__le16 queue_msix_vector;	/* read-write */
+	__le16 queue_enable;		/* read-write */
+	__le16 queue_notify_off;	/* read-only */
+	__le32 queue_desc_lo;		/* read-write */
+	__le32 queue_desc_hi;		/* read-write */
+	__le32 queue_avail_lo;		/* read-write */
+	__le32 queue_avail_hi;		/* read-write */
+	__le32 queue_used_lo;		/* read-write */
+	__le32 queue_used_hi;		/* read-write */
+};
+
+/* Notify capability: the vendor capability header plus the multiplier
+ * applied to each queue's queue_notify_off to locate its doorbell.
+ */
+struct zxdh_pf_pci_notify_cap {
+	struct zxdh_pf_pci_cap cap;
+	__le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+#endif /* __DH_QUEUE_H__ */
diff --git a/drivers/net/ethernet/zte/dinghai/en_pf.c b/drivers/net/ethernet/zte/dinghai/en_pf.c
index f4a923b76037..211893b2f8e3 100644
--- a/drivers/net/ethernet/zte/dinghai/en_pf.c
+++ b/drivers/net/ethernet/zte/dinghai/en_pf.c
@@ -9,6 +9,7 @@
 #include <net/devlink.h>
 #include "en_pf.h"
 #include "dh_log.h"
+#include "dh_queue.h"
 
 MODULE_AUTHOR("Junyang Han <han.junyang@zte.com.cn>");
 MODULE_DESCRIPTION("ZTE DingHai series Ethernet driver");
@@ -64,7 +65,7 @@ static int dh_pf_pci_init(struct dh_core_dev *dev)
 	pf_dev = dev->priv;
 	pf_dev->pci_ioremap_addr[0] =
 		ioremap(pci_resource_start(dev->pdev, 0),
-				  pci_resource_len(dev->pdev, 0));
+			pci_resource_len(dev->pdev, 0));
 	if (!pf_dev->pci_ioremap_addr[0]) {
 		ret = -ENOMEM;
 		LOG_ERR(dev, "ioremap(0x%llx, 0x%llx) failed\n",
@@ -77,7 +78,7 @@ static int dh_pf_pci_init(struct dh_core_dev *dev)
 
 err_pci_save_state:
 	pci_release_selected_regions(dev->pdev,
-				pci_select_bars(dev->pdev, IORESOURCE_MEM));
+				     pci_select_bars(dev->pdev, IORESOURCE_MEM));
 err_pci:
 	pci_disable_device(dev->pdev);
 	return ret;
@@ -90,10 +91,449 @@ void dh_pf_pci_close(struct dh_core_dev *dev)
 	pf_dev = dev->priv;
 	iounmap(pf_dev->pci_ioremap_addr[0]);
 	pci_release_selected_regions(dev->pdev,
-				pci_select_bars(dev->pdev, IORESOURCE_MEM));
+				     pci_select_bars(dev->pdev, IORESOURCE_MEM));
 	pci_disable_device(dev->pdev);
 }
 
+/* Walk the vendor-specific capability list looking for one of the given
+ * cfg_type whose BAR matches @ioresource_types. On a hit, set the BAR's
+ * bit in @bars and return the capability's config-space position;
+ * return 0 when no matching capability exists.
+ */
+int zxdh_pf_pci_find_capability(struct pci_dev *pdev, u8 cfg_type,
+				u32 ioresource_types, int *bars)
+{
+	int pos;
+
+	for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0;
+	     pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) {
+		u8 type, bar;
+
+		pci_read_config_byte(pdev,
+				     pos + offsetof(struct zxdh_pf_pci_cap,
+						    cfg_type), &type);
+		pci_read_config_byte(pdev,
+				     pos + offsetof(struct zxdh_pf_pci_cap,
+						    bar), &bar);
+
+		/* Ignore structures with reserved BAR values or the
+		 * wrong type.
+		 */
+		if (bar > ZXDH_PF_MAX_BAR_VAL || type != cfg_type)
+			continue;
+
+		if (pci_resource_len(pdev, bar) &&
+		    (pci_resource_flags(pdev, bar) & ioresource_types)) {
+			*bars |= 1 << bar;
+			return pos;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * zxdh_pf_map_capability() - ioremap the region described by a vendor cap
+ * @dh_dev: core device
+ * @off: config-space position of the capability
+ * @minlen: minimum usable length the caller requires
+ * @align: required alignment of the final in-BAR offset
+ * @start: bytes to skip from the capability's offset
+ * @size: upper bound on the length to map
+ * @len: optional out: actual mapped length
+ * @pa: optional out: physical address of the mapped region
+ * @bar_off: optional out: raw offset within the BAR (before @start)
+ *
+ * Validates the capability's offset/length against the BAR bounds
+ * before mapping. Returns the mapped address, or NULL on any
+ * validation or mapping failure.
+ */
+void __iomem *zxdh_pf_map_capability(struct dh_core_dev *dh_dev, int off,
+				     size_t minlen, u32 align,
+				     u32 start, u32 size,
+				     size_t *len, resource_size_t *pa,
+				     u32 *bar_off)
+{
+	struct pci_dev *pdev = dh_dev->pdev;
+	void __iomem *p;
+	u32 offset;
+	u32 length;
+	u8 bar;
+
+	pci_read_config_byte(pdev,
+			     off + offsetof(struct zxdh_pf_pci_cap, bar), &bar);
+	pci_read_config_dword(pdev,
+			      off + offsetof(struct zxdh_pf_pci_cap,
+						offset), &offset);
+	pci_read_config_dword(pdev,
+			      off + offsetof(struct zxdh_pf_pci_cap,
+						length), &length);
+
+	if (bar_off)
+		*bar_off = offset;
+
+	/* The capability must cover at least @start + @minlen bytes */
+	if (length <= start) {
+		LOG_ERR(dh_dev, "bad capability len %u (>%u expected)\n",
+			length, start);
+		return NULL;
+	}
+
+	if (length - start < minlen) {
+		LOG_ERR(dh_dev, "bad capability len %u (>=%zu expected)\n",
+			length, minlen);
+		return NULL;
+	}
+
+	/* Guard against u32 wrap-around when advancing offset by start */
+	length -= start;
+	if (start + offset < offset) {
+		LOG_ERR(dh_dev, "map wrap-around %u+%u\n", start, offset);
+		return NULL;
+	}
+
+	offset += start;
+	if (offset & (align - 1)) {
+		LOG_ERR(dh_dev, "offset %u not aligned to %u\n", offset, align);
+		return NULL;
+	}
+
+	if (length > size)
+		length = size;
+
+	if (len)
+		*len = length;
+
+	/* The mapped window must lie entirely within the BAR */
+	if (minlen + offset < minlen ||
+	    minlen + offset > pci_resource_len(pdev, bar)) {
+		LOG_ERR(dh_dev,
+			"map custom queue %zu@%u out of range on bar %i length %lu\n",
+			minlen, offset, bar,
+			(unsigned long)pci_resource_len(pdev, bar));
+		return NULL;
+	}
+
+	p = pci_iomap_range(pdev, bar, offset, length);
+	if (!p) {
+		LOG_ERR(dh_dev, "unable to map custom queue %u@%u on bar %i\n",
+			length, offset, bar);
+	} else if (pa) {
+		*pa = pci_resource_start(pdev, bar) + offset;
+	}
+
+	return p;
+}
+
+/* Locate and map the common-config capability. Returns -ENODEV when the
+ * capability is absent (device is left for a legacy driver on BAR 0).
+ */
+int zxdh_pf_common_cfg_init(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	int cap_pos;
+
+	cap_pos = zxdh_pf_pci_find_capability(dh_dev->pdev,
+					      ZXDH_PCI_CAP_COMMON_CFG,
+					      IORESOURCE_IO | IORESOURCE_MEM,
+					      &pf_dev->modern_bars);
+	if (!cap_pos) {
+		LOG_ERR(dh_dev,
+			"missing capabilities %i, leaving for legacy driver\n",
+			cap_pos);
+		return -ENODEV;
+	}
+
+	pf_dev->common = zxdh_pf_map_capability(dh_dev, cap_pos,
+						sizeof(struct zxdh_pf_pci_common_cfg),
+						ZXDH_PF_ALIGN4, 0,
+						sizeof(struct zxdh_pf_pci_common_cfg),
+						NULL, NULL, NULL);
+	if (!pf_dev->common) {
+		LOG_ERR(dh_dev, "pf_dev->common is null\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Locate the notify capability and map its doorbell region.
+ * If the whole notify region fits within one page it is mapped up front;
+ * otherwise the capability position is saved and each VQ's doorbell is
+ * mapped individually later (see zxdh_pf_map_vq_notify()).
+ */
+int zxdh_pf_notify_cfg_init(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	struct pci_dev *pdev = dh_dev->pdev;
+	u32 notify_length;
+	u32 notify_offset;
+	int notify;
+
+	/* If common is there, these should be too... */
+	notify = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_NOTIFY_CFG,
+					     IORESOURCE_IO | IORESOURCE_MEM,
+					     &pf_dev->modern_bars);
+	if (notify == 0) {
+		LOG_ERR(dh_dev, "missing capabilities %i\n", notify);
+		return -EINVAL;
+	}
+
+	/* pci_read_config_dword() writes exactly 32 bits, so the targets
+	 * must be u32 — a size_t would be only partially written on 64-bit
+	 * (and is an incompatible pointer type).
+	 */
+	pci_read_config_dword(pdev,
+			      notify + offsetof(struct zxdh_pf_pci_notify_cap,
+						notify_off_multiplier),
+			      &pf_dev->notify_offset_multiplier);
+	pci_read_config_dword(pdev,
+			      notify + offsetof(struct zxdh_pf_pci_notify_cap,
+						cap.length), &notify_length);
+	pci_read_config_dword(pdev,
+			      notify + offsetof(struct zxdh_pf_pci_notify_cap,
+						cap.offset), &notify_offset);
+
+	/* We don't know how many VQs we'll map, ahead of the time.
+	 * If notify length is small, map it all now. Otherwise,
+	 * map each VQ individually later.
+	 */
+	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
+		pf_dev->notify_base = zxdh_pf_map_capability(dh_dev, notify,
+							     ZXDH_PF_MAP_MINLEN2,
+							     ZXDH_PF_ALIGN2, 0,
+							     notify_length,
+							     &pf_dev->notify_len,
+							     &pf_dev->notify_pa,
+							     NULL);
+		if (!pf_dev->notify_base) {
+			LOG_ERR(dh_dev, "pf_dev->notify_base is null\n");
+			return -EINVAL;
+		}
+	} else {
+		pf_dev->notify_map_cap = notify;
+	}
+
+	return 0;
+}
+
+/* Map the device-specific config region, if the device advertises one.
+ * The capability is optional: absence is not an error.
+ */
+int zxdh_pf_device_cfg_init(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	int device;
+
+	/* Device capability is only mandatory for devices that have
+	 * device-specific configuration.
+	 */
+	device = zxdh_pf_pci_find_capability(dh_dev->pdev,
+					     ZXDH_PCI_CAP_DEVICE_CFG,
+					     IORESOURCE_IO | IORESOURCE_MEM,
+					     &pf_dev->modern_bars);
+	if (!device)
+		return 0;
+
+	/* We don't know how much we should map, but PAGE_SIZE is more
+	 * than enough for all existing devices.
+	 */
+	pf_dev->device = zxdh_pf_map_capability(dh_dev, device, 0,
+						ZXDH_PF_ALIGN4, 0, PAGE_SIZE,
+						&pf_dev->device_len, NULL,
+						&pf_dev->dev_cfg_bar_off);
+	if (!pf_dev->device) {
+		LOG_ERR(dh_dev, "pf_dev->device is null\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Tear down the capability mappings created by zxdh_pf_modern_cfg_init().
+ * device and notify_base are optional mappings; common is always mapped
+ * once init succeeded.
+ */
+void zxdh_pf_modern_cfg_uninit(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf = dh_core_priv(dh_dev);
+
+	if (pf->device)
+		pci_iounmap(dh_dev->pdev, pf->device);
+	if (pf->notify_base)
+		pci_iounmap(dh_dev->pdev, pf->notify_base);
+	pci_iounmap(dh_dev->pdev, pf->common);
+}
+
+/* Map the common, notify, and (optional) device config capabilities.
+ * On failure, already-created mappings are undone and the underlying
+ * error code is propagated to the caller.
+ */
+int zxdh_pf_modern_cfg_init(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	struct pci_dev *pdev = dh_dev->pdev;
+	int ret;
+
+	ret = zxdh_pf_common_cfg_init(dh_dev);
+	if (ret) {
+		LOG_ERR(dh_dev, "zxdh_pf_common_cfg_init failed: %d\n", ret);
+		/* Propagate the real error: -ENODEV means "leave this
+		 * device for a legacy driver" and must not be masked
+		 * as -EINVAL.
+		 */
+		return ret;
+	}
+
+	ret = zxdh_pf_notify_cfg_init(dh_dev);
+	if (ret) {
+		LOG_ERR(dh_dev, "zxdh_pf_notify_cfg_init failed: %d\n", ret);
+		goto err_map_notify;
+	}
+
+	ret = zxdh_pf_device_cfg_init(dh_dev);
+	if (ret) {
+		LOG_ERR(dh_dev, "zxdh_pf_device_cfg_init failed: %d\n", ret);
+		goto err_map_device;
+	}
+
+	return 0;
+
+err_map_device:
+	/* notify_base is NULL when notifications are mapped per-vq */
+	if (pf_dev->notify_base)
+		pci_iounmap(pdev, pf_dev->notify_base);
+err_map_notify:
+	pci_iounmap(pdev, pf_dev->common);
+	return ret;
+}
+
+/* Select a queue in the common config and read back its notify offset.
+ * packed_status chooses which of the two indices is used for selection.
+ */
+u16 zxdh_pf_get_queue_notify_off(struct dh_core_dev *dh_dev,
+				 u16 phy_index, u16 index)
+{
+	struct zxdh_pf_device *pf = dh_core_priv(dh_dev);
+	u16 sel = pf->packed_status ? phy_index : index;
+
+	iowrite16(sel, &pf->common->queue_select);
+	return ioread16(&pf->common->queue_notify_off);
+}
+
+/**
+ * zxdh_pf_map_vq_notify() - resolve a queue's notification (doorbell) address
+ * @dh_dev: core device
+ * @phy_index: physical queue index (used when packed_status is set)
+ * @index: logical queue index
+ * @pa: optional out: physical address of the doorbell
+ *
+ * If the whole notify region was mapped up front (notify_base), return a
+ * pointer into it after bounds-checking; otherwise map just this queue's
+ * doorbell from the saved capability (unmap with zxdh_pf_unmap_vq_notify()).
+ */
+void __iomem *zxdh_pf_map_vq_notify(struct dh_core_dev *dh_dev,
+				    u16 phy_index, u16 index,
+				     resource_size_t *pa)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	u16 off;
+
+	off = zxdh_pf_get_queue_notify_off(dh_dev, phy_index, index);
+
+	if (pf_dev->notify_base) {
+		/* offset should not wrap; the doorbell (2 bytes) must fit
+		 * inside the pre-mapped notify region
+		 */
+		if ((u64)off *
+			pf_dev->notify_offset_multiplier + 2 > pf_dev->notify_len) {
+			LOG_ERR(dh_dev,
+				"bad notification offset %u (x %u) for queue %u > %zd",
+				off, pf_dev->notify_offset_multiplier, phy_index,
+				pf_dev->notify_len);
+			return NULL;
+		}
+
+		if (pa)
+			*pa = pf_dev->notify_pa + off * pf_dev->notify_offset_multiplier;
+
+		return pf_dev->notify_base + off * pf_dev->notify_offset_multiplier;
+	} else {
+		/* Per-queue mapping: 2 bytes at the queue's offset within
+		 * the notify capability, 2-byte aligned
+		 */
+		return zxdh_pf_map_capability(dh_dev, pf_dev->notify_map_cap, 2, 2,
+					      off * pf_dev->notify_offset_multiplier,
+					      2, NULL, pa, NULL);
+	}
+}
+
+/* Release a doorbell mapping obtained from zxdh_pf_map_vq_notify().
+ * Only per-queue mappings need unmapping; pointers into the shared
+ * notify_base region are left alone.
+ */
+void zxdh_pf_unmap_vq_notify(struct dh_core_dev *dh_dev, void *priv)
+{
+	struct zxdh_pf_device *pf = dh_core_priv(dh_dev);
+
+	if (pf->notify_base)
+		return;
+
+	pci_iounmap(dh_dev->pdev, priv);
+}
+
+/* Write the device_status byte in the common config */
+void zxdh_pf_set_status(struct dh_core_dev *dh_dev, u8 status)
+{
+	struct zxdh_pf_device *pf = dh_core_priv(dh_dev);
+
+	iowrite8(status, &pf->common->device_status);
+}
+
+/* Read the device_status byte from the common config */
+u8 zxdh_pf_get_status(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf = dh_core_priv(dh_dev);
+
+	return ioread8(&pf->common->device_status);
+}
+
+/* Read the device's config generation counter from the common config.
+ *
+ * NOTE(review): static and unreferenced anywhere in this patch, so it
+ * will trigger -Wunused-function until a caller lands — confirm a later
+ * patch uses it. LOG_INFO on every read may also be noisy at runtime;
+ * consider a debug log level.
+ */
+static u8 zxdh_pf_get_cfg_gen(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	u8 config_generation;
+
+	config_generation = ioread8(&pf_dev->common->config_generation);
+	LOG_INFO(dh_dev, "config_generation is %d\n", config_generation);
+
+	return config_generation;
+}
+
+/* Read a VF's MAC from its per-VF config region: a 32-bit low word
+ * followed by a 16-bit high word at ZXDH_DEV_MAC_HIGH_OFFSET.
+ *
+ * NOTE(review): when pf_sriov_cap_base is unset, @mac is left untouched —
+ * callers must not rely on it being initialized in that case.
+ */
+void zxdh_pf_get_vf_mac(struct dh_core_dev *dh_dev, u8 *mac, int vf_id)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	void __iomem *base;
+	u32 mac_lo;
+	u16 mac_hi;
+
+	if (!pf_dev->pf_sriov_cap_base)
+		return;
+
+	/* Per-VF region: one sriov_bar_size stride per VF */
+	base = pf_dev->pf_sriov_cap_base +
+	       pf_dev->sriov_bar_size * vf_id +
+	       pf_dev->dev_cfg_bar_off;
+
+	mac_lo = ioread32(base);
+	mac[0] = mac_lo & 0xff;
+	mac[1] = (mac_lo >> 8) & 0xff;
+	mac[2] = (mac_lo >> 16) & 0xff;
+	mac[3] = (mac_lo >> 24) & 0xff;
+	mac_hi = ioread16(base + ZXDH_DEV_MAC_HIGH_OFFSET);
+	mac[4] = mac_hi & 0xff;
+	mac[5] = (mac_hi >> 8) & 0xff;
+}
+
+/* Write a VF's MAC into its per-VF config region: a 32-bit low word
+ * followed by a 16-bit high word at ZXDH_DEV_MAC_HIGH_OFFSET.
+ * No-op when the SR-IOV capability region is not mapped.
+ */
+void zxdh_pf_set_vf_mac_reg(struct zxdh_pf_device *pf_dev,
+			    u8 *mac, int vf_id)
+{
+	void __iomem *base;
+	u32 mac_lo;
+	u16 mac_hi;
+
+	if (!pf_dev->pf_sriov_cap_base)
+		return;
+
+	/* Per-VF region: one sriov_bar_size stride per VF */
+	base = pf_dev->pf_sriov_cap_base +
+	       pf_dev->sriov_bar_size * vf_id +
+	       pf_dev->dev_cfg_bar_off;
+
+	mac_lo = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	mac_hi = mac[4] | (mac[5] << 8);
+	iowrite32(mac_lo, base);
+	iowrite16(mac_hi, base + ZXDH_DEV_MAC_HIGH_OFFSET);
+}
+
+/* Thin wrapper: resolve the PF private data and delegate the register
+ * write to zxdh_pf_set_vf_mac_reg().
+ */
+void zxdh_pf_set_vf_mac(struct dh_core_dev *dh_dev, u8 *mac, int vf_id)
+{
+	zxdh_pf_set_vf_mac_reg(dh_core_priv(dh_dev), mac, vf_id);
+}
+
+/* Write the PF's MAC into the device config region: a 32-bit low word
+ * at offset 0 and a 16-bit high word at ZXDH_DEV_MAC_HIGH_OFFSET.
+ * (Locals renamed lowercase per kernel style — checkpatch: Avoid CamelCase.)
+ */
+void zxdh_set_mac(struct dh_core_dev *dh_dev, u8 *mac)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	u32 mac_lo;
+	u16 mac_hi;
+
+	mac_lo = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	mac_hi = mac[4] | (mac[5] << 8);
+	iowrite32(mac_lo, pf_dev->device);
+	iowrite16(mac_hi, pf_dev->device + ZXDH_DEV_MAC_HIGH_OFFSET);
+}
+
+/* Read the PF's MAC from the device config region: a 32-bit low word at
+ * offset 0 and a 16-bit high word at ZXDH_DEV_MAC_HIGH_OFFSET.
+ * (Locals renamed lowercase per kernel style — checkpatch: Avoid CamelCase.)
+ */
+void zxdh_get_mac(struct dh_core_dev *dh_dev, u8 *mac)
+{
+	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+	u32 mac_lo;
+	u16 mac_hi;
+
+	mac_lo = ioread32(pf_dev->device);
+	mac[0] = mac_lo & 0xff;
+	mac[1] = (mac_lo >> 8) & 0xff;
+	mac[2] = (mac_lo >> 16) & 0xff;
+	mac[3] = (mac_lo >> 24) & 0xff;
+	mac_hi = ioread16(pf_dev->device + ZXDH_DEV_MAC_HIGH_OFFSET);
+	mac[4] = mac_hi & 0xff;
+	mac[5] = (mac_hi >> 8) & 0xff;
+}
+
+/* Read the 64-bit device feature word, exposed by the device as two
+ * banked 32-bit reads via device_feature_select.
+ */
+u64 zxdh_pf_get_features(struct dh_core_dev *dh_dev)
+{
+	struct zxdh_pf_device *pf = dh_core_priv(dh_dev);
+	u64 lo, hi;
+
+	iowrite32(0, &pf->common->device_feature_select);
+	lo = ioread32(&pf->common->device_feature);
+	iowrite32(1, &pf->common->device_feature_select);
+	hi = ioread32(&pf->common->device_feature);
+
+	return lo | (hi << 32);
+}
+
+/* Write the 64-bit guest feature word as two banked 32-bit writes via
+ * guest_feature_select.
+ */
+void zxdh_pf_set_features(struct dh_core_dev *dh_dev, u64 features)
+{
+	struct zxdh_pf_device *pf = dh_core_priv(dh_dev);
+
+	iowrite32(0, &pf->common->guest_feature_select);
+	iowrite32((u32)features, &pf->common->guest_feature);
+	iowrite32(1, &pf->common->guest_feature_select);
+	iowrite32((u32)(features >> 32), &pf->common->guest_feature);
+}
+
 static int dh_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct dh_core_dev *dh_dev;
diff --git a/drivers/net/ethernet/zte/dinghai/en_pf.h b/drivers/net/ethernet/zte/dinghai/en_pf.h
index 0c47f3a38a9d..33e3c957e3a3 100644
--- a/drivers/net/ethernet/zte/dinghai/en_pf.h
+++ b/drivers/net/ethernet/zte/dinghai/en_pf.h
@@ -15,6 +15,24 @@
 #define ZXDH_PF_DEVICE_ID	0x8040
 #define ZXDH_VF_DEVICE_ID	0x8041
 
+/* Vendor capability cfg_type values */
+/* Common configuration */
+#define ZXDH_PCI_CAP_COMMON_CFG	1
+/* Notifications */
+#define ZXDH_PCI_CAP_NOTIFY_CFG	2
+/* ISR access */
+#define ZXDH_PCI_CAP_ISR_CFG		3
+/* Device specific configuration */
+#define ZXDH_PCI_CAP_DEVICE_CFG	4
+/* PCI configuration access */
+#define ZXDH_PCI_CAP_PCI_CFG		5
+
+/* Highest valid PCI BAR index (BARs 0-5) */
+#define ZXDH_PF_MAX_BAR_VAL		0x5
+/* Alignment requirements for mapped capability offsets */
+#define ZXDH_PF_ALIGN4			4
+#define ZXDH_PF_ALIGN2			2
+/* Minimum mapped length for the notify (doorbell) region */
+#define ZXDH_PF_MAP_MINLEN2		2
+
+/* Offset of the high 16 bits of the MAC within device config */
+#define ZXDH_DEV_MAC_HIGH_OFFSET	4
+
 enum dh_coredev_type {
 	DH_COREDEV_PF,
 	DH_COREDEV_VF,
@@ -34,7 +52,27 @@ struct dh_core_dev {
 };
 
 struct zxdh_pf_device {
+	/* Mapped common config registers (non-legacy mode) */
+	struct zxdh_pf_pci_common_cfg __iomem *common;
+	/* Device-specific config mapping (non-legacy mode) */
+	void __iomem *device;
+	/* Base of vq notifications (non-legacy mode) */
+	void __iomem *notify_base;
+	void __iomem *pf_sriov_cap_base;
+	/* Physical base of vq notifications */
+	resource_size_t notify_pa;
+	/* So we can sanity-check accesses. */
+	size_t notify_len;
+	size_t device_len;
+	/* Capability for when we need to map notifications per-vq. */
+	s32 notify_map_cap;
+	/* Multiply queue_notify_off by this value (non-legacy mode) */
+	u32 notify_offset_multiplier;
+	/* Bitmask of BARs claimed by modern capabilities */
+	s32 modern_bars;
+
 	void __iomem *pci_ioremap_addr[6];
+	/* Per-VF stride within the SR-IOV config region */
+	u64 sriov_bar_size;
+	/* Offset of device config within its BAR */
+	u32 dev_cfg_bar_off;
+	bool packed_status;
 	bool bar_chan_valid;
 	bool vepa;
 	struct mutex irq_lock; /* Protects IRQ operations */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 49437 bytes --]

      parent reply	other threads:[~2026-04-30 15:42 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-30 15:11 [PATCH net-next v3 0/3] Add ZTE DingHai Ethernet PF driver Junyang Han
2026-04-30 15:11 ` [PATCH net-next v3 1/3] net/ethernet: add ZTE network driver support Junyang Han
2026-04-30 19:34   ` Andrew Lunn
2026-04-30 15:11 ` [PATCH net-next v3 2/3] net/ethernet/zte/dinghai: add logging infrastructure Junyang Han
2026-04-30 19:36   ` Andrew Lunn
2026-04-30 15:11 ` Junyang Han [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260430151138.2813381-4-han.junyang@zte.com.cn \
    --to=han.junyang@zte.com.cn \
    --cc=andrew+netdev@lunn.ch \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=han.chengfei@zte.com.cn \
    --cc=kuba@kernel.org \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    --cc=ran.ming@zte.com.cn \
    --cc=vadim.fedorenko@linux.dev \
    --cc=zhang.yanze@zte.com.cn \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox