From: Junyang Han <han.junyang@zte.com.cn>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, andrew+netdev@lunn.ch, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, han.junyang@zte.com.cn,
ran.ming@zte.com.cn, han.chengfei@zte.com.cn,
zhang.yanze@zte.com.cn
Subject: [PATCH net-next v2 3/3] net/ethernet/zte/dinghai: add hardware register access and PCI capability scanning
Date: Wed, 22 Apr 2026 22:49:01 +0800 [thread overview]
Message-ID: <20260422144901.2403456-4-han.junyang@zte.com.cn> (raw)
In-Reply-To: <20260422144901.2403456-1-han.junyang@zte.com.cn>
[-- Attachment #1.1.1: Type: text/plain, Size: 18761 bytes --]
Implement PCI configuration space access, BAR mapping, capability
scanning (common/notify/device), and hardware queue register
definitions for DingHai PF device.
Signed-off-by: Junyang Han <han.junyang@zte.com.cn>
---
drivers/net/ethernet/zte/dinghai/dh_queue.h | 71 ++++
drivers/net/ethernet/zte/dinghai/en_pf.c | 410 ++++++++++++++++++++
drivers/net/ethernet/zte/dinghai/en_pf.h | 38 ++
3 files changed, 519 insertions(+)
create mode 100644 drivers/net/ethernet/zte/dinghai/dh_queue.h
diff --git a/drivers/net/ethernet/zte/dinghai/dh_queue.h b/drivers/net/ethernet/zte/dinghai/dh_queue.h
new file mode 100644
index 000000000000..5067c73fed33
--- /dev/null
+++ b/drivers/net/ethernet/zte/dinghai/dh_queue.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ZTE DingHai Ethernet driver - PCI capability definitions
+ * Copyright (c) 2022-2026, ZTE Corporation.
+ */
+
+#ifndef __DH_QUEUE_H__
+#define __DH_QUEUE_H__
+
+/* Vector value used to disable MSI for queue */
+#define ZXDH_MSI_NO_VECTOR 0xff
+
+/* Status byte for guest to report progress, and synchronize features */
+/* We have seen device and processed generic fields */
+#define ZXDH_CONFIG_S_ACKNOWLEDGE 1
+/* We have found a driver for the device. */
+#define ZXDH_CONFIG_S_DRIVER 2
+/* Driver has used its parts of the config, and is happy */
+#define ZXDH_CONFIG_S_DRIVER_OK 4
+/* Driver has finished configuring features */
+#define ZXDH_CONFIG_S_FEATURES_OK 8
+/* Device entered invalid state, driver must reset it */
+#define ZXDH_CONFIG_S_NEEDS_RESET 0x40
+/* We've given up on this device */
+#define ZXDH_CONFIG_S_FAILED 0x80
+
+/* This is the PCI capability header: */
+struct zxdh_pf_pci_cap {
+ __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ __u8 cap_next; /* Generic PCI field: next ptr. */
+ __u8 cap_len; /* Generic PCI field: capability length */
+ __u8 cfg_type; /* Identifies the structure. */
+ __u8 bar; /* Where to find it. */
+ __u8 id; /* Multiple capabilities of the same type */
+ __u8 padding[2]; /* Pad to full dword. */
+ __le32 offset; /* Offset within bar. */
+ __le32 length; /* Length of the structure, in bytes. */
+};
+
+/* Fields in ZXDH_PF_PCI_CAP_COMMON_CFG: */
+struct zxdh_pf_pci_common_cfg {
+ /* About the whole device. */
+ __le32 device_feature_select; /* read-write */
+ __le32 device_feature; /* read-only */
+ __le32 guest_feature_select; /* read-write */
+ __le32 guest_feature; /* read-write */
+ __le16 msix_config; /* read-write */
+ __le16 num_queues; /* read-only */
+ __u8 device_status; /* read-write */
+ __u8 config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ __le16 queue_select; /* read-write */
+ __le16 queue_size; /* read-write, power of 2. */
+ __le16 queue_msix_vector; /* read-write */
+ __le16 queue_enable; /* read-write */
+ __le16 queue_notify_off; /* read-only */
+ __le32 queue_desc_lo; /* read-write */
+ __le32 queue_desc_hi; /* read-write */
+ __le32 queue_avail_lo; /* read-write */
+ __le32 queue_avail_hi; /* read-write */
+ __le32 queue_used_lo; /* read-write */
+ __le32 queue_used_hi; /* read-write */
+};
+
+struct zxdh_pf_pci_notify_cap {
+ struct zxdh_pf_pci_cap cap;
+ __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+#endif /* __DH_QUEUE_H__ */
diff --git a/drivers/net/ethernet/zte/dinghai/en_pf.c b/drivers/net/ethernet/zte/dinghai/en_pf.c
index 70dad28de544..0dd4dcbdefb0 100644
--- a/drivers/net/ethernet/zte/dinghai/en_pf.c
+++ b/drivers/net/ethernet/zte/dinghai/en_pf.c
@@ -9,6 +9,7 @@
#include <net/devlink.h>
#include "en_pf.h"
#include "dh_log.h"
+#include "dh_queue.h"
MODULE_AUTHOR("Junyang Han <han.junyang@zte.com.cn>");
MODULE_DESCRIPTION("ZTE Corporation network adapters (DingHai series) Ethernet driver");
@@ -92,6 +93,415 @@ void dh_pf_pci_close(struct dh_core_dev *dev)
pci_disable_device(dev->pdev);
}
+int32_t zxdh_pf_pci_find_capability(struct pci_dev *pdev, uint8_t cfg_type,
+ uint32_t ioresource_types, int32_t *bars)
+{
+ int32_t pos = 0;
+ uint8_t type = 0;
+ uint8_t bar = 0;
+
+ for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0;
+ pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) {
+ pci_read_config_byte(pdev, pos + offsetof(struct zxdh_pf_pci_cap, cfg_type), &type);
+ pci_read_config_byte(pdev, pos + offsetof(struct zxdh_pf_pci_cap, bar), &bar);
+
+ /* ignore structures with reserved BAR values */
+ if (bar > ZXDH_PF_MAX_BAR_VAL)
+ continue;
+
+ if (type == cfg_type) {
+ if (pci_resource_len(pdev, bar) &&
+ pci_resource_flags(pdev, bar) & ioresource_types) {
+ *bars |= (1 << bar);
+ return pos;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void __iomem *zxdh_pf_map_capability(struct dh_core_dev *dh_dev, int32_t off,
+ size_t minlen, uint32_t align,
+ uint32_t start, uint32_t size,
+ size_t *len, resource_size_t *pa,
+ uint32_t *bar_off)
+{
+ struct pci_dev *pdev = dh_dev->pdev;
+ uint8_t bar = 0;
+ uint32_t offset = 0;
+ uint32_t length = 0;
+ void __iomem *p = NULL;
+
+ pci_read_config_byte(pdev, off + offsetof(struct zxdh_pf_pci_cap, bar), &bar);
+ pci_read_config_dword(pdev, off + offsetof(struct zxdh_pf_pci_cap, offset), &offset);
+ pci_read_config_dword(pdev, off + offsetof(struct zxdh_pf_pci_cap, length), &length);
+
+ if (bar_off)
+ *bar_off = offset;
+
+ if (length <= start) {
+ LOG_ERR(dh_dev, "bad capability len %u (>%u expected)\n", length, start);
+ return NULL;
+ }
+
+ if (length - start < minlen) {
+ LOG_ERR(dh_dev, "bad capability len %u (>=%zu expected)\n", length, minlen);
+ return NULL;
+ }
+
+ length -= start;
+ if (start + offset < offset) {
+ LOG_ERR(dh_dev, "map wrap-around %u+%u\n", start, offset);
+ return NULL;
+ }
+
+ offset += start;
+ if (offset & (align - 1)) {
+ LOG_ERR(dh_dev, "offset %u not aligned to %u\n", offset, align);
+ return NULL;
+ }
+
+ if (length > size)
+ length = size;
+
+ if (len)
+ *len = length;
+
+ if (minlen + offset < minlen || minlen + offset > pci_resource_len(pdev, bar)) {
+ LOG_ERR(dh_dev, "map custom queue %zu@%u out of range on bar %i length %lu\n",
+ minlen, offset, bar, (unsigned long)pci_resource_len(pdev, bar));
+ return NULL;
+ }
+
+ p = pci_iomap_range(pdev, bar, offset, length);
+ if (!p) {
+ LOG_ERR(dh_dev, "unable to map custom queue %u@%u on bar %i\n", length, offset, bar);
+ } else if (pa) {
+ *pa = pci_resource_start(pdev, bar) + offset;
+ }
+
+ return p;
+}
+
+int32_t zxdh_pf_common_cfg_init(struct dh_core_dev *dh_dev)
+{
+ int32_t common = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ struct pci_dev *pdev = dh_dev->pdev;
+
+ /* check for a common config: if not, use legacy mode (bar 0). */
+ common = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_COMMON_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &pf_dev->modern_bars);
+ if (common == 0) {
+ LOG_ERR(dh_dev, "missing capabilities %i, leaving for legacy driver\n", common);
+ return -ENODEV;
+ }
+
+ pf_dev->common = zxdh_pf_map_capability(dh_dev, common,
+ sizeof(struct zxdh_pf_pci_common_cfg),
+ ZXDH_PF_ALIGN4, 0,
+ sizeof(struct zxdh_pf_pci_common_cfg),
+ NULL, NULL, NULL);
+ if (!pf_dev->common) {
+ LOG_ERR(dh_dev, "pf_dev->common is null\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int32_t zxdh_pf_notify_cfg_init(struct dh_core_dev *dh_dev)
+{
+ int32_t notify = 0;
+ size_t notify_length = 0;
+ size_t notify_offset = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ struct pci_dev *pdev = dh_dev->pdev;
+
+ /* If common is there, these should be too... */
+ notify = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_NOTIFY_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &pf_dev->modern_bars);
+ if (notify == 0) {
+ LOG_ERR(dh_dev, "missing capabilities %i\n", notify);
+ return -EINVAL;
+ }
+
+ pci_read_config_dword(pdev, notify + offsetof(struct zxdh_pf_pci_notify_cap,
+ notify_off_multiplier), &pf_dev->notify_offset_multiplier);
+ pci_read_config_dword(pdev, notify + offsetof(struct zxdh_pf_pci_notify_cap,
+ cap.length), ¬ify_length);
+ pci_read_config_dword(pdev, notify + offsetof(struct zxdh_pf_pci_notify_cap,
+ cap.offset), ¬ify_offset);
+
+ /* We don't know how many VQs we'll map, ahead of the time.
+ * If notify length is small, map it all now. Otherwise, map each VQ individually later.
+ */
+ if (notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
+ pf_dev->notify_base = zxdh_pf_map_capability(dh_dev, notify,
+ ZXDH_PF_MAP_MINLEN2,
+ ZXDH_PF_ALIGN2, 0,
+ notify_length,
+ &pf_dev->notify_len,
+ &pf_dev->notify_pa, NULL);
+ if (!pf_dev->notify_base) {
+ LOG_ERR(dh_dev, "pf_dev->notify_base is null\n");
+ return -EINVAL;
+ }
+ } else {
+ pf_dev->notify_map_cap = notify;
+ }
+
+ return 0;
+}
+
+int32_t zxdh_pf_device_cfg_init(struct dh_core_dev *dh_dev)
+{
+ int32_t device = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ struct pci_dev *pdev = dh_dev->pdev;
+
+ /* Device capability is only mandatory for devices that have device-specific configuration. */
+ device = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_DEVICE_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &pf_dev->modern_bars);
+
+ /* we don't know how much we should map, but PAGE_SIZE is more than enough for all existing devices. */
+ if (device) {
+ pf_dev->device = zxdh_pf_map_capability(dh_dev, device, 0,
+ ZXDH_PF_ALIGN4, 0, PAGE_SIZE,
+ &pf_dev->device_len, NULL,
+ &pf_dev->dev_cfg_bar_off);
+ if (!pf_dev->device) {
+ LOG_ERR(dh_dev, "pf_dev->device is null\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void zxdh_pf_modern_cfg_uninit(struct dh_core_dev *dh_dev)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ struct pci_dev *pdev = dh_dev->pdev;
+
+ if (pf_dev->device)
+ pci_iounmap(pdev, pf_dev->device);
+ if (pf_dev->notify_base)
+ pci_iounmap(pdev, pf_dev->notify_base);
+ pci_iounmap(pdev, pf_dev->common);
+}
+
+int32_t zxdh_pf_modern_cfg_init(struct dh_core_dev *dh_dev)
+{
+ int32_t ret = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ struct pci_dev *pdev = dh_dev->pdev;
+
+ ret = zxdh_pf_common_cfg_init(dh_dev);
+ if (ret) {
+ LOG_ERR(dh_dev, "zxdh_pf_common_cfg_init failed: %d\n", ret);
+ return -EINVAL;
+ }
+
+ ret = zxdh_pf_notify_cfg_init(dh_dev);
+ if (ret) {
+ LOG_ERR(dh_dev, "zxdh_pf_notify_cfg_init failed: %d\n", ret);
+ goto err_map_notify;
+ }
+
+ ret = zxdh_pf_device_cfg_init(dh_dev);
+ if (ret) {
+ LOG_ERR(dh_dev, "zxdh_pf_device_cfg_init failed: %d\n", ret);
+ goto err_map_device;
+ }
+
+ return 0;
+
+err_map_device:
+ if (pf_dev->notify_base)
+ pci_iounmap(pdev, pf_dev->notify_base);
+err_map_notify:
+ pci_iounmap(pdev, pf_dev->common);
+ return -EINVAL;
+}
+
+uint16_t zxdh_pf_get_queue_notify_off(struct dh_core_dev *dh_dev,
+ uint16_t phy_index, uint16_t index)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ if (pf_dev->packed_status)
+ iowrite16(phy_index, &pf_dev->common->queue_select);
+ else
+ iowrite16(index, &pf_dev->common->queue_select);
+
+ return ioread16(&pf_dev->common->queue_notify_off);
+}
+
+void __iomem *zxdh_pf_map_vq_notify(struct dh_core_dev *dh_dev,
+ uint16_t phy_index, uint16_t index,
+ resource_size_t *pa)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ uint16_t off = 0;
+
+ off = zxdh_pf_get_queue_notify_off(dh_dev, phy_index, index);
+
+ if (pf_dev->notify_base) {
+ /* offset should not wrap */
+ if ((uint64_t)off * pf_dev->notify_offset_multiplier + 2 > pf_dev->notify_len) {
+ LOG_ERR(dh_dev, "bad notification offset %u (x %u) for queue %u > %zd",
+ off, pf_dev->notify_offset_multiplier, phy_index,
+ pf_dev->notify_len);
+ return NULL;
+ }
+
+ if (pa)
+ *pa = pf_dev->notify_pa + off * pf_dev->notify_offset_multiplier;
+
+ return pf_dev->notify_base + off * pf_dev->notify_offset_multiplier;
+ } else {
+ return zxdh_pf_map_capability(dh_dev, pf_dev->notify_map_cap, 2, 2,
+ off * pf_dev->notify_offset_multiplier,
+ 2, NULL, pa, NULL);
+ }
+}
+
+void zxdh_pf_unmap_vq_notify(struct dh_core_dev *dh_dev, void *priv)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ if (!pf_dev->notify_base)
+ pci_iounmap(dh_dev->pdev, priv);
+}
+
+void zxdh_pf_set_status(struct dh_core_dev *dh_dev, uint8_t status)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ iowrite8(status, &pf_dev->common->device_status);
+}
+
+uint8_t zxdh_pf_get_status(struct dh_core_dev *dh_dev)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ return ioread8(&pf_dev->common->device_status);
+}
+
+static uint8_t zxdh_pf_get_cfg_gen(struct dh_core_dev *dh_dev)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ uint8_t config_generation = 0;
+
+ config_generation = ioread8(&pf_dev->common->config_generation);
+ LOG_INFO(dh_dev, "config_generation is %d\n", config_generation);
+
+ return config_generation;
+}
+
+void zxdh_pf_get_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ if (pf_dev->pf_sriov_cap_base) {
+ DEV_MAC_L = ioread32(pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off);
+ mac[0] = DEV_MAC_L & 0xff;
+ mac[1] = (DEV_MAC_L >> 8) & 0xff;
+ mac[2] = (DEV_MAC_L >> 16) & 0xff;
+ mac[3] = (DEV_MAC_L >> 24) & 0xff;
+ DEV_MAC_H = ioread16(pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off +
+ ZXDH_DEV_MAC_HIGH_OFFSET);
+ mac[4] = DEV_MAC_H & 0xff;
+ mac[5] = (DEV_MAC_H >> 8) & 0xff;
+ }
+}
+
+void zxdh_pf_set_vf_mac_reg(struct zxdh_pf_device *pf_dev, uint8_t *mac, int32_t vf_id)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+
+ if (pf_dev->pf_sriov_cap_base) {
+ DEV_MAC_L = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ DEV_MAC_H = mac[4] | (mac[5] << 8);
+ iowrite32(DEV_MAC_L, (pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off));
+ iowrite16(DEV_MAC_H, (pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off +
+ ZXDH_DEV_MAC_HIGH_OFFSET));
+ }
+}
+
+void zxdh_pf_set_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ zxdh_pf_set_vf_mac_reg(pf_dev, mac, vf_id);
+}
+
+void zxdh_set_mac(struct dh_core_dev *dh_dev, uint8_t *mac)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ DEV_MAC_L = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ DEV_MAC_H = mac[4] | (mac[5] << 8);
+ iowrite32(DEV_MAC_L, pf_dev->device);
+ iowrite16(DEV_MAC_H, pf_dev->device + ZXDH_DEV_MAC_HIGH_OFFSET);
+}
+
+void zxdh_get_mac(struct dh_core_dev *dh_dev, uint8_t *mac)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ DEV_MAC_L = ioread32(pf_dev->device);
+ mac[0] = DEV_MAC_L & 0xff;
+ mac[1] = (DEV_MAC_L >> 8) & 0xff;
+ mac[2] = (DEV_MAC_L >> 16) & 0xff;
+ mac[3] = (DEV_MAC_L >> 24) & 0xff;
+ DEV_MAC_H = ioread16(pf_dev->device + ZXDH_DEV_MAC_HIGH_OFFSET);
+ mac[4] = DEV_MAC_H & 0xff;
+ mac[5] = (DEV_MAC_H >> 8) & 0xff;
+}
+
+uint64_t zxdh_pf_get_features(struct dh_core_dev *dh_dev)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ uint64_t device_feature = 0;
+
+ iowrite32(0, &pf_dev->common->device_feature_select);
+ device_feature = ioread32(&pf_dev->common->device_feature);
+ iowrite32(1, &pf_dev->common->device_feature_select);
+ device_feature |= ((uint64_t)ioread32(&pf_dev->common->device_feature) << 32);
+
+ return device_feature;
+}
+
+void zxdh_pf_set_features(struct dh_core_dev *dh_dev, uint64_t features)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ iowrite32(0, &pf_dev->common->guest_feature_select);
+ iowrite32((uint32_t)features, &pf_dev->common->guest_feature);
+ iowrite32(1, &pf_dev->common->guest_feature_select);
+ iowrite32(features >> 32, &pf_dev->common->guest_feature);
+}
+
static int dh_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct dh_core_dev *dh_dev = NULL;
diff --git a/drivers/net/ethernet/zte/dinghai/en_pf.h b/drivers/net/ethernet/zte/dinghai/en_pf.h
index a8b324adb948..0c4172c513a9 100644
--- a/drivers/net/ethernet/zte/dinghai/en_pf.h
+++ b/drivers/net/ethernet/zte/dinghai/en_pf.h
@@ -15,6 +15,24 @@
#define ZXDH_PF_DEVICE_ID 0x8040
#define ZXDH_VF_DEVICE_ID 0x8041
+/* Common configuration */
+#define ZXDH_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define ZXDH_PCI_CAP_NOTIFY_CFG 2
+/* ISR access */
+#define ZXDH_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define ZXDH_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define ZXDH_PCI_CAP_PCI_CFG 5
+
+#define ZXDH_PF_MAX_BAR_VAL 0x5
+#define ZXDH_PF_ALIGN4 4
+#define ZXDH_PF_ALIGN2 2
+#define ZXDH_PF_MAP_MINLEN2 2
+
+#define ZXDH_DEV_MAC_HIGH_OFFSET 4
+
enum dh_coredev_type {
DH_COREDEV_PF,
DH_COREDEV_VF,
@@ -34,7 +52,27 @@ struct dh_core_dev {
};
struct zxdh_pf_device {
+ struct zxdh_pf_pci_common_cfg __iomem *common;
+ /* Device-specific data (non-legacy mode) */
+ /* Base of vq notifications (non-legacy mode). */
+ void __iomem *device;
+ void __iomem *notify_base;
+ void __iomem *pf_sriov_cap_base;
+ /* Physical base of vq notifications */
+ resource_size_t notify_pa;
+ /* So we can sanity-check accesses. */
+ size_t notify_len;
+ size_t device_len;
+ /* Capability for when we need to map notifications per-vq. */
+ int32_t notify_map_cap;
+ uint32_t notify_offset_multiplier;
+ /* Multiply queue_notify_off by this value. (non-legacy mode). */
+ int32_t modern_bars;
+
uint64_t pci_ioremap_addr[6];
+ uint64_t sriov_bar_size;
+ uint32_t dev_cfg_bar_off;
+ bool packed_status;
bool bar_chan_valid;
bool vepa;
struct mutex irq_lock; /* Protects IRQ operations */
--
2.27.0
[-- Attachment #1.1.2: Type: text/html , Size: 45260 bytes --]
next prev parent reply other threads:[~2026-04-22 15:19 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-22 14:48 [PATCH net-next v2 0/3] Add ZTE DingHai Ethernet PF driver Junyang Han
2026-04-22 14:48 ` [PATCH net-next v2 1/3] net/ethernet: add ZTE network driver support Junyang Han
2026-04-22 16:24 ` Andrew Lunn
2026-04-22 14:49 ` [PATCH net-next v2 2/3] net/ethernet/zte/dinghai: add logging infrastructure Junyang Han
2026-04-22 14:49 ` Junyang Han [this message]
2026-04-22 21:54 ` [PATCH net-next v2 3/3] net/ethernet/zte/dinghai: add hardware register access and PCI capability scanning Vadim Fedorenko
2026-04-22 16:19 ` [PATCH net-next v2 0/3] Add ZTE DingHai Ethernet PF driver Andrew Lunn
2026-04-24 7:26 ` han.junyang
2026-04-24 7:34 ` han.junyang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260422144901.2403456-4-han.junyang@zte.com.cn \
--to=han.junyang@zte.com.cn \
--cc=andrew+netdev@lunn.ch \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=han.chengfei@zte.com.cn \
--cc=kuba@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=ran.ming@zte.com.cn \
--cc=zhang.yanze@zte.com.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.