From: Junyang Han <han.junyang@zte.com.cn>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, andrew+netdev@lunn.ch, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, han.junyang@zte.com.cn,
ran.ming@zte.com.cn, han.chengfei@zte.com.cn,
zhang.yanze@zte.com.cn
Subject: [PATCH net-next 3/3] net/ethernet/zte/dinghai: add hardware register access and PCI capability scanning
Date: Wed, 15 Apr 2026 09:53:34 +0800 [thread overview]
Message-ID: <20260415015334.2018453-3-han.junyang@zte.com.cn> (raw)
In-Reply-To: <20260415015334.2018453-1-han.junyang@zte.com.cn>
[-- Attachment #1.1.1: Type: text/plain, Size: 18746 bytes --]
Implement PCI configuration space access, BAR mapping, capability
scanning (common/notify/device), and hardware queue register
definitions for DingHai PF device.
Signed-off-by: Junyang Han <han.junyang@zte.com.cn>
---
drivers/net/ethernet/zte/dinghai/dh_queue.h | 71 ++++
drivers/net/ethernet/zte/dinghai/en_pf.c | 411 ++++++++++++++++++++
drivers/net/ethernet/zte/dinghai/en_pf.h | 41 ++
3 files changed, 523 insertions(+)
create mode 100644 drivers/net/ethernet/zte/dinghai/dh_queue.h
diff --git a/drivers/net/ethernet/zte/dinghai/dh_queue.h b/drivers/net/ethernet/zte/dinghai/dh_queue.h
new file mode 100644
index 000000000000..1e7d64ecbbf3
--- /dev/null
+++ b/drivers/net/ethernet/zte/dinghai/dh_queue.h
@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * ZTE DingHai Ethernet driver - PCI capability definitions
 * Copyright (c) 2022-2024, ZTE Corporation.
 *
 * The structures below mirror regions the device exposes through
 * vendor-specific PCI capabilities; all multi-byte fields are
 * little-endian (__le16/__le32) as seen by the device.  Do not reorder
 * or resize fields -- the layout is part of the hardware interface.
 */

#ifndef __DH_QUEUE_H__
#define __DH_QUEUE_H__

/* Vector value used to disable MSI for queue */
#define ZXDH_MSI_NO_VECTOR 0xff

/* Status byte for guest to report progress, and synchronize features */
/* We have seen device and processed generic fields */
#define ZXDH_CONFIG_S_ACKNOWLEDGE 1
/* We have found a driver for the device. */
#define ZXDH_CONFIG_S_DRIVER 2
/* Driver has used its parts of the config, and is happy */
#define ZXDH_CONFIG_S_DRIVER_OK 4
/* Driver has finished configuring features */
#define ZXDH_CONFIG_S_FEATURES_OK 8
/* Device entered invalid state, driver must reset it */
#define ZXDH_CONFIG_S_NEEDS_RESET 0x40
/* We've given up on this device */
#define ZXDH_CONFIG_S_FAILED 0x80

/* This is the PCI capability header: */
struct zxdh_pf_pci_cap {
	__u8 cap_vndr;   /* Generic PCI field: PCI_CAP_ID_VNDR */
	__u8 cap_next;   /* Generic PCI field: next ptr. */
	__u8 cap_len;    /* Generic PCI field: capability length */
	__u8 cfg_type;   /* Identifies the structure. */
	__u8 bar;        /* Where to find it. */
	__u8 id;         /* Multiple capabilities of the same type */
	__u8 padding[2]; /* Pad to full dword. */
	__le32 offset;   /* Offset within bar. */
	__le32 length;   /* Length of the structure, in bytes. */
};

/* Fields in ZXDH_PF_PCI_CAP_COMMON_CFG: */
struct zxdh_pf_pci_common_cfg {
	/* About the whole device. */
	__le32 device_feature_select; /* read-write */
	__le32 device_feature;        /* read-only */
	__le32 guest_feature_select;  /* read-write */
	__le32 guest_feature;         /* read-write */
	__le16 msix_config;           /* read-write */
	__le16 num_queues;            /* read-only */
	__u8 device_status;           /* read-write */
	__u8 config_generation;       /* read-only */

	/* About a specific virtqueue. */
	__le16 queue_select;      /* read-write */
	__le16 queue_size;        /* read-write, power of 2. */
	__le16 queue_msix_vector; /* read-write */
	__le16 queue_enable;      /* read-write */
	__le16 queue_notify_off;  /* read-only */
	__le32 queue_desc_lo;     /* read-write */
	__le32 queue_desc_hi;     /* read-write */
	__le32 queue_avail_lo;    /* read-write */
	__le32 queue_avail_hi;    /* read-write */
	__le32 queue_used_lo;     /* read-write */
	__le32 queue_used_hi;     /* read-write */
};

/* Notify capability: the generic header plus the notify multiplier. */
struct zxdh_pf_pci_notify_cap {
	struct zxdh_pf_pci_cap cap;
	__le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
};

#endif /* __DH_QUEUE_H__ */
diff --git a/drivers/net/ethernet/zte/dinghai/en_pf.c b/drivers/net/ethernet/zte/dinghai/en_pf.c
index 2d2740223401..c29299ad629e 100644
--- a/drivers/net/ethernet/zte/dinghai/en_pf.c
+++ b/drivers/net/ethernet/zte/dinghai/en_pf.c
@@ -107,6 +107,417 @@ void dh_pf_pci_close(struct dh_core_dev *dev)
pci_disable_device(dev->pdev);
}
+int32_t zxdh_pf_pci_find_capability(struct pci_dev *pdev, uint8_t cfg_type,
+ uint32_t ioresource_types, int32_t *bars)
+{
+ int32_t pos = 0;
+ uint8_t type = 0;
+ uint8_t bar = 0;
+
+ for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0;
+ pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) {
+ pci_read_config_byte(pdev, pos + offsetof(struct zxdh_pf_pci_cap, cfg_type), &type);
+ pci_read_config_byte(pdev, pos + offsetof(struct zxdh_pf_pci_cap, bar), &bar);
+
+ /* ignore structures with reserved BAR values */
+ if (bar > ZXDH_PF_MAX_BAR_VAL)
+ continue;
+
+ if (type == cfg_type) {
+ if (pci_resource_len(pdev, bar) &&
+ pci_resource_flags(pdev, bar) & ioresource_types) {
+ *bars |= (1 << bar);
+ return pos;
+ }
+ }
+ }
+
+ return 0;
+}
+
/*
 * zxdh_pf_map_capability() - ioremap the BAR region described by a vendor cap.
 * @dh_dev:  core device whose config space holds the capability
 * @off:     config-space offset of the struct zxdh_pf_pci_cap header
 * @minlen:  minimum usable length the caller requires after @start
 * @align:   required alignment of the final in-BAR offset (power of two)
 * @start:   byte offset inside the capability region to begin mapping at
 * @size:    upper bound on how many bytes to map
 * @len:     optional out-param: actual mapped length
 * @pa:      optional out-param: physical address of the mapped region
 * @bar_off: optional out-param: raw capability offset within its BAR
 *
 * Returns the mapped __iomem address, or NULL on any validation or
 * iomap failure.
 */
void __iomem *zxdh_pf_map_capability(struct dh_core_dev *dh_dev, int32_t off,
				     size_t minlen, uint32_t align,
				     uint32_t start, uint32_t size,
				     size_t *len, resource_size_t *pa,
				     uint32_t *bar_off)
{
	struct pci_dev *pdev = dh_dev->pdev;
	uint8_t bar = 0;
	uint32_t offset = 0;
	uint32_t length = 0;
	void __iomem *p = NULL;

	/* Pull BAR index, in-BAR offset and region length from the cap body. */
	pci_read_config_byte(pdev, off + offsetof(struct zxdh_pf_pci_cap, bar), &bar);
	pci_read_config_dword(pdev, off + offsetof(struct zxdh_pf_pci_cap, offset), &offset);
	pci_read_config_dword(pdev, off + offsetof(struct zxdh_pf_pci_cap, length), &length);

	if (bar_off)
		*bar_off = offset;

	/* The region must extend past @start ... */
	if (length <= start) {
		LOG_ERR("bad capability len %u (>%u expected)\n", length, start);
		return NULL;
	}

	/* ... and must leave at least @minlen bytes after @start. */
	if (length - start < minlen) {
		LOG_ERR("bad capability len %u (>=%zu expected)\n", length, minlen);
		return NULL;
	}

	length -= start;
	/* Reject 32-bit wrap-around of offset + start. */
	if (start + offset < offset) {
		LOG_ERR("map wrap-around %u+%u\n", start, offset);
		return NULL;
	}

	offset += start;
	/* @align is assumed to be a power of two. */
	if (offset & (align - 1)) {
		LOG_ERR("offset %u not aligned to %u\n", offset, align);
		return NULL;
	}

	/* Never map more than the caller asked for. */
	if (length > size)
		length = size;

	if (len)
		*len = length;

	/* Guard against overflow and running off the end of the BAR. */
	if (minlen + offset < minlen || minlen + offset > pci_resource_len(pdev, bar)) {
		LOG_ERR("map custom queue %zu@%u out of range on bar %i length %lu\n",
			minlen, offset, bar, (unsigned long)pci_resource_len(pdev, bar));
		return NULL;
	}

	p = pci_iomap_range(pdev, bar, offset, length);
	if (unlikely(!p)) {
		LOG_ERR("unable to map custom queue %u@%u on bar %i\n", length, offset, bar);
	} else if (pa) {
		*pa = pci_resource_start(pdev, bar) + offset;
	}

	return p;
}
+
/*
 * zxdh_pf_common_cfg_init() - locate and map the common configuration region.
 *
 * Returns 0 on success, -ENODEV when the device exposes no common-config
 * capability (left for a legacy driver), or -EINVAL when the region
 * cannot be mapped.
 */
int32_t zxdh_pf_common_cfg_init(struct dh_core_dev *dh_dev)
{
	int32_t common = 0;
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
	struct pci_dev *pdev = dh_dev->pdev;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_COMMON_CFG,
					     IORESOURCE_IO | IORESOURCE_MEM,
					     &pf_dev->modern_bars);
	if (common == 0) {
		LOG_ERR("missing capabilities %i, leaving for legacy driver\n", common);
		return -ENODEV;
	}

	/* Map exactly one struct zxdh_pf_pci_common_cfg, dword-aligned. */
	pf_dev->common = zxdh_pf_map_capability(dh_dev, common,
						sizeof(struct zxdh_pf_pci_common_cfg),
						ZXDH_PF_ALIGN4, 0,
						sizeof(struct zxdh_pf_pci_common_cfg),
						NULL, NULL, NULL);
	if (unlikely(!pf_dev->common)) {
		LOG_ERR("pf_dev->common is null\n");
		return -EINVAL;
	}

	return 0;
}
+
/*
 * zxdh_pf_notify_cfg_init() - locate the notify capability and, when the
 * whole notification region fits in one page, premap it.
 *
 * On success either pf_dev->notify_base is set (region premapped) or
 * pf_dev->notify_map_cap records the capability offset so each queue's
 * notify address can be mapped individually later.
 *
 * Returns 0 on success, -EINVAL when the capability is missing or the
 * premap fails.
 */
int32_t zxdh_pf_notify_cfg_init(struct dh_core_dev *dh_dev)
{
	int32_t notify = 0;
	uint32_t notify_length = 0;
	uint32_t notify_offset = 0;
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
	struct pci_dev *pdev = dh_dev->pdev;

	/* If common is there, these should be too... */
	notify = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_NOTIFY_CFG,
					     IORESOURCE_IO | IORESOURCE_MEM,
					     &pf_dev->modern_bars);
	if (notify == 0) {
		LOG_ERR("missing capabilities %i\n", notify);
		return -EINVAL;
	}

	/* Fetch the multiplier plus the region's offset and length. */
	pci_read_config_dword(pdev, notify + offsetof(struct zxdh_pf_pci_notify_cap,
			      notify_off_multiplier), &pf_dev->notify_offset_multiplier);
	pci_read_config_dword(pdev, notify + offsetof(struct zxdh_pf_pci_notify_cap,
			      cap.length), &notify_length);
	pci_read_config_dword(pdev, notify + offsetof(struct zxdh_pf_pci_notify_cap,
			      cap.offset), &notify_offset);

	/* We don't know how many VQs we'll map, ahead of the time.
	 * If notify length is small, map it all now. Otherwise, map each VQ individually later.
	 */
	if ((uint64_t)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		pf_dev->notify_base = zxdh_pf_map_capability(dh_dev, notify,
							     ZXDH_PF_MAP_MINLEN2,
							     ZXDH_PF_ALIGN2, 0,
							     notify_length,
							     &pf_dev->notify_len,
							     &pf_dev->notify_pa, NULL);
		if (unlikely(!pf_dev->notify_base)) {
			LOG_ERR("pf_dev->notify_base is null\n");
			return -EINVAL;
		}
	} else {
		/* Too big to premap: remember the cap for per-VQ mapping. */
		pf_dev->notify_map_cap = notify;
	}

	return 0;
}
+
+int32_t zxdh_pf_device_cfg_init(struct dh_core_dev *dh_dev)
+{
+ int32_t device = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ struct pci_dev *pdev = dh_dev->pdev;
+
+ /* Device capability is only mandatory for devices that have device-specific configuration. */
+ device = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_DEVICE_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &pf_dev->modern_bars);
+
+ /* we don't know how much we should map, but PAGE_SIZE is more than enough for all existing devices. */
+ if (device) {
+ pf_dev->device = zxdh_pf_map_capability(dh_dev, device, 0,
+ ZXDH_PF_ALIGN4, 0, PAGE_SIZE,
+ &pf_dev->device_len, NULL,
+ &pf_dev->dev_cfg_bar_off);
+ if (unlikely(!pf_dev->device)) {
+ LOG_ERR("pf_dev->device is null\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
/*
 * zxdh_pf_modern_cfg_uninit() - undo zxdh_pf_modern_cfg_init() mappings.
 *
 * Unmaps in reverse order of creation.  The device and notify regions
 * are optional (see their init functions), so only unmap them if set;
 * the common region is mandatory and always mapped on success.
 */
void zxdh_pf_modern_cfg_uninit(struct dh_core_dev *dh_dev)
{
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
	struct pci_dev *pdev = dh_dev->pdev;

	if (pf_dev->device)
		pci_iounmap(pdev, pf_dev->device);
	if (pf_dev->notify_base)
		pci_iounmap(pdev, pf_dev->notify_base);
	pci_iounmap(pdev, pf_dev->common);
}
+
+int32_t zxdh_pf_modern_cfg_init(struct dh_core_dev *dh_dev)
+{
+ int32_t ret = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ struct pci_dev *pdev = dh_dev->pdev;
+
+ ret = zxdh_pf_common_cfg_init(dh_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_common_cfg_init failed: %d\n", ret);
+ return -EINVAL;
+ }
+
+ ret = zxdh_pf_notify_cfg_init(dh_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_notify_cfg_init failed: %d\n", ret);
+ goto err_map_notify;
+ }
+
+ ret = zxdh_pf_device_cfg_init(dh_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_device_cfg_init failed: %d\n", ret);
+ goto err_map_device;
+ }
+
+ return 0;
+
+err_map_device:
+ if (pf_dev->notify_base)
+ pci_iounmap(pdev, pf_dev->notify_base);
+err_map_notify:
+ pci_iounmap(pdev, pf_dev->common);
+ return -EINVAL;
+}
+
+uint16_t zxdh_pf_get_queue_notify_off(struct dh_core_dev *dh_dev,
+ uint16_t phy_index, uint16_t index)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ if (pf_dev->packed_status)
+ iowrite16(phy_index, &pf_dev->common->queue_select);
+ else
+ iowrite16(index, &pf_dev->common->queue_select);
+
+ return ioread16(&pf_dev->common->queue_notify_off);
+}
+
/*
 * zxdh_pf_map_vq_notify() - resolve the notification address for one queue.
 * @dh_dev:    core device
 * @phy_index: physical queue index (used when packed_status is set)
 * @index:     logical queue index
 * @pa:        optional out-param: physical address of the notify doorbell
 *
 * When the notify region was premapped, returns an address inside
 * notify_base after bounds-checking; otherwise maps just this queue's
 * 2-byte doorbell via the saved capability.  Returns NULL on a bad offset.
 */
void __iomem *zxdh_pf_map_vq_notify(struct dh_core_dev *dh_dev,
				    uint16_t phy_index, uint16_t index,
				    resource_size_t *pa)
{
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
	uint16_t off = 0;

	off = zxdh_pf_get_queue_notify_off(dh_dev, phy_index, index);

	if (pf_dev->notify_base) {
		/* offset should not wrap */
		if ((uint64_t)off * pf_dev->notify_offset_multiplier + 2 > pf_dev->notify_len) {
			LOG_ERR("bad notification offset %u (x %u) for queue %u > %zd",
				off, pf_dev->notify_offset_multiplier, phy_index,
				pf_dev->notify_len);
			return NULL;
		}

		/* NOTE(review): the products below are 32-bit; the check above
		 * used 64-bit math, so they are assumed not to overflow here --
		 * confirm notify_len always bounds a 32-bit product.
		 */
		if (pa)
			*pa = pf_dev->notify_pa + off * pf_dev->notify_offset_multiplier;

		return pf_dev->notify_base + off * pf_dev->notify_offset_multiplier;
	} else {
		/* Region not premapped: map this queue's 2 bytes on demand. */
		return zxdh_pf_map_capability(dh_dev, pf_dev->notify_map_cap, 2, 2,
					      off * pf_dev->notify_offset_multiplier,
					      2, NULL, pa, NULL);
	}
}
+
+void zxdh_pf_unmap_vq_notify(struct dh_core_dev *dh_dev, void *priv)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ if (!pf_dev->notify_base)
+ pci_iounmap(dh_dev->pdev, priv);
+}
+
/* Write the ZXDH_CONFIG_S_* status byte to the device's common config. */
void zxdh_pf_set_status(struct dh_core_dev *dh_dev, uint8_t status)
{
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);

	iowrite8(status, &pf_dev->common->device_status);
}
+
/* Read the device status byte (ZXDH_CONFIG_S_* bits) from common config. */
uint8_t zxdh_pf_get_status(struct dh_core_dev *dh_dev)
{
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);

	return ioread8(&pf_dev->common->device_status);
}
+
+static uint8_t zxdh_pf_get_cfg_gen(struct dh_core_dev *dh_dev)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ uint8_t config_generation = 0;
+
+ config_generation = ioread8(&pf_dev->common->config_generation);
+ LOG_INFO("config_generation is %d\n", config_generation);
+
+ return config_generation;
+}
+
+void zxdh_pf_get_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ if (pf_dev->pf_sriov_cap_base) {
+ DEV_MAC_L = ioread32((void __iomem *)(pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off));
+ mac[0] = DEV_MAC_L & 0xff;
+ mac[1] = (DEV_MAC_L >> 8) & 0xff;
+ mac[2] = (DEV_MAC_L >> 16) & 0xff;
+ mac[3] = (DEV_MAC_L >> 24) & 0xff;
+ DEV_MAC_H = ioread16((void __iomem *)(pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off +
+ ZXDH_DEV_MAC_HIGH_OFFSET));
+ mac[4] = DEV_MAC_H & 0xff;
+ mac[5] = (DEV_MAC_H >> 8) & 0xff;
+ }
+}
+
+void zxdh_pf_set_vf_mac_reg(struct zxdh_pf_device *pf_dev, uint8_t *mac, int32_t vf_id)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+
+ if (pf_dev->pf_sriov_cap_base) {
+ DEV_MAC_L = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ DEV_MAC_H = mac[4] | (mac[5] << 8);
+ iowrite32(DEV_MAC_L, (void __iomem *)(pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off));
+ iowrite16(DEV_MAC_H, (void __iomem *)(pf_dev->pf_sriov_cap_base +
+ (pf_dev->sriov_bar_size) * vf_id +
+ pf_dev->dev_cfg_bar_off +
+ ZXDH_DEV_MAC_HIGH_OFFSET));
+ }
+}
+
/* Thin wrapper: resolve the PF private state, then program the VF MAC. */
void zxdh_pf_set_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id)
{
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);

	zxdh_pf_set_vf_mac_reg(pf_dev, mac, vf_id);
}
+
+void zxdh_set_mac(struct dh_core_dev *dh_dev, uint8_t *mac)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ DEV_MAC_L = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ DEV_MAC_H = mac[4] | (mac[5] << 8);
+ iowrite32(DEV_MAC_L, pf_dev->device);
+ iowrite16(DEV_MAC_H, (void __iomem *)((uint8_t *)pf_dev->device +
+ ZXDH_DEV_MAC_HIGH_OFFSET));
+}
+
+void zxdh_get_mac(struct dh_core_dev *dh_dev, uint8_t *mac)
+{
+ uint32_t DEV_MAC_L = 0;
+ uint16_t DEV_MAC_H = 0;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ DEV_MAC_L = ioread32(pf_dev->device);
+ mac[0] = DEV_MAC_L & 0xff;
+ mac[1] = (DEV_MAC_L >> 8) & 0xff;
+ mac[2] = (DEV_MAC_L >> 16) & 0xff;
+ mac[3] = (DEV_MAC_L >> 24) & 0xff;
+ DEV_MAC_H = ioread16((void __iomem *)((uint8_t *)pf_dev->device +
+ ZXDH_DEV_MAC_HIGH_OFFSET));
+ mac[4] = DEV_MAC_H & 0xff;
+ mac[5] = (DEV_MAC_H >> 8) & 0xff;
+}
+
+uint64_t zxdh_pf_get_features(struct dh_core_dev *dh_dev)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+ uint64_t device_feature = 0;
+
+ iowrite32(0, &pf_dev->common->device_feature_select);
+ device_feature = ioread32(&pf_dev->common->device_feature);
+ iowrite32(1, &pf_dev->common->device_feature_select);
+ device_feature |= ((uint64_t)ioread32(&pf_dev->common->device_feature) << 32);
+
+ return device_feature;
+}
+
+void zxdh_pf_set_features(struct dh_core_dev *dh_dev, uint64_t features)
+{
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
+
+ iowrite32(0, &pf_dev->common->guest_feature_select);
+ iowrite32((uint32_t)features, &pf_dev->common->guest_feature);
+ iowrite32(1, &pf_dev->common->guest_feature_select);
+ iowrite32(features >> 32, &pf_dev->common->guest_feature);
+}
+
static int dh_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct dh_core_dev *dh_dev = NULL;
diff --git a/drivers/net/ethernet/zte/dinghai/en_pf.h b/drivers/net/ethernet/zte/dinghai/en_pf.h
index 0d3880b0aede..197b21788576 100644
--- a/drivers/net/ethernet/zte/dinghai/en_pf.h
+++ b/drivers/net/ethernet/zte/dinghai/en_pf.h
@@ -10,11 +10,31 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/mutex.h>
+#include "dh_log.h"
+#include "dh_queue.h"
#define ZXDH_PF_VENDOR_ID 0x1cf2
#define ZXDH_PF_DEVICE_ID 0x8040
#define ZXDH_VF_DEVICE_ID 0x8041
/* Values for struct zxdh_pf_pci_cap.cfg_type: */
/* Common configuration */
#define ZXDH_PCI_CAP_COMMON_CFG 1
/* Notifications */
#define ZXDH_PCI_CAP_NOTIFY_CFG 2
/* ISR access */
#define ZXDH_PCI_CAP_ISR_CFG 3
/* Device specific configuration */
#define ZXDH_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define ZXDH_PCI_CAP_PCI_CFG 5

/* Highest BAR index a capability may legally reference */
#define ZXDH_PF_MAX_BAR_VAL 0x5
/* Alignment arguments passed to zxdh_pf_map_capability() */
#define ZXDH_PF_ALIGN4 4
#define ZXDH_PF_ALIGN2 2
/* Minimum length requested when premapping the notify region */
#define ZXDH_PF_MAP_MINLEN2 2

/* Byte offset of the upper 16 MAC bits within a device config window */
#define ZXDH_DEV_MAC_HIGH_OFFSET 4
+
enum dh_coredev_type {
DH_COREDEV_PF,
DH_COREDEV_VF,
@@ -34,6 +54,27 @@ struct dh_core_dev {
};
struct zxdh_pf_device {
	struct zxdh_pf_pci_common_cfg __iomem *common;
	/* Device-specific config region (non-legacy mode). */
	void __iomem *device;
	/* Base of vq notifications (non-legacy mode). */
	void __iomem *notify_base;
	void __iomem *pf_sriov_cap_base;
	/* Physical base of vq notifications */
	resource_size_t notify_pa;
	/* So we can sanity-check accesses. */
	size_t notify_len;
	size_t device_len;
	/* Capability for when we need to map notifications per-vq. */
	int32_t notify_map_cap;
	/* Multiply queue_notify_off by this value. (non-legacy mode). */
	uint32_t notify_offset_multiplier;
	/* Bitmask of BARs referenced by modern-style capabilities. */
	int32_t modern_bars;

	uint64_t pci_ioremap_addr[6];
	/* Per-VF stride within the SR-IOV capability region. */
	uint64_t sriov_bar_size;
	uint32_t dev_cfg_bar_off;
	bool packed_status;
bool bar_chan_valid;
bool vepa;
struct mutex irq_lock; /* Protects IRQ operations */
--
2.43.0
[-- Attachment #1.1.2: Type: text/html, Size: 45357 bytes --]
next prev parent reply other threads:[~2026-04-15 2:23 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-15 1:53 [PATCH net-next 1/3] net/ethernet: add ZTE network driver support Junyang Han
2026-04-15 1:53 ` [PATCH net-next 2/3] net/ethernet/zte/dinghai: add logging infrastructure Junyang Han
2026-04-15 14:19 ` Andrew Lunn
2026-04-15 1:53 ` Junyang Han [this message]
2026-04-15 14:31 ` [PATCH net-next 3/3] net/ethernet/zte/dinghai: add hardware register access and PCI capability scanning Andrew Lunn
2026-04-15 14:10 ` [PATCH net-next 1/3] net/ethernet: add ZTE network driver support Andrew Lunn
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260415015334.2018453-3-han.junyang@zte.com.cn \
--to=han.junyang@zte.com.cn \
--cc=andrew+netdev@lunn.ch \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=han.chengfei@zte.com.cn \
--cc=kuba@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=ran.ming@zte.com.cn \
--cc=zhang.yanze@zte.com.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox