From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
Marcel Apfelbaum <marcel.apfelbaum@gmail.com>,
Akihiko Odaki <akihiko.odaki@daynix.com>,
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com>,
Jason Wang <jasowang@redhat.com>, Keith Busch <kbusch@kernel.org>,
Klaus Jensen <its@irrelevant.dk>,
Jesper Devantier <foss@defmacro.it>,
qemu-block@nongnu.org
Subject: [PULL 13/19] Revert "pcie_sriov: Ensure VF function number does not overflow"
Date: Thu, 1 Aug 2024 06:36:42 -0400
Message-ID: <19c45c00dc6a52f80f27dabbd28de1b770c16a89.1722508478.git.mst@redhat.com>
In-Reply-To: <cover.1722508478.git.mst@redhat.com>

This reverts commit 77718701157f6ca77ea7a57b536fa0a22f676082.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
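Note: at the API level this revert takes pcie_sriov_pf_init() back from a
bool return plus Error ** parameter to a plain void function, so callers
(igb, nvme) drop their error-handling paths. A minimal sketch of the caller
shape before and after the revert; DEV_SRIOV_OFFSET and the argument names
are placeholders, not taken from any real device model:

    /* Before this revert: failure was reported through errp. */
    if (!pcie_sriov_pf_init(pci_dev, DEV_SRIOV_OFFSET, "your_virtual_dev",
                            vf_devid, initial_vfs, total_vfs,
                            fun_offset, stride, errp)) {
        /* undo earlier init steps, then bail out */
        return;
    }

    /* After this revert: the call returns void and cannot fail. */
    pcie_sriov_pf_init(pci_dev, DEV_SRIOV_OFFSET, "your_virtual_dev",
                       vf_devid, initial_vfs, total_vfs,
                       fun_offset, stride);
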
docs/pcie_sriov.txt | 8 +++-----
include/hw/pci/pcie_sriov.h | 5 ++---
hw/net/igb.c | 13 +++----------
hw/nvme/ctrl.c | 24 ++++++++----------------
hw/pci/pcie_sriov.c | 19 ++-----------------
5 files changed, 18 insertions(+), 51 deletions(-)
diff --git a/docs/pcie_sriov.txt b/docs/pcie_sriov.txt
index ab2142807f..a47aad0bfa 100644
--- a/docs/pcie_sriov.txt
+++ b/docs/pcie_sriov.txt
@@ -52,11 +52,9 @@ setting up a BAR for a VF.
...
/* Add and initialize the SR/IOV capability */
- if (!pcie_sriov_pf_init(d, 0x200, "your_virtual_dev",
- vf_devid, initial_vfs, total_vfs,
- fun_offset, stride, errp)) {
- return;
- }
+ pcie_sriov_pf_init(d, 0x200, "your_virtual_dev",
+ vf_devid, initial_vfs, total_vfs,
+ fun_offset, stride);
/* Set up individual VF BARs (parameters as for normal BARs) */
pcie_sriov_pf_init_vf_bar( ... )
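
For illustration, a minimal sketch of the PF realize sequence the document
above describes, using the post-revert void signature; the device name,
IDs, BAR index and size below are made-up placeholders:

    static void your_pf_realize(PCIDevice *d, Error **errp)
    {
        /* ... ordinary config space and capability setup ... */

        /* Add and initialize the SR/IOV capability (cannot fail here). */
        pcie_sriov_pf_init(d, 0x200, "your_virtual_dev",
                           0x1234 /* vf_devid */, 4 /* initial_vfs */,
                           4 /* total_vfs */, 0x80 /* fun_offset */,
                           2 /* stride */);

        /* Set up an individual VF BAR (parameters as for normal BARs). */
        pcie_sriov_pf_init_vf_bar(d, 0 /* BAR index */,
                                  PCI_BASE_ADDRESS_SPACE_MEMORY |
                                  PCI_BASE_ADDRESS_MEM_TYPE_64,
                                  0x10000 /* per-VF BAR size */);
    }
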
diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h
index aa704e8f9d..450cbef6c2 100644
--- a/include/hw/pci/pcie_sriov.h
+++ b/include/hw/pci/pcie_sriov.h
@@ -27,11 +27,10 @@ typedef struct PCIESriovVF {
uint16_t vf_number; /* Logical VF number of this function */
} PCIESriovVF;
-bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
+void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
const char *vfname, uint16_t vf_dev_id,
uint16_t init_vfs, uint16_t total_vfs,
- uint16_t vf_offset, uint16_t vf_stride,
- Error **errp);
+ uint16_t vf_offset, uint16_t vf_stride);
void pcie_sriov_pf_exit(PCIDevice *dev);
/* Set up a VF bar in the SR/IOV bar area */
diff --git a/hw/net/igb.c b/hw/net/igb.c
index b6ca2f1b8a..b92bba402e 100644
--- a/hw/net/igb.c
+++ b/hw/net/igb.c
@@ -446,16 +446,9 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)
pcie_ari_init(pci_dev, 0x150);
- if (!pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET,
- TYPE_IGBVF, IGB_82576_VF_DEV_ID,
- IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
- IGB_VF_OFFSET, IGB_VF_STRIDE,
- errp)) {
- pcie_cap_exit(pci_dev);
- igb_cleanup_msix(s);
- msi_uninit(pci_dev);
- return;
- }
+ pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF,
+ IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
+ IGB_VF_OFFSET, IGB_VF_STRIDE);
pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MMIO_BAR_IDX,
PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH,
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index e86ea2e7ce..c6d4f61a47 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -8271,8 +8271,7 @@ out:
return pow2ceil(bar_size);
}
-static bool nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset,
- Error **errp)
+static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset)
{
uint16_t vf_dev_id = n->params.use_intel_id ?
PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME;
@@ -8281,17 +8280,12 @@ static bool nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset,
le16_to_cpu(cap->vifrsm),
NULL, NULL);
- if (!pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
- n->params.sriov_max_vfs, n->params.sriov_max_vfs,
- NVME_VF_OFFSET, NVME_VF_STRIDE,
- errp)) {
- return false;
- }
+ pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
+ n->params.sriov_max_vfs, n->params.sriov_max_vfs,
+ NVME_VF_OFFSET, NVME_VF_STRIDE);
pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size);
-
- return true;
}
static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
@@ -8416,12 +8410,6 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
return false;
}
- if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs &&
- !nvme_init_sriov(n, pci_dev, 0x120, errp)) {
- msix_uninit(pci_dev, &n->bar0, &n->bar0);
- return false;
- }
-
nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
pcie_cap_deverr_init(pci_dev);
@@ -8451,6 +8439,10 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
nvme_init_pmr(n, pci_dev);
}
+ if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
+ nvme_init_sriov(n, pci_dev, 0x120);
+ }
+
return true;
}
diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c
index f0bde0d3fc..499becd527 100644
--- a/hw/pci/pcie_sriov.c
+++ b/hw/pci/pcie_sriov.c
@@ -24,27 +24,14 @@ static PCIDevice *register_vf(PCIDevice *pf, int devfn,
const char *name, uint16_t vf_num);
static void unregister_vfs(PCIDevice *dev);
-bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
+void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
const char *vfname, uint16_t vf_dev_id,
uint16_t init_vfs, uint16_t total_vfs,
- uint16_t vf_offset, uint16_t vf_stride,
- Error **errp)
+ uint16_t vf_offset, uint16_t vf_stride)
{
uint8_t *cfg = dev->config + offset;
uint8_t *wmask;
- if (total_vfs) {
- uint16_t ari_cap = pcie_find_capability(dev, PCI_EXT_CAP_ID_ARI);
- uint16_t first_vf_devfn = dev->devfn + vf_offset;
- uint16_t last_vf_devfn = first_vf_devfn + vf_stride * (total_vfs - 1);
-
- if ((!ari_cap && PCI_SLOT(dev->devfn) != PCI_SLOT(last_vf_devfn)) ||
- last_vf_devfn >= PCI_DEVFN_MAX) {
- error_setg(errp, "VF function number overflows");
- return false;
- }
- }
-
pcie_add_capability(dev, PCI_EXT_CAP_ID_SRIOV, 1,
offset, PCI_EXT_CAP_SRIOV_SIZEOF);
dev->exp.sriov_cap = offset;
@@ -82,8 +69,6 @@ bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
pci_set_word(wmask + PCI_SRIOV_SYS_PGSIZE, 0x553);
qdev_prop_set_bit(&dev->qdev, "multifunction", true);
-
- return true;
}
void pcie_sriov_pf_exit(PCIDevice *dev)
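
For reference, the check dropped above rejected configurations where the
devfn computed for the last VF would overflow; a worked example with
placeholder numbers (ARI assumed enabled, so only the PCI_DEVFN_MAX bound
applies):

    uint16_t first_vf_devfn = pf_devfn + vf_offset;   /* 0x00 + 0x80 = 0x80 */
    uint16_t last_vf_devfn  = first_vf_devfn + vf_stride * (total_vfs - 1);
    /*
     * total_vfs = 64: 0x80 + 2 * 63 = 0xfe  < PCI_DEVFN_MAX (256) -> accepted
     * total_vfs = 65: 0x80 + 2 * 64 = 0x100 >= PCI_DEVFN_MAX      -> rejected
     * Without ARI, the VFs additionally had to stay in the PF's slot.
     */
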
--
MST