qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Cornelia Huck <cohuck@redhat.com>
To: Alex Williamson <alex.williamson@redhat.com>
Cc: qemu-s390x@nongnu.org, Thomas Huth <thuth@redhat.com>,
	qemu-devel@nongnu.org, Pierre Morel <pmorel@linux.ibm.com>,
	Matthew Rosato <mjrosato@linux.ibm.com>
Subject: Re: [PULL v3 26/32] s390x/pci: use a PCI Group structure
Date: Tue, 17 Nov 2020 12:43:49 +0100	[thread overview]
Message-ID: <20201117124349.2e1e2fe3.cohuck@redhat.com> (raw)
In-Reply-To: <160426456672.24886.4745091679423774723.stgit@gimli.home>

On Sun, 01 Nov 2020 14:02:46 -0700
Alex Williamson <alex.williamson@redhat.com> wrote:

> From: Pierre Morel <pmorel@linux.ibm.com>
> 
> We use a S390PCIGroup structure to hold the information related to a
> zPCI Function group.
> 
> This allows us to be ready to support multiple groups and to retrieve
> the group information from the host.
> 
> Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>
> Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
> Reviewed-by: Cornelia Huck <cohuck@redhat.com>
> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
> ---
>  hw/s390x/s390-pci-bus.c         |   42 +++++++++++++++++++++++++++++++++++++++
>  hw/s390x/s390-pci-inst.c        |   23 +++++++++++++--------
>  include/hw/s390x/s390-pci-bus.h |   10 +++++++++
>  3 files changed, 66 insertions(+), 9 deletions(-)

I just bisected a regression down to this commit.

For an s390x tcg guest on an x86 host, virtio-pci devices are not
detected, even though the relevant feature bits are visible to the
guest. The same breakage occurs with different guest kernels.

KVM guests and s390x tcg guests on s390x are fine, so I assume an
endianness issue somewhere. Nothing jumps out at me, though.

> 
> diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
> index 218717397ae1..4c7f06d5cf95 100644
> --- a/hw/s390x/s390-pci-bus.c
> +++ b/hw/s390x/s390-pci-bus.c
> @@ -738,6 +738,46 @@ static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
>      object_unref(OBJECT(iommu));
>  }
>  
> +static S390PCIGroup *s390_group_create(int id)
> +{
> +    S390PCIGroup *group;
> +    S390pciState *s = s390_get_phb();
> +
> +    group = g_new0(S390PCIGroup, 1);
> +    group->id = id;
> +    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
> +    return group;
> +}
> +
> +S390PCIGroup *s390_group_find(int id)
> +{
> +    S390PCIGroup *group;
> +    S390pciState *s = s390_get_phb();
> +
> +    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
> +        if (group->id == id) {
> +            return group;
> +        }
> +    }
> +    return NULL;
> +}
> +
> +static void s390_pci_init_default_group(void)
> +{
> +    S390PCIGroup *group;
> +    ClpRspQueryPciGrp *resgrp;
> +
> +    group = s390_group_create(ZPCI_DEFAULT_FN_GRP);
> +    resgrp = &group->zpci_group;
> +    resgrp->fr = 1;
> +    stq_p(&resgrp->dasm, 0);
> +    stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
> +    stw_p(&resgrp->mui, DEFAULT_MUI);
> +    stw_p(&resgrp->i, 128);
> +    stw_p(&resgrp->maxstbl, 128);
> +    resgrp->version = 0;
> +}
> +
>  static void s390_pcihost_realize(DeviceState *dev, Error **errp)
>  {
>      PCIBus *b;
> @@ -766,7 +806,9 @@ static void s390_pcihost_realize(DeviceState *dev, Error **errp)
>      QTAILQ_INIT(&s->pending_sei);
>      QTAILQ_INIT(&s->zpci_devs);
>      QTAILQ_INIT(&s->zpci_dma_limit);
> +    QTAILQ_INIT(&s->zpci_groups);
>  
> +    s390_pci_init_default_group();
>      css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
>                               S390_ADAPTER_SUPPRESSIBLE, errp);
>  }
> diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
> index 4eadd9e79416..c25b2a67efe0 100644
> --- a/hw/s390x/s390-pci-inst.c
> +++ b/hw/s390x/s390-pci-inst.c
> @@ -298,21 +298,25 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
>          stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
>          stl_p(&resquery->fid, pbdev->fid);
>          stw_p(&resquery->pchid, 0);
> -        stw_p(&resquery->ug, 1);
> +        stw_p(&resquery->ug, ZPCI_DEFAULT_FN_GRP);
>          stl_p(&resquery->uid, pbdev->uid);
>          stw_p(&resquery->hdr.rsp, CLP_RC_OK);
>          break;
>      }
>      case CLP_QUERY_PCI_FNGRP: {
>          ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
> -        resgrp->fr = 1;
> -        stq_p(&resgrp->dasm, 0);
> -        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
> -        stw_p(&resgrp->mui, DEFAULT_MUI);
> -        stw_p(&resgrp->i, 128);
> -        stw_p(&resgrp->maxstbl, 128);
> -        resgrp->version = 0;
>  
> +        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
> +        S390PCIGroup *group;
> +
> +        group = s390_group_find(reqgrp->g);
> +        if (!group) {
> +            /* We do not allow access to unknown groups */
> +            /* The group must have been obtained with a vfio device */
> +            stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
> +            goto out;
> +        }
> +        memcpy(resgrp, &group->zpci_group, sizeof(ClpRspQueryPciGrp));
>          stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
>          break;
>      }
> @@ -787,7 +791,8 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
>      }
>      /* Length must be greater than 8, a multiple of 8 */
>      /* and not greater than maxstbl */
> -    if ((len <= 8) || (len % 8) || (len > pbdev->maxstbl)) {
> +    if ((len <= 8) || (len % 8) ||
> +        (len > pbdev->pci_group->zpci_group.maxstbl)) {
>          goto specification_error;
>      }
>      /* Do not cross a 4K-byte boundary */
> diff --git a/include/hw/s390x/s390-pci-bus.h b/include/hw/s390x/s390-pci-bus.h
> index 5f339e57fb68..869c0f254b7f 100644
> --- a/include/hw/s390x/s390-pci-bus.h
> +++ b/include/hw/s390x/s390-pci-bus.h
> @@ -316,6 +316,14 @@ typedef struct ZpciFmb {
>  } ZpciFmb;
>  QEMU_BUILD_BUG_MSG(offsetof(ZpciFmb, fmt0) != 48, "padding in ZpciFmb");
>  
> +#define ZPCI_DEFAULT_FN_GRP 0x20
> +typedef struct S390PCIGroup {
> +    ClpRspQueryPciGrp zpci_group;
> +    int id;
> +    QTAILQ_ENTRY(S390PCIGroup) link;
> +} S390PCIGroup;
> +S390PCIGroup *s390_group_find(int id);
> +
>  struct S390PCIBusDevice {
>      DeviceState qdev;
>      PCIDevice *pdev;
> @@ -333,6 +341,7 @@ struct S390PCIBusDevice {
>      uint16_t noi;
>      uint16_t maxstbl;
>      uint8_t sum;
> +    S390PCIGroup *pci_group;
>      S390MsixInfo msix;
>      AdapterRoutes routes;
>      S390PCIIOMMU *iommu;
> @@ -358,6 +367,7 @@ struct S390pciState {
>      QTAILQ_HEAD(, SeiContainer) pending_sei;
>      QTAILQ_HEAD(, S390PCIBusDevice) zpci_devs;
>      QTAILQ_HEAD(, S390PCIDMACount) zpci_dma_limit;
> +    QTAILQ_HEAD(, S390PCIGroup) zpci_groups;
>  };
>  
>  S390pciState *s390_get_phb(void);
> 



  reply	other threads:[~2020-11-17 11:45 UTC|newest]

Thread overview: 47+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-11-01 20:58 [PULL v3 00/32] VFIO updates 2020-11-01 (for QEMU 5.2 soft-freeze) Alex Williamson
2020-11-01 20:59 ` [PULL v3 01/32] vfio: Add function to unmap VFIO region Alex Williamson
2020-11-01 20:59 ` [PULL v3 02/32] vfio: Add vfio_get_object callback to VFIODeviceOps Alex Williamson
2020-11-01 20:59 ` [PULL v3 03/32] vfio: Add save and load functions for VFIO PCI devices Alex Williamson
2020-11-01 20:59 ` [PULL v3 04/32] vfio: Add migration region initialization and finalize function Alex Williamson
2020-11-05 23:55   ` Peter Maydell
2020-11-06  0:32     ` Alex Williamson
2020-11-01 20:59 ` [PULL v3 05/32] vfio: Add VM state change handler to know state of VM Alex Williamson
2020-11-01 20:59 ` [PULL v3 06/32] vfio: Add migration state change notifier Alex Williamson
2020-11-01 21:00 ` [PULL v3 07/32] vfio: Register SaveVMHandlers for VFIO device Alex Williamson
2020-11-01 21:00 ` [PULL v3 08/32] vfio: Add save state functions to SaveVMHandlers Alex Williamson
2020-11-01 21:00 ` [PULL v3 09/32] vfio: Add load " Alex Williamson
2020-11-01 21:00 ` [PULL v3 10/32] memory: Set DIRTY_MEMORY_MIGRATION when IOMMU is enabled Alex Williamson
2020-11-01 21:00 ` [PULL v3 11/32] vfio: Get migration capability flags for container Alex Williamson
2020-12-15  8:17   ` zhukeqian
2020-11-01 21:00 ` [PULL v3 12/32] vfio: Add function to start and stop dirty pages tracking Alex Williamson
2020-11-01 21:01 ` [PULL v3 13/32] vfio: Add vfio_listener_log_sync to mark dirty pages Alex Williamson
2020-11-01 21:01 ` [PULL v3 14/32] vfio: Dirty page tracking when vIOMMU is enabled Alex Williamson
2020-11-01 21:01 ` [PULL v3 15/32] vfio: Add ioctl to get dirty pages bitmap during dma unmap Alex Williamson
2020-12-15  8:06   ` zhukeqian
2020-11-01 21:01 ` [PULL v3 16/32] vfio: Make vfio-pci device migration capable Alex Williamson
2020-11-01 21:01 ` [PULL v3 17/32] qapi: Add VFIO devices migration stats in Migration stats Alex Williamson
2020-11-01 21:01 ` [PULL v3 18/32] update-linux-headers: Add vfio_zdev.h Alex Williamson
2020-11-01 21:01 ` [PULL v3 19/32] linux-headers: update against 5.10-rc1 Alex Williamson
2020-11-01 21:01 ` [PULL v3 20/32] s390x/pci: Move header files to include/hw/s390x Alex Williamson
2020-11-01 21:02 ` [PULL v3 21/32] vfio: Create shared routine for scanning info capabilities Alex Williamson
2020-11-01 21:02 ` [PULL v3 22/32] vfio: Find DMA available capability Alex Williamson
2020-11-01 21:02 ` [PULL v3 23/32] s390x/pci: Add routine to get the vfio dma available count Alex Williamson
2020-11-03  5:49   ` Philippe Mathieu-Daudé
2020-11-03 10:45     ` Cornelia Huck
2020-11-03 10:57       ` Philippe Mathieu-Daudé
2020-11-03 11:08       ` Philippe Mathieu-Daudé
2020-11-03 11:15         ` Cornelia Huck
2020-11-01 21:02 ` [PULL v3 24/32] s390x/pci: Honor DMA limits set by vfio Alex Williamson
2020-11-01 21:02 ` [PULL v3 25/32] s390x/pci: create a header dedicated to PCI CLP Alex Williamson
2020-11-01 21:02 ` [PULL v3 26/32] s390x/pci: use a PCI Group structure Alex Williamson
2020-11-17 11:43   ` Cornelia Huck [this message]
2020-11-17 11:55     ` Philippe Mathieu-Daudé
2020-11-17 12:06       ` Cornelia Huck
2020-11-17 12:09         ` Philippe Mathieu-Daudé
2020-11-01 21:02 ` [PULL v3 27/32] s390x/pci: clean up s390 PCI groups Alex Williamson
2020-11-01 21:03 ` [PULL v3 28/32] s390x/pci: use a PCI Function structure Alex Williamson
2020-11-01 21:03 ` [PULL v3 29/32] vfio: Add routine for finding VFIO_DEVICE_GET_INFO capabilities Alex Williamson
2020-11-01 21:03 ` [PULL v3 30/32] s390x/pci: get zPCI function info from host Alex Williamson
2020-11-01 21:03 ` [PULL v3 31/32] hw/vfio: Use lock guard macros Alex Williamson
2020-11-01 21:03 ` [PULL v3 32/32] vfio: fix incorrect print type Alex Williamson
2020-11-02 10:57 ` [PULL v3 00/32] VFIO updates 2020-11-01 (for QEMU 5.2 soft-freeze) Peter Maydell

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20201117124349.2e1e2fe3.cohuck@redhat.com \
    --to=cohuck@redhat.com \
    --cc=alex.williamson@redhat.com \
    --cc=mjrosato@linux.ibm.com \
    --cc=pmorel@linux.ibm.com \
    --cc=qemu-devel@nongnu.org \
    --cc=qemu-s390x@nongnu.org \
    --cc=thuth@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).