From: Stefan Bader <stefan.bader@canonical.com>
To: Jan Beulich <JBeulich@suse.com>
Cc: xiantao.zhang@intel.com, suravee.suthikulpanit@amd.com,
	xen-devel@lists.xen.org
Subject: Re: [PATCH v5] AMD IOMMU: fix Dom0 device setup failure for host bridges
Date: Thu, 12 Sep 2013 11:42:55 +0200
Message-ID: <52318C9F.6080202@canonical.com>
In-Reply-To: <523194B302000078000F2A12@nat28.tlf.novell.com>
On 12.09.2013 10:17, Jan Beulich wrote:
> The host bridge device (i.e. 0x18 for AMD) does not require an IOMMU, and
> therefore is not included in the IVRS. The current logic tries to map
> all PCI devices to an IOMMU. In this case, "xl dmesg" shows the
> following messages on an AMD system.
> 
> (XEN) setup 0000:00:18.0 for d0 failed (-19)
> (XEN) setup 0000:00:18.1 for d0 failed (-19)
> (XEN) setup 0000:00:18.2 for d0 failed (-19)
> (XEN) setup 0000:00:18.3 for d0 failed (-19)
> (XEN) setup 0000:00:18.4 for d0 failed (-19)
> (XEN) setup 0000:00:18.5 for d0 failed (-19)
> 
> This patch adds a new device type (i.e. DEV_TYPE_PCI_HOST_BRIDGE) which
> corresponds to PCI class code 0x06 and sub-class 0x00. It then uses this
> new type to filter out host bridges when trying to map devices to the IOMMU.
> 
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> Reported-by: Stefan Bader <stefan.bader@canonical.com>
> 
> On VT-d, refuse (un)mapping host bridges for domains other than the
> hardware domain.
> 
> Coding style cleanup.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
As this looks to be about final:
Tested-by: Stefan Bader <stefan.bader@canonical.com>
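
For reference, the classification the patch relies on comes straight from the
16-bit class/sub-class word in PCI config space: 0x0600 for a host bridge,
i.e. base class 0x06 and sub-class 0x00, read as one little-endian word at
offset 0x0a. A minimal stand-alone sketch of that check in C, using a
hypothetical read_class_word() helper rather than Xen's own config-space
accessors:

    #include <stdint.h>
    #include <stdbool.h>

    #define PCI_CLASS_BRIDGE_HOST 0x0600  /* base class 0x06, sub-class 0x00 */

    /* Hypothetical helper: return the class/sub-class word stored at
     * config-space offset 0x0a for the given segment/bus/devfn. */
    uint16_t read_class_word(uint16_t seg, uint8_t bus, uint8_t devfn);

    /* Devices matching this are the ones the patch now skips in the Dom0
     * setup path instead of reporting -19 (-ENODEV). */
    static bool is_host_bridge(uint16_t seg, uint8_t bus, uint8_t devfn)
    {
        return read_class_word(seg, bus, devfn) == PCI_CLASS_BRIDGE_HOST;
    }
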
> 
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -147,9 +147,10 @@ static void amd_iommu_setup_domain_devic
>  
>          amd_iommu_flush_device(iommu, req_id);
>  
> -        AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, "
> +        AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, type = %#x, "
>                          "root table = %#"PRIx64", "
> -                        "domain = %d, paging mode = %d\n", req_id,
> +                        "domain = %d, paging mode = %d\n",
> +                        req_id, pdev->type,
>                          page_to_maddr(hd->root_table),
>                          hd->domain_id, hd->paging_mode);
>      }
> @@ -175,6 +176,15 @@ static int __init amd_iommu_setup_dom0_d
>  
>      if ( unlikely(!iommu) )
>      {
> +        /* Filter the bridge devices */
> +        if ( pdev->type == DEV_TYPE_PCI_HOST_BRIDGE )
> +        {
> +            AMD_IOMMU_DEBUG("Skipping host bridge %04x:%02x:%02x.%u\n",
> +                            pdev->seg, PCI_BUS(bdf), PCI_SLOT(bdf),
> +                            PCI_FUNC(bdf));
> +            return 0;
> +        }
> +
>          AMD_IOMMU_DEBUG("No iommu for device %04x:%02x:%02x.%u\n",
>                          pdev->seg, pdev->bus,
>                          PCI_SLOT(devfn), PCI_FUNC(devfn));
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -194,9 +194,6 @@ static struct pci_dev *alloc_pdev(struct
>          u16 cap;
>          u8 sec_bus, sub_bus;
>  
> -        case DEV_TYPE_PCIe_BRIDGE:
> -            break;
> -
>          case DEV_TYPE_PCIe2PCI_BRIDGE:
>          case DEV_TYPE_LEGACY_PCI_BRIDGE:
>              sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
> @@ -244,6 +241,8 @@ static struct pci_dev *alloc_pdev(struct
>              break;
>  
>          case DEV_TYPE_PCI:
> +        case DEV_TYPE_PCIe_BRIDGE:
> +        case DEV_TYPE_PCI_HOST_BRIDGE:
>              break;
>  
>          default:
> @@ -697,6 +696,7 @@ void pci_release_devices(struct domain *
>      spin_unlock(&pcidevs_lock);
>  }
>  
> +#define PCI_CLASS_BRIDGE_HOST    0x0600
>  #define PCI_CLASS_BRIDGE_PCI     0x0604
>  
>  enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn)
> @@ -720,6 +720,8 @@ enum pdev_type pdev_type(u16 seg, u8 bus
>              return DEV_TYPE_PCI2PCIe_BRIDGE;
>          }
>          return DEV_TYPE_PCIe_BRIDGE;
> +    case PCI_CLASS_BRIDGE_HOST:
> +        return DEV_TYPE_PCI_HOST_BRIDGE;
>  
>      case 0x0000: case 0xffff:
>          return DEV_TYPE_PCI_UNKNOWN;
> --- a/xen/drivers/passthrough/vtd/intremap.c
> +++ b/xen/drivers/passthrough/vtd/intremap.c
> @@ -442,6 +442,7 @@ static void set_msi_source_id(struct pci
>      case DEV_TYPE_PCIe_ENDPOINT:
>      case DEV_TYPE_PCIe_BRIDGE:
>      case DEV_TYPE_PCIe2PCI_BRIDGE:
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
>          switch ( pdev->phantom_stride )
>          {
>          case 1: sq = SQ_13_IGNORE_3; break;
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1433,6 +1433,15 @@ static int domain_context_mapping(
>  
>      switch ( pdev->type )
>      {
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
> +        if ( iommu_verbose )
> +            dprintk(VTDPREFIX, "d%d:Hostbridge: skip %04x:%02x:%02x.%u map\n",
> +                    domain->domain_id, seg, bus,
> +                    PCI_SLOT(devfn), PCI_FUNC(devfn));
> +        if ( !is_hardware_domain(domain) )
> +            return -EPERM;
> +        break;
> +
>      case DEV_TYPE_PCIe_BRIDGE:
>      case DEV_TYPE_PCIe2PCI_BRIDGE:
>      case DEV_TYPE_LEGACY_PCI_BRIDGE:
> @@ -1563,6 +1572,15 @@ static int domain_context_unmap(
>  
>      switch ( pdev->type )
>      {
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
> +        if ( iommu_verbose )
> +            dprintk(VTDPREFIX, "d%d:Hostbridge: skip %04x:%02x:%02x.%u unmap\n",
> +                    domain->domain_id, seg, bus,
> +                    PCI_SLOT(devfn), PCI_FUNC(devfn));
> +        if ( !is_hardware_domain(domain) )
> +            return -EPERM;
> +        goto out;
> +
>      case DEV_TYPE_PCIe_BRIDGE:
>      case DEV_TYPE_PCIe2PCI_BRIDGE:
>      case DEV_TYPE_LEGACY_PCI_BRIDGE:
> --- a/xen/include/xen/pci.h
> +++ b/xen/include/xen/pci.h
> @@ -63,6 +63,7 @@ struct pci_dev {
>          DEV_TYPE_PCIe2PCI_BRIDGE,   // PCIe-to-PCI/PCIx bridge
>          DEV_TYPE_PCI2PCIe_BRIDGE,   // PCI/PCIx-to-PCIe bridge
>          DEV_TYPE_LEGACY_PCI_BRIDGE, // Legacy PCI bridge
> +        DEV_TYPE_PCI_HOST_BRIDGE,   // PCI Host bridge
>          DEV_TYPE_PCI,
>      } type;
>  
> 
> 
Thread overview: 9+ messages:
2013-09-11 19:39 [PATCH 1/1 V4] x86/AMD: Fix setup ssss:bb:dd:f for d0 failed suravee.suthikulpanit
2013-09-12  8:17 ` [PATCH v5] AMD IOMMU: fix Dom0 device setup failure for host bridges Jan Beulich
2013-09-12  9:42   ` Stefan Bader [this message]
2013-09-18 12:51   ` Ping: " Jan Beulich
2013-09-18 22:27     ` Auld, Will
2013-09-26  8:09       ` Jan Beulich
2013-09-27  7:10         ` Zhang, Xiantao
2013-09-27  7:19           ` Jan Beulich
2013-09-27  7:28             ` Zhang, Xiantao