* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-06 16:03 ` [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine Alexander Graf
@ 2015-01-07 15:52 ` Claudio Fontana
2015-01-07 21:47 ` Alexander Graf
2015-01-08 10:31 ` Peter Maydell
2015-01-12 16:20 ` Claudio Fontana
2015-01-12 16:49 ` alvise rigo
2 siblings, 2 replies; 44+ messages in thread
From: Claudio Fontana @ 2015-01-07 15:52 UTC (permalink / raw)
To: Alexander Graf, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
On 06.01.2015 17:03, Alexander Graf wrote:
> Now that we have a working "generic" PCIe host bridge driver, we can plug
> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>
> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
> into an AArch64 VM with this and they all lived happily ever after.
>
> Signed-off-by: Alexander Graf <agraf@suse.de>
>
> ---
>
> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
> systems. If you want to use it with AArch64 guests, please apply the following
> patch or wait until upstream cleaned up the code properly:
>
> http://csgraf.de/agraf/pci/pci-3.19.patch
> ---
> default-configs/arm-softmmu.mak | 2 +
> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
> 2 files changed, 80 insertions(+), 5 deletions(-)
>
> diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
> index f3513fa..7671ee2 100644
> --- a/default-configs/arm-softmmu.mak
> +++ b/default-configs/arm-softmmu.mak
> @@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
> CONFIG_VERSATILE_PCI=y
> CONFIG_VERSATILE_I2C=y
>
> +CONFIG_PCI_GENERIC=y
> +
> CONFIG_SDHCI=y
> CONFIG_INTEGRATOR_DEBUG=y
>
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index 2353440..b7635ac 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -42,6 +42,7 @@
> #include "exec/address-spaces.h"
> #include "qemu/bitops.h"
> #include "qemu/error-report.h"
> +#include "hw/pci-host/gpex.h"
>
> #define NUM_VIRTIO_TRANSPORTS 32
>
> @@ -69,6 +70,7 @@ enum {
> VIRT_MMIO,
> VIRT_RTC,
> VIRT_FW_CFG,
> + VIRT_PCIE,
> };
>
> typedef struct MemMapEntry {
> @@ -129,13 +131,14 @@ static const MemMapEntry a15memmap[] = {
> [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
> [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
> /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
> - /* 0x10000000 .. 0x40000000 reserved for PCI */
> + [VIRT_PCIE] = { 0x10000000, 0x30000000 },
> [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
> };
>
> static const int a15irqmap[] = {
> [VIRT_UART] = 1,
> [VIRT_RTC] = 2,
> + [VIRT_PCIE] = 3,
> [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
> };
>
> @@ -312,7 +315,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
> }
> }
>
> -static void fdt_add_gic_node(const VirtBoardInfo *vbi)
> +static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
> {
> uint32_t gic_phandle;
>
> @@ -331,9 +334,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
> 2, vbi->memmap[VIRT_GIC_CPU].base,
> 2, vbi->memmap[VIRT_GIC_CPU].size);
> qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
> +
> + return gic_phandle;
> }
>
> -static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> +static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> {
> /* We create a standalone GIC v2 */
> DeviceState *gicdev;
> @@ -380,7 +385,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> pic[i] = qdev_get_gpio_in(gicdev, i);
> }
>
> - fdt_add_gic_node(vbi);
> + return fdt_add_gic_node(vbi);
> }
>
> static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
> @@ -556,6 +561,71 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
> g_free(nodename);
> }
>
> +static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
> + uint32_t gic_phandle)
> +{
> + hwaddr base = vbi->memmap[VIRT_PCIE].base;
> + hwaddr size = vbi->memmap[VIRT_PCIE].size;
> + hwaddr size_ioport = 64 * 1024;
> + hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN;
> + hwaddr size_mmio = size - size_ecam - size_ioport;
> + hwaddr base_mmio = base;
> + hwaddr base_ioport = base_mmio + size_mmio;
> + hwaddr base_ecam = base_ioport + size_ioport;
> + int irq = vbi->irqmap[VIRT_PCIE];
> + MemoryRegion *mmio_alias;
> + MemoryRegion *mmio_reg;
> + DeviceState *dev;
> + char *nodename;
> +
> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
> +
> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
> + qdev_init_nofail(dev);
> +
> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
> +
> + /* Map the MMIO window at the same spot in bus and cpu layouts */
> + mmio_alias = g_new0(MemoryRegion, 1);
> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
> + mmio_reg, base_mmio, size_mmio);
> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
> +
> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
> +
> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
> + qemu_fdt_add_subnode(vbi->fdt, nodename);
> + qemu_fdt_setprop_string(vbi->fdt, nodename,
> + "compatible", "pci-host-ecam-generic");
is this the only compatible string we should set here? Is this not legacy pci compatible?
In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
would it be sensible to make it a list and include "pci" as well?
> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
> +
> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
> + 2, base_ecam, 2, size_ecam);
> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
> + 1, 0x01000000, 2, 0,
> + 2, base_ioport, 2, size_ioport,
> +
> + 1, 0x02000000, 2, base_mmio,
> + 2, base_mmio, 2, size_mmio);
> +
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
> + 0, 0, 0, /* device */
> + 0, /* PCI irq */
> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
> + 0, 0, 0, /* device */
> + 0 /* PCI irq */);
Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series. Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
Thanks,
Claudio
> +
> + g_free(nodename);
> +}
> +
> static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
> {
> const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
> @@ -573,6 +643,7 @@ static void machvirt_init(MachineState *machine)
> MemoryRegion *ram = g_new(MemoryRegion, 1);
> const char *cpu_model = machine->cpu_model;
> VirtBoardInfo *vbi;
> + uint32_t gic_phandle;
>
> if (!cpu_model) {
> cpu_model = "cortex-a15";
> @@ -634,12 +705,14 @@ static void machvirt_init(MachineState *machine)
>
> create_flash(vbi);
>
> - create_gic(vbi, pic);
> + gic_phandle = create_gic(vbi, pic);
>
> create_uart(vbi, pic);
>
> create_rtc(vbi, pic);
>
> + create_pcie(vbi, pic, gic_phandle);
> +
> /* Create mmio transports, so the user can create virtio backends
> * (which will be automatically plugged in to the transports). If
> * no backend is created the transport will just sit harmlessly idle.
>
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-07 15:52 ` Claudio Fontana
@ 2015-01-07 21:47 ` Alexander Graf
2015-01-08 12:55 ` Claudio Fontana
2015-01-08 10:31 ` Peter Maydell
1 sibling, 1 reply; 44+ messages in thread
From: Alexander Graf @ 2015-01-07 21:47 UTC (permalink / raw)
To: Claudio Fontana, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
On 07.01.15 16:52, Claudio Fontana wrote:
> On 06.01.2015 17:03, Alexander Graf wrote:
>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>
>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>> into an AArch64 VM with this and they all lived happily ever after.
>>
>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>
>> ---
>>
>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>> systems. If you want to use it with AArch64 guests, please apply the following
>> patch or wait until upstream cleaned up the code properly:
>>
>> http://csgraf.de/agraf/pci/pci-3.19.patch
>> ---
>> default-configs/arm-softmmu.mak | 2 +
>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>
>> diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
>> index f3513fa..7671ee2 100644
>> --- a/default-configs/arm-softmmu.mak
>> +++ b/default-configs/arm-softmmu.mak
>> @@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
>> CONFIG_VERSATILE_PCI=y
>> CONFIG_VERSATILE_I2C=y
>>
>> +CONFIG_PCI_GENERIC=y
>> +
>> CONFIG_SDHCI=y
>> CONFIG_INTEGRATOR_DEBUG=y
>>
>> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
>> index 2353440..b7635ac 100644
>> --- a/hw/arm/virt.c
>> +++ b/hw/arm/virt.c
>> @@ -42,6 +42,7 @@
>> #include "exec/address-spaces.h"
>> #include "qemu/bitops.h"
>> #include "qemu/error-report.h"
>> +#include "hw/pci-host/gpex.h"
>>
>> #define NUM_VIRTIO_TRANSPORTS 32
>>
>> @@ -69,6 +70,7 @@ enum {
>> VIRT_MMIO,
>> VIRT_RTC,
>> VIRT_FW_CFG,
>> + VIRT_PCIE,
>> };
>>
>> typedef struct MemMapEntry {
>> @@ -129,13 +131,14 @@ static const MemMapEntry a15memmap[] = {
>> [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
>> [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
>> /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
>> - /* 0x10000000 .. 0x40000000 reserved for PCI */
>> + [VIRT_PCIE] = { 0x10000000, 0x30000000 },
>> [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
>> };
>>
>> static const int a15irqmap[] = {
>> [VIRT_UART] = 1,
>> [VIRT_RTC] = 2,
>> + [VIRT_PCIE] = 3,
>> [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
>> };
>>
>> @@ -312,7 +315,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
>> }
>> }
>>
>> -static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>> +static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
>> {
>> uint32_t gic_phandle;
>>
>> @@ -331,9 +334,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>> 2, vbi->memmap[VIRT_GIC_CPU].base,
>> 2, vbi->memmap[VIRT_GIC_CPU].size);
>> qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
>> +
>> + return gic_phandle;
>> }
>>
>> -static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>> +static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>> {
>> /* We create a standalone GIC v2 */
>> DeviceState *gicdev;
>> @@ -380,7 +385,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>> pic[i] = qdev_get_gpio_in(gicdev, i);
>> }
>>
>> - fdt_add_gic_node(vbi);
>> + return fdt_add_gic_node(vbi);
>> }
>>
>> static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
>> @@ -556,6 +561,71 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
>> g_free(nodename);
>> }
>>
>> +static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
>> + uint32_t gic_phandle)
>> +{
>> + hwaddr base = vbi->memmap[VIRT_PCIE].base;
>> + hwaddr size = vbi->memmap[VIRT_PCIE].size;
>> + hwaddr size_ioport = 64 * 1024;
>> + hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN;
>> + hwaddr size_mmio = size - size_ecam - size_ioport;
>> + hwaddr base_mmio = base;
>> + hwaddr base_ioport = base_mmio + size_mmio;
>> + hwaddr base_ecam = base_ioport + size_ioport;
>> + int irq = vbi->irqmap[VIRT_PCIE];
>> + MemoryRegion *mmio_alias;
>> + MemoryRegion *mmio_reg;
>> + DeviceState *dev;
>> + char *nodename;
>> +
>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>> +
>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>> + qdev_init_nofail(dev);
>> +
>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>> +
>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>> + mmio_alias = g_new0(MemoryRegion, 1);
>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>> + mmio_reg, base_mmio, size_mmio);
>> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
>> +
>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
>> +
>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>> + "compatible", "pci-host-ecam-generic");
>
> is this the only compatible string we should set here? Is this not legacy pci compatible?
> In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
> would it be sensible to make it a list and include "pci" as well?
I couldn't find anything that defines what an "pci" compatible should
look like. We definitely don't implement the legacy PCI config space
accessor registers.
>
>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>> +
>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>> + 2, base_ecam, 2, size_ecam);
>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>> + 1, 0x01000000, 2, 0,
>> + 2, base_ioport, 2, size_ioport,
>> +
>> + 1, 0x02000000, 2, base_mmio,
>> + 2, base_mmio, 2, size_mmio);
>> +
>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>> + 0, 0, 0, /* device */
>> + 0, /* PCI irq */
>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
>> + 0, 0, 0, /* device */
>> + 0 /* PCI irq */);
>
> Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series.
How exactly is this undocumented? The "mask" is a mask over the first
fields of an interrupt-map row plus an IRQ offset. So the mask above
means "Any device with any function and any IRQ on it, map to device IRQ
0" which maps to vbi->irqmap[VIRT_PCIE] (IRQ 3).
> Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
You're saying you'd prefer a define?
> How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
His implementation explicitly listed every PCI slot, while mine actually
makes use of the mask and simply routes everything to a single IRQ line.
The benefit of masking devfn out is that you don't need to worry about
the number of slots you support - and anything performance critical
should go via MSI-X anyway ;).
So what exactly do you test it with? I've successfully received IRQs
with a Linux guest, so I'm slightly puzzled you're running into problems.
Alex
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-07 21:47 ` Alexander Graf
@ 2015-01-08 12:55 ` Claudio Fontana
2015-01-08 13:26 ` Alexander Graf
2015-01-08 13:36 ` alvise rigo
0 siblings, 2 replies; 44+ messages in thread
From: Claudio Fontana @ 2015-01-08 12:55 UTC (permalink / raw)
To: Alexander Graf, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
Alvise Rigo
(added cc: Alvise which I mistakenly assumed was in Cc: already)
On 07.01.2015 22:47, Alexander Graf wrote:
>
>
> On 07.01.15 16:52, Claudio Fontana wrote:
>> On 06.01.2015 17:03, Alexander Graf wrote:
>>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>>
>>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>>> into an AArch64 VM with this and they all lived happily ever after.
>>>
>>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>>
>>> ---
>>>
>>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>>> systems. If you want to use it with AArch64 guests, please apply the following
>>> patch or wait until upstream cleaned up the code properly:
>>>
>>> http://csgraf.de/agraf/pci/pci-3.19.patch
>>> ---
>>> default-configs/arm-softmmu.mak | 2 +
>>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
>>> index f3513fa..7671ee2 100644
>>> --- a/default-configs/arm-softmmu.mak
>>> +++ b/default-configs/arm-softmmu.mak
>>> @@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
>>> CONFIG_VERSATILE_PCI=y
>>> CONFIG_VERSATILE_I2C=y
>>>
>>> +CONFIG_PCI_GENERIC=y
>>> +
>>> CONFIG_SDHCI=y
>>> CONFIG_INTEGRATOR_DEBUG=y
>>>
>>> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
>>> index 2353440..b7635ac 100644
>>> --- a/hw/arm/virt.c
>>> +++ b/hw/arm/virt.c
>>> @@ -42,6 +42,7 @@
>>> #include "exec/address-spaces.h"
>>> #include "qemu/bitops.h"
>>> #include "qemu/error-report.h"
>>> +#include "hw/pci-host/gpex.h"
>>>
>>> #define NUM_VIRTIO_TRANSPORTS 32
>>>
>>> @@ -69,6 +70,7 @@ enum {
>>> VIRT_MMIO,
>>> VIRT_RTC,
>>> VIRT_FW_CFG,
>>> + VIRT_PCIE,
>>> };
>>>
>>> typedef struct MemMapEntry {
>>> @@ -129,13 +131,14 @@ static const MemMapEntry a15memmap[] = {
>>> [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
>>> [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
>>> /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
>>> - /* 0x10000000 .. 0x40000000 reserved for PCI */
>>> + [VIRT_PCIE] = { 0x10000000, 0x30000000 },
>>> [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
>>> };
>>>
>>> static const int a15irqmap[] = {
>>> [VIRT_UART] = 1,
>>> [VIRT_RTC] = 2,
>>> + [VIRT_PCIE] = 3,
>>> [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
>>> };
>>>
>>> @@ -312,7 +315,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
>>> }
>>> }
>>>
>>> -static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>>> +static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
>>> {
>>> uint32_t gic_phandle;
>>>
>>> @@ -331,9 +334,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>>> 2, vbi->memmap[VIRT_GIC_CPU].base,
>>> 2, vbi->memmap[VIRT_GIC_CPU].size);
>>> qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
>>> +
>>> + return gic_phandle;
>>> }
>>>
>>> -static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>>> +static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>>> {
>>> /* We create a standalone GIC v2 */
>>> DeviceState *gicdev;
>>> @@ -380,7 +385,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>>> pic[i] = qdev_get_gpio_in(gicdev, i);
>>> }
>>>
>>> - fdt_add_gic_node(vbi);
>>> + return fdt_add_gic_node(vbi);
>>> }
>>>
>>> static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
>>> @@ -556,6 +561,71 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
>>> g_free(nodename);
>>> }
>>>
>>> +static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
>>> + uint32_t gic_phandle)
>>> +{
>>> + hwaddr base = vbi->memmap[VIRT_PCIE].base;
>>> + hwaddr size = vbi->memmap[VIRT_PCIE].size;
>>> + hwaddr size_ioport = 64 * 1024;
>>> + hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN;
>>> + hwaddr size_mmio = size - size_ecam - size_ioport;
>>> + hwaddr base_mmio = base;
>>> + hwaddr base_ioport = base_mmio + size_mmio;
>>> + hwaddr base_ecam = base_ioport + size_ioport;
>>> + int irq = vbi->irqmap[VIRT_PCIE];
>>> + MemoryRegion *mmio_alias;
>>> + MemoryRegion *mmio_reg;
>>> + DeviceState *dev;
>>> + char *nodename;
>>> +
>>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>>> +
>>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>>> + qdev_init_nofail(dev);
>>> +
>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>>> +
>>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>>> + mmio_alias = g_new0(MemoryRegion, 1);
>>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>>> + mmio_reg, base_mmio, size_mmio);
>>> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
>>> +
>>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
>>> +
>>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>>> + "compatible", "pci-host-ecam-generic");
>>
>> is this the only compatible string we should set here? Is this not legacy pci compatible?
>> In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
>> would it be sensible to make it a list and include "pci" as well?
>
> I couldn't find anything that defines what an "pci" compatible should
> look like. We definitely don't implement the legacy PCI config space
> accessor registers.
I see, I assumed this patch would support both CAM and ECAM as configuration methods, while now I understand
Alvise's patches support only CAM, while these support only ECAM..
So basically I should look at the compatible string and then choose configuration method accordingly.
I wonder if I should deal with the case where the compatible string contains both ECAM and CAM.
>>
>>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>>> +
>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>>> + 2, base_ecam, 2, size_ecam);
>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>>> + 1, 0x01000000, 2, 0,
>>> + 2, base_ioport, 2, size_ioport,
>>> +
>>> + 1, 0x02000000, 2, base_mmio,
>>> + 2, base_mmio, 2, size_mmio);
>>> +
>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>>> + 0, 0, 0, /* device */
>>> + 0, /* PCI irq */
>>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
>>> + 0, 0, 0, /* device */
>>> + 0 /* PCI irq */);
>>
>> Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series.
>
> How exactly is this undocumented? The "mask" is a mask over the first
> fields of an interrupt-map row plus an IRQ offset. So the mask above
> means "Any device with any function and any IRQ on it, map to device IRQ
> 0" which maps to vbi->irqmap[VIRT_PCIE] (IRQ 3).
(see my answer to Peter below in thread)
this is a bit different to what Alvise's series is doing I think (see later).
>
>> Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
>
> You're saying you'd prefer a define?
Yes that would be helpful :)
>
>> How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
>
> His implementation explicitly listed every PCI slot, while mine actually
> makes use of the mask and simply routes everything to a single IRQ line.
>
> The benefit of masking devfn out is that you don't need to worry about
> the number of slots you support - and anything performance critical
> should go via MSI-X anyway ;).
The benefit for me (but for me only probably..) is that with one IRQ per slot I didn't have to implement shared irqs and msi / msi-x in the guest yet. But that should be done eventually anyway..
>
> So what exactly do you test it with? I've successfully received IRQs
> with a Linux guest, so I'm slightly puzzled you're running into problems.
Of course, I am just implementing PCI guest support for OSv/AArch64.
Works with the legacy pci support using Alvise's series to do virtio-pci with block, net, rng etc,
but I am now considering going for PCIE support directly although that means more implementation work for me.
>
>
> Alex
>
Ciao & thanks,
Claudio
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-08 12:55 ` Claudio Fontana
@ 2015-01-08 13:26 ` Alexander Graf
2015-01-08 15:01 ` Claudio Fontana
2015-01-08 13:36 ` alvise rigo
1 sibling, 1 reply; 44+ messages in thread
From: Alexander Graf @ 2015-01-08 13:26 UTC (permalink / raw)
To: Claudio Fontana, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
On 08.01.15 13:55, Claudio Fontana wrote:
> (added cc: Alvise which I mistakenly assumed was in Cc: already)
He was in CC :). Now he's there twice.
>
> On 07.01.2015 22:47, Alexander Graf wrote:
>>
>>
>> On 07.01.15 16:52, Claudio Fontana wrote:
>>> On 06.01.2015 17:03, Alexander Graf wrote:
>>>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>>>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>>>
>>>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>>>> into an AArch64 VM with this and they all lived happily ever after.
>>>>
>>>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>>>
>>>> ---
>>>>
>>>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>>>> systems. If you want to use it with AArch64 guests, please apply the following
>>>> patch or wait until upstream cleaned up the code properly:
>>>>
>>>> http://csgraf.de/agraf/pci/pci-3.19.patch
>>>> ---
>>>> default-configs/arm-softmmu.mak | 2 +
>>>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>>>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>>>
[...]
>>>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>>>> +
>>>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>>>> + qdev_init_nofail(dev);
>>>> +
>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>>>> +
>>>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>>>> + mmio_alias = g_new0(MemoryRegion, 1);
>>>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>>>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>>>> + mmio_reg, base_mmio, size_mmio);
>>>> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
>>>> +
>>>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
>>>> +
>>>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>>>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>>>> + "compatible", "pci-host-ecam-generic");
>>>
>>> is this the only compatible string we should set here? Is this not legacy pci compatible?
>>> In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
>>> would it be sensible to make it a list and include "pci" as well?
>>
>> I couldn't find anything that defines what an "pci" compatible should
>> look like. We definitely don't implement the legacy PCI config space
>> accessor registers.
>
> I see, I assumed this patch would support both CAM and ECAM as configuration methods, while now I understand
> Alvise's patches support only CAM, while these support only ECAM..
> So basically I should look at the compatible string and then choose configuration method accordingly.
> I wonder if I should deal with the case where the compatible string contains both ECAM and CAM.
Well, original PCI didn't even do CAM. You only had two 32-bit ioport
registers through which all config space accesses were tunneled.
>
>>>
>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>>>> +
>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>>>> + 2, base_ecam, 2, size_ecam);
>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>>>> + 1, 0x01000000, 2, 0,
>>>> + 2, base_ioport, 2, size_ioport,
>>>> +
>>>> + 1, 0x02000000, 2, base_mmio,
>>>> + 2, base_mmio, 2, size_mmio);
>>>> +
>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>>>> + 0, 0, 0, /* device */
>>>> + 0, /* PCI irq */
>>>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>>>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
>>>> + 0, 0, 0, /* device */
>>>> + 0 /* PCI irq */);
>>>
>>> Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series.
>>
>> How exactly is this undocumented? The "mask" is a mask over the first
>> fields of an interrupt-map row plus an IRQ offset. So the mask above
>> means "Any device with any function and any IRQ on it, map to device IRQ
>> 0" which maps to vbi->irqmap[VIRT_PCIE] (IRQ 3).
>
> (see my answer to Peter below in thread)
>
> this is a bit different to what Alvise's series is doing I think (see later).
Yes, but it's easier :).
>
>>
>>> Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
>>
>> You're saying you'd prefer a define?
>
> Yes that would be helpful :)
>
>>
>>> How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
>>
>> His implementation explicitly listed every PCI slot, while mine actually
>> makes use of the mask and simply routes everything to a single IRQ line.
>>
>> The benefit of masking devfn out is that you don't need to worry about
>> the number of slots you support - and anything performance critical
>> should go via MSI-X anyway ;).
>
> The benefit for me (but for me only probably..) is that with one IRQ per slot I didn't have to implement shared irqs and msi / msi-x in the guest yet. But that should be done eventually anyway..
You most likely wouldn't get one IRQ per slot anyway. Most PHBs expose 4
outgoing IRQ lines, so you'll need to deal with sharing IRQs regardless.
Also, sharing IRQ lines isn't incredibly black magic and quite a
fundamental PCI IRQ concept, so I'm glad I'm pushing you into the
direction of implementing it early on :).
>
>>
>> So what exactly do you test it with? I've successfully received IRQs
>> with a Linux guest, so I'm slightly puzzled you're running into problems.
>
> Of course, I am just implementing PCI guest support for OSv/AArch64.
> Works with the legacy pci support using Alvise's series to do virtio-pci with block, net, rng etc,
> but I am now considering going for PCIE support directly although that means more implementation work for me.
Both should look quite similar from your point of view. If you do the
level interrupt handling, IRQ masking and window handling correctly, the
only difference should be CAM vs ECAM.
I'm glad to hear that it breaks your implementation though and not
something already existing (like Linux, BSD, etc). That means there's a
good chance my patches are correct and you just took some shortcuts
which can be easily fixed :).
Alex
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-08 13:26 ` Alexander Graf
@ 2015-01-08 15:01 ` Claudio Fontana
2015-01-12 16:23 ` Claudio Fontana
0 siblings, 1 reply; 44+ messages in thread
From: Claudio Fontana @ 2015-01-08 15:01 UTC (permalink / raw)
To: Alexander Graf, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
On 08.01.2015 14:26, Alexander Graf wrote:
>
>
> On 08.01.15 13:55, Claudio Fontana wrote:
>> (added cc: Alvise which I mistakenly assumed was in Cc: already)
>
> He was in CC :). Now he's there twice.
>
>>
>> On 07.01.2015 22:47, Alexander Graf wrote:
>>>
>>>
>>> On 07.01.15 16:52, Claudio Fontana wrote:
>>>> On 06.01.2015 17:03, Alexander Graf wrote:
>>>>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>>>>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>>>>
>>>>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>>>>> into an AArch64 VM with this and they all lived happily ever after.
>>>>>
>>>>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>>>>
>>>>> ---
>>>>>
>>>>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>>>>> systems. If you want to use it with AArch64 guests, please apply the following
>>>>> patch or wait until upstream cleaned up the code properly:
>>>>>
>>>>> http://csgraf.de/agraf/pci/pci-3.19.patch
>>>>> ---
>>>>> default-configs/arm-softmmu.mak | 2 +
>>>>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>>>>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>>>>
>
> [...]
>
>>>>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>>>>> +
>>>>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>>>>> + qdev_init_nofail(dev);
>>>>> +
>>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>>>>> +
>>>>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>>>>> + mmio_alias = g_new0(MemoryRegion, 1);
>>>>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>>>>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>>>>> + mmio_reg, base_mmio, size_mmio);
>>>>> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
>>>>> +
>>>>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
>>>>> +
>>>>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>>>>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>>>>> + "compatible", "pci-host-ecam-generic");
>>>>
>>>> is this the only compatible string we should set here? Is this not legacy pci compatible?
>>>> In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
>>>> would it be sensible to make it a list and include "pci" as well?
>>>
>>> I couldn't find anything that defines what a "pci" compatible should
>>> look like. We definitely don't implement the legacy PCI config space
>>> accessor registers.
>>
>> I see, I assumed this patch would support both CAM and ECAM as configuration methods, while now I understand
>> Alvise's patches support only CAM, while these support only ECAM..
>> So basically I should look at the compatible string and then choose configuration method accordingly.
>> I wonder if I should deal with the case where the compatible string contains both ECAM and CAM.
>
> Well, original PCI didn't even do CAM. You only had 2 32bit ioport
> registers that you tunnel all config space access through.
>
>>
>>>>
>>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>>>>> +
>>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>>>>> + 2, base_ecam, 2, size_ecam);
>>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>>>>> + 1, 0x01000000, 2, 0,
>>>>> + 2, base_ioport, 2, size_ioport,
>>>>> +
>>>>> + 1, 0x02000000, 2, base_mmio,
>>>>> + 2, base_mmio, 2, size_mmio);
>>>>> +
>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>>>>> + 0, 0, 0, /* device */
>>>>> + 0, /* PCI irq */
>>>>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>>>>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
>>>>> + 0, 0, 0, /* device */
>>>>> + 0 /* PCI irq */);
>>>>
>>>> Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series.
>>>
>>> How exactly is this undocumented? The "mask" is a mask over the first
>>> fields of an interrupt-map row plus an IRQ offset. So the mask above
>>> means "Any device with any function and any IRQ on it, map to device IRQ
>>> 0" which maps to vbi->irqmap[VIRT_PCIE] (IRQ 3).
>>
>> (see my answer to Peter below in thread)
>>
>> this is a bit different to what Alvise's series is doing I think (see later).
>
> Yes, but it's easier :).
>
>>
>>>
>>>> Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
>>>
>>> You're saying you'd prefer a define?
>>
>> Yes that would be helpful :)
>>
>>>
>>>> How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
>>>
>>> His implementation explicitly listed every PCI slot, while mine actually
>>> makes use of the mask and simply routes everything to a single IRQ line.
>>>
>>> The benefit of masking devfn out is that you don't need to worry about
>>> the number of slots you support - and anything performance critical
>>> should go via MSI-X anyway ;).
>>
>> The benefit for me (but for me only probably..) is that with one IRQ per slot I didn't have to implement shared irqs and msi / msi-x in the guest yet. But that should be done eventually anyway..
>
> You most likely wouldn't get one IRQ per slot anyway. Most PHBs expose 4
> outgoing IRQ lines, so you'll need to deal with sharing IRQs regardless.
>
> Also, sharing IRQ lines isn't incredibly black magic — it's quite a
> fundamental PCI IRQ concept, so I'm glad I'm pushing you in the
> direction of implementing it early on :).
>
damn open source making me work lol :)
It's actually good to have these two implementations, so I can validate my guest support against both series and ensure that I don't hardcode too many shortcuts.
Thanks,
Claudio
>>
>>>
>>> So what exactly do you test it with? I've successfully received IRQs
>>> with a Linux guest, so I'm slightly puzzled you're running into problems.
>>
>> Of course, I am just implementing PCI guest support for OSv/AArch64.
>> Works with the legacy pci support using Alvise's series to do virtio-pci with block, net, rng etc,
>> but I am now considering going for PCIE support directly although that means more implementation work for me.
>
> Both should look quite similar from your point of view. If you do the
> level interrupt handling, irq masking and window handling correctly the
> only difference should be CAM vs ECAM.
>
> I'm glad to hear that it breaks your implementation though and not
> something already existing (like Linux, BSD, etc). That means there's a
> good chance my patches are correct and you just took some shortcuts
> which can be easily fixed :).
>
>
> Alex
>
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-08 15:01 ` Claudio Fontana
@ 2015-01-12 16:23 ` Claudio Fontana
2015-01-12 16:35 ` Alexander Graf
0 siblings, 1 reply; 44+ messages in thread
From: Claudio Fontana @ 2015-01-12 16:23 UTC (permalink / raw)
To: Alexander Graf, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
On 08.01.2015 16:01, Claudio Fontana wrote:
> On 08.01.2015 14:26, Alexander Graf wrote:
>>
>>
>> On 08.01.15 13:55, Claudio Fontana wrote:
>>> (added cc: Alvise which I mistakenly assumed was in Cc: already)
>>
>> He was in CC :). Now he's there twice.
>>
>>>
>>> On 07.01.2015 22:47, Alexander Graf wrote:
>>>>
>>>>
>>>> On 07.01.15 16:52, Claudio Fontana wrote:
>>>>> On 06.01.2015 17:03, Alexander Graf wrote:
>>>>>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>>>>>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>>>>>
>>>>>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>>>>>> into an AArch64 VM with this and they all lived happily ever after.
>>>>>>
>>>>>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>>>>>
>>>>>> ---
>>>>>>
>>>>>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>>>>>> systems. If you want to use it with AArch64 guests, please apply the following
>>>>>> patch or wait until upstream cleaned up the code properly:
>>>>>>
>>>>>> http://csgraf.de/agraf/pci/pci-3.19.patch
>>>>>> ---
>>>>>> default-configs/arm-softmmu.mak | 2 +
>>>>>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>>>>>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>>>>>
>>
>> [...]
>>
>>>>>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>>>>>> +
>>>>>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>>>>>> + qdev_init_nofail(dev);
>>>>>> +
>>>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>>>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>>>>>> +
>>>>>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>>>>>> + mmio_alias = g_new0(MemoryRegion, 1);
>>>>>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>>>>>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>>>>>> + mmio_reg, base_mmio, size_mmio);
>>>>>> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
>>>>>> +
>>>>>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
>>>>>> +
>>>>>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>>>>>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>>>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>>>>>> + "compatible", "pci-host-ecam-generic");
>>>>>
>>>>> is this the only compatible string we should set here? Is this not legacy pci compatible?
>>>>> In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
>>>>> would it be sensible to make it a list and include "pci" as well?
>>>>
>>>> I couldn't find anything that defines what a "pci" compatible should
>>>> look like. We definitely don't implement the legacy PCI config space
>>>> accessor registers.
>>>
>>> I see, I assumed this patch would support both CAM and ECAM as configuration methods, while now I understand
>>> Alvise's patches support only CAM, while these support only ECAM..
>>> So basically I should look at the compatible string and then choose configuration method accordingly.
>>> I wonder if I should deal with the case where the compatible string contains both ECAM and CAM.
>>
>> Well, original PCI didn't even do CAM. You only had 2 32bit ioport
>> registers that you tunnel all config space access through.
>>
>>>
>>>>>
>>>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>>>>>> +
>>>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>>>>>> + 2, base_ecam, 2, size_ecam);
>>>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>>>>>> + 1, 0x01000000, 2, 0,
>>>>>> + 2, base_ioport, 2, size_ioport,
>>>>>> +
>>>>>> + 1, 0x02000000, 2, base_mmio,
>>>>>> + 2, base_mmio, 2, size_mmio);
>>>>>> +
>>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>>>>>> + 0, 0, 0, /* device */
>>>>>> + 0, /* PCI irq */
>>>>>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>>>>>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
>>>>>> + 0, 0, 0, /* device */
>>>>>> + 0 /* PCI irq */);
>>>>>
>>>>> Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series.
>>>>
>>>> How exactly is this undocumented? The "mask" is a mask over the first
>>>> fields of an interrupt-map row plus an IRQ offset. So the mask above
>>>> means "Any device with any function and any IRQ on it, map to device IRQ
>>>> 0" which maps to vbi->irqmap[VIRT_PCIE] (IRQ 3).
>>>
>>> (see my answer to Peter below in thread)
>>>
>>> this is a bit different to what Alvise's series is doing I think (see later).
>>
>> Yes, but it's easier :).
>>
>>>
>>>>
>>>>> Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
>>>>
>>>> You're saying you'd prefer a define?
>>>
>>> Yes that would be helpful :)
>>>
>>>>
>>>>> How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
>>>>
>>>> His implementation explicitly listed every PCI slot, while mine actually
>>>> makes use of the mask and simply routes everything to a single IRQ line.
>>>>
>>>> The benefit of masking devfn out is that you don't need to worry about
>>>> the number of slots you support - and anything performance critical
>>>> should go via MSI-X anyway ;).
>>>
>>> The benefit for me (but for me only probably..) is that with one IRQ per slot I didn't have to implement shared irqs and msi / msi-x in the guest yet. But that should be done eventually anyway..
>>
>> You most likely wouldn't get one IRQ per slot anyway. Most PHBs expose 4
>> outgoing IRQ lines, so you'll need to deal with sharing IRQs regardless.
>>
>> Also, sharing IRQ lines isn't incredibly black magic — it's quite a
>> fundamental PCI IRQ concept, so I'm glad I'm pushing you in the
>> direction of implementing it early on :).
Ok I have tentatively implemented this and I tested both Alvise's series and yours and both work ok for my use case.
Note that I did not test any MSI/MSI-X, only the INTx method.
>>
>
> damn open source making me work lol :)
>
> It's actually good to have these two implementations, so I can validate my guest support against both series and ensure that I don't hardcode too many shortcuts.
>
> Thanks,
>
> Claudio
>
>>>
>>>>
>>>> So what exactly do you test it with? I've successfully received IRQs
>>>> with a Linux guest, so I'm slightly puzzled you're running into problems.
>>>
>>> Of course, I am just implementing PCI guest support for OSv/AArch64.
>>> Works with the legacy pci support using Alvise's series to do virtio-pci with block, net, rng etc,
>>> but I am now considering going for PCIE support directly although that means more implementation work for me.
>>
>> Both should look quite similar from your point of view. If you do the
>> level interrupt handling, irq masking and window handling correctly the
>> only difference should be CAM vs ECAM.
>>
>> I'm glad to hear that it breaks your implementation though and not
>> something already existing (like Linux, BSD, etc). That means there's a
>> good chance my patches are correct and you just took some shortcuts
>> which can be easily fixed :).
>>
>>
>> Alex
>>
>
>
--
Claudio Fontana
Server Virtualization Architect
Huawei Technologies Duesseldorf GmbH
Riesstraße 25 - 80992 München
office: +49 89 158834 4135
mobile: +49 15253060158
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-12 16:23 ` Claudio Fontana
@ 2015-01-12 16:35 ` Alexander Graf
0 siblings, 0 replies; 44+ messages in thread
From: Alexander Graf @ 2015-01-12 16:35 UTC (permalink / raw)
To: Claudio Fontana, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
On 12.01.15 17:23, Claudio Fontana wrote:
> On 08.01.2015 16:01, Claudio Fontana wrote:
>> On 08.01.2015 14:26, Alexander Graf wrote:
>>>
>>>
>>> On 08.01.15 13:55, Claudio Fontana wrote:
>>>> (added cc: Alvise which I mistakenly assumed was in Cc: already)
>>>
>>> He was in CC :). Now he's there twice.
>>>
>>>>
>>>> On 07.01.2015 22:47, Alexander Graf wrote:
>>>>>
>>>>>
>>>>> On 07.01.15 16:52, Claudio Fontana wrote:
>>>>>> On 06.01.2015 17:03, Alexander Graf wrote:
>>>>>>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>>>>>>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>>>>>>
>>>>>>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>>>>>>> into an AArch64 VM with this and they all lived happily ever after.
>>>>>>>
>>>>>>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>>>>>>
>>>>>>> ---
>>>>>>>
>>>>>>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>>>>>>> systems. If you want to use it with AArch64 guests, please apply the following
>>>>>>> patch or wait until upstream cleaned up the code properly:
>>>>>>>
>>>>>>> http://csgraf.de/agraf/pci/pci-3.19.patch
>>>>>>> ---
>>>>>>> default-configs/arm-softmmu.mak | 2 +
>>>>>>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>>>>>>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>>>>>>
>>>
>>> [...]
>>>
>>>>>>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>>>>>>> +
>>>>>>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>>>>>>> + qdev_init_nofail(dev);
>>>>>>> +
>>>>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>>>>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>>>>>>> +
>>>>>>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>>>>>>> + mmio_alias = g_new0(MemoryRegion, 1);
>>>>>>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>>>>>>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>>>>>>> + mmio_reg, base_mmio, size_mmio);
>>>>>>> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
>>>>>>> +
>>>>>>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
>>>>>>> +
>>>>>>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>>>>>>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>>>>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>>>>>>> + "compatible", "pci-host-ecam-generic");
>>>>>>
>>>>>> is this the only compatible string we should set here? Is this not legacy pci compatible?
>>>>>> In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
>>>>>> would it be sensible to make it a list and include "pci" as well?
>>>>>
>>>>> I couldn't find anything that defines what a "pci" compatible should
>>>>> look like. We definitely don't implement the legacy PCI config space
>>>>> accessor registers.
>>>>
>>>> I see, I assumed this patch would support both CAM and ECAM as configuration methods, while now I understand
>>>> Alvise's patches support only CAM, while these support only ECAM..
>>>> So basically I should look at the compatible string and then choose configuration method accordingly.
>>>> I wonder if I should deal with the case where the compatible string contains both ECAM and CAM.
>>>
>>> Well, original PCI didn't even do CAM. You only had 2 32bit ioport
>>> registers that you tunnel all config space access through.
>>>
>>>>
>>>>>>
>>>>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>>>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>>>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>>>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>>>>>>> +
>>>>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>>>>>>> + 2, base_ecam, 2, size_ecam);
>>>>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>>>>>>> + 1, 0x01000000, 2, 0,
>>>>>>> + 2, base_ioport, 2, size_ioport,
>>>>>>> +
>>>>>>> + 1, 0x02000000, 2, base_mmio,
>>>>>>> + 2, base_mmio, 2, size_mmio);
>>>>>>> +
>>>>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>>>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>>>>>>> + 0, 0, 0, /* device */
>>>>>>> + 0, /* PCI irq */
>>>>>>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>>>>>>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>>>>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
>>>>>>> + 0, 0, 0, /* device */
>>>>>>> + 0 /* PCI irq */);
>>>>>>
>>>>>> Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series.
>>>>>
>>>>> How exactly is this undocumented? The "mask" is a mask over the first
>>>>> fields of an interrupt-map row plus an IRQ offset. So the mask above
>>>>> means "Any device with any function and any IRQ on it, map to device IRQ
>>>>> 0" which maps to vbi->irqmap[VIRT_PCIE] (IRQ 3).
>>>>
>>>> (see my answer to Peter below in thread)
>>>>
>>>> this is a bit different to what Alvise's series is doing I think (see later).
>>>
>>> Yes, but it's easier :).
>>>
>>>>
>>>>>
>>>>>> Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
>>>>>
>>>>> You're saying you'd prefer a define?
>>>>
>>>> Yes that would be helpful :)
>>>>
>>>>>
>>>>>> How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
>>>>>
>>>>> His implementation explicitly listed every PCI slot, while mine actually
>>>>> makes use of the mask and simply routes everything to a single IRQ line.
>>>>>
>>>>> The benefit of masking devfn out is that you don't need to worry about
>>>>> the number of slots you support - and anything performance critical
>>>>> should go via MSI-X anyway ;).
>>>>
>>>> The benefit for me (but for me only probably..) is that with one IRQ per slot I didn't have to implement shared irqs and msi / msi-x in the guest yet. But that should be done eventually anyway..
>>>
>>> You most likely wouldn't get one IRQ per slot anyway. Most PHBs expose 4
>>> outgoing IRQ lines, so you'll need to deal with sharing IRQs regardless.
>>>
>>> Also, sharing IRQ lines isn't incredibly black magic — it's quite a
>>> fundamental PCI IRQ concept, so I'm glad I'm pushing you in the
>>> direction of implementing it early on :).
>
>
> Ok I have tentatively implemented this and I tested both Alvise's series and yours and both work ok for my use case.
>
> Note that I did not test any MSI/MSI-X, only the INTx method.
There is no MSI yet ;)
Alex
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-08 12:55 ` Claudio Fontana
2015-01-08 13:26 ` Alexander Graf
@ 2015-01-08 13:36 ` alvise rigo
1 sibling, 0 replies; 44+ messages in thread
From: alvise rigo @ 2015-01-08 13:36 UTC (permalink / raw)
To: Claudio Fontana
Cc: Peter Maydell, stuart.yoder@freescale.com, ard.biesheuvel,
Michael S. Tsirkin, Alexander Graf, QEMU Developers, Rob Herring
On Thu, Jan 8, 2015 at 1:55 PM, Claudio Fontana
<claudio.fontana@huawei.com> wrote:
> (added cc: Alvise which I mistakenly assumed was in Cc: already)
>
> On 07.01.2015 22:47, Alexander Graf wrote:
>>
>>
>> On 07.01.15 16:52, Claudio Fontana wrote:
>>> On 06.01.2015 17:03, Alexander Graf wrote:
>>>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>>>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>>>
>>>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>>>> into an AArch64 VM with this and they all lived happily ever after.
>>>>
>>>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>>>
>>>> ---
>>>>
>>>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>>>> systems. If you want to use it with AArch64 guests, please apply the following
>>>> patch or wait until upstream cleaned up the code properly:
>>>>
>>>> http://csgraf.de/agraf/pci/pci-3.19.patch
>>>> ---
>>>> default-configs/arm-softmmu.mak | 2 +
>>>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>>>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>>>
>>>> diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
>>>> index f3513fa..7671ee2 100644
>>>> --- a/default-configs/arm-softmmu.mak
>>>> +++ b/default-configs/arm-softmmu.mak
>>>> @@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
>>>> CONFIG_VERSATILE_PCI=y
>>>> CONFIG_VERSATILE_I2C=y
>>>>
>>>> +CONFIG_PCI_GENERIC=y
>>>> +
>>>> CONFIG_SDHCI=y
>>>> CONFIG_INTEGRATOR_DEBUG=y
>>>>
>>>> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
>>>> index 2353440..b7635ac 100644
>>>> --- a/hw/arm/virt.c
>>>> +++ b/hw/arm/virt.c
>>>> @@ -42,6 +42,7 @@
>>>> #include "exec/address-spaces.h"
>>>> #include "qemu/bitops.h"
>>>> #include "qemu/error-report.h"
>>>> +#include "hw/pci-host/gpex.h"
>>>>
>>>> #define NUM_VIRTIO_TRANSPORTS 32
>>>>
>>>> @@ -69,6 +70,7 @@ enum {
>>>> VIRT_MMIO,
>>>> VIRT_RTC,
>>>> VIRT_FW_CFG,
>>>> + VIRT_PCIE,
>>>> };
>>>>
>>>> typedef struct MemMapEntry {
>>>> @@ -129,13 +131,14 @@ static const MemMapEntry a15memmap[] = {
>>>> [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
>>>> [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
>>>> /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
>>>> - /* 0x10000000 .. 0x40000000 reserved for PCI */
>>>> + [VIRT_PCIE] = { 0x10000000, 0x30000000 },
>>>> [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
>>>> };
>>>>
>>>> static const int a15irqmap[] = {
>>>> [VIRT_UART] = 1,
>>>> [VIRT_RTC] = 2,
>>>> + [VIRT_PCIE] = 3,
>>>> [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
>>>> };
>>>>
>>>> @@ -312,7 +315,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
>>>> }
>>>> }
>>>>
>>>> -static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>>>> +static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
>>>> {
>>>> uint32_t gic_phandle;
>>>>
>>>> @@ -331,9 +334,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>>>> 2, vbi->memmap[VIRT_GIC_CPU].base,
>>>> 2, vbi->memmap[VIRT_GIC_CPU].size);
>>>> qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
>>>> +
>>>> + return gic_phandle;
>>>> }
>>>>
>>>> -static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>>>> +static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>>>> {
>>>> /* We create a standalone GIC v2 */
>>>> DeviceState *gicdev;
>>>> @@ -380,7 +385,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>>>> pic[i] = qdev_get_gpio_in(gicdev, i);
>>>> }
>>>>
>>>> - fdt_add_gic_node(vbi);
>>>> + return fdt_add_gic_node(vbi);
>>>> }
>>>>
>>>> static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
>>>> @@ -556,6 +561,71 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
>>>> g_free(nodename);
>>>> }
>>>>
>>>> +static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
>>>> + uint32_t gic_phandle)
>>>> +{
>>>> + hwaddr base = vbi->memmap[VIRT_PCIE].base;
>>>> + hwaddr size = vbi->memmap[VIRT_PCIE].size;
>>>> + hwaddr size_ioport = 64 * 1024;
>>>> + hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN;
>>>> + hwaddr size_mmio = size - size_ecam - size_ioport;
>>>> + hwaddr base_mmio = base;
>>>> + hwaddr base_ioport = base_mmio + size_mmio;
>>>> + hwaddr base_ecam = base_ioport + size_ioport;
>>>> + int irq = vbi->irqmap[VIRT_PCIE];
>>>> + MemoryRegion *mmio_alias;
>>>> + MemoryRegion *mmio_reg;
>>>> + DeviceState *dev;
>>>> + char *nodename;
>>>> +
>>>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>>>> +
>>>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>>>> + qdev_init_nofail(dev);
>>>> +
>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>>>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>>>> +
>>>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>>>> + mmio_alias = g_new0(MemoryRegion, 1);
>>>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>>>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>>>> + mmio_reg, base_mmio, size_mmio);
>>>> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
>>>> +
>>>> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
>>>> +
>>>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>>>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>>>> + "compatible", "pci-host-ecam-generic");
>>>
>>> is this the only compatible string we should set here? Is this not legacy pci compatible?
>>> In other device trees I see this mentioned as compatible = "arm,versatile-pci-hostbridge", "pci" for example,
>>> would it be sensible to make it a list and include "pci" as well?
>>
>> I couldn't find anything that defines what a "pci" compatible should
>> look like. We definitely don't implement the legacy PCI config space
>> accessor registers.
>
> I see, I assumed this patch would support both CAM and ECAM as configuration methods, while now I understand
> Alvise's patches support only CAM, while these support only ECAM..
Hi Claudio,
I've run some tests using these patches with an ARM guest and Linux.
Both virtio-net-pci and lsi (the devices I used to test my series)
were working.
So I would say that at least from a Linux perspective these patches
support both CAM and ECAM.
Regards,
alvise
> So basically I should look at the compatible string and then choose configuration method accordingly.
> I wonder if I should deal with the case where the compatible string contains both ECAM and CAM.
>
>>>
>>>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>>>> +
>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>>>> + 2, base_ecam, 2, size_ecam);
>>>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>>>> + 1, 0x01000000, 2, 0,
>>>> + 2, base_ioport, 2, size_ioport,
>>>> +
>>>> + 1, 0x02000000, 2, base_mmio,
>>>> + 2, base_mmio, 2, size_mmio);
>>>> +
>>>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>>>> + 0, 0, 0, /* device */
>>>> + 0, /* PCI irq */
>>>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>>>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>>>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
>>>> + 0, 0, 0, /* device */
>>>> + 0 /* PCI irq */);
>>>
>>> Interrupt map does not seem to work for me; incidentally this ends up being the same kind of undocumented blob that Alvise posted in his series.
>>
>> How exactly is this undocumented? The "mask" is a mask over the first
>> fields of an interrupt-map row plus an IRQ offset. So the mask above
>> means "Any device with any function and any IRQ on it, map to device IRQ
>> 0" which maps to vbi->irqmap[VIRT_PCIE] (IRQ 3).
>
> (see my answer to Peter below in thread)
>
> this is a bit different to what Alvise's series is doing I think (see later).
>
>>
>>> Can you add a good comment about what the ranges property contains (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but there is no need to be cryptic about it).
>>
>> You're saying you'd prefer a define?
>
> Yes that would be helpful :)
>
>>
>>> How does your interrupt map implementation differ from the patchset posted by Alvise? I ask because that one works for me (tm).
>>
>> His implementation explicitly listed every PCI slot, while mine actually
>> makes use of the mask and simply routes everything to a single IRQ line.
>>
>> The benefit of masking devfn out is that you don't need to worry about
>> the number of slots you support - and anything performance critical
>> should go via MSI-X anyway ;).
>
> The benefit for me (but for me only probably..) is that with one IRQ per slot I didn't have to implement shared irqs and msi / msi-x in the guest yet. But that should be done eventually anyway..
>
>>
>> So what exactly do you test it with? I've successfully received IRQs
>> with a Linux guest, so I'm slightly puzzled you're running into problems.
>
> Of course, I am just implementing PCI guest support for OSv/AArch64.
> Works with the legacy pci support using Alvise's series to do virtio-pci with block, net, rng etc,
> but I am now considering going for PCIE support directly although that means more implementation work for me.
>
>>
>>
>> Alex
>>
>
> Ciao & thanks,
>
> Claudio
>
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-07 15:52 ` Claudio Fontana
2015-01-07 21:47 ` Alexander Graf
@ 2015-01-08 10:31 ` Peter Maydell
2015-01-08 12:30 ` Claudio Fontana
1 sibling, 1 reply; 44+ messages in thread
From: Peter Maydell @ 2015-01-08 10:31 UTC (permalink / raw)
To: Claudio Fontana
Cc: Rob Herring, Michael S. Tsirkin, QEMU Developers, Ard Biesheuvel,
Alvise Rigo, Stuart Yoder, Alexander Graf
On 7 January 2015 at 15:52, Claudio Fontana <claudio.fontana@huawei.com> wrote:
> Interrupt map does not seem to work for me; incidentally this ends up being
> the same kind of undocumented blob that Alvise posted in his series. Can
> you add a good comment about what the ranges property contains
> (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but
> there is no need to be cryptic about it).
The binding docs live in the kernel:
https://www.kernel.org/doc/Documentation/devicetree/bindings/pci/host-generic-pci.txt
(which reference the upstream openfirmware specs):
http://www.firmware.org/1275/practice/imap/imap0_9d.pdf
so we can provide a brief summary comment here, but if the kernel
binding docs are confusing (which they kind of are) we should really
get them improved...
On the 'compatible' string:
https://www.kernel.org/doc/Documentation/devicetree/bindings/pci/host-generic-pci.txt
doesn't say anything about using "pci" here in either the
text or the example binding.
-- PMM
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-08 10:31 ` Peter Maydell
@ 2015-01-08 12:30 ` Claudio Fontana
0 siblings, 0 replies; 44+ messages in thread
From: Claudio Fontana @ 2015-01-08 12:30 UTC (permalink / raw)
To: Peter Maydell
Cc: Rob Herring, Michael S. Tsirkin, QEMU Developers, Ard Biesheuvel,
Alvise Rigo, Stuart Yoder, Alexander Graf
On 08.01.2015 11:31, Peter Maydell wrote:
> On 7 January 2015 at 15:52, Claudio Fontana <claudio.fontana@huawei.com> wrote:
>> Interrupt map does not seem to work for me; incidentally this ends up being
>> the same kind of undocumented blob that Alvise posted in his series. Can
>> you add a good comment about what the ranges property contains
>> (the 0x01000000, 0x02000000 which I suspect means IO vs MMIO IIRC, but
>> there is no need to be cryptic about it).
>
> The binding docs live in the kernel:
> https://www.kernel.org/doc/Documentation/devicetree/bindings/pci/host-generic-pci.txt
> (which reference the upstream openfirmware specs):
> http://www.firmware.org/1275/practice/imap/imap0_9d.pdf
>
> so we can provide a brief summary comment here, but if the kernel
> binding docs are confusing (which they kind of are) we should really
> get them improved...
>
> On the 'compatible' string:
> https://www.kernel.org/doc/Documentation/devicetree/bindings/pci/host-generic-pci.txt
> doesn't say anything about using "pci" here in either the
> text or the example binding.
Thank you for these pointers.
I think that putting a comment with this information would be helpful (even just a pointer along the lines of "look at host-generic-pci.txt in the Linux kernel documentation to understand what's going on here"), or we could even refer directly to the "Open Firmware Recommended Practice: Interrupt Mapping", since QEMU should be guest OS agnostic - to some extent.. -.
Also I think Alex's proposal to use defines for IO Space vs Memory Space instead of hardcoding 0x1000000 / 0x2000000 is a good thing.
I think the kernel binding docs could be made more helpful, but since I am in the position of trying to figure this stuff out I am not in the best position to make them better..
Thanks,
Claudio
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-06 16:03 ` [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine Alexander Graf
2015-01-07 15:52 ` Claudio Fontana
@ 2015-01-12 16:20 ` Claudio Fontana
2015-01-12 16:36 ` Alexander Graf
2015-01-12 16:49 ` alvise rigo
2 siblings, 1 reply; 44+ messages in thread
From: Claudio Fontana @ 2015-01-12 16:20 UTC (permalink / raw)
To: Alexander Graf, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
Just adding a nit here below:
On 06.01.2015 17:03, Alexander Graf wrote:
> Now that we have a working "generic" PCIe host bridge driver, we can plug
> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>
> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
> into an AArch64 VM with this and they all lived happily ever after.
>
> Signed-off-by: Alexander Graf <agraf@suse.de>
>
> ---
>
> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
> systems. If you want to use it with AArch64 guests, please apply the following
> patch or wait until upstream cleaned up the code properly:
>
> http://csgraf.de/agraf/pci/pci-3.19.patch
> ---
> default-configs/arm-softmmu.mak | 2 +
> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
> 2 files changed, 80 insertions(+), 5 deletions(-)
>
> diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
> index f3513fa..7671ee2 100644
> --- a/default-configs/arm-softmmu.mak
> +++ b/default-configs/arm-softmmu.mak
> @@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
> CONFIG_VERSATILE_PCI=y
> CONFIG_VERSATILE_I2C=y
>
> +CONFIG_PCI_GENERIC=y
> +
> CONFIG_SDHCI=y
> CONFIG_INTEGRATOR_DEBUG=y
>
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index 2353440..b7635ac 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -42,6 +42,7 @@
> #include "exec/address-spaces.h"
> #include "qemu/bitops.h"
> #include "qemu/error-report.h"
> +#include "hw/pci-host/gpex.h"
>
> #define NUM_VIRTIO_TRANSPORTS 32
>
> @@ -69,6 +70,7 @@ enum {
> VIRT_MMIO,
> VIRT_RTC,
> VIRT_FW_CFG,
> + VIRT_PCIE,
> };
>
> typedef struct MemMapEntry {
> @@ -129,13 +131,14 @@ static const MemMapEntry a15memmap[] = {
> [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
> [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
> /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
> - /* 0x10000000 .. 0x40000000 reserved for PCI */
> + [VIRT_PCIE] = { 0x10000000, 0x30000000 },
> [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
> };
>
> static const int a15irqmap[] = {
> [VIRT_UART] = 1,
> [VIRT_RTC] = 2,
> + [VIRT_PCIE] = 3,
> [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
> };
>
> @@ -312,7 +315,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
> }
> }
>
> -static void fdt_add_gic_node(const VirtBoardInfo *vbi)
> +static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
> {
> uint32_t gic_phandle;
>
> @@ -331,9 +334,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
> 2, vbi->memmap[VIRT_GIC_CPU].base,
> 2, vbi->memmap[VIRT_GIC_CPU].size);
> qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
> +
> + return gic_phandle;
> }
>
> -static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> +static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> {
> /* We create a standalone GIC v2 */
> DeviceState *gicdev;
> @@ -380,7 +385,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> pic[i] = qdev_get_gpio_in(gicdev, i);
> }
>
> - fdt_add_gic_node(vbi);
> + return fdt_add_gic_node(vbi);
> }
>
> static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
> @@ -556,6 +561,71 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
> g_free(nodename);
> }
>
> +static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
> + uint32_t gic_phandle)
> +{
> + hwaddr base = vbi->memmap[VIRT_PCIE].base;
> + hwaddr size = vbi->memmap[VIRT_PCIE].size;
> + hwaddr size_ioport = 64 * 1024;
> + hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN;
> + hwaddr size_mmio = size - size_ecam - size_ioport;
> + hwaddr base_mmio = base;
> + hwaddr base_ioport = base_mmio + size_mmio;
> + hwaddr base_ecam = base_ioport + size_ioport;
> + int irq = vbi->irqmap[VIRT_PCIE];
> + MemoryRegion *mmio_alias;
> + MemoryRegion *mmio_reg;
> + DeviceState *dev;
> + char *nodename;
> +
> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
> +
> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
> + qdev_init_nofail(dev);
> +
> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
> +
> + /* Map the MMIO window at the same spot in bus and cpu layouts */
> + mmio_alias = g_new0(MemoryRegion, 1);
> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
> + mmio_reg, base_mmio, size_mmio);
> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
> +
> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
> +
> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
> + qemu_fdt_add_subnode(vbi->fdt, nodename);
> + qemu_fdt_setprop_string(vbi->fdt, nodename,
> + "compatible", "pci-host-ecam-generic");
> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
> +
> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
> + 2, base_ecam, 2, size_ecam);
> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
> + 1, 0x01000000, 2, 0,
> + 2, base_ioport, 2, size_ioport,
> +
> + 1, 0x02000000, 2, base_mmio,
> + 2, base_mmio, 2, size_mmio);
> +
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
> + 0, 0, 0, /* device */
> + 0, /* PCI irq */
> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
nit: are there two extra spaces here? (alignment)
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
> + 0, 0, 0, /* device */
> + 0 /* PCI irq */);
> +
> + g_free(nodename);
> +}
> +
> static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
> {
> const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
> @@ -573,6 +643,7 @@ static void machvirt_init(MachineState *machine)
> MemoryRegion *ram = g_new(MemoryRegion, 1);
> const char *cpu_model = machine->cpu_model;
> VirtBoardInfo *vbi;
> + uint32_t gic_phandle;
>
> if (!cpu_model) {
> cpu_model = "cortex-a15";
> @@ -634,12 +705,14 @@ static void machvirt_init(MachineState *machine)
>
> create_flash(vbi);
>
> - create_gic(vbi, pic);
> + gic_phandle = create_gic(vbi, pic);
>
> create_uart(vbi, pic);
>
> create_rtc(vbi, pic);
>
> + create_pcie(vbi, pic, gic_phandle);
> +
> /* Create mmio transports, so the user can create virtio backends
> * (which will be automatically plugged in to the transports). If
> * no backend is created the transport will just sit harmlessly idle.
>
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-12 16:20 ` Claudio Fontana
@ 2015-01-12 16:36 ` Alexander Graf
0 siblings, 0 replies; 44+ messages in thread
From: Alexander Graf @ 2015-01-12 16:36 UTC (permalink / raw)
To: Claudio Fontana, qemu-devel
Cc: Peter Maydell, ard.biesheuvel, mst, rob.herring, stuart.yoder,
a.rigo
On 12.01.15 17:20, Claudio Fontana wrote:
> Just adding a nit here below:
>
> On 06.01.2015 17:03, Alexander Graf wrote:
>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>
>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>> into an AArch64 VM with this and they all lived happily ever after.
>>
>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>
>> ---
>>
>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>> systems. If you want to use it with AArch64 guests, please apply the following
>> patch or wait until upstream cleaned up the code properly:
>>
>> http://csgraf.de/agraf/pci/pci-3.19.patch
>> ---
[...]
>> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
>> + qemu_fdt_add_subnode(vbi->fdt, nodename);
>> + qemu_fdt_setprop_string(vbi->fdt, nodename,
>> + "compatible", "pci-host-ecam-generic");
>> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
>> +
>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
>> + 2, base_ecam, 2, size_ecam);
>> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
>> + 1, 0x01000000, 2, 0,
>> + 2, base_ioport, 2, size_ioport,
>> +
>> + 1, 0x02000000, 2, base_mmio,
>> + 2, base_mmio, 2, size_mmio);
>> +
>> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
>> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
>> + 0, 0, 0, /* device */
>> + 0, /* PCI irq */
>> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
>> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
>
>
> nit: are there two extra spaces here? (alignment)
Yes, because the attribute spans 2 lines ;)
Alex
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-06 16:03 ` [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine Alexander Graf
2015-01-07 15:52 ` Claudio Fontana
2015-01-12 16:20 ` Claudio Fontana
@ 2015-01-12 16:49 ` alvise rigo
2015-01-12 16:57 ` Alexander Graf
2 siblings, 1 reply; 44+ messages in thread
From: alvise rigo @ 2015-01-12 16:49 UTC (permalink / raw)
To: Alexander Graf
Cc: Peter Maydell, Ard Biesheuvel, Rob Herring, Michael S. Tsirkin,
Claudio Fontana, stuart.yoder@freescale.com, QEMU Developers
Hi Alexander,
Just a comment below.
On Tue, Jan 6, 2015 at 5:03 PM, Alexander Graf <agraf@suse.de> wrote:
> Now that we have a working "generic" PCIe host bridge driver, we can plug
> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>
> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
> into an AArch64 VM with this and they all lived happily ever after.
>
> Signed-off-by: Alexander Graf <agraf@suse.de>
>
> ---
>
> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
> systems. If you want to use it with AArch64 guests, please apply the following
> patch or wait until upstream cleaned up the code properly:
>
> http://csgraf.de/agraf/pci/pci-3.19.patch
> ---
> default-configs/arm-softmmu.mak | 2 +
> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
> 2 files changed, 80 insertions(+), 5 deletions(-)
>
> diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
> index f3513fa..7671ee2 100644
> --- a/default-configs/arm-softmmu.mak
> +++ b/default-configs/arm-softmmu.mak
> @@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
> CONFIG_VERSATILE_PCI=y
> CONFIG_VERSATILE_I2C=y
>
> +CONFIG_PCI_GENERIC=y
> +
> CONFIG_SDHCI=y
> CONFIG_INTEGRATOR_DEBUG=y
>
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index 2353440..b7635ac 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -42,6 +42,7 @@
> #include "exec/address-spaces.h"
> #include "qemu/bitops.h"
> #include "qemu/error-report.h"
> +#include "hw/pci-host/gpex.h"
>
> #define NUM_VIRTIO_TRANSPORTS 32
>
> @@ -69,6 +70,7 @@ enum {
> VIRT_MMIO,
> VIRT_RTC,
> VIRT_FW_CFG,
> + VIRT_PCIE,
> };
>
> typedef struct MemMapEntry {
> @@ -129,13 +131,14 @@ static const MemMapEntry a15memmap[] = {
> [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
> [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
> /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
> - /* 0x10000000 .. 0x40000000 reserved for PCI */
> + [VIRT_PCIE] = { 0x10000000, 0x30000000 },
> [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
> };
>
> static const int a15irqmap[] = {
> [VIRT_UART] = 1,
> [VIRT_RTC] = 2,
> + [VIRT_PCIE] = 3,
> [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
> };
>
> @@ -312,7 +315,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
> }
> }
>
> -static void fdt_add_gic_node(const VirtBoardInfo *vbi)
> +static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
> {
> uint32_t gic_phandle;
>
> @@ -331,9 +334,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
> 2, vbi->memmap[VIRT_GIC_CPU].base,
> 2, vbi->memmap[VIRT_GIC_CPU].size);
> qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
> +
> + return gic_phandle;
> }
>
> -static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> +static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> {
> /* We create a standalone GIC v2 */
> DeviceState *gicdev;
> @@ -380,7 +385,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
> pic[i] = qdev_get_gpio_in(gicdev, i);
> }
>
> - fdt_add_gic_node(vbi);
> + return fdt_add_gic_node(vbi);
> }
>
> static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
> @@ -556,6 +561,71 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
> g_free(nodename);
> }
>
> +static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
> + uint32_t gic_phandle)
> +{
> + hwaddr base = vbi->memmap[VIRT_PCIE].base;
> + hwaddr size = vbi->memmap[VIRT_PCIE].size;
> + hwaddr size_ioport = 64 * 1024;
> + hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN;
> + hwaddr size_mmio = size - size_ecam - size_ioport;
> + hwaddr base_mmio = base;
> + hwaddr base_ioport = base_mmio + size_mmio;
> + hwaddr base_ecam = base_ioport + size_ioport;
> + int irq = vbi->irqmap[VIRT_PCIE];
> + MemoryRegion *mmio_alias;
> + MemoryRegion *mmio_reg;
> + DeviceState *dev;
> + char *nodename;
> +
> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
> +
> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
> + qdev_init_nofail(dev);
> +
> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
> +
> + /* Map the MMIO window at the same spot in bus and cpu layouts */
> + mmio_alias = g_new0(MemoryRegion, 1);
> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
> + mmio_reg, base_mmio, size_mmio);
Is it safe to have both mmio_alias and mmio_reg of size_mmio bytes?
Shouldn't the container region be at least (offset + size - 1) big?
Thank you,
alvise
> + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
> +
> + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
> +
> + nodename = g_strdup_printf("/pcie@%" PRIx64, base);
> + qemu_fdt_add_subnode(vbi->fdt, nodename);
> + qemu_fdt_setprop_string(vbi->fdt, nodename,
> + "compatible", "pci-host-ecam-generic");
> + qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0, 1);
> +
> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
> + 2, base_ecam, 2, size_ecam);
> + qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
> + 1, 0x01000000, 2, 0,
> + 2, base_ioport, 2, size_ioport,
> +
> + 1, 0x02000000, 2, base_mmio,
> + 2, base_mmio, 2, size_mmio);
> +
> + qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map",
> + 0, 0, 0, /* device */
> + 0, /* PCI irq */
> + gic_phandle, GIC_FDT_IRQ_TYPE_SPI, irq,
> + GIC_FDT_IRQ_FLAGS_LEVEL_HI /* system irq */);
> + qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
> + 0, 0, 0, /* device */
> + 0 /* PCI irq */);
> +
> + g_free(nodename);
> +}
> +
> static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
> {
> const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
> @@ -573,6 +643,7 @@ static void machvirt_init(MachineState *machine)
> MemoryRegion *ram = g_new(MemoryRegion, 1);
> const char *cpu_model = machine->cpu_model;
> VirtBoardInfo *vbi;
> + uint32_t gic_phandle;
>
> if (!cpu_model) {
> cpu_model = "cortex-a15";
> @@ -634,12 +705,14 @@ static void machvirt_init(MachineState *machine)
>
> create_flash(vbi);
>
> - create_gic(vbi, pic);
> + gic_phandle = create_gic(vbi, pic);
>
> create_uart(vbi, pic);
>
> create_rtc(vbi, pic);
>
> + create_pcie(vbi, pic, gic_phandle);
> +
> /* Create mmio transports, so the user can create virtio backends
> * (which will be automatically plugged in to the transports). If
> * no backend is created the transport will just sit harmlessly idle.
> --
> 1.7.12.4
>
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [Qemu-devel] [PATCH 3/4] arm: Add PCIe host bridge in virt machine
2015-01-12 16:49 ` alvise rigo
@ 2015-01-12 16:57 ` Alexander Graf
0 siblings, 0 replies; 44+ messages in thread
From: Alexander Graf @ 2015-01-12 16:57 UTC (permalink / raw)
To: alvise rigo
Cc: Peter Maydell, Ard Biesheuvel, Rob Herring, Michael S. Tsirkin,
Claudio Fontana, stuart.yoder@freescale.com, QEMU Developers
On 12.01.15 17:49, alvise rigo wrote:
> Hi Alexander,
>
> Just a comment below.
>
> On Tue, Jan 6, 2015 at 5:03 PM, Alexander Graf <agraf@suse.de> wrote:
>> Now that we have a working "generic" PCIe host bridge driver, we can plug
>> it into ARMs virt machine to always have PCIe available to normal ARM VMs.
>>
>> I've successfully managed to expose a Bochs VGA device, XHCI and an e1000
>> into an AArch64 VM with this and they all lived happily ever after.
>>
>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>
>> ---
>>
>> Linux 3.19 only supports the generic PCIe host bridge driver for 32bit ARM
>> systems. If you want to use it with AArch64 guests, please apply the following
>> patch or wait until upstream cleaned up the code properly:
>>
>> http://csgraf.de/agraf/pci/pci-3.19.patch
>> ---
>> default-configs/arm-softmmu.mak | 2 +
>> hw/arm/virt.c | 83 ++++++++++++++++++++++++++++++++++++++---
>> 2 files changed, 80 insertions(+), 5 deletions(-)
>>
>> diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
>> index f3513fa..7671ee2 100644
>> --- a/default-configs/arm-softmmu.mak
>> +++ b/default-configs/arm-softmmu.mak
>> @@ -82,6 +82,8 @@ CONFIG_ZYNQ=y
>> CONFIG_VERSATILE_PCI=y
>> CONFIG_VERSATILE_I2C=y
>>
>> +CONFIG_PCI_GENERIC=y
>> +
>> CONFIG_SDHCI=y
>> CONFIG_INTEGRATOR_DEBUG=y
>>
>> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
>> index 2353440..b7635ac 100644
>> --- a/hw/arm/virt.c
>> +++ b/hw/arm/virt.c
>> @@ -42,6 +42,7 @@
>> #include "exec/address-spaces.h"
>> #include "qemu/bitops.h"
>> #include "qemu/error-report.h"
>> +#include "hw/pci-host/gpex.h"
>>
>> #define NUM_VIRTIO_TRANSPORTS 32
>>
>> @@ -69,6 +70,7 @@ enum {
>> VIRT_MMIO,
>> VIRT_RTC,
>> VIRT_FW_CFG,
>> + VIRT_PCIE,
>> };
>>
>> typedef struct MemMapEntry {
>> @@ -129,13 +131,14 @@ static const MemMapEntry a15memmap[] = {
>> [VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
>> [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
>> /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
>> - /* 0x10000000 .. 0x40000000 reserved for PCI */
>> + [VIRT_PCIE] = { 0x10000000, 0x30000000 },
>> [VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
>> };
>>
>> static const int a15irqmap[] = {
>> [VIRT_UART] = 1,
>> [VIRT_RTC] = 2,
>> + [VIRT_PCIE] = 3,
>> [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
>> };
>>
>> @@ -312,7 +315,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
>> }
>> }
>>
>> -static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>> +static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
>> {
>> uint32_t gic_phandle;
>>
>> @@ -331,9 +334,11 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
>> 2, vbi->memmap[VIRT_GIC_CPU].base,
>> 2, vbi->memmap[VIRT_GIC_CPU].size);
>> qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
>> +
>> + return gic_phandle;
>> }
>>
>> -static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>> +static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>> {
>> /* We create a standalone GIC v2 */
>> DeviceState *gicdev;
>> @@ -380,7 +385,7 @@ static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
>> pic[i] = qdev_get_gpio_in(gicdev, i);
>> }
>>
>> - fdt_add_gic_node(vbi);
>> + return fdt_add_gic_node(vbi);
>> }
>>
>> static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
>> @@ -556,6 +561,71 @@ static void create_fw_cfg(const VirtBoardInfo *vbi)
>> g_free(nodename);
>> }
>>
>> +static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
>> + uint32_t gic_phandle)
>> +{
>> + hwaddr base = vbi->memmap[VIRT_PCIE].base;
>> + hwaddr size = vbi->memmap[VIRT_PCIE].size;
>> + hwaddr size_ioport = 64 * 1024;
>> + hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN;
>> + hwaddr size_mmio = size - size_ecam - size_ioport;
>> + hwaddr base_mmio = base;
>> + hwaddr base_ioport = base_mmio + size_mmio;
>> + hwaddr base_ecam = base_ioport + size_ioport;
>> + int irq = vbi->irqmap[VIRT_PCIE];
>> + MemoryRegion *mmio_alias;
>> + MemoryRegion *mmio_reg;
>> + DeviceState *dev;
>> + char *nodename;
>> +
>> + dev = qdev_create(NULL, TYPE_GPEX_HOST);
>> +
>> + qdev_prop_set_uint64(dev, "mmio_window_size", size_mmio);
>> + qdev_init_nofail(dev);
>> +
>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_ecam);
>> + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
>> +
>> + /* Map the MMIO window at the same spot in bus and cpu layouts */
>> + mmio_alias = g_new0(MemoryRegion, 1);
>> + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
>> + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
>> + mmio_reg, base_mmio, size_mmio);
>
> Is it safe to have both mmio_alias and mmio_reg of size_mmio bytes?
> Shouldn't be the container region at least (offset + size - 1) big?
You're right. The bridge's memory region shouldn't have any size
limitation, it should just be a flat 64bit memory region that the device
creator than maps aliases into its own address space from.
Alex
^ permalink raw reply [flat|nested] 44+ messages in thread