From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Jan Beulich <jbeulich@suse.com>,
boris.ostrovsky@oracle.com,
Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH v2 24/30] x86/vmsi: add MSI emulation for hardware domain
Date: Tue, 27 Sep 2016 17:57:19 +0200 [thread overview]
Message-ID: <1474991845-27962-25-git-send-email-roger.pau@citrix.com> (raw)
In-Reply-To: <1474991845-27962-1-git-send-email-roger.pau@citrix.com>
Import the MSI handlers from QEMU into Xen. This allows Xen to detect
accesses to the MSI registers and correctly setup PIRQs for physical devices
that are then bound to the hardware domain.
The current logic only allows the usage of a single MSI interrupt per
device, so the maximum queue size announced by the device is unconditionally
set to 0 (1 vector only).
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/io.c | 59 +++++
xen/arch/x86/hvm/vmsi.c | 538 +++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/hvm/io.h | 28 +++
xen/include/asm-x86/msi.h | 32 +++
xen/include/xen/hvm/irq.h | 1 +
xen/include/xen/pci_regs.h | 4 +
6 files changed, 662 insertions(+)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 4db0266..779babb 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -864,6 +864,7 @@ static int hvm_pt_add_register(struct hvm_pt_device *dev,
static struct hvm_pt_handler_init *hwdom_pt_handlers[] = {
&hvm_pt_bar_init,
&hvm_pt_vf_bar_init,
+ &hvm_pt_msi_init,
};
int hwdom_add_device(struct pci_dev *pdev)
@@ -931,6 +932,64 @@ int hwdom_add_device(struct pci_dev *pdev)
return 0;
}
+/* Generic handlers for HVM PCI pass-through. */
+int hvm_pt_common_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset, uint32_t *data)
+{
+ *data = handler->init_val;
+ return 0;
+}
+
+int hvm_pt_word_reg_read(struct hvm_pt_device *s, struct hvm_pt_reg *reg,
+ uint16_t *value, uint16_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint16_t valid_emu_mask = 0;
+ uint16_t *data = &reg->val.word;
+
+ /* emulate word register */
+ valid_emu_mask = handler->emu_mask & valid_mask;
+ *value = HVM_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
+
+ return 0;
+}
+
+int hvm_pt_long_reg_read(struct hvm_pt_device *s, struct hvm_pt_reg *reg,
+ uint32_t *value, uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t valid_emu_mask = 0;
+ uint32_t *data = &reg->val.dword;
+
+ /* emulate long register */
+ valid_emu_mask = handler->emu_mask & valid_mask;
+ *value = HVM_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
+
+ return 0;
+}
+
+int hvm_pt_long_reg_write(struct hvm_pt_device *s, struct hvm_pt_reg *reg,
+ uint32_t *val, uint32_t dev_value,
+ uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t writable_mask = 0;
+ uint32_t throughable_mask = hvm_pt_get_throughable_mask(s, handler,
+ valid_mask);
+ uint32_t *data = &reg->val.dword;
+
+ /* modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value & ~handler->rw1c_mask,
+ throughable_mask);
+
+ return 0;
+}
+
static const struct hvm_io_ops dpci_portio_ops = {
.accept = dpci_portio_accept,
.read = dpci_portio_read,
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index d81c5d4..75ba429 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -624,3 +624,541 @@ void msix_write_completion(struct vcpu *v)
if ( msixtbl_write(v, ctrl_address, 4, 0) != X86EMUL_OKAY )
gdprintk(XENLOG_WARNING, "MSI-X write completion failure\n");
}
+
+/* MSI emulation. */
+
+/* Helper to check supported MSI features. */
+#define vmsi_check_type(offset, flags, what) \
+ ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
+ PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
+
+static inline uint64_t msi_addr64(struct hvm_pt_msi *msi)
+{
+ return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
+}
+
+/* Helper for updating a PIRQ-vMSI bind. */
+static int vmsi_update_bind(struct hvm_pt_msi *msi)
+{
+ xen_domctl_bind_pt_irq_t bind;
+ struct hvm_pt_device *s = container_of(msi, struct hvm_pt_device, msi);
+ int rc;
+
+ ASSERT(msi->pirq != -1);
+
+ bind.hvm_domid = DOMID_SELF;
+ bind.machine_irq = msi->pirq;
+ bind.irq_type = PT_IRQ_TYPE_MSI;
+ bind.u.msi.gvec = msi_vector(msi->data);
+ bind.u.msi.gflags = msi_gflags(msi->data, msi_addr64(msi));
+ bind.u.msi.gtable = 0;
+
+ pcidevs_lock();
+ rc = pt_irq_create_bind(current->domain, &bind);
+ pcidevs_unlock();
+ if ( rc )
+ {
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "updating of MSI failed. (err: %d)\n", rc);
+ rc = physdev_unmap_pirq(DOMID_SELF, msi->pirq);
+ if ( rc )
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "unmapping of MSI pirq %d failed. (err: %i)\n",
+ msi->pirq, rc);
+ msi->pirq = -1;
+ msi->mapped = false;
+ msi->initialized = false;
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Handlers. */
+
+/* Message Control register */
+static int vmsi_msgctrl_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset, uint32_t *data)
+{
+ struct hvm_pt_msi *msi = &s->msi;
+ struct pci_dev *pdev = s->pdev;
+ uint16_t reg_field;
+ uint8_t seg, bus, slot, func;
+
+ seg = pdev->seg;
+ bus = pdev->bus;
+ slot = PCI_SLOT(pdev->devfn);
+ func = PCI_FUNC(pdev->devfn);
+
+ /* Use I/O device register's value as initial value */
+ reg_field = pci_conf_read16(seg, bus, slot, func, real_offset);
+ if ( reg_field & PCI_MSI_FLAGS_ENABLE )
+ {
+ printk_pdev(pdev, XENLOG_INFO,
+ "MSI already enabled, disabling it first\n");
+ reg_field &= ~PCI_MSI_FLAGS_ENABLE;
+ pci_conf_write16(seg, bus, slot, func, real_offset, reg_field);
+ }
+ msi->flags |= reg_field;
+ msi->ctrl_offset = real_offset;
+ msi->initialized = false;
+ msi->mapped = false;
+
+ *data = handler->init_val | (reg_field & ~PCI_MSI_FLAGS_QMASK);
+ return 0;
+}
+
+static int vmsi_msgctrl_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ struct hvm_pt_msi *msi = &s->msi;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = hvm_pt_get_throughable_mask(s, handler,
+ valid_mask);
+ uint16_t *data = &reg->val.word;
+ int rc;
+
+ /* Currently no support for multi-vector */
+ if ( *val & PCI_MSI_FLAGS_QSIZE )
+ printk_pdev(s->pdev, XENLOG_WARNING,
+ "tries to set more than 1 vector ctrl %x\n", *val);
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ /* update MSI */
+ if ( *val & PCI_MSI_FLAGS_ENABLE )
+ {
+ /* Setup MSI pirq for the first time */
+ if ( !msi->initialized )
+ {
+ struct msi_info msi_info;
+ int index = -1;
+
+ /* Init physical one */
+ printk_pdev(s->pdev, XENLOG_DEBUG, "setup MSI (register: %x).\n",
+ *val);
+
+ memset(&msi_info, 0, sizeof(msi_info));
+ msi_info.seg = s->pdev->seg;
+ msi_info.bus = s->pdev->bus;
+ msi_info.devfn = s->pdev->devfn;
+
+ rc = physdev_map_pirq(DOMID_SELF, MAP_PIRQ_TYPE_MSI, &index,
+ &msi->pirq, &msi_info);
+ if ( rc )
+ {
+ /*
+ * Do not broadcast this error, since there's nothing else
+ * that can be done (MSI setup should have been successful).
+ * Guest MSI would be actually not working.
+ */
+ *val &= ~PCI_MSI_FLAGS_ENABLE;
+
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "can not map MSI (register: %x)!\n", *val);
+ return 0;
+ }
+
+ rc = vmsi_update_bind(msi);
+ if ( rc )
+ {
+ *val &= ~PCI_MSI_FLAGS_ENABLE;
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "can not bind MSI (register: %x)!\n", *val);
+ return 0;
+ }
+ msi->initialized = true;
+ msi->mapped = true;
+ }
+ msi->flags |= PCI_MSI_FLAGS_ENABLE;
+ }
+ else if ( msi->mapped )
+ {
+ uint8_t seg, bus, slot, func;
+ uint8_t gvec = msi_vector(msi->data);
+ uint32_t gflags = msi_gflags(msi->data, msi_addr64(msi));
+ uint16_t flags;
+
+ seg = s->pdev->seg;
+ bus = s->pdev->bus;
+ slot = PCI_SLOT(s->pdev->devfn);
+ func = PCI_FUNC(s->pdev->devfn);
+
+ flags = pci_conf_read16(seg, bus, slot, func, s->msi.ctrl_offset);
+ pci_conf_write16(seg, bus, slot, func, s->msi.ctrl_offset,
+ flags & ~PCI_MSI_FLAGS_ENABLE);
+
+ if ( msi->pirq == -1 )
+ return 0;
+
+ if ( msi->initialized )
+ {
+ xen_domctl_bind_pt_irq_t bind;
+
+ printk_pdev(s->pdev, XENLOG_DEBUG,
+ "Unbind MSI with pirq %d, gvec %#x\n", msi->pirq,
+ gvec);
+
+ bind.hvm_domid = DOMID_SELF;
+ bind.irq_type = PT_IRQ_TYPE_MSI;
+ bind.machine_irq = msi->pirq;
+ bind.u.msi.gvec = gvec;
+ bind.u.msi.gflags = gflags;
+ bind.u.msi.gtable = 0;
+
+ pcidevs_lock();
+ rc = pt_irq_destroy_bind(current->domain, &bind);
+ pcidevs_unlock();
+ if ( rc )
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "can not unbind MSI (register: %x)!\n", *val);
+
+ rc = physdev_unmap_pirq(DOMID_SELF, msi->pirq);
+ if ( rc )
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "unmapping of MSI pirq %d failed. (err: %i)\n",
+ msi->pirq, rc);
+ msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
+ msi->initialized = false;
+ msi->mapped = false;
+ msi->pirq = -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize Message Upper Address register */
+static int vmsi_msgaddr64_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ /* No need to initialize in case of 32 bit type */
+ if ( !(s->msi.flags & PCI_MSI_FLAGS_64BIT) )
+ *data = HVM_PT_INVALID_REG;
+ else
+ *data = handler->init_val;
+
+ return 0;
+}
+
+/* Write Message Address register */
+static int vmsi_msgaddr32_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint32_t *val,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t writable_mask = 0;
+ uint32_t old_addr = reg->val.dword;
+ uint32_t *data = &reg->val.dword;
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ s->msi.addr_lo = *data;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, 0);
+
+ /* Update MSI */
+ if ( *data != old_addr && s->msi.mapped )
+ vmsi_update_bind(&s->msi);
+
+ return 0;
+}
+
+/* Write Message Upper Address register */
+static int vmsi_msgaddr64_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint32_t *val,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ uint32_t writable_mask = 0;
+ uint32_t old_addr = reg->val.dword;
+ uint32_t *data = &reg->val.dword;
+
+ /* Check whether the type is 64 bit or not */
+ if ( !(s->msi.flags & PCI_MSI_FLAGS_64BIT) )
+ {
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "Can't write to the upper address without 64 bit support\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ /* update the msi_info too */
+ s->msi.addr_hi = *data;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, 0);
+
+ /* Update MSI */
+ if ( *data != old_addr && s->msi.mapped )
+ vmsi_update_bind(&s->msi);
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Initialize Message Data register
+ */
+static int vmsi_msgdata_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi.flags;
+ uint32_t offset = handler->offset;
+
+ /* Check the offset whether matches the type or not */
+ if ( vmsi_check_type(offset, flags, DATA) )
+ *data = handler->init_val;
+ else
+ *data = HVM_PT_INVALID_REG;
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Write Message Data register
+ */
+static int vmsi_msgdata_reg_write(struct hvm_pt_device *s,
+ struct hvm_pt_reg *reg, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ struct hvm_pt_reg_handler *handler = reg->handler;
+ struct hvm_pt_msi *msi = &s->msi;
+ uint16_t writable_mask = 0;
+ uint16_t old_data = reg->val.word;
+ uint32_t offset = handler->offset;
+ uint16_t *data = &reg->val.word;
+
+ /* Check the offset whether matches the type or not */
+ if ( !vmsi_check_type(offset, msi->flags, DATA) )
+ {
+ /* Exit I/O emulator */
+ printk_pdev(s->pdev, XENLOG_ERR,
+ "the offset does not match the 32/64 bit type!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Modify emulate register */
+ writable_mask = handler->emu_mask & ~handler->ro_mask & valid_mask;
+ *data = HVM_PT_MERGE_VALUE(*val, *data, writable_mask);
+ /* Update the msi_info too */
+ msi->data = *data;
+
+ /* Create value for writing to I/O device register */
+ *val = HVM_PT_MERGE_VALUE(*val, dev_value, 0);
+
+ /* Update MSI */
+ if ( *data != old_data && msi->mapped )
+ vmsi_update_bind(msi);
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Initialize Mask register
+ */
+static int vmsi_mask_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi.flags;
+
+ /* Check the offset whether matches the type or not */
+ if ( !(flags & PCI_MSI_FLAGS_MASKBIT) )
+ *data = HVM_PT_INVALID_REG;
+ else if ( vmsi_check_type(handler->offset, flags, MASK) )
+ *data = handler->init_val;
+ else
+ *data = HVM_PT_INVALID_REG;
+
+ return 0;
+}
+
+/*
+ * This function is shared between 32 and 64 bits MSI implementations
+ * Initialize Pending register
+ */
+static int vmsi_pending_reg_init(struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi.flags;
+
+ /* check the offset whether matches the type or not */
+ if ( !(flags & PCI_MSI_FLAGS_MASKBIT) )
+ *data = HVM_PT_INVALID_REG;
+ else if ( vmsi_check_type(handler->offset, flags, PENDING) )
+ *data = handler->init_val;
+ else
+ *data = HVM_PT_INVALID_REG;
+
+ return 0;
+}
+
+/* MSI Capability Structure reg static information table */
+static struct hvm_pt_reg_handler vmsi_handler[] = {
+ /* Message Control reg */
+ {
+ .offset = PCI_MSI_FLAGS,
+ .size = 2,
+ .init_val = 0x0000,
+ .res_mask = 0xFE00,
+ .ro_mask = 0x018E,
+ .emu_mask = 0x017E,
+ .init = vmsi_msgctrl_reg_init,
+ .u.w.read = hvm_pt_word_reg_read,
+ .u.w.write = vmsi_msgctrl_reg_write,
+ },
+ /* Message Address reg */
+ {
+ .offset = PCI_MSI_ADDRESS_LO,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x00000003,
+ .emu_mask = 0xFFFFFFFF,
+ .init = hvm_pt_common_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = vmsi_msgaddr32_reg_write,
+ },
+ /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
+ {
+ .offset = PCI_MSI_ADDRESS_HI,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x00000000,
+ .emu_mask = 0xFFFFFFFF,
+ .init = vmsi_msgaddr64_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = vmsi_msgaddr64_reg_write,
+ },
+ /* Message Data reg (16 bits of data for 32-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_32,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x0000,
+ .emu_mask = 0xFFFF,
+ .init = vmsi_msgdata_reg_init,
+ .u.w.read = hvm_pt_word_reg_read,
+ .u.w.write = vmsi_msgdata_reg_write,
+ },
+ /* Message Data reg (16 bits of data for 64-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_64,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x0000,
+ .emu_mask = 0xFFFF,
+ .init = vmsi_msgdata_reg_init,
+ .u.w.read = hvm_pt_word_reg_read,
+ .u.w.write = vmsi_msgdata_reg_write,
+ },
+ /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_64, /* PCI_MSI_MASK_32 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0xFFFFFFFF,
+ .init = vmsi_mask_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_BIT, /* PCI_MSI_MASK_64 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0xFFFFFFFF,
+ .init = vmsi_mask_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_64 + 4, /* PCI_MSI_PENDING_32 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0x00000000,
+ .init = vmsi_pending_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
+ {
+ .offset = PCI_MSI_MASK_BIT + 4, /* PCI_MSI_PENDING_64 */
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0xFFFFFFFF,
+ .emu_mask = 0x00000000,
+ .init = vmsi_pending_reg_init,
+ .u.dw.read = hvm_pt_long_reg_read,
+ .u.dw.write = hvm_pt_long_reg_write,
+ },
+ /* End */
+ {
+ .size = 0,
+ },
+};
+
+static int vmsi_group_init(struct hvm_pt_device *dev,
+ struct hvm_pt_reg_group *group)
+{
+ uint8_t seg, bus, slot, func;
+ struct pci_dev *pdev = dev->pdev;
+ int msi_offset;
+ uint8_t msi_size = 0xa;
+ uint16_t flags;
+
+ dev->msi.pirq = -1;
+ seg = pdev->seg;
+ bus = pdev->bus;
+ slot = PCI_SLOT(pdev->devfn);
+ func = PCI_FUNC(pdev->devfn);
+
+ msi_offset = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
+ if ( msi_offset == 0 )
+ return -ENODEV;
+
+ group->base_offset = msi_offset;
+ flags = pci_conf_read16(seg, bus, slot, func,
+ msi_offset + PCI_MSI_FLAGS);
+
+ if ( flags & PCI_MSI_FLAGS_64BIT )
+ msi_size += 4;
+ if ( flags & PCI_MSI_FLAGS_MASKBIT )
+ msi_size += 10;
+
+ dev->msi.flags = flags;
+ group->size = msi_size;
+
+ return 0;
+}
+
+struct hvm_pt_handler_init hvm_pt_msi_init = {
+ .handlers = vmsi_handler,
+ .init = vmsi_group_init,
+};
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index bfd76ff..0f8726a 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -165,6 +165,9 @@ struct hvm_pt_reg_group;
/* Return code when register should be ignored. */
#define HVM_PT_INVALID_REG 0xFFFFFFFF
+#define HVM_PT_MERGE_VALUE(value, data, val_mask) \
+ (((value) & (val_mask)) | ((data) & ~(val_mask)))
+
/* function type for config reg */
typedef int (*hvm_pt_conf_reg_init)
(struct hvm_pt_device *, struct hvm_pt_reg_handler *, uint32_t real_offset,
@@ -350,6 +353,31 @@ struct hvm_pt_device {
/* Helper to add passed-through devices to the hardware domain. */
int hwdom_add_device(struct pci_dev *pdev);
+/* Generic handlers for HVM PCI pass-through. */
+int hvm_pt_long_reg_read(struct hvm_pt_device *, struct hvm_pt_reg *,
+ uint32_t *, uint32_t);
+int hvm_pt_long_reg_write(struct hvm_pt_device *, struct hvm_pt_reg *,
+ uint32_t *, uint32_t, uint32_t);
+int hvm_pt_word_reg_read(struct hvm_pt_device *, struct hvm_pt_reg *,
+ uint16_t *, uint16_t);
+
+int hvm_pt_common_reg_init(struct hvm_pt_device *, struct hvm_pt_reg_handler *,
+ uint32_t real_offset, uint32_t *data);
+
+static inline uint32_t hvm_pt_get_throughable_mask(
+ struct hvm_pt_device *s,
+ struct hvm_pt_reg_handler *handler,
+ uint32_t valid_mask)
+{
+ uint32_t throughable_mask = ~(handler->emu_mask | handler->ro_mask);
+
+ if ( !s->permissive )
+ throughable_mask &= ~handler->res_mask;
+
+ return throughable_mask & valid_mask;
+}
+
+
#endif /* __ASM_X86_HVM_IO_H__ */
diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index 9c02945..8c7fb27 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -246,4 +246,36 @@ void ack_nonmaskable_msi_irq(struct irq_desc *);
void end_nonmaskable_msi_irq(struct irq_desc *, u8 vector);
void set_msi_affinity(struct irq_desc *, const cpumask_t *);
+static inline uint8_t msi_vector(uint32_t data)
+{
+ return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+}
+
+static inline uint8_t msi_dest_id(uint32_t addr)
+{
+ return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+}
+
+static inline uint32_t msi_gflags(uint32_t data, uint64_t addr)
+{
+ uint32_t result = 0;
+ int rh, dm, dest_id, deliv_mode, trig_mode;
+
+ rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
+ dm = (addr >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+ dest_id = msi_dest_id(addr);
+ deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
+ trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+
+ result = dest_id | (rh << GFLAGS_SHIFT_RH)
+ | (dm << GFLAGS_SHIFT_DM)
+ | (deliv_mode << GFLAGS_SHIFT_DELIV_MODE)
+ | (trig_mode << GFLAGS_SHIFT_TRG_MODE);
+
+ return result;
+}
+
+/* MSI HVM pass-through handlers. */
+extern struct hvm_pt_handler_init hvm_pt_msi_init;
+
#endif /* __ASM_MSI_H */
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index 2ffaf35..4d24bf0 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -56,6 +56,7 @@ struct dev_intx_gsi_link {
#define VMSI_TRIG_MODE 0x8000
#define GFLAGS_SHIFT_RH 8
+#define GFLAGS_SHIFT_DM 9
#define GFLAGS_SHIFT_DELIV_MODE 12
#define GFLAGS_SHIFT_TRG_MODE 15
diff --git a/xen/include/xen/pci_regs.h b/xen/include/xen/pci_regs.h
index ecd6124..8db4e0e 100644
--- a/xen/include/xen/pci_regs.h
+++ b/xen/include/xen/pci_regs.h
@@ -296,6 +296,10 @@
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
+#define PCI_MSI_MASK_64 PCI_MSI_MASK_BIT
+#define PCI_MSI_MASK_32 PCI_MSI_DATA_64
+#define PCI_MSI_PENDING_32 PCI_MSI_MASK_BIT
+#define PCI_MSI_PENDING_64 20
/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
#define PCI_MSIX_FLAGS 2
--
2.7.4 (Apple Git-66)
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-09-27 15:58 UTC|newest]
Thread overview: 146+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-09-27 15:56 [PATCH v2 00/30] PVHv2 Dom0 Roger Pau Monne
2016-09-27 15:56 ` [PATCH v2 01/30] xen/x86: move setup of the VM86 TSS to the domain builder Roger Pau Monne
2016-09-28 15:35 ` Jan Beulich
2016-09-29 12:57 ` Roger Pau Monne
2016-09-27 15:56 ` [PATCH v2 02/30] xen/x86: remove XENFEAT_hvm_pirqs for PVHv2 guests Roger Pau Monne
2016-09-28 16:03 ` Jan Beulich
2016-09-29 14:17 ` Roger Pau Monne
2016-09-29 16:07 ` Jan Beulich
2016-09-27 15:56 ` [PATCH v2 03/30] xen/x86: fix parameters and return value of *_set_allocation functions Roger Pau Monne
2016-09-28 9:34 ` Tim Deegan
2016-09-29 10:39 ` Jan Beulich
2016-09-29 14:33 ` Roger Pau Monne
2016-09-29 16:09 ` Jan Beulich
2016-09-30 16:48 ` George Dunlap
2016-10-03 8:05 ` Paul Durrant
2016-10-06 11:33 ` Roger Pau Monne
2016-09-27 15:56 ` [PATCH v2 04/30] xen/x86: allow calling {sh/hap}_set_allocation with the idle domain Roger Pau Monne
2016-09-29 10:43 ` Jan Beulich
2016-09-29 14:37 ` Roger Pau Monne
2016-09-29 16:10 ` Jan Beulich
2016-09-30 16:56 ` George Dunlap
2016-09-30 16:56 ` George Dunlap
2016-09-27 15:57 ` [PATCH v2 05/30] xen/x86: assert that local_events_need_delivery is not called by " Roger Pau Monne
2016-09-29 10:45 ` Jan Beulich
2016-09-30 8:32 ` Roger Pau Monne
2016-09-30 8:59 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 06/30] x86/paging: introduce paging_set_allocation Roger Pau Monne
2016-09-29 10:51 ` Jan Beulich
2016-09-29 14:51 ` Roger Pau Monne
2016-09-29 16:12 ` Jan Beulich
2016-09-29 16:57 ` Roger Pau Monne
2016-09-30 17:00 ` George Dunlap
2016-09-27 15:57 ` [PATCH v2 07/30] xen/x86: split the setup of Dom0 permissions to a function Roger Pau Monne
2016-09-29 13:47 ` Jan Beulich
2016-09-29 15:53 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 08/30] xen/x86: do the PCI scan unconditionally Roger Pau Monne
2016-09-29 13:55 ` Jan Beulich
2016-09-29 15:11 ` Roger Pau Monne
2016-09-29 16:14 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 09/30] x86/vtd: fix and simplify mapping RMRR regions Roger Pau Monne
2016-09-29 14:18 ` Jan Beulich
2016-09-30 11:27 ` Roger Pau Monne
2016-09-30 13:21 ` Jan Beulich
2016-09-30 15:02 ` Roger Pau Monne
2016-09-30 15:09 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 10/30] xen/x86: allow the emulated APICs to be enbled for the hardware domain Roger Pau Monne
2016-09-29 14:26 ` Jan Beulich
2016-09-30 15:44 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 11/30] xen/x86: split Dom0 build into PV and PVHv2 Roger Pau Monne
2016-09-30 15:03 ` Jan Beulich
2016-10-03 10:09 ` Roger Pau Monne
2016-10-04 6:54 ` Jan Beulich
2016-10-04 7:09 ` Andrew Cooper
2016-09-27 15:57 ` [PATCH v2 12/30] xen/x86: make print_e820_memory_map global Roger Pau Monne
2016-09-30 15:04 ` Jan Beulich
2016-10-03 16:23 ` Roger Pau Monne
2016-10-04 6:47 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 13/30] xen: introduce a new format specifier to print sizes in human-readable form Roger Pau Monne
2016-09-28 8:24 ` Juergen Gross
2016-09-28 11:56 ` Roger Pau Monne
2016-09-28 12:01 ` Andrew Cooper
2016-10-03 8:36 ` Paul Durrant
2016-10-11 10:27 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 14/30] xen/mm: add a ceil sufix to current page calculation routine Roger Pau Monne
2016-09-30 15:20 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 15/30] xen/x86: populate PVHv2 Dom0 physical memory map Roger Pau Monne
2016-09-30 15:52 ` Jan Beulich
2016-10-04 9:12 ` Roger Pau Monne
2016-10-04 11:16 ` Jan Beulich
2016-10-11 14:01 ` Roger Pau Monne
2016-10-12 11:51 ` Jan Beulich
2016-10-11 14:06 ` Roger Pau Monne
2016-10-12 11:58 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 16/30] xen/x86: parse Dom0 kernel for PVHv2 Roger Pau Monne
2016-10-06 15:14 ` Jan Beulich
2016-10-11 15:02 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 17/30] xen/x86: setup PVHv2 Dom0 CPUs Roger Pau Monne
2016-10-06 15:20 ` Jan Beulich
2016-10-12 11:06 ` Roger Pau Monne
2016-10-12 11:32 ` Andrew Cooper
2016-10-12 12:02 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 18/30] xen/x86: setup PVHv2 Dom0 ACPI tables Roger Pau Monne
2016-10-06 15:40 ` Jan Beulich
2016-10-06 15:48 ` Andrew Cooper
2016-10-12 15:35 ` Roger Pau Monne
2016-10-12 15:55 ` Jan Beulich
2016-10-26 11:35 ` Roger Pau Monne
2016-10-26 14:10 ` Jan Beulich
2016-10-26 15:08 ` Roger Pau Monne
2016-10-26 15:16 ` Jan Beulich
2016-10-26 16:03 ` Roger Pau Monne
2016-10-27 7:25 ` Jan Beulich
2016-10-27 11:08 ` Roger Pau Monne
2016-10-26 17:14 ` Boris Ostrovsky
2016-10-27 7:27 ` Jan Beulich
2016-10-27 11:13 ` Roger Pau Monne
2016-10-27 11:25 ` Jan Beulich
2016-10-27 13:51 ` Boris Ostrovsky
2016-10-27 14:02 ` Jan Beulich
2016-10-27 14:15 ` Boris Ostrovsky
2016-10-27 14:30 ` Jan Beulich
2016-10-27 14:40 ` Boris Ostrovsky
2016-10-27 15:04 ` Roger Pau Monne
2016-10-27 15:20 ` Jan Beulich
2016-10-27 15:37 ` Roger Pau Monne
2016-10-28 13:51 ` Boris Ostrovsky
2016-09-27 15:57 ` [PATCH v2 19/30] xen/dcpi: add a dpci passthrough handler for hardware domain Roger Pau Monne
2016-10-03 9:02 ` Paul Durrant
2016-10-06 14:31 ` Roger Pau Monne
2016-10-06 15:44 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 20/30] xen/x86: add the basic infrastructure to import QEMU passthrough code Roger Pau Monne
2016-10-03 9:54 ` Paul Durrant
2016-10-06 15:08 ` Roger Pau Monne
2016-10-06 15:52 ` Lars Kurth
2016-10-07 9:13 ` Jan Beulich
2016-10-06 15:47 ` Jan Beulich
2016-10-10 12:41 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 21/30] xen/pci: split code to size BARs from pci_add_device Roger Pau Monne
2016-10-06 16:00 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 22/30] xen/x86: support PVHv2 Dom0 BAR remapping Roger Pau Monne
2016-10-03 10:10 ` Paul Durrant
2016-10-06 15:25 ` Roger Pau Monne
2016-09-27 15:57 ` [PATCH v2 23/30] xen/x86: route legacy PCI interrupts to Dom0 Roger Pau Monne
2016-10-10 13:37 ` Jan Beulich
2016-09-27 15:57 ` Roger Pau Monne [this message]
2016-09-27 15:57 ` [PATCH v2 25/30] xen/x86: add all PCI devices to PVHv2 Dom0 Roger Pau Monne
2016-10-10 13:44 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 26/30] xen/x86: add PCIe emulation Roger Pau Monne
2016-10-03 10:46 ` Paul Durrant
2016-10-06 15:53 ` Roger Pau Monne
2016-10-10 13:57 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 27/30] x86/msixtbl: disable MSI-X intercepts for domains without an ioreq server Roger Pau Monne
2016-10-10 14:18 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 28/30] xen/x86: add MSI-X emulation to PVHv2 Dom0 Roger Pau Monne
2016-10-03 10:57 ` Paul Durrant
2016-10-06 15:58 ` Roger Pau Monne
2016-10-10 16:15 ` Jan Beulich
2016-09-27 15:57 ` [PATCH v2 29/30] xen/x86: allow PVHv2 to perform foreign memory mappings Roger Pau Monne
2016-09-30 17:36 ` George Dunlap
2016-10-10 14:21 ` Jan Beulich
2016-10-10 14:27 ` George Dunlap
2016-10-10 14:50 ` Jan Beulich
2016-10-10 14:58 ` George Dunlap
2016-09-27 15:57 ` [PATCH v2 30/30] xen: allow setting the store pfn HVM parameter Roger Pau Monne
2016-10-03 11:01 ` Paul Durrant
2016-09-28 12:22 ` [PATCH v2 00/30] PVHv2 Dom0 Roger Pau Monne
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1474991845-27962-25-git-send-email-roger.pau@citrix.com \
--to=roger.pau@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=boris.ostrovsky@oracle.com \
--cc=jbeulich@suse.com \
--cc=paul.durrant@citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).