qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Wei Wang <wei.w.wang@intel.com>
To: marcandre.lureau@gmail.com, mst@redhat.com, stefanha@redhat.com,
	pbonzini@redhat.com, qemu-devel@nongnu.org,
	virtio-dev@lists.oasis-open.org
Cc: Wei Wang <wei.w.wang@intel.com>
Subject: [Qemu-devel] [PATCH v1 15/37] vhost-pci-slave/msg: VHOST_USER_SET_MEM_TABLE
Date: Sat, 17 Dec 2016 18:43:25 +0800	[thread overview]
Message-ID: <1481971427-11094-16-git-send-email-wei.w.wang@intel.com> (raw)
In-Reply-To: <1481971427-11094-1-git-send-email-wei.w.wang@intel.com>

Map the peer memory in QEMU, and prepare the memory for the guest using
MemoryRegion. The controlq message of the memory info is constructed
here, and it will be sent to the guest when the guest controlq is ready. With
the peer memory info received in the message, the guest will be able to
translate any peer guest physical address to its own guest physical
address.

Also add a cleanup function to free the related memory that has been
set up.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 hw/virtio/vhost-pci-slave.c                    | 91 +++++++++++++++++++++++++-
 include/hw/virtio/vhost-pci-slave.h            |  9 +++
 include/standard-headers/linux/vhost_pci_net.h | 11 ++++
 3 files changed, 110 insertions(+), 1 deletion(-)

diff --git a/hw/virtio/vhost-pci-slave.c b/hw/virtio/vhost-pci-slave.c
index 9b854b1..5170ab5 100644
--- a/hw/virtio/vhost-pci-slave.c
+++ b/hw/virtio/vhost-pci-slave.c
@@ -26,6 +26,20 @@
 
 VhostPCISlave *vp_slave;
 
+static void vp_slave_cleanup(void)
+{
+    int ret;
+    uint32_t i, nregions;
+
+    nregions = vp_slave->pmem_msg.nregions;
+    for (i = 0; i < nregions; i++) {
+        ret = munmap(vp_slave->mr_map_base[i], vp_slave->mr_map_size[i]);
+        if (ret < 0)
+            error_report("cleanup: failed to unmap mr");
+        memory_region_del_subregion(vp_slave->bar_mr, vp_slave->sub_mr+i);
+    }
+}
+
 static int vp_slave_write(CharBackend *chr_be, VhostUserMsg *msg)
 {
     int size;
@@ -107,6 +121,72 @@ static int vp_slave_get_queue_num(CharBackend *chr_be, VhostUserMsg *msg)
     return vp_slave_write(chr_be, msg);
 }
 
+static uint64_t vp_slave_peer_mem_size_get(VhostUserMemory *pmem)
+{
+    int i;
+    uint64_t total_size = 0;
+    uint32_t nregions = pmem->nregions;
+    VhostUserMemoryRegion *pmem_regions = pmem->regions;
+
+    for (i = 0; i < nregions; i++) {
+        total_size += pmem_regions[i].memory_size;
+    }
+
+    return total_size;
+}
+
+static int vp_slave_set_mem_table(VhostUserMsg *msg, int *fds, int fd_num)
+{
+    VhostUserMemory *pmem = &msg->payload.memory;
+    VhostUserMemoryRegion *pmem_region = pmem->regions;
+    uint32_t i, nregions = pmem->nregions;
+    struct peer_mem_msg *pmem_msg = &vp_slave->pmem_msg;
+    pmem_msg->nregions = nregions;
+    MemoryRegion *bar_mr, *sub_mr;
+    uint64_t bar_size, bar_map_offset = 0;
+    void *mr_qva;
+
+    /* Sanity Check */
+    if (fd_num != nregions)
+        error_report("SET_MEM_TABLE: fd num doesn't match region num");
+
+    if (vp_slave->bar_mr == NULL)
+        vp_slave->bar_mr = g_malloc(sizeof(MemoryRegion));
+    if (vp_slave->sub_mr == NULL)
+        vp_slave->sub_mr = g_malloc(nregions * sizeof(MemoryRegion));
+    bar_mr = vp_slave->bar_mr;
+    sub_mr = vp_slave->sub_mr;
+
+    /*
+     * The top half of the bar area holds the peer memory, and the bottom
+     * half is reserved for memory hotplug
+     */
+    bar_size = 2 * vp_slave_peer_mem_size_get(pmem);
+    bar_size = pow2ceil(bar_size);
+    memory_region_init(bar_mr, NULL, "Peer Memory", bar_size);
+    for (i = 0; i < nregions; i++) {
+        vp_slave->mr_map_size[i] = pmem_region[i].memory_size
+                                       + pmem_region[i].mmap_offset;
+        vp_slave->mr_map_base[i] = mmap(NULL, vp_slave->mr_map_size[i],
+                      PROT_READ | PROT_WRITE, MAP_SHARED, fds[i], 0);
+        if (vp_slave->mr_map_base[i] == MAP_FAILED) {
+            error_report("SET_MEM_TABLE: map peer memory region %d failed", i);
+            return -1;
+        }
+
+        mr_qva = vp_slave->mr_map_base[i] + pmem_region[i].mmap_offset;
+        memory_region_init_ram_ptr(&sub_mr[i], NULL, "Peer Memory",
+                                   pmem_region[i].memory_size, mr_qva);
+        memory_region_add_subregion(bar_mr, bar_map_offset, &sub_mr[i]);
+        bar_map_offset += pmem_region[i].memory_size;
+        pmem_msg->regions[i].gpa = pmem_region[i].guest_phys_addr;
+        pmem_msg->regions[i].size = pmem_region[i].memory_size;
+    }
+    vp_slave->bar_map_offset = bar_map_offset;
+
+    return 0;
+}
+
 static int vp_slave_can_read(void *opaque)
 {
     return VHOST_USER_HDR_SIZE;
@@ -114,7 +194,7 @@ static int vp_slave_can_read(void *opaque)
 
 static void vp_slave_read(void *opaque, const uint8_t *buf, int size)
 {
-    int ret;
+    int ret, fd_num, fds[MAX_GUEST_REGION];
     VhostUserMsg msg;
     uint8_t *p = (uint8_t *) &msg;
     CharBackend *chr_be = (CharBackend *)opaque;
@@ -165,6 +245,10 @@ static void vp_slave_read(void *opaque, const uint8_t *buf, int size)
         break;
     case VHOST_USER_SET_OWNER:
         break;
+    case VHOST_USER_SET_MEM_TABLE:
+        fd_num = qemu_chr_fe_get_msgfds(chr_be, fds, sizeof(fds) / sizeof(int));
+        vp_slave_set_mem_table(&msg, fds, fd_num);
+        break;
     default:
         error_report("vhost-pci-slave does not support msg request = %d",
                      msg.request);
@@ -198,6 +282,8 @@ int vhost_pci_slave_init(QemuOpts *opts)
         return -1;
     }
     vp_slave->feature_bits =  1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
+    vp_slave->bar_mr = NULL;
+    vp_slave->sub_mr = NULL;
     qemu_chr_fe_init(&vp_slave->chr_be, chr, &error_abort);
     qemu_chr_fe_set_handlers(&vp_slave->chr_be, vp_slave_can_read,
                              vp_slave_read, vp_slave_event,
@@ -208,7 +294,10 @@ int vhost_pci_slave_init(QemuOpts *opts)
 
 int vhost_pci_slave_cleanup(void)
 {
+    vp_slave_cleanup();
     qemu_chr_fe_deinit(&vp_slave->chr_be);
+    g_free(vp_slave->sub_mr);
+    g_free(vp_slave->bar_mr);
     g_free(vp_slave);
 
     return 0;
diff --git a/include/hw/virtio/vhost-pci-slave.h b/include/hw/virtio/vhost-pci-slave.h
index 8b162dc..03e23eb 100644
--- a/include/hw/virtio/vhost-pci-slave.h
+++ b/include/hw/virtio/vhost-pci-slave.h
@@ -2,11 +2,20 @@
 #define QEMU_VHOST_PCI_SLAVE_H
 
 #include "sysemu/char.h"
+#include "exec/memory.h"
+#include "standard-headers/linux/vhost_pci_net.h"
 
 typedef struct VhostPCISlave {
     CharBackend chr_be;
     uint16_t dev_type;
     uint64_t feature_bits;
+    /* hotplugged memory should be mapped following the offset */
+    uint64_t bar_map_offset;
+    MemoryRegion *bar_mr;
+    MemoryRegion *sub_mr;
+    void *mr_map_base[MAX_GUEST_REGION];
+    uint64_t mr_map_size[MAX_GUEST_REGION];
+    struct peer_mem_msg pmem_msg;
 } VhostPCISlave;
 
 extern VhostPCISlave *vp_slave;
diff --git a/include/standard-headers/linux/vhost_pci_net.h b/include/standard-headers/linux/vhost_pci_net.h
index bac293f..f4c8d0b 100644
--- a/include/standard-headers/linux/vhost_pci_net.h
+++ b/include/standard-headers/linux/vhost_pci_net.h
@@ -29,6 +29,17 @@
 
 #include "standard-headers/linux/virtio_ids.h"
 
+struct pmem_region_msg {
+	uint64_t gpa;
+	uint64_t size;
+};
+
+#define MAX_GUEST_REGION 8
+struct peer_mem_msg {
+	uint32_t nregions;
+	struct pmem_region_msg regions[MAX_GUEST_REGION];
+};
+
 #define VPNET_S_LINK_UP	1	/* Link is up */
 
 struct vhost_pci_net_config {
-- 
2.7.4

  parent reply	other threads:[~2016-12-17 10:45 UTC|newest]

Thread overview: 40+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-12-17 10:43 [Qemu-devel] [PATCH v1 00/37] Implementation of vhost-pci for inter-vm commucation Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 01/37] vhost-pci-net: the fundamental vhost-pci-net device emulation Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 02/37] vhost-pci-net: the fundamental implementation of vhost-pci-net-pci Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 03/37] vhost-user: share the vhost-user protocol related structures Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 04/37] vl: add the vhost-pci-slave command line option Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 05/37] vhost-pci-slave: start the implementation of vhost-pci-slave Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 06/37] vhost-pci-slave: set up the fundamental handlers for the server socket Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 07/37] vhost-pci-slave/msg: VHOST_USER_GET_FEATURES Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 08/37] vhost-pci-slave/msg: VHOST_USER_SET_FEATURES Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 09/37] vhost-pci-slave/msg: VHOST_USER_GET_PROTOCOL_FEATURES Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 10/37] vhost-pci-slave/msg: VHOST_USER_SET_PROTOCOL_FEATURES Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 11/37] vhost-user/msg: VHOST_USER_PROTOCOL_F_SET_DEVICE_ID Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 12/37] vhost-pci-slave/msg: VHOST_USER_SET_DEVICE_ID Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 13/37] vhost-pci-slave/msg: VHOST_USER_GET_QUEUE_NUM Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 14/37] vhost-pci-slave/msg: VHOST_USER_SET_OWNER Wei Wang
2016-12-17 10:43 ` Wei Wang [this message]
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 16/37] vhost-pci-slave/msg: VHOST_USER_SET_VRING_NUM Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 17/37] vhost-pci-slave/msg: VHOST_USER_SET_VRING_BASE Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 18/37] vhost-user: send guest physical address of virtqueues to the slave Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 19/37] vhost-pci-slave/msg: VHOST_USER_SET_VRING_ADDR Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 20/37] vhost-pci-slave/msg: VHOST_USER_SET_VRING_KICK Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 21/37] vhost-pci-slave/msg: VHOST_USER_SET_VRING_CALL Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 22/37] vhost-pci-slave/msg: VHOST_USER_SET_VRING_ENABLE Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 23/37] vhost-pci-slave/msg: VHOST_USER_SET_LOG_BASE Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 24/37] vhost-pci-slave/msg: VHOST_USER_SET_LOG_FD Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 25/37] vhost-pci-slave/msg: VHOST_USER_SEND_RARP Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 26/37] vhost-pci-slave/msg: VHOST_USER_GET_VRING_BASE Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 27/37] vhost-pci-net: pass the info collected by vp_slave to the device Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 28/37] vhost-pci-net: pass the mem and vring info to the driver Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 29/37] vhost-pci-slave/msg: VHOST_USER_SET_VHOST_PCI (start) Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 30/37] vhost-pci-slave/msg: VHOST_USER_SET_VHOST_PCI (stop) Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 31/37] vhost-user/msg: send VHOST_USER_SET_VHOST_PCI (start/stop) Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 32/37] vhost-user: add asynchronous read for the vhost-user master Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 33/37] vhost-pci-net: send the negotiated feature bits to the master Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 34/37] vhost-pci-slave: add "peer_reset" Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 35/37] vhost-pci-net: start the vhost-pci-net device Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 36/37] vhost-user/msg: handling VHOST_USER_SET_FEATURES Wei Wang
2016-12-17 10:43 ` [Qemu-devel] [PATCH v1 37/37] vl: enable vhost-pci-slave Wei Wang
2016-12-17 11:38 ` [Qemu-devel] [PATCH v1 00/37] Implementation of vhost-pci for inter-vm commucation no-reply
2016-12-17 11:55 ` no-reply

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1481971427-11094-16-git-send-email-wei.w.wang@intel.com \
    --to=wei.w.wang@intel.com \
    --cc=marcandre.lureau@gmail.com \
    --cc=mst@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=stefanha@redhat.com \
    --cc=virtio-dev@lists.oasis-open.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).