From: Wei Wang <wei.w.wang@intel.com>
To: marcandre.lureau@gmail.com, mst@redhat.com, stefanha@redhat.com,
	pbonzini@redhat.com, qemu-devel@nongnu.org,
	virtio-dev@lists.oasis-open.org
Cc: Wei Wang <wei.w.wang@intel.com>
Subject: [Qemu-devel] [RESEND Patch v1 18/37] vhost-user: send guest physical address of virtqueues to the slave
Date: Mon, 19 Dec 2016 13:58:53 +0800
Message-ID: <1482127152-84732-19-git-send-email-wei.w.wang@intel.com>
In-Reply-To: <1482127152-84732-1-git-send-email-wei.w.wang@intel.com>

In the vhost-pci case, the slave needs the master-side guest physical
address of each virtqueue, rather than the QEMU virtual address.
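
For illustration only (not part of this patch): once the slave has mapped the
master's memory regions received via VHOST_USER_SET_MEM_TABLE, a master-side
guest physical address can be turned into a usable pointer by a simple region
lookup. The sketch below uses hypothetical names (peer_mem_region,
peer_gpa_to_ptr) and is only meant to show why the GPA, not the master QEMU's
virtual address, is useful on the slave side.

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical sketch -- not part of this patch. */
    struct peer_mem_region {
        uint64_t gpa;     /* master-side guest physical address of the region */
        uint64_t size;    /* region size in bytes */
        void    *mapped;  /* where the slave mmap()ed the region fd */
    };

    /* Translate a master-side GPA into a pointer inside the mapped regions. */
    static void *peer_gpa_to_ptr(struct peer_mem_region *regions, int nregions,
                                 uint64_t gpa)
    {
        int i;

        for (i = 0; i < nregions; i++) {
            if (gpa >= regions[i].gpa &&
                gpa < regions[i].gpa + regions[i].size) {
                return (uint8_t *)regions[i].mapped + (gpa - regions[i].gpa);
            }
        }
        return NULL; /* GPA not covered by any shared region */
    }

A virtual address of the master QEMU process would be meaningless in the slave,
which is why this patch passes the GPA through when
VHOST_USER_PROTOCOL_F_VHOST_PCI is negotiated.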

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 hw/virtio/vhost.c         | 64 +++++++++++++++++++++++++++++++++--------------
 include/hw/virtio/vhost.h |  2 ++
 2 files changed, 47 insertions(+), 19 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index f7f7023..e945be8 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -26,6 +26,7 @@
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-access.h"
 #include "migration/migration.h"
+#include "hw/virtio/vhost-user.h"
 
 /* enabled until disconnected backend stabilizes */
 #define _VHOST_DEBUG 1
@@ -858,6 +859,12 @@ static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
     return -errno;
 }
 
+bool vhost_pci_enabled(struct vhost_dev *dev)
+{
+    return ((dev->protocol_features &
+            (1ULL << VHOST_USER_PROTOCOL_F_VHOST_PCI)) != 0);
+}
+
 static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
@@ -901,26 +908,40 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
         }
     }
 
-    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
     vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
-    vq->desc = cpu_physical_memory_map(a, &l, 0);
-    if (!vq->desc || l != s) {
-        r = -ENOMEM;
-        goto fail_alloc_desc;
+    if (vhost_pci_enabled(dev)) {
+        vq->desc = (void *)a;
+    } else {
+        vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
+        vq->desc = cpu_physical_memory_map(a, &l, 0);
+        if (!vq->desc || l != s) {
+            r = -ENOMEM;
+            goto fail_alloc_desc;
+        }
     }
+
     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
-    vq->avail = cpu_physical_memory_map(a, &l, 0);
-    if (!vq->avail || l != s) {
-        r = -ENOMEM;
-        goto fail_alloc_avail;
+    if (vhost_pci_enabled(dev)) {
+        vq->avail = (void *)a;
+    } else {
+        vq->avail = cpu_physical_memory_map(a, &l, 0);
+        if (!vq->avail || l != s) {
+            r = -ENOMEM;
+            goto fail_alloc_avail;
+        }
     }
+
     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
-    vq->used = cpu_physical_memory_map(a, &l, 1);
-    if (!vq->used || l != s) {
-        r = -ENOMEM;
-        goto fail_alloc_used;
+    if (vhost_pci_enabled(dev)) {
+        vq->used = (void *)a;
+    } else {
+        vq->used = cpu_physical_memory_map(a, &l, 1);
+        if (!vq->used || l != s) {
+            r = -ENOMEM;
+            goto fail_alloc_used;
+        }
     }
 
     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
@@ -1003,12 +1024,17 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                                 vhost_vq_index);
     }
 
-    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
-                              1, virtio_queue_get_used_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
-                              0, virtio_queue_get_avail_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
-                              0, virtio_queue_get_desc_size(vdev, idx));
+    if (!vhost_pci_enabled(dev)) {
+        cpu_physical_memory_unmap(vq->used,
+                                  virtio_queue_get_used_size(vdev, idx),
+                                  1, virtio_queue_get_used_size(vdev, idx));
+        cpu_physical_memory_unmap(vq->avail,
+                                  virtio_queue_get_avail_size(vdev, idx),
+                                  0, virtio_queue_get_avail_size(vdev, idx));
+        cpu_physical_memory_unmap(vq->desc,
+                                  virtio_queue_get_desc_size(vdev, idx),
+                                  0, virtio_queue_get_desc_size(vdev, idx));
+    }
 }
 
 static void vhost_eventfd_add(MemoryListener *listener,
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 9cf32e2..9955e07 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -92,4 +92,6 @@ bool vhost_has_free_slot(void);
 int vhost_net_set_backend(struct vhost_dev *hdev,
                           struct vhost_vring_file *file);
 
+bool vhost_pci_enabled(struct vhost_dev *dev);
+
 #endif
-- 
2.7.4
