From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Peter Maydell" <peter.maydell@linaro.org>,
"Yongji Xie" <elohimes@gmail.com>,
"Yongji Xie" <xieyongji@baidu.com>,
"Maxime Coquelin" <maxime.coquelin@redhat.com>,
"Marc-André Lureau" <marcandre.lureau@redhat.com>,
"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
"Philippe Mathieu-Daudé" <f4bug@amsat.org>,
"Paolo Bonzini" <pbonzini@redhat.com>
Subject: [Qemu-devel] [PULL 21/26] libvhost-user: Support across-memory-boundary access
Date: Thu, 8 Feb 2018 21:09:16 +0200
Message-ID: <1518116908-10852-22-git-send-email-mst@redhat.com>
In-Reply-To: <1518116908-10852-1-git-send-email-mst@redhat.com>
From: Yongji Xie <elohimes@gmail.com>
The sg list/indirect descriptor table may be contiguous
in GPA but not in HVA address space, and libvhost-user
wasn't aware of that. This could cause out-of-bounds
accesses, which a malicious guest could even use to leak
information from the vhost-user backend.

Introduce an in/out plen parameter in vu_gpa_to_va() so
we can handle this case: the caller passes the length it
wants to map, and on return *plen holds the length that
was actually mapped within the containing region.
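
For illustration (not part of this patch): since vu_gpa_to_va()
clamps *plen to the end of the containing region, a caller that
needs a full guest range must loop. A minimal sketch, assuming
<string.h> and libvhost-user.h; copy_from_guest() is a
hypothetical helper name, not a libvhost-user API:

    /* Copy "size" bytes from guest physical address "gpa" into
     * "dst", even when the range spans memory regions that are
     * contiguous in GPA but not in HVA space. */
    static int copy_from_guest(VuDev *dev, void *dst,
                               uint64_t gpa, uint64_t size)
    {
        while (size) {
            uint64_t len = size;  /* in: bytes still wanted */
            void *src = vu_gpa_to_va(dev, &len, gpa);

            if (!src) {
                return -1;        /* unmapped guest address */
            }
            /* out: len was clamped to the region boundary */
            memcpy(dst, src, len);
            dst = (uint8_t *)dst + len;
            gpa += len;
            size -= len;
        }
        return 0;
    }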
Signed-off-by: Yongji Xie <xieyongji@baidu.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
contrib/libvhost-user/libvhost-user.h | 3 +-
contrib/libvhost-user/libvhost-user.c | 133 ++++++++++++++++++++++++++++++----
2 files changed, 122 insertions(+), 14 deletions(-)
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index f8a730b..18f95f6 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -327,11 +327,12 @@ bool vu_dispatch(VuDev *dev);
/**
* vu_gpa_to_va:
* @dev: a VuDev context
+ * @plen: in/out: length to map; on return, the length actually mapped
* @guest_addr: guest address
*
* Translate a guest address to a pointer. Returns NULL on failure.
*/
-void *vu_gpa_to_va(VuDev *dev, uint64_t guest_addr);
+void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
/**
* vu_get_queue:
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 54dbc93..2e358b5 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -118,15 +118,22 @@ vu_panic(VuDev *dev, const char *msg, ...)
/* Translate guest physical address to our virtual address. */
void *
-vu_gpa_to_va(VuDev *dev, uint64_t guest_addr)
+vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
int i;
+ if (*plen == 0) {
+ return NULL;
+ }
+
/* Find matching memory region. */
for (i = 0; i < dev->nregions; i++) {
VuDevRegion *r = &dev->regions[i];
if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
+ if ((guest_addr + *plen) > (r->gpa + r->size)) {
+ *plen = r->gpa + r->size - guest_addr;
+ }
return (void *)(uintptr_t)
guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
}
@@ -1116,6 +1123,37 @@ virtqueue_get_head(VuDev *dev, VuVirtq *vq,
return true;
}
+static int
+virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
+ uint64_t addr, size_t len)
+{
+ struct vring_desc *ori_desc;
+ uint64_t read_len;
+
+ if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
+ return -1;
+ }
+
+ if (len == 0) {
+ return -1;
+ }
+
+ while (len) {
+ read_len = len;
+ ori_desc = vu_gpa_to_va(dev, &read_len, addr);
+ if (!ori_desc) {
+ return -1;
+ }
+
+ memcpy(desc, ori_desc, read_len);
+ len -= read_len;
+ addr += read_len;
+ desc = (struct vring_desc *)((uint8_t *)desc + read_len);
+ }
+
+ return 0;
+}
+
enum {
VIRTQUEUE_READ_DESC_ERROR = -1,
VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
@@ -1162,8 +1200,10 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
}
while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
- unsigned int max, num_bufs, indirect = 0;
+ unsigned int max, desc_len, num_bufs, indirect = 0;
+ uint64_t desc_addr, read_len;
struct vring_desc *desc;
+ struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
unsigned int i;
max = vq->vring.num;
@@ -1187,8 +1227,24 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
/* loop over the indirect descriptor table */
indirect = 1;
- max = desc[i].len / sizeof(struct vring_desc);
- desc = vu_gpa_to_va(dev, desc[i].addr);
+ desc_addr = desc[i].addr;
+ desc_len = desc[i].len;
+ max = desc_len / sizeof(struct vring_desc);
+ read_len = desc_len;
+ desc = vu_gpa_to_va(dev, &read_len, desc_addr);
+ if (unlikely(desc && read_len != desc_len)) {
+ /* Failed to use zero copy */
+ desc = NULL;
+ if (!virtqueue_read_indirect_desc(dev, desc_buf,
+ desc_addr,
+ desc_len)) {
+ desc = desc_buf;
+ }
+ }
+ if (!desc) {
+ vu_panic(dev, "Invalid indirect buffer table");
+ goto err;
+ }
num_bufs = i = 0;
}
@@ -1386,9 +1442,24 @@ virtqueue_map_desc(VuDev *dev,
return;
}
- iov[num_sg].iov_base = vu_gpa_to_va(dev, pa);
- iov[num_sg].iov_len = sz;
- num_sg++;
+ while (sz) {
+ uint64_t len = sz;
+
+ if (num_sg == max_num_sg) {
+ vu_panic(dev, "virtio: too many descriptors in indirect table");
+ return;
+ }
+
+ iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
+ if (iov[num_sg].iov_base == NULL) {
+ vu_panic(dev, "virtio: invalid address for buffers");
+ return;
+ }
+ iov[num_sg].iov_len = len;
+ num_sg++;
+ sz -= len;
+ pa += len;
+ }
*p_num_sg = num_sg;
}
@@ -1420,10 +1491,12 @@ virtqueue_alloc_element(size_t sz,
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
- unsigned int i, head, max;
+ unsigned int i, head, max, desc_len;
+ uint64_t desc_addr, read_len;
VuVirtqElement *elem;
unsigned out_num, in_num;
struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
struct vring_desc *desc;
int rc;
@@ -1464,8 +1537,24 @@ vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
}
/* loop over the indirect descriptor table */
- max = desc[i].len / sizeof(struct vring_desc);
- desc = vu_gpa_to_va(dev, desc[i].addr);
+ desc_addr = desc[i].addr;
+ desc_len = desc[i].len;
+ max = desc_len / sizeof(struct vring_desc);
+ read_len = desc_len;
+ desc = vu_gpa_to_va(dev, &read_len, desc_addr);
+ if (unlikely(desc && read_len != desc_len)) {
+ /* Failed to use zero copy */
+ desc = NULL;
+ if (!virtqueue_read_indirect_desc(dev, desc_buf,
+ desc_addr,
+ desc_len)) {
+ desc = desc_buf;
+ }
+ }
+ if (!desc) {
+ vu_panic(dev, "Invalid indirect buffer table");
+ return NULL;
+ }
i = 0;
}
@@ -1541,7 +1630,9 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
unsigned int len)
{
struct vring_desc *desc = vq->vring.desc;
- unsigned int i, max, min;
+ unsigned int i, max, min, desc_len;
+ uint64_t desc_addr, read_len;
+ struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
unsigned num_bufs = 0;
max = vq->vring.num;
@@ -1553,8 +1644,24 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
}
/* loop over the indirect descriptor table */
- max = desc[i].len / sizeof(struct vring_desc);
- desc = vu_gpa_to_va(dev, desc[i].addr);
+ desc_addr = desc[i].addr;
+ desc_len = desc[i].len;
+ max = desc_len / sizeof(struct vring_desc);
+ read_len = desc_len;
+ desc = vu_gpa_to_va(dev, &read_len, desc_addr);
+ if (unlikely(desc && read_len != desc_len)) {
+ /* Failed to use zero copy */
+ desc = NULL;
+ if (!virtqueue_read_indirect_desc(dev, desc_buf,
+ desc_addr,
+ desc_len)) {
+ desc = desc_buf;
+ }
+ }
+ if (!desc) {
+ vu_panic(dev, "Invalid indirect buffer table");
+ return;
+ }
i = 0;
}
--
MST