From: Greg Kurz <groug@kaod.org>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	Greg Kurz <groug@kaod.org>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Stefano Stabellini <stefano@aporeto.com>
Subject: [Qemu-devel] [PULL 7/8] xen-9pfs: disconnect if buffers are misconfigured
Date: Thu, 29 Jun 2017 15:43:50 +0200
Message-ID: <1498743831-28676-8-git-send-email-groug@kaod.org>
In-Reply-To: <1498743831-28676-1-git-send-email-groug@kaod.org>

From: Stefano Stabellini <sstabellini@kernel.org>

Implement xen_9pfs_disconnect by unbinding the event channels. On
xen_9pfs_free, call disconnect first if any event channels are still
bound.
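
In essence, the teardown now splits into two steps -- a condensed
sketch of the hunks below, with the fd handler removal and per-ring
cleanup elided:

    static void xen_9pfs_disconnect(struct XenDevice *xendev)
    {
        Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
        int i;

        /* unbind any event channel that is still live */
        for (i = 0; i < xen_9pdev->num_rings; i++) {
            if (xen_9pdev->rings[i].evtchndev != NULL) {
                xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                                 xen_9pdev->rings[i].local_port);
                xen_9pdev->rings[i].evtchndev = NULL;
            }
        }
    }

    static int xen_9pfs_free(struct XenDevice *xendev)
    {
        Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);

        /* disconnect first if the event channels are still bound */
        if (xen_9pdev->rings[0].evtchndev != NULL) {
            xen_9pfs_disconnect(xendev);
        }
        /* ... then free rings, grant mappings, bottom halves, strings ... */
        return 0;
    }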

If the frontend misconfigured the buffers, set the backend state to
"Closing" and disconnect it. Misconfigurations include requesting a read
of more bytes than are available on the ring buffer, or claiming to be
writing more data than is actually available on it.
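
All three failure paths added below share one pattern: log the
offending request, move the backend to "Closing", and tear down the
event channels. A condensed sketch, where bad_buffer stands for the
respective check (v9fs_iov_vmarshal()/v9fs_iov_vunmarshal() returning
< 0, or iov_size() reporting less space than the request needs):

    if (bad_buffer) {
        /* describe the offending request in the log */
        xen_pv_printf(&xen_9pfs->xendev, 0, "...\n");
        /* tell the frontend we are going away, then unbind */
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }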

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Signed-off-by: Greg Kurz <groug@kaod.org>
---
 hw/9pfs/xen-9p-backend.c | 85 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 64 insertions(+), 21 deletions(-)

diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index a82cf817fe45..ee87f08926a2 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -54,6 +54,8 @@ typedef struct Xen9pfsDev {
     Xen9pfsRing *rings;
 } Xen9pfsDev;
 
+static void xen_9pfs_disconnect(struct XenDevice *xendev);
+
 static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                            struct iovec *in_sg,
                            int *num,
@@ -125,10 +127,19 @@ static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
     Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
     struct iovec in_sg[2];
     int num;
+    ssize_t ret;
 
     xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
-    return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
+
+    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
+    if (ret < 0) {
+        xen_pv_printf(&xen_9pfs->xendev, 0,
+                      "Failed to encode VirtFS request type %d\n", pdu->id + 1);
+        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
+        xen_9pfs_disconnect(&xen_9pfs->xendev);
+    }
+    return ret;
 }
 
 static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
@@ -139,10 +150,19 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
     Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
     struct iovec out_sg[2];
     int num;
+    ssize_t ret;
 
     xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                     out_sg, &num, pdu->idx);
-    return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
+
+    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
+    if (ret < 0) {
+        xen_pv_printf(&xen_9pfs->xendev, 0,
+                      "Failed to decode VirtFS request type %d\n", pdu->id);
+        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
+        xen_9pfs_disconnect(&xen_9pfs->xendev);
+    }
+    return ret;
 }
 
 static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
@@ -170,11 +190,22 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
     Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
     Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
     int num;
+    size_t buf_size;
 
     g_free(ring->sg);
 
     ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
     xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
+
+    buf_size = iov_size(ring->sg, num);
+    if (buf_size < size) {
+        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
+                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
+                buf_size);
+        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
+        xen_9pfs_disconnect(&xen_9pfs->xendev);
+    }
+
     *piov = ring->sg;
     *pniov = num;
 }
@@ -218,7 +249,7 @@ static int xen_9pfs_init(struct XenDevice *xendev)
 static int xen_9pfs_receive(Xen9pfsRing *ring)
 {
     P9MsgHeader h;
-    RING_IDX cons, prod, masked_prod, masked_cons;
+    RING_IDX cons, prod, masked_prod, masked_cons, queued;
     V9fsPDU *pdu;
 
     if (ring->inprogress) {
@@ -229,8 +260,8 @@ static int xen_9pfs_receive(Xen9pfsRing *ring)
     prod = ring->intf->out_prod;
     xen_rmb();
 
-    if (xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)) <
-        sizeof(h)) {
+    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
+    if (queued < sizeof(h)) {
         return 0;
     }
     ring->inprogress = true;
@@ -241,6 +272,9 @@ static int xen_9pfs_receive(Xen9pfsRing *ring)
     xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                          masked_prod, &masked_cons,
                          XEN_FLEX_RING_SIZE(ring->ring_order));
+    if (queued < le32_to_cpu(h.size_le)) {
+        return 0;
+    }
 
     /* cannot fail, because we only handle one request per ring at a time */
     pdu = pdu_alloc(&ring->priv->state);
@@ -269,15 +303,30 @@ static void xen_9pfs_evtchn_event(void *opaque)
     qemu_bh_schedule(ring->bh);
 }
 
-static int xen_9pfs_free(struct XenDevice *xendev)
+static void xen_9pfs_disconnect(struct XenDevice *xendev)
 {
+    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
     int i;
+
+    for (i = 0; i < xen_9pdev->num_rings; i++) {
+        if (xen_9pdev->rings[i].evtchndev != NULL) {
+            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
+                    NULL, NULL, NULL);
+            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
+                             xen_9pdev->rings[i].local_port);
+            xen_9pdev->rings[i].evtchndev = NULL;
+        }
+    }
+}
+
+static int xen_9pfs_free(struct XenDevice *xendev)
+{
     Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
+    int i;
 
-    g_free(xen_9pdev->id);
-    g_free(xen_9pdev->tag);
-    g_free(xen_9pdev->path);
-    g_free(xen_9pdev->security_model);
+    if (xen_9pdev->rings[0].evtchndev != NULL) {
+        xen_9pfs_disconnect(xendev);
+    }
 
     for (i = 0; i < xen_9pdev->num_rings; i++) {
         if (xen_9pdev->rings[i].data != NULL) {
@@ -290,16 +339,15 @@ static int xen_9pfs_free(struct XenDevice *xendev)
                     xen_9pdev->rings[i].intf,
                     1);
         }
-        if (xen_9pdev->rings[i].evtchndev > 0) {
-            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
-                    NULL, NULL, NULL);
-            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
-                             xen_9pdev->rings[i].local_port);
-        }
         if (xen_9pdev->rings[i].bh != NULL) {
             qemu_bh_delete(xen_9pdev->rings[i].bh);
         }
     }
+
+    g_free(xen_9pdev->id);
+    g_free(xen_9pdev->tag);
+    g_free(xen_9pdev->path);
+    g_free(xen_9pdev->security_model);
     g_free(xen_9pdev->rings);
     return 0;
 }
@@ -423,11 +471,6 @@ static void xen_9pfs_alloc(struct XenDevice *xendev)
     xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
 }
 
-static void xen_9pfs_disconnect(struct XenDevice *xendev)
-{
-    /* Dynamic hotplug of PV filesystems at runtime is not supported. */
-}
-
 struct XenDevOps xen_9pfs_ops = {
     .size       = sizeof(Xen9pfsDev),
     .flags      = DEVOPS_FLAG_NEED_GNTDEV,
-- 
2.7.5
