From: Stefano Stabellini <sstabellini@kernel.org>
To: peter.maydell@linaro.org
Cc: stefanha@gmail.com, sstabellini@kernel.org, stefanha@redhat.com,
	anthony.perard@citrix.com, xen-devel@lists.xenproject.org,
	qemu-devel@nongnu.org, Stefano Stabellini <stefano@aporeto.com>,
	jgross@suse.com,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
	Greg Kurz <groug@kaod.org>
Subject: [Qemu-devel] [PULL 15/21] xen/9pfs: implement in/out_iov_from_pdu and vmarshal/vunmarshal
Date: Fri, 21 Apr 2017 13:14:56 -0700
Message-ID: <1492805702-19690-15-git-send-email-sstabellini@kernel.org>
In-Reply-To: <1492805702-19690-1-git-send-email-sstabellini@kernel.org>

Implement xen_9pfs_init_in/out_iov_from_pdu and
xen_9pfs_pdu_vmarshal/vunmarshal by creating a new sg list pointing to
the data on the ring.

This is safe as we only handle one request per ring at any given time.
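For illustration only, here is a standalone sketch (not part of the patch) of
the index-masking and wrap-around split that the new helpers perform.
ring_mask(), split_sg(), RING_SIZE and the demo values are made-up names for
this example; the real code uses xen_9pfs_mask() and XEN_FLEX_RING_SIZE()
against the shared ring, and assumes there is data pending (cons != prod):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/uio.h>

    #define RING_SIZE 8  /* power of two, standing in for XEN_FLEX_RING_SIZE(order) */

    /* Mask a free-running index down to an offset inside the ring buffer. */
    static uint32_t ring_mask(uint32_t idx, uint32_t size)
    {
        return idx & (size - 1);
    }

    /*
     * Build an iovec array covering the pending bytes between cons and prod,
     * using one segment when the data is contiguous and two when it wraps
     * past the end of the buffer -- the same split done by
     * xen_9pfs_in_sg()/xen_9pfs_out_sg() in the patch.
     */
    static int split_sg(uint8_t *buf, uint32_t cons, uint32_t prod,
                        struct iovec sg[2])
    {
        uint32_t masked_cons = ring_mask(cons, RING_SIZE);
        uint32_t masked_prod = ring_mask(prod, RING_SIZE);

        if (masked_cons < masked_prod) {
            /* Contiguous: one segment from cons up to prod. */
            sg[0].iov_base = buf + masked_cons;
            sg[0].iov_len = masked_prod - masked_cons;
            return 1;
        }
        /* Wraps past the end of the buffer: two segments. */
        sg[0].iov_base = buf + masked_cons;
        sg[0].iov_len = RING_SIZE - masked_cons;
        sg[1].iov_base = buf;
        sg[1].iov_len = masked_prod;
        return 2;
    }

    int main(void)
    {
        uint8_t buf[RING_SIZE];
        struct iovec sg[2];

        /* 5 bytes pending starting at index 6: the data wraps, so 2 segments. */
        int num = split_sg(buf, 6, 11, sg);
        printf("%d segment(s): %zu + %zu bytes\n", num,
               sg[0].iov_len, num == 2 ? sg[1].iov_len : (size_t)0);
        return 0;
    }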

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
 hw/9pfs/xen-9p-backend.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 97 insertions(+), 2 deletions(-)

diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 60d49d1..0c1dd5e 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -57,12 +57,81 @@ typedef struct Xen9pfsDev {
     Xen9pfsRing *rings;
 } Xen9pfsDev;
 
+static void xen_9pfs_in_sg(Xen9pfsRing *ring,
+                           struct iovec *in_sg,
+                           int *num,
+                           uint32_t idx,
+                           uint32_t size)
+{
+    RING_IDX cons, prod, masked_prod, masked_cons;
+
+    cons = ring->intf->in_cons;
+    prod = ring->intf->in_prod;
+    xen_rmb();
+    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
+    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));
+
+    if (masked_prod < masked_cons) {
+        in_sg[0].iov_base = ring->ring.in + masked_prod;
+        in_sg[0].iov_len = masked_cons - masked_prod;
+        *num = 1;
+    } else {
+        in_sg[0].iov_base = ring->ring.in + masked_prod;
+        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
+        in_sg[1].iov_base = ring->ring.in;
+        in_sg[1].iov_len = masked_cons;
+        *num = 2;
+    }
+}
+
+static void xen_9pfs_out_sg(Xen9pfsRing *ring,
+                            struct iovec *out_sg,
+                            int *num,
+                            uint32_t idx)
+{
+    RING_IDX cons, prod, masked_prod, masked_cons;
+
+    cons = ring->intf->out_cons;
+    prod = ring->intf->out_prod;
+    xen_rmb();
+    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
+    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));
+
+    if (masked_cons < masked_prod) {
+        out_sg[0].iov_base = ring->ring.out + masked_cons;
+        out_sg[0].iov_len = ring->out_size;
+        *num = 1;
+    } else {
+        if (ring->out_size >
+            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
+            out_sg[0].iov_base = ring->ring.out + masked_cons;
+            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
+                                masked_cons;
+            out_sg[1].iov_base = ring->ring.out;
+            out_sg[1].iov_len = ring->out_size -
+                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
+                                 masked_cons);
+            *num = 2;
+        } else {
+            out_sg[0].iov_base = ring->ring.out + masked_cons;
+            out_sg[0].iov_len = ring->out_size;
+            *num = 1;
+        }
+    }
+}
+
 static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                      size_t offset,
                                      const char *fmt,
                                      va_list ap)
 {
-    return 0;
+    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
+    struct iovec in_sg[2];
+    int num;
+
+    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
+    return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
 }
 
 static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
@@ -70,13 +139,29 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                        const char *fmt,
                                        va_list ap)
 {
-    return 0;
+    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
+    struct iovec out_sg[2];
+    int num;
+
+    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+                    out_sg, &num, pdu->idx);
+    return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
 }
 
 static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                            struct iovec **piov,
                                            unsigned int *pniov)
 {
+    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
+    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
+    int num;
+
+    g_free(ring->sg);
+
+    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
+    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
+    *piov = ring->sg;
+    *pniov = num;
 }
 
 static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
@@ -84,6 +169,16 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                           unsigned int *pniov,
                                           size_t size)
 {
+    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
+    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
+    int num;
+
+    g_free(ring->sg);
+
+    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
+    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
+    *piov = ring->sg;
+    *pniov = num;
 }
 
 static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
-- 
1.9.1

Thread overview: 30+ messages
2017-04-21 20:14 [Qemu-devel] [PULL 0/21] Please pull xen-20170421-tag for 2.10 Stefano Stabellini
2017-04-21 20:14 ` [Qemu-devel] [PULL 01/21] xen: make use of xen_xc implicit in xen_common.h inlines Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 02/21] xen: rename xen_modified_memory() to xen_hvm_modified_memory() Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 03/21] xen: create wrappers for all other uses of xc_hvm_XXX() functions Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 04/21] configure: detect presence of libxendevicemodel Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 05/21] xen: use libxendevicemodel when available Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 06/21] xen: use 5 digit xen versions Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 07/21] xen: use libxendevice model to restrict operations Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 08/21] xen: additionally restrict xenforeignmemory operations Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 09/21] configure: use pkg-config for obtaining xen version Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 10/21] xen: import ring.h from xen Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 11/21] 9p: introduce a type for the 9p header Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 12/21] xen/9pfs: introduce Xen 9pfs backend Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 13/21] xen/9pfs: connect to the frontend Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 14/21] xen/9pfs: receive requests from " Stefano Stabellini
2017-04-21 20:14   ` Stefano Stabellini [this message]
2017-04-21 20:14   ` [Qemu-devel] [PULL 16/21] xen/9pfs: send responses back to " Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 17/21] xen/9pfs: build and register Xen 9pfs backend Stefano Stabellini
2017-04-21 20:14   ` [Qemu-devel] [PULL 18/21] add xen-9p-backend to MAINTAINERS under Xen Stefano Stabellini
2017-04-21 20:15   ` [Qemu-devel] [PULL 19/21] move xen-common.c to hw/xen/ Stefano Stabellini
2017-04-21 20:15   ` [Qemu-devel] [PULL 20/21] move xen-hvm.c to hw/i386/xen/ Stefano Stabellini
2017-04-21 20:15   ` [Qemu-devel] [PULL 21/21] move xen-mapcache.c " Stefano Stabellini
2017-04-24 10:31 ` [Qemu-devel] [PULL 0/21] Please pull xen-20170421-tag for 2.10 Peter Maydell
2017-04-24 21:25   ` Stefano Stabellini
2017-04-24 21:46     ` Peter Maydell
2017-04-24 23:44       ` Stefano Stabellini
2017-04-25  6:44         ` Greg Kurz
2017-04-25  7:31         ` Markus Armbruster
2017-04-25 17:18           ` Stefano Stabellini
2017-04-26  6:44             ` Markus Armbruster
