From: Stefano Stabellini <sstabellini@kernel.org>
To: aneesh.kumar@linux.vnet.ibm.com
Cc: xen-devel@lists.xensource.com, anthony.perard@citrix.com,
	sstabellini@kernel.org, groug@kaod.org, qemu-devel@nongnu.org
Subject: [PATCH v2 3/4] 9pfs: call v9fs_init_qiov_from_pdu before v9fs_pack
Date: Mon, 28 Nov 2016 13:27:23 -0800
Message-ID: <1480368444-4310-3-git-send-email-sstabellini@kernel.org>
In-Reply-To: <1480368444-4310-1-git-send-email-sstabellini@kernel.org>

v9fs_xattr_read should not access VirtQueueElement elems directly:
that is a virtio-specific detail, and 9pfs is being cleaned up to
support multiple transports. Move v9fs_init_qiov_from_pdu up in the
file and call it before v9fs_pack, then run v9fs_pack on the newly
built iovec.

Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>

---
Changes in v2:
- add a call to qemu_iovec_destroy
- fix commit description
---
 hw/9pfs/9p.c | 59 ++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 30 insertions(+), 29 deletions(-)
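
For reviewers, the resulting call sequence in v9fs_xattr_read, reduced
to a minimal sketch (error handling and the read_count bookkeeping are
elided; see the hunks below for the full context):

    QEMUIOVector qiov_full;

    /* Build a heap-allocated iovec covering the first read_count bytes
     * of the PDU payload; false selects the read direction (data
     * flowing back to the client). */
    v9fs_init_qiov_from_pdu(&qiov_full, pdu, 0, read_count, false);

    /* Pack the xattr value into the new iovec instead of touching
     * elem->in_sg directly. */
    err = v9fs_pack(qiov_full.iov, qiov_full.niov, offset,
                    ((char *)fidp->fs.xattr.value) + off, read_count);

    /* The concatenated iovec owns heap memory and must be released. */
    qemu_iovec_destroy(&qiov_full);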

diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 5a20a13..79d7201 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -1633,14 +1633,39 @@ out_nofid:
     pdu_complete(pdu, err);
 }
 
+/*
+ * Create a QEMUIOVector for a sub-region of PDU iovecs
+ *
+ * @qiov:       uninitialized QEMUIOVector
+ * @skip:       number of bytes to skip from beginning of PDU
+ * @size:       number of bytes to include
+ * @is_write:   true - write, false - read
+ *
+ * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
+ * with qemu_iovec_destroy().
+ */
+static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
+                                    size_t skip, size_t size,
+                                    bool is_write)
+{
+    QEMUIOVector elem;
+    struct iovec *iov;
+    unsigned int niov;
+
+    pdu->s->transport->init_iov_from_pdu(pdu, &iov, &niov, is_write);
+
+    qemu_iovec_init_external(&elem, iov, niov);
+    qemu_iovec_init(qiov, niov);
+    qemu_iovec_concat(qiov, &elem, skip, size);
+}
+
 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
                            uint64_t off, uint32_t max_count)
 {
     ssize_t err;
     size_t offset = 7;
     uint64_t read_count;
-    V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
-    VirtQueueElement *elem = v->elems[pdu->idx];
+    QEMUIOVector qiov_full;
 
     if (fidp->fs.xattr.len < off) {
         read_count = 0;
@@ -1656,9 +1681,11 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
     }
     offset += err;
 
-    err = v9fs_pack(elem->in_sg, elem->in_num, offset,
+    v9fs_init_qiov_from_pdu(&qiov_full, pdu, 0, read_count, false);
+    err = v9fs_pack(qiov_full.iov, qiov_full.niov, offset,
                     ((char *)fidp->fs.xattr.value) + off,
                     read_count);
+    qemu_iovec_destroy(&qiov_full);
     if (err < 0) {
         return err;
     }
@@ -1732,32 +1759,6 @@ static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
     return count;
 }
 
-/*
- * Create a QEMUIOVector for a sub-region of PDU iovecs
- *
- * @qiov:       uninitialized QEMUIOVector
- * @skip:       number of bytes to skip from beginning of PDU
- * @size:       number of bytes to include
- * @is_write:   true - write, false - read
- *
- * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
- * with qemu_iovec_destroy().
- */
-static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
-                                    size_t skip, size_t size,
-                                    bool is_write)
-{
-    QEMUIOVector elem;
-    struct iovec *iov;
-    unsigned int niov;
-
-    pdu->s->transport->init_iov_from_pdu(pdu, &iov, &niov, is_write);
-
-    qemu_iovec_init_external(&elem, iov, niov);
-    qemu_iovec_init(qiov, niov);
-    qemu_iovec_concat(qiov, &elem, skip, size);
-}
-
 static void coroutine_fn v9fs_read(void *opaque)
 {
     int32_t fid;
-- 
1.9.1


Thread overview: 10+ messages
2016-11-28 21:24 [PATCH v2 0/4] 9pfs: clean-up for multiple transports Stefano Stabellini
2016-11-28 21:27 ` [PATCH v2 1/4] 9pfs: move pdus to V9fsState Stefano Stabellini
2016-11-28 21:27   ` [PATCH v2 2/4] 9pfs: introduce transport specific callbacks Stefano Stabellini
2016-12-02 11:03     ` Greg Kurz
2016-11-28 21:27   ` Stefano Stabellini [this message]
2016-12-02 11:04     ` [PATCH v2 3/4] 9pfs: call v9fs_init_qiov_from_pdu before v9fs_pack Greg Kurz
2016-11-28 21:27   ` [PATCH v2 4/4] 9pfs: introduce init_out/in_iov_from_pdu Stefano Stabellini
2016-12-02 11:05     ` Greg Kurz
2016-12-02 19:02       ` [Xen-devel] " Stefano Stabellini
2016-12-03  8:57         ` Greg Kurz
