From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-devel@nongnu.org, qemu-block@nongnu.org
Cc: kwolf@redhat.com, fam@euphon.net, stefanha@redhat.com, mreitz@redhat.com
Subject: [Qemu-devel] [PATCH 1/2] util/iov: introduce qemu_iovec_init_extended
Date: Tue, 28 May 2019 11:45:43 +0300
Message-ID: <20190528084544.183558-2-vsementsov@virtuozzo.com>
In-Reply-To: <20190528084544.183558-1-vsementsov@virtuozzo.com>

Introduce a new initialization API for creating requests with padding.
It will be used in the following patch.
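
For illustration, a minimal usage sketch (guest_qiov and the bounce
buffers below are hypothetical, not part of this patch):

    QEMUIOVector padded;
    uint8_t head_buf[512], tail_buf[512];

    /* Combine left padding, a slice of an existing vector and right
     * padding into one request vector. */
    qemu_iovec_init_extended(&padded,
                             head_buf, sizeof(head_buf),
                             guest_qiov, 0, guest_qiov->size,
                             tail_buf, sizeof(tail_buf));

    /* ... submit the padded request ... */

    /* Only the iovec array itself is allocated by the new API, so
     * destroying the vector does not touch the underlying buffers. */
    qemu_iovec_destroy(&padded);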

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 include/qemu/iov.h |  5 +++
 util/iov.c         | 89 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 94 insertions(+)

diff --git a/include/qemu/iov.h b/include/qemu/iov.h
index 48b45987b7..1c5be66102 100644
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -199,6 +199,11 @@ static inline void *qemu_iovec_buf(QEMUIOVector *qiov)
 
 void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
 void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
+void qemu_iovec_init_extended(
+        QEMUIOVector *qiov,
+        void *left, size_t left_len,
+        QEMUIOVector *middle, size_t middle_offset, size_t middle_len,
+        void *right, size_t right_len);
 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
 void qemu_iovec_concat(QEMUIOVector *dst,
                        QEMUIOVector *src, size_t soffset, size_t sbytes);
diff --git a/util/iov.c b/util/iov.c
index 74e6ca8ed7..6bfd609998 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -353,6 +353,95 @@ void qemu_iovec_concat(QEMUIOVector *dst,
     qemu_iovec_concat_iov(dst, src->iov, src->niov, soffset, sbytes);
 }
 
+/*
+ * qiov_find_iov
+ *
+ * Return the iov that contains the byte at @offset (in @qiov).
+ * Update @offset to be the offset of that same byte inside that iov.
+ */
+static struct iovec *qiov_find_iov(QEMUIOVector *qiov, size_t *offset)
+{
+    struct iovec *iov = qiov->iov;
+
+    assert(*offset < qiov->size);
+
+    while (*offset >= iov->iov_len) {
+        *offset -= iov->iov_len;
+        iov++;
+    }
+
+    return iov;
+}
+
+/*
+ * qiov_slice
+ *
+ * Find the subarray of iovecs containing the requested range. @head
+ * is set to the offset into the first returned iov, @tail to the
+ * count of extra bytes in the last iov (returned iov + @niov - 1).
+ */
+static struct iovec *qiov_slice(QEMUIOVector *qiov,
+                                size_t offset, size_t len,
+                                size_t *head, size_t *tail, int *niov)
+{
+    struct iovec *iov = qiov_find_iov(qiov, &offset), *end_iov;
+    size_t end_offset;
+
+    assert(offset + len <= qiov->size);
+
+    end_offset = iov->iov_len;
+    end_iov = iov + 1;
+
+    while (end_offset - offset < len) {
+        end_offset += end_iov->iov_len;
+        end_iov++;
+    }
+
+    *niov = end_iov - iov;
+    *head = offset;
+    *tail = (end_offset - offset) - len;
+
+    return iov;
+}
+
+/*
+ * Compile a new iovec array: combine the @head_buf buffer, the given
+ * sub-range of @mid_qiov, and the @tail_buf buffer into @qiov.
+ */
+void qemu_iovec_init_extended(
+        QEMUIOVector *qiov,
+        void *head_buf, size_t head_len,
+        QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len,
+        void *tail_buf, size_t tail_len)
+{
+    size_t mid_head, mid_tail;
+    int niov;
+    struct iovec *p, *mid_iov = qiov_slice(mid_qiov, mid_offset, mid_len,
+                                           &mid_head, &mid_tail, &niov);
+
+    assert(niov);
+    qiov->niov = qiov->nalloc = niov + !!head_len + !!tail_len;
+    qiov->size = head_len + mid_len + tail_len;
+
+    p = qiov->iov = g_new(struct iovec, qiov->niov);
+    if (head_len) {
+        p->iov_base = head_buf;
+        p->iov_len = head_len;
+        p++;
+    }
+
+    memcpy(p, mid_iov, niov * sizeof(*p));
+    p[0].iov_base = (uint8_t *)p[0].iov_base + mid_head;
+    p[0].iov_len -= mid_head;
+    p[niov - 1].iov_len -= mid_tail;
+    p += niov;
+
+    if (tail_len) {
+        p->iov_base = tail_buf;
+        p->iov_len = tail_len;
+    }
+}
+
 /*
  * Check if the contents of the iovecs are all zero
  */
-- 
2.18.0



