From: Paulina Szubarczyk <paulinaszubarczyk@gmail.com>
To: xen-devel@lists.xenproject.org, roger.pau@citrix.com
Cc: sstabellini@kernel.org, wei.liu2@citrix.com,
George.Dunlap@eu.citrix.com,
Paulina Szubarczyk <paulinaszubarczyk@gmail.com>,
ian.jackson@eu.citrix.com, P.Gawkowski@ii.pw.edu.pl,
anthony.perard@citrix.com
Subject: [PATCH 3/3] qemu-xen-dir/hw/block: Cache local buffers used in grant copy
Date: Fri, 27 May 2016 14:53:58 +0200
Message-ID: <1464353638-14435-4-git-send-email-paulinaszubarczyk@gmail.com>
In-Reply-To: <1464353638-14435-1-git-send-email-paulinaszubarczyk@gmail.com>
If there are still pending requests, the buffers are not unmapped but
cached in an array of size max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST.
---
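For reviewers: the cache below is just a small LIFO stack of page-sized
buffers. A self-contained sketch of the get/put pattern (names are
illustrative, not taken from the QEMU code; CACHE_CAP stands in for
max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST and PAGE_SZ for
1 << XC_PAGE_SHIFT):

    #include <stddef.h>
    #include <sys/mman.h>

    #define CACHE_CAP 64
    #define PAGE_SZ   4096

    struct buf_cache {
        void *bufs[CACHE_CAP];
        int   free;                 /* number of buffers currently cached */
    };

    /* Pop a cached page if one is available, otherwise map a fresh one. */
    static void *cache_get(struct buf_cache *c)
    {
        void *buf;

        if (c->free > 0) {
            return c->bufs[--c->free];
        }
        buf = mmap(NULL, PAGE_SZ, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        return buf == MAP_FAILED ? NULL : buf;
    }

    /* Push the page back if there is room, otherwise unmap it. */
    static void cache_put(struct buf_cache *c, void *buf)
    {
        if (c->free < CACHE_CAP) {
            c->bufs[c->free++] = buf;
        } else {
            munmap(buf, PAGE_SZ);
        }
    }

The patch follows this pattern, with the twist that free_buffers() only
refills the cache while the ring still has work pending
(blkdev->more_work); once the device goes idle, the pages are
munmap()ed instead of cached.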
hw/block/xen_disk.c | 59 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 47 insertions(+), 12 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 3e5eefd..ab1863b 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -125,6 +125,10 @@ struct XenBlkDev {
     /* */
     gboolean            feature_discard;
 
+    /* request buffer cache */
+    void                **buf_cache;
+    int                 buf_cache_free;
+
     /* qemu block driver */
     DriveInfo           *dinfo;
     BlockBackend        *blk;
@@ -284,11 +288,16 @@ err:
     return -1;
 }
 
-static void* get_buffer(void) {
+static void* get_buffer(struct XenBlkDev *blkdev) {
     void *buf;
 
-    buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE,
+    if (blkdev->buf_cache_free <= 0) {
+        buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    } else {
+        blkdev->buf_cache_free--;
+        buf = blkdev->buf_cache[blkdev->buf_cache_free];
+    }
 
     if (unlikely(buf == MAP_FAILED))
         return NULL;
@@ -300,21 +309,40 @@ static int free_buffer(void* buf) {
     return munmap(buf, 1 << XC_PAGE_SHIFT);
 }
 
-static int free_buffers(void** page, int count)
+static int free_buffers(void** page, int count, struct XenBlkDev *blkdev)
 {
-    int i, r = 0;
+    int i, put_buf_cache = 0, r = 0;
+
+    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
+        put_buf_cache = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST
+                        - blkdev->buf_cache_free;
+    }
 
     for (i = 0; i < count; i++) {
-
-        if(free_buffer(page[i]))
-            r = 1;
-
+        if (put_buf_cache > 0) {
+            blkdev->buf_cache[blkdev->buf_cache_free++] = page[i];
+            put_buf_cache--;
+        } else {
+            if (free_buffer(page[i]))
+                r = 1;
+        }
+
         page[i] = NULL;
    }
 
     return r;
 }
 
+static void free_buf_cache(struct XenBlkDev *blkdev) {
+    int i;
+    for (i = 0; i < blkdev->buf_cache_free; i++) {
+        free_buffer(blkdev->buf_cache[i]);
+    }
+
+    blkdev->buf_cache_free = 0;
+    free(blkdev->buf_cache);
+}
+
 static int ioreq_write(struct ioreq *ioreq)
 {
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
@@ -342,7 +370,7 @@ static int ioreq_write(struct ioreq *ioreq)
         offset[i] = ioreq->req.seg[i].first_sect * ioreq->blkdev->file_blk;
         len[i] = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1)
                  * ioreq->blkdev->file_blk;
-        pages[i] = get_buffer();
+        pages[i] = get_buffer(ioreq->blkdev);
 
         if(!pages[i]) {
             xen_be_printf(&ioreq->blkdev->xendev, 0,
@@ -356,7 +384,7 @@ static int ioreq_write(struct ioreq *ioreq)
         xen_be_printf(&ioreq->blkdev->xendev, 0,
                       "failed to copy data for write %d \n", rc);
 
-        if(free_buffers(ioreq->page, ioreq->v.niov)) {
+        if (free_buffers(ioreq->page, ioreq->v.niov, ioreq->blkdev)) {
             xen_be_printf(&ioreq->blkdev->xendev, 0,
                           "failed to free page, errno %d \n", errno);
         }
@@ -382,7 +410,7 @@ static int ioreq_read_init(struct ioreq *ioreq)
     }
 
     for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = get_buffer();
+        ioreq->page[i] = get_buffer(ioreq->blkdev);
         if(!ioreq->page[i]) {
             return -1;
         }
@@ -469,7 +497,7 @@ static void qemu_aio_complete(void *opaque, int ret)
                           "failed to copy read data to guest\n");
         }
     case BLKIF_OP_WRITE:
-        if(free_buffers(ioreq->page, ioreq->v.niov)) {
+        if (free_buffers(ioreq->page, ioreq->v.niov, ioreq->blkdev)) {
             xen_be_printf(&ioreq->blkdev->xendev, 0,
                           "failed to free page, errno %d \n", errno);
         }
@@ -936,6 +964,11 @@ static int blk_connect(struct XenDevice *xendev)
     }
     blkdev->cnt_map++;
 
+    /* create buffer cache for grant copy operations */
+    blkdev->buf_cache_free = 0;
+    blkdev->buf_cache = calloc(max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST,
+                               sizeof(void *));
+
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
@@ -972,6 +1005,8 @@ static void blk_disconnect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
 
+    free_buf_cache(blkdev);
+
     if (blkdev->blk) {
         blk_detach_dev(blkdev->blk, blkdev);
         blk_unref(blkdev->blk);
--
1.9.1