xen-devel.lists.xenproject.org archive mirror
From: Paulina Szubarczyk <paulinaszubarczyk@gmail.com>
To: xen-devel@lists.xenproject.org, roger.pau@citrix.com
Cc: sstabellini@kernel.org, wei.liu2@citrix.com,
	George.Dunlap@eu.citrix.com,
	Paulina Szubarczyk <paulinaszubarczyk@gmail.com>,
	ian.jackson@eu.citrix.com, P.Gawkowski@ii.pw.edu.pl,
	anthony.perard@citrix.com
Subject: [PATCH RESEND 4/4] qemu-xen-dir/hw/block: Cache local buffers used in grant copy
Date: Tue, 31 May 2016 06:44:58 +0200	[thread overview]
Message-ID: <1464669898-28495-5-git-send-email-paulinaszubarczyk@gmail.com> (raw)
In-Reply-To: <1464669898-28495-1-git-send-email-paulinaszubarczyk@gmail.com>

If there are still pending requests, the local buffers are not unmapped
with munmap() but cached in an array of size
max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST, so that subsequent grant
copy operations can reuse them.

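In short, get_buffer() reuses a cached page when one is available and
falls back to mmap(); free_buffers() keeps a page in the cache only while
more requests are pending and the cache has room, otherwise it munmap()s
it. The standalone sketch below illustrates that policy. It is not the
QEMU code: struct buf_cache, cache_get(), cache_put(), the main() demo
and the max_requests value of 32 are made up for illustration; only
XC_PAGE_SHIFT, BLKIF_MAX_SEGMENTS_PER_REQUEST and the mmap() flags mirror
the diff below.

    #define _DEFAULT_SOURCE           /* for MAP_ANONYMOUS */
    #include <stdlib.h>
    #include <sys/mman.h>

    #define XC_PAGE_SHIFT                  12
    #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
    static const int max_requests = 32;  /* placeholder value */

    struct buf_cache {
        void **slots;   /* up to max_requests * SEGMENTS cached pages */
        int    nfree;   /* number of cached pages available for reuse */
    };

    /* Hand out a page: prefer a cached one, else mmap() a fresh one. */
    static void *cache_get(struct buf_cache *c)
    {
        if (c->nfree > 0) {
            return c->slots[--c->nfree];
        }
        void *buf = mmap(NULL, 1 << XC_PAGE_SHIFT,
                         PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        return buf == MAP_FAILED ? NULL : buf;
    }

    /* Return a page: keep it while more work is expected and the cache
     * is not full, otherwise unmap it. */
    static int cache_put(struct buf_cache *c, void *buf, int more_work)
    {
        if (more_work &&
            c->nfree < max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            c->slots[c->nfree++] = buf;
            return 0;
        }
        return munmap(buf, 1 << XC_PAGE_SHIFT);
    }

    int main(void)
    {
        struct buf_cache c = {
            .slots = calloc(max_requests *
                            BLKIF_MAX_SEGMENTS_PER_REQUEST,
                            sizeof(void *)),
            .nfree = 0,
        };
        void *p = cache_get(&c);
        cache_put(&c, p, 1);             /* kept: next get reuses it   */
        cache_put(&c, cache_get(&c), 0); /* dropped: page is unmapped  */
        free(c.slots);
        return 0;
    }

The point of the cache is to avoid an mmap()/munmap() pair per segment
while the ring still has pending requests.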
---
 hw/block/xen_disk.c | 60 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 13 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 43cd9c9..cf80897 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -125,6 +125,10 @@ struct XenBlkDev {
     /* */
     gboolean            feature_discard;
 
+    /* request buffer cache */
+    void                **buf_cache;
+    int                 buf_cache_free;
+
     /* qemu block driver */
     DriveInfo           *dinfo;
     BlockBackend        *blk;
@@ -284,12 +288,16 @@ err:
     return -1;
 }
 
-
-static void* get_buffer(void) {
+static void* get_buffer(struct XenBlkDev *blkdev) {
     void *buf;
 
-    buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
+    if(blkdev->buf_cache_free <= 0) {
+        buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    } else {
+        blkdev->buf_cache_free--;
+        buf = blkdev->buf_cache[blkdev->buf_cache_free];
+    }
 
     if (unlikely(buf == MAP_FAILED))
         return NULL;
@@ -301,21 +309,40 @@ static int free_buffer(void* buf) {
     return munmap(buf, 1 << XC_PAGE_SHIFT);
 }
 
-static int free_buffers(void** page, int count) 
+static int free_buffers(void** page, int count, struct XenBlkDev *blkdev) 
 {
-    int i, r = 0;
+    int i, put_buf_cache = 0, r = 0;
+
+    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
+        put_buf_cache = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST
+                        - blkdev->buf_cache_free;
+    }
 
     for (i = 0; i < count; i++) { 
-        
-        if(free_buffer(page[i])) 
-            r = 1;
-        
+        if(put_buf_cache > 0) {
+            blkdev->buf_cache[blkdev->buf_cache_free++] = page[i];
+            put_buf_cache--;
+        } else { 
+            if(free_buffer(page[i])) 
+                r = 1;
+        }
+
         page[i] = NULL;
     }
 
     return r;
 }
 
+static void free_buf_cache(struct XenBlkDev *blkdev) {
+    int i;
+    for(i = 0; i < blkdev->buf_cache_free; i++) {
+        free_buffer(blkdev->buf_cache[i]);
+    }
+
+    blkdev->buf_cache_free = 0;
+    g_free(blkdev->buf_cache);
+}
+
 static int ioreq_write(struct ioreq *ioreq) 
 {
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
@@ -343,7 +370,7 @@ static int ioreq_write(struct ioreq *ioreq)
         offset[i] = ioreq->req.seg[i].first_sect * ioreq->blkdev->file_blk;
         len[i] = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) 
                   * ioreq->blkdev->file_blk;
-        pages[i]  = get_buffer();
+        pages[i]  = get_buffer(ioreq->blkdev);
 
         if(!pages[i]) {
             xen_be_printf(&ioreq->blkdev->xendev, 0, 
@@ -357,7 +384,7 @@ static int ioreq_write(struct ioreq *ioreq)
         xen_be_printf(&ioreq->blkdev->xendev, 0, 
                       "failed to copy data for write %d \n", rc);
 
-        if(free_buffers(ioreq->page, ioreq->v.niov)) {
+        if(free_buffers(ioreq->page, ioreq->v.niov, ioreq->blkdev)) {
             xen_be_printf(&ioreq->blkdev->xendev, 0, 
                           "failed to free page, errno %d \n", errno);
         }
@@ -383,7 +410,7 @@ static int ioreq_read_init(struct ioreq *ioreq)
     }
 
     for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = get_buffer();
+        ioreq->page[i] = get_buffer(ioreq->blkdev);
         if(!ioreq->page[i]) {
             return -1;
         }
@@ -469,7 +496,7 @@ static void qemu_aio_complete(void *opaque, int ret)
                           "failed to copy read data to guest\n");
         }
     case BLKIF_OP_WRITE:
-        if(free_buffers(ioreq->page, ioreq->v.niov)) {
+        if(free_buffers(ioreq->page, ioreq->v.niov, ioreq->blkdev)) {
             xen_be_printf(&ioreq->blkdev->xendev, 0, 
                           "failed to free page, errno %d \n", errno);
         }
@@ -936,6 +963,11 @@ static int blk_connect(struct XenDevice *xendev)
     }
     blkdev->cnt_map++;
 
+    /* create buffer cache for grant copy operations */
+    blkdev->buf_cache_free = 0;
+    blkdev->buf_cache = g_new0(void *,
+                               max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
@@ -972,6 +1004,8 @@ static void blk_disconnect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
 
+    free_buf_cache(blkdev);
+
     if (blkdev->blk) {
         blk_detach_dev(blkdev->blk, blkdev);
         blk_unref(blkdev->blk);
-- 
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Thread overview: 21+ messages
2016-05-31  4:44 [PATCH RESEND 0/4] qemu-qdisk: Replace grant map by grant copy Paulina Szubarczyk
2016-05-31  4:44 ` [PATCH RESEND 1/4] libs, gnttab, libxc: Interface for grant copy operation Paulina Szubarczyk
2016-05-31  9:25   ` David Vrabel
2016-06-01  7:45     ` Paulina Szubarczyk
2016-06-01 11:22       ` David Vrabel
2016-06-01 11:42         ` Paulina Szubarczyk
2016-06-02  9:37   ` Roger Pau Monné
2016-06-06 14:47   ` Wei Liu
2016-05-31  4:44 ` [PATCH RESEND 2/4] qdisk, hw/block/xen_disk: Removal of grant mapping Paulina Szubarczyk
2016-05-31  9:26   ` David Vrabel
2016-06-02  9:41   ` Roger Pau Monné
2016-06-02  9:57     ` Paulina Szubarczyk
2016-06-02 10:22       ` David Vrabel
2016-05-31  4:44 ` [PATCH RESEND 3/4] qdisk, hw/block/xen_disk: Perform grant copy instead of grant map Paulina Szubarczyk
2016-05-31  9:37   ` David Vrabel
2016-06-01  7:52     ` Paulina Szubarczyk
2016-06-01 11:15       ` David Vrabel
2016-06-02 13:47   ` Roger Pau Monné
2016-05-31  4:44 ` Paulina Szubarczyk [this message]
2016-06-02 14:19   ` [PATCH RESEND 4/4] qemu-xen-dir/hw/block: Cache local buffers used in grant copy Roger Pau Monné
2016-06-07 13:13     ` Paulina Szubarczyk
