qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Paolo Bonzini <pbonzini@redhat.com>
To: "Li, Zhen-Hua" <zhen-hual@hp.com>,
	qemu-devel@nongnu.org, qemu-trivial@nongnu.org,
	qemu-stable@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
Date: Wed, 26 Mar 2014 10:55:37 +0100	[thread overview]
Message-ID: <5332A419.1090700@redhat.com> (raw)
In-Reply-To: <1395799358-16499-1-git-send-email-zhen-hual@hp.com>

Il 26/03/2014 03:02, Li, Zhen-Hua ha scritto:
> From: "Li, ZhenHua" <zhen-hual@hp.com>
>
> In the virtio-blk module, when there is a new request, a new req structure
> will be created by malloc.  Using a req pool instead of this will increase
> performance;
>
> Improvement: about 5% to 10%.

Can you try g_slice_new/g_slice_free instead?

Paolo

> Signed-off-by: Li, ZhenHua <zhen-hual@hp.com>
> ---
>  hw/block/virtio-blk.c |   87 ++++++++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 75 insertions(+), 12 deletions(-)
>
> diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
> index 8a568e5..da5b570 100644
> --- a/hw/block/virtio-blk.c
> +++ b/hw/block/virtio-blk.c
> @@ -39,6 +39,70 @@ typedef struct VirtIOBlockReq
>      BlockAcctCookie acct;
>  } VirtIOBlockReq;
>
> +#define POOL_PAGE 512
> +static VirtIOBlockReq * * req_pool;
> +static char * req_pool_used;
> +static unsigned long req_pool_size = 0;
> +
> +static void remalloc_reqs(void){
> +    unsigned long old_size = req_pool_size;
> +    unsigned long int i;
> +    char * old_used = req_pool_used;
> +    VirtIOBlockReq * * old_pool = req_pool;
> +
> +    req_pool_size += POOL_PAGE;
> +    req_pool_used = (char * )malloc(req_pool_size * sizeof(char));
> +    req_pool =  (VirtIOBlockReq * * )malloc(req_pool_size * sizeof(VirtIOBlockReq *));
> +
> +    if(old_size != 0){
> +        memcpy(req_pool_used, old_used, old_size*(sizeof(char)));
> +        memcpy(req_pool, old_pool, old_size*(sizeof(VirtIOBlockReq *)));
> +    }
> +    for(i=old_size; i<req_pool_size; i++){
> +    	req_pool[i] = (VirtIOBlockReq *)malloc(sizeof(VirtIOBlockReq));
> +    	req_pool_used[i] = 0;
> +    }
> +
> +    if(old_size != 0){
> +    	free(old_used);
> +    	free(old_pool);
> +    }
> +}
> +static VirtIOBlockReq * req_pool_get_new(void){
> +    unsigned long int i;
> +    char * used;
> +    VirtIOBlockReq * * req;
> +
> +    if(req_pool_size == 0){
> +        remalloc_reqs();
> +    }
> +    for(i=0, used=req_pool_used, req=req_pool;
> +            i<req_pool_size; i++, used ++, req++){
> +    	if(*used == 0){
> +            *used = 1;
> +            return *req;
> +        }
> +    }
> +    remalloc_reqs();
> +    req_pool_used[req_pool_size-POOL_PAGE] = 1;
> +    *req = req_pool[req_pool_size-POOL_PAGE];
> +    return *req;
> +}
> +
> +static void virtio_blk_free_request(VirtIOBlockReq *req0){
> +    unsigned long int i;
> +    char * used;
> +    VirtIOBlockReq * * req;
> +
> +    for(i=0, used=req_pool_used, req=req_pool;
> +            i<req_pool_size; i++, used ++, req++){
> +    	if(*req == req0){
> +            *used = 0;
> +        }
> +    }
> +}
> +
> +
>  static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
>  {
>      VirtIOBlock *s = req->dev;
> @@ -63,7 +127,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
>      } else if (action == BDRV_ACTION_REPORT) {
>          virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
>          bdrv_acct_done(s->bs, &req->acct);
> -        g_free(req);
> +        virtio_blk_free_request(req);
>      }
>
>      bdrv_error_action(s->bs, action, is_read, error);
> @@ -84,7 +148,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
>
>      virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
>      bdrv_acct_done(req->dev->bs, &req->acct);
> -    g_free(req);
> +    virtio_blk_free_request(req);
>  }
>
>  static void virtio_blk_flush_complete(void *opaque, int ret)
> @@ -99,25 +163,24 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
>
>      virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
>      bdrv_acct_done(req->dev->bs, &req->acct);
> -    g_free(req);
> +    virtio_blk_free_request(req);
>  }
> -
>  static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
>  {
> -    VirtIOBlockReq *req = g_malloc(sizeof(*req));
> +    VirtIOBlockReq *req ;
> +    req = req_pool_get_new();
>      req->dev = s;
>      req->qiov.size = 0;
>      req->next = NULL;
>      return req;
>  }
> -
>  static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
>  {
>      VirtIOBlockReq *req = virtio_blk_alloc_request(s);
>
>      if (req != NULL) {
>          if (!virtqueue_pop(s->vq, &req->elem)) {
> -            g_free(req);
> +            virtio_blk_free_request(req);
>              return NULL;
>          }
>      }
> @@ -142,7 +205,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
>       */
>      if (req->elem.out_num < 2 || req->elem.in_num < 3) {
>          virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
> -        g_free(req);
> +        virtio_blk_free_request(req);
>          return;
>      }
>
> @@ -232,7 +295,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
>      stl_p(&req->scsi->data_len, hdr.dxfer_len);
>
>      virtio_blk_req_complete(req, status);
> -    g_free(req);
> +    virtio_blk_free_request(req);
>      return;
>  #else
>      abort();
> @@ -242,7 +305,7 @@ fail:
>      /* Just put anything nonzero so that the ioctl fails in the guest.  */
>      stl_p(&req->scsi->errors, 255);
>      virtio_blk_req_complete(req, status);
> -    g_free(req);
> +    virtio_blk_free_request(req);
>  }
>
>  typedef struct MultiReqBuffer {
> @@ -375,7 +438,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
>                  s->blk.serial ? s->blk.serial : "",
>                  MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
>          virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
> -        g_free(req);
> +        virtio_blk_free_request(req);
>      } else if (type & VIRTIO_BLK_T_OUT) {
>          qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
>                                   req->elem.out_num - 1);
> @@ -387,7 +450,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
>          virtio_blk_handle_read(req);
>      } else {
>          virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
> -        g_free(req);
> +        virtio_blk_free_request(req);
>      }
>  }
>
>

  parent reply	other threads:[~2014-03-26  9:56 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-03-26  2:02 [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free Li, Zhen-Hua
2014-03-26  2:07 ` Li, ZhenHua
2014-03-26  9:55 ` Paolo Bonzini [this message]
2014-03-26  9:59   ` Li, ZhenHua
2014-03-26 10:26     ` Paolo Bonzini
  -- strict thread matches above, loose matches on Subject: below --
2014-03-25  8:44 Li, Zhen-Hua
2014-03-26  9:27 ` Stefan Hajnoczi
2014-03-26  9:54   ` Li, ZhenHua

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5332A419.1090700@redhat.com \
    --to=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=qemu-stable@nongnu.org \
    --cc=qemu-trivial@nongnu.org \
    --cc=zhen-hual@hp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).