* [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Li, Zhen-Hua @ 2014-03-26 2:02 UTC
To: qemu-devel, qemu-trivial, qemu-stable; +Cc: Li, ZhenHua
From: "Li, ZhenHua" <zhen-hual@hp.com>
In the virtio-blk module, a new req structure is allocated with malloc
for every request. Using a req pool instead of per-request malloc/free
increases performance.

Performance increase: about 5% to 10%.
Signed-off-by: Li, ZhenHua <zhen-hual@hp.com>
---
hw/block/virtio-blk.c | 87 ++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 75 insertions(+), 12 deletions(-)
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 8a568e5..da5b570 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -39,6 +39,70 @@ typedef struct VirtIOBlockReq
     BlockAcctCookie acct;
 } VirtIOBlockReq;
 
+#define POOL_PAGE 512
+static VirtIOBlockReq * * req_pool;
+static char * req_pool_used;
+static unsigned long req_pool_size = 0;
+
+static void remalloc_reqs(void){
+    unsigned long old_size = req_pool_size;
+    unsigned long int i;
+    char * old_used = req_pool_used;
+    VirtIOBlockReq * * old_pool = req_pool;
+
+    req_pool_size += POOL_PAGE;
+    req_pool_used = (char * )malloc(req_pool_size * sizeof(char));
+    req_pool = (VirtIOBlockReq * * )malloc(req_pool_size * sizeof(VirtIOBlockReq *));
+
+    if(old_size != 0){
+        memcpy(req_pool_used, old_used, old_size*(sizeof(char)));
+        memcpy(req_pool, old_pool, old_size*(sizeof(VirtIOBlockReq *)));
+    }
+    for(i=old_size; i<req_pool_size; i++){
+        req_pool[i] = (VirtIOBlockReq *)malloc(sizeof(VirtIOBlockReq));
+        req_pool_used[i] = 0;
+    }
+
+    if(old_size != 0){
+        free(old_used);
+        free(old_pool);
+    }
+}
+static VirtIOBlockReq * req_pool_get_new(void){
+    unsigned long int i;
+    char * used;
+    VirtIOBlockReq * * req;
+
+    if(req_pool_size == 0){
+        remalloc_reqs();
+    }
+    for(i=0, used=req_pool_used, req=req_pool;
+            i<req_pool_size; i++, used ++, req++){
+        if(*used == 0){
+            *used = 1;
+            return *req;
+        }
+    }
+    remalloc_reqs();
+    req_pool_used[req_pool_size-POOL_PAGE] = 1;
+    *req = req_pool[req_pool_size-POOL_PAGE];
+    return *req;
+}
+
+static void virtio_blk_free_request(VirtIOBlockReq *req0){
+    unsigned long int i;
+    char * used;
+    VirtIOBlockReq * * req;
+
+    for(i=0, used=req_pool_used, req=req_pool;
+            i<req_pool_size; i++, used ++, req++){
+        if(*req == req0){
+            *used = 0;
+        }
+    }
+}
+
+
 static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
 {
     VirtIOBlock *s = req->dev;
@@ -63,7 +127,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
     } else if (action == BDRV_ACTION_REPORT) {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
         bdrv_acct_done(s->bs, &req->acct);
-        g_free(req);
+        virtio_blk_free_request(req);
     }
 
     bdrv_error_action(s->bs, action, is_read, error);
@@ -84,7 +148,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
 
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
     bdrv_acct_done(req->dev->bs, &req->acct);
-    g_free(req);
+    virtio_blk_free_request(req);
 }
 
 static void virtio_blk_flush_complete(void *opaque, int ret)
@@ -99,25 +163,24 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
 
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
     bdrv_acct_done(req->dev->bs, &req->acct);
-    g_free(req);
+    virtio_blk_free_request(req);
 }
-
 static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
 {
-    VirtIOBlockReq *req = g_malloc(sizeof(*req));
+    VirtIOBlockReq *req ;
+    req = req_pool_get_new();
     req->dev = s;
     req->qiov.size = 0;
     req->next = NULL;
     return req;
 }
-
 static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
 {
     VirtIOBlockReq *req = virtio_blk_alloc_request(s);
 
     if (req != NULL) {
         if (!virtqueue_pop(s->vq, &req->elem)) {
-            g_free(req);
+            virtio_blk_free_request(req);
             return NULL;
         }
     }
@@ -142,7 +205,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
      */
     if (req->elem.out_num < 2 || req->elem.in_num < 3) {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
-        g_free(req);
+        virtio_blk_free_request(req);
         return;
     }
 
@@ -232,7 +295,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
     stl_p(&req->scsi->data_len, hdr.dxfer_len);
 
     virtio_blk_req_complete(req, status);
-    g_free(req);
+    virtio_blk_free_request(req);
     return;
 #else
     abort();
@@ -242,7 +305,7 @@ fail:
     /* Just put anything nonzero so that the ioctl fails in the guest. */
     stl_p(&req->scsi->errors, 255);
     virtio_blk_req_complete(req, status);
-    g_free(req);
+    virtio_blk_free_request(req);
 }
 
 typedef struct MultiReqBuffer {
@@ -375,7 +438,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
                   s->blk.serial ? s->blk.serial : "",
                   MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-        g_free(req);
+        virtio_blk_free_request(req);
     } else if (type & VIRTIO_BLK_T_OUT) {
         qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                  req->elem.out_num - 1);
@@ -387,7 +450,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
         virtio_blk_handle_read(req);
     } else {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
-        g_free(req);
+        virtio_blk_free_request(req);
     }
 }
 
-- 
1.7.10.4
* Re: [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Li, ZhenHua @ 2014-03-26 2:07 UTC
To: qemu-devel, qemu-trivial, qemu-stable; +Cc: Li, Zhen-Hua
I am sorry; it seems this was not sent out before I subscribed to the
list, so I am sending the patch again.
On 03/26/2014 10:02 AM, Li, Zhen-Hua wrote:
> From: "Li, ZhenHua" <zhen-hual@hp.com>
>
> In the virtio-blk module, a new req structure is allocated with malloc
> for every request. Using a req pool instead of per-request malloc/free
> increases performance.
>
> Performance increase: about 5% to 10%.
>
> Signed-off-by: Li, ZhenHua <zhen-hual@hp.com>
> [...]
* Re: [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Paolo Bonzini @ 2014-03-26 9:55 UTC
To: Li, Zhen-Hua, qemu-devel, qemu-trivial, qemu-stable
On 26/03/2014 03:02, Li, Zhen-Hua wrote:
> From: "Li, ZhenHua" <zhen-hual@hp.com>
>
> In the virtio-blk module, a new req structure is allocated with malloc
> for every request. Using a req pool instead of per-request malloc/free
> increases performance.
>
> Performance increase: about 5% to 10%.
Can you try g_slice_new/g_slice_free instead?
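For instance, the allocation paths could become something along these
lines (an untested sketch of the idea, not a finished patch):

static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    /* g_slice_new() hands out fixed-size chunks from per-size free
     * lists, so frequent alloc/free of same-sized requests avoids
     * most of the malloc overhead without any hand-rolled pool. */
    VirtIOBlockReq *req = g_slice_new(VirtIOBlockReq);

    req->dev = s;
    req->qiov.size = 0;
    req->next = NULL;
    return req;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_slice_free(VirtIOBlockReq, req);
}

with the existing g_free(req) call sites switched over to
virtio_blk_free_request(req).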
Paolo
* Re: [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Li, ZhenHua @ 2014-03-26 9:59 UTC
To: Paolo Bonzini, Stefan Hajnoczi; +Cc: qemu-devel
Sorry, I am confused.
There are two ways now:
1. Just use g_slice_new to replace malloc/free.
2. Use a pool to avoid frequently creating/destroying reqs, and use
g_slice_new when creating the pool.
Which one do you mean?
Thanks
ZhenHua
On 03/26/2014 05:55 PM, Paolo Bonzini wrote:
> On 26/03/2014 03:02, Li, Zhen-Hua wrote:
>> From: "Li, ZhenHua" <zhen-hual@hp.com>
>>
>> In the virtio-blk module, a new req structure is allocated with malloc
>> for every request. Using a req pool instead of per-request malloc/free
>> increases performance.
>>
>> Performance increase: about 5% to 10%.
>
> Can you try g_slice_new/g_slice_free instead?
>
> Paolo
* Re: [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Paolo Bonzini @ 2014-03-26 10:26 UTC
To: Li, ZhenHua, Stefan Hajnoczi; +Cc: qemu-devel
On 26/03/2014 10:59, Li, ZhenHua wrote:
> Sorry, I am confused.
>
> There are two ways now:
> 1. Just use g_slice_new to replace malloc/free.
> 2. Use a pool to avoid frequently creating/destroying reqs, and use
> g_slice_new when creating the pool.
>
> Which one do you mean?
I think both Stefan and I mean (1).
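That is, no pool at all, just something like (sketch, untested):

-    VirtIOBlockReq *req = g_malloc(sizeof(*req));
+    VirtIOBlockReq *req = g_slice_new(VirtIOBlockReq);

in virtio_blk_alloc_request(), and a matching
g_slice_free(VirtIOBlockReq, req) wherever the request is released.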
Paolo
* [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Li, Zhen-Hua @ 2014-03-25 8:44 UTC
To: qemu-devel; +Cc: Li, ZhenHua
From: "Li, ZhenHua" <zhen-hual@hp.com>
In the virtio-blk module, a new req structure is allocated with malloc
for every request. Using a req pool instead of per-request malloc/free
increases performance.

Performance increase: about 5% to 10%.
Signed-off-by: Li, ZhenHua <zhen-hual@hp.com>
---
hw/block/virtio-blk.c | 87 ++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 75 insertions(+), 12 deletions(-)
[...]
* Re: [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Stefan Hajnoczi @ 2014-03-26 9:27 UTC
To: Li, Zhen-Hua; +Cc: qemu-devel
On Tue, Mar 25, 2014 at 04:44:48PM +0800, Li, Zhen-Hua wrote:
> From: "Li, ZhenHua" <zhen-hual@hp.com>
>
> In the virtio-blk module, a new req structure is allocated with malloc
> for every request. Using a req pool instead of per-request malloc/free
> increases performance.
>
> Performance increase: about 5% to 10%.
>
> Signed-off-by: Li, ZhenHua <zhen-hual@hp.com>
> ---
> hw/block/virtio-blk.c | 87 ++++++++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 75 insertions(+), 12 deletions(-)
Please try g_slice_new() instead of implementing a request pool
manually:
https://developer.gnome.org/glib/unstable/glib-Memory-Slices.html
We already use g_slice_new() in other places in QEMU - for example in
the virtio-blk dataplane code.
Also please provide more details about the benchmark you are running.
Which benchmark, what workload/settings, 5-10% increase of which number
(IOPS, BW/CPU%, or something else), etc.
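For example, something along these lines would make the result easy to
interpret and reproduce (an illustrative invocation only, assuming the
virtio-blk disk shows up as /dev/vdb in the guest):

# illustrative fio run inside the guest; not necessarily what you used
fio --name=randread --filename=/dev/vdb --direct=1 --ioengine=libaio \
    --rw=randread --bs=4k --iodepth=32 --runtime=60 --time_based \
    --group_reporting

together with before/after IOPS and host CPU utilization for the same
run.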
Stefan
* Re: [Qemu-devel] [PATCH 1/1] virtio-blk: Use a req pool instead of malloc/free
From: Li, ZhenHua @ 2014-03-26 9:54 UTC
To: Stefan Hajnoczi; +Cc: qemu-devel
Stefan,
Thank you for your suggestions. I will try g_slice_* and provide more
performance test results.
ZhenHua
On 03/26/2014 05:27 PM, Stefan Hajnoczi wrote:
> On Tue, Mar 25, 2014 at 04:44:48PM +0800, Li, Zhen-Hua wrote:
>> From: "Li, ZhenHua" <zhen-hual@hp.com>
>>
>> In the virtio-blk module, a new req structure is allocated with malloc
>> for every request. Using a req pool instead of per-request malloc/free
>> increases performance.
>>
>> Performance increase: about 5% to 10%.
>>
>> Signed-off-by: Li, ZhenHua <zhen-hual@hp.com>
>> ---
>> hw/block/virtio-blk.c | 87 ++++++++++++++++++++++++++++++++++++++++++-------
>> 1 file changed, 75 insertions(+), 12 deletions(-)
>
> Please try g_slice_new() instead of implementing a request pool
> manually:
> https://developer.gnome.org/glib/unstable/glib-Memory-Slices.html
>
> We already use g_slice_new() in other places in QEMU - for example in
> the virtio-blk dataplane code.
>
> Also please provide more details about the benchmark you are running.
> Which benchmark, what workload/settings, 5-10% increase of which number
> (IOPS, BW/CPU%, or something else), etc.
>
> Stefan
>