From: Paul Durrant <paul.durrant@citrix.com>
To: qemu-devel@nongnu.org, qemu-block@nongnu.org,
xen-devel@lists.xenproject.org
Cc: Tim Smith <tim.smith@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Stefano Stabellini <sstabellini@kernel.org>,
Anthony Perard <anthony.perard@citrix.com>,
Kevin Wolf <kwolf@redhat.com>, Max Reitz <mreitz@redhat.com>
Subject: [Qemu-devel] [PATCH v3 2/3] xen-block: improve response latency
Date: Wed, 12 Dec 2018 11:16:25 +0000 [thread overview]
Message-ID: <1544613386-22045-3-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1544613386-22045-1-git-send-email-paul.durrant@citrix.com>
From: Tim Smith <tim.smith@citrix.com>

If the I/O ring is full, the guest cannot send any more requests
until some responses are sent. Sending all available responses only
just before checking for new work leaves the guest little time to
supply new work, so the ring will stall whenever it fills. Not
completing reads as soon as possible also adds latency to the guest.

To alleviate this, complete I/O requests as soon as they come back.
xen_block_send_response() already returns a value indicating whether
a notify should be sent, which is all the batching we need.
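
For illustration, a condensed sketch of the new completion path in
xen_block_complete_aio(), taken from the hunk below (the completion
status handling and request-type switch are elided; this is a sketch,
not the full function):

    static void xen_block_complete_aio(void *opaque, int ret)
    {
        XenBlockRequest *request = opaque;
        XenBlockDataPlane *dataplane = request->dataplane;

        /* ... completion status handling and request-type switch
         * elided ... */

        /*
         * Respond as soon as this request completes, rather than
         * queuing it on a 'finished' list. The return value of
         * xen_block_send_response() indicates whether the guest
         * needs an event channel notification.
         */
        if (xen_block_send_response(request)) {
            Error *local_err = NULL;

            xen_device_notify_event_channel(dataplane->xendev,
                                            dataplane->event_channel,
                                            &local_err);
            if (local_err) {
                error_report_err(local_err);
            }
        }
        xen_block_release_request(request);

        qemu_bh_schedule(dataplane->bh);
    }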
Signed-off-by: Tim Smith <tim.smith@citrix.com>

Re-based and commit comment adjusted.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Anthony Perard <anthony.perard@citrix.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Max Reitz <mreitz@redhat.com>
---
hw/block/dataplane/xen-block.c | 56 ++++++++++++++----------------------------
1 file changed, 18 insertions(+), 38 deletions(-)
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index db17ab5..b4ff2e3 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -55,11 +55,9 @@ struct XenBlockDataPlane {
blkif_back_rings_t rings;
int more_work;
QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
- QLIST_HEAD(finished_head, XenBlockRequest) finished;
QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
int requests_total;
int requests_inflight;
- int requests_finished;
unsigned int max_requests;
BlockBackend *blk;
QEMUBH *bh;
@@ -116,12 +114,10 @@ static void xen_block_finish_request(XenBlockRequest *request)
XenBlockDataPlane *dataplane = request->dataplane;
QLIST_REMOVE(request, list);
- QLIST_INSERT_HEAD(&dataplane->finished, request, list);
dataplane->requests_inflight--;
- dataplane->requests_finished++;
}
-static void xen_block_release_request(XenBlockRequest *request, bool finish)
+static void xen_block_release_request(XenBlockRequest *request)
{
XenBlockDataPlane *dataplane = request->dataplane;
@@ -129,11 +125,7 @@ static void xen_block_release_request(XenBlockRequest *request, bool finish)
reset_request(request);
request->dataplane = dataplane;
QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
- if (finish) {
- dataplane->requests_finished--;
- } else {
- dataplane->requests_inflight--;
- }
+ dataplane->requests_inflight--;
}
/*
@@ -248,6 +240,7 @@ static int xen_block_copy_request(XenBlockRequest *request)
}
static int xen_block_do_aio(XenBlockRequest *request);
+static int xen_block_send_response(XenBlockRequest *request);
static void xen_block_complete_aio(void *opaque, int ret)
{
@@ -312,6 +305,18 @@ static void xen_block_complete_aio(void *opaque, int ret)
default:
break;
}
+ if (xen_block_send_response(request)) {
+ Error *local_err = NULL;
+
+ xen_device_notify_event_channel(dataplane->xendev,
+ dataplane->event_channel,
+ &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ }
+ xen_block_release_request(request);
+
qemu_bh_schedule(dataplane->bh);
done:
@@ -419,7 +424,7 @@ err:
return -1;
}
-static int xen_block_send_response_one(XenBlockRequest *request)
+static int xen_block_send_response(XenBlockRequest *request)
{
XenBlockDataPlane *dataplane = request->dataplane;
int send_notify = 0;
@@ -474,29 +479,6 @@ static int xen_block_send_response_one(XenBlockRequest *request)
return send_notify;
}
-/* walk finished list, send outstanding responses, free requests */
-static void xen_block_send_response_all(XenBlockDataPlane *dataplane)
-{
- XenBlockRequest *request;
- int send_notify = 0;
-
- while (!QLIST_EMPTY(&dataplane->finished)) {
- request = QLIST_FIRST(&dataplane->finished);
- send_notify += xen_block_send_response_one(request);
- xen_block_release_request(request, true);
- }
- if (send_notify) {
- Error *local_err = NULL;
-
- xen_device_notify_event_channel(dataplane->xendev,
- dataplane->event_channel,
- &local_err);
- if (local_err) {
- error_report_err(local_err);
- }
- }
-}
-
static int xen_block_get_request(XenBlockDataPlane *dataplane,
XenBlockRequest *request, RING_IDX rc)
{
@@ -547,7 +529,6 @@ static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
rp = dataplane->rings.common.sring->req_prod;
xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
- xen_block_send_response_all(dataplane);
/*
* If there was more than IO_PLUG_THRESHOLD requests in flight
* when we got here, this is an indication that there the bottleneck
@@ -591,7 +572,7 @@ static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
break;
};
- if (xen_block_send_response_one(request)) {
+ if (xen_block_send_response(request)) {
Error *local_err = NULL;
xen_device_notify_event_channel(dataplane->xendev,
@@ -601,7 +582,7 @@ static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
error_report_err(local_err);
}
}
- xen_block_release_request(request, false);
+ xen_block_release_request(request);
continue;
}
@@ -657,7 +638,6 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
dataplane->file_size = blk_getlength(dataplane->blk);
QLIST_INIT(&dataplane->inflight);
- QLIST_INIT(&dataplane->finished);
QLIST_INIT(&dataplane->freelist);
if (iothread) {
--
2.1.4

Thread overview:
2018-12-12 11:16 [Qemu-devel] [PATCH v3 0/3] Performance improvements for xen_disk^Wxen-block Paul Durrant
2018-12-12 11:16 ` [Qemu-devel] [PATCH v3 1/3] xen-block: improve batching behaviour Paul Durrant
2018-12-12 11:16 ` Paul Durrant [this message]
2018-12-12 11:16 ` [Qemu-devel] [PATCH v3 3/3] xen-block: avoid repeated memory allocation Paul Durrant
2018-12-13 12:24 ` [Qemu-devel] [PATCH v3 0/3] Performance improvements for xen_disk^Wxen-block Anthony PERARD