qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Paul Durrant <Paul.Durrant@citrix.com>
To: Tim Smith <tim.smith@citrix.com>,
	"xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>,
	"qemu-devel@nongnu.org" <qemu-devel@nongnu.org>,
	"qemu-block@nongnu.org" <qemu-block@nongnu.org>
Cc: Anthony Perard <anthony.perard@citrix.com>,
	Kevin Wolf <kwolf@redhat.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Max Reitz <mreitz@redhat.com>
Subject: Re: [Qemu-devel] [PATCH 2/3] Improve xen_disk response latency
Date: Fri, 2 Nov 2018 11:14:54 +0000	[thread overview]
Message-ID: <e840576884aa4d848d0ea4f58ec44ec8@AMSPEX02CL03.citrite.net> (raw)
In-Reply-To: <154115286450.11300.208978412106583040.stgit@dhcp-3-135.uk.xensource.com>



> -----Original Message-----
> From: Tim Smith [mailto:tim.smith@citrix.com]
> Sent: 02 November 2018 10:01
> To: xen-devel@lists.xenproject.org; qemu-devel@nongnu.org; qemu-
> block@nongnu.org
> Cc: Anthony Perard <anthony.perard@citrix.com>; Kevin Wolf
> <kwolf@redhat.com>; Paul Durrant <Paul.Durrant@citrix.com>; Stefano
> Stabellini <sstabellini@kernel.org>; Max Reitz <mreitz@redhat.com>
> Subject: [PATCH 2/3] Improve xen_disk response latency
> 
> If the I/O ring is full, the guest cannot send any more requests
> until some responses are sent. Only sending all available responses
> just before checking for new work does not leave much time for the
> guest to supply new work, so this will cause stalls if the ring gets
> full. Also, not completing reads as soon as possible adds latency
> to the guest.
> 
> To alleviate that, complete IO requests as soon as they come back.
> blk_send_response() already returns a value indicating whether
> a notify should be sent, which is all the batching we need.
> 
> Signed-off-by: Tim Smith <tim.smith@citrix.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> ---
>  hw/block/xen_disk.c |   43 ++++++++++++-------------------------------
>  1 file changed, 12 insertions(+), 31 deletions(-)
> 
> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> index cb2881b7e6..b506e23868 100644
> --- a/hw/block/xen_disk.c
> +++ b/hw/block/xen_disk.c
> @@ -83,11 +83,9 @@ struct XenBlkDev {
> 
>      /* request lists */
>      QLIST_HEAD(inflight_head, ioreq) inflight;
> -    QLIST_HEAD(finished_head, ioreq) finished;
>      QLIST_HEAD(freelist_head, ioreq) freelist;
>      int                 requests_total;
>      int                 requests_inflight;
> -    int                 requests_finished;
>      unsigned int        max_requests;
> 
>      gboolean            feature_discard;
> @@ -104,6 +102,9 @@ struct XenBlkDev {
>  /* Threshold of in-flight requests above which we will start using
>   * blk_io_plug()/blk_io_unplug() to batch requests */
>  #define IO_PLUG_THRESHOLD 1
> +
> +static int blk_send_response(struct ioreq *ioreq);
> +
>  /* ------------------------------------------------------------- */
> 
>  static void ioreq_reset(struct ioreq *ioreq)
> @@ -155,12 +156,10 @@ static void ioreq_finish(struct ioreq *ioreq)
>      struct XenBlkDev *blkdev = ioreq->blkdev;
> 
>      QLIST_REMOVE(ioreq, list);
> -    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
>      blkdev->requests_inflight--;
> -    blkdev->requests_finished++;
>  }
> 
> -static void ioreq_release(struct ioreq *ioreq, bool finish)
> +static void ioreq_release(struct ioreq *ioreq)
>  {
>      struct XenBlkDev *blkdev = ioreq->blkdev;
> 
> @@ -168,11 +167,7 @@ static void ioreq_release(struct ioreq *ioreq, bool
> finish)
>      ioreq_reset(ioreq);
>      ioreq->blkdev = blkdev;
>      QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
> -    if (finish) {
> -        blkdev->requests_finished--;
> -    } else {
> -        blkdev->requests_inflight--;
> -    }
> +    blkdev->requests_inflight--;
>  }
> 
>  /*
> @@ -351,6 +346,10 @@ static void qemu_aio_complete(void *opaque, int ret)
>      default:
>          break;
>      }
> +    if (blk_send_response(ioreq)) {
> +        xen_pv_send_notify(&blkdev->xendev);
> +    }
> +    ioreq_release(ioreq);
>      qemu_bh_schedule(blkdev->bh);
> 
>  done:
> @@ -455,7 +454,7 @@ err:
>      return -1;
>  }
> 
> -static int blk_send_response_one(struct ioreq *ioreq)
> +static int blk_send_response(struct ioreq *ioreq)
>  {
>      struct XenBlkDev  *blkdev = ioreq->blkdev;
>      int               send_notify   = 0;
> @@ -504,22 +503,6 @@ static int blk_send_response_one(struct ioreq *ioreq)
>      return send_notify;
>  }
> 
> -/* walk finished list, send outstanding responses, free requests */
> -static void blk_send_response_all(struct XenBlkDev *blkdev)
> -{
> -    struct ioreq *ioreq;
> -    int send_notify = 0;
> -
> -    while (!QLIST_EMPTY(&blkdev->finished)) {
> -        ioreq = QLIST_FIRST(&blkdev->finished);
> -        send_notify += blk_send_response_one(ioreq);
> -        ioreq_release(ioreq, true);
> -    }
> -    if (send_notify) {
> -        xen_pv_send_notify(&blkdev->xendev);
> -    }
> -}
> -
>  static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq,
> RING_IDX rc)
>  {
>      switch (blkdev->protocol) {
> @@ -554,7 +537,6 @@ static void blk_handle_requests(struct XenBlkDev
> *blkdev)
>      rp = blkdev->rings.common.sring->req_prod;
>      xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
> 
> -    blk_send_response_all(blkdev);
>      /* If there were more than IO_PLUG_THRESHOLD ioreqs in flight
>       * when we got here, this is an indication that the bottleneck
>       * is below us, so it's worth beginning to batch up I/O requests
> @@ -597,10 +579,10 @@ static void blk_handle_requests(struct XenBlkDev
> *blkdev)
>                  break;
>              };
> 
> -            if (blk_send_response_one(ioreq)) {
> +            if (blk_send_response(ioreq)) {
>                  xen_pv_send_notify(&blkdev->xendev);
>              }
> -            ioreq_release(ioreq, false);
> +            ioreq_release(ioreq);
>              continue;
>          }
> 
> @@ -646,7 +628,6 @@ static void blk_alloc(struct XenDevice *xendev)
>      trace_xen_disk_alloc(xendev->name);
> 
>      QLIST_INIT(&blkdev->inflight);
> -    QLIST_INIT(&blkdev->finished);
>      QLIST_INIT(&blkdev->freelist);
> 
>      blkdev->iothread = iothread_create(xendev->name, &err);


  reply	other threads:[~2018-11-02 11:15 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-11-02 10:00 [Qemu-devel] [PATCH 0/3] Performance improvements for xen_disk v2 Tim Smith
2018-11-02 10:00 ` [Qemu-devel] [PATCH 1/3] Improve xen_disk batching behaviour Tim Smith
2018-11-02 11:14   ` Paul Durrant
2018-11-02 13:53   ` Anthony PERARD
2018-11-02 10:01 ` [Qemu-devel] [PATCH 2/3] Improve xen_disk response latency Tim Smith
2018-11-02 11:14   ` Paul Durrant [this message]
2018-11-02 13:53   ` Anthony PERARD
2018-11-02 10:01 ` [Qemu-devel] [PATCH 3/3] Avoid repeated memory allocation in xen_disk Tim Smith
2018-11-02 11:15   ` Paul Durrant
2018-11-02 13:53   ` Anthony PERARD
2018-11-02 11:04 ` [Qemu-devel] xen_disk qdevification (was: [PATCH 0/3] Performance improvements for xen_disk v2) Kevin Wolf
2018-11-02 11:13   ` Paul Durrant
2018-11-02 12:14     ` Kevin Wolf
2018-11-05 15:57     ` [Qemu-devel] xen_disk qdevification Markus Armbruster
2018-11-05 16:15       ` Paul Durrant
2018-11-08 14:00       ` Paul Durrant
2018-11-08 15:21         ` Kevin Wolf
2018-11-08 15:43           ` Paul Durrant
2018-11-08 16:44             ` Paul Durrant
2018-11-09 10:27               ` Paul Durrant
2018-11-09 10:40                 ` Kevin Wolf
2018-12-12  8:59   ` [Qemu-devel] [Xen-devel] xen_disk qdevification (was: [PATCH 0/3] Performance improvements for xen_disk v2) Olaf Hering
2018-12-12  9:22     ` Paul Durrant
2018-12-12 12:03     ` Kevin Wolf
2018-12-12 12:04     ` [Qemu-devel] [Xen-devel] xen_disk qdevification Markus Armbruster
  -- strict thread matches above, loose matches on Subject: below --
2018-11-02  9:29 [Qemu-devel] [PATCH 0/3] Performance improvements for xen_disk Tim Smith
2018-11-02  9:29 ` [Qemu-devel] [PATCH 2/3] Improve xen_disk response latency Tim Smith
2018-09-07 10:21 [Qemu-devel] [PATCH 1/3] Improve xen_disk batching behaviour Tim Smith
2018-09-07 10:21 ` [Qemu-devel] [PATCH 2/3] Improve xen_disk response latency Tim Smith
2018-09-07 16:03   ` Paul Durrant

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=e840576884aa4d848d0ea4f58ec44ec8@AMSPEX02CL03.citrite.net \
    --to=paul.durrant@citrix.com \
    --cc=anthony.perard@citrix.com \
    --cc=kwolf@redhat.com \
    --cc=mreitz@redhat.com \
    --cc=qemu-block@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=sstabellini@kernel.org \
    --cc=tim.smith@citrix.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).