Date: Tue, 9 Sep 2014 16:53:57 +0200
From: Benoît Canet
Message-ID: <20140909145356.GA355@irqsave.net>
References: <1409848030-16568-1-git-send-email-ming.lei@canonical.com> <1409848030-16568-2-git-send-email-ming.lei@canonical.com>
In-Reply-To: <1409848030-16568-2-git-send-email-ming.lei@canonical.com>
Subject: Re: [Qemu-devel] [PATCH v2 1/4] linux-aio: fix submit aio as a batch
To: Ming Lei
Cc: Kevin Wolf, Peter Maydell, qemu-devel@nongnu.org, Stefan Hajnoczi, Paolo Bonzini, Benoît Canet

On Friday 05 Sep 2014 at 00:27:07 (+0800), Ming Lei wrote:
> In the enqueue path, we can't complete request, otherwise
> "Co-routine re-entered recursively" may be caused, so this
> patch fixes the issue with the following ideas:
>
> - for -EAGAIN or partial completion, retry the submission by
>   scheduling a BH in following completion cb
> - for part of completion, also update the io queue
> - for other failure, return the failure if in enqueue path,
>   otherwise, abort all queued I/O
>
> Signed-off-by: Ming Lei
> ---
>  block/linux-aio.c |  106 ++++++++++++++++++++++++++++++++++++++++++++++------------
>  1 file changed, 81 insertions(+), 25 deletions(-)
>
> diff --git a/block/linux-aio.c b/block/linux-aio.c
> index 9aca758..a06576d 100644
> --- a/block/linux-aio.c
> +++ b/block/linux-aio.c
> @@ -38,11 +38,19 @@ struct qemu_laiocb {
>      QLIST_ENTRY(qemu_laiocb) node;
>  };
>
> -typedef struct {
> +/*
> + * TODO: support to batch I/O from multiple bs in one same
> + * AIO context, one important use case is multi-lun scsi,
> + * so in future the IO queue should be per AIO context.
> + */
> +typedef struct LaioQueue {
>      struct iocb *iocbs[MAX_QUEUED_IO];
>      int plugged;
> -    unsigned int size;
> -    unsigned int idx;
> +    uint32 size;
> +    uint32 idx;

Sorry Ming, I said crap about struct, size and idx.
I initially misread the diff and thought you were adding this.
You were right from the start.
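
For anyone skimming the thread, here is a minimal standalone sketch of the
queue bookkeeping the commit message describes: a fixed-size array of pending
requests indexed by idx, and a submit step that may accept only a prefix of
the queue, so the unsubmitted tail is shifted back to the front for a later
retry. This is not QEMU code; all names (demo_queue, demo_submit,
DEMO_MAX_QUEUED) are made up for illustration.

#include <stdio.h>

#define DEMO_MAX_QUEUED 8

struct demo_queue {
    int reqs[DEMO_MAX_QUEUED];   /* stands in for struct iocb *iocbs[] */
    unsigned int size;           /* capacity, like io_q.size */
    unsigned int idx;            /* number of queued requests, like io_q.idx */
};

/* Pretend the kernel accepted only the first `accepted` requests. */
static unsigned int demo_submit(struct demo_queue *q, unsigned int accepted)
{
    unsigned int i, j = 0;

    if (accepted > q->idx) {
        accepted = q->idx;
    }
    printf("submitted %u of %u requests\n", accepted, q->idx);

    /* Shift the unsubmitted tail to the front, like ioq_submit() does. */
    for (i = accepted; i < q->idx; i++) {
        q->reqs[j++] = q->reqs[i];
    }
    q->idx -= accepted;          /* these stay queued and are retried later */
    return accepted;
}

int main(void)
{
    struct demo_queue q = { .size = DEMO_MAX_QUEUED, .idx = 0 };
    unsigned int i;

    for (i = 0; i < 5; i++) {
        q.reqs[q.idx++] = (int)i;    /* enqueue five requests */
    }
    demo_submit(&q, 3);              /* partial submission: 2 left over */
    printf("%u requests still queued for retry\n", q.idx);
    return 0;
}

Compiling and running this prints "submitted 3 of 5 requests" and then
"2 requests still queued for retry", which mirrors the state the new
ioq_submit() leaves behind for the retry BH to pick up.
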
> +
> +    /* handle -EAGAIN and partial completion */
> +    QEMUBH *retry;
>  } LaioQueue;
>
>  struct qemu_laio_state {
> @@ -138,6 +146,13 @@ static void qemu_laio_completion_bh(void *opaque)
>      }
>  }
>
> +static void qemu_laio_start_retry(struct qemu_laio_state *s)
> +{
> +    if (s->io_q.idx) {
> +        qemu_bh_schedule(s->io_q.retry);
> +    }
> +}
> +
>  static void qemu_laio_completion_cb(EventNotifier *e)
>  {
>      struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);
> @@ -145,6 +160,7 @@ static void qemu_laio_completion_cb(EventNotifier *e)
>      if (event_notifier_test_and_clear(&s->e)) {
>          qemu_bh_schedule(s->completion_bh);
>      }
> +    qemu_laio_start_retry(s);
>  }
>
>  static void laio_cancel(BlockDriverAIOCB *blockacb)
> @@ -164,6 +180,7 @@ static void laio_cancel(BlockDriverAIOCB *blockacb)
>      ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
>      if (ret == 0) {
>          laiocb->ret = -ECANCELED;
> +        qemu_laio_start_retry(laiocb->ctx);
>          return;
>      }
>
> @@ -191,45 +208,80 @@ static void ioq_init(LaioQueue *io_q)
>      io_q->plugged = 0;
>  }
>
> -static int ioq_submit(struct qemu_laio_state *s)
> +static void abort_queue(struct qemu_laio_state *s)
> +{
> +    int i;
> +    for (i = 0; i < s->io_q.idx; i++) {
> +        struct qemu_laiocb *laiocb = container_of(s->io_q.iocbs[i],
> +                                                  struct qemu_laiocb,
> +                                                  iocb);
> +        laiocb->ret = -EIO;
> +        qemu_laio_process_completion(s, laiocb);
> +    }
> +}
> +
> +static int ioq_submit(struct qemu_laio_state *s, bool enqueue)
>  {
>      int ret, i = 0;
>      int len = s->io_q.idx;
> +    int j = 0;
>
> -    do {
> -        ret = io_submit(s->ctx, len, s->io_q.iocbs);
> -    } while (i++ < 3 && ret == -EAGAIN);
> +    if (!len) {
> +        return 0;
> +    }
>
> -    /* empty io queue */
> -    s->io_q.idx = 0;
> +    ret = io_submit(s->ctx, len, s->io_q.iocbs);
> +    if (ret == -EAGAIN) { /* retry in following completion cb */
> +        return 0;
> +    } else if (ret < 0) {
> +        if (enqueue) {
> +            return ret;
> +        }
>
> -    if (ret < 0) {
> -        i = 0;
> -    } else {
> -        i = ret;
> +        /* in non-queue path, all IOs have to be completed */
> +        abort_queue(s);
> +        ret = len;
> +    } else if (ret == 0) {
> +        goto out;
>      }
>
> -    for (; i < len; i++) {
> -        struct qemu_laiocb *laiocb =
> -            container_of(s->io_q.iocbs[i], struct qemu_laiocb, iocb);
> -
> -        laiocb->ret = (ret < 0) ? ret : -EIO;
> -        qemu_laio_process_completion(s, laiocb);
> +    for (i = ret; i < len; i++) {
> +        s->io_q.iocbs[j++] = s->io_q.iocbs[i];
>      }
> +
> + out:
> +    /*
> +     * update io queue, for partial completion, retry will be
> +     * started automatically in following completion cb.
> +     */
> +    s->io_q.idx -= ret;
> +
>      return ret;
>  }
>
> -static void ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
> +static void ioq_submit_retry(void *opaque)
> +{
> +    struct qemu_laio_state *s = opaque;
> +    ioq_submit(s, false);
> +}
> +
> +static int ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
>  {
>      unsigned int idx = s->io_q.idx;
>
> +    if (unlikely(idx == s->io_q.size)) {
> +        return -1;
> +    }
> +
>      s->io_q.iocbs[idx++] = iocb;
>      s->io_q.idx = idx;
>
> -    /* submit immediately if queue is full */
> -    if (idx == s->io_q.size) {
> -        ioq_submit(s);
> +    /* submit immediately if queue depth is above 2/3 */
> +    if (idx > s->io_q.size * 2 / 3) {
> +        return ioq_submit(s, true);
>      }
> +
> +    return 0;
>  }
>
>  void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
> @@ -251,7 +303,7 @@ int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
>      }
>
>      if (s->io_q.idx > 0) {
> -        ret = ioq_submit(s);
> +        ret = ioq_submit(s, false);
>      }
>
>      return ret;
> @@ -295,7 +347,9 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
>              goto out_free_aiocb;
>          }
>      } else {
> -        ioq_enqueue(s, iocbs);
> +        if (ioq_enqueue(s, iocbs) < 0) {
> +            goto out_free_aiocb;
> +        }
>      }
>      return &laiocb->common;
>
> @@ -310,12 +364,14 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
>
>      aio_set_event_notifier(old_context, &s->e, NULL);
>      qemu_bh_delete(s->completion_bh);
> +    qemu_bh_delete(s->io_q.retry);
>  }
>
>  void laio_attach_aio_context(void *s_, AioContext *new_context)
>  {
>      struct qemu_laio_state *s = s_;
>
> +    s->io_q.retry = aio_bh_new(new_context, ioq_submit_retry, s);
>      s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
>      aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
>  }
> --
> 1.7.9.5
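
One more standalone sketch, this time of the enqueue policy in ioq_enqueue()
above: fail the request when the queue is already full, otherwise queue it
and trigger a submission once the depth goes above 2/3 of the capacity
instead of waiting for the queue to fill up completely (presumably to keep
some headroom now that unsubmitted requests can stay in the queue). Again
this is not QEMU code and every name in it is hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_QUEUE_SIZE 9

static unsigned int demo_idx;            /* current depth, like io_q.idx */

static bool demo_enqueue(int req)
{
    if (demo_idx == DEMO_QUEUE_SIZE) {
        return false;                    /* full: the caller must fail the request */
    }
    (void)req;                           /* payload omitted in this sketch */
    demo_idx++;

    /* Same threshold as the patch: submit once depth > 2/3 of capacity. */
    if (demo_idx > DEMO_QUEUE_SIZE * 2 / 3) {
        printf("depth %u > %u: submitting the batch\n",
               demo_idx, DEMO_QUEUE_SIZE * 2 / 3);
        demo_idx = 0;                    /* assume the whole batch was accepted */
    }
    return true;
}

int main(void)
{
    int i;

    for (i = 0; i < 20; i++) {
        if (!demo_enqueue(i)) {
            printf("request %d rejected, queue full\n", i);
        }
    }
    return 0;
}

With DEMO_QUEUE_SIZE at 9 the batch goes out every time the depth reaches 7,
so the full-queue branch never fires in this toy loop; in the real code it
only matters when a submission is deferred and requests keep arriving.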