From: Paul Durrant
Date: Wed, 21 Jun 2017 10:42:22 +0000
Subject: Re: [Qemu-devel] [PATCH 2/3] xen-disk: add support for multi-page shared rings
To: 'Stefano Stabellini'
Cc: "xen-devel@lists.xenproject.org", "qemu-devel@nongnu.org", "qemu-block@nongnu.org", Anthony Perard, Kevin Wolf, Max Reitz

> -----Original Message-----
> From: Stefano Stabellini [mailto:sstabellini@kernel.org]
> Sent: 20 June 2017 23:51
> To: Paul Durrant
> Cc: xen-devel@lists.xenproject.org; qemu-devel@nongnu.org;
> qemu-block@nongnu.org; Stefano Stabellini; Anthony Perard;
> Kevin Wolf; Max Reitz
> Subject: Re: [PATCH 2/3] xen-disk: add support for multi-page shared rings
>
> On Tue, 20 Jun 2017, Paul Durrant wrote:
> > The blkif protocol has had provision for negotiation of multi-page shared
> > rings for some time now and many guest OSes have support in their frontend
> > drivers.
> >
> > This patch makes the necessary modifications to xen-disk to support a
> > shared ring of up to order 4 (i.e. 16 pages).
> >
> > Signed-off-by: Paul Durrant
>
> Thanks for the patch!
>

You're welcome.
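For anyone following along: the standard blkif ring macros round the slot
count down to a power of two, so by my arithmetic a single-page
native-protocol ring carries 32 in-flight requests and an order-4 ring
carries 512. The figures in the sketch below are mine, derived from the
canonical ring layout; they are not something this patch introduces.

    /*
     * Illustrative sketch only: request slots for a native blkif ring
     * spanning nr_pages. Assumes the canonical 64-byte sring header and
     * the 112-byte native blkif_sring_entry; neither number comes from
     * this patch.
     */
    static unsigned int ring_slots(unsigned int nr_pages)
    {
        unsigned int slots = (nr_pages * 4096 - 64) / 112;

        while (slots & (slots - 1)) {
            slots &= slots - 1;      /* round down to a power of two */
        }
        return slots;                /* 1 page -> 32, 16 pages -> 512 */
    }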
> > ---
> > Cc: Stefano Stabellini
> > Cc: Anthony Perard
> > Cc: Kevin Wolf
> > Cc: Max Reitz
> > ---
> >  hw/block/xen_disk.c | 141 ++++++++++++++++++++++++++++++++++++++++------------
> >  1 file changed, 110 insertions(+), 31 deletions(-)
> >
> > diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> > index 9b06e3aa81..a9942d32db 100644
> > --- a/hw/block/xen_disk.c
> > +++ b/hw/block/xen_disk.c
> > @@ -36,8 +36,6 @@
> >
> >  static int batch_maps = 0;
> >
> > -static int max_requests = 32;
> > -
> >  /* ------------------------------------------------------------- */
> >
> >  #define BLOCK_SIZE 512
> > @@ -84,6 +82,8 @@ struct ioreq {
> >      BlockAcctCookie acct;
> >  };
> >
> > +#define MAX_RING_PAGE_ORDER 4
> > +
> >  struct XenBlkDev {
> >      struct XenDevice xendev;  /* must be first */
> >      char *params;
> > @@ -94,7 +94,8 @@ struct XenBlkDev {
> >      bool directiosafe;
> >      const char *fileproto;
> >      const char *filename;
> > -    int ring_ref;
> > +    unsigned int ring_ref[1 << MAX_RING_PAGE_ORDER];
> > +    unsigned int nr_ring_ref;
> >      void *sring;
> >      int64_t file_blk;
> >      int64_t file_size;
> > @@ -110,6 +111,7 @@ struct XenBlkDev {
> >      int requests_total;
> >      int requests_inflight;
> >      int requests_finished;
> > +    unsigned int max_requests;
> >
> >      /* Persistent grants extension */
> >      gboolean feature_discard;
> > @@ -199,7 +201,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
> >      struct ioreq *ioreq = NULL;
> >
> >      if (QLIST_EMPTY(&blkdev->freelist)) {
> > -        if (blkdev->requests_total >= max_requests) {
> > +        if (blkdev->requests_total >= blkdev->max_requests) {
> >              goto out;
> >          }
> >          /* allocate new struct */
> > @@ -905,7 +907,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
> >          ioreq_runio_qemu_aio(ioreq);
> >      }
> >
> > -    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
> > +    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
> >          qemu_bh_schedule(blkdev->bh);
> >      }
> >  }
> > @@ -918,15 +920,6 @@ static void blk_bh(void *opaque)
> >      blk_handle_requests(blkdev);
> >  }
> >
> > -/*
> > - * We need to account for the grant allocations requiring contiguous
> > - * chunks; the worst case number would be
> > - * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
> > - * but in order to keep things simple just use
> > - * 2 * max_req * max_seg.
> > - */
> > -#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
> > -
> >  static void blk_alloc(struct XenDevice *xendev)
> >  {
> >      struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
> > @@ -938,11 +931,6 @@ static void blk_alloc(struct XenDevice *xendev)
> >      if (xen_mode != XEN_EMULATE) {
> >          batch_maps = 1;
> >      }
> > -    if (xengnttab_set_max_grants(xendev->gnttabdev,
> > -            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
> > -        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
> > -                      strerror(errno));
> > -    }
> >  }
> >
> >  static void blk_parse_discard(struct XenBlkDev *blkdev)
> > @@ -1037,6 +1025,9 @@ static int blk_init(struct XenDevice *xendev)
> >                            !blkdev->feature_grant_copy);
> >      xenstore_write_be_int(&blkdev->xendev, "info", info);
> >
> > +    xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
> > +                          MAX_RING_PAGE_ORDER);
> > +
> >      blk_parse_discard(blkdev);
> >
> >      g_free(directiosafe);
> > @@ -1058,12 +1049,25 @@ out_error:
> >      return -1;
> >  }
> >
> > +/*
> > + * We need to account for the grant allocations requiring contiguous
> > + * chunks; the worst case number would be
> > + * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
> > + * but in order to keep things simple just use
> > + * 2 * max_req * max_seg.
> > + */
> > +#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
> > +
> >  static int blk_connect(struct XenDevice *xendev)
> >  {
> >      struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
> >      int pers, index, qflags;
> >      bool readonly = true;
> >      bool writethrough = true;
> > +    int order, ring_ref;
> > +    unsigned int ring_size, max_grants;
> > +    unsigned int i;
> > +    uint32_t *domids;
> >
> >      /* read-only ? */
> >      if (blkdev->directiosafe) {
> > @@ -1138,9 +1142,39 @@ static int blk_connect(struct XenDevice *xendev)
> >      xenstore_write_be_int64(&blkdev->xendev, "sectors",
> >                              blkdev->file_size / blkdev->file_blk);
> >
> > -    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
> > +    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
> > +                             &order) == -1) {
> > +        blkdev->nr_ring_ref = 1;
> > +
> > +        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
> > +                                 &ring_ref) == -1) {
> > +            return -1;
> > +        }
> > +        blkdev->ring_ref[0] = ring_ref;
> > +
> > +    } else if (order >= 0 && order <= MAX_RING_PAGE_ORDER) {
> > +        blkdev->nr_ring_ref = 1 << order;
> > +
> > +        for (i = 0; i < blkdev->nr_ring_ref; i++) {
> > +            char *key;
> > +
> > +            key = g_strdup_printf("ring-ref%u", i);
> > +            if (!key) {
> > +                return -1;
> > +            }
> > +
> > +            if (xenstore_read_fe_int(&blkdev->xendev, key,
> > +                                     &ring_ref) == -1) {
>
> it looks like we are leaking key.
>

Indeed it does. Good spot.

>
> > +                return -1;
> > +            }
> > +            blkdev->ring_ref[i] = ring_ref;
> > +
> > +            g_free(key);
> > +        }
> > +    } else {
> >          return -1;
>
> I would like to print a warning if the requested ring size exceeds our
> limit.
>

Sure. That's a good idea.
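To be concrete, I'm thinking along these lines for v2; both snippets are
untested sketches and the exact message wording is TBD. For the leak,
free the key on the early-exit path too:

    if (xenstore_read_fe_int(&blkdev->xendev, key,
                             &ring_ref) == -1) {
        g_free(key);    /* don't leak the key when bailing out */
        return -1;
    }

...and for the oversized ring, a warning in the final else branch,
re-using xen_pv_printf() as elsewhere in this file:

    } else {
        /* Frontend asked for a bigger ring than we support */
        xen_pv_printf(xendev, 0, "ring-page-order %d exceeds max %d\n",
                      order, MAX_RING_PAGE_ORDER);
        return -1;
    }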
Cheers,

  Paul

>
> >      }
> > +
> >      if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
> >                               &blkdev->xendev.remote_port) == -1) {
> >          return -1;
> > @@ -1163,41 +1197,85 @@ static int blk_connect(struct XenDevice *xendev)
> >          blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
> >      }
> >
> > -    blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
> > -                                            blkdev->xendev.dom,
> > -                                            blkdev->ring_ref,
> > -                                            PROT_READ | PROT_WRITE);
> > +    ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
> > +    switch (blkdev->protocol) {
> > +    case BLKIF_PROTOCOL_NATIVE:
> > +    {
> > +        blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
> > +        break;
> > +    }
> > +    case BLKIF_PROTOCOL_X86_32:
> > +    {
> > +        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
> > +        break;
> > +    }
> > +    case BLKIF_PROTOCOL_X86_64:
> > +    {
> > +        blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
> > +        break;
> > +    }
> > +    default:
> > +        return -1;
> > +    }
> > +
> > +    /* Calculate the maximum number of grants needed by ioreqs */
> > +    max_grants = MAX_GRANTS(blkdev->max_requests,
> > +                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
> > +    /* Add on the number needed for the ring pages */
> > +    max_grants += blkdev->nr_ring_ref;
> > +
> > +    if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
> > +        xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
> > +                      strerror(errno));
> > +        return -1;
> > +    }
> > +
> > +    domids = g_malloc0_n(blkdev->nr_ring_ref, sizeof(uint32_t));
> > +    for (i = 0; i < blkdev->nr_ring_ref; i++) {
> > +        domids[i] = blkdev->xendev.dom;
> > +    }
> > +
> > +    blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
> > +                                             blkdev->nr_ring_ref,
> > +                                             domids,
> > +                                             blkdev->ring_ref,
> > +                                             PROT_READ | PROT_WRITE);
> > +
> > +    g_free(domids);
> > +
> >      if (!blkdev->sring) {
> >          return -1;
> >      }
> > +
> >      blkdev->cnt_map++;
> >
> >      switch (blkdev->protocol) {
> >      case BLKIF_PROTOCOL_NATIVE:
> >      {
> >          blkif_sring_t *sring_native = blkdev->sring;
> > -        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
> > +        BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
> >          break;
> >      }
> >      case BLKIF_PROTOCOL_X86_32:
> >      {
> >          blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
> >
> > -        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
> > +        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, ring_size);
> >          break;
> >      }
> >      case BLKIF_PROTOCOL_X86_64:
> >      {
> >          blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
> >
> > -        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
> > +        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, ring_size);
> >          break;
> >      }
> >      }
> >
> >      if (blkdev->feature_persistent) {
> >          /* Init persistent grants */
> > -        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
> > +        blkdev->max_grants = blkdev->max_requests *
> > +                             BLKIF_MAX_SEGMENTS_PER_REQUEST;
> >          blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
> >                                                    NULL, NULL,
> >                                                    batch_maps ?
> > @@ -1209,9 +1287,9 @@ static int blk_connect(struct XenDevice *xendev)
> >
> >      xen_be_bind_evtchn(&blkdev->xendev);
> >
> > -    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
> > +    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
> >                    "remote port %d, local port %d\n",
> > -                  blkdev->xendev.protocol, blkdev->ring_ref,
> > +                  blkdev->xendev.protocol, blkdev->nr_ring_ref,
> >                    blkdev->xendev.remote_port, blkdev->xendev.local_port);
> >      return 0;
> >  }
> > @@ -1228,7 +1306,8 @@ static void blk_disconnect(struct XenDevice *xendev)
> >      xen_pv_unbind_evtchn(&blkdev->xendev);
> >
> >      if (blkdev->sring) {
> > -        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
> > +        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
> > +                        blkdev->nr_ring_ref);
> >          blkdev->cnt_map--;
> >          blkdev->sring = NULL;
> >      }
> > --
> > 2.11.0
> >