* [Qemu-devel] [PATCH v2 1/2] xen: add a global indicator for grant copy being available
2017-09-22 12:07 [Qemu-devel] [PATCH v2 0/2] xen: fix gnttab handling with old dom0 kernels Juergen Gross
@ 2017-09-22 12:07 ` Juergen Gross
2017-09-22 12:07 ` [Qemu-devel] [PATCH v2 2/2] xen: don't try setting max grants multiple times Juergen Gross
1 sibling, 0 replies; 4+ messages in thread
From: Juergen Gross @ 2017-09-22 12:07 UTC
To: qemu-devel, xen-devel; +Cc: anthony.perard, kraxel, sstabellini, Juergen Gross
The Xen qdisk backend needs to test whether the grant copy operation is
available in the kernel. Unfortunately this collides with using
xengnttab_set_max_grants() on some kernels, as that operation has to
be the first one performed after opening the gnttab device.
In order to solve this problem, test for the availability of grant copy
in xen_be_init() by opening the gnttab device just for that purpose and
closing it again afterwards. Advertise the availability via a global
flag and use that flag in the qdisk backend.
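For reference, a minimal sketch of the probe performed in xen_be_init(),
assembled from the hunks below (the standalone probe_grant_copy() helper
name is illustrative only and not part of the patch):

#include <stdbool.h>
#include <xengnttab.h>   /* xengnttab_open/_grant_copy/_close */

bool xen_feature_grant_copy;

static void probe_grant_copy(void)
{
    /* Open a throw-away handle, so the backend's real handle still has
     * xengnttab_set_max_grants() available as its first operation. */
    xengnttab_handle *gnttabdev = xengnttab_open(NULL, 0);

    if (gnttabdev != NULL) {
        /* A zero-length copy succeeds exactly when the kernel supports
         * the grant copy operation. */
        xen_feature_grant_copy =
            (xengnttab_grant_copy(gnttabdev, 0, NULL) == 0);
        xengnttab_close(gnttabdev);
    }
}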
Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Anthony PERARD <anthony.perard@citrix.com>
---
V2:
- gboolean -> bool (Anthony PERARD)
---
hw/block/xen_disk.c | 18 ++++++------------
hw/xen/xen_backend.c | 11 +++++++++++
include/hw/xen/xen_backend.h | 1 +
3 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 536e2ee735..62506e3167 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -121,9 +121,6 @@ struct XenBlkDev {
unsigned int persistent_gnt_count;
unsigned int max_grants;
- /* Grant copy */
- gboolean feature_grant_copy;
-
/* qemu block driver */
DriveInfo *dinfo;
BlockBackend *blk;
@@ -616,7 +613,7 @@ static void qemu_aio_complete(void *opaque, int ret)
return;
}
- if (ioreq->blkdev->feature_grant_copy) {
+ if (xen_feature_grant_copy) {
switch (ioreq->req.operation) {
case BLKIF_OP_READ:
/* in case of failure ioreq->aio_errors is increased */
@@ -638,7 +635,7 @@ static void qemu_aio_complete(void *opaque, int ret)
}
ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
- if (!ioreq->blkdev->feature_grant_copy) {
+ if (!xen_feature_grant_copy) {
ioreq_unmap(ioreq);
}
ioreq_finish(ioreq);
@@ -698,7 +695,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
struct XenBlkDev *blkdev = ioreq->blkdev;
- if (ioreq->blkdev->feature_grant_copy) {
+ if (xen_feature_grant_copy) {
ioreq_init_copy_buffers(ioreq);
if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
@@ -750,7 +747,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
}
default:
/* unknown operation (shouldn't happen -- parse catches this) */
- if (!ioreq->blkdev->feature_grant_copy) {
+ if (!xen_feature_grant_copy) {
ioreq_unmap(ioreq);
}
goto err;
@@ -1010,18 +1007,15 @@ static int blk_init(struct XenDevice *xendev)
blkdev->file_blk = BLOCK_SIZE;
- blkdev->feature_grant_copy =
- (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
-
xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
- blkdev->feature_grant_copy ? "enabled" : "disabled");
+ xen_feature_grant_copy ? "enabled" : "disabled");
/* fill info
* blk_connect supplies sector-size and sectors
*/
xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
- !blkdev->feature_grant_copy);
+ !xen_feature_grant_copy);
xenstore_write_be_int(&blkdev->xendev, "info", info);
xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c
index c46cbb0759..0f849a26d2 100644
--- a/hw/xen/xen_backend.c
+++ b/hw/xen/xen_backend.c
@@ -44,6 +44,7 @@ BusState *xen_sysbus;
/* public */
struct xs_handle *xenstore = NULL;
const char *xen_protocol;
+bool xen_feature_grant_copy;
/* private */
static int debug;
@@ -519,6 +520,8 @@ void xenstore_update_fe(char *watch, struct XenDevice *xendev)
int xen_be_init(void)
{
+ xengnttab_handle *gnttabdev;
+
xenstore = xs_daemon_open();
if (!xenstore) {
xen_pv_printf(NULL, 0, "can't connect to xenstored\n");
@@ -532,6 +535,14 @@ int xen_be_init(void)
goto err;
}
+ gnttabdev = xengnttab_open(NULL, 0);
+ if (gnttabdev != NULL) {
+ if (xengnttab_grant_copy(gnttabdev, 0, NULL) == 0) {
+ xen_feature_grant_copy = true;
+ }
+ xengnttab_close(gnttabdev);
+ }
+
xen_sysdev = qdev_create(NULL, TYPE_XENSYSDEV);
qdev_init_nofail(xen_sysdev);
xen_sysbus = qbus_create(TYPE_XENSYSBUS, DEVICE(xen_sysdev), "xen-sysbus");
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index 8a6fbcbe20..3a27692407 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -16,6 +16,7 @@
/* variables */
extern struct xs_handle *xenstore;
extern const char *xen_protocol;
+extern bool xen_feature_grant_copy;
extern DeviceState *xen_sysdev;
extern BusState *xen_sysbus;
--
2.12.3
* [Qemu-devel] [PATCH v2 2/2] xen: don't try setting max grants multiple times
2017-09-22 12:07 [Qemu-devel] [PATCH v2 0/2] xen: fix gnttab handling with old dom0 kernels Juergen Gross
2017-09-22 12:07 ` [Qemu-devel] [PATCH v2 1/2] xen: add a global indicator for grant copy being available Juergen Gross
@ 2017-09-22 12:07 ` Juergen Gross
2017-09-25 14:41 ` Anthony PERARD
1 sibling, 1 reply; 4+ messages in thread
From: Juergen Gross @ 2017-09-22 12:07 UTC
To: qemu-devel, xen-devel; +Cc: anthony.perard, kraxel, sstabellini, Juergen Gross
Calling xengnttab_set_max_grants() more than once on the same file
handle might fail on some kernels, as this operation is allowed only
once per handle.
This is a problem for the qdisk backend, as blk_connect() can be
called multiple times for a domain, e.g. when grub-xen is used to
boot it.
So instead of letting the generic backend code open the gnttab device,
do it in blk_connect() and close it again in blk_disconnect().
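A rough sketch of the resulting open/close lifecycle (the *_sketch helper
names are illustrative, error reporting is trimmed, and the gnttabdev
member of struct XenDevice is assumed as in QEMU's Xen PV backend; the
real hunks follow below):

static int blk_connect_sketch(struct XenDevice *xendev, uint32_t max_grants)
{
    /* Fresh handle per connect, so xengnttab_set_max_grants() is always
     * the first operation on it, even if the domain reconnects. */
    xendev->gnttabdev = xengnttab_open(NULL, 0);
    if (xendev->gnttabdev == NULL) {
        return -1;
    }
    if (xengnttab_set_max_grants(xendev->gnttabdev, max_grants)) {
        xengnttab_close(xendev->gnttabdev);
        xendev->gnttabdev = NULL;
        return -1;
    }
    return 0;
}

static void blk_disconnect_sketch(struct XenDevice *xendev)
{
    /* Closing here lets the next blk_connect() start from a clean handle. */
    if (xendev->gnttabdev) {
        xengnttab_close(xendev->gnttabdev);
        xendev->gnttabdev = NULL;
    }
}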
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- always call blk_disconnect() from blk_free() in order to have the
gnttab device node closed (Anthony Perard)
---
hw/block/xen_disk.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 62506e3167..e431bd89e8 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -1220,6 +1220,12 @@ static int blk_connect(struct XenDevice *xendev)
/* Add on the number needed for the ring pages */
max_grants += blkdev->nr_ring_ref;
+ blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
+ if (blkdev->xendev.gnttabdev == NULL) {
+ xen_pv_printf(xendev, 0, "xengnttab_open failed: %s\n",
+ strerror(errno));
+ return -1;
+ }
if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
strerror(errno));
@@ -1327,6 +1333,11 @@ static void blk_disconnect(struct XenDevice *xendev)
}
blkdev->feature_persistent = false;
}
+
+ if (blkdev->xendev.gnttabdev) {
+ xengnttab_close(blkdev->xendev.gnttabdev);
+ blkdev->xendev.gnttabdev = NULL;
+ }
}
static int blk_free(struct XenDevice *xendev)
@@ -1334,9 +1345,7 @@ static int blk_free(struct XenDevice *xendev)
struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
struct ioreq *ioreq;
- if (blkdev->blk || blkdev->sring) {
- blk_disconnect(xendev);
- }
+ blk_disconnect(xendev);
while (!QLIST_EMPTY(&blkdev->freelist)) {
ioreq = QLIST_FIRST(&blkdev->freelist);
@@ -1363,7 +1372,6 @@ static void blk_event(struct XenDevice *xendev)
struct XenDevOps xen_blkdev_ops = {
.size = sizeof(struct XenBlkDev),
- .flags = DEVOPS_FLAG_NEED_GNTDEV,
.alloc = blk_alloc,
.init = blk_init,
.initialise = blk_connect,
--
2.12.3