From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
peter.maydell@linaro.org, xen-devel@lists.xensource.com,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Roger Pau Monne <roger.pau@citrix.com>
Subject: [Qemu-devel] [PULL for-2.2 2/2] xen_disk: fix unmapping of persistent grants
Date: Fri, 14 Nov 2014 11:30:10 +0000
Message-ID: <1415964611-20838-2-git-send-email-stefano.stabellini@eu.citrix.com>
In-Reply-To: <alpine.DEB.2.02.1411141112540.26318@kaball.uk.xensource.com>

From: Roger Pau Monne <roger.pau@citrix.com>

This patch fixes two issues with persistent grants and the disk PV backend
(Qdisk):

- Keep track of the memory regions where persistent grants have been
  mapped, since we need to unmap them as a whole: it is not possible to
  unmap a single grant once it has been batch-mapped (see the sketch
  after this list). A new check has also been added so that, in the
  batch_maps case, persistent grants are only used if the whole mapped
  region can be persistently mapped.
- Unmap persistent grants before switching to the closed state, so the
  frontend can also free them.
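As background for the first point, here is a minimal, illustrative sketch
of the batch-mapping asymmetry. It is not part of the patch; it assumes
the libxc grant-table calls that xen_disk.c itself uses, and the helper
names map_region/unmap_region are made up for this example:

    #include <errno.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <xenctrl.h>

    /*
     * Batch-map 'count' grant references from 'domid' into one contiguous
     * region with a single call. The result is backed by one mapping, so
     * it can only be torn down as a whole; there is no way to unmap just
     * one page out of the middle of it.
     */
    static void *map_region(xc_gnttab *gnt, uint32_t domid,
                            uint32_t *refs, uint32_t count)
    {
        uint32_t domids[count];

        for (uint32_t i = 0; i < count; i++) {
            domids[i] = domid;
        }
        return xc_gnttab_map_grant_refs(gnt, count, domids, refs,
                                        PROT_READ | PROT_WRITE);
    }

    /* Tear-down must mirror the mapping: same start address, same count. */
    static int unmap_region(xc_gnttab *gnt, void *addr, uint32_t count)
    {
        return xc_gnttab_munmap(gnt, addr, count);
    }

This is why the patch records each batch-mapped area as a PersistentRegion
(start address plus page count) and hands exactly those two values back to
xc_gnttab_munmap() in remove_persistent_region().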
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-by: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 hw/block/xen_disk.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 66 insertions(+), 6 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 231e9a7..21842a0 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -59,6 +59,13 @@ struct PersistentGrant {
 
 typedef struct PersistentGrant PersistentGrant;
 
+struct PersistentRegion {
+    void *addr;
+    int num;
+};
+
+typedef struct PersistentRegion PersistentRegion;
+
 struct ioreq {
     blkif_request_t     req;
     int16_t             status;
@@ -118,6 +125,7 @@ struct XenBlkDev {
     gboolean            feature_discard;
     gboolean            feature_persistent;
     GTree               *persistent_gnts;
+    GSList              *persistent_regions;
     unsigned int        persistent_gnt_count;
     unsigned int        max_grants;
 
@@ -177,6 +185,23 @@ static void destroy_grant(gpointer pgnt)
     g_free(grant);
 }
 
+static void remove_persistent_region(gpointer data, gpointer dev)
+{
+    PersistentRegion *region = data;
+    struct XenBlkDev *blkdev = dev;
+    XenGnttab gnt = blkdev->xendev.gnttabdev;
+
+    if (xc_gnttab_munmap(gnt, region->addr, region->num) != 0) {
+        xen_be_printf(&blkdev->xendev, 0,
+                      "xc_gnttab_munmap region %p failed: %s\n",
+                      region->addr, strerror(errno));
+    }
+    xen_be_printf(&blkdev->xendev, 3,
+                  "unmapped grant region %p with %d pages\n",
+                  region->addr, region->num);
+    g_free(region);
+}
+
 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
 {
     struct ioreq *ioreq = NULL;
@@ -343,6 +368,7 @@ static int ioreq_map(struct ioreq *ioreq)
     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int i, j, new_maps = 0;
     PersistentGrant *grant;
+    PersistentRegion *region;
     /* domids and refs variables will contain the information necessary
      * to map the grants that are needed to fulfill this request.
      *
@@ -421,7 +447,22 @@ static int ioreq_map(struct ioreq *ioreq)
             }
         }
     }
-    if (ioreq->blkdev->feature_persistent) {
+    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
+        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
+        ioreq->blkdev->max_grants))) {
+        /*
+         * If we are using persistent grants and batch mappings only
+         * add the new maps to the list of persistent grants if the whole
+         * area can be persistently mapped.
+         */
+        if (batch_maps) {
+            region = g_malloc0(sizeof(*region));
+            region->addr = ioreq->pages;
+            region->num = new_maps;
+            ioreq->blkdev->persistent_regions = g_slist_append(
+                                              ioreq->blkdev->persistent_regions,
+                                              region);
+        }
         while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
             /* Go through the list of newly mapped grants and add as many
@@ -447,6 +488,7 @@ static int ioreq_map(struct ioreq *ioreq)
                           grant);
             ioreq->blkdev->persistent_gnt_count++;
         }
+        assert(!batch_maps || new_maps == 0);
     }
     for (i = 0; i < ioreq->v.niov; i++) {
         ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
@@ -971,7 +1013,10 @@ static int blk_connect(struct XenDevice *xendev)
         blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                              NULL, NULL,
+                                             batch_maps ?
+                                             (GDestroyNotify)g_free :
                                              (GDestroyNotify)destroy_grant);
+        blkdev->persistent_regions = NULL;
         blkdev->persistent_gnt_count = 0;
     }
 
@@ -1000,6 +1045,26 @@ static void blk_disconnect(struct XenDevice *xendev)
         blkdev->cnt_map--;
         blkdev->sring = NULL;
     }
+
+    /*
+     * Unmap persistent grants before switching to the closed state
+     * so the frontend can free them.
+     *
+     * In the !batch_maps case g_tree_destroy will take care of unmapping
+     * the grant, but in the batch_maps case we need to iterate over every
+     * region in persistent_regions and unmap it.
+     */
+    if (blkdev->feature_persistent) {
+        g_tree_destroy(blkdev->persistent_gnts);
+        assert(batch_maps || blkdev->persistent_gnt_count == 0);
+        if (batch_maps) {
+            blkdev->persistent_gnt_count = 0;
+            g_slist_foreach(blkdev->persistent_regions,
+                            (GFunc)remove_persistent_region, blkdev);
+            g_slist_free(blkdev->persistent_regions);
+        }
+        blkdev->feature_persistent = false;
+    }
 }
 
 static int blk_free(struct XenDevice *xendev)
@@ -1011,11 +1076,6 @@ static int blk_free(struct XenDevice *xendev)
         blk_disconnect(xendev);
     }
 
-    /* Free persistent grants */
-    if (blkdev->feature_persistent) {
-        g_tree_destroy(blkdev->persistent_gnts);
-    }
-
     while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
--
1.7.10.4
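For readers following the blk_disconnect() change above, the GSList
bookkeeping reduces to a small GLib pattern. Below is a self-contained
sketch under stated assumptions: Region, remove_region and the printf
are stand-ins for the patch's PersistentRegion, remove_persistent_region
and the real xc_gnttab_munmap() call, not QEMU code:

    #include <glib.h>
    #include <stdio.h>

    /* Illustrative stand-in for the patch's PersistentRegion. */
    typedef struct {
        void *addr;
        int num;
    } Region;

    /* GFunc callback: unmap one whole batch-mapped region, then free it. */
    static void remove_region(gpointer data, gpointer user_data)
    {
        Region *region = data;

        /* The real code calls xc_gnttab_munmap(gnt, region->addr,
         * region->num) here. */
        printf("unmapping region %p (%d pages)\n", region->addr, region->num);
        g_free(region);
    }

    int main(void)
    {
        GSList *regions = NULL;
        Region *region;

        /* On each batch map, record the area that was just mapped... */
        region = g_malloc0(sizeof(*region));
        region->addr = (void *)0x100000;  /* placeholder address */
        region->num = 11;
        regions = g_slist_append(regions, region);

        /* ...and on disconnect, walk the list and unmap every region. */
        g_slist_foreach(regions, remove_region, NULL);
        g_slist_free(regions);
        return 0;
    }

The same ownership split explains the g_tree_new_full() change in
blk_connect(): with batch_maps the tree entries no longer own their
mappings (the regions do), so the destroy notify drops from
destroy_grant to plain g_free.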