From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>
Subject: [PATCH v9 02/11] x86/hvm/ioreq: simplify code and use consistent naming
Date: Fri, 6 Oct 2017 13:25:10 +0100
Message-ID: <20171006122519.30345-3-paul.durrant@citrix.com>
In-Reply-To: <20171006122519.30345-1-paul.durrant@citrix.com>
This patch re-works much of the ioreq server initialization and teardown
code:
- The hvm_map/unmap_ioreq_gfn() functions are expanded to call through
to hvm_alloc/free_ioreq_gfn() rather than expecting them to be called
separately by outer functions.
- Several functions now test the validity of the hvm_ioreq_page gfn value
to determine whether they need to act. This means they can be safely
called for the bufioreq page even when it is not used.
- hvm_add/remove_ioreq_gfn() simply return in the case of the default
IOREQ server, so callers no longer need to test before calling.
- hvm_ioreq_server_setup_pages() is renamed to hvm_ioreq_server_map_pages()
to mirror the existing hvm_ioreq_server_unmap_pages().
All of this significantly shortens the code.
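To make the effect on callers concrete, here is a condensed before/after
sketch of the map/unmap paths, taken from the hunks below (error paths
and the default-server special case are elided for brevity):

    /* Before: gfn allocation, mapping and cleanup were separate steps
     * that every caller had to sequence and unwind by hand. */
    rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);
    if ( !rc && handle_bufioreq )
        rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);
    if ( !rc )
        rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);
    if ( rc )
    {
        hvm_free_ioreq_gfn(d, ioreq_gfn);
        hvm_free_ioreq_gfn(d, bufioreq_gfn);
    }

    /* After: allocation is internal to hvm_map_ioreq_gfn(), and
     * hvm_unmap_ioreq_gfn() checks the gfn for validity itself. */
    rc = hvm_map_ioreq_gfn(s, false);
    if ( !rc && handle_bufioreq )
        rc = hvm_map_ioreq_gfn(s, true);
    if ( rc )
        hvm_unmap_ioreq_gfn(s, false);

    /* Teardown is now unconditional: the bufioreq unmap is a no-op
     * when that page was never mapped. */
    hvm_unmap_ioreq_gfn(s, true);
    hvm_unmap_ioreq_gfn(s, false);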
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
v3:
- Rebased on top of the 's->is_default' to 'IS_DEFAULT(s)' changes.
- Minor updates in response to review comments from Roger.
---
xen/arch/x86/hvm/ioreq.c | 182 ++++++++++++++++++-----------------------------
1 file changed, 69 insertions(+), 113 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index a6de1e7db2..f162e27ab1 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -217,63 +217,75 @@ bool handle_hvm_io_completion(struct vcpu *v)
return true;
}
-static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
+static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
{
+ struct domain *d = s->domain;
unsigned int i;
- int rc;
- rc = -ENOMEM;
+ ASSERT(!IS_DEFAULT(s));
+
for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
{
if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
- {
- *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
- rc = 0;
- break;
- }
+ return d->arch.hvm_domain.ioreq_gfn.base + i;
}
- return rc;
+ return gfn_x(INVALID_GFN);
}
-static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
+static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
+ unsigned long gfn)
{
+ struct domain *d = s->domain;
unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
- if ( gfn != gfn_x(INVALID_GFN) )
- set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
+ ASSERT(!IS_DEFAULT(s));
+ ASSERT(gfn != gfn_x(INVALID_GFN));
+
+ set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
}
-static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
+static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+ if ( iorp->gfn == gfn_x(INVALID_GFN) )
+ return;
+
destroy_ring_for_helper(&iorp->va, iorp->page);
+ iorp->page = NULL;
+
+ if ( !IS_DEFAULT(s) )
+ hvm_free_ioreq_gfn(s, iorp->gfn);
+
+ iorp->gfn = gfn_x(INVALID_GFN);
}
-static int hvm_map_ioreq_page(
- struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
+static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
struct domain *d = s->domain;
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- struct page_info *page;
- void *va;
int rc;
- if ( (rc = prepare_ring_for_helper(d, gfn, &page, &va)) )
- return rc;
-
- if ( (iorp->va != NULL) || d->is_dying )
- {
- destroy_ring_for_helper(&va, page);
+ if ( d->is_dying )
return -EINVAL;
- }
- iorp->va = va;
- iorp->page = page;
- iorp->gfn = gfn;
+ if ( IS_DEFAULT(s) )
+ iorp->gfn = buf ?
+ d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+ d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+ else
+ iorp->gfn = hvm_alloc_ioreq_gfn(s);
- return 0;
+ if ( iorp->gfn == gfn_x(INVALID_GFN) )
+ return -ENOMEM;
+
+ rc = prepare_ring_for_helper(d, iorp->gfn, &iorp->page, &iorp->va);
+
+ if ( rc )
+ hvm_unmap_ioreq_gfn(s, buf);
+
+ return rc;
}
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
@@ -286,8 +298,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
FOR_EACH_IOREQ_SERVER(d, id, s)
{
- if ( (s->ioreq.va && s->ioreq.page == page) ||
- (s->bufioreq.va && s->bufioreq.page == page) )
+ if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
{
found = true;
break;
@@ -299,20 +310,30 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
return found;
}
-static void hvm_remove_ioreq_gfn(
- struct domain *d, struct hvm_ioreq_page *iorp)
+static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
+
{
+ struct domain *d = s->domain;
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+ return;
+
if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
_mfn(page_to_mfn(iorp->page)), 0) )
domain_crash(d);
clear_page(iorp->va);
}
-static int hvm_add_ioreq_gfn(
- struct domain *d, struct hvm_ioreq_page *iorp)
+static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
+ struct domain *d = s->domain;
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
+ if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+ return 0;
+
clear_page(iorp->va);
rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
@@ -447,78 +468,25 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
}
static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
- unsigned long ioreq_gfn,
- unsigned long bufioreq_gfn)
+ bool handle_bufioreq)
{
int rc;
- rc = hvm_map_ioreq_page(s, false, ioreq_gfn);
- if ( rc )
- return rc;
-
- if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
- rc = hvm_map_ioreq_page(s, true, bufioreq_gfn);
-
- if ( rc )
- hvm_unmap_ioreq_page(s, false);
-
- return rc;
-}
-
-static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
- bool handle_bufioreq)
-{
- struct domain *d = s->domain;
- unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
- unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
- int rc;
-
- if ( IS_DEFAULT(s) )
- {
- /*
- * The default ioreq server must handle buffered ioreqs, for
- * backwards compatibility.
- */
- ASSERT(handle_bufioreq);
- return hvm_ioreq_server_map_pages(s,
- d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
- d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
- }
-
- rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);
+ rc = hvm_map_ioreq_gfn(s, false);
if ( !rc && handle_bufioreq )
- rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);
-
- if ( !rc )
- rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);
+ rc = hvm_map_ioreq_gfn(s, true);
if ( rc )
- {
- hvm_free_ioreq_gfn(d, ioreq_gfn);
- hvm_free_ioreq_gfn(d, bufioreq_gfn);
- }
+ hvm_unmap_ioreq_gfn(s, false);
return rc;
}
static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
{
- struct domain *d = s->domain;
- bool handle_bufioreq = !!s->bufioreq.va;
-
- if ( handle_bufioreq )
- hvm_unmap_ioreq_page(s, true);
-
- hvm_unmap_ioreq_page(s, false);
-
- if ( !IS_DEFAULT(s) )
- {
- if ( handle_bufioreq )
- hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
-
- hvm_free_ioreq_gfn(d, s->ioreq.gfn);
- }
+ hvm_unmap_ioreq_gfn(s, true);
+ hvm_unmap_ioreq_gfn(s, false);
}
static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
@@ -578,22 +546,15 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
{
- struct domain *d = s->domain;
struct hvm_ioreq_vcpu *sv;
- bool handle_bufioreq = !!s->bufioreq.va;
spin_lock(&s->lock);
if ( s->enabled )
goto done;
- if ( !IS_DEFAULT(s) )
- {
- hvm_remove_ioreq_gfn(d, &s->ioreq);
-
- if ( handle_bufioreq )
- hvm_remove_ioreq_gfn(d, &s->bufioreq);
- }
+ hvm_remove_ioreq_gfn(s, false);
+ hvm_remove_ioreq_gfn(s, true);
s->enabled = true;
@@ -608,21 +569,13 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
{
- struct domain *d = s->domain;
- bool handle_bufioreq = !!s->bufioreq.va;
-
spin_lock(&s->lock);
if ( !s->enabled )
goto done;
- if ( !IS_DEFAULT(s) )
- {
- if ( handle_bufioreq )
- hvm_add_ioreq_gfn(d, &s->bufioreq);
-
- hvm_add_ioreq_gfn(d, &s->ioreq);
- }
+ hvm_add_ioreq_gfn(s, true);
+ hvm_add_ioreq_gfn(s, false);
s->enabled = false;
@@ -644,6 +597,9 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
spin_lock_init(&s->bufioreq_lock);
+ s->ioreq.gfn = gfn_x(INVALID_GFN);
+ s->bufioreq.gfn = gfn_x(INVALID_GFN);
+
rc = hvm_ioreq_server_alloc_rangesets(s, id);
if ( rc )
return rc;
@@ -651,7 +607,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
s->bufioreq_atomic = true;
- rc = hvm_ioreq_server_setup_pages(
+ rc = hvm_ioreq_server_map_pages(
s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
if ( rc )
goto fail_map;
--
2.11.0