From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 REPOST 09/12] x86/hvm/ioreq: simplify code and use consistent naming
Date: Tue, 22 Aug 2017 15:51:03 +0100 [thread overview]
Message-ID: <20170822145107.6877-10-paul.durrant@citrix.com> (raw)
In-Reply-To: <20170822145107.6877-1-paul.durrant@citrix.com>
This patch re-works much of the IOREQ server initialization and teardown
code:
- The hvm_map/unmap_ioreq_gfn() functions are expanded to call through
to hvm_alloc/free_ioreq_gfn() rather than expecting them to be called
separately by outer functions.
- Several functions now test the validity of the hvm_ioreq_page gfn value
to determine whether they need to act. This means they can be safely called
for the bufioreq page even when it is not used.
- hvm_add/remove_ioreq_gfn() simply return in the case of the default
IOREQ server so callers no longer need to test before calling.
- hvm_ioreq_server_setup_pages() is renamed to hvm_ioreq_server_map_pages()
to mirror the existing hvm_ioreq_server_unmap_pages().
All of this significantly shortens the code.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
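Note for reviewers: as an illustration of the gfn-validity pattern described
above, the reworked hvm_unmap_ioreq_gfn() ends up roughly as follows (a
simplified excerpt of the diff below, shown here only for orientation):

static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;

    /*
     * An INVALID_GFN value means the page was never mapped, so the
     * function is safe to call even for an unused bufioreq page.
     */
    if ( iorp->gfn == gfn_x(INVALID_GFN) )
        return;

    destroy_ring_for_helper(&iorp->va, iorp->page);
    iorp->page = NULL;

    /*
     * Only non-default servers allocate their gfns from the domain's
     * ioreq_gfn pool, so only they need to free them again.
     */
    if ( !s->is_default )
        hvm_free_ioreq_gfn(s, iorp->gfn);

    iorp->gfn = gfn_x(INVALID_GFN);
}

Because this and hvm_add/remove_ioreq_gfn() bail out early in the cases
above, hvm_ioreq_server_unmap_pages() reduces to two unconditional calls,
as can be seen in the diff.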
xen/arch/x86/hvm/ioreq.c | 181 ++++++++++++++++++-----------------------------
1 file changed, 69 insertions(+), 112 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 5737082238..edfb394c59 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -181,63 +181,76 @@ bool handle_hvm_io_completion(struct vcpu *v)
return true;
}
-static int hvm_alloc_ioreq_gfn(struct domain *d, unsigned long *gfn)
+static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
{
+ struct domain *d = s->domain;
unsigned int i;
- int rc;
- rc = -ENOMEM;
+ ASSERT(!s->is_default);
+
for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
{
if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
{
- *gfn = d->arch.hvm_domain.ioreq_gfn.base + i;
- rc = 0;
- break;
+ return d->arch.hvm_domain.ioreq_gfn.base + i;
}
}
- return rc;
+ return gfn_x(INVALID_GFN);
}
-static void hvm_free_ioreq_gfn(struct domain *d, unsigned long gfn)
+static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
+ unsigned long gfn)
{
+ struct domain *d = s->domain;
unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
- if ( gfn != gfn_x(INVALID_GFN) )
- set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
+ ASSERT(!s->is_default);
+
+ set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
}
-static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
+static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+ if ( iorp->gfn == gfn_x(INVALID_GFN) )
+ return;
+
destroy_ring_for_helper(&iorp->va, iorp->page);
+ iorp->page = NULL;
+
+ if ( !s->is_default )
+ hvm_free_ioreq_gfn(s, iorp->gfn);
+
+ iorp->gfn = gfn_x(INVALID_GFN);
}
-static int hvm_map_ioreq_page(
- struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
+static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
struct domain *d = s->domain;
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- struct page_info *page;
- void *va;
int rc;
- if ( (rc = prepare_ring_for_helper(d, gfn, &page, &va)) )
- return rc;
-
- if ( (iorp->va != NULL) || d->is_dying )
- {
- destroy_ring_for_helper(&va, page);
+ if ( d->is_dying )
return -EINVAL;
- }
- iorp->va = va;
- iorp->page = page;
- iorp->gfn = gfn;
+ if ( s->is_default )
+ iorp->gfn = buf ?
+ d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+ d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+ else
+ iorp->gfn = hvm_alloc_ioreq_gfn(s);
+
+ if ( iorp->gfn == gfn_x(INVALID_GFN) )
+ return -ENOMEM;
- return 0;
+ rc = prepare_ring_for_helper(d, iorp->gfn, &iorp->page, &iorp->va);
+
+ if ( rc )
+ hvm_unmap_ioreq_gfn(s, buf);
+
+ return rc;
}
bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
@@ -251,8 +264,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
&d->arch.hvm_domain.ioreq_server.list,
list_entry )
{
- if ( (s->ioreq.va && s->ioreq.page == page) ||
- (s->bufioreq.va && s->bufioreq.page == page) )
+ if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
{
found = true;
break;
@@ -264,20 +276,30 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
return found;
}
-static void hvm_remove_ioreq_gfn(
- struct domain *d, struct hvm_ioreq_page *iorp)
+static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
+
{
+ struct domain *d = s->domain;
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ if ( s->is_default || iorp->gfn == gfn_x(INVALID_GFN) )
+ return;
+
if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
_mfn(page_to_mfn(iorp->page)), 0) )
domain_crash(d);
clear_page(iorp->va);
}
-static int hvm_add_ioreq_gfn(
- struct domain *d, struct hvm_ioreq_page *iorp)
+static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
+ struct domain *d = s->domain;
+ struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
+ if ( s->is_default || iorp->gfn == gfn_x(INVALID_GFN) )
+ return 0;
+
clear_page(iorp->va);
rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
@@ -412,78 +434,25 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
}
static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
- unsigned long ioreq_gfn,
- unsigned long bufioreq_gfn)
-{
- int rc;
-
- rc = hvm_map_ioreq_page(s, false, ioreq_gfn);
- if ( rc )
- return rc;
-
- if ( bufioreq_gfn != gfn_x(INVALID_GFN) )
- rc = hvm_map_ioreq_page(s, true, bufioreq_gfn);
-
- if ( rc )
- hvm_unmap_ioreq_page(s, false);
-
- return rc;
-}
-
-static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
- bool handle_bufioreq)
+ bool handle_bufioreq)
{
- struct domain *d = s->domain;
- unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
- unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
- int rc;
-
- if ( s->is_default )
- {
- /*
- * The default ioreq server must handle buffered ioreqs, for
- * backwards compatibility.
- */
- ASSERT(handle_bufioreq);
- return hvm_ioreq_server_map_pages(s,
- d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN],
- d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN]);
- }
+ int rc = -ENOMEM;
- rc = hvm_alloc_ioreq_gfn(d, &ioreq_gfn);
+ rc = hvm_map_ioreq_gfn(s, false);
if ( !rc && handle_bufioreq )
- rc = hvm_alloc_ioreq_gfn(d, &bufioreq_gfn);
-
- if ( !rc )
- rc = hvm_ioreq_server_map_pages(s, ioreq_gfn, bufioreq_gfn);
+ rc = hvm_map_ioreq_gfn(s, true);
if ( rc )
- {
- hvm_free_ioreq_gfn(d, ioreq_gfn);
- hvm_free_ioreq_gfn(d, bufioreq_gfn);
- }
+ hvm_unmap_ioreq_gfn(s, false);
return rc;
}
static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
{
- struct domain *d = s->domain;
- bool handle_bufioreq = !!s->bufioreq.va;
-
- if ( handle_bufioreq )
- hvm_unmap_ioreq_page(s, true);
-
- hvm_unmap_ioreq_page(s, false);
-
- if ( !s->is_default )
- {
- if ( handle_bufioreq )
- hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
-
- hvm_free_ioreq_gfn(d, s->ioreq.gfn);
- }
+ hvm_unmap_ioreq_gfn(s, true);
+ hvm_unmap_ioreq_gfn(s, false);
}
static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
@@ -540,22 +509,15 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s)
static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
{
- struct domain *d = s->domain;
struct hvm_ioreq_vcpu *sv;
- bool handle_bufioreq = !!s->bufioreq.va;
spin_lock(&s->lock);
if ( s->enabled )
goto done;
- if ( !s->is_default )
- {
- hvm_remove_ioreq_gfn(d, &s->ioreq);
-
- if ( handle_bufioreq )
- hvm_remove_ioreq_gfn(d, &s->bufioreq);
- }
+ hvm_remove_ioreq_gfn(s, false);
+ hvm_remove_ioreq_gfn(s, true);
s->enabled = true;
@@ -570,21 +532,13 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
{
- struct domain *d = s->domain;
- bool handle_bufioreq = !!s->bufioreq.va;
-
spin_lock(&s->lock);
if ( !s->enabled )
goto done;
- if ( !s->is_default )
- {
- if ( handle_bufioreq )
- hvm_add_ioreq_gfn(d, &s->bufioreq);
-
- hvm_add_ioreq_gfn(d, &s->ioreq);
- }
+ hvm_add_ioreq_gfn(s, true);
+ hvm_add_ioreq_gfn(s, false);
s->enabled = false;
@@ -607,6 +561,9 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
spin_lock_init(&s->bufioreq_lock);
+ s->ioreq.gfn = gfn_x(INVALID_GFN);
+ s->bufioreq.gfn = gfn_x(INVALID_GFN);
+
rc = hvm_ioreq_server_alloc_rangesets(s);
if ( rc )
return rc;
@@ -614,7 +571,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
s->bufioreq_atomic = true;
- rc = hvm_ioreq_server_setup_pages(
+ rc = hvm_ioreq_server_map_pages(
s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
if ( rc )
goto fail_map;
--
2.11.0
Thread overview: 56+ messages
2017-08-22 14:50 [PATCH v2 REPOST 00/12] x86: guest resource mapping Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 01/12] [x86|arm]: remove code duplication Paul Durrant
2017-08-24 14:12 ` Jan Beulich
2017-08-24 14:16 ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 02/12] x86/mm: allow a privileged PV domain to map guest mfns Paul Durrant
2017-08-24 16:33 ` Wei Liu
2017-08-25 10:05 ` Paul Durrant
2017-08-28 14:38 ` Wei Liu
2017-08-29 8:37 ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 03/12] x86/mm: add HYPERVISOR_memory_op to acquire guest resources Paul Durrant
2017-08-28 15:01 ` Wei Liu
2017-08-29 8:32 ` Paul Durrant
2017-08-29 8:59 ` Jan Beulich
2017-08-29 9:13 ` Paul Durrant
2017-08-29 9:27 ` Jan Beulich
2017-08-29 9:31 ` Paul Durrant
2017-08-29 9:38 ` Jan Beulich
2017-08-29 11:16 ` George Dunlap
2017-08-29 11:19 ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 04/12] tools/libxenforeignmemory: add support for resource mapping Paul Durrant
2017-08-24 15:52 ` Roger Pau Monné
2017-08-24 15:58 ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 05/12] tools/libxenctrl: use new xenforeignmemory API to seed grant table Paul Durrant
2017-08-24 16:02 ` Roger Pau Monné
2017-08-24 16:09 ` Paul Durrant
2017-08-28 15:04 ` Wei Liu
2017-08-22 14:51 ` [PATCH v2 REPOST 06/12] x86/hvm/ioreq: rename .*pfn and .*gmfn to .*gfn Paul Durrant
2017-08-24 16:06 ` Roger Pau Monné
2017-08-28 15:01 ` Wei Liu
2017-08-22 14:51 ` [PATCH v2 REPOST 07/12] x86/hvm/ioreq: use bool rather than bool_t Paul Durrant
2017-08-24 16:11 ` Roger Pau Monné
2017-08-22 14:51 ` [PATCH v2 REPOST 08/12] x86/hvm/ioreq: move is_default into struct hvm_ioreq_server Paul Durrant
2017-08-24 16:21 ` Roger Pau Monné
2017-08-24 16:31 ` Paul Durrant
2017-08-22 14:51 ` Paul Durrant [this message]
2017-08-24 17:02 ` [PATCH v2 REPOST 09/12] x86/hvm/ioreq: simplify code and use consistent naming Roger Pau Monné
2017-08-25 10:18 ` Paul Durrant
2017-08-22 14:51 ` [PATCH v2 REPOST 10/12] x86/hvm/ioreq: use gfn_t in struct hvm_ioreq_page Paul Durrant
2017-08-24 17:05 ` Roger Pau Monné
2017-08-22 14:51 ` [PATCH v2 REPOST 11/12] x86/hvm/ioreq: defer mapping gfns until they are actually requsted Paul Durrant
2017-08-24 17:21 ` Roger Pau Monné
2017-08-25 9:52 ` Paul Durrant
2017-08-28 15:08 ` Wei Liu
2017-08-29 8:51 ` Paul Durrant
2017-08-22 14:51 ` [PATCH v2 REPOST 12/12] x86/hvm/ioreq: add a new mappable resource type Paul Durrant
2017-08-25 9:32 ` Roger Pau Monné
2017-08-25 9:46 ` Paul Durrant
2017-08-25 9:53 ` Roger Pau Monne
2017-08-25 9:58 ` Paul Durrant
2017-08-29 11:36 ` George Dunlap
2017-08-29 13:40 ` George Dunlap
2017-08-29 14:10 ` Paul Durrant
2017-08-29 14:26 ` George Dunlap
2017-08-29 14:31 ` Paul Durrant
2017-08-29 14:38 ` George Dunlap
2017-08-29 14:49 ` Paul Durrant