From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>
Subject: [PATCH v12 03/11] x86/hvm/ioreq: use gfn_t in struct hvm_ioreq_page
Date: Tue, 17 Oct 2017 14:24:24 +0100
Message-ID: <20171017132432.24093-4-paul.durrant@citrix.com>
In-Reply-To: <20171017132432.24093-1-paul.durrant@citrix.com>
This patch adjusts the ioreq server code to use type-safe gfn_t values,
rather than raw unsigned long frame numbers, where possible. No functional
change.
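For context, gfn_t is Xen's typesafe wrapper around a raw guest frame
number: in the typesafe build variant it is a distinct structure type, so
mixing gfn_t and plain unsigned long values becomes a compile-time error
(release builds may reduce it to a plain integer again). The following is
only a minimal paraphrase of the accessors used in this patch, not the
actual definitions, which are generated by TYPE_SAFE(unsigned long, gfn)
in xen/include/xen/typesafe.h and xen/include/xen/mm.h:

    /* Sketch only: paraphrase of the gfn_t typesafe interface. */
    #include <stdbool.h>

    typedef struct { unsigned long g; } gfn_t;

    /* Wrap a raw frame number into the typesafe type. */
    static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ g }; }

    /* Unwrap back to a raw frame number, e.g. for untyped interfaces. */
    static inline unsigned long gfn_x(gfn_t g) { return g.g; }

    /* Compare two typesafe values without exposing the raw numbers. */
    static inline bool gfn_eq(gfn_t x, gfn_t y)
    {
        return gfn_x(x) == gfn_x(y);
    }

    #define INVALID_GFN _gfn(~0UL)

With such a wrapper in place a gfn cannot silently be confused with an mfn
or a byte address, which is the point of converting struct hvm_ioreq_page
to hold a gfn_t.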
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/ioreq.c | 44 ++++++++++++++++++++--------------------
xen/include/asm-x86/hvm/domain.h | 2 +-
2 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 6d81018369..64bb13cec9 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -210,7 +210,7 @@ bool handle_hvm_io_completion(struct vcpu *v)
     return true;
 }
 
-static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
+static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     unsigned int i;
@@ -220,20 +220,19 @@ static unsigned long hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
     for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
     {
         if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
-            return d->arch.hvm_domain.ioreq_gfn.base + i;
+            return _gfn(d->arch.hvm_domain.ioreq_gfn.base + i);
     }
 
-    return gfn_x(INVALID_GFN);
+    return INVALID_GFN;
 }
 
-static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
-                               unsigned long gfn)
+static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
 {
     struct domain *d = s->domain;
-    unsigned int i = gfn - d->arch.hvm_domain.ioreq_gfn.base;
+    unsigned int i = gfn_x(gfn) - d->arch.hvm_domain.ioreq_gfn.base;
 
     ASSERT(!IS_DEFAULT(s));
-    ASSERT(gfn != gfn_x(INVALID_GFN));
+    ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
     set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
 }
@@ -242,7 +241,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 {
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
-    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
     destroy_ring_for_helper(&iorp->va, iorp->page);
@@ -251,7 +250,7 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     if ( !IS_DEFAULT(s) )
         hvm_free_ioreq_gfn(s, iorp->gfn);
 
-    iorp->gfn = gfn_x(INVALID_GFN);
+    iorp->gfn = INVALID_GFN;
 }
 
 static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
@@ -264,16 +263,17 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
         return -EINVAL;
 
     if ( IS_DEFAULT(s) )
-        iorp->gfn = buf ?
-                    d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
-                    d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+        iorp->gfn = _gfn(buf ?
+                         d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+                         d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN]);
     else
         iorp->gfn = hvm_alloc_ioreq_gfn(s);
 
-    if ( iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
         return -ENOMEM;
 
-    rc = prepare_ring_for_helper(d, iorp->gfn, &iorp->page, &iorp->va);
+    rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
+                                 &iorp->va);
 
     if ( rc )
         hvm_unmap_ioreq_gfn(s, buf);
@@ -309,10 +309,10 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct domain *d = s->domain;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
 
-    if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
-    if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
+    if ( guest_physmap_remove_page(d, iorp->gfn,
                                    _mfn(page_to_mfn(iorp->page)), 0) )
         domain_crash(d);
     clear_page(iorp->va);
@@ -324,12 +324,12 @@ static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     int rc;
 
-    if ( IS_DEFAULT(s) || iorp->gfn == gfn_x(INVALID_GFN) )
+    if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
         return 0;
 
     clear_page(iorp->va);
 
-    rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
+    rc = guest_physmap_add_page(d, iorp->gfn,
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
         paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
@@ -590,8 +590,8 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    s->ioreq.gfn = gfn_x(INVALID_GFN);
-    s->bufioreq.gfn = gfn_x(INVALID_GFN);
+    s->ioreq.gfn = INVALID_GFN;
+    s->bufioreq.gfn = INVALID_GFN;
 
     rc = hvm_ioreq_server_alloc_rangesets(s, id);
     if ( rc )
@@ -757,11 +757,11 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
 
     ASSERT(!IS_DEFAULT(s));
 
-    *ioreq_gfn = s->ioreq.gfn;
+    *ioreq_gfn = gfn_x(s->ioreq.gfn);
 
     if ( s->bufioreq.va != NULL )
     {
-        *bufioreq_gfn = s->bufioreq.gfn;
+        *bufioreq_gfn = gfn_x(s->bufioreq.gfn);
         *bufioreq_port = s->bufioreq_evtchn;
     }
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index e17bbe4004..3bd9c5d7c0 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -36,7 +36,7 @@
 #include <public/hvm/dm_op.h>
 
 struct hvm_ioreq_page {
-    unsigned long gfn;
+    gfn_t gfn;
     struct page_info *page;
     void *va;
 };
--
2.11.0