From: Aravindh Puthiyaparambil <aravindp@cisco.com>
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
Ian Jackson <ian.jackson@eu.citrix.com>,
Ian Campbell <ian.campbell@citrix.com>,
Aravindh Puthiyaparambil <aravindp@cisco.com>
Subject: [PATCH RFC 4/4] tools/xen-access: Add support for PV domains
Date: Mon, 28 Apr 2014 21:45:05 -0700
Message-ID: <1398746705-6658-5-git-send-email-aravindp@cisco.com>
In-Reply-To: <1398746705-6658-1-git-send-email-aravindp@cisco.com>

Add support to the xen-access test program so that it works with PV
domains. The main difference from HVM domains is that for PV domains
xc_mem_access_create_ring_page() has to be called, because the ring
page is not created at domain creation time. PV domains also do not
need the access permissions of each individual page to be set during
setup and teardown; enabling and disabling mem_access does that
implicitly.
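
The resulting ring page lookup in xenaccess_init() then splits roughly
as follows (a minimal sketch, not the complete function;
xc_mem_access_create_ring_page() and xc_mem_access_get_ring_mfn() are
the libxc helpers added in patch 3/4 of this series):

    if ( xenaccess->domain_info->flags & XEN_DOMINF_hvm_guest )
    {
        /* HVM: the ring page already exists; look up its PFN. */
        rc = xc_get_hvm_param(xch, xenaccess->mem_event.domain_id,
                              HVM_PARAM_ACCESS_RING_PFN, &ring_pfn);
    }
    else
    {
        /* PV: the ring page must be created first, then looked up. */
        rc = xc_mem_access_create_ring_page(xch,
                                            xenaccess->mem_event.domain_id);
        if ( rc == 0 )
            rc = xc_mem_access_get_ring_mfn(xch,
                                            xenaccess->mem_event.domain_id,
                                            &ring_pfn);
    }
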
Signed-off-by: Aravindh Puthiyaparambil <aravindp@cisco.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
---
 tools/tests/xen-access/xen-access.c | 122 ++++++++++++++++++++++++------------
 1 file changed, 81 insertions(+), 41 deletions(-)

diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
index 0a84bd5..f9883f4 100644
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -114,7 +114,8 @@ typedef struct xenaccess {
 } xenaccess_t;
 
 static int interrupted;
-bool evtchn_bind = 0, evtchn_open = 0, mem_access_enable = 0;
+bool evtchn_bind = 0, evtchn_open = 0, mem_access_enable = 0, pv_ring_page = 0;
+bool hvm = 0;
 
 static void close_handler(int sig)
 {
@@ -173,7 +174,7 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t *xenaccess)
     if ( xenaccess->mem_event.ring_page )
         munmap(xenaccess->mem_event.ring_page, XC_PAGE_SIZE);
 
-    if ( mem_access_enable )
+    if ( mem_access_enable || (!hvm && pv_ring_page) )
     {
         rc = xc_mem_access_disable(xenaccess->xc_handle,
                                    xenaccess->mem_event.domain_id);
@@ -245,9 +246,57 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
     /* Initialise lock */
     mem_event_ring_lock_init(&xenaccess->mem_event);
 
+    /* Get domaininfo */
+    xenaccess->domain_info = malloc(sizeof(xc_domaininfo_t));
+    if ( xenaccess->domain_info == NULL )
+    {
+        ERROR("Error allocating memory for domain info");
+        goto err;
+    }
+
+    rc = xc_domain_getinfolist(xenaccess->xc_handle, domain_id, 1,
+                               xenaccess->domain_info);
+    if ( rc != 1 )
+    {
+        ERROR("Error getting domain info");
+        goto err;
+    }
+
+    if ( xenaccess->domain_info->flags & XEN_DOMINF_hvm_guest )
+        hvm = 1;
+
+    DPRINTF("max_pages = %"PRIx64"\n", xenaccess->domain_info->max_pages);
+
+    if ( hvm )
+    {
+        rc = xc_get_hvm_param(xch, xenaccess->mem_event.domain_id,
+                              HVM_PARAM_ACCESS_RING_PFN, &ring_pfn);
+
+    }
+    else
+    {
+        rc = xc_mem_access_create_ring_page(xch, xenaccess->mem_event.domain_id);
+        if ( rc != 0 )
+        {
+            PERROR("Failed to create ring page");
+            goto err;
+        }
+
+        pv_ring_page = 1;
+
+        rc = xc_mem_access_get_ring_mfn(xch, xenaccess->mem_event.domain_id,
+                                        &ring_pfn);
+    }
+
+    if ( rc != 0 )
+    {
+        PERROR("Failed to get ring gfn");
+        goto err;
+    }
+
+    DPRINTF("ring_mfn: 0x%lx\n", ring_pfn);
+
     /* Map the ring page */
-    xc_get_hvm_param(xch, xenaccess->mem_event.domain_id,
-                     HVM_PARAM_ACCESS_RING_PFN, &ring_pfn);
     mmap_pfn = ring_pfn;
     xenaccess->mem_event.ring_page =
         xc_map_foreign_batch(xch, xenaccess->mem_event.domain_id,
@@ -327,24 +376,6 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
                     xenaccess->mem_event.domain_id, 1, 0, &ring_pfn) )
         PERROR("Failed to remove ring from guest physmap");
 
-    /* Get domaininfo */
-    xenaccess->domain_info = malloc(sizeof(xc_domaininfo_t));
-    if ( xenaccess->domain_info == NULL )
-    {
-        ERROR("Error allocating memory for domain info");
-        goto err;
-    }
-
-    rc = xc_domain_getinfolist(xenaccess->xc_handle, domain_id, 1,
-                               xenaccess->domain_info);
-    if ( rc != 1 )
-    {
-        ERROR("Error getting domain info");
-        goto err;
-    }
-
-    DPRINTF("max_pages = %"PRIx64"\n", xenaccess->domain_info->max_pages);
-
     return xenaccess;
 
 err:
@@ -526,23 +557,28 @@ int main(int argc, char *argv[])
         goto exit;
     }
 
-    rc = xc_set_mem_access(xch, domain_id, default_access, 0,
-                           xenaccess->domain_info->max_pages);
-    if ( rc < 0 )
+    if ( hvm )
     {
-        ERROR("Error %d setting all memory to access type %d\n", rc,
-              default_access);
-        goto exit;
-    }
+        rc = xc_set_mem_access(xch, domain_id, default_access, 0,
+                               xenaccess->domain_info->max_pages);
+        if ( rc < 0 )
+        {
+            ERROR("Error %d setting all memory to access type %d\n", rc,
+                  default_access);
+            goto exit;
+        }
 
-    if ( int3 )
-        rc = xc_set_hvm_param(xch, domain_id, HVM_PARAM_MEMORY_EVENT_INT3, HVMPME_mode_sync);
-    else
-        rc = xc_set_hvm_param(xch, domain_id, HVM_PARAM_MEMORY_EVENT_INT3, HVMPME_mode_disabled);
-    if ( rc < 0 )
-    {
-        ERROR("Error %d setting int3 mem_event\n", rc);
-        goto exit;
+        if ( int3 )
+            rc = xc_set_hvm_param(xch, domain_id, HVM_PARAM_MEMORY_EVENT_INT3,
+                                  HVMPME_mode_sync);
+        else
+            rc = xc_set_hvm_param(xch, domain_id, HVM_PARAM_MEMORY_EVENT_INT3,
+                                  HVMPME_mode_disabled);
+        if ( rc < 0 )
+        {
+            ERROR("Error %d setting int3 mem_event\n", rc);
+            goto exit;
+        }
     }
 
     /* Wait for access */
@@ -554,10 +590,14 @@ int main(int argc, char *argv[])
         {
             /* Unregister for every event */
             rc = xc_set_mem_access(xch, domain_id, XENMEM_access_rwx, ~0ull, 0);
-            rc = xc_set_mem_access(xch, domain_id, XENMEM_access_rwx, 0,
-                                   xenaccess->domain_info->max_pages);
-            rc = xc_set_hvm_param(xch, domain_id, HVM_PARAM_MEMORY_EVENT_INT3, HVMPME_mode_disabled);
-
+            if ( hvm )
+            {
+                rc = xc_set_mem_access(xch, domain_id, XENMEM_access_rwx, 0,
+                                       xenaccess->domain_info->max_pages);
+                rc = xc_set_hvm_param(xch, domain_id,
+                                      HVM_PARAM_MEMORY_EVENT_INT3,
+                                      HVMPME_mode_disabled);
+            }
             shutting_down = 1;
         }
 
--
1.9.1