From: Andres Lagar-Cavilla <andres@lagarcavilla.org>
To: xen-devel@lists.xensource.com
Cc: ian.campbell@citrix.com, andres@gridcentric.ca, tim@xen.org,
	JBeulich@suse.com, ian.jackson@citrix.com, adin@gridcentric.ca
Subject: [PATCH 4 of 8] x86/mm: wire up sharing ring
Date: Tue, 06 Mar 2012 18:50:26 -0500
Message-ID: <d03aa37a028810174df4.1331077826@xdev.gridcentric.ca>
In-Reply-To: <patchbomb.1331077822@xdev.gridcentric.ca>

 xen/arch/x86/mm/mem_event.c |  41 +++++++++++++++++++++++++++++++++++++++++
 xen/include/public/domctl.h |  20 +++++++++++++++++++-
 xen/include/xen/sched.h     |   3 +++
 3 files changed, 63 insertions(+), 1 deletions(-)


Now that the interface is close to final, add the necessary plumbing to set up
a ring for reporting failed allocations in the unshare path.

Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Acked-by: Tim Deegan <tim@xen.org>
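
For illustration, a helper would enable the new ring roughly as follows.
This is a minimal sketch rather than the real toolstack code (the libxc
wrapper proper is patch 5 of this series); it goes through libxc's raw
xc_domctl() passthrough and relies on the reserved ring pfn introduced in
patch 3. Error handling is trimmed for brevity.

#include <string.h>
#include <sys/mman.h>
#include <xenctrl.h>
#include <xen/domctl.h>
#include <xen/hvm/params.h>

/* Sketch only: enable the sharing ring, map it, return it to the caller. */
static void *enable_sharing_ring(xc_interface *xch, domid_t domid,
                                 uint32_t *port /* OUT: event channel */)
{
    struct xen_domctl domctl;
    unsigned long ring_pfn;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd    = XEN_DOMCTL_mem_event_op;
    domctl.domain = domid;
    domctl.u.mem_event_op.op   = XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE;
    domctl.u.mem_event_op.mode = XEN_DOMCTL_MEM_EVENT_OP_SHARING;

    /* mem_event_enable() fills in the event channel port on success. */
    if ( xc_domctl(xch, &domctl) != 0 )
        return NULL;
    *port = domctl.u.mem_event_op.port;

    /* The ring page sits at the reserved pfn set up in patch 3. */
    if ( xc_get_hvm_param(xch, domid, HVM_PARAM_SHARING_RING_PFN,
                          &ring_pfn) != 0 )
        return NULL;
    return xc_map_foreign_range(xch, domid, XC_PAGE_SIZE,
                                PROT_READ | PROT_WRITE, ring_pfn);
}

On success the caller gets back both the mapped ring page and the event
channel port to bind for notifications.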

diff -r 39e3ee550391 -r d03aa37a0288 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -432,6 +432,13 @@ static void mem_access_notification(stru
         p2m_mem_access_resume(v->domain);
 }
 
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_sharing_notification(struct vcpu *v, unsigned int port)
+{
+    if ( likely(v->domain->mem_event->share.ring_page != NULL) )
+        mem_sharing_sharing_resume(v->domain);
+}
+
 struct domain *get_mem_event_op_target(uint32_t domain, int *rc)
 {
     struct domain *d;
@@ -599,6 +606,40 @@ int mem_event_domctl(struct domain *d, x
     }
     break;
 
+    case XEN_DOMCTL_MEM_EVENT_OP_SHARING: 
+    {
+        struct mem_event_domain *med = &d->mem_event->share;
+        rc = -EINVAL;
+
+        switch( mec->op )
+        {
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE:
+        {
+            rc = -ENODEV;
+            /* Only HAP is supported */
+            if ( !hap_enabled(d) )
+                break;
+
+            rc = mem_event_enable(d, mec, med, _VPF_mem_sharing, 
+                                    HVM_PARAM_SHARING_RING_PFN,
+                                    mem_sharing_notification);
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE:
+        {
+            if ( med->ring_page )
+                rc = mem_event_disable(d, med);
+        }
+        break;
+
+        default:
+            rc = -ENOSYS;
+            break;
+        }
+    }
+    break;
+
     default:
         rc = -ENOSYS;
     }
diff -r 39e3ee550391 -r d03aa37a0288 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -710,7 +710,7 @@ struct xen_domctl_gdbsx_domstatus {
 /* XEN_DOMCTL_mem_event_op */
 
 /*
-* Domain memory paging
+ * Domain memory paging
  * Page memory in and out.
  * Domctl interface to set up and tear down the 
  * pager<->hypervisor interface. Use XENMEM_paging_op*
@@ -740,6 +740,24 @@ struct xen_domctl_gdbsx_domstatus {
 #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE     0
 #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE    1
 
+/*
+ * Sharing ENOMEM helper.
+ *
+ * As with paging, use the domctl for teardown/setup of the
+ * helper<->hypervisor interface.
+ *
+ * If set up, this ring is used to communicate failed allocations
+ * in the unshare path. XENMEM_sharing_op_resume is used to wake up
+ * vcpus that could not unshare.
+ *
+ * Note that sharing can be turned on (as per the domctl below)
+ * *without* this ring being set up.
+ */
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING           3
+
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE    0
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE   1
+
 /* Use for teardown/setup of helper<->hypervisor interface for paging, 
  * access and sharing.*/
 struct xen_domctl_mem_event_op {
diff -r 39e3ee550391 -r d03aa37a0288 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -643,6 +643,9 @@ static inline struct domain *next_domain
  /* VCPU is blocked due to missing mem_access ring. */
 #define _VPF_mem_access      5
 #define VPF_mem_access       (1UL<<_VPF_mem_access)
+ /* VCPU is blocked due to missing mem_sharing ring. */
+#define _VPF_mem_sharing     6
+#define VPF_mem_sharing      (1UL<<_VPF_mem_sharing)
 
 static inline int vcpu_runnable(struct vcpu *v)
 {
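
To complete the picture, the consumer side: when an allocation fails in the
unshare path, the faulting vcpu blocks on the new VPF_mem_sharing flag and a
request is posted to this ring. The helper drains the ring, makes memory
available, and resumes the domain; a notification on the ring's event channel
likewise lands in mem_sharing_notification() above. A minimal sketch of that
loop, assuming the standard mem_event back ring and an
xc_memshr_domain_resume() wrapper for XENMEM_sharing_op_resume (the libxc
side is patch 5):

#include <string.h>
#include <xenctrl.h>
#include <xen/mem_event.h>

/* Sketch only: drain the sharing ring and wake any blocked vcpus. */
static void drain_sharing_ring(xc_interface *xch, domid_t domid,
                               mem_event_back_ring_t *ring)
{
    mem_event_request_t req;
    mem_event_response_t rsp;

    while ( RING_HAS_UNCONSUMED_REQUESTS(ring) )
    {
        /* Each request reports an allocation that failed during unshare. */
        req = *RING_GET_REQUEST(ring, ring->req_cons);
        ring->req_cons++;

        /* ... free up memory on behalf of the domain here ... */

        memset(&rsp, 0, sizeof(rsp));
        rsp.vcpu_id = req.vcpu_id;
        rsp.flags   = req.flags;
        *RING_GET_RESPONSE(ring, ring->rsp_prod_pvt) = rsp;
        ring->rsp_prod_pvt++;
        RING_PUSH_RESPONSES(ring);
    }

    /* XENMEM_sharing_op_resume unpauses vcpus blocked on VPF_mem_sharing. */
    xc_memshr_domain_resume(xch, domid);
}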
