From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: wei.liu2@citrix.com, andrew.cooper3@citrix.com,
	ian.jackson@eu.citrix.com, jbeulich@suse.com,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Daniel De Graaf <dgdegra@tycho.nsa.gov>,
	roger.pau@citrix.com
Subject: [PATCH v3 01/11] x86/domctl: Add XEN_DOMCTL_set_avail_vcpus
Date: Mon, 21 Nov 2016 16:00:37 -0500
Message-ID: <1479762047-29431-2-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1479762047-29431-1-git-send-email-boris.ostrovsky@oracle.com>

This domctl is called when a VCPU is hot-(un)plugged into a guest (via
'xl vcpu-set').

The primary reason for adding this call is that for PVH guests the
hypervisor itself needs to send an SCI and update the GPE registers.
This is unlike HVM guests, where qemu performs these tasks.
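
Illustration only (not part of this patch): a minimal sketch of how a
toolstack could drive the new domctl through the libxc wrapper added
below. The domid and VCPU count are made-up values.

    #include <xenctrl.h>

    /* Hypothetical example: make the first 4 VCPUs of domain 5 available. */
    int set_guest_vcpus(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if (!xch)
            return -1;

        /* New call added by this series; returns 0 on success. */
        rc = xc_domain_set_avail_vcpus(xch, 5 /* domid */, 4 /* num_vcpus */);

        xc_interface_close(xch);
        return rc;
    }

In practice this path is exercised by 'xl vcpu-set' through
libxl_set_vcpuonline(), as in the libxl hunk below.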

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
CC: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
Changes in v3:
* Clarified commit message to include reason for having the domctl
* Moved domctl to common code
* Made avail_vcpus a bitmap
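
To make the bitmap change concrete, a short sketch (illustration only,
not in the patch) of the representation, using the same helpers as the
XEN_DOMCTL_set_avail_vcpus handler below; 'd' and 'num' are as in that
handler:

    /* One bit per possible VCPU, rounded up to whole unsigned longs. */
    unsigned long *avail =
        xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus));

    if ( avail )
        bitmap_fill(avail, num);  /* sets bits [0, num): VCPUs 0..num-1 */

    /* A consumer then checks VCPU v with test_bit(v, avail). */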

 tools/flask/policy/modules/dom0.te  |  2 +-
 tools/flask/policy/modules/xen.if   |  4 ++--
 tools/libxc/include/xenctrl.h       |  5 +++++
 tools/libxc/xc_domain.c             | 12 ++++++++++++
 tools/libxl/libxl.c                 |  7 +++++++
 tools/libxl/libxl_dom.c             |  7 +++++++
 xen/common/domctl.c                 | 25 +++++++++++++++++++++++++
 xen/include/public/domctl.h         |  9 +++++++++
 xen/include/xen/sched.h             |  6 ++++++
 xen/xsm/flask/hooks.c               |  3 +++
 xen/xsm/flask/policy/access_vectors |  2 ++
 11 files changed, 79 insertions(+), 3 deletions(-)

diff --git a/tools/flask/policy/modules/dom0.te b/tools/flask/policy/modules/dom0.te
index 2d982d9..fd60c39 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -38,7 +38,7 @@ allow dom0_t dom0_t:domain {
 };
 allow dom0_t dom0_t:domain2 {
 	set_cpuid gettsc settsc setscheduler set_max_evtchn set_vnumainfo
-	get_vnumainfo psr_cmt_op psr_cat_op
+	get_vnumainfo psr_cmt_op psr_cat_op set_avail_vcpus
 };
 allow dom0_t dom0_t:resource { add remove };
 
diff --git a/tools/flask/policy/modules/xen.if b/tools/flask/policy/modules/xen.if
index eb646f5..afbedc2 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -52,7 +52,7 @@ define(`create_domain_common', `
 			settime setdomainhandle getvcpucontext };
 	allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim
 			set_max_evtchn set_vnumainfo get_vnumainfo cacheflush
-			psr_cmt_op psr_cat_op soft_reset };
+			psr_cmt_op psr_cat_op soft_reset set_avail_vcpus };
 	allow $1 $2:security check_context;
 	allow $1 $2:shadow enable;
 	allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op updatemp };
@@ -85,7 +85,7 @@ define(`manage_domain', `
 			getaddrsize pause unpause trigger shutdown destroy
 			setaffinity setdomainmaxmem getscheduler resume
 			setpodtarget getpodtarget };
-    allow $1 $2:domain2 set_vnumainfo;
+    allow $1 $2:domain2 { set_vnumainfo set_avail_vcpus };
 ')
 
 # migrate_domain_out(priv, target)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 2c83544..49e9b9f 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1256,6 +1256,11 @@ int xc_domain_getvnuma(xc_interface *xch,
 int xc_domain_soft_reset(xc_interface *xch,
                          uint32_t domid);
 
+int xc_domain_set_avail_vcpus(xc_interface *xch,
+                              uint32_t domid,
+                              unsigned int num_vcpus);
+
+
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * PC BIOS standard E820 types and structure.
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 296b852..161a80e 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2520,6 +2520,18 @@ int xc_domain_soft_reset(xc_interface *xch,
     domctl.domain = (domid_t)domid;
     return do_domctl(xch, &domctl);
 }
+
+int xc_domain_set_avail_vcpus(xc_interface *xch,
+                              uint32_t domid,
+                              unsigned int num_vcpus)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_set_avail_vcpus;
+    domctl.domain = (domid_t)domid;
+    domctl.u.avail_vcpus.num = num_vcpus;
+    return do_domctl(xch, &domctl);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 33c5e4c..e936f38 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5164,6 +5164,13 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
     default:
         rc = ERROR_INVAL;
     }
+
+    if (!rc) {
+        rc = xc_domain_set_avail_vcpus(ctx->xch, domid, maxcpus);
+        if (rc)
+            LOG(ERROR, "Couldn't set available vcpu count");
+    }
+
 out:
     libxl_dominfo_dispose(&info);
     GC_FREE;
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index d519c8d..9037166 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -309,6 +309,13 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
         return ERROR_FAIL;
     }
 
+    rc = xc_domain_set_avail_vcpus(ctx->xch, domid,
+                                   libxl_bitmap_count_set(&info->avail_vcpus));
+    if (rc) {
+        LOG(ERROR, "Couldn't set available vcpu count (error %d)", rc);
+        return ERROR_FAIL;
+    }
+
     /*
      * Check if the domain has any CPU or node affinity already. If not, try
      * to build up the latter via automatic NUMA placement. In fact, in case
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index b0ee961..626f2cb 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -1157,6 +1157,31 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
             copyback = 1;
         break;
 
+    case XEN_DOMCTL_set_avail_vcpus:
+    {
+        unsigned int num = op->u.avail_vcpus.num;
+        unsigned long *avail_vcpus;
+
+        if ( num > d->max_vcpus || num == 0 )
+        {
+            ret = -EINVAL;
+            break;
+        }
+
+        avail_vcpus = xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus));
+        if ( !avail_vcpus )
+        {
+            ret = -ENOMEM;
+            break;
+        }
+
+        bitmap_fill(avail_vcpus, num);
+        xfree(d->avail_vcpus);
+        d->avail_vcpus = avail_vcpus;
+
+        break;
+    }
+
     default:
         ret = arch_do_domctl(op, d, u_domctl);
         break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 177319d..e7c84a9 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1059,6 +1059,13 @@ struct xen_domctl_psr_cmt_op {
 typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
 
+/* XEN_DOMCTL_set_avail_vcpus */
+struct xen_domctl_avail_vcpus {
+    uint32_t num; /* number of available vcpus */
+};
+typedef struct xen_domctl_avail_vcpus xen_domctl_avail_vcpus_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_avail_vcpus_t);
+
 /*  XEN_DOMCTL_MONITOR_*
  *
  * Enable/disable monitoring various VM events.
@@ -1221,6 +1228,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_monitor_op                    77
 #define XEN_DOMCTL_psr_cat_op                    78
 #define XEN_DOMCTL_soft_reset                    79
+#define XEN_DOMCTL_set_avail_vcpus               80
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1283,6 +1291,7 @@ struct xen_domctl {
         struct xen_domctl_psr_cmt_op        psr_cmt_op;
         struct xen_domctl_monitor_op        monitor_op;
         struct xen_domctl_psr_cat_op        psr_cat_op;
+        struct xen_domctl_avail_vcpus       avail_vcpus;
         uint8_t                             pad[128];
     } u;
 };
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 1fbda87..104ce61 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -313,6 +313,12 @@ struct domain
     domid_t          domain_id;
 
     unsigned int     max_vcpus;
+    /*
+     * Bitmap of VCPUs that were online when the guest was created,
+     * updated as VCPUs are later hot-(un)plugged.
+     */
+    unsigned long   *avail_vcpus;
+
     struct vcpu    **vcpu;
 
     shared_info_t   *shared_info;     /* shared data area */
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 177c11f..377549a 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -748,6 +748,9 @@ static int flask_domctl(struct domain *d, int cmd)
     case XEN_DOMCTL_soft_reset:
         return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SOFT_RESET);
 
+    case XEN_DOMCTL_set_avail_vcpus:
+        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_AVAIL_VCPUS);
+
     default:
         return avc_unknown_permission("domctl", cmd);
     }
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 49c9a9e..f8a5e6c 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -244,6 +244,8 @@ class domain2
     mem_sharing
 # XEN_DOMCTL_psr_cat_op
     psr_cat_op
+# XEN_DOMCTL_set_avail_vcpus
+    set_avail_vcpus
 }
 
 # Similar to class domain, but primarily contains domctls related to HVM domains
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

Thread overview: 51+ messages
2016-11-21 21:00 [PATCH v3 00/11] PVH VCPU hotplug support Boris Ostrovsky
2016-11-21 21:00 ` Boris Ostrovsky [this message]
2016-11-22 10:31   ` [PATCH v3 01/11] x86/domctl: Add XEN_DOMCTL_set_avail_vcpus Jan Beulich
2016-11-22 10:39     ` Jan Beulich
2016-11-22 12:34       ` Boris Ostrovsky
2016-11-22 13:59         ` Jan Beulich
2016-11-22 14:37           ` Boris Ostrovsky
2016-11-22 15:07             ` Jan Beulich
2016-11-22 15:43               ` Boris Ostrovsky
2016-11-22 16:01                 ` Jan Beulich
     [not found]                   ` <a4ac4c28-833b-df5f-ce34-1fa72f7c4cd2@oracle.com>
2016-11-22 23:47                     ` Boris Ostrovsky
2016-11-23  8:09                       ` Jan Beulich
2016-11-23 13:33                         ` Boris Ostrovsky
2016-11-23 13:58                           ` Jan Beulich
2016-11-23 14:16                             ` Boris Ostrovsky
2016-11-25 18:16                               ` Boris Ostrovsky
2016-11-28  7:59                                 ` Jan Beulich
2016-11-22 12:19     ` Boris Ostrovsky
2016-11-21 21:00 ` [PATCH v3 02/11] acpi: Define ACPI IO registers for PVH guests Boris Ostrovsky
2016-11-22 10:37   ` Jan Beulich
2016-11-22 12:28     ` Boris Ostrovsky
2016-11-22 14:07       ` Jan Beulich
2016-11-22 14:53         ` Boris Ostrovsky
2016-11-22 15:13           ` Jan Beulich
2016-11-22 15:52             ` Boris Ostrovsky
2016-11-22 16:02               ` Jan Beulich
2016-11-21 21:00 ` [PATCH v3 03/11] pvh: Set online VCPU map to avail_vcpus Boris Ostrovsky
2016-11-21 21:00 ` [PATCH v3 04/11] acpi: Make pmtimer optional in FADT Boris Ostrovsky
2016-11-21 21:00 ` [PATCH v3 05/11] acpi: Power and Sleep ACPI buttons are not emulated for PVH guests Boris Ostrovsky
2016-11-21 21:00 ` [PATCH v3 06/11] acpi: PVH guests need _E02 method Boris Ostrovsky
2016-11-22  9:13   ` Jan Beulich
2016-11-22 20:20   ` Konrad Rzeszutek Wilk
2016-11-21 21:00 ` [PATCH v3 07/11] pvh/ioreq: Install handlers for ACPI-related PVH IO accesses Boris Ostrovsky
2016-11-22 11:34   ` Jan Beulich
2016-11-22 12:38     ` Boris Ostrovsky
2016-11-22 14:08       ` Jan Beulich
2016-11-28 15:16         ` Boris Ostrovsky
2016-11-28 15:48           ` Roger Pau Monné
2016-11-21 21:00 ` [PATCH v3 08/11] pvh/acpi: Handle ACPI accesses for PVH guests Boris Ostrovsky
2016-11-22 14:11   ` Paul Durrant
2016-11-22 15:01   ` Jan Beulich
2016-11-22 15:30     ` Boris Ostrovsky
2016-11-22 16:05       ` Jan Beulich
2016-11-22 16:33         ` Boris Ostrovsky
2016-11-21 21:00 ` [PATCH v3 09/11] events/x86: Define SCI virtual interrupt Boris Ostrovsky
2016-11-22 15:25   ` Jan Beulich
2016-11-22 15:57     ` Boris Ostrovsky
2016-11-22 16:07       ` Jan Beulich
2016-11-21 21:00 ` [PATCH v3 10/11] pvh: Send an SCI on VCPU hotplug event Boris Ostrovsky
2016-11-22 15:32   ` Jan Beulich
2016-11-21 21:00 ` [PATCH v3 11/11] docs: Describe PVHv2's VCPU hotplug procedure Boris Ostrovsky
