From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: wei.liu2@citrix.com, andrew.cooper3@citrix.com,
ian.jackson@eu.citrix.com, jbeulich@suse.com,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
roger.pau@citrix.com
Subject: [PATCH v4 12/15] tools: Call XEN_DOMCTL_acpi_access on PVH VCPU hotplug
Date: Tue, 29 Nov 2016 10:33:19 -0500
Message-ID: <1480433602-13290-13-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1480433602-13290-1-git-send-email-boris.ostrovsky@oracle.com>

Provide a libxc interface for accessing ACPI registers via
XEN_DOMCTL_acpi_access. When a VCPU is hot-plugged into (or unplugged
from) a PVH guest, update the VCPU map by writing it to ACPI's
XEN_ACPI_CPU_MAP register, then notify the guest with an SCI by setting
the CPU-hotplug bit in the GPE0 status register.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v4:
* New patch
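
For illustration only (not part of the patch): below is a minimal sketch
of a caller driving the new interface. The helper name is hypothetical;
XEN_ACPI_CPU_MAP, ACPI_GPE0_BLK_ADDRESS_V1 and XEN_GPE0_CPUHP_BIT are
assumed to be the constants introduced earlier in this series. It
mirrors what libxl__arch_set_vcpuonline() does in the libxl_x86.c hunk
below.

  static int set_cpu_map_and_notify(xc_interface *xch, domid_t domid,
                                    uint8_t *map, unsigned int map_size)
  {
      uint16_t sts = 1 << XEN_GPE0_CPUHP_BIT;
      int rc;

      /* Tell the emulated ACPI which VCPUs are now available. */
      rc = xc_acpi_iowrite(xch, domid, XEN_ACPI_CPU_MAP, map_size, map);
      if (rc)
          return rc;

      /* Raise the CPU-hotplug GPE0 status bit so the guest gets an SCI. */
      return xc_acpi_iowrite(xch, domid, ACPI_GPE0_BLK_ADDRESS_V1,
                             sizeof(sts), &sts);
  }

This is the path libxl_set_vcpuonline() takes for guests running with
device model version "none".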

 tools/libxc/include/xenctrl.h | 20 ++++++++++++++++++++
 tools/libxc/xc_domain.c       | 42 ++++++++++++++++++++++++++++++++++++++++++
 tools/libxl/libxl.c           |  8 +++++++-
 tools/libxl/libxl_arch.h      |  4 ++++
 tools/libxl/libxl_arm.c       |  6 ++++++
 tools/libxl/libxl_x86.c       | 21 +++++++++++++++++++++
 6 files changed, 100 insertions(+), 1 deletion(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 2c83544..e4d735f 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2710,6 +2710,26 @@ int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout);
 int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout);
 int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout);
 
+int xc_acpi_access(xc_interface *xch, domid_t domid,
+                   uint8_t rw, uint8_t space_id, unsigned long addr,
+                   unsigned int bytes, void *val);
+
+static inline int xc_acpi_ioread(xc_interface *xch, domid_t domid,
+                                 unsigned long port,
+                                 unsigned int bytes, void *val)
+{
+    return xc_acpi_access(xch, domid, XEN_DOMCTL_ACPI_READ, XEN_ACPI_SYSTEM_IO,
+                          port, bytes, val);
+}
+
+static inline int xc_acpi_iowrite(xc_interface *xch, domid_t domid,
+                                  unsigned long port,
+                                  unsigned int bytes, void *val)
+{
+    return xc_acpi_access(xch, domid, XEN_DOMCTL_ACPI_WRITE, XEN_ACPI_SYSTEM_IO,
+                          port, bytes, val);
+}
+
 /* Compat shims */
 #include "xenctrl_compat.h"
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 296b852..15c5136 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2520,6 +2520,48 @@ int xc_domain_soft_reset(xc_interface *xch,
     domctl.domain = (domid_t)domid;
     return do_domctl(xch, &domctl);
 }
+
+int
+xc_acpi_access(xc_interface *xch, domid_t domid,
+               uint8_t rw, uint8_t space_id,
+               unsigned long address, unsigned int bytes, void *val)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(val, bytes, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, val) )
+        return -1;
+
+    /* The buffer is transferred in chunks of at most 32 bytes. */
+    while ( bytes != 0 )
+    {
+        unsigned int chunk = (bytes > 32) ? 32 : bytes;
+
+        memset(&domctl, 0, sizeof(domctl));
+        domctl.domain = domid;
+        domctl.cmd = XEN_DOMCTL_acpi_access;
+        domctl.u.acpi_access.gas.space_id = space_id;
+        domctl.u.acpi_access.gas.bit_width = chunk * 8;
+        domctl.u.acpi_access.gas.bit_offset = 0;
+        domctl.u.acpi_access.gas.address = address;
+        domctl.u.acpi_access.rw = rw;
+        set_xen_guest_handle(domctl.u.acpi_access.val, val);
+
+        if ( do_domctl(xch, &domctl) != 0 )
+        {
+            xc_hypercall_bounce_post(xch, val);
+            return -1;
+        }
+
+        bytes -= chunk;
+        address += chunk;
+    }
+
+    xc_hypercall_bounce_post(xch, val);
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 33c5e4c..d80ab77 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5147,8 +5147,14 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
     switch (libxl__domain_type(gc, domid)) {
     case LIBXL_DOMAIN_TYPE_HVM:
         switch (libxl__device_model_version_running(gc, domid)) {
-        case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN_TRADITIONAL:
         case LIBXL_DEVICE_MODEL_VERSION_NONE:
+            rc = libxl__arch_set_vcpuonline(gc, domid, cpumap);
+            if (rc < 0) {
+                LOGE(ERROR, "Can't change vcpu online map (%d)", rc);
+                goto out;
+            }
+            /* fallthrough */
+        case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN_TRADITIONAL:
             rc = libxl__set_vcpuonline_xenstore(gc, domid, cpumap, &info);
             break;
         case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN:
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index 5e1fc60..1869626 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -71,6 +71,10 @@ int libxl__arch_extra_memory(libxl__gc *gc,
                              const libxl_domain_build_info *info,
                              uint64_t *out);
 
+_hidden
+int libxl__arch_set_vcpuonline(libxl__gc *gc, uint32_t domid,
+                               libxl_bitmap *cpumap);
+
 #if defined(__i386__) || defined(__x86_64__)
 
 #define LAPIC_BASE_ADDRESS 0xfee00000
diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
index d842d88..a64af1b 100644
--- a/tools/libxl/libxl_arm.c
+++ b/tools/libxl/libxl_arm.c
@@ -126,6 +126,12 @@ out:
     return rc;
 }
 
+int libxl__arch_set_vcpuonline(libxl__gc *gc, uint32_t domid,
+                               libxl_bitmap *cpumap)
+{
+    return ERROR_FAIL;
+}
+
 static struct arch_info {
     const char *guest_type;
     const char *timer_compat;
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index e1844c8..e31b159 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -3,6 +3,9 @@
 
 #include <xc_dom.h>
 
+#include <xen/arch-x86/xen.h>
+#include <xen/hvm/ioreq.h>
+
 int libxl__arch_domain_prepare_config(libxl__gc *gc,
                                       libxl_domain_config *d_config,
                                       xc_domain_configuration_t *xc_config)
@@ -368,6 +371,24 @@ int libxl__arch_extra_memory(libxl__gc *gc,
     return 0;
 }
 
+int libxl__arch_set_vcpuonline(libxl__gc *gc, uint32_t domid,
+                               libxl_bitmap *cpumap)
+{
+    int rc;
+
+    /* Update the VCPU map. */
+    rc = xc_acpi_iowrite(CTX->xch, domid, XEN_ACPI_CPU_MAP,
+                         cpumap->size, cpumap->map);
+    if (!rc) {
+        /* Send an SCI by setting the CPU-hotplug bit in GPE0 status. */
+        uint16_t val = 1 << XEN_GPE0_CPUHP_BIT;
+        rc = xc_acpi_iowrite(CTX->xch, domid, ACPI_GPE0_BLK_ADDRESS_V1,
+                             sizeof(val), &val);
+    }
+
+    return rc;
+}
+
 int libxl__arch_domain_init_hw_description(libxl__gc *gc,
                                            libxl_domain_build_info *info,
                                            libxl__domain_build_state *state,
--
2.7.4