From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: wei.liu2@citrix.com, andrew.cooper3@citrix.com,
ian.jackson@eu.citrix.com, jbeulich@suse.com,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
roger.pau@citrix.com
Subject: [PATCH v6 04/12] pvh/acpi: Handle ACPI accesses for PVH guests
Date: Tue, 3 Jan 2017 09:04:08 -0500 [thread overview]
Message-ID: <1483452256-2879-5-git-send-email-boris.ostrovsky@oracle.com> (raw)
In-Reply-To: <1483452256-2879-1-git-send-email-boris.ostrovsky@oracle.com>
Subsequent domctl accesses to the VCPU map will use the same code. We create
the acpi_cpumap_access_common() routine in anticipation of these changes.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v6:
* ACPI registers are only accessed by guest code (not by domctl), thus
acpi_access_common() is no longer needed
* Adjusted access direction (RW) to be a boolean.
* Dropped unnecessary masking of status register
xen/arch/x86/hvm/acpi.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++-
xen/common/domain.c | 1 +
xen/common/domctl.c | 5 +++
xen/include/xen/sched.h | 3 ++
4 files changed, 117 insertions(+), 2 deletions(-)
diff --git a/xen/arch/x86/hvm/acpi.c b/xen/arch/x86/hvm/acpi.c
index 15a9a0e..f0a84f9 100644
--- a/xen/arch/x86/hvm/acpi.c
+++ b/xen/arch/x86/hvm/acpi.c
@@ -2,12 +2,43 @@
*
* Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
*/
+#include <xen/acpi.h>
#include <xen/errno.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <public/arch-x86/xen.h>
+static int acpi_cpumap_access_common(struct domain *d, bool is_write,
+ unsigned int port,
+ unsigned int bytes, uint32_t *val)
+{
+ unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
+
+ BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN
+ > ACPI_GPE0_BLK_ADDRESS_V1);
+
+ if ( !is_write )
+ {
+ uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
+
+ /*
+ * Clear bits that we are about to read to in case we
+ * copy fewer than @bytes.
+ */
+ *val &= mask;
+
+ if ( ((d->max_vcpus + 7) / 8) > first_byte )
+ memcpy(val, (uint8_t *)d->avail_vcpus + first_byte,
+ min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
+ }
+ else
+ /* Guests do not write CPU map */
+ return X86EMUL_UNHANDLEABLE;
+
+ return X86EMUL_OKAY;
+}
+
int hvm_acpi_domctl_access(struct domain *d,
const struct xen_domctl_acpi_access *access)
{
@@ -17,13 +48,88 @@ int hvm_acpi_domctl_access(struct domain *d,
static int acpi_cpumap_guest_access(int dir, unsigned int port,
unsigned int bytes, uint32_t *val)
{
- return X86EMUL_UNHANDLEABLE;
+ return acpi_cpumap_access_common(current->domain,
+ (dir == IOREQ_WRITE) ? true : false,
+ port, bytes, val);
}
static int acpi_guest_access(int dir, unsigned int port,
unsigned int bytes, uint32_t *val)
{
- return X86EMUL_UNHANDLEABLE;
+ struct domain *d = current->domain;
+ uint16_t *sts = NULL, *en = NULL;
+ const uint16_t *mask_en = NULL;
+ static const uint16_t pm1a_en_mask = ACPI_BITMASK_GLOBAL_LOCK_ENABLE;
+ static const uint16_t gpe0_en_mask = 1U << XEN_ACPI_GPE0_CPUHP_BIT;
+
+ ASSERT(!has_acpi_dm_ff(d));
+
+ switch ( port )
+ {
+ case ACPI_PM1A_EVT_BLK_ADDRESS_V1 ...
+ ACPI_PM1A_EVT_BLK_ADDRESS_V1 +
+ sizeof(d->arch.hvm_domain.acpi.pm1a_sts) +
+ sizeof(d->arch.hvm_domain.acpi.pm1a_en):
+
+ sts = &d->arch.hvm_domain.acpi.pm1a_sts;
+ en = &d->arch.hvm_domain.acpi.pm1a_en;
+ mask_en = &pm1a_en_mask;
+ break;
+
+ case ACPI_GPE0_BLK_ADDRESS_V1 ...
+ ACPI_GPE0_BLK_ADDRESS_V1 +
+ sizeof(d->arch.hvm_domain.acpi.gpe0_sts) +
+ sizeof(d->arch.hvm_domain.acpi.gpe0_en):
+
+ sts = &d->arch.hvm_domain.acpi.gpe0_sts;
+ en = &d->arch.hvm_domain.acpi.gpe0_en;
+ mask_en = &gpe0_en_mask;
+ break;
+
+ default:
+ return X86EMUL_UNHANDLEABLE;
+ }
+
+ if ( dir == IOREQ_READ )
+ {
+ uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
+ uint32_t data = (((uint32_t)*en) << 16) | *sts;
+
+ data >>= 8 * (port & 3);
+ *val = (*val & mask) | (data & ~mask);
+ }
+ else
+ {
+ uint32_t v = *val;
+
+ /* Status register is write-1-to-clear */
+ switch ( port & 3 )
+ {
+ case 0:
+ *sts &= ~(v & 0xff);
+ if ( !--bytes )
+ break;
+ v >>= 8;
+ /* fallthrough */
+ case 1:
+ *sts &= ~((v & 0xff) << 8);
+ if ( !--bytes )
+ break;
+ v >>= 8;
+ /* fallthrough */
+ case 2:
+ *en = ((*en & 0xff00) | (v & 0xff)) & *mask_en;
+ if ( !--bytes )
+ break;
+ v >>= 8;
+ /* fallthrough */
+ case 3:
+ *en = (((v & 0xff) << 8) | (*en & 0xff)) & *mask_en;
+ break;
+ }
+ }
+
+ return X86EMUL_OKAY;
}
void hvm_acpi_init(struct domain *d)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 05130e2..ca1f0ed 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -847,6 +847,7 @@ static void complete_domain_destroy(struct rcu_head *head)
xsm_free_security_domain(d);
free_cpumask_var(d->domain_dirty_cpumask);
xfree(d->vcpu);
+ xfree(d->avail_vcpus);
free_domain_struct(d);
send_global_virq(VIRQ_DOM_EXC);
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index b0ee961..0a08b83 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -651,6 +651,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
goto maxvcpu_out;
}
+ d->avail_vcpus = xzalloc_array(unsigned long,
+ BITS_TO_LONGS(d->max_vcpus));
+ if ( !d->avail_vcpus )
+ goto maxvcpu_out;
+
ret = 0;
maxvcpu_out:
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 063efe6..bee190f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -315,6 +315,9 @@ struct domain
unsigned int max_vcpus;
struct vcpu **vcpu;
+ /* Bitmap of available VCPUs. */
+ unsigned long *avail_vcpus;
+
shared_info_t *shared_info; /* shared data area */
spinlock_t domain_lock;
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-01-03 14:04 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-01-03 14:04 [PATCH v6 00/12] PVH VCPU hotplug support Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 01/12] domctl: Add XEN_DOMCTL_acpi_access Boris Ostrovsky
2017-01-03 18:21 ` Daniel De Graaf
2017-01-03 20:51 ` Konrad Rzeszutek Wilk
2017-01-03 14:04 ` [PATCH v6 02/12] x86/save: public/arch-x86/hvm/save.h is available to hypervisor and tools only Boris Ostrovsky
2017-01-03 16:55 ` Jan Beulich
2017-01-03 14:04 ` [PATCH v6 03/12] pvh/acpi: Install handlers for ACPI-related PVH IO accesses Boris Ostrovsky
2017-01-03 14:04 ` Boris Ostrovsky [this message]
2017-01-03 14:04 ` [PATCH v6 05/12] x86/domctl: Handle ACPI access from domctl Boris Ostrovsky
2017-07-31 14:14 ` Ross Lagerwall
2017-07-31 14:59 ` Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 06/12] events/x86: Define SCI virtual interrupt Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 07/12] pvh: Send an SCI on VCPU hotplug event Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 08/12] libxl: Update xenstore on VCPU hotplug for all guest types Boris Ostrovsky
2017-01-04 10:36 ` Wei Liu
2017-01-03 14:04 ` [PATCH v6 09/12] tools: Call XEN_DOMCTL_acpi_access on PVH VCPU hotplug Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 10/12] pvh: Set online VCPU map to avail_vcpus Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 11/12] pvh/acpi: Save ACPI registers for PVH guests Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 12/12] docs: Describe PVHv2's VCPU hotplug procedure Boris Ostrovsky
2017-01-03 16:58 ` Jan Beulich
2017-01-03 19:33 ` Boris Ostrovsky
2017-01-04 9:26 ` Jan Beulich
2017-01-03 18:19 ` Stefano Stabellini
2017-01-03 20:31 ` Boris Ostrovsky
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1483452256-2879-5-git-send-email-boris.ostrovsky@oracle.com \
--to=boris.ostrovsky@oracle.com \
--cc=andrew.cooper3@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=roger.pau@citrix.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).