xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: wei.liu2@citrix.com, andrew.cooper3@citrix.com,
	ian.jackson@eu.citrix.com, jbeulich@suse.com,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	roger.pau@citrix.com
Subject: [PATCH v5 05/13] pvh/acpi: Handle ACPI accesses for PVH guests
Date: Fri, 16 Dec 2016 18:18:31 -0500	[thread overview]
Message-ID: <1481930319-4796-6-git-send-email-boris.ostrovsky@oracle.com> (raw)
In-Reply-To: <1481930319-4796-1-git-send-email-boris.ostrovsky@oracle.com>

Subsequent domctl access to ACPI registers and VCPU map will use the
same code. We create acpi_[cpumap_]access_common() routines in anticipation
of these changes.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v5:
* Code movement due to changes in patch 4
* Added fallthrough switch statement comments
* Free d->avail_vcpus

 xen/arch/x86/hvm/acpi.c | 127 +++++++++++++++++++++++++++++++++++++++++++++++-
 xen/common/domain.c     |   1 +
 xen/common/domctl.c     |   5 ++
 xen/include/xen/sched.h |   3 ++
 4 files changed, 134 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/acpi.c b/xen/arch/x86/hvm/acpi.c
index 9b2885e..b2299a4 100644
--- a/xen/arch/x86/hvm/acpi.c
+++ b/xen/arch/x86/hvm/acpi.c
@@ -2,12 +2,129 @@
  *
  * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
  */
+#include <xen/acpi.h>
 #include <xen/errno.h>
 #include <xen/lib.h>
 #include <xen/sched.h>
 
 #include <public/arch-x86/xen.h>
 
+/*
+ * Common handler for accesses to the "available VCPUs" bitmap at
+ * XEN_ACPI_CPU_MAP, shared by guest I/O emulation and (per the commit
+ * message) subsequent domctl-initiated accesses.  Reads copy out of
+ * d->avail_vcpus; writes are refused - only the toolstack updates the map.
+ */
+static int acpi_cpumap_access_common(struct domain *d,
+                                     int dir, unsigned int port,
+                                     unsigned int bytes, uint32_t *val)
+{
+    /* Byte offset into the VCPU bitmap at which this access starts. */
+    unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
+
+    /* The CPU-map I/O range must not run into the GPE0 block. */
+    BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN
+                 >= ACPI_GPE0_BLK_ADDRESS_V1);
+
+    if ( dir == XEN_DOMCTL_ACPI_READ )
+    {
+        /* Bits of *val NOT covered by this access; all clear if bytes >= 4. */
+        uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
+
+        /*
+         * Clear bits that we are about to read to in case we
+         * copy fewer than @bytes.
+         */
+        *val &= mask;
+
+        /*
+         * Copy only what lies within the (max_vcpus + 7) / 8 significant
+         * bytes of the bitmap; bytes beyond it read back as zero.
+         */
+        if ( ((d->max_vcpus + 7) / 8) > first_byte )
+            memcpy(val, (uint8_t *)d->avail_vcpus + first_byte,
+                   min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
+    }
+    else
+        /* Guests do not write CPU map */
+        return X86EMUL_UNHANDLEABLE;
+
+    return X86EMUL_OKAY;
+}
+
+/*
+ * Common handler for accesses to the virtual PM1a event block and GPE0
+ * block.  Each block is modelled as a 16-bit status register followed by
+ * a 16-bit enable register; only the bits present in the corresponding
+ * mask_sts/mask_en are implemented (Global Lock for PM1a, the CPU-hotplug
+ * bit for GPE0).
+ */
+static int acpi_access_common(struct domain *d,
+                              int dir, unsigned int port,
+                              unsigned int bytes, uint32_t *val)
+{
+    uint16_t *sts = NULL, *en = NULL;
+    const uint16_t *mask_sts = NULL, *mask_en = NULL;
+    static const uint16_t pm1a_sts_mask = ACPI_BITMASK_GLOBAL_LOCK_STATUS;
+    static const uint16_t pm1a_en_mask = ACPI_BITMASK_GLOBAL_LOCK_ENABLE;
+    static const uint16_t gpe0_sts_mask = 1U << XEN_ACPI_GPE0_CPUHP_BIT;
+    static const uint16_t gpe0_en_mask = 1U << XEN_ACPI_GPE0_CPUHP_BIT;
+
+    /* Only reached when no device model handles ACPI for this domain. */
+    ASSERT(!has_acpi_dm_ff(d));
+
+    /*
+     * Map the port onto one of the two status/enable register pairs.
+     * NOTE(review): "base ... base + sizeof(sts) + sizeof(en)" spans five
+     * ports, i.e. one byte past the end of the 4-byte register pair -
+     * should the upper bound not be "... - 1"?  Confirm intended range.
+     */
+    switch ( port )
+    {
+    case ACPI_PM1A_EVT_BLK_ADDRESS_V1 ...
+        ACPI_PM1A_EVT_BLK_ADDRESS_V1 +
+        sizeof(d->arch.hvm_domain.acpi.pm1a_sts) +
+        sizeof(d->arch.hvm_domain.acpi.pm1a_en):
+
+        sts = &d->arch.hvm_domain.acpi.pm1a_sts;
+        en = &d->arch.hvm_domain.acpi.pm1a_en;
+        mask_sts = &pm1a_sts_mask;
+        mask_en = &pm1a_en_mask;
+        break;
+
+    case ACPI_GPE0_BLK_ADDRESS_V1 ...
+        ACPI_GPE0_BLK_ADDRESS_V1 +
+        sizeof(d->arch.hvm_domain.acpi.gpe0_sts) +
+        sizeof(d->arch.hvm_domain.acpi.gpe0_en):
+
+        sts = &d->arch.hvm_domain.acpi.gpe0_sts;
+        en = &d->arch.hvm_domain.acpi.gpe0_en;
+        mask_sts = &gpe0_sts_mask;
+        mask_en = &gpe0_en_mask;
+        break;
+
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    if ( dir == XEN_DOMCTL_ACPI_READ )
+    {
+        /* Bits of *val NOT covered by this access; all clear if bytes >= 4. */
+        uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
+        /* Register pair as one 32-bit value: enable high, status low. */
+        uint32_t data = (((uint32_t)*en) << 16) | *sts;
+
+        /*
+         * Shift down to the first byte being accessed.
+         * NOTE(review): "port & 3" gives the offset within the pair only
+         * if the block's base address is 4-byte aligned - confirm for both
+         * ACPI_PM1A_EVT_BLK_ADDRESS_V1 and ACPI_GPE0_BLK_ADDRESS_V1.
+         */
+        data >>= 8 * (port & 3);
+        *val = (*val & mask) | (data & ~mask);
+    }
+    else
+    {
+        uint32_t v = *val;
+
+        /*
+         * Enter at the byte the access starts on; each case consumes one
+         * byte of @v (bytes counts those remaining) and falls through to
+         * the next register byte.
+         */
+        /* Status register is write-1-to-clear by guests */
+        switch ( port & 3 )
+        {
+        case 0:
+            *sts &= ~(v & 0xff);
+            *sts &= *mask_sts;
+            if ( !--bytes )
+                break;
+            v >>= 8;
+            /* fallthrough */
+        case 1:
+            *sts &= ~((v & 0xff) << 8);
+            *sts &= *mask_sts;
+            if ( !--bytes )
+                break;
+            v >>= 8;
+            /* fallthrough */
+        case 2:
+            /* Enable register is plain read/write, limited to *mask_en. */
+            *en = ((*en & 0xff00) | (v & 0xff)) & *mask_en;
+            if ( !--bytes )
+                break;
+            v >>= 8;
+            /* fallthrough */
+        case 3:
+            *en = (((v & 0xff) << 8) | (*en & 0xff)) & *mask_en;
+            break;
+        }
+    }
+
+    return X86EMUL_OKAY;
+}
+
+
 int hvm_acpi_domctl_access(struct domain *d, uint8_t rw,
                            const xen_acpi_access_t *access,
                            XEN_GUEST_HANDLE_PARAM(void) arg)
@@ -18,13 +135,19 @@ int hvm_acpi_domctl_access(struct domain *d, uint8_t rw,
 static int acpi_guest_access(int dir, unsigned int port,
                              unsigned int bytes, uint32_t *val)
 {
-    return X86EMUL_UNHANDLEABLE;
+    /* Forward to the common handler, translating the ioreq direction. */
+    return  acpi_access_common(current->domain,
+                               (dir == IOREQ_READ) ?
+                               XEN_DOMCTL_ACPI_READ: XEN_DOMCTL_ACPI_WRITE,
+                               port, bytes, val);
 }
 
 static int acpi_cpumap_guest_access(int dir, unsigned int port,
                                     unsigned int bytes, uint32_t *val)
 {
-    return X86EMUL_UNHANDLEABLE;
+    /* Forward to the common handler, translating the ioreq direction. */
+    return  acpi_cpumap_access_common(current->domain,
+                                      (dir == IOREQ_READ) ?
+                                      XEN_DOMCTL_ACPI_READ: XEN_DOMCTL_ACPI_WRITE,
+                                      port, bytes, val);
 }
 
 void hvm_acpi_init(struct domain *d)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 3abaca9..cb8df09 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -847,6 +847,7 @@ static void complete_domain_destroy(struct rcu_head *head)
     xsm_free_security_domain(d);
     free_cpumask_var(d->domain_dirty_cpumask);
     xfree(d->vcpu);
+    xfree(d->avail_vcpus);
     free_domain_struct(d);
 
     send_global_virq(VIRQ_DOM_EXC);
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index b0ee961..0a08b83 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -651,6 +651,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                 goto maxvcpu_out;
         }
 
+        d->avail_vcpus = xzalloc_array(unsigned long,
+                                       BITS_TO_LONGS(d->max_vcpus));
+        if ( !d->avail_vcpus )
+            goto maxvcpu_out;
+
         ret = 0;
 
     maxvcpu_out:
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 063efe6..bee190f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -315,6 +315,9 @@ struct domain
     unsigned int     max_vcpus;
     struct vcpu    **vcpu;
 
+    /* Bitmap of available VCPUs. */
+    unsigned long   *avail_vcpus;
+
     shared_info_t   *shared_info;     /* shared data area */
 
     spinlock_t       domain_lock;
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

  parent reply	other threads:[~2016-12-16 23:18 UTC|newest]

Thread overview: 44+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-12-16 23:18 [PATCH v5 00/13] PVH VCPU hotplug support Boris Ostrovsky
2016-12-16 23:18 ` [PATCH v5 01/13] x86/pmtimer: Move ACPI registers from PMTState to hvm_domain Boris Ostrovsky
2016-12-19 14:12   ` Jan Beulich
2016-12-16 23:18 ` [PATCH v5 02/13] acpi/x86: Define ACPI IO registers for PVH guests Boris Ostrovsky
2016-12-20 18:07   ` Julien Grall
2016-12-16 23:18 ` [PATCH v5 03/13] domctl: Add XEN_DOMCTL_acpi_access Boris Ostrovsky
2016-12-19 14:17   ` Jan Beulich
2016-12-19 14:48     ` Boris Ostrovsky
2016-12-19 14:53       ` Jan Beulich
2016-12-16 23:18 ` [PATCH v5 04/13] pvh/acpi: Install handlers for ACPI-related PVH IO accesses Boris Ostrovsky
2016-12-20 11:24   ` Jan Beulich
2016-12-20 14:03     ` Boris Ostrovsky
2016-12-20 14:10       ` Jan Beulich
2016-12-20 14:16         ` Boris Ostrovsky
2016-12-20 14:45           ` Jan Beulich
2016-12-20 14:55             ` Andrew Cooper
2016-12-20 15:31               ` Boris Ostrovsky
2016-12-16 23:18 ` Boris Ostrovsky [this message]
2016-12-20 11:50   ` [PATCH v5 05/13] pvh/acpi: Handle ACPI accesses for PVH guests Jan Beulich
2016-12-20 14:35     ` Boris Ostrovsky
2016-12-20 14:47       ` Jan Beulich
2016-12-20 15:29         ` Boris Ostrovsky
2016-12-20 15:41           ` Jan Beulich
2016-12-20 16:46             ` Andrew Cooper
2016-12-20 16:51               ` Boris Ostrovsky
2016-12-16 23:18 ` [PATCH v5 06/13] x86/domctl: Handle ACPI access from domctl Boris Ostrovsky
2016-12-20 13:24   ` Jan Beulich
2016-12-20 14:45     ` Boris Ostrovsky
2016-12-20 14:52       ` Jan Beulich
2016-12-16 23:18 ` [PATCH v5 07/13] events/x86: Define SCI virtual interrupt Boris Ostrovsky
2016-12-16 23:18 ` [PATCH v5 08/13] pvh: Send an SCI on VCPU hotplug event Boris Ostrovsky
2016-12-20 13:37   ` Jan Beulich
2016-12-20 14:54     ` Boris Ostrovsky
2016-12-16 23:18 ` [PATCH v5 09/13] libxl: Update xenstore on VCPU hotplug for all guest types Boris Ostrovsky
2017-01-04 10:34   ` Wei Liu
2017-01-04 13:53     ` Boris Ostrovsky
2016-12-16 23:18 ` [PATCH v5 10/13] tools: Call XEN_DOMCTL_acpi_access on PVH VCPU hotplug Boris Ostrovsky
2017-01-04 10:35   ` Wei Liu
2016-12-16 23:18 ` [PATCH v5 11/13] pvh: Set online VCPU map to avail_vcpus Boris Ostrovsky
2016-12-16 23:18 ` [PATCH v5 12/13] pvh/acpi: Save ACPI registers for PVH guests Boris Ostrovsky
2016-12-20 13:57   ` Jan Beulich
2016-12-20 15:09     ` Boris Ostrovsky
2016-12-20 15:40       ` Jan Beulich
2016-12-16 23:18 ` [PATCH v5 13/13] docs: Describe PVHv2's VCPU hotplug procedure Boris Ostrovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1481930319-4796-6-git-send-email-boris.ostrovsky@oracle.com \
    --to=boris.ostrovsky@oracle.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=roger.pau@citrix.com \
    --cc=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).