From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: wei.liu2@citrix.com, andrew.cooper3@citrix.com,
ian.jackson@eu.citrix.com, jbeulich@suse.com,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
roger.pau@citrix.com
Subject: [PATCH v4 01/15] x86/pmtimer: Move ACPI registers from PMTState to hvm_domain
Date: Tue, 29 Nov 2016 10:33:08 -0500
Message-ID: <1480433602-13290-2-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1480433602-13290-1-git-send-email-boris.ostrovsky@oracle.com>
These registers (the PM1a registers specifically) are not all specific to the PM timer
and are accessed by non-pmtimer code (for example, sleep/power button
emulation).
In addition to moving those registers to struct hvm_domain, rename the
HVM save state structures and routines as well.
No functional changes are introduced.
(While this file is being modified, also add emacs mode style rune)
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v4:
* New patch
tools/misc/xen-hvmctx.c | 4 +-
xen/arch/x86/hvm/pmtimer.c | 67 +++++++++++++++++++++-------------
xen/include/asm-x86/hvm/domain.h | 2 +
xen/include/asm-x86/hvm/vpt.h | 1 -
xen/include/public/arch-x86/hvm/save.h | 6 +--
5 files changed, 49 insertions(+), 31 deletions(-)
diff --git a/tools/misc/xen-hvmctx.c b/tools/misc/xen-hvmctx.c
index 32be120..8e8a245 100644
--- a/tools/misc/xen-hvmctx.c
+++ b/tools/misc/xen-hvmctx.c
@@ -342,7 +342,7 @@ static void dump_hpet(void)
static void dump_pmtimer(void)
{
- HVM_SAVE_TYPE(PMTIMER) p;
+ HVM_SAVE_TYPE(ACPI) p;
READ(p);
printf(" ACPI PM: TMR_VAL 0x%x, PM1a_STS 0x%x, PM1a_EN 0x%x\n",
p.tmr_val, (unsigned) p.pm1a_sts, (unsigned) p.pm1a_en);
@@ -462,7 +462,7 @@ int main(int argc, char **argv)
case HVM_SAVE_CODE(PIT): dump_pit(); break;
case HVM_SAVE_CODE(RTC): dump_rtc(); break;
case HVM_SAVE_CODE(HPET): dump_hpet(); break;
- case HVM_SAVE_CODE(PMTIMER): dump_pmtimer(); break;
+ case HVM_SAVE_CODE(ACPI): dump_pmtimer(); break;
case HVM_SAVE_CODE(MTRR): dump_mtrr(); break;
case HVM_SAVE_CODE(VIRIDIAN_DOMAIN): dump_viridian_domain(); break;
case HVM_SAVE_CODE(VIRIDIAN_VCPU): dump_viridian_vcpu(); break;
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 99d1e86..5144928 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -56,9 +56,11 @@
/* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
static void pmt_update_sci(PMTState *s)
{
+ struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
+
ASSERT(spin_is_locked(&s->lock));
- if ( s->pm.pm1a_en & s->pm.pm1a_sts & SCI_MASK )
+ if ( acpi->pm1a_en & acpi->pm1a_sts & SCI_MASK )
hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ);
else
hvm_isa_irq_deassert(s->vcpu->domain, SCI_IRQ);
@@ -72,7 +74,7 @@ void hvm_acpi_power_button(struct domain *d)
return;
spin_lock(&s->lock);
- s->pm.pm1a_sts |= PWRBTN_STS;
+ d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
pmt_update_sci(s);
spin_unlock(&s->lock);
}
@@ -85,7 +87,7 @@ void hvm_acpi_sleep_button(struct domain *d)
return;
spin_lock(&s->lock);
- s->pm.pm1a_sts |= SLPBTN_STS;
+ d->arch.hvm_domain.acpi.pm1a_sts |= SLPBTN_STS;
pmt_update_sci(s);
spin_unlock(&s->lock);
}
@@ -95,7 +97,8 @@ void hvm_acpi_sleep_button(struct domain *d)
static void pmt_update_time(PMTState *s)
{
uint64_t curr_gtime, tmp;
- uint32_t tmr_val = s->pm.tmr_val, msb = tmr_val & TMR_VAL_MSB;
+ struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
+ uint32_t tmr_val = acpi->tmr_val, msb = tmr_val & TMR_VAL_MSB;
ASSERT(spin_is_locked(&s->lock));
@@ -108,12 +111,12 @@ static void pmt_update_time(PMTState *s)
s->last_gtime = curr_gtime;
/* Update timer value atomically wrt lock-free reads in handle_pmt_io(). */
- *(volatile uint32_t *)&s->pm.tmr_val = tmr_val;
+ *(volatile uint32_t *)&acpi->tmr_val = tmr_val;
/* If the counter's MSB has changed, set the status bit */
if ( (tmr_val & TMR_VAL_MSB) != msb )
{
- s->pm.pm1a_sts |= TMR_STS;
+ acpi->pm1a_sts |= TMR_STS;
pmt_update_sci(s);
}
}
@@ -133,7 +136,8 @@ static void pmt_timer_callback(void *opaque)
pmt_update_time(s);
/* How close are we to the next MSB flip? */
- pmt_cycles_until_flip = TMR_VAL_MSB - (s->pm.tmr_val & (TMR_VAL_MSB - 1));
+ pmt_cycles_until_flip = TMR_VAL_MSB -
+ (s->vcpu->domain->arch.hvm_domain.acpi.tmr_val & (TMR_VAL_MSB - 1));
/* Overall time between MSB flips */
time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;
@@ -152,6 +156,7 @@ static int handle_evt_io(
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
struct vcpu *v = current;
+ struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
uint32_t addr, data, byte;
int i;
@@ -175,16 +180,16 @@ static int handle_evt_io(
{
/* PM1a_STS register bits are write-to-clear */
case 0 /* PM1a_STS_ADDR */:
- s->pm.pm1a_sts &= ~byte;
+ acpi->pm1a_sts &= ~byte;
break;
case 1 /* PM1a_STS_ADDR + 1 */:
- s->pm.pm1a_sts &= ~(byte << 8);
+ acpi->pm1a_sts &= ~(byte << 8);
break;
case 2 /* PM1a_EN_ADDR */:
- s->pm.pm1a_en = (s->pm.pm1a_en & 0xff00) | byte;
+ acpi->pm1a_en = (acpi->pm1a_en & 0xff00) | byte;
break;
case 3 /* PM1a_EN_ADDR + 1 */:
- s->pm.pm1a_en = (s->pm.pm1a_en & 0xff) | (byte << 8);
+ acpi->pm1a_en = (acpi->pm1a_en & 0xff) | (byte << 8);
break;
default:
gdprintk(XENLOG_WARNING,
@@ -197,7 +202,7 @@ static int handle_evt_io(
}
else /* p->dir == IOREQ_READ */
{
- data = s->pm.pm1a_sts | (((uint32_t) s->pm.pm1a_en) << 16);
+ data = acpi->pm1a_sts | (((uint32_t) acpi->pm1a_en) << 16);
data >>= 8 * addr;
if ( bytes == 1 ) data &= 0xff;
else if ( bytes == 2 ) data &= 0xffff;
@@ -215,6 +220,7 @@ static int handle_pmt_io(
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
struct vcpu *v = current;
+ struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
if ( bytes != 4 || dir != IOREQ_READ )
@@ -226,7 +232,7 @@ static int handle_pmt_io(
{
/* We hold the lock: update timer value and return it. */
pmt_update_time(s);
- *val = s->pm.tmr_val;
+ *val = acpi->tmr_val;
spin_unlock(&s->lock);
}
else
@@ -237,16 +243,17 @@ static int handle_pmt_io(
* updated value with a lock-free atomic read.
*/
spin_barrier(&s->lock);
- *val = read_atomic(&s->pm.tmr_val);
+ *val = read_atomic(&(acpi->tmr_val));
}
return X86EMUL_OKAY;
}
-static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
+static int acpi_save(struct domain *d, hvm_domain_context_t *h)
{
+ struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
- uint32_t x, msb = s->pm.tmr_val & TMR_VAL_MSB;
+ uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
int rc;
if ( !has_vpm(d) )
@@ -261,21 +268,21 @@ static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
x = (((s->vcpu->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(s->vcpu)) -
s->last_gtime) * s->scale) >> 32;
if ( x < 1UL<<31 )
- s->pm.tmr_val += x;
- if ( (s->pm.tmr_val & TMR_VAL_MSB) != msb )
- s->pm.pm1a_sts |= TMR_STS;
+ acpi->tmr_val += x;
+ if ( (acpi->tmr_val & TMR_VAL_MSB) != msb )
+ acpi->pm1a_sts |= TMR_STS;
/* No point in setting the SCI here because we'll already have saved the
* IRQ and *PIC state; we'll fix it up when we restore the domain */
-
- rc = hvm_save_entry(PMTIMER, 0, h, &s->pm);
+ rc = hvm_save_entry(ACPI, 0, h, acpi);
spin_unlock(&s->lock);
return rc;
}
-static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
+static int acpi_load(struct domain *d, hvm_domain_context_t *h)
{
+ struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
if ( !has_vpm(d) )
@@ -284,7 +291,7 @@ static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
spin_lock(&s->lock);
/* Reload the registers */
- if ( hvm_load_entry(PMTIMER, h, &s->pm) )
+ if ( hvm_load_entry(ACPI, h, acpi) )
{
spin_unlock(&s->lock);
return -EINVAL;
@@ -302,7 +309,7 @@ static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(PMTIMER, pmtimer_save, pmtimer_load,
+HVM_REGISTER_SAVE_RESTORE(ACPI, acpi_save, acpi_load,
1, HVMSR_PER_DOM);
int pmtimer_change_ioport(struct domain *d, unsigned int version)
@@ -377,5 +384,15 @@ void pmtimer_reset(struct domain *d)
return;
/* Reset the counter. */
- d->arch.hvm_domain.pl_time->vpmt.pm.tmr_val = 0;
+ d->arch.hvm_domain.acpi.tmr_val = 0;
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f34d784..d55b432 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -102,6 +102,8 @@ struct hvm_domain {
struct hvm_vioapic *vioapic;
struct hvm_hw_stdvga stdvga;
+ struct hvm_hw_acpi acpi;
+
/* VCPU which is current target for 8259 interrupts. */
struct vcpu *i8259_target;
diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h
index a27bea4..1b7213d 100644
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -121,7 +121,6 @@ typedef struct RTCState {
#define FREQUENCE_PMTIMER 3579545 /* Timer should run at 3.579545 MHz */
typedef struct PMTState {
- struct hvm_hw_pmtimer pm; /* 32bit timer value */
struct vcpu *vcpu; /* Keeps sync with this vcpu's guest-time */
uint64_t last_gtime; /* Last (guest) time we updated the timer */
uint32_t not_accounted; /* time not accounted at last update */
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 8d73b51..3997487 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -525,16 +525,16 @@ DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
/*
- * PM timer
+ * ACPI registers
*/
-struct hvm_hw_pmtimer {
+struct hvm_hw_acpi {
uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */
uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */
};
-DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
+DECLARE_HVM_SAVE_TYPE(ACPI, 13, struct hvm_hw_acpi);
/*
* MTRR MSRs
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
Thread overview: 52+ messages
2016-11-29 15:33 [PATCH v4 00/15] PVH VCPU hotplug support Boris Ostrovsky
2016-11-29 15:33 ` Boris Ostrovsky [this message]
2016-12-01 15:52 ` [PATCH v4 01/15] x86/pmtimer: Move ACPI registers from PMTState to hvm_domain Jan Beulich
2016-12-01 16:28 ` Boris Ostrovsky
2016-12-01 16:29 ` Andrew Cooper
2016-12-01 16:45 ` Boris Ostrovsky
2016-12-12 16:24 ` Wei Liu
2016-11-29 15:33 ` [PATCH v4 02/15] acpi: Make pmtimer optional in FADT Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 03/15] acpi: Power and Sleep ACPI buttons are not emulated for PVH guests Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 04/15] acpi: PVH guests need _E02 method Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 05/15] acpi/x86: Define ACPI IO registers for PVH guests Boris Ostrovsky
2016-12-01 15:57 ` Jan Beulich
2016-12-01 16:30 ` Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 06/15] domctl: Add XEN_DOMCTL_acpi_access Boris Ostrovsky
2016-12-01 16:06 ` Jan Beulich
2016-12-01 16:43 ` Boris Ostrovsky
2016-12-02 7:48 ` Jan Beulich
2016-12-12 13:08 ` Boris Ostrovsky
2016-12-12 14:02 ` Jan Beulich
2016-12-12 16:19 ` Boris Ostrovsky
2016-12-12 16:24 ` Jan Beulich
2016-12-12 13:28 ` Julien Grall
2016-12-12 16:11 ` Boris Ostrovsky
2016-12-13 13:02 ` Julien Grall
2016-11-29 15:33 ` [PATCH v4 07/15] pvh/acpi: Install handlers for ACPI-related PVH IO accesses Boris Ostrovsky
2016-12-01 16:32 ` Jan Beulich
2016-12-01 17:03 ` Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 08/15] pvh/acpi: Handle ACPI accesses for PVH guests Boris Ostrovsky
2016-12-06 14:34 ` Jan Beulich
2016-12-06 16:37 ` Boris Ostrovsky
2016-12-07 8:06 ` Jan Beulich
2016-11-29 15:33 ` [PATCH v4 09/15] x86/domctl: Handle ACPI access from domctl Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 10/15] events/x86: Define SCI virtual interrupt Boris Ostrovsky
2016-12-06 14:36 ` Jan Beulich
2016-11-29 15:33 ` [PATCH v4 11/15] pvh: Send an SCI on VCPU hotplug event Boris Ostrovsky
2016-12-06 14:50 ` Jan Beulich
2016-12-06 16:43 ` Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 12/15] tools: Call XEN_DOMCTL_acpi_access on PVH VCPU hotplug Boris Ostrovsky
2016-12-12 16:35 ` Wei Liu
2016-12-12 16:47 ` Boris Ostrovsky
2016-12-12 16:50 ` Boris Ostrovsky
2016-12-12 17:09 ` Wei Liu
2016-12-12 17:14 ` Boris Ostrovsky
2016-12-12 17:13 ` Wei Liu
2016-11-29 15:33 ` [PATCH v4 13/15] pvh: Set online VCPU map to avail_vcpus Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 14/15] pvh/acpi: Save ACPI registers for PVH guests Boris Ostrovsky
2016-11-29 15:33 ` [PATCH v4 15/15] docs: Describe PVHv2's VCPU hotplug procedure Boris Ostrovsky
2016-12-06 20:55 ` Konrad Rzeszutek Wilk
2016-11-29 16:11 ` [PATCH v4 00/15] PVH VCPU hotplug support Jan Beulich
2016-11-29 16:40 ` Boris Ostrovsky
2016-11-29 16:43 ` Jan Beulich
2016-11-29 17:00 ` Boris Ostrovsky