From: Bharata B Rao <bharata@linux.vnet.ibm.com>
To: qemu-devel@nongnu.org
Cc: qemu-ppc@nongnu.org, david@gibson.dropbear.id.au,
imammedo@redhat.com, groug@kaod.org, nikunj@linux.vnet.ibm.com,
pbonzini@redhat.com, Bharata B Rao <bharata@linux.vnet.ibm.com>
Subject: [Qemu-devel] [RFC PATCH v1 4/5] xics: Use migration_id instead of cpu_index in XICS code
Date: Wed, 6 Jul 2016 14:29:20 +0530 [thread overview]
Message-ID: <1467795561-1007-5-git-send-email-bharata@linux.vnet.ibm.com> (raw)
In-Reply-To: <1467795561-1007-1-git-send-email-bharata@linux.vnet.ibm.com>
xics maintains an array of ICPState structures which is indexed
by cpu_index. Change this to index the ICPState array by migration_id
for pseries-2.7 onwards. This allows migration of the guest to succeed
when there are holes in cpu_index range due to CPU hot removal.
NOTE: In rtas_set_xive() and h_ipi(), cpu_dt_id is implicitly
assumed to be equivalent to migration_id.
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
---
hw/intc/xics.c | 12 ++++++++----
hw/intc/xics_kvm.c | 11 +++++------
hw/intc/xics_spapr.c | 28 +++++++++++++++++++++-------
3 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index cd48f42..ce7571e 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -50,9 +50,11 @@ int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
void xics_cpu_destroy(XICSState *xics, PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
- ICPState *ss = &xics->ss[cs->cpu_index];
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
+ ICPState *ss = &xics->ss[server];
- assert(cs->cpu_index < xics->nr_servers);
+ assert(server < xics->nr_servers);
assert(cs == ss->cs);
ss->output = NULL;
@@ -63,10 +65,12 @@ void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- ICPState *ss = &xics->ss[cs->cpu_index];
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
+ ICPState *ss = &xics->ss[server];
XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
- assert(cs->cpu_index < xics->nr_servers);
+ assert(server < xics->nr_servers);
ss->cs = cs;
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
index edbd62f..2c087a4 100644
--- a/hw/intc/xics_kvm.c
+++ b/hw/intc/xics_kvm.c
@@ -326,14 +326,13 @@ static const TypeInfo ics_kvm_info = {
*/
static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
{
- CPUState *cs;
- ICPState *ss;
+ CPUState *cs = CPU(cpu);
KVMXICSState *xicskvm = XICS_SPAPR_KVM(xics);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
+ ICPState *ss = ss = &xics->ss[server];
- cs = CPU(cpu);
- ss = &xics->ss[cs->cpu_index];
-
- assert(cs->cpu_index < xics->nr_servers);
+ assert(server < xics->nr_servers);
if (xicskvm->kernel_xics_fd == -1) {
abort();
}
diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
index 618826d..281ba76 100644
--- a/hw/intc/xics_spapr.c
+++ b/hw/intc/xics_spapr.c
@@ -43,16 +43,20 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
target_ulong cppr = args[0];
- icp_set_cppr(spapr->xics, cs->cpu_index, cppr);
+ icp_set_cppr(spapr->xics, server, cppr);
return H_SUCCESS;
}
static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
- target_ulong server = xics_get_cpu_index_by_dt_id(args[0]);
+ CPUState *cs = CPU(cpu);
+ target_ulong server = cs->use_migration_id ? args[0] :
+ xics_get_cpu_index_by_dt_id(args[0]);
target_ulong mfrr = args[1];
if (server >= spapr->xics->nr_servers) {
@@ -67,7 +71,9 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
- uint32_t xirr = icp_accept(spapr->xics->ss + cs->cpu_index);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
+ uint32_t xirr = icp_accept(spapr->xics->ss + server);
args[0] = xirr;
return H_SUCCESS;
@@ -77,7 +83,9 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
- ICPState *ss = &spapr->xics->ss[cs->cpu_index];
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
+ ICPState *ss = &spapr->xics->ss[server];
uint32_t xirr = icp_accept(ss);
args[0] = xirr;
@@ -89,9 +97,11 @@ static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
target_ulong xirr = args[0];
- icp_eoi(spapr->xics, cs->cpu_index, xirr);
+ icp_eoi(spapr->xics, server, xirr);
return H_SUCCESS;
}
@@ -99,8 +109,10 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ int server = cc->get_migration_id(cs);
uint32_t mfrr;
- uint32_t xirr = icp_ipoll(spapr->xics->ss + cs->cpu_index, &mfrr);
+ uint32_t xirr = icp_ipoll(spapr->xics->ss + server, &mfrr);
args[0] = xirr;
args[1] = mfrr;
@@ -113,6 +125,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
+ CPUState *cs = CPU(cpu);
ICSState *ics = spapr->xics->ics;
uint32_t nr, server, priority;
@@ -122,7 +135,8 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
}
nr = rtas_ld(args, 0);
- server = xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
+ server = cs->use_migration_id ? rtas_ld(args, 1) :
+ xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
priority = rtas_ld(args, 2);
if (!ics_valid_irq(ics, nr) || (server >= ics->xics->nr_servers)
--
2.7.4
next prev parent reply other threads:[~2016-07-06 8:59 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-07-06 8:59 [Qemu-devel] [RFC PATCH v1 0/5] sPAPR: Fix migration when CPUs are removed in random order Bharata B Rao
2016-07-06 8:59 ` [Qemu-devel] [RFC PATCH v1 1/5] cpu, target-ppc: Move cpu_vmstate_[un]register calls to cpu_common_[un]realize Bharata B Rao
2016-07-06 10:57 ` Igor Mammedov
2016-07-06 14:16 ` Bharata B Rao
2016-07-06 14:44 ` Igor Mammedov
2016-07-06 16:52 ` Bharata B Rao
2016-07-07 0:47 ` David Gibson
2016-07-06 8:59 ` [Qemu-devel] [RFC PATCH v1 2/5] cpu: Introduce CPUState::migration_id Bharata B Rao
2016-07-06 11:34 ` Igor Mammedov
2016-07-06 14:18 ` Bharata B Rao
2016-07-06 14:47 ` Igor Mammedov
2016-07-07 0:53 ` David Gibson
2016-07-06 8:59 ` [Qemu-devel] [RFC PATCH v1 3/5] spapr: Implement CPUClass::get_migration_id() for PowerPC CPUs Bharata B Rao
2016-07-06 12:01 ` Igor Mammedov
2016-07-06 14:21 ` Bharata B Rao
2016-07-06 14:37 ` Greg Kurz
2016-07-07 0:57 ` David Gibson
2016-07-07 12:32 ` Greg Kurz
2016-07-07 0:55 ` David Gibson
2016-07-06 14:35 ` Greg Kurz
2016-07-06 16:53 ` Bharata B Rao
2016-07-07 1:00 ` David Gibson
2016-07-07 13:43 ` [Qemu-devel] [Qemu-ppc] " Mark Cave-Ayland
2016-07-06 8:59 ` Bharata B Rao [this message]
2016-07-06 9:08 ` [Qemu-devel] [RFC PATCH v1 4/5] xics: Use migration_id instead of cpu_index in XICS code Nikunj A Dadhania
2016-07-06 8:59 ` [Qemu-devel] [RFC PATCH v1 5/5] cpu, spapr: Use migration_id from pseries-2.7 onwards Bharata B Rao
2016-07-06 11:44 ` Igor Mammedov
2016-07-06 11:45 ` Igor Mammedov
2016-07-06 14:24 ` Bharata B Rao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1467795561-1007-5-git-send-email-bharata@linux.vnet.ibm.com \
--to=bharata@linux.vnet.ibm.com \
--cc=david@gibson.dropbear.id.au \
--cc=groug@kaod.org \
--cc=imammedo@redhat.com \
--cc=nikunj@linux.vnet.ibm.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=qemu-ppc@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).