From: Jens Freimann <jfrei@linux.vnet.ibm.com>
To: Christian Borntraeger <borntraeger@de.ibm.com>,
	Alexander Graf <agraf@suse.de>,
	Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: Thomas Huth <thuth@linux.vnet.ibm.com>,
	"Jason J. Herne" <jjherne@us.ibm.com>,
	qemu-devel@nongnu.org,
	David Hildenbrand <dahi@linux.vnet.ibm.com>,
	Jens Freimann <jfrei@linux.vnet.ibm.com>,
	Andreas Faerber <afaerber@suse.de>
Subject: [Qemu-devel] [PATCH] s390x/migration: migrate CPU state
Date: Tue, 30 Sep 2014 10:58:42 +0200
Message-ID: <1412067522-33971-2-git-send-email-jfrei@linux.vnet.ibm.com>
In-Reply-To: <1412067522-33971-1-git-send-email-jfrei@linux.vnet.ibm.com>

From: Thomas Huth <thuth@linux.vnet.ibm.com>

This patch provides the CPU save information for dumps and later live
migration, and enables migration of the CPU state. The code is based on
earlier work from Christian Borntraeger and Jason Herne.

Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
[provide cpu_post_load()]
Signed-off-by: Jens Freimann <jfrei@linux.vnet.ibm.com>
CC: Andreas Faerber <afaerber@suse.de>
CC: Christian Borntraeger <borntraeger@de.ibm.com>
CC: Jason J. Herne <jjherne@us.ibm.com>
---
 target-s390x/cpu.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 2 deletions(-)
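
For readers new to QEMU's migration framework: a VMStateDescription
lists the fields that get serialized into the migration stream, and the
optional .post_load hook runs once those fields have been restored on
the destination. A minimal sketch of the same pattern for a
hypothetical device (MyDeviceState and my_device_post_load are
illustrative names, not part of this patch):

    /* #include "migration/vmstate.h" in a real QEMU source file */

    typedef struct MyDeviceState {
        uint64_t counter;
        uint8_t running;
    } MyDeviceState;

    static int my_device_post_load(void *opaque, int version_id)
    {
        MyDeviceState *s = opaque;

        /* push restored state to backends here, the way
         * cpu_post_load below pushes cpu_state to KVM */
        (void)s;
        return 0;
    }

    static const VMStateDescription vmstate_my_device = {
        .name = "my-device",
        .version_id = 1,
        .minimum_version_id = 1,
        .post_load = my_device_post_load,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(counter, MyDeviceState),
            VMSTATE_UINT8(running, MyDeviceState),
            VMSTATE_END_OF_LIST()
        }
    };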

diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c
index ec7df90..c9c237f 100644
--- a/target-s390x/cpu.c
+++ b/target-s390x/cpu.c
@@ -292,9 +292,64 @@ unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
 }
 #endif
 
+static int cpu_post_load(void *opaque, int version_id)
+{
+    S390CPU *cpu = opaque;
+
+    /* the cpu state is fine for QEMU - we just need to push it to kvm */
+    if (kvm_enabled()) {
+        kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
+    }
+
+    return 0;
+}
+
 static const VMStateDescription vmstate_s390_cpu = {
     .name = "cpu",
-    .unmigratable = 1,
+    .post_load = cpu_post_load,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField[]) {
+        VMSTATE_UINT64(env.fregs[0].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[1].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[2].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[3].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[4].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[5].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[6].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[7].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[8].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[9].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[10].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[11].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[12].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[13].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[14].ll, S390CPU),
+        VMSTATE_UINT64(env.fregs[15].ll, S390CPU),
+        VMSTATE_UINT64_ARRAY(env.regs, S390CPU, 16),
+        VMSTATE_UINT64(env.psw.mask, S390CPU),
+        VMSTATE_UINT64(env.psw.addr, S390CPU),
+        VMSTATE_UINT64(env.psa, S390CPU),
+        VMSTATE_UINT32(env.fpc, S390CPU),
+        VMSTATE_UINT32(env.todpr, S390CPU),
+        VMSTATE_UINT64(env.pfault_token, S390CPU),
+        VMSTATE_UINT64(env.pfault_compare, S390CPU),
+        VMSTATE_UINT64(env.pfault_select, S390CPU),
+        VMSTATE_UINT64(env.cputm, S390CPU),
+        VMSTATE_UINT64(env.ckc, S390CPU),
+        VMSTATE_UINT64(env.gbea, S390CPU),
+        VMSTATE_UINT64(env.pp, S390CPU),
+        VMSTATE_UINT32_ARRAY(env.aregs, S390CPU, 16),
+        VMSTATE_UINT64_ARRAY(env.cregs, S390CPU, 16),
+        VMSTATE_UINT8(env.cpu_state, S390CPU),
+        VMSTATE_END_OF_LIST()
+     },
+    .subsections = (VMStateSubsection[]) {
+        {
+            /* empty */
+        }
+    }
 };
 
 static void s390_cpu_class_init(ObjectClass *oc, void *data)
@@ -323,11 +378,11 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
     cc->handle_mmu_fault = s390_cpu_handle_mmu_fault;
 #else
     cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
+    cc->vmsd = &vmstate_s390_cpu;
     cc->write_elf64_note = s390_cpu_write_elf64_note;
     cc->write_elf64_qemunote = s390_cpu_write_elf64_qemunote;
     cc->cpu_exec_interrupt = s390_cpu_exec_interrupt;
 #endif
-    dc->vmsd = &vmstate_s390_cpu;
     cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
     cc->gdb_core_xml_file = "s390x-core64.xml";
 }
-- 
1.8.5.5
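
A note on the last hunk: moving the vmsd from dc->vmsd (DeviceClass) to
cc->vmsd (CPUClass) matters because the common CPU layer, not the
generic qdev path, registers per-CPU migration state, keyed by the
CPU's index. Roughly, it amounts to something like this (a simplified
sketch of that era's common CPU init code, not the exact source):

    /* in common CPU init, once the CPU has been assigned cpu_index */
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }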
