From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: qemu-devel@nongnu.org
Cc: Alexey Kardashevskiy <aik@ozlabs.ru>,
Paolo Bonzini <pbonzini@redhat.com>,
qemu-ppc@nongnu.org, Alexander Graf <agraf@suse.de>
Subject: [Qemu-devel] [PATCH 2/2] spapr_nvram: Enable migration
Date: Thu, 25 Sep 2014 17:02:03 +1000
Message-ID: <1411628523-3498-3-git-send-email-aik@ozlabs.ru>
In-Reply-To: <1411628523-3498-1-git-send-email-aik@ozlabs.ru>
The only case in which sPAPR NVRAM migrates now is when it is backed by a file
and copy-storage migration is performed.

This enables an in-RAM copy of NVRAM even if NVRAM is backed by a file.

This defines a VMSTATE descriptor for the NVRAM device so the memory copy
of NVRAM can migrate and be written to a backing file on the destination
if one is provided.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
hw/nvram/spapr_nvram.c | 68 +++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 59 insertions(+), 9 deletions(-)
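
For reviewers, a minimal annotated sketch of what the new descriptor
serializes. The field names come from the diff below; the struct is
abbreviated and the VMSTATE_VBUFFER_ALLOC_UINT32 helper is assumed to be
the one added by patch 1/2, so treat this as an illustration rather than
exact source:

    /* Abbreviated device state -- the real struct lives in
     * hw/nvram/spapr_nvram.c and has further members. */
    typedef struct sPAPRNVRAM {
        VIOsPAPRDevice sdev;     /* parent sPAPR VIO device */
        uint32_t size;           /* NVRAM size in bytes */
        uint8_t *buf;            /* in-RAM copy, now always allocated */
        BlockDriverState *drive; /* optional backing file ("drive" property) */
    } sPAPRNVRAM;

    /* The stream carries "size" followed by exactly "size" bytes of "buf".
     * On the destination: pre_load() drops the buffer allocated at init
     * time, the VBUFFER_ALLOC field reallocates it to the incoming size
     * before reading the data, and post_load() flushes it to the backing
     * file if the destination has one. */
    static const VMStateDescription vmstate_spapr_nvram = {
        .name = "spapr_nvram",
        .version_id = 1,
        .minimum_version_id = 1,
        .pre_load = spapr_nvram_pre_load,
        .post_load = spapr_nvram_post_load,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(size, sPAPRNVRAM),
            VMSTATE_VBUFFER_ALLOC_UINT32(buf, sPAPRNVRAM, 1, NULL, 0, size),
            VMSTATE_END_OF_LIST()
        },
    };

The descriptor is wired up via dc->vmsd in class_init, so the generic
VMState code handles the stream ordering on both source and destination.
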
diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c
index 6a72ef4..254009e 100644
--- a/hw/nvram/spapr_nvram.c
+++ b/hw/nvram/spapr_nvram.c
@@ -76,15 +76,20 @@ static void rtas_nvram_fetch(PowerPCCPU *cpu, sPAPREnvironment *spapr,
return;
}
+ assert(nvram->buf);
+
membuf = cpu_physical_memory_map(buffer, &len, 1);
+
+ alen = len;
if (nvram->drive) {
alen = bdrv_pread(nvram->drive, offset, membuf, len);
+ if (alen > 0) {
+ memcpy(nvram->buf + offset, membuf, alen);
+ }
} else {
- assert(nvram->buf);
-
memcpy(membuf, nvram->buf + offset, len);
- alen = len;
}
+
cpu_physical_memory_unmap(membuf, len, 1, len);
rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS);
@@ -122,14 +127,15 @@ static void rtas_nvram_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
membuf = cpu_physical_memory_map(buffer, &len, 0);
+
+ alen = len;
if (nvram->drive) {
alen = bdrv_pwrite(nvram->drive, offset, membuf, len);
- } else {
- assert(nvram->buf);
-
- memcpy(nvram->buf + offset, membuf, len);
- alen = len;
}
+
+ assert(nvram->buf);
+ memcpy(nvram->buf + offset, membuf, len);
+
cpu_physical_memory_unmap(membuf, len, 0, len);
rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS);
@@ -144,9 +150,10 @@ static int spapr_nvram_init(VIOsPAPRDevice *dev)
nvram->size = bdrv_getlength(nvram->drive);
} else {
nvram->size = DEFAULT_NVRAM_SIZE;
- nvram->buf = g_malloc0(nvram->size);
}
+ nvram->buf = g_malloc0(nvram->size);
+
if ((nvram->size < MIN_NVRAM_SIZE) || (nvram->size > MAX_NVRAM_SIZE)) {
fprintf(stderr, "spapr-nvram must be between %d and %d bytes in size\n",
MIN_NVRAM_SIZE, MAX_NVRAM_SIZE);
@@ -166,6 +173,48 @@ static int spapr_nvram_devnode(VIOsPAPRDevice *dev, void *fdt, int node_off)
return fdt_setprop_cell(fdt, node_off, "#bytes", nvram->size);
}
+static int spapr_nvram_pre_load(void *opaque)
+{
+ sPAPRNVRAM *nvram = VIO_SPAPR_NVRAM(opaque);
+
+ g_free(nvram->buf);
+ nvram->buf = NULL;
+ nvram->size = 0;
+
+ return 0;
+}
+
+static int spapr_nvram_post_load(void *opaque, int version_id)
+{
+ sPAPRNVRAM *nvram = VIO_SPAPR_NVRAM(opaque);
+
+ if (nvram->drive) {
+ int alen = bdrv_pwrite(nvram->drive, 0, nvram->buf, nvram->size);
+
+ if (alen < 0) {
+ return alen;
+ }
+ if (alen != nvram->size) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_spapr_nvram = {
+ .name = "spapr_nvram",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_load = spapr_nvram_pre_load,
+ .post_load = spapr_nvram_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(size, sPAPRNVRAM),
+ VMSTATE_VBUFFER_ALLOC_UINT32(buf, sPAPRNVRAM, 1, NULL, 0, size),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static Property spapr_nvram_properties[] = {
DEFINE_SPAPR_PROPERTIES(sPAPRNVRAM, sdev),
DEFINE_PROP_DRIVE("drive", sPAPRNVRAM, drive),
@@ -184,6 +233,7 @@ static void spapr_nvram_class_init(ObjectClass *klass, void *data)
k->dt_compatible = "qemu,spapr-nvram";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->props = spapr_nvram_properties;
+ dc->vmsd = &vmstate_spapr_nvram;
}
static const TypeInfo spapr_nvram_type_info = {
--
2.0.0
Thread overview: 10+ messages
2014-09-25 7:02 [Qemu-devel] [PATCH 0/2] spapr_nvram: Support migration Alexey Kardashevskiy
2014-09-25 7:02 ` [Qemu-devel] [PATCH 1/2] vmstate: Allow dynamic allocation for VBUFFER during migration Alexey Kardashevskiy
2014-09-25 7:02 ` Alexey Kardashevskiy [this message]
2014-09-25 9:43 ` [Qemu-devel] [PATCH 2/2] spapr_nvram: Enable migration Alexander Graf
2014-09-25 10:06 ` Alexey Kardashevskiy
2014-09-26 2:31 ` David Gibson
2014-09-26 2:53 ` Alexey Kardashevskiy
2014-09-29 8:30 ` Alexander Graf
-- strict thread matches above, loose matches on Subject: below --
2014-10-02 9:56 [Qemu-devel] [PATCH 0/2] Here are 2 patches to enable sPAPR NVRAM migration Alexey Kardashevskiy
2014-10-02 9:56 ` [Qemu-devel] [PATCH 2/2] spapr_nvram: Enable migration Alexey Kardashevskiy
2014-10-03 4:11 ` David Gibson