* [PATCH 1/7] migration: Add more error handling to analyze-migration.py
2025-01-07 19:50 [PATCH 0/7] migration: Fix s390 regressions + migration script Fabiano Rosas
@ 2025-01-07 19:50 ` Fabiano Rosas
2025-01-07 19:50 ` [PATCH 2/7] migration: Remove unused argument in vmsd_desc_field_end Fabiano Rosas
` (5 subsequent siblings)
6 siblings, 0 replies; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-07 19:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Peter Xu, Thomas Huth
The analyze-migration script was seen failing in s390x in mysterious
ways. It seems we're reaching the VMSDFieldStruct constructor without
any fields, which would indicate an empty .subsection entry, a
VMSTATE_STRUCT with no fields or a vmsd with no fields. We don't have
any of those, at least not without the unmigratable flag set, so this
should never happen.
Add some debug statements so that we can see what's going on the next
time the issue happens.
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Message-Id: <20250103141305.8435-1-farosas@suse.de>
---
scripts/analyze-migration.py | 75 +++++++++++++++++++++---------------
1 file changed, 45 insertions(+), 30 deletions(-)
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index 8a254a5b6a..f2457b1dde 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -429,6 +429,9 @@ def __init__(self, desc, file):
super(VMSDFieldStruct, self).__init__(desc, file)
self.data = collections.OrderedDict()
+ if 'fields' not in self.desc['struct']:
+ raise Exception("No fields in struct. VMSD:\n%s" % self.desc)
+
# When we see compressed array elements, unfold them here
new_fields = []
for field in self.desc['struct']['fields']:
@@ -477,6 +480,10 @@ def read(self):
raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
name = self.file.readstr()
version_id = self.file.read32()
+
+ if not subsection:
+ raise Exception("Empty description for subsection: %s" % name)
+
self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
self.data[name].read()
@@ -574,10 +581,13 @@ def __init__(self, filename):
}
self.filename = filename
self.vmsd_desc = None
+ self.vmsd_json = ""
- def read(self, desc_only = False, dump_memory = False, write_memory = False):
+ def read(self, desc_only = False, dump_memory = False,
+ write_memory = False):
# Read in the whole file
file = MigrationFile(self.filename)
+ self.vmsd_json = file.read_migration_debug_json()
# File magic
data = file.read32()
@@ -635,9 +645,11 @@ def read(self, desc_only = False, dump_memory = False, write_memory = False):
file.close()
def load_vmsd_json(self, file):
- vmsd_json = file.read_migration_debug_json()
- self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
+ self.vmsd_desc = json.loads(self.vmsd_json,
+ object_pairs_hook=collections.OrderedDict)
for device in self.vmsd_desc['devices']:
+ if 'fields' not in device:
+ raise Exception("vmstate for device %s has no fields" % device['name'])
key = (device['name'], device['instance_id'])
value = ( VMSDSection, device )
self.section_classes[key] = value
@@ -666,31 +678,34 @@ def default(self, o):
jsonenc = JSONEncoder(indent=4, separators=(',', ': '))
-if args.extract:
- dump = MigrationDump(args.file)
-
- dump.read(desc_only = True)
- print("desc.json")
- f = open("desc.json", "w")
- f.truncate()
- f.write(jsonenc.encode(dump.vmsd_desc))
- f.close()
-
- dump.read(write_memory = True)
- dict = dump.getDict()
- print("state.json")
- f = open("state.json", "w")
- f.truncate()
- f.write(jsonenc.encode(dict))
- f.close()
-elif args.dump == "state":
- dump = MigrationDump(args.file)
- dump.read(dump_memory = args.memory)
- dict = dump.getDict()
- print(jsonenc.encode(dict))
-elif args.dump == "desc":
- dump = MigrationDump(args.file)
- dump.read(desc_only = True)
- print(jsonenc.encode(dump.vmsd_desc))
-else:
+if not any([args.extract, args.dump == "state", args.dump == "desc"]):
raise Exception("Please specify either -x, -d state or -d desc")
+
+try:
+ dump = MigrationDump(args.file)
+
+ if args.extract:
+ dump.read(desc_only = True)
+
+ print("desc.json")
+ f = open("desc.json", "w")
+ f.truncate()
+ f.write(jsonenc.encode(dump.vmsd_desc))
+ f.close()
+
+ dump.read(write_memory = True)
+ dict = dump.getDict()
+ print("state.json")
+ f = open("state.json", "w")
+ f.truncate()
+ f.write(jsonenc.encode(dict))
+ f.close()
+ elif args.dump == "state":
+ dump.read(dump_memory = args.memory)
+ dict = dump.getDict()
+ print(jsonenc.encode(dict))
+ elif args.dump == "desc":
+ dump.read(desc_only = True)
+ print(jsonenc.encode(dump.vmsd_desc))
+except Exception:
+ raise Exception("Full JSON dump:\n%s", dump.vmsd_json)
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 2/7] migration: Remove unused argument in vmsd_desc_field_end
2025-01-07 19:50 [PATCH 0/7] migration: Fix s390 regressions + migration script Fabiano Rosas
2025-01-07 19:50 ` [PATCH 1/7] migration: Add more error handling to analyze-migration.py Fabiano Rosas
@ 2025-01-07 19:50 ` Fabiano Rosas
2025-01-07 21:14 ` Peter Xu
2025-01-07 19:50 ` [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr Fabiano Rosas
` (4 subsequent siblings)
6 siblings, 1 reply; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-07 19:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Peter Xu, Thomas Huth
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
migration/vmstate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/migration/vmstate.c b/migration/vmstate.c
index fa002b24e8..aa2821dec6 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -311,7 +311,7 @@ static void vmsd_desc_field_start(const VMStateDescription *vmsd,
static void vmsd_desc_field_end(const VMStateDescription *vmsd,
JSONWriter *vmdesc,
- const VMStateField *field, size_t size, int i)
+ const VMStateField *field, size_t size)
{
if (!vmdesc) {
return;
@@ -420,7 +420,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
}
written_bytes = qemu_file_transferred(f) - old_offset;
- vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i);
+ vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes);
/* Compressed arrays only care about the first element */
if (vmdesc_loop && vmsd_can_compress(field)) {
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr
2025-01-07 19:50 [PATCH 0/7] migration: Fix s390 regressions + migration script Fabiano Rosas
2025-01-07 19:50 ` [PATCH 1/7] migration: Add more error handling to analyze-migration.py Fabiano Rosas
2025-01-07 19:50 ` [PATCH 2/7] migration: Remove unused argument in vmsd_desc_field_end Fabiano Rosas
@ 2025-01-07 19:50 ` Fabiano Rosas
2025-01-07 21:24 ` Peter Xu
2025-01-07 19:50 ` [PATCH 4/7] migration: Fix parsing of s390 stream Fabiano Rosas
` (3 subsequent siblings)
6 siblings, 1 reply; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-07 19:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Peter Xu, Thomas Huth
The migration stream lacks magic numbers at some key points. It's easy
to mis-parse data. Unfortunately, the VMS_NULLPTR_MARKER continues
with the trend. A '0' byte is ambiguous and could be interpreted as a
valid 0x30.
It is maybe not worth trying to change this while keeping backward
compatibility, so add some words of documentation to clarify.
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
migration/vmstate-types.c | 6 ++++++
scripts/analyze-migration.py | 9 +++++++++
2 files changed, 15 insertions(+)
diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
index e83bfccb9e..08ed059f87 100644
--- a/migration/vmstate-types.c
+++ b/migration/vmstate-types.c
@@ -339,6 +339,12 @@ static int put_nullptr(QEMUFile *f, void *pv, size_t size,
const VMStateInfo vmstate_info_nullptr = {
.name = "uint64",
+
+ /*
+ * Ideally these would actually read/write the size of a pointer,
+ * but we're stuck with just a byte now for backward
+ * compatibility.
+ */
.get = get_nullptr,
.put = put_nullptr,
};
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index f2457b1dde..4292fde424 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -388,12 +388,21 @@ def read(self):
return self.data
class VMSDFieldUInt(VMSDFieldInt):
+ NULL_PTR_MARKER = 0x30
+
def __init__(self, desc, file):
super(VMSDFieldUInt, self).__init__(desc, file)
def read(self):
super(VMSDFieldUInt, self).read()
self.data = self.udata
+
+ if self.data == self.NULL_PTR_MARKER:
+ # The migration stream encodes NULL pointers as '0' so any
+ # 0x30 in the stream could be a NULL. There's not much we
+ # can do without breaking backward compatibility.
+ pass
+
return self.data
class VMSDFieldIntLE(VMSDFieldInt):
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr
2025-01-07 19:50 ` [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr Fabiano Rosas
@ 2025-01-07 21:24 ` Peter Xu
2025-01-08 13:31 ` Fabiano Rosas
0 siblings, 1 reply; 18+ messages in thread
From: Peter Xu @ 2025-01-07 21:24 UTC (permalink / raw)
To: Fabiano Rosas; +Cc: qemu-devel, Thomas Huth
On Tue, Jan 07, 2025 at 04:50:21PM -0300, Fabiano Rosas wrote:
> The migration stream lacks magic numbers at some key points. It's easy
> to mis-parse data. Unfortunately, the VMS_NULLPTR_MARKER continues
> with the trend. A '0' byte is ambiguous and could be interpreted as a
> valid 0x30.
>
> It is maybe not worth trying to change this while keeping backward
> compatibility, so add some words of documentation to clarify.
>
> Signed-off-by: Fabiano Rosas <farosas@suse.de>
> ---
> migration/vmstate-types.c | 6 ++++++
> scripts/analyze-migration.py | 9 +++++++++
> 2 files changed, 15 insertions(+)
>
> diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
> index e83bfccb9e..08ed059f87 100644
> --- a/migration/vmstate-types.c
> +++ b/migration/vmstate-types.c
> @@ -339,6 +339,12 @@ static int put_nullptr(QEMUFile *f, void *pv, size_t size,
>
> const VMStateInfo vmstate_info_nullptr = {
> .name = "uint64",
Ouch.. So I overlooked this line and this explains why it didn't go via
VMSDFieldGeneric already.
Instead of below comment, do we still have chance to change this to
something like "uint8"? Then I suppose the script will be able to identify
this properly.
> +
> + /*
> + * Ideally these would actually read/write the size of a pointer,
> + * but we're stuck with just a byte now for backward
> + * compatibility.
> + */
> .get = get_nullptr,
> .put = put_nullptr,
> };
> diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
> index f2457b1dde..4292fde424 100755
> --- a/scripts/analyze-migration.py
> +++ b/scripts/analyze-migration.py
> @@ -388,12 +388,21 @@ def read(self):
> return self.data
>
> class VMSDFieldUInt(VMSDFieldInt):
> + NULL_PTR_MARKER = 0x30
> +
> def __init__(self, desc, file):
> super(VMSDFieldUInt, self).__init__(desc, file)
>
> def read(self):
> super(VMSDFieldUInt, self).read()
> self.data = self.udata
> +
> + if self.data == self.NULL_PTR_MARKER:
> + # The migration stream encodes NULL pointers as '0' so any
> + # 0x30 in the stream could be a NULL. There's not much we
> + # can do without breaking backward compatibility.
> + pass
So this change doesn't do anything, right?
It'll be weird here having it "uint64" but the super().read() will actually
only read 1 byte.. I assume the oneliner change of s/uint64/uint8/ could
be a replacement of this patch, and I hope that'll work too for the script.
So we will still see a bunch of 0x30s but I assume it's ok.
> +
> return self.data
>
> class VMSDFieldIntLE(VMSDFieldInt):
> --
> 2.35.3
>
--
Peter Xu
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr
2025-01-07 21:24 ` Peter Xu
@ 2025-01-08 13:31 ` Fabiano Rosas
2025-01-08 13:48 ` Peter Xu
0 siblings, 1 reply; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-08 13:31 UTC (permalink / raw)
To: Peter Xu; +Cc: qemu-devel, Thomas Huth
Peter Xu <peterx@redhat.com> writes:
> On Tue, Jan 07, 2025 at 04:50:21PM -0300, Fabiano Rosas wrote:
>> The migration stream lacks magic numbers at some key points. It's easy
>> to mis-parse data. Unfortunately, the VMS_NULLPTR_MARKER continues
>> with the trend. A '0' byte is ambiguous and could be interpreted as a
>> valid 0x30.
>>
>> It is maybe not worth trying to change this while keeping backward
>> compatibility, so add some words of documentation to clarify.
>>
>> Signed-off-by: Fabiano Rosas <farosas@suse.de>
>> ---
>> migration/vmstate-types.c | 6 ++++++
>> scripts/analyze-migration.py | 9 +++++++++
>> 2 files changed, 15 insertions(+)
>>
>> diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
>> index e83bfccb9e..08ed059f87 100644
>> --- a/migration/vmstate-types.c
>> +++ b/migration/vmstate-types.c
>> @@ -339,6 +339,12 @@ static int put_nullptr(QEMUFile *f, void *pv, size_t size,
>>
>> const VMStateInfo vmstate_info_nullptr = {
>> .name = "uint64",
>
> Ouch.. So I overlooked this line and this explains why it didn't go via
> VMSDFieldGeneric already.
Yes, actually I overlooked as well that it should match the size of the
data being handled in the get/put functions.
My comment below is about NULL -> 0x30 that I think should instead be
NULL -> 0x3030303030303030 so we have any chance of looking at this and
identifying it's a NULL pointer. When we write 0x30 it might become
confusing for people reading the scripts output that their stream has a
bunch of '0' in the place where pointers should be. If the MAGIC number
were more identifiable, I could change the script to output (null) or 0x0ULL.
We also don't really have the concept of a pointer, which I suspect
might be the real reason behind all this mess. So we'll see:
0x30
0x30
{
.some
.struct
.here
}
0x30
So all this patch was trying to do is document this situation somehow.
>
> Instead of below comment, do we still have chance to change this to
> something like "uint8"? Then I suppose the script will be able to identify
> this properly.
>
>> +
>> + /*
>> + * Ideally these would actually read/write the size of a pointer,
>> + * but we're stuck with just a byte now for backward
>> + * compatibility.
>> + */
>> .get = get_nullptr,
>> .put = put_nullptr,
>> };
>> diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
>> index f2457b1dde..4292fde424 100755
>> --- a/scripts/analyze-migration.py
>> +++ b/scripts/analyze-migration.py
>> @@ -388,12 +388,21 @@ def read(self):
>> return self.data
>>
>> class VMSDFieldUInt(VMSDFieldInt):
>> + NULL_PTR_MARKER = 0x30
>> +
>> def __init__(self, desc, file):
>> super(VMSDFieldUInt, self).__init__(desc, file)
>>
>> def read(self):
>> super(VMSDFieldUInt, self).read()
>> self.data = self.udata
>> +
>> + if self.data == self.NULL_PTR_MARKER:
>> + # The migration stream encodes NULL pointers as '0' so any
>> + # 0x30 in the stream could be a NULL. There's not much we
>> + # can do without breaking backward compatibility.
>> + pass
>
> So this change doesn't do anything, right?
>
> It'll be weird here having it "uint64" but the super().read() will actually
> only read 1 byte.. I assume the oneliner change of s/uint64/uint8/ could
> be a replacement of this patch, and I hope that'll work too for the script.
> So we will still see a bunch of 0x30s but I assume it's ok.
>
>> +
>> return self.data
>>
>> class VMSDFieldIntLE(VMSDFieldInt):
>> --
>> 2.35.3
>>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr
2025-01-08 13:31 ` Fabiano Rosas
@ 2025-01-08 13:48 ` Peter Xu
2025-01-08 14:37 ` Fabiano Rosas
0 siblings, 1 reply; 18+ messages in thread
From: Peter Xu @ 2025-01-08 13:48 UTC (permalink / raw)
To: Fabiano Rosas; +Cc: qemu-devel, Thomas Huth
On Wed, Jan 08, 2025 at 10:31:05AM -0300, Fabiano Rosas wrote:
> Peter Xu <peterx@redhat.com> writes:
>
> > On Tue, Jan 07, 2025 at 04:50:21PM -0300, Fabiano Rosas wrote:
> >> The migration stream lacks magic numbers at some key points. It's easy
> >> to mis-parse data. Unfortunately, the VMS_NULLPTR_MARKER continues
> >> with the trend. A '0' byte is ambiguous and could be interpreted as a
> >> valid 0x30.
> >>
> >> It is maybe not worth trying to change this while keeping backward
> >> compatibility, so add some words of documentation to clarify.
> >>
> >> Signed-off-by: Fabiano Rosas <farosas@suse.de>
> >> ---
> >> migration/vmstate-types.c | 6 ++++++
> >> scripts/analyze-migration.py | 9 +++++++++
> >> 2 files changed, 15 insertions(+)
> >>
> >> diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
> >> index e83bfccb9e..08ed059f87 100644
> >> --- a/migration/vmstate-types.c
> >> +++ b/migration/vmstate-types.c
> >> @@ -339,6 +339,12 @@ static int put_nullptr(QEMUFile *f, void *pv, size_t size,
> >>
> >> const VMStateInfo vmstate_info_nullptr = {
> >> .name = "uint64",
> >
> > Ouch.. So I overlooked this line and this explains why it didn't go via
> > VMSDFieldGeneric already.
>
> Yes, actually I overlooked as well that it should match the size of the
> data being handled in the get/put functions.
>
> My comment below is about NULL -> 0x30 that I think should instead be
> NULL -> 0x3030303030303030 so we have any chance of looking at this and
> identifying it's a NULL pointer. When we write 0x30 it might become
> confusing for people reading the scripts output that their stream has a
> bunch of '0' in the place where pointers should be. If the MAGIC number
> were more identifiable, I could change the script to output (null) or 0x0ULL.
I suppose we can? If we want, by renaming this from "uint64" to "nullptr",
then add an entry for it in Python's vmsd_field_readers.
>
> We also don't really have the concept of a pointer, which I suspect
> might be the real reason behind all this mess. So we'll see:
>
> 0x30
> 0x30
> {
> .some
> .struct
> .here
> }
> 0x30
>
> So all this patch was trying to do is document this situation somehow.
Yes, more docs makes sense, though just to mention it's nothing better here
to use a full size of pointer: firstly it's not possible I think as 32/64
bits have different size of pointers...
More importantly, we're not sending the pointer but a marker, in this case
the size of the real pointer doesn't really matter, IMHO. A marker would
make sense in saving some bytes when / if the array is large and sparse.
Said that, let's try above idea, maybe it's optimal as you said the script
can show things like "nullptr" (or any better name, I think that's better
than "null" at least to show it's not a real pointer, otherwise it's weird
to see any pointer in a migration stream..).
>
> >
> > Instead of below comment, do we still have chance to change this to
> > something like "uint8"? Then I suppose the script will be able to identify
> > this properly.
--
Peter Xu
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr
2025-01-08 13:48 ` Peter Xu
@ 2025-01-08 14:37 ` Fabiano Rosas
0 siblings, 0 replies; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-08 14:37 UTC (permalink / raw)
To: Peter Xu; +Cc: qemu-devel, Thomas Huth
Peter Xu <peterx@redhat.com> writes:
> On Wed, Jan 08, 2025 at 10:31:05AM -0300, Fabiano Rosas wrote:
>> Peter Xu <peterx@redhat.com> writes:
>>
>> > On Tue, Jan 07, 2025 at 04:50:21PM -0300, Fabiano Rosas wrote:
>> >> The migration stream lacks magic numbers at some key points. It's easy
>> >> to mis-parse data. Unfortunately, the VMS_NULLPTR_MARKER continues
>> >> with the trend. A '0' byte is ambiguous and could be interpreted as a
>> >> valid 0x30.
>> >>
>> >> It is maybe not worth trying to change this while keeping backward
>> >> compatibility, so add some words of documentation to clarify.
>> >>
>> >> Signed-off-by: Fabiano Rosas <farosas@suse.de>
>> >> ---
>> >> migration/vmstate-types.c | 6 ++++++
>> >> scripts/analyze-migration.py | 9 +++++++++
>> >> 2 files changed, 15 insertions(+)
>> >>
>> >> diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
>> >> index e83bfccb9e..08ed059f87 100644
>> >> --- a/migration/vmstate-types.c
>> >> +++ b/migration/vmstate-types.c
>> >> @@ -339,6 +339,12 @@ static int put_nullptr(QEMUFile *f, void *pv, size_t size,
>> >>
>> >> const VMStateInfo vmstate_info_nullptr = {
>> >> .name = "uint64",
>> >
>> > Ouch.. So I overlooked this line and this explains why it didn't go via
>> > VMSDFieldGeneric already.
>>
>> Yes, actually I overlooked as well that it should match the size of the
>> data being handled in the get/put functions.
>>
>> My comment below is about NULL -> 0x30 that I think should instead be
>> NULL -> 0x3030303030303030 so we have any chance of looking at this and
>> identifying it's a NULL pointer. When we write 0x30 it might become
>> confusing for people reading the scripts output that their stream has a
>> bunch of '0' in the place where pointers should be. If the MAGIC number
>> were more identifiable, I could change the script to output (null) or 0x0ULL.
>
> I suppose we can? If we want, by renaming this from "uint64" to "nullptr",
> then add an entry for it in Python's vmsd_field_readers.
That would be a nice alternative because it maps NULL to something, just
like the actual stream does. NULL -> '0' in the stream, NULL -> nullptr
in the JSON. I'll give it a try, thanks.
>>
>> We also don't really have the concept of a pointer, which I suspect
>> might be the real reason behind all this mess. So we'll see:
>>
>> 0x30
>> 0x30
>> {
>> .some
>> .struct
>> .here
>> }
>> 0x30
>>
>> So all this patch was trying to do is document this situation somehow.
>
> Yes, more docs makes sense, though just to mention it's nothing better here
> to use a full size of pointer: firstly it's not possible I think as 32/64
> bits have different size of pointers...
>
> More importantly, we're not sending the pointer but a marker, in this case
> the size of the real pointer doesn't really matter, IMHO. A marker would
> make sense in saving some bytes when / if the array is large and sparse.
Right, it's just that a larger data type allows for a more unique
marker, which can be detected more reliably by the consumers of the
stream. The smaller data type is too ambiguous.
>
> Said that, let's try above idea, maybe it's optimal as you said the script
> can show things like "nullptr" (or any better name, I think that's better
> than "null" at least to show it's not a real pointer, otherwise it's weird
> to see any pointer in a migration stream..).
Yes, the script is just presenting the data, we can use what's more
informative.
>
>>
>> >
>> > Instead of below comment, do we still have chance to change this to
>> > something like "uint8"? Then I suppose the script will be able to identify
>> > this properly.
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 4/7] migration: Fix parsing of s390 stream
2025-01-07 19:50 [PATCH 0/7] migration: Fix s390 regressions + migration script Fabiano Rosas
` (2 preceding siblings ...)
2025-01-07 19:50 ` [PATCH 3/7] migration: Document the effect of vmstate_info_nullptr Fabiano Rosas
@ 2025-01-07 19:50 ` Fabiano Rosas
2025-01-07 19:50 ` [PATCH 5/7] migration: Dump correct JSON format for nullptr replacement Fabiano Rosas
` (2 subsequent siblings)
6 siblings, 0 replies; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-07 19:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Peter Xu, Thomas Huth
The parsing for the S390StorageAttributes section is currently leaving
an unconsumed token that is later interpreted by the generic code as
QEMU_VM_EOF, cutting the parsing short.
The migration will issue a STATTR_FLAG_DONE between iterations, which
the script consumes correctly, but there's a final STATTR_FLAG_EOS at
.save_complete that the script is ignoring. Since the EOS flag is a
u64 0x1ULL and the stream is big endian, on little endian hosts a byte
read from it will be 0x0, the same as QEMU_VM_EOF.
Fixes: 81c2c9dd5d ("tests/qtest/migration-test: Fix analyze-migration.py for s390x")
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
scripts/analyze-migration.py | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index 4292fde424..4836920ddc 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -65,6 +65,9 @@ def readvar(self, size = None):
def tell(self):
return self.file.tell()
+ def seek(self, a, b):
+ return self.file.seek(a, b)
+
# The VMSD description is at the end of the file, after EOF. Look for
# the last NULL byte, then for the beginning brace of JSON.
def read_migration_debug_json(self):
@@ -272,11 +275,24 @@ def __init__(self, file, version_id, device, section_key):
self.section_key = section_key
def read(self):
+ pos = 0
while True:
addr_flags = self.file.read64()
flags = addr_flags & 0xfff
- if (flags & (self.STATTR_FLAG_DONE | self.STATTR_FLAG_EOS)):
+
+ if flags & self.STATTR_FLAG_DONE:
+ pos = self.file.tell()
+ continue
+ elif flags & self.STATTR_FLAG_EOS:
return
+ else:
+ # No EOS came after DONE, that's OK, but rewind the
+ # stream because this is not our data.
+ if pos:
+ self.file.seek(pos, os.SEEK_SET)
+ return
+ raise Exception("Unknown flags %x", flags)
+
if (flags & self.STATTR_FLAG_ERROR):
raise Exception("Error in migration stream")
count = self.file.read64()
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 5/7] migration: Dump correct JSON format for nullptr replacement
2025-01-07 19:50 [PATCH 0/7] migration: Fix s390 regressions + migration script Fabiano Rosas
` (3 preceding siblings ...)
2025-01-07 19:50 ` [PATCH 4/7] migration: Fix parsing of s390 stream Fabiano Rosas
@ 2025-01-07 19:50 ` Fabiano Rosas
2025-01-07 19:50 ` [PATCH 6/7] migration: Fix arrays of pointers in JSON writer Fabiano Rosas
2025-01-07 19:50 ` [PATCH 7/7] s390x: Fix CSS migration Fabiano Rosas
6 siblings, 0 replies; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-07 19:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Peter Xu, Thomas Huth
From: Peter Xu <peterx@redhat.com>
QEMU plays a trick with null pointers inside an array of pointers in a VMSD
field. See 07d4e69147 ("migration/vmstate: fix array of ptr with
nullptrs") for more details on why. The idea makes sense in general, but
it may overlooked the JSON writer where it could write nothing in a
"struct" in the JSON hints section.
We hit some analyze-migration.py issues on s390 recently, showing that some
of the struct field contains nothing, like:
{"name": "css", "array_len": 256, "type": "struct", "struct": {}, "size": 1}
As described in details by Fabiano:
https://lore.kernel.org/r/87pll37cin.fsf@suse.de
It could be that we hit some null pointers there, and JSON was gone when
they're null pointers.
To fix it, instead of hacking around only at VMStateInfo level, do that
from VMStateField level, so that JSON writer can also be involved. In this
case, JSON writer will replace the pointer array (which used to be a
"struct") to be the real representation of the nullptr field.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/vmstate.c | 118 ++++++++++++++++++++++++++++++++++----------
1 file changed, 91 insertions(+), 27 deletions(-)
diff --git a/migration/vmstate.c b/migration/vmstate.c
index aa2821dec6..52704c822c 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -51,6 +51,36 @@ vmstate_field_exists(const VMStateDescription *vmsd, const VMStateField *field,
return result;
}
+/*
+ * Create a fake nullptr field when there's a NULL pointer detected in the
+ * array of a VMS_ARRAY_OF_POINTER VMSD field. It's needed because we
+ * can't dereference the NULL pointer.
+ */
+static const VMStateField *
+vmsd_create_fake_nullptr_field(const VMStateField *field)
+{
+ VMStateField *fake = g_new0(VMStateField, 1);
+
+ /* It can only happen on an array of pointers! */
+ assert(field->flags & VMS_ARRAY_OF_POINTER);
+
+ /* Some of fake's properties should match the original's */
+ fake->name = field->name;
+ fake->version_id = field->version_id;
+
+ /* Do not need "field_exists" check as it always exists (which is null) */
+ fake->field_exists = NULL;
+
+ /* See vmstate_info_nullptr - use 1 byte to represent nullptr */
+ fake->size = 1;
+ fake->info = &vmstate_info_nullptr;
+ fake->flags = VMS_SINGLE;
+
+ /* All the rest fields shouldn't matter.. */
+
+ return (const VMStateField *)fake;
+}
+
static int vmstate_n_elems(void *opaque, const VMStateField *field)
{
int n_elems = 1;
@@ -143,23 +173,39 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
}
for (i = 0; i < n_elems; i++) {
void *curr_elem = first_elem + size * i;
+ const VMStateField *inner_field;
if (field->flags & VMS_ARRAY_OF_POINTER) {
curr_elem = *(void **)curr_elem;
}
+
if (!curr_elem && size) {
- /* if null pointer check placeholder and do not follow */
- assert(field->flags & VMS_ARRAY_OF_POINTER);
- ret = vmstate_info_nullptr.get(f, curr_elem, size, NULL);
- } else if (field->flags & VMS_STRUCT) {
- ret = vmstate_load_state(f, field->vmsd, curr_elem,
- field->vmsd->version_id);
- } else if (field->flags & VMS_VSTRUCT) {
- ret = vmstate_load_state(f, field->vmsd, curr_elem,
- field->struct_version_id);
+ /*
+ * If null pointer found (which should only happen in
+ * an array of pointers), use null placeholder and do
+ * not follow.
+ */
+ inner_field = vmsd_create_fake_nullptr_field(field);
} else {
- ret = field->info->get(f, curr_elem, size, field);
+ inner_field = field;
}
+
+ if (inner_field->flags & VMS_STRUCT) {
+ ret = vmstate_load_state(f, inner_field->vmsd, curr_elem,
+ inner_field->vmsd->version_id);
+ } else if (inner_field->flags & VMS_VSTRUCT) {
+ ret = vmstate_load_state(f, inner_field->vmsd, curr_elem,
+ inner_field->struct_version_id);
+ } else {
+ ret = inner_field->info->get(f, curr_elem, size,
+ inner_field);
+ }
+
+ /* If we used a fake temp field.. free it now */
+ if (inner_field != field) {
+ g_clear_pointer((gpointer *)&inner_field, g_free);
+ }
+
if (ret >= 0) {
ret = qemu_file_get_error(f);
}
@@ -387,29 +433,50 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
}
for (i = 0; i < n_elems; i++) {
void *curr_elem = first_elem + size * i;
+ const VMStateField *inner_field;
- vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, n_elems);
old_offset = qemu_file_transferred(f);
if (field->flags & VMS_ARRAY_OF_POINTER) {
assert(curr_elem);
curr_elem = *(void **)curr_elem;
}
+
if (!curr_elem && size) {
- /* if null pointer write placeholder and do not follow */
- assert(field->flags & VMS_ARRAY_OF_POINTER);
- ret = vmstate_info_nullptr.put(f, curr_elem, size, NULL,
- NULL);
- } else if (field->flags & VMS_STRUCT) {
- ret = vmstate_save_state(f, field->vmsd, curr_elem,
- vmdesc_loop);
- } else if (field->flags & VMS_VSTRUCT) {
- ret = vmstate_save_state_v(f, field->vmsd, curr_elem,
- vmdesc_loop,
- field->struct_version_id, errp);
+ /*
+ * If null pointer found (which should only happen in
+ * an array of pointers), use null placeholder and do
+ * not follow.
+ */
+ inner_field = vmsd_create_fake_nullptr_field(field);
} else {
- ret = field->info->put(f, curr_elem, size, field,
- vmdesc_loop);
+ inner_field = field;
}
+
+ vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field,
+ i, n_elems);
+
+ if (inner_field->flags & VMS_STRUCT) {
+ ret = vmstate_save_state(f, inner_field->vmsd,
+ curr_elem, vmdesc_loop);
+ } else if (inner_field->flags & VMS_VSTRUCT) {
+ ret = vmstate_save_state_v(f, inner_field->vmsd,
+ curr_elem, vmdesc_loop,
+ inner_field->struct_version_id,
+ errp);
+ } else {
+ ret = inner_field->info->put(f, curr_elem, size,
+ inner_field, vmdesc_loop);
+ }
+
+ written_bytes = qemu_file_transferred(f) - old_offset;
+ vmsd_desc_field_end(vmsd, vmdesc_loop, inner_field,
+ written_bytes);
+
+ /* If we used a fake temp field.. free it now */
+ if (inner_field != field) {
+ g_clear_pointer((gpointer *)&inner_field, g_free);
+ }
+
if (ret) {
error_setg(errp, "Save of field %s/%s failed",
vmsd->name, field->name);
@@ -419,9 +486,6 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
return ret;
}
- written_bytes = qemu_file_transferred(f) - old_offset;
- vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes);
-
/* Compressed arrays only care about the first element */
if (vmdesc_loop && vmsd_can_compress(field)) {
vmdesc_loop = NULL;
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 6/7] migration: Fix arrays of pointers in JSON writer
2025-01-07 19:50 [PATCH 0/7] migration: Fix s390 regressions + migration script Fabiano Rosas
` (4 preceding siblings ...)
2025-01-07 19:50 ` [PATCH 5/7] migration: Dump correct JSON format for nullptr replacement Fabiano Rosas
@ 2025-01-07 19:50 ` Fabiano Rosas
2025-01-07 23:25 ` Peter Xu
2025-01-07 19:50 ` [PATCH 7/7] s390x: Fix CSS migration Fabiano Rosas
6 siblings, 1 reply; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-07 19:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Peter Xu, Thomas Huth
Currently, if an array of pointers contains a NULL pointer, that
pointer will be encoded as '0' in the stream. Since the JSON writer
doesn't define a "pointer" type, that '0' will now be a uint64, which
is different from the original type being pointed to, e.g. struct.
That mixed-type array shouldn't be compressed, otherwise data is lost
as the code currently makes the whole array have the type of the first
element.
While we could disable the array compression when a NULL pointer is
found, the JSON part of the stream is still part of downtime, so we
should avoid writing unnecessary bytes to it.
Keep the array compression in place, but break the array into several
type-contiguous pieces if NULL and non-NULL pointers are mixed.
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
migration/vmstate.c | 33 ++++++++++++++++++++++++++++++++-
scripts/analyze-migration.py | 9 ++++++++-
2 files changed, 40 insertions(+), 2 deletions(-)
diff --git a/migration/vmstate.c b/migration/vmstate.c
index 52704c822c..a79ccf3875 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -425,15 +425,19 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
int size = vmstate_size(opaque, field);
uint64_t old_offset, written_bytes;
JSONWriter *vmdesc_loop = vmdesc;
+ bool is_prev_null = false;
trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);
if (field->flags & VMS_POINTER) {
first_elem = *(void **)first_elem;
assert(first_elem || !n_elems || !size);
}
+
for (i = 0; i < n_elems; i++) {
void *curr_elem = first_elem + size * i;
const VMStateField *inner_field;
+ bool is_null;
+ int max_elems = n_elems - i;
old_offset = qemu_file_transferred(f);
if (field->flags & VMS_ARRAY_OF_POINTER) {
@@ -448,12 +452,39 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
* not follow.
*/
inner_field = vmsd_create_fake_nullptr_field(field);
+ is_null = true;
} else {
inner_field = field;
+ is_null = false;
+ }
+
+ /*
+ * Due to the fake nullptr handling above, if there's mixed
+ * null/non-null data, it doesn't make sense to emit a
+ * compressed array representation spanning the entire array
+ * because the field types will be different (e.g. struct
+ * vs. uint64_t). Search ahead for the next null/non-null
+ * element and start a new compressed array if found.
+ */
+ if (field->flags & VMS_ARRAY_OF_POINTER &&
+ is_null != is_prev_null) {
+
+ is_prev_null = is_null;
+ vmdesc_loop = vmdesc;
+
+ for (int j = i + 1; j < n_elems; j++) {
+ void *elem = *(void **)(first_elem + size * j);
+ bool elem_is_null = !elem && size;
+
+ if (is_null != elem_is_null) {
+ max_elems = j - i;
+ break;
+ }
+ }
}
vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field,
- i, n_elems);
+ i, max_elems);
if (inner_field->flags & VMS_STRUCT) {
ret = vmstate_save_state(f, inner_field->vmsd,
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index 4836920ddc..9138e91a11 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -497,7 +497,14 @@ def read(self):
raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
a.append(field['data'])
else:
- self.data[field['name']] = field['data']
+ # There could be multiple entries for the same field
+ # name, e.g. when a compressed array was broken in
+ # more than one piece.
+ if (field['name'] in self.data and
+ type(self.data[field['name']]) == list):
+ self.data[field['name']].append(field['data'])
+ else:
+ self.data[field['name']] = field['data']
if 'subsections' in self.desc['struct']:
for subsection in self.desc['struct']['subsections']:
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH 6/7] migration: Fix arrays of pointers in JSON writer
2025-01-07 19:50 ` [PATCH 6/7] migration: Fix arrays of pointers in JSON writer Fabiano Rosas
@ 2025-01-07 23:25 ` Peter Xu
2025-01-08 13:52 ` Fabiano Rosas
0 siblings, 1 reply; 18+ messages in thread
From: Peter Xu @ 2025-01-07 23:25 UTC (permalink / raw)
To: Fabiano Rosas; +Cc: qemu-devel, Thomas Huth
On Tue, Jan 07, 2025 at 04:50:24PM -0300, Fabiano Rosas wrote:
> Currently, if an array of pointers contains a NULL pointer, that
> pointer will be encoded as '0' in the stream. Since the JSON writer
> doesn't define a "pointer" type, that '0' will now be an uint64, which
> is different from the original type being pointed to, e.g. struct.
>
> That mixed-type array shouldn't be compressed, otherwise data is lost
> as the code currently makes the whole array have the type of the first
> element.
>
> While we could disable the array compression when a NULL pointer is
> found, the JSON part of the stream still makes part of downtime, so we
> should avoid writing unecessary bytes to it.
>
> Keep the array compression in place, but break the array into several
> type-contiguous pieces if NULL and non-NULL pointers are mixed.
Could I request for a sample JSON dump for an example array in the commit
log? This whole solution looks working but is tricky. A sample could help
people understand (e.g. showing the same "name" being dumped multiple
times..).
Side note: I tried to dump a very basic VM's JSON out to disk, it scares me
on the size:
$ ls -lhS JSON.out
-rw-r--r--. 1 peterx peterx 106K Jan 7 17:18 JSON.out
That's a simplest VM with all default stuff, mostly nothing complex.. I may
really need to measure how the JSON debug strings affect migration function
or perf at some point..
>
> Signed-off-by: Fabiano Rosas <farosas@suse.de>
> ---
> migration/vmstate.c | 33 ++++++++++++++++++++++++++++++++-
> scripts/analyze-migration.py | 9 ++++++++-
> 2 files changed, 40 insertions(+), 2 deletions(-)
>
> diff --git a/migration/vmstate.c b/migration/vmstate.c
> index 52704c822c..a79ccf3875 100644
> --- a/migration/vmstate.c
> +++ b/migration/vmstate.c
> @@ -425,15 +425,19 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
> int size = vmstate_size(opaque, field);
> uint64_t old_offset, written_bytes;
> JSONWriter *vmdesc_loop = vmdesc;
> + bool is_prev_null = false;
>
> trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);
> if (field->flags & VMS_POINTER) {
> first_elem = *(void **)first_elem;
> assert(first_elem || !n_elems || !size);
> }
> +
> for (i = 0; i < n_elems; i++) {
> void *curr_elem = first_elem + size * i;
> const VMStateField *inner_field;
> + bool is_null;
> + int max_elems = n_elems - i;
>
> old_offset = qemu_file_transferred(f);
> if (field->flags & VMS_ARRAY_OF_POINTER) {
> @@ -448,12 +452,39 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
> * not follow.
> */
> inner_field = vmsd_create_fake_nullptr_field(field);
> + is_null = true;
> } else {
> inner_field = field;
> + is_null = false;
> + }
> +
> + /*
> + * Due to the fake nullptr handling above, if there's mixed
> + * null/non-null data, it doesn't make sense to emit a
> + * compressed array representation spanning the entire array
> + * because the field types will be different (e.g. struct
> + * vs. uint64_t). Search ahead for the next null/non-null
> + * element and start a new compressed array if found.
> + */
> + if (field->flags & VMS_ARRAY_OF_POINTER &&
> + is_null != is_prev_null) {
> +
> + is_prev_null = is_null;
> + vmdesc_loop = vmdesc;
> +
> + for (int j = i + 1; j < n_elems; j++) {
> + void *elem = *(void **)(first_elem + size * j);
> + bool elem_is_null = !elem && size;
> +
> + if (is_null != elem_is_null) {
> + max_elems = j - i;
> + break;
> + }
> + }
> }
>
> vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field,
> - i, n_elems);
> + i, max_elems);
>
> if (inner_field->flags & VMS_STRUCT) {
> ret = vmstate_save_state(f, inner_field->vmsd,
> diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
> index 4836920ddc..9138e91a11 100755
> --- a/scripts/analyze-migration.py
> +++ b/scripts/analyze-migration.py
> @@ -497,7 +497,14 @@ def read(self):
> raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
> a.append(field['data'])
> else:
> - self.data[field['name']] = field['data']
> + # There could be multiple entries for the same field
> + # name, e.g. when a compressed array was broken in
> + # more than one piece.
> + if (field['name'] in self.data and
> + type(self.data[field['name']]) == list):
> + self.data[field['name']].append(field['data'])
> + else:
> + self.data[field['name']] = field['data']
Do we realy need these script changes? I thought VMSDFieldStruct always
breaks array_len field into "index" based anyway?
new_fields = []
for field in self.desc['struct']['fields']:
if not 'array_len' in field:
new_fields.append(field)
continue
array_len = field.pop('array_len')
field['index'] = 0
new_fields.append(field)
for i in range(1, array_len):
c = field.copy()
c['index'] = i
new_fields.append(c)
self.desc['struct']['fields'] = new_fields
--
Peter Xu
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 6/7] migration: Fix arrays of pointers in JSON writer
2025-01-07 23:25 ` Peter Xu
@ 2025-01-08 13:52 ` Fabiano Rosas
2025-01-08 16:14 ` Peter Xu
0 siblings, 1 reply; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-08 13:52 UTC (permalink / raw)
To: Peter Xu; +Cc: qemu-devel, Thomas Huth
Peter Xu <peterx@redhat.com> writes:
> On Tue, Jan 07, 2025 at 04:50:24PM -0300, Fabiano Rosas wrote:
>> Currently, if an array of pointers contains a NULL pointer, that
>> pointer will be encoded as '0' in the stream. Since the JSON writer
>> doesn't define a "pointer" type, that '0' will now be an uint64, which
>> is different from the original type being pointed to, e.g. struct.
>>
>> That mixed-type array shouldn't be compressed, otherwise data is lost
>> as the code currently makes the whole array have the type of the first
>> element.
>>
>> While we could disable the array compression when a NULL pointer is
>> found, the JSON part of the stream still makes part of downtime, so we
>> should avoid writing unecessary bytes to it.
>>
>> Keep the array compression in place, but break the array into several
>> type-contiguous pieces if NULL and non-NULL pointers are mixed.
>
> Could I request for a sample JSON dump for an example array in the commit
> log? This whole solution looks working but is tricky. A sample could help
> people understand (e.g. showing the same "name" being dumped multiple
> times..).
{"name": "s390_css", "instance_id": 0, "vmsd_name": "s390_css",
"version": 1, "fields": [
...,
{"name": "css", "array_len": 254, "type": "uint8", "size": 1},
{"name": "css", "type": "struct", "struct": {
"vmsd_name": "s390_css_img", "version": 1, "fields": [{"name":
"chpids", "array_len": 256, "type": "struct", "struct": {"vmsd_name":
"s390_chp_info", "version": 1, "fields": [{"name": "in_use", "type":
"uint8", "size": 1}, {"name": "type", "type": "uint8", "size": 1},
{"name": "is_virtual", "type": "uint8", "size": 1}]}, "size": 3}]},
"size": 768},
{"name": "css", "type": "uint8", "size": 1},
...
]}
>
> Side note: I tried to dump a very basic VM's JSON out to disk, it scares me
> on the size:
>
> $ ls -lhS JSON.out
> -rw-r--r--. 1 peterx peterx 106K Jan 7 17:18 JSON.out
>
> That's a simplest VM with all default stuff, mostly nothing complex.. I may
> really need to measure how the JSON debug strings affect migration function
> or perf at some point..
>
Agreed.
>>
>> Signed-off-by: Fabiano Rosas <farosas@suse.de>
>> ---
>> migration/vmstate.c | 33 ++++++++++++++++++++++++++++++++-
>> scripts/analyze-migration.py | 9 ++++++++-
>> 2 files changed, 40 insertions(+), 2 deletions(-)
>>
>> diff --git a/migration/vmstate.c b/migration/vmstate.c
>> index 52704c822c..a79ccf3875 100644
>> --- a/migration/vmstate.c
>> +++ b/migration/vmstate.c
>> @@ -425,15 +425,19 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
>> int size = vmstate_size(opaque, field);
>> uint64_t old_offset, written_bytes;
>> JSONWriter *vmdesc_loop = vmdesc;
>> + bool is_prev_null = false;
>>
>> trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);
>> if (field->flags & VMS_POINTER) {
>> first_elem = *(void **)first_elem;
>> assert(first_elem || !n_elems || !size);
>> }
>> +
>> for (i = 0; i < n_elems; i++) {
>> void *curr_elem = first_elem + size * i;
>> const VMStateField *inner_field;
>> + bool is_null;
>> + int max_elems = n_elems - i;
>>
>> old_offset = qemu_file_transferred(f);
>> if (field->flags & VMS_ARRAY_OF_POINTER) {
>> @@ -448,12 +452,39 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
>> * not follow.
>> */
>> inner_field = vmsd_create_fake_nullptr_field(field);
>> + is_null = true;
>> } else {
>> inner_field = field;
>> + is_null = false;
>> + }
>> +
>> + /*
>> + * Due to the fake nullptr handling above, if there's mixed
>> + * null/non-null data, it doesn't make sense to emit a
>> + * compressed array representation spanning the entire array
>> + * because the field types will be different (e.g. struct
>> + * vs. uint64_t). Search ahead for the next null/non-null
>> + * element and start a new compressed array if found.
>> + */
>> + if (field->flags & VMS_ARRAY_OF_POINTER &&
>> + is_null != is_prev_null) {
>> +
>> + is_prev_null = is_null;
>> + vmdesc_loop = vmdesc;
>> +
>> + for (int j = i + 1; j < n_elems; j++) {
>> + void *elem = *(void **)(first_elem + size * j);
>> + bool elem_is_null = !elem && size;
>> +
>> + if (is_null != elem_is_null) {
>> + max_elems = j - i;
>> + break;
>> + }
>> + }
>> }
>>
>> vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field,
>> - i, n_elems);
>> + i, max_elems);
>>
>> if (inner_field->flags & VMS_STRUCT) {
>> ret = vmstate_save_state(f, inner_field->vmsd,
>> diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
>> index 4836920ddc..9138e91a11 100755
>> --- a/scripts/analyze-migration.py
>> +++ b/scripts/analyze-migration.py
>> @@ -497,7 +497,14 @@ def read(self):
>> raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
>> a.append(field['data'])
There's actually a bug here, the code above does:
if len(a) != int(field['index']):
raise Exception()
Which only works with this patch because the compressed array happens to
come first.
>> else:
>> - self.data[field['name']] = field['data']
>> + # There could be multiple entries for the same field
>> + # name, e.g. when a compressed array was broken in
>> + # more than one piece.
>> + if (field['name'] in self.data and
>> + type(self.data[field['name']]) == list):
>> + self.data[field['name']].append(field['data'])
>> + else:
>> + self.data[field['name']] = field['data']
>
> Do we realy need these script changes? I thought VMSDFieldStruct always
> breaks array_len field into "index" based anyway?
>
> new_fields = []
> for field in self.desc['struct']['fields']:
> if not 'array_len' in field:
> new_fields.append(field)
> continue
> array_len = field.pop('array_len')
> field['index'] = 0
> new_fields.append(field)
> for i in range(1, array_len):
> c = field.copy()
> c['index'] = i
> new_fields.append(c)
>
> self.desc['struct']['fields'] = new_fields
This code is about decompressing the array, it doesn't handle multiple
entries with the same name. See the JSON I posted up there.
This makes the single:
{"name": "css", "array_len": 254, "type": "uint8", "size": 1},
become multiple:
{"name": "css", "index": 0, "type": "uint8", "size": 1},
{"name": "css", "index": 1, "type": "uint8", "size": 1},
...
{"name": "css", "index": 253, "type": "uint8", "size": 1},
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 6/7] migration: Fix arrays of pointers in JSON writer
2025-01-08 13:52 ` Fabiano Rosas
@ 2025-01-08 16:14 ` Peter Xu
2025-01-08 17:15 ` Fabiano Rosas
0 siblings, 1 reply; 18+ messages in thread
From: Peter Xu @ 2025-01-08 16:14 UTC (permalink / raw)
To: Fabiano Rosas; +Cc: qemu-devel, Thomas Huth
On Wed, Jan 08, 2025 at 10:52:30AM -0300, Fabiano Rosas wrote:
> Peter Xu <peterx@redhat.com> writes:
>
> > On Tue, Jan 07, 2025 at 04:50:24PM -0300, Fabiano Rosas wrote:
> >> Currently, if an array of pointers contains a NULL pointer, that
> >> pointer will be encoded as '0' in the stream. Since the JSON writer
> >> doesn't define a "pointer" type, that '0' will now be an uint64, which
> >> is different from the original type being pointed to, e.g. struct.
> >>
> >> That mixed-type array shouldn't be compressed, otherwise data is lost
> >> as the code currently makes the whole array have the type of the first
> >> element.
> >>
> >> While we could disable the array compression when a NULL pointer is
> >> found, the JSON part of the stream still makes part of downtime, so we
> >> should avoid writing unecessary bytes to it.
> >>
> >> Keep the array compression in place, but break the array into several
> >> type-contiguous pieces if NULL and non-NULL pointers are mixed.
> >
> > Could I request for a sample JSON dump for an example array in the commit
> > log? This whole solution looks working but is tricky. A sample could help
> > people understand (e.g. showing the same "name" being dumped multiple
> > times..).
>
> {"name": "s390_css", "instance_id": 0, "vmsd_name": "s390_css",
> "version": 1, "fields": [
> ...,
> {"name": "css", "array_len": 254, "type": "uint8", "size": 1},
> {"name": "css", "type": "struct", "struct": {
> "vmsd_name": "s390_css_img", "version": 1, "fields": [{"name":
> "chpids", "array_len": 256, "type": "struct", "struct": {"vmsd_name":
> "s390_chp_info", "version": 1, "fields": [{"name": "in_use", "type":
> "uint8", "size": 1}, {"name": "type", "type": "uint8", "size": 1},
> {"name": "is_virtual", "type": "uint8", "size": 1}]}, "size": 3}]},
> "size": 768},
> {"name": "css", "type": "uint8", "size": 1},
> ...
> ]}
Yes something like this would work, thanks. We could even omit most of the
struct details but only show the important ones:
{"name": "s390_css", "instance_id": 0, "vmsd_name": "s390_css",
"version": 1, "fields": [
...,
{"name": "css", "array_len": 254, "type": "uint8", "size": 1},
{"name": "css", "type": "struct", "struct": {"vmsd_name": "s390_css_img", ... }, "size": 768},
{"name": "css", "type": "uint8", "size": 1},
...
]}
>
> >
> > Side note: I tried to dump a very basic VM's JSON out to disk, it scares me
> > on the size:
> >
> > $ ls -lhS JSON.out
> > -rw-r--r--. 1 peterx peterx 106K Jan 7 17:18 JSON.out
> >
> > That's a simplest VM with all default stuff, mostly nothing complex.. I may
> > really need to measure how the JSON debug strings affect migration function
> > or perf at some point..
> >
>
> Agreed.
>
> >>
> >> Signed-off-by: Fabiano Rosas <farosas@suse.de>
> >> ---
> >> migration/vmstate.c | 33 ++++++++++++++++++++++++++++++++-
> >> scripts/analyze-migration.py | 9 ++++++++-
> >> 2 files changed, 40 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/migration/vmstate.c b/migration/vmstate.c
> >> index 52704c822c..a79ccf3875 100644
> >> --- a/migration/vmstate.c
> >> +++ b/migration/vmstate.c
> >> @@ -425,15 +425,19 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
> >> int size = vmstate_size(opaque, field);
> >> uint64_t old_offset, written_bytes;
> >> JSONWriter *vmdesc_loop = vmdesc;
> >> + bool is_prev_null = false;
> >>
> >> trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);
> >> if (field->flags & VMS_POINTER) {
> >> first_elem = *(void **)first_elem;
> >> assert(first_elem || !n_elems || !size);
> >> }
> >> +
> >> for (i = 0; i < n_elems; i++) {
> >> void *curr_elem = first_elem + size * i;
> >> const VMStateField *inner_field;
> >> + bool is_null;
> >> + int max_elems = n_elems - i;
> >>
> >> old_offset = qemu_file_transferred(f);
> >> if (field->flags & VMS_ARRAY_OF_POINTER) {
> >> @@ -448,12 +452,39 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
> >> * not follow.
> >> */
> >> inner_field = vmsd_create_fake_nullptr_field(field);
> >> + is_null = true;
> >> } else {
> >> inner_field = field;
> >> + is_null = false;
> >> + }
> >> +
> >> + /*
> >> + * Due to the fake nullptr handling above, if there's mixed
> >> + * null/non-null data, it doesn't make sense to emit a
> >> + * compressed array representation spanning the entire array
> >> + * because the field types will be different (e.g. struct
> >> + * vs. uint64_t). Search ahead for the next null/non-null
> >> + * element and start a new compressed array if found.
> >> + */
> >> + if (field->flags & VMS_ARRAY_OF_POINTER &&
> >> + is_null != is_prev_null) {
> >> +
> >> + is_prev_null = is_null;
> >> + vmdesc_loop = vmdesc;
> >> +
> >> + for (int j = i + 1; j < n_elems; j++) {
> >> + void *elem = *(void **)(first_elem + size * j);
> >> + bool elem_is_null = !elem && size;
> >> +
> >> + if (is_null != elem_is_null) {
> >> + max_elems = j - i;
> >> + break;
> >> + }
> >> + }
> >> }
> >>
> >> vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field,
> >> - i, n_elems);
> >> + i, max_elems);
> >>
> >> if (inner_field->flags & VMS_STRUCT) {
> >> ret = vmstate_save_state(f, inner_field->vmsd,
> >> diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
> >> index 4836920ddc..9138e91a11 100755
> >> --- a/scripts/analyze-migration.py
> >> +++ b/scripts/analyze-migration.py
> >> @@ -497,7 +497,14 @@ def read(self):
> >> raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
> >> a.append(field['data'])
>
> There's actually a bug here, the code above does:
>
> if len(a) != int(field['index']):
> raise Exception()
>
> Which only works with this patch because the compressed array happens to
> come first.
I think it will work no matter how it's ordered after your patch? IOW I'd
hope it'll keep working if the 1st is a nullptr:
{"name": "css", "type": "uint8", "size": 1},
{"name": "css", "type": "struct", "struct": {"vmsd_name": "s390_css_img", ... }, "size": 768},
{"name": "css", "array_len": 254, "type": "uint8", "size": 1},
Because IIUC the python script will parse each of the lines above into a
VMSD field.
>
> >> else:
> >> - self.data[field['name']] = field['data']
> >> + # There could be multiple entries for the same field
> >> + # name, e.g. when a compressed array was broken in
> >> + # more than one piece.
> >> + if (field['name'] in self.data and
> >> + type(self.data[field['name']]) == list):
> >> + self.data[field['name']].append(field['data'])
> >> + else:
> >> + self.data[field['name']] = field['data']
> >
> > Do we realy need these script changes? I thought VMSDFieldStruct always
> > breaks array_len field into "index" based anyway?
> >
> > new_fields = []
> > for field in self.desc['struct']['fields']:
> > if not 'array_len' in field:
> > new_fields.append(field)
> > continue
> > array_len = field.pop('array_len')
> > field['index'] = 0
> > new_fields.append(field)
> > for i in range(1, array_len):
> > c = field.copy()
> > c['index'] = i
> > new_fields.append(c)
> >
> > self.desc['struct']['fields'] = new_fields
>
> This code is about decompressing the array, it doesn't handle multiple
> entries with the same name. See the JSON I posted up there.
>
> This makes the single:
>
> {"name": "css", "array_len": 254, "type": "uint8", "size": 1},
>
> become multiple:
>
> {"name": "css", "index": 0, "type": "uint8", "size": 1},
> {"name": "css", "index": 1, "type": "uint8", "size": 1},
> ...
> {"name": "css", "index": 253, "type": "uint8", "size": 1},
Correct.
I think that means for each of the break-down entries there'll be an
"index" if it's an array. What you changed above is the case where "index"
is not available, which is processing the non-array entry. Why does that
need change? What happens if you run this without the python part you
changed in this patch?
--
Peter Xu
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 6/7] migration: Fix arrays of pointers in JSON writer
2025-01-08 16:14 ` Peter Xu
@ 2025-01-08 17:15 ` Fabiano Rosas
2025-01-08 17:56 ` Peter Xu
0 siblings, 1 reply; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-08 17:15 UTC (permalink / raw)
To: Peter Xu; +Cc: qemu-devel, Thomas Huth
Peter Xu <peterx@redhat.com> writes:
> On Wed, Jan 08, 2025 at 10:52:30AM -0300, Fabiano Rosas wrote:
>> Peter Xu <peterx@redhat.com> writes:
>>
>> > On Tue, Jan 07, 2025 at 04:50:24PM -0300, Fabiano Rosas wrote:
>> >> Currently, if an array of pointers contains a NULL pointer, that
>> >> pointer will be encoded as '0' in the stream. Since the JSON writer
>> >> doesn't define a "pointer" type, that '0' will now be an uint64, which
>> >> is different from the original type being pointed to, e.g. struct.
>> >>
>> >> That mixed-type array shouldn't be compressed, otherwise data is lost
>> >> as the code currently makes the whole array have the type of the first
>> >> element.
>> >>
>> >> While we could disable the array compression when a NULL pointer is
>> >> found, the JSON part of the stream still makes part of downtime, so we
>> >> should avoid writing unecessary bytes to it.
>> >>
>> >> Keep the array compression in place, but break the array into several
>> >> type-contiguous pieces if NULL and non-NULL pointers are mixed.
>> >
>> > Could I request for a sample JSON dump for an example array in the commit
>> > log? This whole solution looks working but is tricky. A sample could help
>> > people understand (e.g. showing the same "name" being dumped multiple
>> > times..).
>>
>> {"name": "s390_css", "instance_id": 0, "vmsd_name": "s390_css",
>> "version": 1, "fields": [
>> ...,
>> {"name": "css", "array_len": 254, "type": "uint8", "size": 1},
>> {"name": "css", "type": "struct", "struct": {
>> "vmsd_name": "s390_css_img", "version": 1, "fields": [{"name":
>> "chpids", "array_len": 256, "type": "struct", "struct": {"vmsd_name":
>> "s390_chp_info", "version": 1, "fields": [{"name": "in_use", "type":
>> "uint8", "size": 1}, {"name": "type", "type": "uint8", "size": 1},
>> {"name": "is_virtual", "type": "uint8", "size": 1}]}, "size": 3}]},
>> "size": 768},
>> {"name": "css", "type": "uint8", "size": 1},
>> ...
>> ]}
>
> Yes something like this would work, thanks. We could even omit most of the
> struct details but only show the important ones:
>
> {"name": "s390_css", "instance_id": 0, "vmsd_name": "s390_css",
> "version": 1, "fields": [
> ...,
> {"name": "css", "array_len": 254, "type": "uint8", "size": 1},
> {"name": "css", "type": "struct", "struct": {"vmsd_name": "s390_css_img", ... }, "size": 768},
> {"name": "css", "type": "uint8", "size": 1},
> ...
> ]}
>
>>
>> >
>> > Side note: I tried to dump a very basic VM's JSON out to disk, it scares me
>> > on the size:
>> >
>> > $ ls -lhS JSON.out
>> > -rw-r--r--. 1 peterx peterx 106K Jan 7 17:18 JSON.out
>> >
>> > That's a simplest VM with all default stuff, mostly nothing complex.. I may
>> > really need to measure how the JSON debug strings affect migration function
>> > or perf at some point..
>> >
>>
>> Agreed.
>>
>> >>
>> >> Signed-off-by: Fabiano Rosas <farosas@suse.de>
>> >> ---
>> >> migration/vmstate.c | 33 ++++++++++++++++++++++++++++++++-
>> >> scripts/analyze-migration.py | 9 ++++++++-
>> >> 2 files changed, 40 insertions(+), 2 deletions(-)
>> >>
>> >> diff --git a/migration/vmstate.c b/migration/vmstate.c
>> >> index 52704c822c..a79ccf3875 100644
>> >> --- a/migration/vmstate.c
>> >> +++ b/migration/vmstate.c
>> >> @@ -425,15 +425,19 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
>> >> int size = vmstate_size(opaque, field);
>> >> uint64_t old_offset, written_bytes;
>> >> JSONWriter *vmdesc_loop = vmdesc;
>> >> + bool is_prev_null = false;
>> >>
>> >> trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);
>> >> if (field->flags & VMS_POINTER) {
>> >> first_elem = *(void **)first_elem;
>> >> assert(first_elem || !n_elems || !size);
>> >> }
>> >> +
>> >> for (i = 0; i < n_elems; i++) {
>> >> void *curr_elem = first_elem + size * i;
>> >> const VMStateField *inner_field;
>> >> + bool is_null;
>> >> + int max_elems = n_elems - i;
>> >>
>> >> old_offset = qemu_file_transferred(f);
>> >> if (field->flags & VMS_ARRAY_OF_POINTER) {
>> >> @@ -448,12 +452,39 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
>> >> * not follow.
>> >> */
>> >> inner_field = vmsd_create_fake_nullptr_field(field);
>> >> + is_null = true;
>> >> } else {
>> >> inner_field = field;
>> >> + is_null = false;
>> >> + }
>> >> +
>> >> + /*
>> >> + * Due to the fake nullptr handling above, if there's mixed
>> >> + * null/non-null data, it doesn't make sense to emit a
>> >> + * compressed array representation spanning the entire array
>> >> + * because the field types will be different (e.g. struct
>> >> + * vs. uint64_t). Search ahead for the next null/non-null
>> >> + * element and start a new compressed array if found.
>> >> + */
>> >> + if (field->flags & VMS_ARRAY_OF_POINTER &&
>> >> + is_null != is_prev_null) {
>> >> +
>> >> + is_prev_null = is_null;
>> >> + vmdesc_loop = vmdesc;
>> >> +
>> >> + for (int j = i + 1; j < n_elems; j++) {
>> >> + void *elem = *(void **)(first_elem + size * j);
>> >> + bool elem_is_null = !elem && size;
>> >> +
>> >> + if (is_null != elem_is_null) {
>> >> + max_elems = j - i;
>> >> + break;
>> >> + }
>> >> + }
>> >> }
>> >>
>> >> vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field,
>> >> - i, n_elems);
>> >> + i, max_elems);
>> >>
>> >> if (inner_field->flags & VMS_STRUCT) {
>> >> ret = vmstate_save_state(f, inner_field->vmsd,
>> >> diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
>> >> index 4836920ddc..9138e91a11 100755
>> >> --- a/scripts/analyze-migration.py
>> >> +++ b/scripts/analyze-migration.py
>> >> @@ -497,7 +497,14 @@ def read(self):
>> >> raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
>> >> a.append(field['data'])
>>
>> There's actually a bug here, the code above does:
>>
>> if len(a) != int(field['index']):
>> raise Exception()
>>
>> Which only works with this patch because the compressed array happens to
>> come first.
>
> I think it will work no matter how it's ordered after your patch? IOW I'd
> hope it'll keep working if the 1st is a nullptr:
>
> {"name": "css", "type": "uint8", "size": 1},
> {"name": "css", "type": "struct", "struct": {"vmsd_name": "s390_css_img", ... }, "size": 768},
> {"name": "css", "array_len": 254, "type": "uint8", "size": 1},
>
> Because IIUC the python script will parse each of the lines above into a
> VMSD field.
Yes, but all fields go into self.data of the VMSDFieldStruct, so
self.data["css"] will increase beyond the size of the array.
>>
>> >> else:
>> >> - self.data[field['name']] = field['data']
>> >> + # There could be multiple entries for the same field
>> >> + # name, e.g. when a compressed array was broken in
>> >> + # more than one piece.
>> >> + if (field['name'] in self.data and
>> >> + type(self.data[field['name']]) == list):
>> >> + self.data[field['name']].append(field['data'])
>> >> + else:
>> >> + self.data[field['name']] = field['data']
>> >
>> > Do we realy need these script changes? I thought VMSDFieldStruct always
>> > breaks array_len field into "index" based anyway?
>> >
>> > new_fields = []
>> > for field in self.desc['struct']['fields']:
>> > if not 'array_len' in field:
>> > new_fields.append(field)
>> > continue
>> > array_len = field.pop('array_len')
>> > field['index'] = 0
>> > new_fields.append(field)
>> > for i in range(1, array_len):
>> > c = field.copy()
>> > c['index'] = i
>> > new_fields.append(c)
>> >
>> > self.desc['struct']['fields'] = new_fields
>>
>> This code is about decompressing the array, it doesn't handle multiple
>> entries with the same name. See the JSON I posted up there.
>>
>> This makes the single:
>>
>> {"name": "css", "array_len": 254, "type": "uint8", "size": 1},
>>
>> become multiple:
>>
>> {"name": "css", "index": 0, "type": "uint8", "size": 1},
>> {"name": "css", "index": 1, "type": "uint8", "size": 1},
>> ...
>> {"name": "css", "index": 253, "type": "uint8", "size": 1},
>
> Correct.
>
> I think that means for each of the break-down entries there'll be an
> "index" if it's an array. What you changed above is the case where "index"
> is not available, which is processing the non-array entry. Why does that
> need change?
It needs to append to, not overwrite the previous self.data[name]
> What happens if you run this without the python part you
> changed in this patch?
The last nullptr overwrites everything else:
"s390_css (14)": {
"pending_crws": "00",
"sei_pending": false,
"do_crw_mchk": true,
"crws_lost": false,
"max_cssid": "0x00",
"max_ssid": "0x00",
"chnmon_active": false,
"chnmon_area": "0x0000000000000000",
--> "css": "nullptr",
"default_cssid": "0xfe"
},
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 6/7] migration: Fix arrays of pointers in JSON writer
2025-01-08 17:15 ` Fabiano Rosas
@ 2025-01-08 17:56 ` Peter Xu
0 siblings, 0 replies; 18+ messages in thread
From: Peter Xu @ 2025-01-08 17:56 UTC (permalink / raw)
To: Fabiano Rosas; +Cc: qemu-devel, Thomas Huth
On Wed, Jan 08, 2025 at 02:15:19PM -0300, Fabiano Rosas wrote:
> >> >> else:
> >> >> - self.data[field['name']] = field['data']
> >> >> + # There could be multiple entries for the same field
> >> >> + # name, e.g. when a compressed array was broken in
> >> >> + # more than one piece.
> >> >> + if (field['name'] in self.data and
> >> >> + type(self.data[field['name']]) == list):
> >> >> + self.data[field['name']].append(field['data'])
> >> >> + else:
> >> >> + self.data[field['name']] = field['data']
[...]
> The last nullptr overwrites everything else:
>
> "s390_css (14)": {
> "pending_crws": "00",
> "sei_pending": false,
> "do_crw_mchk": true,
> "crws_lost": false,
> "max_cssid": "0x00",
> "max_ssid": "0x00",
> "chnmon_active": false,
> "chnmon_area": "0x0000000000000000",
> --> "css": "nullptr",
> "default_cssid": "0xfe"
> },
Oh I see what you meant..
Then I am guessing the current change may not always work, e.g. when the
1st entry only contains one element rather than an array, like:
{"name": "css", "type": "uint8", "size": 1},
{"name": "css", "type": "struct", "struct": {"vmsd_name": "s390_css_img", ... }, "size": 768},
{"name": "css", "array_len": 254, "type": "uint8", "size": 1},
Here we may need something like:
name = field['name']
if (name in self.data):
if (type(self.data[name]) is not list):
self.data[name] = [self.data[name]]
self.data[name].append(field['data'])
else:
self.data[name] = field['data']
--
Peter Xu
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 7/7] s390x: Fix CSS migration
2025-01-07 19:50 [PATCH 0/7] migration: Fix s390 regressions + migration script Fabiano Rosas
` (5 preceding siblings ...)
2025-01-07 19:50 ` [PATCH 6/7] migration: Fix arrays of pointers in JSON writer Fabiano Rosas
@ 2025-01-07 19:50 ` Fabiano Rosas
6 siblings, 0 replies; 18+ messages in thread
From: Fabiano Rosas @ 2025-01-07 19:50 UTC (permalink / raw)
To: qemu-devel; +Cc: Peter Xu, Thomas Huth, Paolo Bonzini, qemu-stable
Commit a55ae46683 ("s390: move css_migration_enabled from machine to
css.c") disabled CSS migration globally instead of doing it
per-instance.
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: qemu-stable@nongnu.org #9.1
Fixes: a55ae46683 ("s390: move css_migration_enabled from machine to css.c")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2704
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Message-Id: <20241213160120.23880-3-farosas@suse.de>
---
hw/s390x/s390-virtio-ccw.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 2be8da2913..7bbf412620 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -1257,6 +1257,7 @@ static void ccw_machine_2_9_instance_options(MachineState *machine)
s390_cpudef_featoff_greater(12, 1, S390_FEAT_ZPCI);
s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_INT_SUPPRESSION);
s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_EVENT_NOTIFICATION);
+ css_migration_enabled = false;
}
static void ccw_machine_2_9_class_options(MachineClass *mc)
@@ -1269,7 +1270,6 @@ static void ccw_machine_2_9_class_options(MachineClass *mc)
ccw_machine_2_10_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
- css_migration_enabled = false;
}
DEFINE_CCW_MACHINE(2, 9);
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread