* [PATCH] migration/rdma: add x-rdma-chunk-shift parameter @ 2026-03-16 6:23 Samuel Zhang 2026-03-19 8:48 ` Markus Armbruster 2026-03-26 2:58 ` [PATCH v2] migration/rdma: add x-rdma-chunk-size parameter Samuel Zhang 0 siblings, 2 replies; 6+ messages in thread From: Samuel Zhang @ 2026-03-16 6:23 UTC (permalink / raw) To: qemu-devel; +Cc: Emily.Deng, PengJu.Zhou, guoqing.zhang The default 1MB RDMA chunk size causes slow live migration because each chunk triggers a write_flush (ibv_post_send). For 8GB RAM, 1MB chunks produce ~15000 flushes vs ~3700 with 1GB chunks. Add x-rdma-chunk-shift parameter to configure the RDMA chunk size (2^N bytes) for faster migration. Usage: -global migration.x-rdma-chunk-shift=30 Performance with RDMA live migration of 8GB RAM VM: | x-rdma-chunk-shift | chunk size | time (s) | throughput (Mbps) | |--------------------|------------|----------|-------------------| | 20 (default) | 1 MB | 37.915 | 1,007 | | 25 | 32 MB | 17.880 | 2,260 | | 30 | 1 GB | 4.368 | 17,529 | Signed-off-by: Samuel Zhang <guoqing.zhang@amd.com> --- migration/options.c | 13 +++++++++++++ migration/options.h | 1 + migration/rdma.c | 37 ++++++++++++++++++++++--------------- qapi/migration.json | 9 ++++++++- 4 files changed, 44 insertions(+), 16 deletions(-) diff --git a/migration/options.c b/migration/options.c index f33b297929..1503ae35a2 100644 --- a/migration/options.c +++ b/migration/options.c @@ -90,6 +90,7 @@ const PropertyInfo qdev_prop_StrOrNull; #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ +#define DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT 20 /* 1MB */ const Property migration_properties[] = { DEFINE_PROP_BOOL("store-global-state", MigrationState, @@ -183,6 +184,9 @@ const Property migration_properties[] = { DEFINE_PROP_ZERO_PAGE_DETECTION("zero-page-detection", MigrationState, parameters.zero_page_detection, ZERO_PAGE_DETECTION_MULTIFD), + DEFINE_PROP_UINT8("x-rdma-chunk-shift", 
MigrationState, + parameters.x_rdma_chunk_shift, + DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT), /* Migration capabilities */ DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), @@ -993,6 +997,15 @@ ZeroPageDetection migrate_zero_page_detection(void) return s->parameters.zero_page_detection; } +uint8_t migrate_rdma_chunk_shift(void) +{ + MigrationState *s = migrate_get_current(); + uint8_t chunk_shift = s->parameters.x_rdma_chunk_shift; + + assert(20 <= chunk_shift && chunk_shift <= 30); + return chunk_shift; +} + /* parameters helpers */ AnnounceParameters *migrate_announce_params(void) diff --git a/migration/options.h b/migration/options.h index b502871097..3f214465a3 100644 --- a/migration/options.h +++ b/migration/options.h @@ -87,6 +87,7 @@ const char *migrate_tls_creds(void); const char *migrate_tls_hostname(void); uint64_t migrate_xbzrle_cache_size(void); ZeroPageDetection migrate_zero_page_detection(void); +uint8_t migrate_rdma_chunk_shift(void); /* parameters helpers */ diff --git a/migration/rdma.c b/migration/rdma.c index 55ab85650a..d914a7cd3b 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -44,11 +44,18 @@ #define RDMA_RESOLVE_TIMEOUT_MS 10000 -/* Do not merge data if larger than this. */ -#define RDMA_MERGE_MAX (2 * 1024 * 1024) -#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096) +#define RDMA_SIGNALED_SEND_MAX 512 + +static inline uint64_t rdma_chunk_size(void) +{ + return 1UL << migrate_rdma_chunk_shift(); +} -#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */ +/* Do not merge data if larger than this. */ +static inline uint64_t rdma_merge_max(void) +{ + return rdma_chunk_size() * 2; +} /* * This is only for non-live state being migrated. 
@@ -527,21 +534,21 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, static inline uint64_t ram_chunk_index(const uint8_t *start, const uint8_t *host) { - return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT; + return ((uintptr_t) host - (uintptr_t) start) >> migrate_rdma_chunk_shift(); } static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, uint64_t i) { return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr + - (i << RDMA_REG_CHUNK_SHIFT)); + (i << migrate_rdma_chunk_shift())); } static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block, uint64_t i) { uint8_t *result = ram_chunk_start(rdma_ram_block, i) + - (1UL << RDMA_REG_CHUNK_SHIFT); + rdma_chunk_size(); if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) { result = rdma_ram_block->local_host_addr + rdma_ram_block->length; @@ -1841,6 +1848,7 @@ static int qemu_rdma_write_one(RDMAContext *rdma, struct ibv_send_wr *bad_wr; int reg_result_idx, ret, count = 0; uint64_t chunk, chunks; + uint64_t chunk_size = rdma_chunk_size(); uint8_t *chunk_start, *chunk_end; RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); RDMARegister reg; @@ -1861,22 +1869,21 @@ retry: chunk_start = ram_chunk_start(block, chunk); if (block->is_ram_block) { - chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); + chunks = length / chunk_size; - if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { + if (chunks && ((length % chunk_size) == 0)) { chunks--; } } else { - chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); + chunks = block->length / chunk_size; - if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { + if (chunks && ((block->length % chunk_size) == 0)) { chunks--; } } trace_qemu_rdma_write_one_top(chunks + 1, - (chunks + 1) * - (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); + (chunks + 1) * chunk_size / 1024 / 1024); chunk_end = ram_chunk_end(block, chunk + chunks); @@ 
-2176,7 +2183,7 @@ static int qemu_rdma_write(RDMAContext *rdma, rdma->current_length += len; /* flush it if buffer is too large */ - if (rdma->current_length >= RDMA_MERGE_MAX) { + if (rdma->current_length >= rdma_merge_max()) { return qemu_rdma_write_flush(rdma, errp); } @@ -3522,7 +3529,7 @@ int rdma_registration_handle(QEMUFile *f) } else { chunk = reg->key.chunk; host_addr = block->local_host_addr + - (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); + (reg->key.chunk * rdma_chunk_size()); /* Check for particularly bad chunk value */ if (host_addr < (void *)block->local_host_addr) { error_report("rdma: bad chunk for block %s" diff --git a/qapi/migration.json b/qapi/migration.json index 7134d4ce47..0521bf3d69 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -1007,9 +1007,14 @@ # is @cpr-exec. The first list element is the program's filename, # the remainder its arguments. (Since 10.2) # +# @x-rdma-chunk-shift: RDMA memory registration chunk shift. +# The chunk size is 2^N bytes where N is the value. +# Defaults to 20 (1 MiB). Only takes effect for RDMA migration. +# (Since 10.2) +# # Features: # -# @unstable: Members @x-checkpoint-delay and +# @unstable: Members @x-rdma-chunk-shift, @x-checkpoint-delay and # @x-vcpu-dirty-limit-period are experimental. # # Since: 2.4 @@ -1045,6 +1050,8 @@ '*vcpu-dirty-limit': 'uint64', '*mode': 'MigMode', '*zero-page-detection': 'ZeroPageDetection', + '*x-rdma-chunk-shift': { 'type': 'uint8', + 'features': [ 'unstable' ] }, '*direct-io': 'bool', '*cpr-exec-command': [ 'str' ]} } -- 2.43.7 ^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] migration/rdma: add x-rdma-chunk-shift parameter 2026-03-16 6:23 [PATCH] migration/rdma: add x-rdma-chunk-shift parameter Samuel Zhang @ 2026-03-19 8:48 ` Markus Armbruster 2026-03-20 3:39 ` Zhang, GuoQing (Sam) 2026-03-26 2:58 ` [PATCH v2] migration/rdma: add x-rdma-chunk-size parameter Samuel Zhang 1 sibling, 1 reply; 6+ messages in thread From: Markus Armbruster @ 2026-03-19 8:48 UTC (permalink / raw) To: Samuel Zhang; +Cc: qemu-devel, Emily.Deng, PengJu.Zhou Samuel Zhang <guoqing.zhang@amd.com> writes: > The default 1MB RDMA chunk size causes slow live migration because > each chunk triggers a write_flush (ibv_post_send). For 8GB RAM, > 1MB chunks produce ~15000 flushes vs ~3700 with 1GB chunks. > > Add x-rdma-chunk-shift parameter to configure the RDMA chunk size > (2^N bytes) for faster migration. > Usage: -global migration.x-rdma-chunk-shift=30 > > Performance with RDMA live migration of 8GB RAM VM: > > | x-rdma-chunk-shift | chunk size | time (s) | throughput (Mbps) | > |--------------------|------------|----------|-------------------| > | 20 (default) | 1 MB | 37.915 | 1,007 | > | 25 | 32 MB | 17.880 | 2,260 | > | 30 | 1 GB | 4.368 | 17,529 | > > Signed-off-by: Samuel Zhang <guoqing.zhang@amd.com> > --- > migration/options.c | 13 +++++++++++++ > migration/options.h | 1 + > migration/rdma.c | 37 ++++++++++++++++++++++--------------- > qapi/migration.json | 9 ++++++++- > 4 files changed, 44 insertions(+), 16 deletions(-) > > diff --git a/migration/options.c b/migration/options.c > index f33b297929..1503ae35a2 100644 > --- a/migration/options.c > +++ b/migration/options.c > @@ -90,6 +90,7 @@ const PropertyInfo qdev_prop_StrOrNull; > > #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ > #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ > +#define DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT 20 /* 1MB */ > > const Property migration_properties[] = { > DEFINE_PROP_BOOL("store-global-state", MigrationState, > @@ -183,6 +184,9 @@ 
const Property migration_properties[] = { > DEFINE_PROP_ZERO_PAGE_DETECTION("zero-page-detection", MigrationState, > parameters.zero_page_detection, > ZERO_PAGE_DETECTION_MULTIFD), > + DEFINE_PROP_UINT8("x-rdma-chunk-shift", MigrationState, > + parameters.x_rdma_chunk_shift, > + DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT), > > /* Migration capabilities */ > DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), > @@ -993,6 +997,15 @@ ZeroPageDetection migrate_zero_page_detection(void) > return s->parameters.zero_page_detection; > } > > +uint8_t migrate_rdma_chunk_shift(void) > +{ > + MigrationState *s = migrate_get_current(); > + uint8_t chunk_shift = s->parameters.x_rdma_chunk_shift; > + > + assert(20 <= chunk_shift && chunk_shift <= 30); Where is this ensured? > + return chunk_shift; > +} > + > /* parameters helpers */ > > AnnounceParameters *migrate_announce_params(void) > diff --git a/migration/options.h b/migration/options.h > index b502871097..3f214465a3 100644 > --- a/migration/options.h > +++ b/migration/options.h > @@ -87,6 +87,7 @@ const char *migrate_tls_creds(void); > const char *migrate_tls_hostname(void); > uint64_t migrate_xbzrle_cache_size(void); > ZeroPageDetection migrate_zero_page_detection(void); > +uint8_t migrate_rdma_chunk_shift(void); > > /* parameters helpers */ > > diff --git a/migration/rdma.c b/migration/rdma.c > index 55ab85650a..d914a7cd3b 100644 > --- a/migration/rdma.c > +++ b/migration/rdma.c > @@ -44,11 +44,18 @@ > > #define RDMA_RESOLVE_TIMEOUT_MS 10000 > > -/* Do not merge data if larger than this. */ > -#define RDMA_MERGE_MAX (2 * 1024 * 1024) > -#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096) > +#define RDMA_SIGNALED_SEND_MAX 512 > + > +static inline uint64_t rdma_chunk_size(void) > +{ > + return 1UL << migrate_rdma_chunk_shift(); > +} > > -#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */ > +/* Do not merge data if larger than this. 
*/ > +static inline uint64_t rdma_merge_max(void) > +{ > + return rdma_chunk_size() * 2; > +} > > /* > * This is only for non-live state being migrated. > @@ -527,21 +534,21 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, > static inline uint64_t ram_chunk_index(const uint8_t *start, > const uint8_t *host) > { > - return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT; > + return ((uintptr_t) host - (uintptr_t) start) >> migrate_rdma_chunk_shift(); > } > > static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, > uint64_t i) > { > return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr + > - (i << RDMA_REG_CHUNK_SHIFT)); > + (i << migrate_rdma_chunk_shift())); > } > > static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block, > uint64_t i) > { > uint8_t *result = ram_chunk_start(rdma_ram_block, i) + > - (1UL << RDMA_REG_CHUNK_SHIFT); > + rdma_chunk_size(); > > if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) { > result = rdma_ram_block->local_host_addr + rdma_ram_block->length; > @@ -1841,6 +1848,7 @@ static int qemu_rdma_write_one(RDMAContext *rdma, > struct ibv_send_wr *bad_wr; > int reg_result_idx, ret, count = 0; > uint64_t chunk, chunks; > + uint64_t chunk_size = rdma_chunk_size(); > uint8_t *chunk_start, *chunk_end; > RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); > RDMARegister reg; > @@ -1861,22 +1869,21 @@ retry: > chunk_start = ram_chunk_start(block, chunk); > > if (block->is_ram_block) { > - chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); > + chunks = length / chunk_size; > > - if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { > + if (chunks && ((length % chunk_size) == 0)) { > chunks--; > } > } else { > - chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); > + chunks = block->length / chunk_size; > > - if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { > + if (chunks && 
((block->length % chunk_size) == 0)) { > chunks--; > } > } > > trace_qemu_rdma_write_one_top(chunks + 1, > - (chunks + 1) * > - (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); > + (chunks + 1) * chunk_size / 1024 / 1024); > > chunk_end = ram_chunk_end(block, chunk + chunks); > > @@ -2176,7 +2183,7 @@ static int qemu_rdma_write(RDMAContext *rdma, > rdma->current_length += len; > > /* flush it if buffer is too large */ > - if (rdma->current_length >= RDMA_MERGE_MAX) { > + if (rdma->current_length >= rdma_merge_max()) { > return qemu_rdma_write_flush(rdma, errp); > } > > @@ -3522,7 +3529,7 @@ int rdma_registration_handle(QEMUFile *f) > } else { > chunk = reg->key.chunk; > host_addr = block->local_host_addr + > - (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); > + (reg->key.chunk * rdma_chunk_size()); > /* Check for particularly bad chunk value */ > if (host_addr < (void *)block->local_host_addr) { > error_report("rdma: bad chunk for block %s" > diff --git a/qapi/migration.json b/qapi/migration.json > index 7134d4ce47..0521bf3d69 100644 > --- a/qapi/migration.json > +++ b/qapi/migration.json > @@ -1007,9 +1007,14 @@ > # is @cpr-exec. The first list element is the program's filename, > # the remainder its arguments. (Since 10.2) > # > +# @x-rdma-chunk-shift: RDMA memory registration chunk shift. > +# The chunk size is 2^N bytes where N is the value. The value of what? Oh, the value of @x-rdma-chunk-shift. Acceptable range? I doubt 0 or 255 work :) Would this be easier to document if we make it a byte count @x-rdma-chunk-size, must be a power of two? > +# Defaults to 20 (1 MiB). Only takes effect for RDMA migration. > +# (Since 10.2) 11.0 right now, but realistically 11.1. > +# > # Features: > # > -# @unstable: Members @x-checkpoint-delay and > +# @unstable: Members @x-rdma-chunk-shift, @x-checkpoint-delay and > # @x-vcpu-dirty-limit-period are experimental. 
Keep the list of members sorted: # @unstable: Members @x-checkpoint-delay, @x-rdma-chunk-shift, and # @x-vcpu-dirty-limit-period are experimental. > # > # Since: 2.4 > @@ -1045,6 +1050,8 @@ > '*vcpu-dirty-limit': 'uint64', > '*mode': 'MigMode', > '*zero-page-detection': 'ZeroPageDetection', > + '*x-rdma-chunk-shift': { 'type': 'uint8', > + 'features': [ 'unstable' ] }, > '*direct-io': 'bool', > '*cpr-exec-command': [ 'str' ]} } ^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] migration/rdma: add x-rdma-chunk-shift parameter 2026-03-19 8:48 ` Markus Armbruster @ 2026-03-20 3:39 ` Zhang, GuoQing (Sam) 2026-03-20 7:42 ` Markus Armbruster 0 siblings, 1 reply; 6+ messages in thread From: Zhang, GuoQing (Sam) @ 2026-03-20 3:39 UTC (permalink / raw) To: Markus Armbruster, Samuel Zhang; +Cc: qemu-devel, Emily.Deng, PengJu.Zhou Hi Markus, Thank you for your feedback. Please see my replies inline. Regards Sam On 2026/3/19 16:48, Markus Armbruster wrote: > [You don't often get email from armbru@redhat.com. Learn why this is important at https://aka.ms/LearnAboutSenderIdentification ] > > Samuel Zhang <guoqing.zhang@amd.com> writes: > >> The default 1MB RDMA chunk size causes slow live migration because >> each chunk triggers a write_flush (ibv_post_send). For 8GB RAM, >> 1MB chunks produce ~15000 flushes vs ~3700 with 1GB chunks. >> >> Add x-rdma-chunk-shift parameter to configure the RDMA chunk size >> (2^N bytes) for faster migration. >> Usage: -global migration.x-rdma-chunk-shift=30 >> >> Performance with RDMA live migration of 8GB RAM VM: >> >> | x-rdma-chunk-shift | chunk size | time (s) | throughput (Mbps) | >> |--------------------|------------|----------|-------------------| >> | 20 (default) | 1 MB | 37.915 | 1,007 | >> | 25 | 32 MB | 17.880 | 2,260 | >> | 30 | 1 GB | 4.368 | 17,529 | >> >> Signed-off-by: Samuel Zhang <guoqing.zhang@amd.com> >> --- >> migration/options.c | 13 +++++++++++++ >> migration/options.h | 1 + >> migration/rdma.c | 37 ++++++++++++++++++++++--------------- >> qapi/migration.json | 9 ++++++++- >> 4 files changed, 44 insertions(+), 16 deletions(-) >> >> diff --git a/migration/options.c b/migration/options.c >> index f33b297929..1503ae35a2 100644 >> --- a/migration/options.c >> +++ b/migration/options.c >> @@ -90,6 +90,7 @@ const PropertyInfo qdev_prop_StrOrNull; >> >> #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ >> #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ >> +#define
DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT 20 /* 1MB */ >> >> const Property migration_properties[] = { >> DEFINE_PROP_BOOL("store-global-state", MigrationState, >> @@ -183,6 +184,9 @@ const Property migration_properties[] = { >> DEFINE_PROP_ZERO_PAGE_DETECTION("zero-page-detection", MigrationState, >> parameters.zero_page_detection, >> ZERO_PAGE_DETECTION_MULTIFD), >> + DEFINE_PROP_UINT8("x-rdma-chunk-shift", MigrationState, >> + parameters.x_rdma_chunk_shift, >> + DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT), >> >> /* Migration capabilities */ >> DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), >> @@ -993,6 +997,15 @@ ZeroPageDetection migrate_zero_page_detection(void) >> return s->parameters.zero_page_detection; >> } >> >> +uint8_t migrate_rdma_chunk_shift(void) >> +{ >> + MigrationState *s = migrate_get_current(); >> + uint8_t chunk_shift = s->parameters.x_rdma_chunk_shift; >> + >> + assert(20 <= chunk_shift && chunk_shift <= 30); > Where is this ensured? It is ensured just here. The chunk_shift is provided by user when starting qemu process. `-global migration.x-rdma-chunk-shift=30` Valid range is [20, 30], if user provided an invalid value, assert will fail and qemu will exit with error logs. I don't know any better places to validate the value. Any suggestions on this? Thank you! 
>> + return chunk_shift; >> +} >> + >> /* parameters helpers */ >> >> AnnounceParameters *migrate_announce_params(void) >> diff --git a/migration/options.h b/migration/options.h >> index b502871097..3f214465a3 100644 >> --- a/migration/options.h >> +++ b/migration/options.h >> @@ -87,6 +87,7 @@ const char *migrate_tls_creds(void); >> const char *migrate_tls_hostname(void); >> uint64_t migrate_xbzrle_cache_size(void); >> ZeroPageDetection migrate_zero_page_detection(void); >> +uint8_t migrate_rdma_chunk_shift(void); >> >> /* parameters helpers */ >> >> diff --git a/migration/rdma.c b/migration/rdma.c >> index 55ab85650a..d914a7cd3b 100644 >> --- a/migration/rdma.c >> +++ b/migration/rdma.c >> @@ -44,11 +44,18 @@ >> >> #define RDMA_RESOLVE_TIMEOUT_MS 10000 >> >> -/* Do not merge data if larger than this. */ >> -#define RDMA_MERGE_MAX (2 * 1024 * 1024) >> -#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096) >> +#define RDMA_SIGNALED_SEND_MAX 512 >> + >> +static inline uint64_t rdma_chunk_size(void) >> +{ >> + return 1UL << migrate_rdma_chunk_shift(); >> +} >> >> -#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */ >> +/* Do not merge data if larger than this. */ >> +static inline uint64_t rdma_merge_max(void) >> +{ >> + return rdma_chunk_size() * 2; >> +} >> >> /* >> * This is only for non-live state being migrated. 
>> @@ -527,21 +534,21 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, >> static inline uint64_t ram_chunk_index(const uint8_t *start, >> const uint8_t *host) >> { >> - return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT; >> + return ((uintptr_t) host - (uintptr_t) start) >> migrate_rdma_chunk_shift(); >> } >> >> static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, >> uint64_t i) >> { >> return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr + >> - (i << RDMA_REG_CHUNK_SHIFT)); >> + (i << migrate_rdma_chunk_shift())); >> } >> >> static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block, >> uint64_t i) >> { >> uint8_t *result = ram_chunk_start(rdma_ram_block, i) + >> - (1UL << RDMA_REG_CHUNK_SHIFT); >> + rdma_chunk_size(); >> >> if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) { >> result = rdma_ram_block->local_host_addr + rdma_ram_block->length; >> @@ -1841,6 +1848,7 @@ static int qemu_rdma_write_one(RDMAContext *rdma, >> struct ibv_send_wr *bad_wr; >> int reg_result_idx, ret, count = 0; >> uint64_t chunk, chunks; >> + uint64_t chunk_size = rdma_chunk_size(); >> uint8_t *chunk_start, *chunk_end; >> RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); >> RDMARegister reg; >> @@ -1861,22 +1869,21 @@ retry: >> chunk_start = ram_chunk_start(block, chunk); >> >> if (block->is_ram_block) { >> - chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); >> + chunks = length / chunk_size; >> >> - if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { >> + if (chunks && ((length % chunk_size) == 0)) { >> chunks--; >> } >> } else { >> - chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); >> + chunks = block->length / chunk_size; >> >> - if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { >> + if (chunks && ((block->length % chunk_size) == 0)) { >> chunks--; >> } >> } >> >> trace_qemu_rdma_write_one_top(chunks + 
1, >> - (chunks + 1) * >> - (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); >> + (chunks + 1) * chunk_size / 1024 / 1024); >> >> chunk_end = ram_chunk_end(block, chunk + chunks); >> >> @@ -2176,7 +2183,7 @@ static int qemu_rdma_write(RDMAContext *rdma, >> rdma->current_length += len; >> >> /* flush it if buffer is too large */ >> - if (rdma->current_length >= RDMA_MERGE_MAX) { >> + if (rdma->current_length >= rdma_merge_max()) { >> return qemu_rdma_write_flush(rdma, errp); >> } >> >> @@ -3522,7 +3529,7 @@ int rdma_registration_handle(QEMUFile *f) >> } else { >> chunk = reg->key.chunk; >> host_addr = block->local_host_addr + >> - (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); >> + (reg->key.chunk * rdma_chunk_size()); >> /* Check for particularly bad chunk value */ >> if (host_addr < (void *)block->local_host_addr) { >> error_report("rdma: bad chunk for block %s" >> diff --git a/qapi/migration.json b/qapi/migration.json >> index 7134d4ce47..0521bf3d69 100644 >> --- a/qapi/migration.json >> +++ b/qapi/migration.json >> @@ -1007,9 +1007,14 @@ >> # is @cpr-exec. The first list element is the program's filename, >> # the remainder its arguments. (Since 10.2) >> # >> +# @x-rdma-chunk-shift: RDMA memory registration chunk shift. >> +# The chunk size is 2^N bytes where N is the value. > The value of what? Oh, the value of @x-rdma-chunk-shift. > > Acceptable range? I doubt 0 or 255 work :) The valid range is [20, 30]. 20 is the default/original value. 30 is the largest value working on my servers with Mellanox RDMA NIC. > Would this be easier to document if we make it a byte count > @x-rdma-chunk-size, must be a power of two? Switch to byte count will be easier to document, but the user may find it a bit harder to use. `-global migration.x-rdma-chunk-size=1073741824` The valid range is [1048576, 1073741824] and it should be power of 2. Do you prefer `x-rdma-chunk-size`? If yes, I will make the switch in v2 patch. >> +# Defaults to 20 (1 MiB). 
Only takes effect for RDMA migration. >> +# (Since 10.2) > 11.0 right now, but realistically 11.1. OK. I will update it in v2 patch. >> +# >> # Features: >> # >> -# @unstable: Members @x-checkpoint-delay and >> +# @unstable: Members @x-rdma-chunk-shift, @x-checkpoint-delay and >> # @x-vcpu-dirty-limit-period are experimental. > Keep the list of members sorted: > > # @unstable: Members @x-checkpoint-delay, @x-rdma-chunk-shift, and > # @x-vcpu-dirty-limit-period are experimental. OK. I will update it in v2 patch. > >> # >> # Since: 2.4 >> @@ -1045,6 +1050,8 @@ >> '*vcpu-dirty-limit': 'uint64', >> '*mode': 'MigMode', >> '*zero-page-detection': 'ZeroPageDetection', >> + '*x-rdma-chunk-shift': { 'type': 'uint8', >> + 'features': [ 'unstable' ] }, >> '*direct-io': 'bool', >> '*cpr-exec-command': [ 'str' ]} } ^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] migration/rdma: add x-rdma-chunk-shift parameter 2026-03-20 3:39 ` Zhang, GuoQing (Sam) @ 2026-03-20 7:42 ` Markus Armbruster 0 siblings, 0 replies; 6+ messages in thread From: Markus Armbruster @ 2026-03-20 7:42 UTC (permalink / raw) To: Zhang, GuoQing (Sam) Cc: Samuel Zhang, qemu-devel, Emily.Deng, PengJu.Zhou, Peter Xu, Fabiano Rosas, Li Zhijian I just realized you neglected to cc: migration maintainers. I'm fixing that for you. You can use scripts/get_maintainer.pl *.patch to find maintainers you might want to cc:. "Zhang, GuoQing (Sam)" <guoqzhan@amd.com> writes: > Hi Markus, > > Thank you for your feedback. Please see my replies inline. > > Regards > Sam > > On 2026/3/19 16:48, Markus Armbruster wrote: [...] >> Samuel Zhang <guoqing.zhang@amd.com> writes: >> >>> The default 1MB RDMA chunk size causes slow live migration because >>> each chunk triggers a write_flush (ibv_post_send). For 8GB RAM, >>> 1MB chunks produce ~15000 flushes vs ~3700 with 1GB chunks. >>> >>> Add x-rdma-chunk-shift parameter to configure the RDMA chunk size >>> (2^N bytes) for faster migration. >>> Usage: -global migration.x-rdma-chunk-shift=30 Does monitor command migrate-set-parameters work? Testing... it's accepted: { "execute": "migrate-set-parameters", "arguments": { "x-rdma-chunk-shift": 30 } } {"return": {}} Didn't test it actually works. However, query-migrate-parameters does not show the new parameter. Needs fixing. See migrate_mark_all_params_present(). Also missing: update to hmp_migrate_set_parameter() and hmp_info_migrate_parameters(). Friends don't let friends use -global unless there is no other way. 
>>> Performance with RDMA live migration of 8GB RAM VM: >>> >>> | x-rdma-chunk-shift | chunk size | time (s) | throughput (Mbps) | >>> |--------------------|------------|----------|-------------------| >>> | 20 (default) | 1 MB | 37.915 | 1,007 | >>> | 25 | 32 MB | 17.880 | 2,260 | >>> | 30 | 1 GB | 4.368 | 17,529 | >>> >>> Signed-off-by: Samuel Zhang<guoqing.zhang@amd.com> >>> --- >>> migration/options.c | 13 +++++++++++++ >>> migration/options.h | 1 + >>> migration/rdma.c | 37 ++++++++++++++++++++++--------------- >>> qapi/migration.json | 9 ++++++++- >>> 4 files changed, 44 insertions(+), 16 deletions(-) >>> >>> diff --git a/migration/options.c b/migration/options.c >>> index f33b297929..1503ae35a2 100644 >>> --- a/migration/options.c >>> +++ b/migration/options.c >>> @@ -90,6 +90,7 @@ const PropertyInfo qdev_prop_StrOrNull; >>> >>> #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ >>> #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ >>> +#define DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT 20 /* 1MB */ >>> >>> const Property migration_properties[] = { >>> DEFINE_PROP_BOOL("store-global-state", MigrationState, >>> @@ -183,6 +184,9 @@ const Property migration_properties[] = { >>> DEFINE_PROP_ZERO_PAGE_DETECTION("zero-page-detection", MigrationState, >>> parameters.zero_page_detection, >>> ZERO_PAGE_DETECTION_MULTIFD), >>> + DEFINE_PROP_UINT8("x-rdma-chunk-shift", MigrationState, >>> + parameters.x_rdma_chunk_shift, >>> + DEFAULT_MIGRATE_X_RDMA_CHUNK_SHIFT), >>> >>> /* Migration capabilities */ >>> DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), >>> @@ -993,6 +997,15 @@ ZeroPageDetection migrate_zero_page_detection(void) >>> return s->parameters.zero_page_detection; >>> } >>> >>> +uint8_t migrate_rdma_chunk_shift(void) >>> +{ >>> + MigrationState *s = migrate_get_current(); >>> + uint8_t chunk_shift = s->parameters.x_rdma_chunk_shift; >>> + >>> + assert(20 <= chunk_shift && chunk_shift <= 30); >> >> Where is this ensured? 
> > It is ensured just here. So, invalid configuration will be accepted, the VM starts, and when you try to migrate it, it crashes. Potential data loss. Crashing on bad input is always wrong. Always, always, always test with bad input, too. I caught it in review this time, but it could've slipped through easily. Dangerous. > The chunk_shift is provided by user when starting qemu process. > > `-global migration.x-rdma-chunk-shift=30` > > Valid range is [20, 30], if user provided an invalid value, assert will fail and qemu will exit with error logs. > > I don't know any better places to validate the value. Any suggestions on this? Thank you! It's perfectly fine not to know things! Just ask then. @x-rdma-chunk-shift is a member of MigrationParameters. The other members are validated in migrate_params_check(). Try doing the same for @x-rdma-chunk-shift. >>> + return chunk_shift; >>> +} >>> + >>> /* parameters helpers */ >>> >>> AnnounceParameters *migrate_announce_params(void) >>> diff --git a/migration/options.h b/migration/options.h >>> index b502871097..3f214465a3 100644 >>> --- a/migration/options.h >>> +++ b/migration/options.h >>> @@ -87,6 +87,7 @@ const char *migrate_tls_creds(void); >>> const char *migrate_tls_hostname(void); >>> uint64_t migrate_xbzrle_cache_size(void); >>> ZeroPageDetection migrate_zero_page_detection(void); >>> +uint8_t migrate_rdma_chunk_shift(void); >>> >>> /* parameters helpers */ >>> >>> diff --git a/migration/rdma.c b/migration/rdma.c >>> index 55ab85650a..d914a7cd3b 100644 >>> --- a/migration/rdma.c >>> +++ b/migration/rdma.c >>> @@ -44,11 +44,18 @@ >>> >>> #define RDMA_RESOLVE_TIMEOUT_MS 10000 >>> >>> -/* Do not merge data if larger than this. 
*/ >>> -#define RDMA_MERGE_MAX (2 * 1024 * 1024) >>> -#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096) >>> +#define RDMA_SIGNALED_SEND_MAX 512 >>> + >>> +static inline uint64_t rdma_chunk_size(void) >>> +{ >>> + return 1UL << migrate_rdma_chunk_shift(); >>> +} >>> >>> -#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */ >>> +/* Do not merge data if larger than this. */ >>> +static inline uint64_t rdma_merge_max(void) >>> +{ >>> + return rdma_chunk_size() * 2; >>> +} >>> >>> /* >>> * This is only for non-live state being migrated. >>> @@ -527,21 +534,21 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, >>> static inline uint64_t ram_chunk_index(const uint8_t *start, >>> const uint8_t *host) >>> { >>> - return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT; >>> + return ((uintptr_t) host - (uintptr_t) start) >> migrate_rdma_chunk_shift(); >>> } >>> >>> static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, >>> uint64_t i) >>> { >>> return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr + >>> - (i << RDMA_REG_CHUNK_SHIFT)); >>> + (i << migrate_rdma_chunk_shift())); >>> } >>> >>> static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block, >>> uint64_t i) >>> { >>> uint8_t *result = ram_chunk_start(rdma_ram_block, i) + >>> - (1UL << RDMA_REG_CHUNK_SHIFT); >>> + rdma_chunk_size(); >>> >>> if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) { >>> result = rdma_ram_block->local_host_addr + rdma_ram_block->length; >>> @@ -1841,6 +1848,7 @@ static int qemu_rdma_write_one(RDMAContext *rdma, >>> struct ibv_send_wr *bad_wr; >>> int reg_result_idx, ret, count = 0; >>> uint64_t chunk, chunks; >>> + uint64_t chunk_size = rdma_chunk_size(); >>> uint8_t *chunk_start, *chunk_end; >>> RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); >>> RDMARegister reg; >>> @@ -1861,22 +1869,21 @@ retry: >>> chunk_start = ram_chunk_start(block, chunk); >>> >>> if 
(block->is_ram_block) { >>> - chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); >>> + chunks = length / chunk_size; >>> >>> - if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { >>> + if (chunks && ((length % chunk_size) == 0)) { >>> chunks--; >>> } >>> } else { >>> - chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); >>> + chunks = block->length / chunk_size; >>> >>> - if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { >>> + if (chunks && ((block->length % chunk_size) == 0)) { >>> chunks--; >>> } >>> } >>> >>> trace_qemu_rdma_write_one_top(chunks + 1, >>> - (chunks + 1) * >>> - (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); >>> + (chunks + 1) * chunk_size / 1024 / 1024); >>> >>> chunk_end = ram_chunk_end(block, chunk + chunks); >>> >>> @@ -2176,7 +2183,7 @@ static int qemu_rdma_write(RDMAContext *rdma, >>> rdma->current_length += len; >>> >>> /* flush it if buffer is too large */ >>> - if (rdma->current_length >= RDMA_MERGE_MAX) { >>> + if (rdma->current_length >= rdma_merge_max()) { >>> return qemu_rdma_write_flush(rdma, errp); >>> } >>> >>> @@ -3522,7 +3529,7 @@ int rdma_registration_handle(QEMUFile *f) >>> } else { >>> chunk = reg->key.chunk; >>> host_addr = block->local_host_addr + >>> - (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); >>> + (reg->key.chunk * rdma_chunk_size()); >>> /* Check for particularly bad chunk value */ >>> if (host_addr < (void *)block->local_host_addr) { >>> error_report("rdma: bad chunk for block %s" >>> diff --git a/qapi/migration.json b/qapi/migration.json >>> index 7134d4ce47..0521bf3d69 100644 >>> --- a/qapi/migration.json >>> +++ b/qapi/migration.json >>> @@ -1007,9 +1007,14 @@ >>> # is @cpr-exec. The first list element is the program's filename, >>> # the remainder its arguments. (Since 10.2) >>> # >>> +# @x-rdma-chunk-shift: RDMA memory registration chunk shift. >>> +# The chunk size is 2^N bytes where N is the value. >> >> The value of what? Oh, the value of @x-rdma-chunk-shift. 
>> >> Acceptable range? I doubt 0 or 255 work :) > > The valid range is [20, 30]. 20 is the default/original value. 30 is the largest value working on my servers with Mellanox RDMA NIC. The range needs to be documented here. >> Would this be easier to document if we make it a byte count >> @x-rdma-chunk-size, must be a power of two? > > Switch to byte count will be easier to document, but the user may find it a bit harder to use. > > `-global migration.x-rdma-chunk-size=1073741824` > > The valid range is [1048576, 1073741824] and it should be power of 2. > > > Do you prefer `x-rdma-chunk-size`? If yes, I will make the switch in v2 patch. Have a look at migration parameter @max-bandwidth. It supports size prefixes like 1M. >>> +# Defaults to 20 (1 MiB). Only takes effect for RDMA migration. >>> +# (Since 10.2) >> >> 11.0 right now, but realistically 11.1. > > OK. I will update it in v2 patch. > > >>> +# >>> # Features: >>> # >>> -# @unstable: Members @x-checkpoint-delay and >>> +# @unstable: Members @x-rdma-chunk-shift, @x-checkpoint-delay and >>> # @x-vcpu-dirty-limit-period are experimental. >> >> Keep the list of members sorted: >> >> # @unstable: Members @x-checkpoint-delay, @x-rdma-chunk-shift, and >> # @x-vcpu-dirty-limit-period are experimental. > > > OK. I will update it in v2 patch. > > >> >>> # >>> # Since: 2.4 >>> @@ -1045,6 +1050,8 @@ >>> '*vcpu-dirty-limit': 'uint64', >>> '*mode': 'MigMode', >>> '*zero-page-detection': 'ZeroPageDetection', >>> + '*x-rdma-chunk-shift': { 'type': 'uint8', >>> + 'features': [ 'unstable' ] }, >>> '*direct-io': 'bool', >>> '*cpr-exec-command': [ 'str' ]} } ^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH v2] migration/rdma: add x-rdma-chunk-size parameter 2026-03-16 6:23 [PATCH] migration/rdma: add x-rdma-chunk-shift parameter Samuel Zhang 2026-03-19 8:48 ` Markus Armbruster @ 2026-03-26 2:58 ` Samuel Zhang 2026-03-26 6:57 ` Markus Armbruster 1 sibling, 1 reply; 6+ messages in thread From: Samuel Zhang @ 2026-03-26 2:58 UTC (permalink / raw) To: qemu-devel Cc: peterx, farosas, lizhijian, eblake, armbru, Emily.Deng, Victor.Zhao, PengJu.Zhou, Qing.Ma, Samuel Zhang The default 1MB RDMA chunk size causes slow live migration because each chunk triggers a write_flush (ibv_post_send). For 8GB RAM, 1MB chunk size produce ~15000 flushes vs ~3700 with 1024MB chunk size. Add x-rdma-chunk-size parameter to configure the RDMA chunk size for faster migration. Usage: `migrate_set_parameter x-rdma-chunk-size 1024M` Performance with RDMA live migration of 8GB RAM VM: | x-rdma-chunk-size (B) | time (s) | throughput (MB/s) | |-----------------------|----------|-------------------| | 1M (default) | 37.915 | 1,007 | | 32M | 17.880 | 2,260 | | 1024M | 4.368 | 17,529 | Signed-off-by: Samuel Zhang <guoqing.zhang@amd.com> --- v2: - Renamed x-rdma-chunk-shift to x-rdma-chunk-size (byte count) - Added validation in migrate_params_check() - Added hmp_migrate_set_parameter() support - Added hmp_info_migrate_parameters() support - Added migrate_mark_all_params_present() - Use qemu_strtosz() for size suffix support migration/migration-hmp-cmds.c | 17 +++++++++++++++++ migration/options.c | 32 +++++++++++++++++++++++++++++++- migration/options.h | 1 + migration/rdma.c | 30 ++++++++++++++++-------------- qapi/migration.json | 11 +++++++++-- 5 files changed, 74 insertions(+), 17 deletions(-) diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c index 0a193b8f54..2c005c08a6 100644 --- a/migration/migration-hmp-cmds.c +++ b/migration/migration-hmp-cmds.c @@ -451,6 +451,13 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) params->direct_io ? 
"on" : "off"); } + if (params->has_x_rdma_chunk_size) { + monitor_printf(mon, "%s: %" PRIu64 " bytes\n", + MigrationParameter_str( + MIGRATION_PARAMETER_X_RDMA_CHUNK_SIZE), + params->x_rdma_chunk_size); + } + assert(params->has_cpr_exec_command); monitor_print_cpr_exec_command(mon, params->cpr_exec_command); } @@ -730,6 +737,16 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) p->has_mode = true; visit_type_MigMode(v, param, &p->mode, &err); break; + case MIGRATION_PARAMETER_X_RDMA_CHUNK_SIZE: + p->has_x_rdma_chunk_size = true; + ret = qemu_strtosz(valuestr, NULL, &valuebw); + if (ret != 0 || valuebw < (1<<20) || valuebw > (1<<30) + || !is_power_of_2(valuebw)) { + error_setg(&err, "Invalid size %s", valuestr); + break; + } + p->x_rdma_chunk_size = valuebw; + break; case MIGRATION_PARAMETER_DIRECT_IO: p->has_direct_io = true; visit_type_bool(v, param, &p->direct_io, &err); diff --git a/migration/options.c b/migration/options.c index f33b297929..91dd874b5e 100644 --- a/migration/options.c +++ b/migration/options.c @@ -90,6 +90,7 @@ const PropertyInfo qdev_prop_StrOrNull; #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ +#define DEFAULT_MIGRATE_X_RDMA_CHUNK_SIZE (1<<20) /* 1MB */ const Property migration_properties[] = { DEFINE_PROP_BOOL("store-global-state", MigrationState, @@ -183,6 +184,9 @@ const Property migration_properties[] = { DEFINE_PROP_ZERO_PAGE_DETECTION("zero-page-detection", MigrationState, parameters.zero_page_detection, ZERO_PAGE_DETECTION_MULTIFD), + DEFINE_PROP_UINT64("x-rdma-chunk-size", MigrationState, + parameters.x_rdma_chunk_size, + DEFAULT_MIGRATE_X_RDMA_CHUNK_SIZE), /* Migration capabilities */ DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), @@ -993,6 +997,15 @@ ZeroPageDetection migrate_zero_page_detection(void) return s->parameters.zero_page_detection; } +uint64_t migrate_rdma_chunk_size(void) +{ + MigrationState *s = 
migrate_get_current(); + uint64_t size = s->parameters.x_rdma_chunk_size; + + assert((1<<20) <= size && size <= (1<<30) && is_power_of_2(size)); + return size; +} + /* parameters helpers */ AnnounceParameters *migrate_announce_params(void) @@ -1055,7 +1068,7 @@ static void migrate_mark_all_params_present(MigrationParameters *p) &p->has_announce_step, &p->has_block_bitmap_mapping, &p->has_x_vcpu_dirty_limit_period, &p->has_vcpu_dirty_limit, &p->has_mode, &p->has_zero_page_detection, &p->has_direct_io, - &p->has_cpr_exec_command, + &p->has_x_rdma_chunk_size, &p->has_cpr_exec_command, }; len = ARRAY_SIZE(has_fields); @@ -1227,6 +1240,15 @@ bool migrate_params_check(MigrationParameters *params, Error **errp) return false; } + if (params->has_x_rdma_chunk_size && + (params->x_rdma_chunk_size < (1<<20) || + params->x_rdma_chunk_size > (1<<30) || + !is_power_of_2(params->x_rdma_chunk_size))) { + error_setg(errp, "Option x_rdma_chunk_size expects " + "a power of 2 in the range 1M to 1024M"); + return false; + } + if (!check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) { error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: "); return false; @@ -1391,6 +1413,10 @@ static void migrate_params_test_apply(MigrationParameters *params, dest->direct_io = params->direct_io; } + if (params->has_x_rdma_chunk_size) { + dest->x_rdma_chunk_size = params->x_rdma_chunk_size; + } + if (params->has_cpr_exec_command) { dest->cpr_exec_command = params->cpr_exec_command; } @@ -1517,6 +1543,10 @@ static void migrate_params_apply(MigrationParameters *params) s->parameters.direct_io = params->direct_io; } + if (params->has_x_rdma_chunk_size) { + s->parameters.x_rdma_chunk_size = params->x_rdma_chunk_size; + } + if (params->has_cpr_exec_command) { qapi_free_strList(s->parameters.cpr_exec_command); s->parameters.cpr_exec_command = diff --git a/migration/options.h b/migration/options.h index b502871097..b46221998a 100644 --- a/migration/options.h +++ 
b/migration/options.h @@ -87,6 +87,7 @@ const char *migrate_tls_creds(void); const char *migrate_tls_hostname(void); uint64_t migrate_xbzrle_cache_size(void); ZeroPageDetection migrate_zero_page_detection(void); +uint64_t migrate_rdma_chunk_size(void); /* parameters helpers */ diff --git a/migration/rdma.c b/migration/rdma.c index 55ab85650a..3e37a1d440 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -45,10 +45,12 @@ #define RDMA_RESOLVE_TIMEOUT_MS 10000 /* Do not merge data if larger than this. */ -#define RDMA_MERGE_MAX (2 * 1024 * 1024) -#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096) +static inline uint64_t rdma_merge_max(void) +{ + return migrate_rdma_chunk_size() * 2; +} -#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */ +#define RDMA_SIGNALED_SEND_MAX 512 /* * This is only for non-live state being migrated. @@ -527,21 +529,21 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, static inline uint64_t ram_chunk_index(const uint8_t *start, const uint8_t *host) { - return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT; + return ((uintptr_t) host - (uintptr_t) start) / migrate_rdma_chunk_size(); } static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, uint64_t i) { return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr + - (i << RDMA_REG_CHUNK_SHIFT)); + (i * migrate_rdma_chunk_size())); } static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block, uint64_t i) { uint8_t *result = ram_chunk_start(rdma_ram_block, i) + - (1UL << RDMA_REG_CHUNK_SHIFT); + migrate_rdma_chunk_size(); if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) { result = rdma_ram_block->local_host_addr + rdma_ram_block->length; @@ -1841,6 +1843,7 @@ static int qemu_rdma_write_one(RDMAContext *rdma, struct ibv_send_wr *bad_wr; int reg_result_idx, ret, count = 0; uint64_t chunk, chunks; + uint64_t chunk_size = migrate_rdma_chunk_size(); uint8_t *chunk_start, *chunk_end; 
RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); RDMARegister reg; @@ -1861,22 +1864,21 @@ retry: chunk_start = ram_chunk_start(block, chunk); if (block->is_ram_block) { - chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); + chunks = length / chunk_size; - if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { + if (chunks && ((length % chunk_size) == 0)) { chunks--; } } else { - chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); + chunks = block->length / chunk_size; - if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { + if (chunks && ((block->length % chunk_size) == 0)) { chunks--; } } trace_qemu_rdma_write_one_top(chunks + 1, - (chunks + 1) * - (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); + (chunks + 1) * chunk_size / 1024 / 1024); chunk_end = ram_chunk_end(block, chunk + chunks); @@ -2176,7 +2178,7 @@ static int qemu_rdma_write(RDMAContext *rdma, rdma->current_length += len; /* flush it if buffer is too large */ - if (rdma->current_length >= RDMA_MERGE_MAX) { + if (rdma->current_length >= rdma_merge_max()) { return qemu_rdma_write_flush(rdma, errp); } @@ -3522,7 +3524,7 @@ int rdma_registration_handle(QEMUFile *f) } else { chunk = reg->key.chunk; host_addr = block->local_host_addr + - (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); + (reg->key.chunk * migrate_rdma_chunk_size()); /* Check for particularly bad chunk value */ if (host_addr < (void *)block->local_host_addr) { error_report("rdma: bad chunk for block %s" diff --git a/qapi/migration.json b/qapi/migration.json index 7134d4ce47..94d2c1c65f 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -806,7 +806,7 @@ # # Features: # -# @unstable: Members @x-checkpoint-delay and +# @unstable: Members @x-checkpoint-delay, @x-rdma-chunk-size, and # @x-vcpu-dirty-limit-period are experimental. 
# # Since: 2.4 @@ -831,6 +831,7 @@ 'mode', 'zero-page-detection', 'direct-io', + { 'name': 'x-rdma-chunk-size', 'features': [ 'unstable' ] }, 'cpr-exec-command'] } ## @@ -1007,9 +1008,13 @@ # is @cpr-exec. The first list element is the program's filename, # the remainder its arguments. (Since 10.2) # +# @x-rdma-chunk-size: RDMA memory registration chunk size in bytes. +# Default is 1M. Must be a power of 2 in the range [1M, 1024M]. +# Only takes effect for RDMA migration. (Since 11.1) +# # Features: # -# @unstable: Members @x-checkpoint-delay and +# @unstable: Members @x-checkpoint-delay, @x-rdma-chunk-size, and # @x-vcpu-dirty-limit-period are experimental. # # Since: 2.4 @@ -1046,6 +1051,8 @@ '*mode': 'MigMode', '*zero-page-detection': 'ZeroPageDetection', '*direct-io': 'bool', + '*x-rdma-chunk-size': { 'type': 'uint64', + 'features': [ 'unstable' ] }, '*cpr-exec-command': [ 'str' ]} } ## -- 2.43.7 ^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v2] migration/rdma: add x-rdma-chunk-size parameter 2026-03-26 2:58 ` [PATCH v2] migration/rdma: add x-rdma-chunk-size parameter Samuel Zhang @ 2026-03-26 6:57 ` Markus Armbruster 0 siblings, 0 replies; 6+ messages in thread From: Markus Armbruster @ 2026-03-26 6:57 UTC (permalink / raw) To: Samuel Zhang Cc: qemu-devel, peterx, farosas, lizhijian, eblake, armbru, Emily.Deng, Victor.Zhao, PengJu.Zhou, Qing.Ma By convention, we don't post new patches in reply to old ones. Next time :) Samuel Zhang <guoqing.zhang@amd.com> writes: > The default 1MB RDMA chunk size causes slow live migration because > each chunk triggers a write_flush (ibv_post_send). For 8GB RAM, > 1MB chunk size produce ~15000 flushes vs ~3700 with 1024MB chunk size. > > Add x-rdma-chunk-size parameter to configure the RDMA chunk size for > faster migration. > Usage: `migrate_set_parameter x-rdma-chunk-size 1024M` > > Performance with RDMA live migration of 8GB RAM VM: > > | x-rdma-chunk-size (B) | time (s) | throughput (MB/s) | > |-----------------------|----------|-------------------| > | 1M (default) | 37.915 | 1,007 | > | 32M | 17.880 | 2,260 | > | 1024M | 4.368 | 17,529 | > > Signed-off-by: Samuel Zhang <guoqing.zhang@amd.com> > --- > v2: > - Renamed x-rdma-chunk-shift to x-rdma-chunk-size (byte count) > - Added validation in migrate_params_check() > - Added hmp_migrate_set_parameter() support > - Added hmp_info_migrate_parameters() support > - Added migrate_mark_all_params_present() > - Use qemu_strtosz() for size suffix support > > migration/migration-hmp-cmds.c | 17 +++++++++++++++++ > migration/options.c | 32 +++++++++++++++++++++++++++++++- > migration/options.h | 1 + > migration/rdma.c | 30 ++++++++++++++++-------------- > qapi/migration.json | 11 +++++++++-- > 5 files changed, 74 insertions(+), 17 deletions(-) > > diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c > index 0a193b8f54..2c005c08a6 100644 > --- a/migration/migration-hmp-cmds.c > +++ 
b/migration/migration-hmp-cmds.c > @@ -451,6 +451,13 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) > params->direct_io ? "on" : "off"); > } > > + if (params->has_x_rdma_chunk_size) { > + monitor_printf(mon, "%s: %" PRIu64 " bytes\n", > + MigrationParameter_str( > + MIGRATION_PARAMETER_X_RDMA_CHUNK_SIZE), > + params->x_rdma_chunk_size); > + } > + > assert(params->has_cpr_exec_command); > monitor_print_cpr_exec_command(mon, params->cpr_exec_command); > } > @@ -730,6 +737,16 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) > p->has_mode = true; > visit_type_MigMode(v, param, &p->mode, &err); > break; > + case MIGRATION_PARAMETER_X_RDMA_CHUNK_SIZE: > + p->has_x_rdma_chunk_size = true; > + ret = qemu_strtosz(valuestr, NULL, &valuebw); We use several variations of the conversion to size: default examples function suffix scale "1" "64K" ----------------------------------------------------------- qemu_strtosz() B 1024 1 64*1024 qemu_strtosz_MiB() M 1024 1024*1024 64*1024 qemu_strtosz_metric() B 1000 1 64*1000 Unfortunate complication of the user interface if you ask me, but changing it now is likely a bad idea. My point is: which one to use here? This function uses two: qemu_strtosz_MiB() directly, and qemu_strtosz() via visit_type_size(). Unless you have a specific reason to want default suffix 'M', use visit_type_size(), it's less code, and the error reporting is better. > + if (ret != 0 || valuebw < (1<<20) || valuebw > (1<<30) > + || !is_power_of_2(valuebw)) { > + error_setg(&err, "Invalid size %s", valuestr); > + break; > + } This is partly redundant with the checking in migrate_params_check(). If you use visit_type_size(), you don't need it at all. If you use qemu_strtosz_MiB(), you should check less. Have a look at the other uses in this function. 
> + p->x_rdma_chunk_size = valuebw; > + break; > case MIGRATION_PARAMETER_DIRECT_IO: > p->has_direct_io = true; > visit_type_bool(v, param, &p->direct_io, &err); > diff --git a/migration/options.c b/migration/options.c > index f33b297929..91dd874b5e 100644 > --- a/migration/options.c > +++ b/migration/options.c > @@ -90,6 +90,7 @@ const PropertyInfo qdev_prop_StrOrNull; > > #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ > #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ > +#define DEFAULT_MIGRATE_X_RDMA_CHUNK_SIZE (1<<20) /* 1MB */ > > const Property migration_properties[] = { > DEFINE_PROP_BOOL("store-global-state", MigrationState, > @@ -183,6 +184,9 @@ const Property migration_properties[] = { > DEFINE_PROP_ZERO_PAGE_DETECTION("zero-page-detection", MigrationState, > parameters.zero_page_detection, > ZERO_PAGE_DETECTION_MULTIFD), > + DEFINE_PROP_UINT64("x-rdma-chunk-size", MigrationState, > + parameters.x_rdma_chunk_size, > + DEFAULT_MIGRATE_X_RDMA_CHUNK_SIZE), > > /* Migration capabilities */ > DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE), > @@ -993,6 +997,15 @@ ZeroPageDetection migrate_zero_page_detection(void) > return s->parameters.zero_page_detection; > } > > +uint64_t migrate_rdma_chunk_size(void) > +{ > + MigrationState *s = migrate_get_current(); > + uint64_t size = s->parameters.x_rdma_chunk_size; > + > + assert((1<<20) <= size && size <= (1<<30) && is_power_of_2(size)); Suggest MiB <= size && size <= GiB. 
> + return size; > +} > + > /* parameters helpers */ > > AnnounceParameters *migrate_announce_params(void) > @@ -1055,7 +1068,7 @@ static void migrate_mark_all_params_present(MigrationParameters *p) > &p->has_announce_step, &p->has_block_bitmap_mapping, > &p->has_x_vcpu_dirty_limit_period, &p->has_vcpu_dirty_limit, > &p->has_mode, &p->has_zero_page_detection, &p->has_direct_io, > - &p->has_cpr_exec_command, > + &p->has_x_rdma_chunk_size, &p->has_cpr_exec_command, > }; > > len = ARRAY_SIZE(has_fields); > @@ -1227,6 +1240,15 @@ bool migrate_params_check(MigrationParameters *params, Error **errp) > return false; > } > > + if (params->has_x_rdma_chunk_size && > + (params->x_rdma_chunk_size < (1<<20) || > + params->x_rdma_chunk_size > (1<<30) || Suggest < MiB and > GiB. > + !is_power_of_2(params->x_rdma_chunk_size))) { > + error_setg(errp, "Option x_rdma_chunk_size expects " > + "a power of 2 in the range 1M to 1024M"); > + return false; > + } > + > if (!check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) { > error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: "); > return false; > @@ -1391,6 +1413,10 @@ static void migrate_params_test_apply(MigrationParameters *params, > dest->direct_io = params->direct_io; > } > > + if (params->has_x_rdma_chunk_size) { > + dest->x_rdma_chunk_size = params->x_rdma_chunk_size; > + } > + > if (params->has_cpr_exec_command) { > dest->cpr_exec_command = params->cpr_exec_command; > } > @@ -1517,6 +1543,10 @@ static void migrate_params_apply(MigrationParameters *params) > s->parameters.direct_io = params->direct_io; > } > > + if (params->has_x_rdma_chunk_size) { > + s->parameters.x_rdma_chunk_size = params->x_rdma_chunk_size; > + } > + > if (params->has_cpr_exec_command) { > qapi_free_strList(s->parameters.cpr_exec_command); > s->parameters.cpr_exec_command = > diff --git a/migration/options.h b/migration/options.h > index b502871097..b46221998a 100644 > --- a/migration/options.h > +++ b/migration/options.h 
> @@ -87,6 +87,7 @@ const char *migrate_tls_creds(void); > const char *migrate_tls_hostname(void); > uint64_t migrate_xbzrle_cache_size(void); > ZeroPageDetection migrate_zero_page_detection(void); > +uint64_t migrate_rdma_chunk_size(void); > > /* parameters helpers */ > > diff --git a/migration/rdma.c b/migration/rdma.c > index 55ab85650a..3e37a1d440 100644 > --- a/migration/rdma.c > +++ b/migration/rdma.c > @@ -45,10 +45,12 @@ > #define RDMA_RESOLVE_TIMEOUT_MS 10000 > > /* Do not merge data if larger than this. */ > -#define RDMA_MERGE_MAX (2 * 1024 * 1024) > -#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096) > +static inline uint64_t rdma_merge_max(void) > +{ > + return migrate_rdma_chunk_size() * 2; > +} > > -#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */ > +#define RDMA_SIGNALED_SEND_MAX 512 > > /* > * This is only for non-live state being migrated. > @@ -527,21 +529,21 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, > static inline uint64_t ram_chunk_index(const uint8_t *start, > const uint8_t *host) > { > - return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT; > + return ((uintptr_t) host - (uintptr_t) start) / migrate_rdma_chunk_size(); Double-checking: this function isn't speed-critical, correct? 
> } > > static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, > uint64_t i) > { > return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr + > - (i << RDMA_REG_CHUNK_SHIFT)); > + (i * migrate_rdma_chunk_size())); > } > > static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block, > uint64_t i) > { > uint8_t *result = ram_chunk_start(rdma_ram_block, i) + > - (1UL << RDMA_REG_CHUNK_SHIFT); > + migrate_rdma_chunk_size(); > > if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) { > result = rdma_ram_block->local_host_addr + rdma_ram_block->length; > @@ -1841,6 +1843,7 @@ static int qemu_rdma_write_one(RDMAContext *rdma, > struct ibv_send_wr *bad_wr; > int reg_result_idx, ret, count = 0; > uint64_t chunk, chunks; > + uint64_t chunk_size = migrate_rdma_chunk_size(); > uint8_t *chunk_start, *chunk_end; > RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); > RDMARegister reg; > @@ -1861,22 +1864,21 @@ retry: > chunk_start = ram_chunk_start(block, chunk); > > if (block->is_ram_block) { > - chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); > + chunks = length / chunk_size; > > - if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { > + if (chunks && ((length % chunk_size) == 0)) { > chunks--; > } > } else { > - chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); > + chunks = block->length / chunk_size; > > - if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { > + if (chunks && ((block->length % chunk_size) == 0)) { > chunks--; > } > } > > trace_qemu_rdma_write_one_top(chunks + 1, > - (chunks + 1) * > - (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); > + (chunks + 1) * chunk_size / 1024 / 1024); > > chunk_end = ram_chunk_end(block, chunk + chunks); > > @@ -2176,7 +2178,7 @@ static int qemu_rdma_write(RDMAContext *rdma, > rdma->current_length += len; > > /* flush it if buffer is too large */ > - if (rdma->current_length >= RDMA_MERGE_MAX) { > + if 
(rdma->current_length >= rdma_merge_max()) { > return qemu_rdma_write_flush(rdma, errp); > } > > @@ -3522,7 +3524,7 @@ int rdma_registration_handle(QEMUFile *f) > } else { > chunk = reg->key.chunk; > host_addr = block->local_host_addr + > - (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); > + (reg->key.chunk * migrate_rdma_chunk_size()); > /* Check for particularly bad chunk value */ > if (host_addr < (void *)block->local_host_addr) { > error_report("rdma: bad chunk for block %s" > diff --git a/qapi/migration.json b/qapi/migration.json > index 7134d4ce47..94d2c1c65f 100644 > --- a/qapi/migration.json > +++ b/qapi/migration.json > @@ -806,7 +806,7 @@ > # > # Features: > # > -# @unstable: Members @x-checkpoint-delay and > +# @unstable: Members @x-checkpoint-delay, @x-rdma-chunk-size, and > # @x-vcpu-dirty-limit-period are experimental. > # > # Since: 2.4 > @@ -831,6 +831,7 @@ > 'mode', > 'zero-page-detection', > 'direct-io', > + { 'name': 'x-rdma-chunk-size', 'features': [ 'unstable' ] }, > 'cpr-exec-command'] } > > ## > @@ -1007,9 +1008,13 @@ > # is @cpr-exec. The first list element is the program's filename, > # the remainder its arguments. (Since 10.2) > # > +# @x-rdma-chunk-size: RDMA memory registration chunk size in bytes. > +# Default is 1M. Must be a power of 2 in the range [1M, 1024M]. Let's use 1MiB and 1024MiB for extra clarity. > +# Only takes effect for RDMA migration. (Since 11.1) > +# > # Features: > # > -# @unstable: Members @x-checkpoint-delay and > +# @unstable: Members @x-checkpoint-delay, @x-rdma-chunk-size, and > # @x-vcpu-dirty-limit-period are experimental. > # > # Since: 2.4 > @@ -1046,6 +1051,8 @@ > '*mode': 'MigMode', > '*zero-page-detection': 'ZeroPageDetection', > '*direct-io': 'bool', > + '*x-rdma-chunk-size': { 'type': 'uint64', > + 'features': [ 'unstable' ] }, > '*cpr-exec-command': [ 'str' ]} } > > ## ^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2026-03-26 6:58 UTC | newest] Thread overview: 6+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2026-03-16 6:23 [PATCH] migration/rdma: add x-rdma-chunk-shift parameter Samuel Zhang 2026-03-19 8:48 ` Markus Armbruster 2026-03-20 3:39 ` Zhang, GuoQing (Sam) 2026-03-20 7:42 ` Markus Armbruster 2026-03-26 2:58 ` [PATCH v2] migration/rdma: add x-rdma-chunk-size parameter Samuel Zhang 2026-03-26 6:57 ` Markus Armbruster
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox