From: Fabiano Rosas <farosas@suse.de>
To: qemu-devel@nongnu.org
Cc: "Peter Xu" <peterx@redhat.com>,
"Maciej S . Szmigiero" <mail@maciej.szmigiero.name>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [PATCH v6 01/19] migration/multifd: Reduce access to p->pages
Date: Tue, 27 Aug 2024 14:45:48 -0300 [thread overview]
Message-ID: <20240827174606.10352-2-farosas@suse.de> (raw)
In-Reply-To: <20240827174606.10352-1-farosas@suse.de>
I'm about to replace the p->pages pointer with an opaque pointer, so
do a cleanup now to reduce direct accesses to p->pages, which makes
the next diffs cleaner.
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
migration/multifd-qpl.c | 8 +++++---
migration/multifd-uadk.c | 9 +++++----
migration/multifd-zlib.c | 2 +-
migration/multifd-zstd.c | 2 +-
migration/multifd.c | 13 +++++++------
5 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c
index 9265098ee7..f8c84c52cf 100644
--- a/migration/multifd-qpl.c
+++ b/migration/multifd-qpl.c
@@ -404,13 +404,14 @@ retry:
static void multifd_qpl_compress_pages_slow_path(MultiFDSendParams *p)
{
QplData *qpl = p->compress_data;
+ MultiFDPages_t *pages = p->pages;
uint32_t size = p->page_size;
qpl_job *job = qpl->sw_job;
uint8_t *zbuf = qpl->zbuf;
uint8_t *buf;
- for (int i = 0; i < p->pages->normal_num; i++) {
- buf = p->pages->block->host + p->pages->offset[i];
+ for (int i = 0; i < pages->normal_num; i++) {
+ buf = pages->block->host + pages->offset[i];
multifd_qpl_prepare_comp_job(job, buf, zbuf, size);
if (qpl_execute_job(job) == QPL_STS_OK) {
multifd_qpl_fill_packet(i, p, zbuf, job->total_out);
@@ -498,6 +499,7 @@ static void multifd_qpl_compress_pages(MultiFDSendParams *p)
static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp)
{
QplData *qpl = p->compress_data;
+ MultiFDPages_t *pages = p->pages;
uint32_t len = 0;
if (!multifd_send_prepare_common(p)) {
@@ -505,7 +507,7 @@ static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp)
}
/* The first IOV is used to store the compressed page lengths */
- len = p->pages->normal_num * sizeof(uint32_t);
+ len = pages->normal_num * sizeof(uint32_t);
multifd_qpl_fill_iov(p, (uint8_t *) qpl->zlen, len);
if (qpl->hw_avail) {
multifd_qpl_compress_pages(p);
diff --git a/migration/multifd-uadk.c b/migration/multifd-uadk.c
index d12353fb21..b8ba3cd9c1 100644
--- a/migration/multifd-uadk.c
+++ b/migration/multifd-uadk.c
@@ -174,19 +174,20 @@ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp)
uint32_t hdr_size;
uint8_t *buf = uadk_data->buf;
int ret = 0;
+ MultiFDPages_t *pages = p->pages;
if (!multifd_send_prepare_common(p)) {
goto out;
}
- hdr_size = p->pages->normal_num * sizeof(uint32_t);
+ hdr_size = pages->normal_num * sizeof(uint32_t);
/* prepare the header that stores the lengths of all compressed data */
prepare_next_iov(p, uadk_data->buf_hdr, hdr_size);
- for (int i = 0; i < p->pages->normal_num; i++) {
+ for (int i = 0; i < pages->normal_num; i++) {
struct wd_comp_req creq = {
.op_type = WD_DIR_COMPRESS,
- .src = p->pages->block->host + p->pages->offset[i],
+ .src = pages->block->host + pages->offset[i],
.src_len = p->page_size,
.dst = buf,
/* Set dst_len to double the src in case compressed out >= page_size */
@@ -214,7 +215,7 @@ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp)
*/
if (!uadk_data->handle || creq.dst_len >= p->page_size) {
uadk_data->buf_hdr[i] = cpu_to_be32(p->page_size);
- prepare_next_iov(p, p->pages->block->host + p->pages->offset[i],
+ prepare_next_iov(p, pages->block->host + pages->offset[i],
p->page_size);
buf += p->page_size;
}
diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index 2ced69487e..65f8aba5c8 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -147,7 +147,7 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
* with compression. zlib does not guarantee that this is safe,
* therefore copy the page before calling deflate().
*/
- memcpy(z->buf, p->pages->block->host + pages->offset[i], p->page_size);
+ memcpy(z->buf, pages->block->host + pages->offset[i], p->page_size);
zs->avail_in = p->page_size;
zs->next_in = z->buf;
diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
index ca17b7e310..cb6075a9a5 100644
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -138,7 +138,7 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
if (i == pages->normal_num - 1) {
flush = ZSTD_e_flush;
}
- z->in.src = p->pages->block->host + pages->offset[i];
+ z->in.src = pages->block->host + pages->offset[i];
z->in.size = p->page_size;
z->in.pos = 0;
diff --git a/migration/multifd.c b/migration/multifd.c
index a6db05502a..0bd9c2253e 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -114,11 +114,11 @@ static void multifd_set_file_bitmap(MultiFDSendParams *p)
assert(pages->block);
- for (int i = 0; i < p->pages->normal_num; i++) {
+ for (int i = 0; i < pages->normal_num; i++) {
ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true);
}
- for (int i = p->pages->normal_num; i < p->pages->num; i++) {
+ for (int i = pages->normal_num; i < pages->num; i++) {
ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false);
}
}
@@ -417,7 +417,7 @@ void multifd_send_fill_packet(MultiFDSendParams *p)
int i;
packet->flags = cpu_to_be32(p->flags);
- packet->pages_alloc = cpu_to_be32(p->pages->allocated);
+ packet->pages_alloc = cpu_to_be32(pages->allocated);
packet->normal_pages = cpu_to_be32(pages->normal_num);
packet->zero_pages = cpu_to_be32(zero_num);
packet->next_packet_size = cpu_to_be32(p->next_packet_size);
@@ -953,7 +953,7 @@ static void *multifd_send_thread(void *opaque)
if (migrate_mapped_ram()) {
ret = file_write_ramblock_iov(p->c, p->iov, p->iovs_num,
- p->pages->block, &local_err);
+ pages->block, &local_err);
} else {
ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num,
NULL, 0, p->write_flags,
@@ -969,7 +969,7 @@ static void *multifd_send_thread(void *opaque)
stat64_add(&mig_stats.normal_pages, pages->normal_num);
stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num);
- multifd_pages_reset(p->pages);
+ multifd_pages_reset(pages);
p->next_packet_size = 0;
/*
@@ -1690,9 +1690,10 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
bool multifd_send_prepare_common(MultiFDSendParams *p)
{
+ MultiFDPages_t *pages = p->pages;
multifd_send_zero_page_detect(p);
- if (!p->pages->normal_num) {
+ if (!pages->normal_num) {
p->next_packet_size = 0;
return false;
}
--
2.35.3
next prev parent reply other threads:[~2024-08-27 17:49 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-08-27 17:45 [PATCH v6 00/19] migration/multifd: Remove multifd_send_state->pages Fabiano Rosas
2024-08-27 17:45 ` Fabiano Rosas [this message]
2024-08-27 17:45 ` [PATCH v6 02/19] migration/multifd: Inline page_size and page_count Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 03/19] migration/multifd: Remove pages->allocated Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 04/19] migration/multifd: Pass in MultiFDPages_t to file_write_ramblock_iov Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 05/19] migration/multifd: Introduce MultiFDSendData Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 06/19] migration/multifd: Make MultiFDPages_t:offset a flexible array member Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 07/19] migration/multifd: Replace p->pages with an union pointer Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 08/19] migration/multifd: Move pages accounting into multifd_send_zero_page_detect() Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 09/19] migration/multifd: Remove total pages tracing Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 10/19] migration/multifd: Isolate ram pages packet data Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 11/19] migration/multifd: Don't send ram data during SYNC Fabiano Rosas
2024-08-27 17:45 ` [PATCH v6 12/19] migration/multifd: Replace multifd_send_state->pages with client data Fabiano Rosas
2024-08-27 17:46 ` [PATCH v6 13/19] migration/multifd: Allow multifd sync without flush Fabiano Rosas
2024-08-27 17:46 ` [PATCH v6 14/19] migration/multifd: Standardize on multifd ops names Fabiano Rosas
2024-08-27 17:46 ` [PATCH v6 15/19] migration/multifd: Register nocomp ops dynamically Fabiano Rosas
2024-08-27 17:46 ` [PATCH v6 16/19] migration/multifd: Move nocomp code into multifd-nocomp.c Fabiano Rosas
2024-08-27 17:46 ` [PATCH v6 17/19] migration/multifd: Make MultiFDMethods const Fabiano Rosas
2024-08-27 17:46 ` [PATCH v6 18/19] migration/multifd: Stop changing the packet on recv side Fabiano Rosas
2024-08-27 18:07 ` Peter Xu
2024-08-27 18:45 ` Fabiano Rosas
2024-08-27 19:05 ` Peter Xu
2024-08-27 19:27 ` Fabiano Rosas
2024-08-27 19:49 ` Peter Xu
2024-08-27 17:46 ` [PATCH v6 19/19] migration/multifd: Add documentation for multifd methods Fabiano Rosas
2024-08-27 18:30 ` Peter Xu
2024-08-27 18:54 ` Fabiano Rosas
2024-08-27 19:09 ` Peter Xu
2024-08-27 19:17 ` Fabiano Rosas
2024-08-27 19:44 ` Peter Xu
2024-08-27 20:22 ` Fabiano Rosas
2024-08-27 21:40 ` Peter Xu
2024-08-28 13:04 ` Fabiano Rosas
2024-08-28 13:13 ` Peter Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240827174606.10352-2-farosas@suse.de \
--to=farosas@suse.de \
--cc=mail@maciej.szmigiero.name \
--cc=peterx@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).