From: Juan Quintela <quintela@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"David Hildenbrand" <david@redhat.com>,
"Laurent Vivier" <laurent@vivier.eu>,
"Stefan Hajnoczi" <stefanha@redhat.com>,
"Fam Zheng" <fam@euphon.net>,
qemu-block@nongnu.org,
"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
"Thomas Huth" <thuth@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
qemu-trivial@nongnu.org, "Michael Tokarev" <mjt@tls.msk.ru>,
"Daniel P. Berrangé" <berrange@redhat.com>,
"Marc-André Lureau" <marcandre.lureau@redhat.com>,
"Peter Xu" <peterx@redhat.com>,
"Juan Quintela" <quintela@redhat.com>
Subject: [PATCH 26/30] migration: Move last_sent_block into PageSearchStatus
Date: Tue, 15 Nov 2022 13:12:22 +0100
Message-ID: <20221115121226.26609-27-quintela@redhat.com>
In-Reply-To: <20221115121226.26609-1-quintela@redhat.com>
From: Peter Xu <peterx@redhat.com>
Since we use PageSearchStatus to represent a channel, it makes sense to
make last_sent_block (the state behind RAM_SAVE_FLAG_CONTINUE) per-channel
rather than global, because each channel can be sending pages from
different ramblocks.

Hence move it from RAMState into PageSearchStatus.
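
For illustration, a minimal sketch of what the header-emission path looks
like once last_sent_block is per-channel (simplified from the
save_page_header() hunk below; the qemu_put_be64() of offset/flags and the
initial size of 8 are in the unchanged part of the function and are not
visible in the hunks, so treat those details as assumptions):

    /* Sketch only: stats accounting and error handling elided. */
    static size_t save_page_header_sketch(PageSearchStatus *pss,
                                          RAMBlock *block, ram_addr_t offset)
    {
        QEMUFile *f = pss->pss_channel;
        bool same_block = (block == pss->last_sent_block);
        size_t size = 8;                /* be64 word: offset plus flags */

        if (same_block) {
            /* Same block as the last page sent on *this* channel. */
            offset |= RAM_SAVE_FLAG_CONTINUE;
        }
        qemu_put_be64(f, offset);
        if (!same_block) {
            size_t len = strlen(block->idstr);

            /* New block on this channel: send its name once. */
            qemu_put_byte(f, len);
            qemu_put_buffer(f, (uint8_t *)block->idstr, len);
            size += 1 + len;
            pss->last_sent_block = block;
        }
        return size;
    }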
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
migration/ram.c | 71 ++++++++++++++++++++++++++++---------------------
1 file changed, 41 insertions(+), 30 deletions(-)
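
As a reference point for review, a rough sketch of the per-page header
whose size RAM_SAVE_FLAG_CONTINUE reduces (layout reconstructed from
save_page_header(); exact flag encodings live in migration/ram.c):

    /*
     * be64   offset, RAM_SAVE_FLAG_* in the low bits    always sent
     * u8     strlen(block->idstr)       only when CONTINUE is not set
     * bytes  block->idstr               only when CONTINUE is not set
     *
     * With CONTINUE set the block name is elided and the destination
     * reuses the block of the previous page on that stream, which is
     * why last_sent_block must now be tracked per channel rather than
     * globally.
     */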
diff --git a/migration/ram.c b/migration/ram.c
index bdb29ac4d9..dbdde5a6a5 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -117,6 +117,8 @@ XBZRLECacheStats xbzrle_counters;
struct PageSearchStatus {
/* The migration channel used for a specific host page */
QEMUFile *pss_channel;
+ /* Last block from where we have sent data */
+ RAMBlock *last_sent_block;
/* Current block being searched */
RAMBlock *block;
/* Current page to search from */
@@ -396,8 +398,6 @@ struct RAMState {
int uffdio_fd;
/* Last block that we have visited searching for dirty pages */
RAMBlock *last_seen_block;
- /* Last block from where we have sent data */
- RAMBlock *last_sent_block;
/* Last dirty target page we have sent */
ram_addr_t last_page;
/* last ram version we have seen */
@@ -712,16 +712,17 @@ exit:
*
* Returns the number of bytes written
*
- * @f: QEMUFile where to send the data
+ * @pss: current PSS channel status
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* in the lower bits, it contains flags
*/
-static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
+static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset)
{
size_t size, len;
- bool same_block = (block == rs->last_sent_block);
+ bool same_block = (block == pss->last_sent_block);
+ QEMUFile *f = pss->pss_channel;
if (same_block) {
offset |= RAM_SAVE_FLAG_CONTINUE;
@@ -734,7 +735,7 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)block->idstr, len);
size += 1 + len;
- rs->last_sent_block = block;
+ pss->last_sent_block = block;
}
return size;
}
@@ -818,17 +819,19 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
* -1 means that xbzrle would be longer than normal
*
* @rs: current RAM state
+ * @pss: current PSS channel
* @current_data: pointer to the address of the page contents
* @current_addr: addr of the page
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
+static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
uint8_t **current_data, ram_addr_t current_addr,
RAMBlock *block, ram_addr_t offset)
{
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
+ QEMUFile *file = pss->pss_channel;
if (!cache_is_cached(XBZRLE.cache, current_addr,
ram_counters.dirty_sync_count)) {
@@ -893,7 +896,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
}
/* Send XBZRLE based compressed page */
- bytes_xbzrle = save_page_header(rs, file, block,
+ bytes_xbzrle = save_page_header(pss, block,
offset | RAM_SAVE_FLAG_XBZRLE);
qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
qemu_put_be16(file, encoded_len);
@@ -1324,19 +1327,19 @@ void ram_release_page(const char *rbname, uint64_t offset)
* Returns the size of data written to the file, 0 means the page is not
* a zero page
*
- * @rs: current RAM state
- * @file: the file where the data is saved
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
+static int save_zero_page_to_file(PageSearchStatus *pss,
RAMBlock *block, ram_addr_t offset)
{
uint8_t *p = block->host + offset;
+ QEMUFile *file = pss->pss_channel;
int len = 0;
if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
- len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
+ len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO);
qemu_put_byte(file, 0);
len += 1;
ram_release_page(block->idstr, offset);
@@ -1349,14 +1352,14 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
*
* Returns the number of pages written.
*
- * @rs: current RAM state
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_zero_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_zero_page(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset)
{
- int len = save_zero_page_to_file(rs, file, block, offset);
+ int len = save_zero_page_to_file(pss, block, offset);
if (len) {
stat64_add(&ram_atomic_counters.duplicate, 1);
@@ -1409,16 +1412,18 @@ static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
*
* Returns the number of pages written.
*
- * @rs: current RAM state
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* @buf: the page to be sent
* @async: send the page asynchronously
*/
-static int save_normal_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset, uint8_t *buf, bool async)
{
- ram_transferred_add(save_page_header(rs, file, block,
+ QEMUFile *file = pss->pss_channel;
+
+ ram_transferred_add(save_page_header(pss, block,
offset | RAM_SAVE_FLAG_PAGE));
if (async) {
qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
@@ -1458,7 +1463,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
XBZRLE_cache_lock();
if (rs->xbzrle_enabled && !migration_in_postcopy()) {
- pages = save_xbzrle_page(rs, pss->pss_channel, &p, current_addr,
+ pages = save_xbzrle_page(rs, pss, &p, current_addr,
block, offset);
if (!rs->last_stage) {
/* Can't send this cached data async, since the cache page
@@ -1470,8 +1475,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
/* XBZRLE overflow or normal page */
if (pages == -1) {
- pages = save_normal_page(rs, pss->pss_channel, block, offset,
- p, send_async);
+ pages = save_normal_page(pss, block, offset, p, send_async);
}
XBZRLE_cache_unlock();
@@ -1494,14 +1498,15 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf)
{
RAMState *rs = ram_state;
+ PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
uint8_t *p = block->host + offset;
int ret;
- if (save_zero_page_to_file(rs, f, block, offset)) {
+ if (save_zero_page_to_file(pss, block, offset)) {
return true;
}
- save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
+ save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
/*
* copy it to an internal buffer to avoid it being modified by the VM
@@ -2321,7 +2326,8 @@ static bool save_page_use_compression(RAMState *rs)
* has been properly handled by compression, otherwise needs other
* paths to handle it
*/
-static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
+static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
+ RAMBlock *block, ram_addr_t offset)
{
if (!save_page_use_compression(rs)) {
return false;
@@ -2337,7 +2343,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
* We post the first page as a normal page since compression will take
* much CPU resource.
*/
- if (block != rs->last_sent_block) {
+ if (block != pss->last_sent_block) {
flush_compressed_data(rs);
return false;
}
@@ -2368,11 +2374,11 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
return res;
}
- if (save_compress_page(rs, block, offset)) {
+ if (save_compress_page(rs, pss, block, offset)) {
return 1;
}
- res = save_zero_page(rs, pss->pss_channel, block, offset);
+ res = save_zero_page(pss, block, offset);
if (res > 0) {
/* Must let xbzrle know, otherwise a previous (now 0'd) cached
* page would be stale
@@ -2503,7 +2509,7 @@ static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss)
* If channel switched, reset last_sent_block since the old sent block
* may not be on the same channel.
*/
- rs->last_sent_block = NULL;
+ pss->last_sent_block = NULL;
trace_postcopy_preempt_switch_channel(channel);
}
@@ -2842,8 +2848,13 @@ static void ram_save_cleanup(void *opaque)
static void ram_state_reset(RAMState *rs)
{
+ int i;
+
+ for (i = 0; i < RAM_CHANNEL_MAX; i++) {
+ rs->pss[i].last_sent_block = NULL;
+ }
+
rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
rs->last_page = 0;
rs->last_version = ram_list.version;
rs->xbzrle_enabled = false;
@@ -3037,8 +3048,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
migration_bitmap_sync(rs);
/* Easiest way to make sure we don't resume in the middle of a host-page */
+ rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
rs->last_page = 0;
postcopy_each_ram_send_discard(ms);
--
2.38.1