From: Peter Xu <peterx@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>,
peterx@redhat.com, "Daniel P . Berrange" <berrange@redhat.com>,
Juan Quintela <quintela@redhat.com>,
ani@anisinha.ca,
Leonardo Bras Soares Passos <lsoaresp@redhat.com>,
Manish Mishra <manish.mishra@nutanix.com>
Subject: [PATCH v2 12/15] migration: Move last_sent_block into PageSearchStatus
Date: Tue, 11 Oct 2022 17:55:56 -0400
Message-ID: <20221011215559.602584-13-peterx@redhat.com>
In-Reply-To: <20221011215559.602584-1-peterx@redhat.com>
Since we use PageSearchStatus to represent a channel, it makes sense to
keep last_sent_block (i.e., the state behind the RAM_SAVE_FLAG_CONTINUE
optimization) per-channel rather than global, because each channel can be
sending pages from different ramblocks.
Hence move it from RAMState into PageSearchStatus.
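For context, this is roughly the logic being touched once last_sent_block
lives in the PSS (a simplified sketch of save_page_header() as modified by
the hunks below; see the diff for the authoritative version):

    static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block,
                                   ram_addr_t offset)
    {
        QEMUFile *f = pss->pss_channel;
        bool same_block = (block == pss->last_sent_block);
        size_t size, len;

        if (same_block) {
            /* Same block as the previous page on *this* channel: the
             * destination reuses the block name it already holds, so
             * only the offset+flags word is sent. */
            offset |= RAM_SAVE_FLAG_CONTINUE;
        }
        qemu_put_be64(f, offset);
        size = 8;

        if (!same_block) {
            /* First page from this block on this channel: send the
             * idstr once and remember the block for the next page. */
            len = strlen(block->idstr);
            qemu_put_byte(f, len);
            qemu_put_buffer(f, (uint8_t *)block->idstr, len);
            size += 1 + len;
            pss->last_sent_block = block;
        }
        return size;
    }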
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/ram.c | 71 ++++++++++++++++++++++++++++---------------------
1 file changed, 41 insertions(+), 30 deletions(-)
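Side note (not part of the diff): a per-channel field is safe because the
destination already tracks the matching state per channel. Assuming the
ram_block_from_stream() shape from the earlier preempt work, the receive
side decodes the header roughly like this (simplified sketch, error
handling and accounting omitted):

    static RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
                                           QEMUFile *f, int flags,
                                           int channel)
    {
        /* Per-channel cache of the last block named on the wire */
        RAMBlock *block = mis->last_recv_block[channel];
        char id[256];
        uint8_t len;

        if (flags & RAM_SAVE_FLAG_CONTINUE) {
            /* CONTINUE: reuse the last block seen on this channel */
            return block;
        }

        /* Otherwise the header carries an explicit block name */
        len = qemu_get_byte(f);
        qemu_get_buffer(f, (uint8_t *)id, len);
        id[len] = 0;

        block = qemu_ram_block_by_name(id);
        mis->last_recv_block[channel] = block;
        return block;
    }

Since sender and receiver pair up per channel, a CONTINUE flag on one
channel can never be resolved against a block name that was only sent on
the other channel.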
diff --git a/migration/ram.c b/migration/ram.c
index 486dc47583..cbaa5650b8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -89,6 +89,8 @@ XBZRLECacheStats xbzrle_counters;
struct PageSearchStatus {
/* The migration channel used for a specific host page */
QEMUFile *pss_channel;
+ /* Last block from where we have sent data */
+ RAMBlock *last_sent_block;
/* Current block being searched */
RAMBlock *block;
/* Current page to search from */
@@ -368,8 +370,6 @@ struct RAMState {
int uffdio_fd;
/* Last block that we have visited searching for dirty pages */
RAMBlock *last_seen_block;
- /* Last block from where we have sent data */
- RAMBlock *last_sent_block;
/* Last dirty target page we have sent */
ram_addr_t last_page;
/* last ram version we have seen */
@@ -684,16 +684,17 @@ exit:
*
* Returns the number of bytes written
*
- * @f: QEMUFile where to send the data
+ * @pss: current PSS channel status
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* in the lower bits, it contains flags
*/
-static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
+static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset)
{
size_t size, len;
- bool same_block = (block == rs->last_sent_block);
+ bool same_block = (block == pss->last_sent_block);
+ QEMUFile *f = pss->pss_channel;
if (same_block) {
offset |= RAM_SAVE_FLAG_CONTINUE;
@@ -706,7 +707,7 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)block->idstr, len);
size += 1 + len;
- rs->last_sent_block = block;
+ pss->last_sent_block = block;
}
return size;
}
@@ -790,17 +791,19 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
* -1 means that xbzrle would be longer than normal
*
* @rs: current RAM state
+ * @pss: current PSS channel
* @current_data: pointer to the address of the page contents
* @current_addr: addr of the page
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
+static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
uint8_t **current_data, ram_addr_t current_addr,
RAMBlock *block, ram_addr_t offset)
{
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
+ QEMUFile *file = pss->pss_channel;
if (!cache_is_cached(XBZRLE.cache, current_addr,
ram_counters.dirty_sync_count)) {
@@ -865,7 +868,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
}
/* Send XBZRLE based compressed page */
- bytes_xbzrle = save_page_header(rs, file, block,
+ bytes_xbzrle = save_page_header(pss, block,
offset | RAM_SAVE_FLAG_XBZRLE);
qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
qemu_put_be16(file, encoded_len);
@@ -1296,19 +1299,19 @@ static void ram_release_page(const char *rbname, uint64_t offset)
* Returns the size of data written to the file, 0 means the page is not
* a zero page
*
- * @rs: current RAM state
- * @file: the file where the data is saved
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
+static int save_zero_page_to_file(PageSearchStatus *pss,
RAMBlock *block, ram_addr_t offset)
{
uint8_t *p = block->host + offset;
+ QEMUFile *file = pss->pss_channel;
int len = 0;
if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
- len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
+ len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO);
qemu_put_byte(file, 0);
len += 1;
ram_release_page(block->idstr, offset);
@@ -1321,14 +1324,14 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
*
* Returns the number of pages written.
*
- * @rs: current RAM state
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_zero_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_zero_page(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset)
{
- int len = save_zero_page_to_file(rs, file, block, offset);
+ int len = save_zero_page_to_file(pss, block, offset);
if (len) {
stat64_add(&ram_atomic_counters.duplicate, 1);
@@ -1381,16 +1384,18 @@ static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
*
* Returns the number of pages written.
*
- * @rs: current RAM state
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* @buf: the page to be sent
* @async: send to page asyncly
*/
-static int save_normal_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset, uint8_t *buf, bool async)
{
- ram_transferred_add(save_page_header(rs, file, block,
+ QEMUFile *file = pss->pss_channel;
+
+ ram_transferred_add(save_page_header(pss, block,
offset | RAM_SAVE_FLAG_PAGE));
if (async) {
qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
@@ -1430,7 +1435,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
XBZRLE_cache_lock();
if (rs->xbzrle_enabled && !migration_in_postcopy()) {
- pages = save_xbzrle_page(rs, pss->pss_channel, &p, current_addr,
+ pages = save_xbzrle_page(rs, pss, &p, current_addr,
block, offset);
if (!rs->last_stage) {
/* Can't send this cached data async, since the cache page
@@ -1442,8 +1447,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
/* XBZRLE overflow or normal page */
if (pages == -1) {
- pages = save_normal_page(rs, pss->pss_channel, block, offset,
- p, send_async);
+ pages = save_normal_page(pss, block, offset, p, send_async);
}
XBZRLE_cache_unlock();
@@ -1466,14 +1470,15 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf)
{
RAMState *rs = ram_state;
+ PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
uint8_t *p = block->host + offset;
int ret;
- if (save_zero_page_to_file(rs, f, block, offset)) {
+ if (save_zero_page_to_file(pss, block, offset)) {
return true;
}
- save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
+ save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
/*
* copy it to a internal buffer to avoid it being modified by VM
@@ -2293,7 +2298,8 @@ static bool save_page_use_compression(RAMState *rs)
* has been properly handled by compression, otherwise needs other
* paths to handle it
*/
-static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
+static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
+ RAMBlock *block, ram_addr_t offset)
{
if (!save_page_use_compression(rs)) {
return false;
@@ -2309,7 +2315,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
* We post the fist page as normal page as compression will take
* much CPU resource.
*/
- if (block != rs->last_sent_block) {
+ if (block != pss->last_sent_block) {
flush_compressed_data(rs);
return false;
}
@@ -2340,11 +2346,11 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
return res;
}
- if (save_compress_page(rs, block, offset)) {
+ if (save_compress_page(rs, pss, block, offset)) {
return 1;
}
- res = save_zero_page(rs, pss->pss_channel, block, offset);
+ res = save_zero_page(pss, block, offset);
if (res > 0) {
/* Must let xbzrle know, otherwise a previous (now 0'd) cached
* page would be stale
@@ -2475,7 +2481,7 @@ static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss)
* If channel switched, reset last_sent_block since the old sent block
* may not be on the same channel.
*/
- rs->last_sent_block = NULL;
+ pss->last_sent_block = NULL;
trace_postcopy_preempt_switch_channel(channel);
}
@@ -2809,8 +2815,13 @@ static void ram_save_cleanup(void *opaque)
static void ram_state_reset(RAMState *rs)
{
+ int i;
+
+ for (i = 0; i < RAM_CHANNEL_MAX; i++) {
+ rs->pss[i].last_sent_block = NULL;
+ }
+
rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
rs->last_page = 0;
rs->last_version = ram_list.version;
rs->xbzrle_enabled = false;
@@ -3004,8 +3015,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
migration_bitmap_sync(rs);
/* Easiest way to make sure we don't resume in the middle of a host-page */
+ rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
rs->last_page = 0;
postcopy_each_ram_send_discard(ms);
--
2.37.3