From: guangrong.xiao@gmail.com
To: pbonzini@redhat.com, mst@redhat.com, mtosatti@redhat.com
Cc: qemu-devel@nongnu.org, kvm@vger.kernel.org, dgilbert@redhat.com,
peterx@redhat.com, wei.w.wang@intel.com, jiang.biao2@zte.com.cn,
eblake@redhat.com, Xiao Guangrong <xiaoguangrong@tencent.com>
Subject: [Qemu-devel] [PATCH v4 05/10] migration: move handle of zero page to the thread
Date: Tue, 21 Aug 2018 16:10:24 +0800 [thread overview]
Message-ID: <20180821081029.26121-6-xiaoguangrong@tencent.com> (raw)
In-Reply-To: <20180821081029.26121-1-xiaoguangrong@tencent.com>
From: Xiao Guangrong <xiaoguangrong@tencent.com>
Detecting a zero page is not light work, so move it to the thread to
speed up the main thread. In addition, handling ram_release_pages() for
the zero page is moved to the thread as well
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
---
migration/ram.c | 96 +++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 70 insertions(+), 26 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index e463de4f69..d804d01aae 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -340,6 +340,7 @@ typedef struct PageSearchStatus PageSearchStatus;
struct CompressParam {
bool done;
bool quit;
+ bool zero_page;
QEMUFile *file;
QemuMutex mutex;
QemuCond cond;
@@ -381,7 +382,7 @@ static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;
-static void do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
+static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf);
static void *do_data_compress(void *opaque)
@@ -389,6 +390,7 @@ static void *do_data_compress(void *opaque)
CompressParam *param = opaque;
RAMBlock *block;
ram_addr_t offset;
+ bool zero_page;
qemu_mutex_lock(&param->mutex);
while (!param->quit) {
@@ -398,11 +400,12 @@ static void *do_data_compress(void *opaque)
param->block = NULL;
qemu_mutex_unlock(&param->mutex);
- do_compress_ram_page(param->file, &param->stream, block, offset,
- param->originbuf);
+ zero_page = do_compress_ram_page(param->file, &param->stream,
+ block, offset, param->originbuf);
qemu_mutex_lock(&comp_done_lock);
param->done = true;
+ param->zero_page = zero_page;
qemu_cond_signal(&comp_done_cond);
qemu_mutex_unlock(&comp_done_lock);
@@ -1842,13 +1845,19 @@ static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
return 1;
}
-static void do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
+static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf)
{
RAMState *rs = ram_state;
uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
+ bool zero_page = false;
int ret;
+ if (save_zero_page_to_file(rs, f, block, offset)) {
+ zero_page = true;
+ goto exit;
+ }
+
save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
/*
@@ -1861,10 +1870,21 @@ static void do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
if (ret < 0) {
qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
error_report("compressed data failed!");
- return;
+ return false;
}
+exit:
ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
+ return zero_page;
+}
+
+static void
+update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
+{
+ if (param->zero_page) {
+ ram_counters.duplicate++;
+ }
+ ram_counters.transferred += bytes_xmit;
}
static void flush_compressed_data(RAMState *rs)
@@ -1888,7 +1908,12 @@ static void flush_compressed_data(RAMState *rs)
qemu_mutex_lock(&comp_param[idx].mutex);
if (!comp_param[idx].quit) {
len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
- ram_counters.transferred += len;
+ /*
+ * it's safe to fetch zero_page without holding comp_done_lock
+ * as there is no further request submitted to the thread,
+ * i.e, the thread should be waiting for a request at this point.
+ */
+ update_compress_thread_counts(&comp_param[idx], len);
}
qemu_mutex_unlock(&comp_param[idx].mutex);
}
@@ -1919,7 +1944,7 @@ retry:
qemu_cond_signal(&comp_param[idx].cond);
qemu_mutex_unlock(&comp_param[idx].mutex);
pages = 1;
- ram_counters.transferred += bytes_xmit;
+ update_compress_thread_counts(&comp_param[idx], bytes_xmit);
break;
}
}
@@ -2193,6 +2218,39 @@ static bool save_page_use_compression(RAMState *rs)
return false;
}
+/*
+ * try to compress the page before posting it out, return true if the page
+ * has been properly handled by compression, otherwise needs other
+ * paths to handle it
+ */
+static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
+{
+ if (!save_page_use_compression(rs)) {
+ return false;
+ }
+
+ /*
+ * When starting the process of a new block, the first page of
+ * the block should be sent out before other pages in the same
+ * block, and all the pages in last block should have been sent
+ * out, keeping this order is important, because the 'cont' flag
+ * is used to avoid resending the block name.
+ *
+ * We post the first page as normal page as compression will take
+ * much CPU resource.
+ */
+ if (block != rs->last_sent_block) {
+ flush_compressed_data(rs);
+ return false;
+ }
+
+ if (compress_page_with_multi_thread(rs, block, offset) > 0) {
+ return true;
+ }
+
+ return false;
+}
+
/**
* ram_save_target_page: save one target page
*
@@ -2213,15 +2271,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
return res;
}
- /*
- * When starting the process of a new block, the first page of
- * the block should be sent out before other pages in the same
- * block, and all the pages in last block should have been sent
- * out, keeping this order is important, because the 'cont' flag
- * is used to avoid resending the block name.
- */
- if (block != rs->last_sent_block && save_page_use_compression(rs)) {
- flush_compressed_data(rs);
+ if (save_compress_page(rs, block, offset)) {
+ return 1;
}
res = save_zero_page(rs, block, offset);
@@ -2239,17 +2290,10 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
}
/*
- * Make sure the first page is sent out before other pages.
- *
- * we post it as normal page as compression will take much
- * CPU resource.
+ * do not use multifd for compression as the first page in the new
+ * block should be posted out before sending the compressed page
*/
- if (block == rs->last_sent_block && save_page_use_compression(rs)) {
- res = compress_page_with_multi_thread(rs, block, offset);
- if (res > 0) {
- return res;
- }
- } else if (migrate_use_multifd()) {
+ if (!save_page_use_compression(rs) && migrate_use_multifd()) {
return ram_save_multifd_page(rs, block, offset);
}
--
2.14.4
next prev parent reply other threads:[~2018-08-21 8:11 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-08-21 8:10 [Qemu-devel] [PATCH v4 00/10] migration: compression optimization guangrong.xiao
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 01/10] migration: do not wait for free thread guangrong.xiao
2018-08-22 10:25 ` Juan Quintela
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 02/10] migration: fix counting normal page for compression guangrong.xiao
2018-08-22 10:20 ` Juan Quintela
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 03/10] migration: introduce save_zero_page_to_file guangrong.xiao
2018-08-22 10:21 ` Juan Quintela
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 04/10] migration: drop the return value of do_compress_ram_page guangrong.xiao
2018-08-22 10:22 ` Juan Quintela
2018-08-21 8:10 ` guangrong.xiao [this message]
2018-08-22 10:25 ` [Qemu-devel] [PATCH v4 05/10] migration: move handle of zero page to the thread Juan Quintela
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 06/10] migration: hold the lock only if it is really needed guangrong.xiao
2018-08-22 10:24 ` Juan Quintela
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 07/10] migration: do not flush_compressed_data at the end of each iteration guangrong.xiao
2018-08-22 4:56 ` Peter Xu
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 08/10] migration: fix calculating xbzrle_counters.cache_miss_rate guangrong.xiao
2018-08-22 4:58 ` Peter Xu
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 09/10] migration: show the statistics of compression guangrong.xiao
2018-08-21 8:10 ` [Qemu-devel] [PATCH v4 10/10] migration: handle the error condition properly guangrong.xiao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180821081029.26121-6-xiaoguangrong@tencent.com \
--to=guangrong.xiao@gmail.com \
--cc=dgilbert@redhat.com \
--cc=eblake@redhat.com \
--cc=jiang.biao2@zte.com.cn \
--cc=kvm@vger.kernel.org \
--cc=mst@redhat.com \
--cc=mtosatti@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=wei.w.wang@intel.com \
--cc=xiaoguangrong@tencent.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).