From: Yuan Liu <yuan1.liu@intel.com>
To: peterx@redhat.com, farosas@suse.de
Cc: qemu-devel@nongnu.org, hao.xiang@bytedance.com,
bryan.zhang@bytedance.com, yuan1.liu@intel.com,
nanhai.zou@intel.com
Subject: [PATCH v5 5/7] migration/multifd: implement initialization of qpl compression
Date: Wed, 20 Mar 2024 00:45:25 +0800
Message-ID: <20240319164527.1873891-6-yuan1.liu@intel.com>
In-Reply-To: <20240319164527.1873891-1-yuan1.liu@intel.com>
The QPL initialization includes memory allocation for the compressed
data and the initialization of the QPL jobs.

The QPL initialization also checks whether the In-Memory Analytics
Accelerator (IAA) hardware is available; if the platform has no IAA
hardware, or the IAA hardware cannot be used, the QPL compression
initialization fails.
Signed-off-by: Yuan Liu <yuan1.liu@intel.com>
Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
---
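Editor's note: below is a minimal, standalone sketch of the IAA
availability probe that this patch performs during channel setup. It
reuses only the QPL calls visible in the diff (qpl_get_job_size(),
qpl_init_job(), qpl_fini_job() on qpl_path_hardware); the main()
wrapper and messages are illustrative, not part of the patch, and
linking against the QPL library (e.g. -lqpl) is assumed. On a machine
without a usable IAA device, qpl_init_job() is expected to fail, which
is the same failure path the patch reports through errp.

  #include <stdio.h>
  #include <stdlib.h>
  #include "qpl/qpl.h"

  int main(void)
  {
      uint32_t job_size = 0;
      qpl_job *job;
      qpl_status status;

      /* ask how much memory a hardware-path job needs */
      status = qpl_get_job_size(qpl_path_hardware, &job_size);
      if (status != QPL_STS_OK) {
          fprintf(stderr, "qpl_get_job_size failed: %d\n", status);
          return EXIT_FAILURE;
      }
      job = calloc(1, job_size);
      if (job == NULL) {
          return EXIT_FAILURE;
      }
      /* fails here if no usable IAA device is present */
      status = qpl_init_job(qpl_path_hardware, job);
      if (status != QPL_STS_OK) {
          fprintf(stderr, "IAA hardware not available: %d\n", status);
          free(job);
          return EXIT_FAILURE;
      }
      printf("IAA hardware path is available\n");
      qpl_fini_job(job);
      free(job);
      return EXIT_SUCCESS;
  }
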
migration/multifd-qpl.c | 243 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 242 insertions(+), 1 deletion(-)
diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c
index 056a68a060..6de65e9da7 100644
--- a/migration/multifd-qpl.c
+++ b/migration/multifd-qpl.c
@@ -9,12 +9,253 @@
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  */
+
 #include "qemu/osdep.h"
 #include "qemu/module.h"
+#include "qapi/error.h"
+#include "migration.h"
+#include "multifd.h"
+#include "qpl/qpl.h"
+
+typedef struct {
+    qpl_job **job_array;
+    /* the number of allocated jobs */
+    uint32_t job_num;
+    /* the size of data processed by a qpl job */
+    uint32_t data_size;
+    /* compressed data buffer */
+    uint8_t *zbuf;
+    /* the compressed data lengths, one per job */
+    uint32_t *zbuf_hdr;
+} QplData;
+
+static void free_zbuf(QplData *qpl)
+{
+    if (qpl->zbuf != NULL) {
+        munmap(qpl->zbuf, qpl->job_num * qpl->data_size);
+        qpl->zbuf = NULL;
+    }
+    if (qpl->zbuf_hdr != NULL) {
+        g_free(qpl->zbuf_hdr);
+        qpl->zbuf_hdr = NULL;
+    }
+}
+
+static int alloc_zbuf(QplData *qpl, uint8_t chan_id, Error **errp)
+{
+    int flags = MAP_PRIVATE | MAP_POPULATE | MAP_ANONYMOUS;
+    uint32_t size = qpl->job_num * qpl->data_size;
+    uint8_t *buf;
+
+    buf = (uint8_t *) mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
+    if (buf == MAP_FAILED) {
+        error_setg(errp, "multifd: %u: alloc_zbuf failed, job num %u, size %u",
+                   chan_id, qpl->job_num, qpl->data_size);
+        return -1;
+    }
+    qpl->zbuf = buf;
+    qpl->zbuf_hdr = g_new0(uint32_t, qpl->job_num);
+    return 0;
+}
+
+static void free_jobs(QplData *qpl)
+{
+    for (int i = 0; i < qpl->job_num; i++) {
+        qpl_fini_job(qpl->job_array[i]);
+        g_free(qpl->job_array[i]);
+        qpl->job_array[i] = NULL;
+    }
+    g_free(qpl->job_array);
+    qpl->job_array = NULL;
+}
+
+static int alloc_jobs(QplData *qpl, uint8_t chan_id, Error **errp)
+{
+    qpl_status status;
+    uint32_t job_size = 0;
+    qpl_job *job = NULL;
+    /* always use IAA hardware accelerator */
+    qpl_path_t path = qpl_path_hardware;
+
+    status = qpl_get_job_size(path, &job_size);
+    if (status != QPL_STS_OK) {
+        error_setg(errp, "multifd: %u: qpl_get_job_size failed with error %d",
+                   chan_id, status);
+        return -1;
+    }
+    qpl->job_array = g_new0(qpl_job *, qpl->job_num);
+    for (int i = 0; i < qpl->job_num; i++) {
+        job = g_malloc0(job_size);
+        status = qpl_init_job(path, job);
+        if (status != QPL_STS_OK) {
+            error_setg(errp, "multifd: %u: qpl_init_job failed with error %d",
+                       chan_id, status);
+            free_jobs(qpl);
+            return -1;
+        }
+        qpl->job_array[i] = job;
+    }
+    return 0;
+}
+
+static int init_qpl(QplData *qpl, uint32_t job_num, uint32_t data_size,
+                    uint8_t chan_id, Error **errp)
+{
+    qpl->job_num = job_num;
+    qpl->data_size = data_size;
+    if (alloc_zbuf(qpl, chan_id, errp) != 0) {
+        return -1;
+    }
+    if (alloc_jobs(qpl, chan_id, errp) != 0) {
+        free_zbuf(qpl);
+        return -1;
+    }
+    return 0;
+}
+
+static void deinit_qpl(QplData *qpl)
+{
+    if (qpl != NULL) {
+        free_jobs(qpl);
+        free_zbuf(qpl);
+        qpl->job_num = 0;
+        qpl->data_size = 0;
+    }
+}
+
+/**
+ * qpl_send_setup: setup send side
+ *
+ * Setup each channel with QPL compression.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_send_setup(MultiFDSendParams *p, Error **errp)
+{
+    QplData *qpl;
+
+    qpl = g_new0(QplData, 1);
+    if (init_qpl(qpl, p->page_count, p->page_size, p->id, errp) != 0) {
+        g_free(qpl);
+        return -1;
+    }
+    p->compress_data = qpl;
+
+    assert(p->iov == NULL);
+    /*
+     * Each page is compressed independently and sent using an IOV. Two
+     * additional IOVs are used to store the packet header and the
+     * compressed data lengths.
+     */
+    p->iov = g_new0(struct iovec, p->page_count + 2);
+    return 0;
+}
+
+/**
+ * qpl_send_cleanup: cleanup send side
+ *
+ * Close the channel and return memory.
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static void qpl_send_cleanup(MultiFDSendParams *p, Error **errp)
+{
+    QplData *qpl = p->compress_data;
+
+    deinit_qpl(qpl);
+    g_free(p->compress_data);
+    p->compress_data = NULL;
+}
+
+/**
+ * qpl_send_prepare: prepare data to be able to send
+ *
+ * Create a compressed buffer with all the pages that we are going to
+ * send.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_send_prepare(MultiFDSendParams *p, Error **errp)
+{
+    /* to be implemented in the next patch */
+    return -1;
+}
+
+/**
+ * qpl_recv_setup: setup receive side
+ *
+ * Create the compressed channel and buffer.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
+{
+    QplData *qpl;
+
+    qpl = g_new0(QplData, 1);
+    if (init_qpl(qpl, p->page_count, p->page_size, p->id, errp) != 0) {
+        g_free(qpl);
+        return -1;
+    }
+    p->compress_data = qpl;
+    return 0;
+}
+
+/**
+ * qpl_recv_cleanup: cleanup receive side
+ *
+ * Close the channel and return memory.
+ *
+ * @p: Params for the channel that we are using
+ */
+static void qpl_recv_cleanup(MultiFDRecvParams *p)
+{
+    QplData *qpl = p->compress_data;
+
+    deinit_qpl(qpl);
+    g_free(p->compress_data);
+    p->compress_data = NULL;
+}
+
+/**
+ * qpl_recv: read the data from the channel into actual pages
+ *
+ * Read the compressed buffer, and uncompress it into the actual
+ * pages.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_recv(MultiFDRecvParams *p, Error **errp)
+{
+    /* to be implemented in the next patch */
+    return -1;
+}
+
+static MultiFDMethods multifd_qpl_ops = {
+    .send_setup = qpl_send_setup,
+    .send_cleanup = qpl_send_cleanup,
+    .send_prepare = qpl_send_prepare,
+    .recv_setup = qpl_recv_setup,
+    .recv_cleanup = qpl_recv_cleanup,
+    .recv = qpl_recv,
+};
 
 static void multifd_qpl_register(void)
 {
-    /* noop */
+    multifd_register_ops(MULTIFD_COMPRESSION_QPL, &multifd_qpl_ops);
 }
 
 migration_init(multifd_qpl_register);
--
2.39.3
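Editor's note: for readers unfamiliar with the multifd plumbing, the
multifd_register_ops() call at the end of the patch hooks the ops table
into a per-method dispatch table. The fragment below sketches the
consuming side; the names (multifd_ops, MultiFDCompression,
migrate_multifd_compression()) follow QEMU's migration/multifd.c as best
I can tell at this point in the series, so treat it as an illustration
of the mechanism rather than code from this patch:

  /* one slot per MultiFDCompression value; this patch fills the QPL slot */
  static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX];

  void multifd_register_ops(MultiFDCompression method, MultiFDMethods *ops)
  {
      assert(method < MULTIFD_COMPRESSION__MAX);
      multifd_ops[method] = ops;
  }

  /*
   * Each multifd channel thread then calls through the table, e.g.:
   *     multifd_ops[migrate_multifd_compression()]->send_setup(p, errp);
   */

Assuming the rest of the series is applied, the new method would be
selected through the existing multifd-compression migration parameter
(for example "migrate_set_parameter multifd-compression qpl" in HMP).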
Thread overview: 45+ messages
2024-03-19 16:45 [PATCH v5 0/7] Live Migration With IAA Yuan Liu
2024-03-19 16:45 ` [PATCH v5 1/7] docs/migration: add qpl compression feature Yuan Liu
2024-03-26 17:58 ` Peter Xu
2024-03-27 2:14 ` Liu, Yuan1
2024-03-19 16:45 ` [PATCH v5 2/7] migration/multifd: put IOV initialization into compression method Yuan Liu
2024-03-20 15:18 ` Fabiano Rosas
2024-03-20 15:32 ` Liu, Yuan1
2024-03-19 16:45 ` [PATCH v5 3/7] configure: add --enable-qpl build option Yuan Liu
2024-03-20 8:55 ` Thomas Huth
2024-03-20 8:56 ` Thomas Huth
2024-03-20 14:34 ` Liu, Yuan1
2024-03-20 10:31 ` Daniel P. Berrangé
2024-03-20 14:42 ` Liu, Yuan1
2024-03-19 16:45 ` [PATCH v5 4/7] migration/multifd: add qpl compression method Yuan Liu
2024-03-27 19:49 ` Peter Xu
2024-03-28 3:03 ` Liu, Yuan1
2024-03-19 16:45 ` Yuan Liu [this message]
2024-03-20 10:42 ` [PATCH v5 5/7] migration/multifd: implement initialization of qpl compression Daniel P. Berrangé
2024-03-20 15:02 ` Liu, Yuan1
2024-03-20 15:20 ` Daniel P. Berrangé
2024-03-20 16:04 ` Liu, Yuan1
2024-03-20 15:34 ` Peter Xu
2024-03-20 16:23 ` Liu, Yuan1
2024-03-20 20:31 ` Peter Xu
2024-03-21 1:37 ` Liu, Yuan1
2024-03-21 15:28 ` Peter Xu
2024-03-22 2:06 ` Liu, Yuan1
2024-03-22 14:47 ` Liu, Yuan1
2024-03-22 16:40 ` Peter Xu
2024-03-27 19:25 ` Peter Xu
2024-03-28 2:32 ` Liu, Yuan1
2024-03-28 15:16 ` Peter Xu
2024-03-29 2:04 ` Liu, Yuan1
2024-03-19 16:45 ` [PATCH v5 6/7] migration/multifd: implement qpl compression and decompression Yuan Liu
2024-03-19 16:45 ` [PATCH v5 7/7] tests/migration-test: add qpl compression test Yuan Liu
2024-03-20 10:45 ` Daniel P. Berrangé
2024-03-20 15:30 ` Liu, Yuan1
2024-03-20 15:39 ` Daniel P. Berrangé
2024-03-20 16:26 ` Liu, Yuan1
2024-03-26 20:30 ` [PATCH v5 0/7] Live Migration With IAA Peter Xu
2024-03-27 3:20 ` Liu, Yuan1
2024-03-27 19:46 ` Peter Xu
2024-03-28 3:02 ` Liu, Yuan1
2024-03-28 15:22 ` Peter Xu
2024-03-29 3:33 ` Liu, Yuan1