xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Bruno Alvisio <bruno.alvisio@gmail.com>
To: xen-devel@lists.xen.org, wei.liu2@citrix.com, dave@recoil.org,
	ian.jackson@eu.citrix.com
Subject: [PATCH RFC v3 RESEND 12/12] Migration with Local Disks Mirroring: Introduce pre_mirror_disks_stream_phase op to xc_sr_save_ops
Date: Sat, 23 Dec 2017 14:03:36 +0000	[thread overview]
Message-ID: <1514037816-40864-13-git-send-email-bruno.alvisio@gmail.com> (raw)
In-Reply-To: <1514037816-40864-1-git-send-email-bruno.alvisio@gmail.com>

A new op pre_mirror_disks_stream_phase is introduced as part of the
xc_sr_save_ops. This op sends all pfns and params that need to be transferred
before the disk mirroring jobs can be started. Note that no new libxc record
type is created.

The save flow is modified such that: if the stream_phase ==
XC_STREAM_PHASE_PRE_MIRROR_DISKS, only the pre_mirror_disks_stream_phase op is
executed as part of save(). For all other stream phase types, the original
flow is executed.

Signed-off-by: Bruno Alvisio <bruno.alvisio@gmail.com>
---
 tools/libxc/xc_sr_common.h       |  11 ++++
 tools/libxc/xc_sr_save.c         |  24 +++++++--
 tools/libxc/xc_sr_save_x86_hvm.c | 109 +++++++++++++++++++++++++++------------
 3 files changed, 105 insertions(+), 39 deletions(-)

diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index 8cf393f..44f4103 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -96,6 +96,13 @@ struct xc_sr_save_ops
      * after a successful save, or upon encountering an error.
      */
     int (*cleanup)(struct xc_sr_context *ctx);
+
+    /**
+     * Send the necessary records/params to allow the start of the local
+     * disks mirroring job in the destination node. It will be called exactly
+     * once only if the stream phase type == XC_STREAM_PHASE_PRE_MIRROR_DISKS
+     */
+    int (*pre_mirror_disks_stream_phase)(struct xc_sr_context *ctx);
 };
 
 
@@ -398,6 +405,10 @@ int read_record(struct xc_sr_context *ctx, int fd, struct xc_sr_record *rec);
 int populate_pfns(struct xc_sr_context *ctx, unsigned count,
                   const xen_pfn_t *original_pfns, const uint32_t *types);
 
+int add_to_batch(struct xc_sr_context *ctx, xen_pfn_t pfn);
+
+int flush_batch(struct xc_sr_context *ctx);
+
 #endif
 /*
  * Local variables:
diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c
index b7498e3..557dafe 100644
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -279,7 +279,7 @@ static int write_batch(struct xc_sr_context *ctx)
 /*
  * Flush a batch of pfns into the stream.
  */
-static int flush_batch(struct xc_sr_context *ctx)
+int flush_batch(struct xc_sr_context *ctx)
 {
     int rc = 0;
 
@@ -301,7 +301,7 @@ static int flush_batch(struct xc_sr_context *ctx)
 /*
  * Add a single pfn to the batch, flushing the batch if full.
  */
-static int add_to_batch(struct xc_sr_context *ctx, xen_pfn_t pfn)
+int add_to_batch(struct xc_sr_context *ctx, xen_pfn_t pfn)
 {
     int rc = 0;
 
@@ -842,8 +842,12 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
     xc_interface *xch = ctx->xch;
     int rc, saved_rc = 0, saved_errno = 0;
 
-    IPRINTF("Saving domain %d, type %s",
-            ctx->domid, dhdr_type_to_str(guest_type));
+    if ( ctx->stream_phase == XC_STREAM_PHASE_PRE_MIRROR_DISKS )
+        IPRINTF("Pre-mirroring disks save phase for domain %d, type %s",
+                ctx->domid, dhdr_type_to_str(guest_type));
+    else
+        IPRINTF("Saving domain %d, type %s",
+                ctx->domid, dhdr_type_to_str(guest_type));
 
     rc = setup(ctx);
     if ( rc )
@@ -855,6 +859,13 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
     if ( rc )
         goto err;
 
+    if ( ctx->stream_phase == XC_STREAM_PHASE_PRE_MIRROR_DISKS ) {
+        rc = ctx->save.ops.pre_mirror_disks_stream_phase(ctx);
+        if ( rc )
+            goto err;
+        goto end;
+    }
+
     rc = ctx->save.ops.start_of_stream(ctx);
     if ( rc )
         goto err;
@@ -939,6 +950,7 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
         }
     } while ( ctx->save.checkpointed != XC_MIG_STREAM_NONE );
 
+ end:
     xc_report_progress_single(xch, "End of stream");
 
     rc = write_end_record(ctx);
@@ -974,6 +986,7 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
         {
             .xch = xch,
             .fd = io_fd,
+            .stream_phase = stream_phase
         };
 
     /* GCC 4.4 (of CentOS 6.x vintage) can' t initialise anonymous unions. */
@@ -989,7 +1002,8 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
            stream_type == XC_MIG_STREAM_COLO);
 
     /* Sanity checks for callbacks. */
-    if ( hvm )
+    /* The pre mirror disks phase stream doesn't enable/disable qemu log */
+    if ( hvm && ctx.stream_phase != XC_STREAM_PHASE_PRE_MIRROR_DISKS )
         assert(callbacks->switch_qemu_logdirty);
     if ( ctx.save.checkpointed )
         assert(callbacks->checkpoint && callbacks->postcopy);
diff --git a/tools/libxc/xc_sr_save_x86_hvm.c b/tools/libxc/xc_sr_save_x86_hvm.c
index 97a8c49..423edd7 100644
--- a/tools/libxc/xc_sr_save_x86_hvm.c
+++ b/tools/libxc/xc_sr_save_x86_hvm.c
@@ -4,6 +4,32 @@
 
 #include <xen/hvm/params.h>
 
+static const unsigned int params[] = {
+    HVM_PARAM_STORE_PFN,
+    HVM_PARAM_IOREQ_PFN,
+    HVM_PARAM_BUFIOREQ_PFN,
+    HVM_PARAM_PAGING_RING_PFN,
+    HVM_PARAM_MONITOR_RING_PFN,
+    HVM_PARAM_SHARING_RING_PFN,
+    HVM_PARAM_VM86_TSS_SIZED,
+    HVM_PARAM_CONSOLE_PFN,
+    HVM_PARAM_ACPI_IOPORTS_LOCATION,
+    HVM_PARAM_VIRIDIAN,
+    HVM_PARAM_IDENT_PT,
+    HVM_PARAM_PAE_ENABLED,
+    HVM_PARAM_VM_GENERATION_ID_ADDR,
+    HVM_PARAM_IOREQ_SERVER_PFN,
+    HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+    HVM_PARAM_X87_FIP_WIDTH,
+    HVM_PARAM_MCA_CAP,
+};
+
+static const unsigned int params_mirroring[] = {
+    HVM_PARAM_STORE_PFN,
+    HVM_PARAM_IOREQ_PFN,
+    HVM_PARAM_BUFIOREQ_PFN,
+};
+
 /*
  * Query for the HVM context and write an HVM_CONTEXT record into the stream.
  */
@@ -58,30 +84,11 @@ static int write_hvm_context(struct xc_sr_context *ctx)
  * Query for a range of HVM parameters and write an HVM_PARAMS record into the
  * stream.
  */
-static int write_hvm_params(struct xc_sr_context *ctx)
+static int write_hvm_params(struct xc_sr_context *ctx,
+                            const unsigned int *params, unsigned int nr_params)
 {
-    static const unsigned int params[] = {
-        HVM_PARAM_STORE_PFN,
-        HVM_PARAM_IOREQ_PFN,
-        HVM_PARAM_BUFIOREQ_PFN,
-        HVM_PARAM_PAGING_RING_PFN,
-        HVM_PARAM_MONITOR_RING_PFN,
-        HVM_PARAM_SHARING_RING_PFN,
-        HVM_PARAM_VM86_TSS_SIZED,
-        HVM_PARAM_CONSOLE_PFN,
-        HVM_PARAM_ACPI_IOPORTS_LOCATION,
-        HVM_PARAM_VIRIDIAN,
-        HVM_PARAM_IDENT_PT,
-        HVM_PARAM_PAE_ENABLED,
-        HVM_PARAM_VM_GENERATION_ID_ADDR,
-        HVM_PARAM_IOREQ_SERVER_PFN,
-        HVM_PARAM_NR_IOREQ_SERVER_PAGES,
-        HVM_PARAM_X87_FIP_WIDTH,
-        HVM_PARAM_MCA_CAP,
-    };
-
     xc_interface *xch = ctx->xch;
-    struct xc_sr_rec_hvm_params_entry entries[ARRAY_SIZE(params)];
+    struct xc_sr_rec_hvm_params_entry entries[nr_params];
     struct xc_sr_rec_hvm_params hdr = {
         .count = 0,
     };
@@ -93,7 +100,7 @@ static int write_hvm_params(struct xc_sr_context *ctx)
     unsigned int i;
     int rc;
 
-    for ( i = 0; i < ARRAY_SIZE(params); i++ )
+    for ( i = 0; i < nr_params; i++ )
     {
         uint32_t index = params[i];
         uint64_t value;
@@ -160,7 +167,8 @@ static int x86_hvm_setup(struct xc_sr_context *ctx)
 
     ctx->save.p2m_size = nr_pfns;
 
-    if ( ctx->save.callbacks->switch_qemu_logdirty(
+    if ( ctx->stream_phase != XC_STREAM_PHASE_PRE_MIRROR_DISKS &&
+         ctx->save.callbacks->switch_qemu_logdirty(
              ctx->domid, 1, ctx->save.callbacks->data) )
     {
         PERROR("Couldn't enable qemu log-dirty mode");
@@ -205,19 +213,51 @@ static int x86_hvm_end_of_checkpoint(struct xc_sr_context *ctx)
         return rc;
 
     /* Write HVM_PARAMS record contains applicable HVM params. */
-    rc = write_hvm_params(ctx);
+    rc = write_hvm_params(ctx, params, ARRAY_SIZE(params));
     if ( rc )
         return rc;
 
     return 0;
 }
 
+static int x86_hvm_send_pre_mirror_disks_pages(struct xc_sr_context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+    uint64_t value;
+    unsigned int i;
+    int rc;
+
+    xc_set_progress_prefix(xch, "Pre-mirroring local disks phase");
+
+    for (i = 0; i < ARRAY_SIZE(params_mirroring); i++)
+    {
+        rc = xc_hvm_param_get(xch, ctx->domid, params_mirroring[i], &value);
+        if ( rc )
+            goto out;
+        rc = add_to_batch(ctx, value);
+        if ( rc )
+            goto out;
+    }
+
+    rc = flush_batch(ctx);
+    if ( rc )
+        goto out;
+
+    rc = write_hvm_params(ctx, params_mirroring, ARRAY_SIZE(params_mirroring));
+    if ( rc )
+        goto out;
+
+ out:
+    return rc;
+}
+
 static int x86_hvm_cleanup(struct xc_sr_context *ctx)
 {
     xc_interface *xch = ctx->xch;
 
     /* If qemu successfully enabled logdirty mode, attempt to disable. */
-    if ( ctx->x86_hvm.save.qemu_enabled_logdirty &&
+    if ( ctx->stream_phase != XC_STREAM_PHASE_PRE_MIRROR_DISKS &&
+         ctx->x86_hvm.save.qemu_enabled_logdirty &&
          ctx->save.callbacks->switch_qemu_logdirty(
              ctx->domid, 0, ctx->save.callbacks->data) )
     {
@@ -230,14 +270,15 @@ static int x86_hvm_cleanup(struct xc_sr_context *ctx)
 
 struct xc_sr_save_ops save_ops_x86_hvm =
 {
-    .pfn_to_gfn          = x86_hvm_pfn_to_gfn,
-    .normalise_page      = x86_hvm_normalise_page,
-    .setup               = x86_hvm_setup,
-    .start_of_stream     = x86_hvm_start_of_stream,
-    .start_of_checkpoint = x86_hvm_start_of_checkpoint,
-    .end_of_checkpoint   = x86_hvm_end_of_checkpoint,
-    .check_vm_state      = x86_hvm_check_vm_state,
-    .cleanup             = x86_hvm_cleanup,
+    .pfn_to_gfn                    = x86_hvm_pfn_to_gfn,
+    .normalise_page                = x86_hvm_normalise_page,
+    .setup                         = x86_hvm_setup,
+    .start_of_stream               = x86_hvm_start_of_stream,
+    .start_of_checkpoint           = x86_hvm_start_of_checkpoint,
+    .end_of_checkpoint             = x86_hvm_end_of_checkpoint,
+    .check_vm_state                = x86_hvm_check_vm_state,
+    .cleanup                       = x86_hvm_cleanup,
+    .pre_mirror_disks_stream_phase = x86_hvm_send_pre_mirror_disks_pages,
 };
 
 /*
-- 
2.3.2 (Apple Git-55)


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

      parent reply	other threads:[~2017-12-23 14:03 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-12-23 14:03 [PATCH RFC v3 RESEND 00/12] Migration with Local Disks Mirroring Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 01/12] Migration with Local Disks Mirroring: Added support in libxl to handle QMP events Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 02/12] Migration with Local Disks Mirroring: Added QMP commands used for mirroring disks Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 03/12] Migration with Local Disks Mirroring: Refactored migrate_read_fixedmessage Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 04/12] Migration with Local Disks Mirroring: Added a new '-q' flag to xl migrate for disk mirorring Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 05/12] Migration with Local Disks Mirroring: QEMU process is started with '-incoming defer' option Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 06/12] Migration with Local Disks Mirroring: Added 'mirror_disks' field to domain_create_state Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 07/12] Migration with Local Disks Mirroring: Added new libxl_read_stream and callbacks in restore flow Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 08/12] Migration with Local Disks Mirroring: New stream phase type for libxl streams Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 09/12] Migration with Local Disks Mirroring: New stream phase type for libxc streams Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 10/12] Migration with Local Disks Mirroring: libxl save flow support Bruno Alvisio
2017-12-23 14:03 ` [PATCH RFC v3 RESEND 11/12] Migration with Local Disks Mirroring: libxl write stream support for stream phase type Bruno Alvisio
2017-12-23 14:03 ` Bruno Alvisio [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1514037816-40864-13-git-send-email-bruno.alvisio@gmail.com \
    --to=bruno.alvisio@gmail.com \
    --cc=dave@recoil.org \
    --cc=ian.jackson@eu.citrix.com \
    --cc=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).