From: Wen Congyang <wency@cn.fujitsu.com>
To: Dong Eddie <eddie.dong@intel.com>,
	Lai Jiangshan <laijs@cn.fujitsu.com>,
	xen-devel <xen-devel@lists.xen.org>,
	Shriram Rajagopalan <rshriram@cs.ubc.ca>
Cc: Jiang Yunhong <yunhong.jiang@intel.com>,
	Wen Congyang <wency@cn.fujitsu.com>,
	Ye Wei <wei.ye1987@gmail.com>, Xu Yao <xuyao.xu@huawei.com>,
	Hong Tao <bobby.hong@huawei.com>
Subject: [RFC Patch v2 06/16] colo: implement restore_callbacks init()/free()
Date: Thu, 11 Jul 2013 16:35:38 +0800
Message-ID: <1373531748-12547-7-git-send-email-wency@cn.fujitsu.com>
In-Reply-To: <1373531748-12547-1-git-send-email-wency@cn.fujitsu.com>

This patch implements the restore callbacks init() and free() for COLO:
1. init(): allocate the state needed on the restore side (pfn/type
   buffers, p2m frame list caches, the dirty-page bitmap and a cache of
   the whole guest memory) and open an event channel handle
2. free(): release everything that init() allocated
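
For illustration only, here is a minimal sketch of how these callbacks
might be plugged into the restore_callbacks structure introduced by the
previous patch; the field names below are assumptions made for this
sketch, not taken from that patch:

    static struct restore_callbacks colo_cbs = {
        .init = colo_init,   /* allocate COLO state before restore starts */
        .free = colo_free,   /* release it when the restore loop exits */
        /*
         * get_page()/flush_memory()/update_p2m()/finish_restore() are
         * filled in by later patches in this series.
         */
    };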

Signed-off-by: Ye Wei <wei.ye1987@gmail.com>
Signed-off-by: Jiang Yunhong <yunhong.jiang@intel.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
---
 tools/libxc/Makefile                 |    2 +-
 tools/libxc/xc_domain_restore_colo.c |  145 ++++++++++++++++++++++++++++++++++
 tools/libxc/xc_save_restore_colo.h   |   10 +++
 3 files changed, 156 insertions(+), 1 deletions(-)
 create mode 100644 tools/libxc/xc_domain_restore_colo.c
 create mode 100644 tools/libxc/xc_save_restore_colo.h

diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index 512a994..70994b9 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -42,7 +42,7 @@ CTRL_SRCS-$(CONFIG_MiniOS) += xc_minios.c
 GUEST_SRCS-y :=
 GUEST_SRCS-y += xg_private.c xc_suspend.c
 ifeq ($(CONFIG_MIGRATE),y)
-GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c
+GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c xc_domain_restore_colo.c
 GUEST_SRCS-y += xc_offline_page.c xc_compression.c
 else
 GUEST_SRCS-y += xc_nomigrate.c
diff --git a/tools/libxc/xc_domain_restore_colo.c b/tools/libxc/xc_domain_restore_colo.c
new file mode 100644
index 0000000..674e55e
--- /dev/null
+++ b/tools/libxc/xc_domain_restore_colo.c
@@ -0,0 +1,145 @@
+#include <xc_save_restore_colo.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <xc_bitops.h>
+
+struct restore_colo_data
+{
+    unsigned long max_mem_pfn;
+
+    /* Cache of the whole guest memory.
+     *
+     * The SVM (secondary VM) keeps running in COLO mode, so we have to
+     * cache the whole memory of the SVM.
+     */
+    char* pagebase;
+
+    /* bitmap recording which pages are dirty */
+    unsigned long *dirty_pages;
+
+    /* suspend evtchn */
+    int local_port;
+
+    xc_evtchn *xce;
+
+    int first_time;
+
+    /* PV */
+    /* the pfn types on the slave side */
+    unsigned long *pfn_type_slaver;
+    xen_pfn_t p2m_fll;
+
+    /* cache p2m frame list list */
+    char *p2m_frame_list_list;
+
+    /* cache p2m frame list */
+    char *p2m_frame_list;
+
+    /* temporary buffers (avoid frequent malloc/free) */
+    unsigned long *pfn_batch_slaver;
+    unsigned long *pfn_type_batch_slaver;
+    unsigned long *p2m_frame_list_temp;
+};
+
+/* We restore only one VM per process, so it is safe to use a global variable. */
+DECLARE_HYPERCALL_BUFFER(unsigned long, dirty_pages);
+
+int colo_init(struct restore_data *comm_data, void **data)
+{
+    xc_dominfo_t info;
+    int i;
+    unsigned long size;
+    xc_interface *xch = comm_data->xch;
+    struct restore_colo_data *colo_data;
+    struct domain_info_context *dinfo = comm_data->dinfo;
+
+    if (dirty_pages)
+        /* colo_init() has already been called? */
+        return -1;
+
+    colo_data = calloc(1, sizeof(struct restore_colo_data));
+    if (!colo_data)
+        return -1;
+
+    if (comm_data->hvm)
+    {
+        /* HVM is not supported yet */
+        free(colo_data);
+        return -1;
+    }
+
+    if (xc_domain_getinfo(xch, comm_data->dom, 1, &info) != 1)
+    {
+        PERROR("Could not get domain info");
+        goto err;
+    }
+
+    colo_data->max_mem_pfn = info.max_memkb >> (PAGE_SHIFT - 10);
+
+    colo_data->pfn_type_slaver = calloc(dinfo->p2m_size, sizeof(xen_pfn_t));
+    colo_data->pfn_batch_slaver = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
+    colo_data->pfn_type_batch_slaver = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
+    colo_data->p2m_frame_list_temp = malloc(P2M_FL_ENTRIES * sizeof(unsigned long));
+    colo_data->p2m_frame_list_list = malloc(PAGE_SIZE);
+    colo_data->p2m_frame_list = malloc(P2M_FLL_ENTRIES * PAGE_SIZE);
+    if (!colo_data->pfn_type_slaver || !colo_data->pfn_batch_slaver ||
+        !colo_data->pfn_type_batch_slaver || !colo_data->p2m_frame_list_temp ||
+        !colo_data->p2m_frame_list_list || !colo_data->p2m_frame_list) {
+        PERROR("Could not allocate memory for restore colo data");
+        goto err;
+    }
+
+    dirty_pages = xc_hypercall_buffer_alloc_pages(xch, dirty_pages,
+                        NRPAGES(bitmap_size(dinfo->p2m_size)));
+    colo_data->dirty_pages = dirty_pages;
+
+    size = dinfo->p2m_size * PAGE_SIZE;
+    colo_data->pagebase = malloc(size);
+    if (!colo_data->dirty_pages || !colo_data->pagebase) {
+        PERROR("Could not allocate memory for restore colo data");
+        goto err;
+    }
+
+    colo_data->xce = xc_evtchn_open(NULL, 0);
+    if (!colo_data->xce) {
+        PERROR("Could not open evtchn");
+        goto err;
+    }
+
+    for (i = 0; i < dinfo->p2m_size; i++)
+        comm_data->pfn_type[i] = XEN_DOMCTL_PFINFO_XTAB;
+    memset(dirty_pages, 0xff, bitmap_size(dinfo->p2m_size));
+    colo_data->first_time = 1;
+    colo_data->local_port = -1;
+    *data = colo_data;
+
+    return 0;
+
+err:
+    colo_free(comm_data, colo_data);
+    *data = NULL;
+    return -1;
+}
+
+void colo_free(struct restore_data *comm_data, void *data)
+{
+    struct restore_colo_data *colo_data = data;
+    struct domain_info_context *dinfo = comm_data->dinfo;
+
+    if (!colo_data)
+        return;
+
+    free(colo_data->pfn_type_slaver);
+    free(colo_data->pagebase);
+    free(colo_data->pfn_batch_slaver);
+    free(colo_data->pfn_type_batch_slaver);
+    free(colo_data->p2m_frame_list_temp);
+    free(colo_data->p2m_frame_list);
+    free(colo_data->p2m_frame_list_list);
+    if (dirty_pages)
+        xc_hypercall_buffer_free_pages(comm_data->xch, dirty_pages,
+                                       NRPAGES(bitmap_size(dinfo->p2m_size)));
+    if (colo_data->xce)
+        xc_evtchn_close(colo_data->xce);
+    free(colo_data);
+}
diff --git a/tools/libxc/xc_save_restore_colo.h b/tools/libxc/xc_save_restore_colo.h
new file mode 100644
index 0000000..b5416af
--- /dev/null
+++ b/tools/libxc/xc_save_restore_colo.h
@@ -0,0 +1,10 @@
+#ifndef XC_SAVE_RESTORE_COLO_H
+#define XC_SAVE_RESTORE_COLO_H
+
+#include <xg_save_restore.h>
+#include <xg_private.h>
+
+extern int colo_init(struct restore_data *, void **);
+extern void colo_free(struct restore_data *, void *);
+
+#endif
-- 
1.7.4
