xen-devel.lists.xenproject.org archive mirror
From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Elena Ufimtseva <elena.ufimtseva@oracle.com>,
	Wei Liu <wei.liu2@citrix.com>,
	Ian Campbell <ian.campbell@citrix.com>,
	Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Jan Beulich <jbeulich@suse.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH RFC v1 07/13] libxl: switch HVM domain building to use xc_dom_* helpers
Date: Mon, 22 Jun 2015 18:11:21 +0200
Message-ID: <1434989487-74940-8-git-send-email-roger.pau@citrix.com>
In-Reply-To: <1434989487-74940-1-git-send-email-roger.pau@citrix.com>

Now that all the code is in place, HVM domain building in libxl can be
switched to use the xc_dom_* family of functions, just as they are already
used to build PV guests.
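
The resulting flow in libxl__build_hvm mirrors the PV path. A simplified
sketch of the xc_dom_* sequence this patch uses (error handling, firmware
loading and vNUMA setup omitted; see the diff below for the full code):

    struct xc_dom_image *dom;

    xc_dom_loginit(ctx->xch);
    dom = xc_dom_allocate(ctx->xch, NULL, NULL);     /* no cmdline/features */
    dom->container_type = XC_DOM_HVM_CONTAINER;      /* build an HVM container */
    /* ... firmware is loaded as the "kernel" via xc_dom_kernel_file() ... */
    xc_dom_boot_xen_init(dom, ctx->xch, domid);      /* bind image to xch/domid */
    xc_dom_parse_image(dom);                         /* parse the firmware image */
    xc_dom_mem_init(dom, mem_size / (1024 * 1024));  /* memory size in MB */
    xc_dom_boot_mem_init(dom);                       /* populate guest memory */
    xc_dom_build_image(dom);                         /* load image into the guest */
    xc_dom_boot_image(dom);                          /* set up the boot context */
    xc_dom_release(dom);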

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Elena Ufimtseva <elena.ufimtseva@oracle.com>
---
TBH HVM building still avoids calling some xc_dom_* functions that are used
when building PV guests. IMHO we should aim for the exact same flow for
building both PV and HVM domains, so that libxl__build_pv and
libxl__build_hvm can be unified into a single function.
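
A purely illustrative sketch of what such a unified entry point could look
like (the function name and structure are hypothetical and not part of this
patch; XC_DOM_PV_CONTAINER is assumed from the container-type patch earlier
in this series):

    /* Hypothetical unified builder: both guest types share the xc_dom_* flow. */
    static int libxl__build_dom(libxl__gc *gc, uint32_t domid,
                                libxl_domain_build_info *info,
                                libxl__domain_build_state *state)
    {
        struct xc_dom_image *dom = xc_dom_allocate(CTX->xch, NULL, NULL);

        if (!dom) return ERROR_FAIL;
        dom->container_type = info->type == LIBXL_DOMAIN_TYPE_HVM ?
                              XC_DOM_HVM_CONTAINER : XC_DOM_PV_CONTAINER;
        /* type-specific setup: kernel or firmware, memory layout, vNUMA, ... */
        /* ... then the common xc_dom_* sequence used by libxl__build_hvm below. */
        xc_dom_release(dom);
        return 0;
    }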
---
 tools/libxl/libxl_dom.c      | 146 ++++++++++++++++++++++++++++---------------
 tools/libxl/libxl_internal.h |   2 +-
 tools/libxl/libxl_vnuma.c    |  12 ++--
 3 files changed, 105 insertions(+), 55 deletions(-)

diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 6273052..12ede26 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -801,39 +801,39 @@ static int hvm_build_set_params(xc_interface *handle, uint32_t domid,
 
 static int hvm_build_set_xs_values(libxl__gc *gc,
                                    uint32_t domid,
-                                   struct xc_hvm_build_args *args)
+                                   struct xc_dom_image *dom)
 {
     char *path = NULL;
     int ret = 0;
 
-    if (args->smbios_module.guest_addr_out) {
+    if (dom->smbios_module.guest_addr_out) {
         path = GCSPRINTF("/local/domain/%d/"HVM_XS_SMBIOS_PT_ADDRESS, domid);
 
         ret = libxl__xs_write(gc, XBT_NULL, path, "0x%"PRIx64,
-                              args->smbios_module.guest_addr_out);
+                              dom->smbios_module.guest_addr_out);
         if (ret)
             goto err;
 
         path = GCSPRINTF("/local/domain/%d/"HVM_XS_SMBIOS_PT_LENGTH, domid);
 
         ret = libxl__xs_write(gc, XBT_NULL, path, "0x%x",
-                              args->smbios_module.length);
+                              dom->smbios_module.length);
         if (ret)
             goto err;
     }
 
-    if (args->acpi_module.guest_addr_out) {
+    if (dom->acpi_module.guest_addr_out) {
         path = GCSPRINTF("/local/domain/%d/"HVM_XS_ACPI_PT_ADDRESS, domid);
 
         ret = libxl__xs_write(gc, XBT_NULL, path, "0x%"PRIx64,
-                              args->acpi_module.guest_addr_out);
+                              dom->acpi_module.guest_addr_out);
         if (ret)
             goto err;
 
         path = GCSPRINTF("/local/domain/%d/"HVM_XS_ACPI_PT_LENGTH, domid);
 
         ret = libxl__xs_write(gc, XBT_NULL, path, "0x%x",
-                              args->acpi_module.length);
+                              dom->acpi_module.length);
         if (ret)
             goto err;
     }
@@ -847,7 +847,7 @@ err:
 
 static int libxl__domain_firmware(libxl__gc *gc,
                                   libxl_domain_build_info *info,
-                                  struct xc_hvm_build_args *args)
+                                  struct xc_dom_image *dom)
 {
     libxl_ctx *ctx = libxl__gc_owner(gc);
     const char *firmware;
@@ -873,8 +873,13 @@ static int libxl__domain_firmware(libxl__gc *gc,
             break;
         }
     }
-    args->image_file_name = libxl__abs_path(gc, firmware,
-                                            libxl__xenfirmwaredir_path());
+
+    rc = xc_dom_kernel_file(dom, libxl__abs_path(gc, firmware,
+                                                 libxl__xenfirmwaredir_path()));
+    if (rc != 0) {
+        LOGE(ERROR, "xc_dom_kernel_file failed");
+        goto out;
+    }
 
     if (info->u.hvm.smbios_firmware) {
         data = NULL;
@@ -888,8 +893,8 @@ static int libxl__domain_firmware(libxl__gc *gc,
         libxl__ptr_add(gc, data);
         if (datalen) {
             /* Only accept non-empty files */
-            args->smbios_module.data = data;
-            args->smbios_module.length = (uint32_t)datalen;
+            dom->smbios_module.data = data;
+            dom->smbios_module.length = (uint32_t)datalen;
         }
     }
 
@@ -905,8 +910,8 @@ static int libxl__domain_firmware(libxl__gc *gc,
         libxl__ptr_add(gc, data);
         if (datalen) {
             /* Only accept non-empty files */
-            args->acpi_module.data = data;
-            args->acpi_module.length = (uint32_t)datalen;
+            dom->acpi_module.data = data;
+            dom->acpi_module.length = (uint32_t)datalen;
         }
     }
 
@@ -920,50 +925,61 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
               libxl__domain_build_state *state)
 {
     libxl_ctx *ctx = libxl__gc_owner(gc);
-    struct xc_hvm_build_args args = {};
     int ret, rc = ERROR_FAIL;
-    uint64_t mmio_start, lowmem_end, highmem_end;
+    uint64_t mmio_start, lowmem_end, highmem_end, mem_size;
+    struct xc_dom_image *dom = NULL;
+
+    xc_dom_loginit(ctx->xch);
+
+    dom = xc_dom_allocate(ctx->xch, NULL, NULL);
+    if (!dom) {
+        LOGE(ERROR, "xc_dom_allocate failed");
+        goto out;
+    }
+
+    dom->container_type = XC_DOM_HVM_CONTAINER;
 
-    memset(&args, 0, sizeof(struct xc_hvm_build_args));
     /* The params from the configuration file are in Mb, which are then
      * multiplied by 1 Kb. This was then divided off when calling
      * the old xc_hvm_build_target_mem() which then turned them to bytes.
      * Do all this in one step here...
      */
-    args.mem_size = (uint64_t)(info->max_memkb - info->video_memkb) << 10;
-    args.mem_target = (uint64_t)(info->target_memkb - info->video_memkb) << 10;
-    args.claim_enabled = libxl_defbool_val(info->claim_mode);
+    mem_size = (uint64_t)(info->max_memkb - info->video_memkb) << 10;
+    dom->target_pages = (uint64_t)(info->target_memkb - info->video_memkb) >> 2;
+    dom->claim_enabled = libxl_defbool_val(info->claim_mode);
     if (info->u.hvm.mmio_hole_memkb) {
         uint64_t max_ram_below_4g = (1ULL << 32) -
             (info->u.hvm.mmio_hole_memkb << 10);
 
         if (max_ram_below_4g < HVM_BELOW_4G_MMIO_START)
-            args.mmio_size = info->u.hvm.mmio_hole_memkb << 10;
+            dom->mmio_size = info->u.hvm.mmio_hole_memkb << 10;
     }
-    if (libxl__domain_firmware(gc, info, &args)) {
+    if (libxl__domain_firmware(gc, info, dom)) {
         LOG(ERROR, "initializing domain firmware failed");
         goto out;
     }
-    if (args.mem_target == 0)
-        args.mem_target = args.mem_size;
-    if (args.mmio_size == 0)
-        args.mmio_size = HVM_BELOW_4G_MMIO_LENGTH;
-    lowmem_end = args.mem_size;
+
+    if (dom->target_pages == 0)
+        dom->target_pages = mem_size >> XC_PAGE_SHIFT;
+    if (dom->mmio_size == 0)
+        dom->mmio_size = HVM_BELOW_4G_MMIO_LENGTH;
+    lowmem_end = mem_size;
     highmem_end = 0;
-    mmio_start = (1ull << 32) - args.mmio_size;
+    mmio_start = (1ull << 32) - dom->mmio_size;
     if (lowmem_end > mmio_start)
     {
         highmem_end = (1ull << 32) + (lowmem_end - mmio_start);
         lowmem_end = mmio_start;
     }
-    args.lowmem_end = lowmem_end;
-    args.highmem_end = highmem_end;
-    args.mmio_start = mmio_start;
+    dom->lowmem_end = lowmem_end;
+    dom->highmem_end = highmem_end;
+    dom->mmio_start = mmio_start;
+    dom->vga_hole = 1;
 
     if (info->num_vnuma_nodes != 0) {
         int i;
 
-        ret = libxl__vnuma_build_vmemrange_hvm(gc, domid, info, state, &args);
+        ret = libxl__vnuma_build_vmemrange_hvm(gc, domid, info, state, dom);
         if (ret) {
             LOGEV(ERROR, ret, "hvm build vmemranges failed");
             goto out;
@@ -973,27 +989,57 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
         ret = set_vnuma_info(gc, domid, info, state);
         if (ret) goto out;
 
-        args.nr_vmemranges = state->num_vmemranges;
-        args.vmemranges = libxl__malloc(gc, sizeof(*args.vmemranges) *
-                                        args.nr_vmemranges);
+        dom->nr_vmemranges = state->num_vmemranges;
+        dom->vmemranges = libxl__malloc(gc, sizeof(*dom->vmemranges) *
+                                        dom->nr_vmemranges);
 
-        for (i = 0; i < args.nr_vmemranges; i++) {
-            args.vmemranges[i].start = state->vmemranges[i].start;
-            args.vmemranges[i].end   = state->vmemranges[i].end;
-            args.vmemranges[i].flags = state->vmemranges[i].flags;
-            args.vmemranges[i].nid   = state->vmemranges[i].nid;
+        for (i = 0; i < dom->nr_vmemranges; i++) {
+            dom->vmemranges[i].start = state->vmemranges[i].start;
+            dom->vmemranges[i].end   = state->vmemranges[i].end;
+            dom->vmemranges[i].flags = state->vmemranges[i].flags;
+            dom->vmemranges[i].nid   = state->vmemranges[i].nid;
         }
 
-        args.nr_vnodes = info->num_vnuma_nodes;
-        args.vnode_to_pnode = libxl__malloc(gc, sizeof(*args.vnode_to_pnode) *
-                                            args.nr_vnodes);
-        for (i = 0; i < args.nr_vnodes; i++)
-            args.vnode_to_pnode[i] = info->vnuma_nodes[i].pnode;
+        dom->nr_vnodes = info->num_vnuma_nodes;
+        dom->vnode_to_pnode = libxl__malloc(gc, sizeof(*dom->vnode_to_pnode) *
+                                            dom->nr_vnodes);
+        for (i = 0; i < dom->nr_vnodes; i++)
+            dom->vnode_to_pnode[i] = info->vnuma_nodes[i].pnode;
+    }
+
+    rc = xc_dom_boot_xen_init(dom, ctx->xch, domid);
+    if (rc != 0) {
+        LOGE(ERROR, "xc_dom_boot_xen_init failed");
+        goto out;
     }
 
-    ret = xc_hvm_build(ctx->xch, domid, &args);
-    if (ret) {
-        LOGEV(ERROR, ret, "hvm building failed");
+    rc = xc_dom_parse_image(dom);
+    if (rc != 0) {
+        LOGE(ERROR, "xc_dom_parse_image failed");
+        goto out;
+    }
+
+    rc = xc_dom_mem_init(dom, mem_size / (1024 * 1024));
+    if (rc != 0) {
+        LOGE(ERROR, "xc_dom_mem_init failed");
+        goto out;
+    }
+
+    rc = xc_dom_boot_mem_init(dom);
+    if (rc != 0) {
+        LOGE(ERROR, "xc_dom_boot_mem_init failed");
+        goto out;
+    }
+
+    rc = xc_dom_build_image(dom);
+    if (rc != 0) {
+        LOGE(ERROR, "xc_dom_build_image failed");
+        goto out;
+    }
+
+    rc = xc_dom_boot_image(dom);
+    if (rc != 0) {
+        LOGE(ERROR, "xc_dom_boot_image failed");
         goto out;
     }
 
@@ -1006,14 +1052,16 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
         goto out;
     }
 
-    ret = hvm_build_set_xs_values(gc, domid, &args);
+    ret = hvm_build_set_xs_values(gc, domid, dom);
     if (ret) {
         LOG(ERROR, "hvm build set xenstore values failed (ret=%d)", ret);
         goto out;
     }
 
+    xc_dom_release(dom);
     return 0;
 out:
+    if (dom != NULL) xc_dom_release(dom);
     return rc;
 }
 
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index e96d6b5..71b3714 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -3461,7 +3461,7 @@ int libxl__vnuma_build_vmemrange_hvm(libxl__gc *gc,
                                      uint32_t domid,
                                      libxl_domain_build_info *b_info,
                                      libxl__domain_build_state *state,
-                                     struct xc_hvm_build_args *args);
+                                     struct xc_dom_image *dom);
 bool libxl__vnuma_configured(const libxl_domain_build_info *b_info);
 
 _hidden int libxl__ms_vm_genid_set(libxl__gc *gc, uint32_t domid,
diff --git a/tools/libxl/libxl_vnuma.c b/tools/libxl/libxl_vnuma.c
index 56856d2..db22799 100644
--- a/tools/libxl/libxl_vnuma.c
+++ b/tools/libxl/libxl_vnuma.c
@@ -17,6 +17,8 @@
 #include "libxl_arch.h"
 #include <stdlib.h>
 
+#include <xc_dom.h>
+
 bool libxl__vnuma_configured(const libxl_domain_build_info *b_info)
 {
     return b_info->num_vnuma_nodes != 0;
@@ -252,7 +254,7 @@ int libxl__vnuma_build_vmemrange_hvm(libxl__gc *gc,
                                      uint32_t domid,
                                      libxl_domain_build_info *b_info,
                                      libxl__domain_build_state *state,
-                                     struct xc_hvm_build_args *args)
+                                     struct xc_dom_image *dom)
 {
     uint64_t hole_start, hole_end, next;
     int nid, nr_vmemrange;
@@ -264,10 +266,10 @@ int libxl__vnuma_build_vmemrange_hvm(libxl__gc *gc,
      * Guest physical address space layout:
      * [0, hole_start) [hole_start, hole_end) [hole_end, highmem_end)
      */
-    hole_start = args->lowmem_end < args->mmio_start ?
-        args->lowmem_end : args->mmio_start;
-    hole_end = (args->mmio_start + args->mmio_size) > (1ULL << 32) ?
-        (args->mmio_start + args->mmio_size) : (1ULL << 32);
+    hole_start = dom->lowmem_end < dom->mmio_start ?
+        dom->lowmem_end : dom->mmio_start;
+    hole_end = (dom->mmio_start + dom->mmio_size) > (1ULL << 32) ?
+        (dom->mmio_start + dom->mmio_size) : (1ULL << 32);
 
     assert(state->vmemranges == NULL);
 
-- 
1.9.5 (Apple Git-50.3)


