From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Elena Ufimtseva <elena.ufimtseva@oracle.com>,
Wei Liu <wei.liu2@citrix.com>,
Ian Campbell <ian.campbell@citrix.com>,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Ian Jackson <ian.jackson@eu.citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH RFC v1 01/13] libxc: split x86 HVM setup_guest into smaller logical functions
Date: Mon, 22 Jun 2015 18:11:15 +0200 [thread overview]
Message-ID: <1434989487-74940-2-git-send-email-roger.pau@citrix.com> (raw)
In-Reply-To: <1434989487-74940-1-git-send-email-roger.pau@citrix.com>
This is just a preparatory change to clean up the code in setup_guest.
It should not introduce any functional changes.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Elena Ufimtseva <elena.ufimtseva@oracle.com>
---
tools/libxc/xc_hvm_build_x86.c | 198 ++++++++++++++++++++++++-----------------
1 file changed, 117 insertions(+), 81 deletions(-)
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index 003ea06..f7616a8 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -232,28 +232,20 @@ static int check_mmio_hole(uint64_t start, uint64_t memsize,
return 1;
}
-static int setup_guest(xc_interface *xch,
- uint32_t dom, struct xc_hvm_build_args *args,
- char *image, unsigned long image_size)
+static int xc_hvm_populate_memory(xc_interface *xch, uint32_t dom,
+ struct xc_hvm_build_args *args,
+ xen_pfn_t *page_array)
{
- xen_pfn_t *page_array = NULL;
unsigned long i, vmemid, nr_pages = args->mem_size >> PAGE_SHIFT;
unsigned long p2m_size;
unsigned long target_pages = args->mem_target >> PAGE_SHIFT;
- unsigned long entry_eip, cur_pages, cur_pfn;
- void *hvm_info_page;
- uint32_t *ident_pt;
- struct elf_binary elf;
- uint64_t v_start, v_end;
- uint64_t m_start = 0, m_end = 0;
+ unsigned long cur_pages, cur_pfn;
int rc;
xen_capabilities_info_t caps;
unsigned long stat_normal_pages = 0, stat_2mb_pages = 0,
stat_1gb_pages = 0;
unsigned int memflags = 0;
int claim_enabled = args->claim_enabled;
- xen_pfn_t special_array[NR_SPECIAL_PAGES];
- xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES];
uint64_t total_pages;
xen_vmemrange_t dummy_vmemrange[2];
unsigned int dummy_vnode_to_pnode[1];
@@ -261,19 +253,6 @@ static int setup_guest(xc_interface *xch,
unsigned int *vnode_to_pnode;
unsigned int nr_vmemranges, nr_vnodes;
- memset(&elf, 0, sizeof(elf));
- if ( elf_init(&elf, image, image_size) != 0 )
- {
- PERROR("Could not initialise ELF image");
- goto error_out;
- }
-
- xc_elf_set_logfile(xch, &elf, 1);
-
- elf_parse_binary(&elf);
- v_start = 0;
- v_end = args->mem_size;
-
if ( nr_pages > target_pages )
memflags |= XENMEMF_populate_on_demand;
@@ -346,24 +325,6 @@ static int setup_guest(xc_interface *xch,
goto error_out;
}
- if ( modules_init(args, v_end, &elf, &m_start, &m_end) != 0 )
- {
- ERROR("Insufficient space to load modules.");
- goto error_out;
- }
-
- DPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n");
- DPRINTF(" Loader: %016"PRIx64"->%016"PRIx64"\n", elf.pstart, elf.pend);
- DPRINTF(" Modules: %016"PRIx64"->%016"PRIx64"\n", m_start, m_end);
- DPRINTF(" TOTAL: %016"PRIx64"->%016"PRIx64"\n", v_start, v_end);
- DPRINTF(" ENTRY: %016"PRIx64"\n", elf_uval(&elf, elf.ehdr, e_entry));
-
- if ( (page_array = malloc(p2m_size * sizeof(xen_pfn_t))) == NULL )
- {
- PERROR("Could not allocate memory.");
- goto error_out;
- }
-
for ( i = 0; i < p2m_size; i++ )
page_array[i] = ((xen_pfn_t)-1);
for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
@@ -563,7 +524,54 @@ static int setup_guest(xc_interface *xch,
DPRINTF(" 4KB PAGES: 0x%016lx\n", stat_normal_pages);
DPRINTF(" 2MB PAGES: 0x%016lx\n", stat_2mb_pages);
DPRINTF(" 1GB PAGES: 0x%016lx\n", stat_1gb_pages);
-
+
+ rc = 0;
+ goto out;
+ error_out:
+ rc = -1;
+ out:
+
+ /* ensure no unclaimed pages are left unused */
+ xc_domain_claim_pages(xch, dom, 0 /* cancels the claim */);
+
+ return rc;
+}
+
+static int xc_hvm_load_image(xc_interface *xch,
+ uint32_t dom, struct xc_hvm_build_args *args,
+ xen_pfn_t *page_array)
+{
+ unsigned long entry_eip, image_size;
+ struct elf_binary elf;
+ uint64_t v_start, v_end;
+ uint64_t m_start = 0, m_end = 0;
+ char *image;
+ int rc;
+
+ image = xc_read_image(xch, args->image_file_name, &image_size);
+ if ( image == NULL )
+ return -1;
+
+ memset(&elf, 0, sizeof(elf));
+ if ( elf_init(&elf, image, image_size) != 0 )
+ goto error_out;
+
+ xc_elf_set_logfile(xch, &elf, 1);
+
+ elf_parse_binary(&elf);
+ v_start = 0;
+ v_end = args->mem_size;
+
+ if ( modules_init(args, v_end, &elf, &m_start, &m_end) != 0 )
+ {
+ ERROR("Insufficient space to load modules.");
+ goto error_out;
+ }
+
+ DPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n");
+ DPRINTF(" Loader: %016"PRIx64"->%016"PRIx64"\n", elf.pstart, elf.pend);
+ DPRINTF(" Modules: %016"PRIx64"->%016"PRIx64"\n", m_start, m_end);
+
if ( loadelfimage(xch, &elf, dom, page_array) != 0 )
{
PERROR("Could not load ELF image");
@@ -576,6 +584,44 @@ static int setup_guest(xc_interface *xch,
goto error_out;
}
+ /* Insert JMP <rel32> instruction at address 0x0 to reach entry point. */
+ entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
+ if ( entry_eip != 0 )
+ {
+ char *page0 = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, 0);
+ if ( page0 == NULL )
+ goto error_out;
+ page0[0] = 0xe9;
+ *(uint32_t *)&page0[1] = entry_eip - 5;
+ munmap(page0, PAGE_SIZE);
+ }
+
+ rc = 0;
+ goto out;
+ error_out:
+ rc = -1;
+ out:
+ if ( elf_check_broken(&elf) )
+ ERROR("HVM ELF broken: %s", elf_check_broken(&elf));
+ free(image);
+
+ return rc;
+}
+
+static int xc_hvm_populate_params(xc_interface *xch, uint32_t dom,
+ struct xc_hvm_build_args *args)
+{
+ unsigned long i;
+ void *hvm_info_page;
+ uint32_t *ident_pt;
+ uint64_t v_end;
+ int rc;
+ xen_pfn_t special_array[NR_SPECIAL_PAGES];
+ xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES];
+
+ v_end = args->mem_size;
+
if ( (hvm_info_page = xc_map_foreign_range(
xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
HVM_INFO_PFN)) == NULL )
@@ -664,34 +710,12 @@ static int setup_guest(xc_interface *xch,
xc_hvm_param_set(xch, dom, HVM_PARAM_IDENT_PT,
special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
- /* Insert JMP <rel32> instruction at address 0x0 to reach entry point. */
- entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
- if ( entry_eip != 0 )
- {
- char *page0 = xc_map_foreign_range(
- xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, 0);
- if ( page0 == NULL )
- {
- PERROR("Could not map page0");
- goto error_out;
- }
- page0[0] = 0xe9;
- *(uint32_t *)&page0[1] = entry_eip - 5;
- munmap(page0, PAGE_SIZE);
- }
-
rc = 0;
goto out;
error_out:
rc = -1;
out:
- if ( elf_check_broken(&elf) )
- ERROR("HVM ELF broken: %s", elf_check_broken(&elf));
-
- /* ensure no unclaimed pages are left unused */
- xc_domain_claim_pages(xch, dom, 0 /* cancels the claim */);
- free(page_array);
return rc;
}
@@ -702,9 +726,8 @@ int xc_hvm_build(xc_interface *xch, uint32_t domid,
struct xc_hvm_build_args *hvm_args)
{
struct xc_hvm_build_args args = *hvm_args;
- void *image;
- unsigned long image_size;
- int sts;
+ xen_pfn_t *parray = NULL;
+ int rc;
if ( domid == 0 )
return -1;
@@ -715,24 +738,37 @@ int xc_hvm_build(xc_interface *xch, uint32_t domid,
if ( args.mem_size < (2ull << 20) || args.mem_target < (2ull << 20) )
return -1;
- image = xc_read_image(xch, args.image_file_name, &image_size);
- if ( image == NULL )
+ parray = malloc((args.mem_size >> PAGE_SHIFT) * sizeof(xen_pfn_t));
+ if ( parray == NULL )
return -1;
- sts = setup_guest(xch, domid, &args, image, image_size);
-
- if (!sts)
+ rc = xc_hvm_populate_memory(xch, domid, &args, parray);
+ if ( rc != 0 )
{
- /* Return module load addresses to caller */
- hvm_args->acpi_module.guest_addr_out =
- args.acpi_module.guest_addr_out;
- hvm_args->smbios_module.guest_addr_out =
- args.smbios_module.guest_addr_out;
+ PERROR("xc_hvm_populate_memory failed");
+ goto out;
+ }
+ rc = xc_hvm_load_image(xch, domid, &args, parray);
+ if ( rc != 0 )
+ {
+ PERROR("xc_hvm_load_image failed");
+ goto out;
+ }
+ rc = xc_hvm_populate_params(xch, domid, &args);
+ if ( rc != 0 )
+ {
+ PERROR("xc_hvm_populate_params failed");
+ goto out;
}
- free(image);
+ /* Return module load addresses to caller */
+ hvm_args->acpi_module.guest_addr_out = args.acpi_module.guest_addr_out;
+ hvm_args->smbios_module.guest_addr_out = args.smbios_module.guest_addr_out;
- return sts;
+out:
+ free(parray);
+
+ return rc;
}
/* xc_hvm_build_target_mem:
--
1.9.5 (Apple Git-50.3)
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2015-06-22 16:11 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-06-22 16:11 [PATCH RFC v1 00/13] Introduce HVM without dm and new boot ABI Roger Pau Monne
2015-06-22 16:11 ` Roger Pau Monne [this message]
2015-06-22 16:11 ` [PATCH RFC v1 02/13] libxc: unify xc_dom_p2m_{host/guest} Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 03/13] libxc: introduce the notion of a container type Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 04/13] libxc: allow arch_setup_meminit to populate HVM domain memory Roger Pau Monne
2015-06-25 10:29 ` Wei Liu
2015-06-25 10:33 ` Wei Liu
2015-06-22 16:11 ` [PATCH RFC v1 05/13] libxc: introduce a domain loader for HVM guest firmware Roger Pau Monne
2015-06-23 9:29 ` Jan Beulich
2015-06-23 9:36 ` Roger Pau Monné
2015-07-10 19:09 ` Konrad Rzeszutek Wilk
2015-06-22 16:11 ` [PATCH RFC v1 06/13] libxc: introduce a xc_dom_arch for hvm-3.0-x86_32 guests Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 07/13] libxl: switch HVM domain building to use xc_dom_* helpers Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 08/13] libxc: remove dead x86 HVM code Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 09/13] elfnotes: introduce a new PHYS_ENTRY elfnote Roger Pau Monne
2015-06-23 9:35 ` Jan Beulich
2015-06-23 9:40 ` Roger Pau Monné
2015-06-23 10:01 ` Jan Beulich
2015-06-22 16:11 ` [PATCH RFC v1 10/13] lib{xc/xl}: allow the creation of HVM domains with a kernel Roger Pau Monne
2015-06-25 10:39 ` Wei Liu
2015-06-22 16:11 ` [PATCH RFC v1 11/13] xen/libxl: allow creating HVM guests without a device model Roger Pau Monne
2015-06-23 9:41 ` Jan Beulich
2015-06-22 16:11 ` [PATCH RFC v1 12/13] xen: allow 64bit HVM guests to use XENMEM_memory_map Roger Pau Monne
2015-06-23 9:43 ` Jan Beulich
2015-06-22 16:11 ` [PATCH RFC v1 13/13] xenconsole: try to attach to PV console if HVM fails Roger Pau Monne
2015-06-22 17:55 ` [PATCH RFC v1 00/13] Introduce HVM without dm and new boot ABI Stefano Stabellini
2015-06-22 18:05 ` Konrad Rzeszutek Wilk
2015-06-23 8:14 ` Roger Pau Monné
2015-06-23 10:55 ` Stefano Stabellini
2015-06-23 12:50 ` Ian Campbell
2015-06-23 13:12 ` Stefano Stabellini
2015-06-24 2:45 ` Boris Ostrovsky
2015-06-24 9:47 ` Roger Pau Monné
2015-06-24 10:05 ` Jan Beulich
2015-06-24 10:14 ` Roger Pau Monné
2015-06-24 11:52 ` Boris Ostrovsky
2015-06-24 12:04 ` Roger Pau Monné
2015-06-24 13:36 ` Konrad Rzeszutek Wilk
2015-07-03 16:22 ` Tim Deegan
2015-06-24 13:26 ` Stefano Stabellini
2015-06-24 16:30 ` Boris Ostrovsky
2015-06-24 17:54 ` Stefano Stabellini
2015-06-23 7:14 ` Roger Pau Monné
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1434989487-74940-2-git-send-email-roger.pau@citrix.com \
--to=roger.pau@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=boris.ostrovsky@oracle.com \
--cc=elena.ufimtseva@oracle.com \
--cc=ian.campbell@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=stefano.stabellini@eu.citrix.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).