From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Elena Ufimtseva <elena.ufimtseva@oracle.com>,
	Wei Liu <wei.liu2@citrix.com>,
	Ian Campbell <ian.campbell@citrix.com>,
	Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Jan Beulich <jbeulich@suse.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH RFC v1 06/13] libxc: introduce a xc_dom_arch for hvm-3.0-x86_32 guests
Date: Mon, 22 Jun 2015 18:11:20 +0200
Message-ID: <1434989487-74940-7-git-send-email-roger.pau@citrix.com>
In-Reply-To: <1434989487-74940-1-git-send-email-roger.pau@citrix.com>

This xc_dom_arch will be used to build HVM domains. The code is based on
the existing xc_hvm_populate_params function.
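For context, this is roughly how a caller would exercise the new arch through
the generic xc_dom_* build sequence. The sketch below is illustrative only: it
is not the exact libxl call sequence, error handling is omitted, and the
container_type field is the one introduced earlier in this series.

    /*
     * Illustrative only: ordering follows the existing xc_dom_*
     * interfaces; XC_DOM_HVM_CONTAINER is assumed from the container
     * type patch earlier in this series.
     */
    #include <xenctrl.h>
    #include <xc_dom.h>

    static int build_hvm_guest(xc_interface *xch, domid_t domid,
                               const char *firmware, unsigned int mem_mb)
    {
        struct xc_dom_image *dom = xc_dom_allocate(xch, NULL, NULL);

        if ( dom == NULL )
            return -1;

        dom->container_type = XC_DOM_HVM_CONTAINER;

        xc_dom_kernel_file(dom, firmware);   /* image handled by the HVM loader */
        xc_dom_boot_xen_init(dom, xch, domid);
        xc_dom_parse_image(dom);             /* guest type set by the loader    */
        xc_dom_mem_init(dom, mem_mb);
        xc_dom_boot_mem_init(dom);           /* arch_setup_meminit()            */
        xc_dom_build_image(dom);             /* calls alloc_magic_pages_hvm()   */
        xc_dom_boot_image(dom);              /* calls vcpu_hvm()                */

        xc_dom_release(dom);
        return 0;
    }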

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Elena Ufimtseva <elena.ufimtseva@oracle.com>
---
This abuses the alloc_magic_pages hook in order to set everything up, which is
not the best approach, but it works. In later versions I would like to break
alloc_magic_pages_hvm into smaller functions that can be used to populate the
remaining hooks (start_info and shared_info); see the sketch below.
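
To make that intent a bit more concrete, a purely hypothetical sketch of the
eventual split (the start_info_hvm/shared_info_hvm helpers below do not exist
in this patch) could look like:

    /* Hypothetical restructuring only -- not part of this patch. */
    static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
    {
        /* Keep only the physmap population and clearing of the
         * special and ioreq server pages here. */
        return 0;
    }

    static int start_info_hvm(struct xc_dom_image *dom)
    {
        /* Build the hvm_info table and set the HVM params
         * (store, console, ioreq, ...) once the pages exist. */
        return 0;
    }

    static int shared_info_hvm(struct xc_dom_image *dom, void *ptr)
    {
        /* Remaining per-domain setup, e.g. the identity-map page
         * table needed for EPT with CR0.PG=0. */
        return 0;
    }

    static struct xc_dom_arch xc_hvm_32 = {
        .guest_type        = "hvm-3.0-x86_32",
        .native_protocol   = XEN_IO_PROTO_ABI_X86_32,
        .page_shift        = PAGE_SHIFT_X86,
        .sizeof_pfn        = 4,
        .alloc_magic_pages = alloc_magic_pages_hvm,
        .start_info        = start_info_hvm,
        .shared_info       = shared_info_hvm,
        .vcpu              = vcpu_hvm,
    };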
---
 tools/libxc/xc_dom_x86.c | 183 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 183 insertions(+)

diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 8a1ef24..0d9ec42 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -49,6 +49,20 @@
 
 #define VGA_HOLE_SIZE (0x20)
 
+#define SPECIALPAGE_PAGING   0
+#define SPECIALPAGE_ACCESS   1
+#define SPECIALPAGE_SHARING  2
+#define SPECIALPAGE_BUFIOREQ 3
+#define SPECIALPAGE_XENSTORE 4
+#define SPECIALPAGE_IOREQ    5
+#define SPECIALPAGE_IDENT_PT 6
+#define SPECIALPAGE_CONSOLE  7
+#define NR_SPECIAL_PAGES     8
+#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
+
+#define NR_IOREQ_SERVER_PAGES 8
+#define ioreq_server_pfn(x) (special_pfn(0) - NR_IOREQ_SERVER_PAGES + (x))
+
 #define bits_to_mask(bits)       (((xen_vaddr_t)1 << (bits))-1)
 #define round_down(addr, mask)   ((addr) & ~(mask))
 #define round_up(addr, mask)     ((addr) | (mask))
@@ -467,6 +481,135 @@ static int alloc_magic_pages(struct xc_dom_image *dom)
     return 0;
 }
 
+static void build_hvm_info(void *hvm_info_page, struct xc_dom_image *dom)
+{
+    struct hvm_info_table *hvm_info = (struct hvm_info_table *)
+        (((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
+    uint8_t sum;
+    int i;
+
+    memset(hvm_info_page, 0, PAGE_SIZE);
+
+    /* Fill in the header. */
+    strncpy(hvm_info->signature, "HVM INFO", 8);
+    hvm_info->length = sizeof(struct hvm_info_table);
+
+    /* Sensible defaults: these can be overridden by the caller. */
+    hvm_info->apic_mode = 1;
+    hvm_info->nr_vcpus = 1;
+    memset(hvm_info->vcpu_online, 0xff, sizeof(hvm_info->vcpu_online));
+
+    /* Memory parameters. */
+    hvm_info->low_mem_pgend = dom->lowmem_end >> PAGE_SHIFT;
+    hvm_info->high_mem_pgend = dom->highmem_end >> PAGE_SHIFT;
+    hvm_info->reserved_mem_pgstart = ioreq_server_pfn(0);
+
+    /* Finish with the checksum. */
+    for ( i = 0, sum = 0; i < hvm_info->length; i++ )
+        sum += ((uint8_t *)hvm_info)[i];
+    hvm_info->checksum = -sum;
+}
+
+static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
+{
+    unsigned long i;
+    void *hvm_info_page;
+    uint32_t *ident_pt, domid = dom->guest_domid;
+    int rc;
+    xen_pfn_t special_array[NR_SPECIAL_PAGES];
+    xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES];
+    xc_interface *xch = dom->xch;
+
+    if ( (hvm_info_page = xc_map_foreign_range(
+              xch, domid, PAGE_SIZE, PROT_READ | PROT_WRITE,
+              HVM_INFO_PFN)) == NULL )
+        goto error_out;
+    build_hvm_info(hvm_info_page, dom);
+    munmap(hvm_info_page, PAGE_SIZE);
+
+    /* Allocate and clear special pages. */
+    for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
+        special_array[i] = special_pfn(i);
+
+    rc = xc_domain_populate_physmap_exact(xch, domid, NR_SPECIAL_PAGES, 0, 0,
+                                          special_array);
+    if ( rc != 0 )
+    {
+        DOMPRINTF("Could not allocate special pages.");
+        goto error_out;
+    }
+
+    if ( xc_clear_domain_pages(xch, domid, special_pfn(0), NR_SPECIAL_PAGES) )
+            goto error_out;
+
+    xc_hvm_param_set(xch, domid, HVM_PARAM_STORE_PFN,
+                     special_pfn(SPECIALPAGE_XENSTORE));
+    xc_hvm_param_set(xch, domid, HVM_PARAM_BUFIOREQ_PFN,
+                     special_pfn(SPECIALPAGE_BUFIOREQ));
+    xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_PFN,
+                     special_pfn(SPECIALPAGE_IOREQ));
+    xc_hvm_param_set(xch, domid, HVM_PARAM_CONSOLE_PFN,
+                     special_pfn(SPECIALPAGE_CONSOLE));
+    xc_hvm_param_set(xch, domid, HVM_PARAM_PAGING_RING_PFN,
+                     special_pfn(SPECIALPAGE_PAGING));
+    xc_hvm_param_set(xch, domid, HVM_PARAM_MONITOR_RING_PFN,
+                     special_pfn(SPECIALPAGE_ACCESS));
+    xc_hvm_param_set(xch, domid, HVM_PARAM_SHARING_RING_PFN,
+                     special_pfn(SPECIALPAGE_SHARING));
+
+    /*
+     * Allocate and clear additional ioreq server pages. The default
+     * server will use the IOREQ and BUFIOREQ special pages above.
+     */
+    for ( i = 0; i < NR_IOREQ_SERVER_PAGES; i++ )
+        ioreq_server_array[i] = ioreq_server_pfn(i);
+
+    rc = xc_domain_populate_physmap_exact(xch, domid, NR_IOREQ_SERVER_PAGES, 0,
+                                          0, ioreq_server_array);
+    if ( rc != 0 )
+    {
+        DOMPRINTF("Could not allocate ioreq server pages.");
+        goto error_out;
+    }
+
+    if ( xc_clear_domain_pages(xch, domid, ioreq_server_pfn(0),
+                               NR_IOREQ_SERVER_PAGES) )
+            goto error_out;
+
+    /* Tell the domain where the pages are and how many there are */
+    xc_hvm_param_set(xch, domid, HVM_PARAM_IOREQ_SERVER_PFN,
+                     ioreq_server_pfn(0));
+    xc_hvm_param_set(xch, domid, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                     NR_IOREQ_SERVER_PAGES);
+
+    /*
+     * Identity-map page table is required for running with CR0.PG=0 when
+     * using Intel EPT. Create a 32-bit non-PAE page directory of superpages.
+     */
+    if ( (ident_pt = xc_map_foreign_range(
+              xch, domid, PAGE_SIZE, PROT_READ | PROT_WRITE,
+              special_pfn(SPECIALPAGE_IDENT_PT))) == NULL )
+        goto error_out;
+    for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
+        ident_pt[i] = ((i << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
+                       _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
+    munmap(ident_pt, PAGE_SIZE);
+    xc_hvm_param_set(xch, domid, HVM_PARAM_IDENT_PT,
+                     special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
+
+    dom->console_pfn = special_pfn(SPECIALPAGE_CONSOLE);
+    dom->xenstore_pfn = special_pfn(SPECIALPAGE_XENSTORE);
+    dom->parms.virt_hypercall = -1;
+
+    rc = 0;
+    goto out;
+ error_out:
+    rc = -1;
+ out:
+
+    return rc;
+}
+
 /* ------------------------------------------------------------------------ */
 
 static int start_info_x86_32(struct xc_dom_image *dom)
@@ -674,6 +817,28 @@ static int vcpu_x86_64(struct xc_dom_image *dom, void *ptr)
     return 0;
 }
 
+static int vcpu_hvm(struct xc_dom_image *dom, void *ptr)
+{
+    vcpu_guest_context_x86_64_t *ctxt = ptr;
+
+    DOMPRINTF_CALLED(dom->xch);
+
+    /* clear everything */
+    memset(ctxt, 0, sizeof(*ctxt));
+
+    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
+    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
+    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
+    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
+    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
+    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
+    ctxt->user_regs.rip = dom->parms.phys_entry;
+
+    ctxt->flags = VGCF_in_kernel_X86_32 | VGCF_online_X86_32;
+
+    return 0;
+}
+
 /* ------------------------------------------------------------------------ */
 
 static struct xc_dom_arch xc_dom_32_pae = {
@@ -702,10 +867,24 @@ static struct xc_dom_arch xc_dom_64 = {
     .vcpu = vcpu_x86_64,
 };
 
+static struct xc_dom_arch xc_hvm_32 = {
+    .guest_type = "hvm-3.0-x86_32",
+    .native_protocol = XEN_IO_PROTO_ABI_X86_32,
+    .page_shift = PAGE_SHIFT_X86,
+    .sizeof_pfn = 4,
+    .alloc_magic_pages = alloc_magic_pages_hvm,
+    .count_pgtables = NULL,
+    .setup_pgtables = NULL,
+    .start_info = NULL,
+    .shared_info = NULL,
+    .vcpu = vcpu_hvm,
+};
+
 static void __init register_arch_hooks(void)
 {
     xc_dom_register_arch_hooks(&xc_dom_32_pae);
     xc_dom_register_arch_hooks(&xc_dom_64);
+    xc_dom_register_arch_hooks(&xc_hvm_32);
 }
 
 static int x86_compat(xc_interface *xch, domid_t domid, char *guest_type)
@@ -1352,6 +1531,10 @@ int arch_setup_bootlate(struct xc_dom_image *dom)
     xen_pfn_t shinfo;
     int i, rc;
 
+    if ( dom->container_type == XC_DOM_HVM_CONTAINER )
+        /* Nothing to do for HVM-type guests. */
+        return 0;
+
     for ( i = 0; i < ARRAY_SIZE(types); i++ )
         if ( !strcmp(types[i].guest, dom->guest_type) )
             pgd_type = types[i].pgd_type;
-- 
1.9.5 (Apple Git-50.3)



Thread overview: 43+ messages
2015-06-22 16:11 [PATCH RFC v1 00/13] Introduce HVM without dm and new boot ABI Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 01/13] libxc: split x86 HVM setup_guest into smaller logical functions Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 02/13] libxc: unify xc_dom_p2m_{host/guest} Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 03/13] libxc: introduce the notion of a container type Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 04/13] libxc: allow arch_setup_meminit to populate HVM domain memory Roger Pau Monne
2015-06-25 10:29   ` Wei Liu
2015-06-25 10:33     ` Wei Liu
2015-06-22 16:11 ` [PATCH RFC v1 05/13] libxc: introduce a domain loader for HVM guest firmware Roger Pau Monne
2015-06-23  9:29   ` Jan Beulich
2015-06-23  9:36     ` Roger Pau Monné
2015-07-10 19:09   ` Konrad Rzeszutek Wilk
2015-06-22 16:11 ` Roger Pau Monne [this message]
2015-06-22 16:11 ` [PATCH RFC v1 07/13] libxl: switch HVM domain building to use xc_dom_* helpers Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 08/13] libxc: remove dead x86 HVM code Roger Pau Monne
2015-06-22 16:11 ` [PATCH RFC v1 09/13] elfnotes: introduce a new PHYS_ENTRY elfnote Roger Pau Monne
2015-06-23  9:35   ` Jan Beulich
2015-06-23  9:40     ` Roger Pau Monné
2015-06-23 10:01       ` Jan Beulich
2015-06-22 16:11 ` [PATCH RFC v1 10/13] lib{xc/xl}: allow the creation of HVM domains with a kernel Roger Pau Monne
2015-06-25 10:39   ` Wei Liu
2015-06-22 16:11 ` [PATCH RFC v1 11/13] xen/libxl: allow creating HVM guests without a device model Roger Pau Monne
2015-06-23  9:41   ` Jan Beulich
2015-06-22 16:11 ` [PATCH RFC v1 12/13] xen: allow 64bit HVM guests to use XENMEM_memory_map Roger Pau Monne
2015-06-23  9:43   ` Jan Beulich
2015-06-22 16:11 ` [PATCH RFC v1 13/13] xenconsole: try to attach to PV console if HVM fails Roger Pau Monne
2015-06-22 17:55 ` [PATCH RFC v1 00/13] Introduce HVM without dm and new boot ABI Stefano Stabellini
2015-06-22 18:05   ` Konrad Rzeszutek Wilk
2015-06-23  8:14     ` Roger Pau Monné
2015-06-23 10:55     ` Stefano Stabellini
2015-06-23 12:50       ` Ian Campbell
2015-06-23 13:12         ` Stefano Stabellini
2015-06-24  2:45           ` Boris Ostrovsky
2015-06-24  9:47       ` Roger Pau Monné
2015-06-24 10:05         ` Jan Beulich
2015-06-24 10:14           ` Roger Pau Monné
2015-06-24 11:52             ` Boris Ostrovsky
2015-06-24 12:04               ` Roger Pau Monné
2015-06-24 13:36                 ` Konrad Rzeszutek Wilk
2015-07-03 16:22               ` Tim Deegan
2015-06-24 13:26         ` Stefano Stabellini
2015-06-24 16:30           ` Boris Ostrovsky
2015-06-24 17:54             ` Stefano Stabellini
2015-06-23  7:14   ` Roger Pau Monné
