From: Dulloor <dulloor@gmail.com>
To: xen-devel@lists.xensource.com
Subject: [vNUMA v2][PATCH 5/8] cleanup setup_guest function
Date: Sun, 1 Aug 2010 15:04:02 -0700
Message-ID: <AANLkTikyK00jXPjosZU0LDBubfT8G6iK6j0iML3FK3PJ@mail.gmail.com>
In-Reply-To: <AANLkTilF3eQAQqeoKF6WZELTi8hYGdzsdDY53uxJnI3o@mail.gmail.com>


Clean up and reorganize setup_guest(), in preparation for the next patch.
No logic change intended: physmap population and the ELF image load move
into a new setup_guest_memory() helper, the HVM info page and special-page
setup move into setup_guest_special_pages(), and the duplicated
1GB/2MB/4kB reservation code is folded into a single populate_physmap()
wrapper around XENMEM_populate_physmap.
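
After this change setup_guest() reduces to roughly the following call flow
(a sketch lifted from the patch below; the JMP-to-entry-point fixup that
follows it is unchanged):

    rc = setup_guest_memory(xch, dom,
                            (unsigned long)memsize << (20 - PAGE_SHIFT),
                            (unsigned long)target << (20 - PAGE_SHIFT), &elf);
    if ( rc < 0 )
        goto error_out;

    rc = setup_guest_special_pages(xch, dom, v_end);
    if ( rc < 0 )
        goto error_out;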

-dulloor

Signed-off-by: Dulloor <dulloor@gmail.com>

[-- Attachment #2: xen-05-setup-guest-cleanup.patch --]

setup_guest cleanup

diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -115,58 +115,45 @@ static int check_mmio_hole(uint64_t star
         return 1;
 }
 
-static int setup_guest(xc_interface *xch,
-                       uint32_t dom, int memsize, int target,
-                       char *image, unsigned long image_size)
+static long populate_physmap(xc_interface *xch, uint32_t domid,
+                            unsigned long count, unsigned long extent_order,
+                            unsigned int mem_flags, xen_pfn_t *extent_start,
+                            unsigned long cur_pages)
+{
+    unsigned long i;
+    xen_pfn_t sp_extents[count >> extent_order];
+    struct xen_memory_reservation reservation = {
+        .nr_extents   = count >> extent_order,
+        .extent_order = extent_order,
+        .mem_flags    = mem_flags,
+        .domid        = domid
+    };
+
+    if ( extent_order )
+    {
+        for ( i = 0; i < reservation.nr_extents; i++ )
+            sp_extents[i] = extent_start[cur_pages+(i<<extent_order)];
+        extent_start = sp_extents;
+    }
+    else
+        extent_start += cur_pages;
+    set_xen_guest_handle(reservation.extent_start, extent_start);
+
+    return xc_memory_op(xch, XENMEM_populate_physmap, &reservation);
+}
+
+static int setup_guest_memory(xc_interface *xch, uint32_t dom,
+                            unsigned long nr_pages, unsigned long target_pages,
+                            struct elf_binary *elf)
 {
     xen_pfn_t *page_array = NULL;
-    unsigned long i, nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
-    unsigned long target_pages = (unsigned long)target << (20 - PAGE_SHIFT);
-    unsigned long pod_pages = 0;
-    unsigned long entry_eip, cur_pages;
-    void *hvm_info_page;
-    uint32_t *ident_pt;
-    struct elf_binary elf;
-    uint64_t v_start, v_end;
-    int rc;
-    xen_capabilities_info_t caps;
-    unsigned long stat_normal_pages = 0, stat_2mb_pages = 0, 
-        stat_1gb_pages = 0;
+    unsigned long pod_pages = 0, cur_pages, i;
+    unsigned long stat_normal_pages = 0, stat_2mb_pages = 0, stat_1gb_pages = 0;
     int pod_mode = 0;
 
-    /* An HVM guest must be initialised with at least 2MB memory. */
-    if ( memsize < 2 || target < 2 )
-        goto error_out;
-
-    if ( memsize > target )
+    if ( nr_pages > target_pages )
         pod_mode = 1;
 
-    if ( elf_init(&elf, image, image_size) != 0 )
-        goto error_out;
-    elf_parse_binary(&elf);
-    v_start = 0;
-    v_end = (unsigned long long)memsize << 20;
-
-    if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
-    {
-        PERROR("Could not get Xen capabilities");
-        goto error_out;
-    }
-
-    if ( (elf.pstart & (PAGE_SIZE - 1)) != 0 )
-    {
-        PERROR("Guest OS must load to a page boundary.");
-        goto error_out;
-    }
-
-    IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
-            "  Loader:        %016"PRIx64"->%016"PRIx64"\n"
-            "  TOTAL:         %016"PRIx64"->%016"PRIx64"\n"
-            "  ENTRY ADDRESS: %016"PRIx64"\n",
-            elf.pstart, elf.pend,
-            v_start, v_end,
-            elf_uval(&elf, elf.ehdr, e_entry));
-
     if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
     {
         PERROR("Could not allocate memory.");
@@ -188,54 +175,72 @@ static int setup_guest(xc_interface *xch
      * Under 2MB mode, we allocate pages in batches of no more than 8MB to 
      * ensure that we can be preempted and hence dom0 remains responsive.
      */
-    rc = xc_domain_memory_populate_physmap(
-        xch, dom, 0xa0, 0, 0, &page_array[0x00]);
+    if ( populate_physmap(xch, dom, 0xa0, 0, 0, page_array, 0x00) != 0xa0 )
+    {
+        PERROR("Could not allocate memory.");
+        goto error_out;
+    }
     cur_pages = 0xc0;
     stat_normal_pages = 0xc0;
-    while ( (rc == 0) && (nr_pages > cur_pages) )
+
+#define ALIGN_COUNT_TO_MAX_PAGES(count, cur_pages, max_pages)      \
+do {                                                                \
+    /* Take care of the corner cases of super page tails */        \
+    if ( ((cur_pages & (max_pages-1)) != 0) &&                      \
+         (count > (-cur_pages & (max_pages-1))) )                   \
+        count = -cur_pages & (max_pages-1);                         \
+    else if ( ((count & (max_pages-1)) != 0) &&                     \
+              (count > max_pages) )                                 \
+        count &= ~(max_pages-1);                                    \
+} while (0)
+
+    while ( nr_pages > cur_pages )
     {
         /* Clip count to maximum 1GB extent. */
+        long done;
         unsigned long count = nr_pages - cur_pages;
-        unsigned long max_pages = SUPERPAGE_1GB_NR_PFNS;
 
-        if ( count > max_pages )
-            count = max_pages;
-        
-        /* Take care the corner cases of super page tails */
-        if ( ((cur_pages & (SUPERPAGE_1GB_NR_PFNS-1)) != 0) &&
-             (count > (-cur_pages & (SUPERPAGE_1GB_NR_PFNS-1))) )
-            count = -cur_pages & (SUPERPAGE_1GB_NR_PFNS-1);
-        else if ( ((count & (SUPERPAGE_1GB_NR_PFNS-1)) != 0) &&
-                  (count > SUPERPAGE_1GB_NR_PFNS) )
-            count &= ~(SUPERPAGE_1GB_NR_PFNS - 1);
+        if ( count > SUPERPAGE_1GB_NR_PFNS )
+            count = SUPERPAGE_1GB_NR_PFNS;
 
-        /* Attemp to allocate 1GB super page. Because in each pass we only
+        /* Attempt to allocate 1GB super page. Because in each pass we only
          * allocate at most 1GB, we don't have to clip super page boundaries.
          */
+        ALIGN_COUNT_TO_MAX_PAGES(count, cur_pages, SUPERPAGE_1GB_NR_PFNS);
         if ( ((count | cur_pages) & (SUPERPAGE_1GB_NR_PFNS - 1)) == 0 &&
              /* Check if there exists MMIO hole in the 1GB memory range */
              !check_mmio_hole(cur_pages << PAGE_SHIFT,
                               SUPERPAGE_1GB_NR_PFNS << PAGE_SHIFT) )
         {
-            long done;
-            xen_pfn_t sp_extents[count >> SUPERPAGE_1GB_SHIFT];
-            struct xen_memory_reservation sp_req = {
-                .nr_extents   = count >> SUPERPAGE_1GB_SHIFT,
-                .extent_order = SUPERPAGE_1GB_SHIFT,
-                .domid        = dom
-            };
+            done = populate_physmap(xch, dom, count, SUPERPAGE_1GB_SHIFT,
+                                    pod_mode ? XENMEMF_populate_on_demand : 0,
+                                    page_array, cur_pages);
+            stat_1gb_pages += done;
+            done <<= SUPERPAGE_1GB_SHIFT;
+            if ( pod_mode && target_pages > cur_pages )
+            {
+                int d = target_pages - cur_pages;
+                pod_pages += ( done < d ) ? done : d;
+            }
+            cur_pages += done;
+            count -= done;
+        }
 
-            if ( pod_mode )
-                sp_req.mem_flags = XENMEMF_populate_on_demand;
-
-            set_xen_guest_handle(sp_req.extent_start, sp_extents);
-            for ( i = 0; i < sp_req.nr_extents; i++ )
-                sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
-            done = xc_memory_op(xch, XENMEM_populate_physmap, &sp_req);
-            if ( done > 0 )
+        if ( count != 0 )
+        {
+            /* Clip count to maximum 8MB extent. */
+            if ( count > SUPERPAGE_2MB_NR_PFNS * 4 )
+                count = SUPERPAGE_2MB_NR_PFNS * 4;
+
+            /* Attempt to allocate superpage extents. */
+            ALIGN_COUNT_TO_MAX_PAGES(count, cur_pages, SUPERPAGE_2MB_NR_PFNS);
+            if ( ((count | cur_pages) & (SUPERPAGE_2MB_NR_PFNS - 1)) == 0 )
             {
-                stat_1gb_pages += done;
-                done <<= SUPERPAGE_1GB_SHIFT;
+                done = populate_physmap(xch, dom, count, SUPERPAGE_2MB_SHIFT,
+                                    pod_mode ? XENMEMF_populate_on_demand : 0,
+                                    page_array, cur_pages);
+                stat_2mb_pages += done;
+                done <<= SUPERPAGE_2MB_SHIFT;
                 if ( pod_mode && target_pages > cur_pages )
                 {
                     int d = target_pages - cur_pages;
@@ -246,76 +251,32 @@ static int setup_guest(xc_interface *xch
             }
         }
 
-        if ( count != 0 )
-        {
-            /* Clip count to maximum 8MB extent. */
-            max_pages = SUPERPAGE_2MB_NR_PFNS * 4;
-            if ( count > max_pages )
-                count = max_pages;
-            
-            /* Clip partial superpage extents to superpage boundaries. */
-            if ( ((cur_pages & (SUPERPAGE_2MB_NR_PFNS-1)) != 0) &&
-                 (count > (-cur_pages & (SUPERPAGE_2MB_NR_PFNS-1))) )
-                count = -cur_pages & (SUPERPAGE_2MB_NR_PFNS-1);
-            else if ( ((count & (SUPERPAGE_2MB_NR_PFNS-1)) != 0) &&
-                      (count > SUPERPAGE_2MB_NR_PFNS) )
-                count &= ~(SUPERPAGE_2MB_NR_PFNS - 1); /* clip non-s.p. tail */
-
-            /* Attempt to allocate superpage extents. */
-            if ( ((count | cur_pages) & (SUPERPAGE_2MB_NR_PFNS - 1)) == 0 )
-            {
-                long done;
-                xen_pfn_t sp_extents[count >> SUPERPAGE_2MB_SHIFT];
-                struct xen_memory_reservation sp_req = {
-                    .nr_extents   = count >> SUPERPAGE_2MB_SHIFT,
-                    .extent_order = SUPERPAGE_2MB_SHIFT,
-                    .domid        = dom
-                };
-
-                if ( pod_mode )
-                    sp_req.mem_flags = XENMEMF_populate_on_demand;
-
-                set_xen_guest_handle(sp_req.extent_start, sp_extents);
-                for ( i = 0; i < sp_req.nr_extents; i++ )
-                    sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
-                done = xc_memory_op(xch, XENMEM_populate_physmap, &sp_req);
-                if ( done > 0 )
-                {
-                    stat_2mb_pages += done;
-                    done <<= SUPERPAGE_2MB_SHIFT;
-                    if ( pod_mode && target_pages > cur_pages )
-                    {
-                        int d = target_pages - cur_pages;
-                        pod_pages += ( done < d ) ? done : d;
-                    }
-                    cur_pages += done;
-                    count -= done;
-                }
-            }
-        }
-
         /* Fall back to 4kB extents. */
         if ( count != 0 )
         {
-            rc = xc_domain_memory_populate_physmap(
-                xch, dom, count, 0, 0, &page_array[cur_pages]);
+            done = populate_physmap(xch, dom, count, 0, 0,
+                                    page_array, cur_pages);
+            if ( done != count )
+            {
+                PERROR("Could not allocate memory for HVM guest.");
+                goto error_out;
+            }
+            stat_normal_pages += count;
             cur_pages += count;
-            stat_normal_pages += count;
             if ( pod_mode )
                 pod_pages -= count;
         }
     }
+#undef ALIGN_COUNT_TO_MAX_PAGES
 
     if ( pod_mode )
-        rc = xc_domain_memory_set_pod_target(xch,
-                                             dom,
-                                             pod_pages,
-                                             NULL, NULL, NULL);
-
-    if ( rc != 0 )
     {
-        PERROR("Could not allocate memory for HVM guest.");
-        goto error_out;
+        if ( xc_domain_memory_set_pod_target(xch, dom, pod_pages,
+                                                    NULL, NULL, NULL) )
+        {
+            PERROR("Could not set POD target for HVM guest.");
+            goto error_out;
+        }
     }
 
     IPRINTF("PHYSICAL MEMORY ALLOCATION:\n"
@@ -323,23 +284,37 @@ static int setup_guest(xc_interface *xch
             "  2MB PAGES: 0x%016lx\n"
             "  1GB PAGES: 0x%016lx\n",
             stat_normal_pages, stat_2mb_pages, stat_1gb_pages);
-    
-    if ( loadelfimage(xch, &elf, dom, page_array) != 0 )
+
+    if ( loadelfimage(xch, elf, dom, page_array) )
         goto error_out;
+    free(page_array);
+    return 0;
+
+error_out:
+    if ( page_array )
+        free(page_array);
+    return -1;
+}
+
+static int
+setup_guest_special_pages(xc_interface *xch, uint32_t dom, uint64_t v_end)
+{
+    void *hvm_info_page;
+    uint32_t *ident_pt;
+    unsigned long i;
 
     if ( (hvm_info_page = xc_map_foreign_range(
               xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
               HVM_INFO_PFN)) == NULL )
         goto error_out;
-    build_hvm_info(hvm_info_page, v_end);
+    build_hvm_info(hvm_info_page, v_end);
     munmap(hvm_info_page, PAGE_SIZE);
 
     /* Allocate and clear special pages. */
     for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
     {
         xen_pfn_t pfn = special_pfn(i);
-        rc = xc_domain_memory_populate_physmap(xch, dom, 1, 0, 0, &pfn);
-        if ( rc != 0 )
+        if ( xc_domain_memory_populate_physmap(xch, dom, 1, 0, 0, &pfn) )
         {
             PERROR("Could not allocate %d'th special page.", i);
             goto error_out;
@@ -370,6 +345,61 @@ static int setup_guest(xc_interface *xch
     xc_set_hvm_param(xch, dom, HVM_PARAM_IDENT_PT,
                      special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
 
+    return 0;
+error_out:
+    return -1;
+}
+
+static int setup_guest(xc_interface *xch,
+                       uint32_t dom, int memsize, int target,
+                       char *image, unsigned long image_size)
+{
+    unsigned long entry_eip;
+    struct elf_binary elf;
+    uint64_t v_start, v_end;
+    int rc;
+    xen_capabilities_info_t caps;
+
+    /* An HVM guest must be initialised with at least 2MB memory. */
+    if ( memsize < 2 || target < 2 )
+        goto error_out;
+
+    if ( elf_init(&elf, image, image_size) != 0 )
+        goto error_out;
+    elf_parse_binary(&elf);
+    v_start = 0;
+    v_end = (unsigned long long)memsize << 20;
+
+    if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
+    {
+        PERROR("Could not get Xen capabilities");
+        goto error_out;
+    }
+
+    if ( (elf.pstart & (PAGE_SIZE - 1)) != 0 )
+    {
+        PERROR("Guest OS must load to a page boundary.");
+        goto error_out;
+    }
+
+    IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
+            "  Loader:        %016"PRIx64"->%016"PRIx64"\n"
+            "  TOTAL:         %016"PRIx64"->%016"PRIx64"\n"
+            "  ENTRY ADDRESS: %016"PRIx64"\n",
+            elf.pstart, elf.pend,
+            v_start, v_end,
+            elf_uval(&elf, elf.ehdr, e_entry));
+
+    rc = setup_guest_memory(xch, dom,
+                            (unsigned long)memsize << (20 - PAGE_SHIFT),
+                            (unsigned long)target << (20 - PAGE_SHIFT), &elf);
+    if ( rc < 0 )
+        goto error_out;
+
+    rc = setup_guest_special_pages(xch, dom, v_end);
+    if ( rc < 0 )
+        goto error_out;
+
     /* Insert JMP <rel32> instruction at address 0x0 to reach entry point. */
     entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
     if ( entry_eip != 0 )
@@ -383,11 +413,9 @@ static int setup_guest(xc_interface *xch
         munmap(page0, PAGE_SIZE);
     }
 
-    free(page_array);
     return 0;
 
  error_out:
-    free(page_array);
     return -1;
 }
 
