xen-devel.lists.xenproject.org archive mirror
* [PATCH 1/2] x86/PoD: prevent guest from being destroyed upon early access to its memory
@ 2012-08-14  8:45 Jan Beulich
  2012-08-15 13:50 ` George Dunlap
  0 siblings, 1 reply; 3+ messages in thread
From: Jan Beulich @ 2012-08-14  8:45 UTC (permalink / raw)
  To: xen-devel; +Cc: Juergen Gross

x86/PoD: prevent guest from being destroyed upon early access to its memory

When an external agent (e.g. a monitoring daemon) happens to access the
memory of a PoD guest before its PoD target has been set, that access is
bound to fail: there is not yet any page in the PoD cache, and only the
space above the low 2Mb gets scanned for victim pages (while so far only
the low 2Mb has had real pages populated).

To accommodate this:
- set the PoD target first
- do all physmap population in PoD mode (i.e. not just large [2Mb or
  1Gb] pages)
- slightly lift the restrictions enforced by p2m_pod_set_mem_target()
  to accommodate the changed tools behavior

Tested-by: Jürgen Groß <juergen.gross@ts.fujitsu.com>
           (in a 4.0.x based incarnation)
Signed-off-by: Jan Beulich <jbeulich@suse.com>
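
For illustration, a rough sketch of the reordered tool-side flow (a
condensed fragment, not the literal patched setup_guest(); the helper
name is made up here, and superpage batching as well as cleanup are
omitted):

    /* Hypothetical condensed view of the new ordering in setup_guest(). */
    static int populate_pod_first(xc_interface *xch, uint32_t dom,
                                  unsigned long nr_pages,
                                  unsigned long target_pages,
                                  xen_pfn_t *page_array)
    {
        /* Non-zero exactly when the guest is to run in PoD mode. */
        int pod_mode = (nr_pages > target_pages)
                       ? XENMEMF_populate_on_demand : 0;
        int rc;

        if ( pod_mode )
        {
            /* Set the PoD target before any physmap population, so that an
             * early external access finds a usable PoD cache.  The 0x20
             * pages subtracted correspond to the 128kB VGA hole at
             * 0xA0000-0xC0000. */
            rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
                                          NULL, NULL, NULL);
            if ( rc != 0 )
                return rc;
        }

        /* All population calls, down to the 4k pages below 2Mb, now pass
         * the PoD flag (only the first one is shown here). */
        return xc_domain_populate_physmap_exact(xch, dom, 0xa0, 0, pod_mode,
                                                &page_array[0x00]);
    }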

--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -160,7 +160,7 @@ static int setup_guest(xc_interface *xch
     int pod_mode = 0;
 
     if ( nr_pages > target_pages )
-        pod_mode = 1;
+        pod_mode = XENMEMF_populate_on_demand;
 
     memset(&elf, 0, sizeof(elf));
     if ( elf_init(&elf, image, image_size) != 0 )
@@ -197,6 +197,22 @@ static int setup_guest(xc_interface *xch
     for ( i = mmio_start >> PAGE_SHIFT; i < nr_pages; i++ )
         page_array[i] += mmio_size >> PAGE_SHIFT;
 
+    if ( pod_mode )
+    {
+        /*
+         * Subtract 0x20 from target_pages for the VGA "hole".  Xen will
+         * adjust the PoD cache size so that domain tot_pages will be
+         * target_pages - 0x20 after this call.
+         */
+        rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
+                                      NULL, NULL, NULL);
+        if ( rc != 0 )
+        {
+            PERROR("Could not set PoD target for HVM guest.\n");
+            goto error_out;
+        }
+    }
+
     /*
      * Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000.
      *
@@ -208,7 +224,7 @@ static int setup_guest(xc_interface *xch
      * ensure that we can be preempted and hence dom0 remains responsive.
      */
     rc = xc_domain_populate_physmap_exact(
-        xch, dom, 0xa0, 0, 0, &page_array[0x00]);
+        xch, dom, 0xa0, 0, pod_mode, &page_array[0x00]);
     cur_pages = 0xc0;
     stat_normal_pages = 0xc0;
     while ( (rc == 0) && (nr_pages > cur_pages) )
@@ -247,8 +263,7 @@ static int setup_guest(xc_interface *xch
                 sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
 
             done = xc_domain_populate_physmap(xch, dom, nr_extents, SUPERPAGE_1GB_SHIFT,
-                                              pod_mode ? XENMEMF_populate_on_demand : 0,
-                                              sp_extents);
+                                              pod_mode, sp_extents);
 
             if ( done > 0 )
             {
@@ -285,8 +300,7 @@ static int setup_guest(xc_interface *xch
                     sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
 
                 done = xc_domain_populate_physmap(xch, dom, nr_extents, SUPERPAGE_2MB_SHIFT,
-                                                  pod_mode ? XENMEMF_populate_on_demand : 0,
-                                                  sp_extents);
+                                                  pod_mode, sp_extents);
 
                 if ( done > 0 )
                 {
@@ -302,19 +316,12 @@ static int setup_guest(xc_interface *xch
         if ( count != 0 )
         {
             rc = xc_domain_populate_physmap_exact(
-                xch, dom, count, 0, 0, &page_array[cur_pages]);
+                xch, dom, count, 0, pod_mode, &page_array[cur_pages]);
             cur_pages += count;
             stat_normal_pages += count;
         }
     }
 
-    /* Subtract 0x20 from target_pages for the VGA "hole".  Xen will
-     * adjust the PoD cache size so that domain tot_pages will be
-     * target_pages - 0x20 after this call. */
-    if ( pod_mode )
-        rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
-                                      NULL, NULL, NULL);
-
     if ( rc != 0 )
     {
         PERROR("Could not allocate memory for HVM guest.");
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -344,8 +344,9 @@ p2m_pod_set_mem_target(struct domain *d,
 
     pod_lock(p2m);
 
-    /* P == B: Nothing to do. */
-    if ( p2m->pod.entry_count == 0 )
+    /* P == B: Nothing to do (unless the guest is being created). */
+    populated = d->tot_pages - p2m->pod.count;
+    if ( populated > 0 && p2m->pod.entry_count == 0 )
         goto out;
 
     /* Don't do anything if the domain is being torn down */
@@ -357,13 +358,11 @@ p2m_pod_set_mem_target(struct domain *d,
     if ( target < d->tot_pages )
         goto out;
 
-    populated  = d->tot_pages - p2m->pod.count;
-
     pod_target = target - populated;
 
     /* B < T': Set the cache size equal to # of outstanding entries,
      * let the balloon driver fill in the rest. */
-    if ( pod_target > p2m->pod.entry_count )
+    if ( populated > 0 && pod_target > p2m->pod.entry_count )
         pod_target = p2m->pod.entry_count;
 
     ASSERT( pod_target >= p2m->pod.count );
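
As a reading aid, here is my paraphrase of the accounting used by
p2m_pod_set_mem_target(), with the variables from the hunk above (not
authoritative, just how the relaxed checks fit together):

    populated  = d->tot_pages - p2m->pod.count; /* pages backed by real RAM
                                                   outside the PoD cache    */
    pod_target = target - populated;            /* desired PoD cache size   */

    /* Previously the function bailed out when entry_count == 0 and capped
     * pod_target at entry_count.  With the tools now calling
     * xc_domain_set_pod_target() before populating anything, tot_pages and
     * pod.count are (typically) both still zero, i.e. populated == 0, so
     * both checks get the additional "populated > 0 &&" condition and the
     * cache can be sized up front while the guest is being created. */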




* Re: [PATCH 1/2] x86/PoD: prevent guest from being destroyed upon early access to its memory
  2012-08-14  8:45 [PATCH 1/2] x86/PoD: prevent guest from being destroyed upon early access to its memory Jan Beulich
@ 2012-08-15 13:50 ` George Dunlap
  2012-08-16  4:34   ` Juergen Gross
  0 siblings, 1 reply; 3+ messages in thread
From: George Dunlap @ 2012-08-15 13:50 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Juergen Gross, xen-devel

On Tue, Aug 14, 2012 at 9:45 AM, Jan Beulich <JBeulich@suse.com> wrote:
> x86/PoD: prevent guest from being destroyed upon early access to its memory
>
> When an external agent (e.g. a monitoring daemon) happens to access the
> memory of a PoD guest before its PoD target has been set, that access is
> bound to fail: there is not yet any page in the PoD cache, and only the
> space above the low 2Mb gets scanned for victim pages (while so far only
> the low 2Mb has had real pages populated).
>
> To accommodate this:
> - set the PoD target first
> - do all physmap population in PoD mode (i.e. not just large [2Mb or
>   1Gb] pages)
> - slightly lift the restrictions enforced by p2m_pod_set_mem_target()
>   to accommodate the changed tools behavior
>
> Tested-by: Jürgen Groß <juergen.gross@ts.fujitsu.com>
>            (in a 4.0.x based incarnation)
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: George Dunlap <george.dunlap@eu.citrix.com>

However, my "hg qpush" chokes on the German characters in Juergen's name...

 -George



* Re: [PATCH 1/2] x86/PoD: prevent guest from being destroyed upon early access to its memory
  2012-08-15 13:50 ` George Dunlap
@ 2012-08-16  4:34   ` Juergen Gross
  0 siblings, 0 replies; 3+ messages in thread
From: Juergen Gross @ 2012-08-16  4:34 UTC (permalink / raw)
  To: George Dunlap; +Cc: Jan Beulich, xen-devel

On 15.08.2012 15:50, George Dunlap wrote:
>
> Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
>
> However, my "hg qpush" chokes on the German characters in Juergen's name...

:-)

Feel free to change it to "Juergen Gross <juergen.gross@ts.fujitsu.com>".


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
PDG ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html
