xen-devel.lists.xenproject.org archive mirror

* [PATCH v2 1/3] xen: delay page scrubbing to allocation path
@ 2014-06-30 13:39 Bob Liu
  2014-06-30 15:56 ` Jan Beulich
  0 siblings, 1 reply; 4+ messages in thread
From: Bob Liu @ 2014-06-30 13:39 UTC (permalink / raw)
  To: xen-devel; +Cc: keir, ian.campbell, George.Dunlap, andrew.cooper3, JBeulich

Because of page scrubbing, destroying a domain with a large amount of memory is
very slow; it takes around 10 minutes to destroy a guest with nearly 1 TB of memory.

This patch introduces a "PGC_need_scrub" flag; pages carrying this flag must be
scrubbed before use. During domain destruction, pages are only marked
"PGC_need_scrub" and then added to the free list, so that xl can return quickly.

Note: PGC_need_scrub pages and normal pages are not mergeable.
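
In outline, the resulting flow is as follows (a condensed, illustrative sketch
of the hunks below; the need_scrub/tainted checks, locking and offline-page
handling are elided):

    /* free_heap_pages(pg, order, need_scrub): tag dirty pages, do not scrub. */
    for ( i = 0; i < (1 << order); i++ )
        pg[i].count_info |= PGC_need_scrub;

    /*
     * Buddy merging: a chunk is only merged with a neighbour whose
     * PGC_need_scrub state matches, so dirty and clean memory never mix.
     */

    /* alloc_heap_pages(): scrub lazily, and only the pages actually handed out. */
    for ( i = 0; i < (1 << order); i++ )
    {
        if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
        {
            scrub_one_page(&pg[i]);
            pg[i].count_info &= ~PGC_need_scrub;
        }
    }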

v2:
 * Fix issue: avoid scrubbing a whole 4Tb chunk when such a chunk is found for
 a request of a single page.
 * Replace more scrub_one_page() call sites by setting "need_scrub".
 * No longer use an extra _scrub[] array.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 xen/common/page_alloc.c  |   63 ++++++++++++++++++++++++++++++++++------------
 xen/include/asm-arm/mm.h |    5 +++-
 xen/include/asm-x86/mm.h |    5 +++-
 3 files changed, 55 insertions(+), 18 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 58677d0..c184f86 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -711,6 +711,12 @@ static struct page_info *alloc_heap_pages(
 
     for ( i = 0; i < (1 << order); i++ )
     {
+        if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
+        {
+            scrub_one_page(&pg[i]);
+            pg[i].count_info &= ~PGC_need_scrub;
+        }
+
         /* Reference count must continuously be zero for free pages. */
         BUG_ON(pg[i].count_info != PGC_state_free);
         pg[i].count_info = PGC_state_inuse;
@@ -827,7 +833,7 @@ static int reserve_offlined_page(struct page_info *head)
 
 /* Free 2^@order set of pages. */
 static void free_heap_pages(
-    struct page_info *pg, unsigned int order)
+    struct page_info *pg, unsigned int order, bool_t need_scrub)
 {
     unsigned long mask, mfn = page_to_mfn(pg);
     unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
@@ -876,6 +882,15 @@ static void free_heap_pages(
         midsize_alloc_zone_pages = max(
             midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
 
+    if ( need_scrub )
+    {
+        if ( !tainted )
+        {
+            for ( i = 0; i < (1 << order); i++ )
+                pg[i].count_info |= PGC_need_scrub;
+        }
+    }
+
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
     {
@@ -889,6 +904,17 @@ static void free_heap_pages(
                  (PFN_ORDER(pg-mask) != order) ||
                  (phys_to_nid(page_to_maddr(pg-mask)) != node) )
                 break;
+            /* If we need scrub, only merge with PGC_need_scrub pages */
+            if ( need_scrub )
+            {
+                if ( !test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
+                    break;
+            }
+            else
+            {
+                if ( test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
+                    break;
+            }
             pg -= mask;
             page_list_del(pg, &heap(node, zone, order));
         }
@@ -900,6 +926,16 @@ static void free_heap_pages(
                  (PFN_ORDER(pg+mask) != order) ||
                  (phys_to_nid(page_to_maddr(pg+mask)) != node) )
                 break;
+            if ( need_scrub )
+            {
+                if ( !test_bit(_PGC_need_scrub, &(pg+mask)->count_info) )
+                    break;
+            }
+            else
+            {
+                if ( test_bit(_PGC_need_scrub, &(pg+mask)->count_info) )
+                    break;
+            }
             page_list_del(pg + mask, &heap(node, zone, order));
         }
 
@@ -1132,7 +1168,7 @@ unsigned int online_page(unsigned long mfn, uint32_t *status)
     spin_unlock(&heap_lock);
 
     if ( (y & PGC_state) == PGC_state_offlined )
-        free_heap_pages(pg, 0);
+        free_heap_pages(pg, 0, 0);
 
     return ret;
 }
@@ -1201,7 +1237,7 @@ static void init_heap_pages(
             nr_pages -= n;
         }
 
-        free_heap_pages(pg+i, 0);
+        free_heap_pages(pg+i, 0, 0);
     }
 }
 
@@ -1535,7 +1571,7 @@ void free_xenheap_pages(void *v, unsigned int order)
 
     memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
 
-    free_heap_pages(virt_to_page(v), order);
+    free_heap_pages(virt_to_page(v), order, 1);
 }
 
 #else
@@ -1588,11 +1624,10 @@ void free_xenheap_pages(void *v, unsigned int order)
 
     for ( i = 0; i < (1u << order); i++ )
     {
-        scrub_one_page(&pg[i]);
         pg[i].count_info &= ~PGC_xen_heap;
     }
 
-    free_heap_pages(pg, order);
+    free_heap_pages(pg, order, 1);
 }
 
 #endif
@@ -1696,7 +1731,7 @@ struct page_info *alloc_domheap_pages(
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
     {
-        free_heap_pages(pg, order);
+        free_heap_pages(pg, order, 0);
         return NULL;
     }
     
@@ -1745,24 +1780,20 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
          * domain has died we assume responsibility for erasure.
          */
         if ( unlikely(d->is_dying) )
-            for ( i = 0; i < (1 << order); i++ )
-                scrub_one_page(&pg[i]);
-
-        free_heap_pages(pg, order);
+            free_heap_pages(pg, order, 1);
+        else
+            free_heap_pages(pg, order, 0);
     }
     else if ( unlikely(d == dom_cow) )
     {
         ASSERT(order == 0); 
-        scrub_one_page(pg);
-        free_heap_pages(pg, 0);
+        free_heap_pages(pg, 0, 1);
         drop_dom_ref = 0;
     }
     else
     {
         /* Freeing anonymous domain-heap pages. */
-        for ( i = 0; i < (1 << order); i++ )
-            scrub_one_page(&pg[i]);
-        free_heap_pages(pg, order);
+        free_heap_pages(pg, order, 1);
         drop_dom_ref = 0;
     }
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 2552d34..e8913a8 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -103,9 +103,12 @@ struct page_info
 #define PGC_state_offlined PG_mask(2, 9)
 #define PGC_state_free    PG_mask(3, 9)
 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+/* Page needs to be scrubbed */
+#define _PGC_need_scrub   PG_shift(10)
+#define PGC_need_scrub    PG_mask(1, 10)
 
 /* Count of references to this frame. */
-#define PGC_count_width   PG_shift(9)
+#define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
 extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index d253117..35746ab 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -223,9 +223,12 @@ struct page_info
 #define PGC_state_offlined PG_mask(2, 9)
 #define PGC_state_free    PG_mask(3, 9)
 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+/* Page needs to be scrubbed */
+#define _PGC_need_scrub   PG_shift(10)
+#define PGC_need_scrub    PG_mask(1, 10)
 
  /* Count of references to this frame. */
-#define PGC_count_width   PG_shift(9)
+#define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
 struct spage_info
-- 
1.7.10.4


* Re: [PATCH v2 1/3] xen: delay page scrubbing to allocation path
  2014-06-30 13:39 Bob Liu
@ 2014-06-30 15:56 ` Jan Beulich
  2014-07-01  8:12   ` Bob Liu
  0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2014-06-30 15:56 UTC (permalink / raw)
  To: Bob Liu; +Cc: keir, ian.campbell, George.Dunlap, andrew.cooper3, xen-devel

>>> On 30.06.14 at 15:39, <lliubbo@gmail.com> wrote:
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -711,6 +711,12 @@ static struct page_info *alloc_heap_pages(
>  
>      for ( i = 0; i < (1 << order); i++ )
>      {
> +        if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
> +        {
> +            scrub_one_page(&pg[i]);
> +            pg[i].count_info &= ~PGC_need_scrub;
> +        }
> +

heap_lock is still being held here - scrubbing should be done after it
has been dropped (or else you reintroduce the same latency problem for
other paths, which now need to wait for the scrubbing to complete).
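
I.e. roughly the following shape (an illustrative fragment only - the chunk
selection and accounting above it stay as they are):

    /* ... chunk picked, split and marked PGC_state_inuse under heap_lock ... */
    spin_unlock(&heap_lock);

    /* Scrub outside the lock so other alloc/free paths are not blocked. */
    for ( i = 0; i < (1 << order); i++ )
    {
        if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
        {
            scrub_one_page(&pg[i]);
            pg[i].count_info &= ~PGC_need_scrub;
        }
    }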

> @@ -876,6 +882,15 @@ static void free_heap_pages(
>          midsize_alloc_zone_pages = max(
>              midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
>  
> +    if ( need_scrub )
> +    {
> +        if ( !tainted )
> +        {
> +            for ( i = 0; i < (1 << order); i++ )
> +                pg[i].count_info |= PGC_need_scrub;
> +        }
> +    }

Two if()s like these should be folded into one.
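
I.e. simply combine the two conditions:

    if ( need_scrub && !tainted )
        for ( i = 0; i < (1 << order); i++ )
            pg[i].count_info |= PGC_need_scrub;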

> @@ -889,6 +904,17 @@ static void free_heap_pages(
>                   (PFN_ORDER(pg-mask) != order) ||
>                   (phys_to_nid(page_to_maddr(pg-mask)) != node) )
>                  break;
> +            /* If we need scrub, only merge with PGC_need_scrub pages */
> +            if ( need_scrub )
> +            {
> +                if ( !test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
> +                    break;
> +            }
> +            else
> +            {
> +                if ( test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
> +                    break;
> +            }

You're setting PGC_need_scrub on each 4k page anyway (which
is debatable), hence there's no need to look at the passed in
need_scrub flag here: Just check whether both chunks have the
flag set the same. Same below.
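
I.e. something like a single symmetric check (illustrative; likewise with
pg+mask for the successor case):

    if ( !!test_bit(_PGC_need_scrub, &pg->count_info) !=
         !!test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
        break;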

> @@ -1535,7 +1571,7 @@ void free_xenheap_pages(void *v, unsigned int order)
>  
>      memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
>  
> -    free_heap_pages(virt_to_page(v), order);
> +    free_heap_pages(virt_to_page(v), order, 1);

Why?

> @@ -1588,11 +1624,10 @@ void free_xenheap_pages(void *v, unsigned int order)
>  
>      for ( i = 0; i < (1u << order); i++ )
>      {
> -        scrub_one_page(&pg[i]);
>          pg[i].count_info &= ~PGC_xen_heap;
>      }
>  
> -    free_heap_pages(pg, order);
> +    free_heap_pages(pg, order, 1);

The flag needs to be 1 here, but I don't see why you also pass 1 in
the other free_xenheap_pages() incarnation above.

> @@ -1745,24 +1780,20 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
>           * domain has died we assume responsibility for erasure.
>           */
>          if ( unlikely(d->is_dying) )
> -            for ( i = 0; i < (1 << order); i++ )
> -                scrub_one_page(&pg[i]);
> -
> -        free_heap_pages(pg, order);
> +            free_heap_pages(pg, order, 1);
> +        else
> +            free_heap_pages(pg, order, 0);
>      }
>      else if ( unlikely(d == dom_cow) )
>      {
>          ASSERT(order == 0); 
> -        scrub_one_page(pg);
> -        free_heap_pages(pg, 0);
> +        free_heap_pages(pg, 0, 1);
>          drop_dom_ref = 0;
>      }
>      else
>      {
>          /* Freeing anonymous domain-heap pages. */
> -        for ( i = 0; i < (1 << order); i++ )
> -            scrub_one_page(&pg[i]);
> -        free_heap_pages(pg, order);
> +        free_heap_pages(pg, order, 1);
>          drop_dom_ref = 0;
>      }
>  

This hunk patches code that no longer exists (see commit daa4b800,
"slightly consolidate code in free_domheap_pages()").

Jan


* Re: [PATCH v2 1/3] xen: delay page scrubbing to allocation path
  2014-06-30 15:56 ` Jan Beulich
@ 2014-07-01  8:12   ` Bob Liu
  0 siblings, 0 replies; 4+ messages in thread
From: Bob Liu @ 2014-07-01  8:12 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Bob Liu, keir, ian.campbell, George.Dunlap, andrew.cooper3,
	xen-devel


On 06/30/2014 11:56 PM, Jan Beulich wrote:
>>>> On 30.06.14 at 15:39, <lliubbo@gmail.com> wrote:
>> --- a/xen/common/page_alloc.c
>> +++ b/xen/common/page_alloc.c
>> @@ -711,6 +711,12 @@ static struct page_info *alloc_heap_pages(
>>  
>>      for ( i = 0; i < (1 << order); i++ )
>>      {
>> +        if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
>> +        {
>> +            scrub_one_page(&pg[i]);
>> +            pg[i].count_info &= ~PGC_need_scrub;
>> +        }
>> +
> 
> heap_lock is still being held here - scrubbing should be done after it
> has been dropped (or else you reintroduce the same latency problem for
> other paths, which now need to wait for the scrubbing to complete).
> 

I see; right now it only avoids the case of, e.g., scrubbing a whole 4Tb
chunk when such a chunk is found for a request of a single page.

Anyway, I will move the scrubbing out of the spinlock in the next version.

>> @@ -876,6 +882,15 @@ static void free_heap_pages(
>>          midsize_alloc_zone_pages = max(
>>              midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
>>  
>> +    if ( need_scrub )
>> +    {
>> +        if ( !tainted )
>> +        {
>> +            for ( i = 0; i < (1 << order); i++ )
>> +                pg[i].count_info |= PGC_need_scrub;
>> +        }
>> +    }
> 
> Two if()s like these should be folded into one.
> 

Will be fixed.

>> @@ -889,6 +904,17 @@ static void free_heap_pages(
>>                   (PFN_ORDER(pg-mask) != order) ||
>>                   (phys_to_nid(page_to_maddr(pg-mask)) != node) )
>>                  break;
>> +            /* If we need scrub, only merge with PGC_need_scrub pages */
>> +            if ( need_scrub )
>> +            {
>> +                if ( !test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
>> +                    break;
>> +            }
>> +            else
>> +            {
>> +                if ( test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
>> +                    break;
>> +            }
> 
> You're setting PGC_need_scrub on each 4k page anyway (which
> is debatable), hence there's no need to look at the passed in
> need_scrub flag here: Just check whether both chunks have the
> flag set the same. Same below.
> 

Right, thanks for your suggestion.

>> @@ -1535,7 +1571,7 @@ void free_xenheap_pages(void *v, unsigned int order)
>>  
>>      memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
>>  
>> -    free_heap_pages(virt_to_page(v), order);
>> +    free_heap_pages(virt_to_page(v), order, 1);
> 
> Why?
> 

Sorry, that was a mistake; it will be fixed.

>> @@ -1588,11 +1624,10 @@ void free_xenheap_pages(void *v, unsigned int order)
>>  
>>      for ( i = 0; i < (1u << order); i++ )
>>      {
>> -        scrub_one_page(&pg[i]);
>>          pg[i].count_info &= ~PGC_xen_heap;
>>      }
>>  
>> -    free_heap_pages(pg, order);
>> +    free_heap_pages(pg, order, 1);
> 
> The flag needs to be 1 here, but I don't see why you also pass 1 in
> the other free_xenheap_pages() incarnation above.
> 
>> @@ -1745,24 +1780,20 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
>>           * domain has died we assume responsibility for erasure.
>>           */
>>          if ( unlikely(d->is_dying) )
>> -            for ( i = 0; i < (1 << order); i++ )
>> -                scrub_one_page(&pg[i]);
>> -
>> -        free_heap_pages(pg, order);
>> +            free_heap_pages(pg, order, 1);
>> +        else
>> +            free_heap_pages(pg, order, 0);
>>      }
>>      else if ( unlikely(d == dom_cow) )
>>      {
>>          ASSERT(order == 0); 
>> -        scrub_one_page(pg);
>> -        free_heap_pages(pg, 0);
>> +        free_heap_pages(pg, 0, 1);
>>          drop_dom_ref = 0;
>>      }
>>      else
>>      {
>>          /* Freeing anonymous domain-heap pages. */
>> -        for ( i = 0; i < (1 << order); i++ )
>> -            scrub_one_page(&pg[i]);
>> -        free_heap_pages(pg, order);
>> +        free_heap_pages(pg, order, 1);
>>          drop_dom_ref = 0;
>>      }
>>  
> 
> This hunk patches code that no longer exists (see commit daa4b800,
> "slightly consolidate code in free_domheap_pages()").
> 

I'll rebase this patch onto a newer version that includes that commit.
Thanks again.

-- 
Regards,
-Bob

