xen-devel.lists.xenproject.org archive mirror
* [PATCH 0/6] x86/HVM: cache attribute pinning adjustments
@ 2016-03-03 16:28 Jan Beulich
  2016-03-03 16:33 ` [PATCH 1/6] x86/HVM: honor cache attribute pinning for RAM only Jan Beulich
                   ` (6 more replies)
  0 siblings, 7 replies; 16+ messages in thread
From: Jan Beulich @ 2016-03-03 16:28 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu, Tim Deegan

1: honor cache attribute pinning for RAM only
2: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()
3: adjust hvm_set_mem_pinned_cacheattr() error indications
4: limit flushing on cache attribute pinning adjustments
5: adjust hvm_get_mem_pinned_cacheattr() GFN parameter
6: re-format cache attribute pinning code

Signed-off-by: Jan Beulich <jbeulich@suse.com>



* [PATCH 1/6] x86/HVM: honor cache attribute pinning for RAM only
  2016-03-03 16:28 [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Jan Beulich
@ 2016-03-03 16:33 ` Jan Beulich
  2016-03-03 17:36   ` Andrew Cooper
  2016-03-03 16:36 ` [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr() Jan Beulich
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2016-03-03 16:33 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu, Tim Deegan

Call hvm_get_mem_pinned_cacheattr() for RAM ranges only, and only when
the guest has a physical device assigned: XEN_DOMCTL_pin_mem_cacheattr
is documented to be intended for RAM only.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -782,32 +782,29 @@ int epte_get_entry_emt(struct domain *d,
                                  mfn_x(mfn) + (1UL << order) - 1) )
         return -1;
 
-    switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
+    if ( direct_mmio )
     {
-    case 1:
+        if ( (mfn_x(mfn) ^ d->arch.hvm_domain.vmx.apic_access_mfn) >> order )
+            return MTRR_TYPE_UNCACHABLE;
+        if ( order )
+            return -1;
         *ipat = 1;
-        return type != PAT_TYPE_UC_MINUS ? type : PAT_TYPE_UNCACHABLE;
-    case -1:
-        return -1;
+        return MTRR_TYPE_WRBACK;
     }
 
     if ( !need_iommu(d) && !cache_flush_permitted(d) )
     {
-        ASSERT(!direct_mmio ||
-               !((mfn_x(mfn) ^ d->arch.hvm_domain.vmx.apic_access_mfn) >>
-                 order));
         *ipat = 1;
         return MTRR_TYPE_WRBACK;
     }
 
-    if ( direct_mmio )
+    switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
     {
-        if ( (mfn_x(mfn) ^ d->arch.hvm_domain.vmx.apic_access_mfn) >> order )
-            return MTRR_TYPE_UNCACHABLE;
-        if ( order )
-            return -1;
+    case 1:
         *ipat = 1;
-        return MTRR_TYPE_WRBACK;
+        return type != PAT_TYPE_UC_MINUS ? type : PAT_TYPE_UNCACHABLE;
+    case -1:
+        return -1;
     }
 
     gmtrr_mtype = is_hvm_domain(d) && v ?
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -618,7 +618,8 @@ _sh_propagate(struct vcpu *v,
          * 3) if disables snoop control, compute the PAT index with
          *    gMTRR and gPAT.
          */
-        if ( hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), 0, &type) )
+        if ( !mmio_mfn &&
+             hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), 0, &type) )
             sflags |= pat_type_2_pte_flags(type);
         else if ( d->arch.hvm_domain.is_in_uc_mode )
             sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
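For readers following the control flow rather than the diff, the checks in
epte_get_entry_emt() after this change are ordered roughly as follows (a
simplified sketch assembled from the hunks above; the earlier range check
and the later MTRR/PAT combination logic are omitted):

    if ( direct_mmio )
    {
        /* MMIO is never subject to pinned cache attributes: only the
         * APIC access page may be mapped WB, everything else is UC. */
        if ( (mfn_x(mfn) ^ d->arch.hvm_domain.vmx.apic_access_mfn) >> order )
            return MTRR_TYPE_UNCACHABLE;
        if ( order )
            return -1;                  /* cannot handle a superpage here */
        *ipat = 1;
        return MTRR_TYPE_WRBACK;
    }

    if ( !need_iommu(d) && !cache_flush_permitted(d) )
    {
        /* No physical device assigned: RAM can uniformly be WB. */
        *ipat = 1;
        return MTRR_TYPE_WRBACK;
    }

    /* Only here, i.e. for RAM of a guest with a device assigned, is the
     * XEN_DOMCTL_pin_mem_cacheattr information consulted. */
    switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
    {
    case 1:
        *ipat = 1;
        return type != PAT_TYPE_UC_MINUS ? type : PAT_TYPE_UNCACHABLE;
    case -1:
        return -1;
    }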





* [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()
  2016-03-03 16:28 [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Jan Beulich
  2016-03-03 16:33 ` [PATCH 1/6] x86/HVM: honor cache attribute pinning for RAM only Jan Beulich
@ 2016-03-03 16:36 ` Jan Beulich
  2016-03-03 16:59   ` Wei Liu
  2016-03-03 17:35   ` Andrew Cooper
  2016-03-03 16:37 ` [PATCH 3/6] x86/HVM: adjust hvm_set_mem_pinned_cacheattr() error indications Jan Beulich
                   ` (4 subsequent siblings)
  6 siblings, 2 replies; 16+ messages in thread
From: Jan Beulich @ 2016-03-03 16:36 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu, Tim Deegan

Its return value can easily serve the purpose of the indirect output. We
can, however, no longer return an unspecific "success" for a domain of
the wrong type; since no caller exists that would call this for PV
domains, simply add an ASSERT().

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -546,17 +546,13 @@ void hvm_destroy_cacheattr_region_list(
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
     uint64_t guest_fn,
-    unsigned int order,
-    uint32_t *type)
+    unsigned int order)
 {
     struct hvm_mem_pinned_cacheattr_range *range;
     uint64_t mask = ~(uint64_t)0 << order;
-    int rc = 0;
+    int rc = -ENXIO;
 
-    *type = ~0;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
+    ASSERT(has_hvm_container_domain(d));
 
     rcu_read_lock(&pinned_cacheattr_rcu_lock);
     list_for_each_entry_rcu ( range,
@@ -566,14 +562,13 @@ int hvm_get_mem_pinned_cacheattr(
         if ( ((guest_fn & mask) >= range->start) &&
              ((guest_fn | ~mask) <= range->end) )
         {
-            *type = range->type;
-            rc = 1;
+            rc = range->type;
             break;
         }
         if ( ((guest_fn & mask) <= range->end) &&
              (range->start <= (guest_fn | ~mask)) )
         {
-            rc = -1;
+            rc = -EADDRNOTAVAIL;
             break;
         }
     }
@@ -762,7 +757,6 @@ int epte_get_entry_emt(struct domain *d,
                        unsigned int order, uint8_t *ipat, bool_t direct_mmio)
 {
     int gmtrr_mtype, hmtrr_mtype;
-    uint32_t type;
     struct vcpu *v = current;
 
     *ipat = 0;
@@ -798,14 +792,15 @@ int epte_get_entry_emt(struct domain *d,
         return MTRR_TYPE_WRBACK;
     }
 
-    switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
+    gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
+    if ( gmtrr_mtype >= 0 )
     {
-    case 1:
         *ipat = 1;
-        return type != PAT_TYPE_UC_MINUS ? type : PAT_TYPE_UNCACHABLE;
-    case -1:
-        return -1;
+        return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
+                                                : MTRR_TYPE_UNCACHABLE;
     }
+    if ( gmtrr_mtype == -EADDRNOTAVAIL )
+        return -1;
 
     gmtrr_mtype = is_hvm_domain(d) && v ?
                   get_mtrr_type(&v->arch.hvm_vcpu.mtrr,
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -607,7 +607,7 @@ _sh_propagate(struct vcpu *v,
     if ( (level == 1) && is_hvm_domain(d) &&
          !is_xen_heap_mfn(mfn_x(target_mfn)) )
     {
-        unsigned int type;
+        int type;
 
         ASSERT(!(sflags & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)));
 
@@ -619,7 +619,8 @@ _sh_propagate(struct vcpu *v,
          *    gMTRR and gPAT.
          */
         if ( !mmio_mfn &&
-             hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), 0, &type) )
+             (type = hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn),
+                                                  0)) >= 0 )
             sflags |= pat_type_2_pte_flags(type);
         else if ( d->arch.hvm_domain.is_in_uc_mode )
             sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
--- a/xen/include/asm-x86/hvm/cacheattr.h
+++ b/xen/include/asm-x86/hvm/cacheattr.h
@@ -15,8 +15,7 @@ void hvm_destroy_cacheattr_region_list(
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
     uint64_t guest_fn,
-    unsigned int order,
-    uint32_t *type);
+    unsigned int order);
 
 
 /* Set pinned caching type for a domain. */
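With the indirection gone, callers distinguish the three possible outcomes
purely by the sign and value of the return code. A minimal sketch of the
resulting calling convention (the surrounding context is illustrative only;
the return values match the hunks above):

    int type = hvm_get_mem_pinned_cacheattr(d, gfn, order);

    if ( type >= 0 )
    {
        /* gfn (at this order) lies entirely inside a pinned range:
         * type is one of the PAT_TYPE_* values. */
    }
    else if ( type == -EADDRNOTAVAIL )
    {
        /* The range straddles a pinned region boundary (possible only
         * for order > 0): the mapping needs to be split. */
    }
    else /* -ENXIO */
    {
        /* No pinned range covers gfn: fall back to gMTRR/gPAT. */
    }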





* [PATCH 3/6] x86/HVM: adjust hvm_set_mem_pinned_cacheattr() error indications
  2016-03-03 16:28 [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Jan Beulich
  2016-03-03 16:33 ` [PATCH 1/6] x86/HVM: honor cache attribute pinning for RAM only Jan Beulich
  2016-03-03 16:36 ` [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr() Jan Beulich
@ 2016-03-03 16:37 ` Jan Beulich
  2016-03-03 17:37   ` Andrew Cooper
  2016-03-03 16:37 ` [PATCH 4/6] x86/HVM: limit flushing on cache attribute pinning adjustments Jan Beulich
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2016-03-03 16:37 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu, Tim Deegan

Make it return an error for a domain of the wrong kind or for an obviously
bad GFN range.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -591,8 +591,11 @@ int32_t hvm_set_mem_pinned_cacheattr(
     struct hvm_mem_pinned_cacheattr_range *range;
     int rc = 1;
 
-    if ( !is_hvm_domain(d) || gfn_end < gfn_start )
-        return 0;
+    if ( !is_hvm_domain(d) )
+        return -EOPNOTSUPP;
+
+    if ( gfn_end < gfn_start || (gfn_start | gfn_end) >> paddr_bits )
+        return -EINVAL;
 
     if ( type == XEN_DOMCTL_DELETE_MEM_CACHEATTR )
     {
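The practical effect shows up at the hypercall boundary: assuming the
XEN_DOMCTL_pin_mem_cacheattr handler simply forwards this return value
(the sketch below is hypothetical and its field names are from memory,
not taken from this series), the tool stack now sees a meaningful errno
instead of an indistinct 0:

    case XEN_DOMCTL_pin_mem_cacheattr:
        /* -EOPNOTSUPP for a non-HVM domain, -EINVAL for a reversed or
         * out-of-range GFN span, 0 or another error otherwise. */
        ret = hvm_set_mem_pinned_cacheattr(d,
                                           domctl->u.pin_mem_cacheattr.start,
                                           domctl->u.pin_mem_cacheattr.end,
                                           domctl->u.pin_mem_cacheattr.type);
        break;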





* [PATCH 4/6] x86/HVM: limit flushing on cache attribute pinning adjustments
  2016-03-03 16:28 [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Jan Beulich
                   ` (2 preceding siblings ...)
  2016-03-03 16:37 ` [PATCH 3/6] x86/HVM: adjust hvm_set_mem_pinned_cacheattr() error indications Jan Beulich
@ 2016-03-03 16:37 ` Jan Beulich
  2016-03-03 17:38   ` Andrew Cooper
  2016-03-03 16:38 ` [PATCH 5/6] x86/HVM: adjust hvm_get_mem_pinned_cacheattr() GFN parameter Jan Beulich
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2016-03-03 16:37 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu, Tim Deegan

Avoid the cache flush on EPT when removing a UC- range, since when used
this type gets converted to UC anyway (there is no UC- among the types
valid in MTRRs, and hence none in EPT's emt field).

We might further want to consider forcing only write buffer flushes when
removing WC ranges.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -611,8 +611,21 @@ int32_t hvm_set_mem_pinned_cacheattr(
                 type = range->type;
                 call_rcu(&range->rcu, free_pinned_cacheattr_entry);
                 p2m_memory_type_changed(d);
-                if ( type != PAT_TYPE_UNCACHABLE )
+                switch ( type )
+                {
+                case PAT_TYPE_UC_MINUS:
+                    /*
+                     * For EPT we can also avoid the flush in this case;
+                     * see epte_get_entry_emt().
+                     */
+                    if ( hap_enabled(d) && cpu_has_vmx )
+                case PAT_TYPE_UNCACHABLE:
+                        break;
+                    /* fall through */
+                default:
                     flush_all(FLUSH_CACHE);
+                    break;
+                }
                 return 0;
             }
         rcu_read_unlock(&pinned_cacheattr_rcu_lock);
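The case label nested inside the if() lets UC removals always skip the
flush, while UC- removals skip it only on EPT. Spelled out without the
fall-through trick, the decision is equivalent to the following (a
readability sketch only, not a proposed alternative to the hunk above):

    bool_t need_flush = 1;

    if ( type == PAT_TYPE_UNCACHABLE )
        need_flush = 0;                /* range was always UC */
    else if ( type == PAT_TYPE_UC_MINUS && hap_enabled(d) && cpu_has_vmx )
        need_flush = 0;                /* EPT degrades UC- to UC anyway */

    if ( need_flush )
        flush_all(FLUSH_CACHE);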





* [PATCH 5/6] x86/HVM: adjust hvm_get_mem_pinned_cacheattr() GFN parameter
  2016-03-03 16:28 [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Jan Beulich
                   ` (3 preceding siblings ...)
  2016-03-03 16:37 ` [PATCH 4/6] x86/HVM: limit flushing on cache attribute pinning adjustments Jan Beulich
@ 2016-03-03 16:38 ` Jan Beulich
  2016-03-03 17:39   ` Andrew Cooper
  2016-03-03 16:38 ` [PATCH 6/6] x86/HVM: re-format cache attribute pinning code Jan Beulich
  2016-03-04 10:36 ` [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Tim Deegan
  6 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2016-03-03 16:38 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu, Tim Deegan

Make it gfn_t and rename it accordingly.

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -545,7 +545,7 @@ void hvm_destroy_cacheattr_region_list(
 
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
-    uint64_t guest_fn,
+    gfn_t gfn,
     unsigned int order)
 {
     struct hvm_mem_pinned_cacheattr_range *range;
@@ -559,14 +559,14 @@ int hvm_get_mem_pinned_cacheattr(
                               &d->arch.hvm_domain.pinned_cacheattr_ranges,
                               list )
     {
-        if ( ((guest_fn & mask) >= range->start) &&
-             ((guest_fn | ~mask) <= range->end) )
+        if ( ((gfn_x(gfn) & mask) >= range->start) &&
+             ((gfn_x(gfn) | ~mask) <= range->end) )
         {
             rc = range->type;
             break;
         }
-        if ( ((guest_fn & mask) <= range->end) &&
-             (range->start <= (guest_fn | ~mask)) )
+        if ( ((gfn_x(gfn) & mask) <= range->end) &&
+             ((gfn_x(gfn) | ~mask) >= range->start) )
         {
             rc = -EADDRNOTAVAIL;
             break;
@@ -808,7 +808,7 @@ int epte_get_entry_emt(struct domain *d,
         return MTRR_TYPE_WRBACK;
     }
 
-    gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
+    gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, _gfn(gfn), order);
     if ( gmtrr_mtype >= 0 )
     {
         *ipat = 1;
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -619,8 +619,7 @@ _sh_propagate(struct vcpu *v,
          *    gMTRR and gPAT.
          */
         if ( !mmio_mfn &&
-             (type = hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn),
-                                                  0)) >= 0 )
+             (type = hvm_get_mem_pinned_cacheattr(d, target_gfn, 0)) >= 0 )
             sflags |= pat_type_2_pte_flags(type);
         else if ( d->arch.hvm_domain.is_in_uc_mode )
             sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
--- a/xen/include/asm-x86/hvm/cacheattr.h
+++ b/xen/include/asm-x86/hvm/cacheattr.h
@@ -1,20 +1,22 @@
 #ifndef __HVM_CACHEATTR_H__
 #define __HVM_CACHEATTR_H__
 
+#include <xen/mm.h>
+
 void hvm_init_cacheattr_region_list(
     struct domain *d);
 void hvm_destroy_cacheattr_region_list(
     struct domain *d);
 
 /*
- * To see guest_fn is in the pinned range or not,
+ * Check whether gfn is in the pinned range:
  * if yes, return 1, and set type to value in this range
  * if no,  return 0, setting type to ~0
  * if ambiguous, return -1, setting type to ~0 (possible only for order > 0)
  */
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
-    uint64_t guest_fn,
+    gfn_t gfn,
     unsigned int order);
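
For reference, gfn_t, _gfn() and gfn_x() are Xen's type-safe frame number
wrappers. Roughly (a simplified sketch of the idea; the real definition
comes from Xen's typesafe machinery, and release builds may reduce the
wrapper to a plain integer typedef):

    /* Wrapping the value in a struct makes it a compile-time error to
     * mix up a gfn_t with a raw unsigned long or with an mfn_t. */
    typedef struct { unsigned long gfn; } gfn_t;

    static inline gfn_t _gfn(unsigned long g)      /* box a raw value */
    {
        return (gfn_t){ g };
    }

    static inline unsigned long gfn_x(gfn_t g)     /* unbox for arithmetic */
    {
        return g.gfn;
    }

Hence the shadow code can now pass target_gfn through unchanged, while
epte_get_entry_emt(), whose own parameter remains a plain integer, boxes
it with _gfn().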
 
 





* [PATCH 6/6] x86/HVM: re-format cache attribute pinning code
  2016-03-03 16:28 [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Jan Beulich
                   ` (4 preceding siblings ...)
  2016-03-03 16:38 ` [PATCH 5/6] x86/HVM: adjust hvm_get_mem_pinned_cacheattr() GFN parameter Jan Beulich
@ 2016-03-03 16:38 ` Jan Beulich
  2016-03-03 17:40   ` Andrew Cooper
  2016-03-04 10:36 ` [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Tim Deegan
  6 siblings, 1 reply; 16+ messages in thread
From: Jan Beulich @ 2016-03-03 16:38 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Wei Liu, Tim Deegan

No functional change intended, although this includes dropping a
redundant is_hvm_domain() check.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -521,14 +521,12 @@ struct hvm_mem_pinned_cacheattr_range {
 
 static DEFINE_RCU_READ_LOCK(pinned_cacheattr_rcu_lock);
 
-void hvm_init_cacheattr_region_list(
-    struct domain *d)
+void hvm_init_cacheattr_region_list(struct domain *d)
 {
     INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
 }
 
-void hvm_destroy_cacheattr_region_list(
-    struct domain *d)
+void hvm_destroy_cacheattr_region_list(struct domain *d)
 {
     struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
     struct hvm_mem_pinned_cacheattr_range *range;
@@ -543,10 +541,8 @@ void hvm_destroy_cacheattr_region_list(
     }
 }
 
-int hvm_get_mem_pinned_cacheattr(
-    struct domain *d,
-    gfn_t gfn,
-    unsigned int order)
+int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn,
+                                 unsigned int order)
 {
     struct hvm_mem_pinned_cacheattr_range *range;
     uint64_t mask = ~(uint64_t)0 << order;
@@ -582,11 +578,8 @@ static void free_pinned_cacheattr_entry(
     xfree(container_of(rcu, struct hvm_mem_pinned_cacheattr_range, rcu));
 }
 
-int32_t hvm_set_mem_pinned_cacheattr(
-    struct domain *d,
-    uint64_t gfn_start,
-    uint64_t gfn_end,
-    uint32_t  type)
+int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
+                                 uint64_t gfn_end, uint32_t type)
 {
     struct hvm_mem_pinned_cacheattr_range *range;
     int rc = 1;
@@ -597,8 +590,9 @@ int32_t hvm_set_mem_pinned_cacheattr(
     if ( gfn_end < gfn_start || (gfn_start | gfn_end) >> paddr_bits )
         return -EINVAL;
 
-    if ( type == XEN_DOMCTL_DELETE_MEM_CACHEATTR )
+    switch ( type )
     {
+    case XEN_DOMCTL_DELETE_MEM_CACHEATTR:
         /* Remove the requested range. */
         rcu_read_lock(&pinned_cacheattr_rcu_lock);
         list_for_each_entry_rcu ( range,
@@ -630,16 +624,18 @@ int32_t hvm_set_mem_pinned_cacheattr(
             }
         rcu_read_unlock(&pinned_cacheattr_rcu_lock);
         return -ENOENT;
-    }
 
-    if ( !((type == PAT_TYPE_UNCACHABLE) ||
-           (type == PAT_TYPE_WRCOMB) ||
-           (type == PAT_TYPE_WRTHROUGH) ||
-           (type == PAT_TYPE_WRPROT) ||
-           (type == PAT_TYPE_WRBACK) ||
-           (type == PAT_TYPE_UC_MINUS)) ||
-         !is_hvm_domain(d) )
+    case PAT_TYPE_UC_MINUS:
+    case PAT_TYPE_UNCACHABLE:
+    case PAT_TYPE_WRBACK:
+    case PAT_TYPE_WRCOMB:
+    case PAT_TYPE_WRPROT:
+    case PAT_TYPE_WRTHROUGH:
+        break;
+
+    default:
         return -EINVAL;
+    }
 
     rcu_read_lock(&pinned_cacheattr_rcu_lock);
     list_for_each_entry_rcu ( range,
--- a/xen/include/asm-x86/hvm/cacheattr.h
+++ b/xen/include/asm-x86/hvm/cacheattr.h
@@ -3,28 +3,21 @@
 
 #include <xen/mm.h>
 
-void hvm_init_cacheattr_region_list(
-    struct domain *d);
-void hvm_destroy_cacheattr_region_list(
-    struct domain *d);
+struct domain;
+void hvm_init_cacheattr_region_list(struct domain *d);
+void hvm_destroy_cacheattr_region_list(struct domain *d);
 
 /*
  * Check whether gfn is in the pinned range:
- * if yes, return 1, and set type to value in this range
- * if no,  return 0, setting type to ~0
- * if ambiguous, return -1, setting type to ~0 (possible only for order > 0)
+ * if yes, return the (non-negative) type
+ * if no or ambiguous, return a negative error code
  */
-int hvm_get_mem_pinned_cacheattr(
-    struct domain *d,
-    gfn_t gfn,
-    unsigned int order);
+int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn,
+                                 unsigned int order);
 
 
 /* Set pinned caching type for a domain. */
-int32_t hvm_set_mem_pinned_cacheattr(
-    struct domain *d,
-    uint64_t gfn_start,
-    uint64_t gfn_end,
-    uint32_t  type);
+int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
+                                 uint64_t gfn_end, uint32_t type);
 
 #endif /* __HVM_CACHEATTR_H__ */




* Re: [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()
  2016-03-03 16:36 ` [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr() Jan Beulich
@ 2016-03-03 16:59   ` Wei Liu
  2016-03-03 17:07     ` Wei Liu
  2016-03-03 17:35   ` Andrew Cooper
  1 sibling, 1 reply; 16+ messages in thread
From: Wei Liu @ 2016-03-03 16:59 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Tim Deegan, Keir Fraser, Wei Liu, Andrew Cooper

On Thu, Mar 03, 2016 at 09:36:19AM -0700, Jan Beulich wrote:
[...]
>              sflags |= pat_type_2_pte_flags(type);
>          else if ( d->arch.hvm_domain.is_in_uc_mode )
>              sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
> --- a/xen/include/asm-x86/hvm/cacheattr.h
> +++ b/xen/include/asm-x86/hvm/cacheattr.h
> @@ -15,8 +15,7 @@ void hvm_destroy_cacheattr_region_list(
>  int hvm_get_mem_pinned_cacheattr(
>      struct domain *d,
>      uint64_t guest_fn,
> -    unsigned int order,
> -    uint32_t *type);
> +    unsigned int order);
>  

You seem to have forgotten to update the comment for this function as
you did in the previous patch.

Wei.


* Re: [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()
  2016-03-03 16:59   ` Wei Liu
@ 2016-03-03 17:07     ` Wei Liu
  0 siblings, 0 replies; 16+ messages in thread
From: Wei Liu @ 2016-03-03 17:07 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Tim Deegan, Keir Fraser, Wei Liu, Andrew Cooper

On Thu, Mar 03, 2016 at 04:59:22PM +0000, Wei Liu wrote:
> On Thu, Mar 03, 2016 at 09:36:19AM -0700, Jan Beulich wrote:
> [...]
> >              sflags |= pat_type_2_pte_flags(type);
> >          else if ( d->arch.hvm_domain.is_in_uc_mode )
> >              sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
> > --- a/xen/include/asm-x86/hvm/cacheattr.h
> > +++ b/xen/include/asm-x86/hvm/cacheattr.h
> > @@ -15,8 +15,7 @@ void hvm_destroy_cacheattr_region_list(
> >  int hvm_get_mem_pinned_cacheattr(
> >      struct domain *d,
> >      uint64_t guest_fn,
> > -    unsigned int order,
> > -    uint32_t *type);
> > +    unsigned int order);
> >  
> 
> You seem to have forgotten to update the comment for this function as
> you did in previous patch.

Oh well, the updated comment went into the final patch of this series.

Wei.


* Re: [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()
  2016-03-03 16:36 ` [PATCH 2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr() Jan Beulich
  2016-03-03 16:59   ` Wei Liu
@ 2016-03-03 17:35   ` Andrew Cooper
  1 sibling, 0 replies; 16+ messages in thread
From: Andrew Cooper @ 2016-03-03 17:35 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser, Wei Liu, Tim Deegan

On 03/03/16 16:36, Jan Beulich wrote:
> Its return value can easily serve the purpose. We cannot, however,
> return unspecific "success" anymore for a domain of the wrong type -
> since no caller exists that would call this for PV domains, simply add
> an ASSERT().
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 1/6] x86/HVM: honor cache attribute pinning for RAM only
  2016-03-03 16:33 ` [PATCH 1/6] x86/HVM: honor cache attribute pinning for RAM only Jan Beulich
@ 2016-03-03 17:36   ` Andrew Cooper
  0 siblings, 0 replies; 16+ messages in thread
From: Andrew Cooper @ 2016-03-03 17:36 UTC (permalink / raw)
  To: xen-devel

On 03/03/16 16:33, Jan Beulich wrote:
> Call hvm_get_mem_pinned_cacheattr() for RAM ranges only, and only when
> the guest has a physical device assigned: XEN_DOMCTL_pin_mem_cacheattr
> is documented to be intended for RAM only.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 3/6] x86/HVM: adjust hvm_set_mem_pinned_cacheattr() error indications
  2016-03-03 16:37 ` [PATCH 3/6] x86/HVM: adjust hvm_set_mem_pinned_cacheattr() error indications Jan Beulich
@ 2016-03-03 17:37   ` Andrew Cooper
  0 siblings, 0 replies; 16+ messages in thread
From: Andrew Cooper @ 2016-03-03 17:37 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser, Wei Liu, Tim Deegan

On 03/03/16 16:37, Jan Beulich wrote:
> Make it return an error on bad domain kind or obviously bad GFN range.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 4/6] x86/HVM: limit flushing on cache attribute pinning adjustments
  2016-03-03 16:37 ` [PATCH 4/6] x86/HVM: limit flushing on cache attribute pinning adjustments Jan Beulich
@ 2016-03-03 17:38   ` Andrew Cooper
  0 siblings, 0 replies; 16+ messages in thread
From: Andrew Cooper @ 2016-03-03 17:38 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser, Wei Liu, Tim Deegan

On 03/03/16 16:37, Jan Beulich wrote:
> Avoid cache flush on EPT when removing a UC- range, since when used
> this type gets converted to UC anyway (there's no UC- among the types
> valid in MTRRs and hence EPT's emt field).
>
> We might further wwant to consider only forcing write buffer flushes
> when removing WC ranges.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 5/6] x86/HVM: adjust hvm_get_mem_pinned_cacheattr() GFN parameter
  2016-03-03 16:38 ` [PATCH 5/6] x86/HVM: adjust hvm_get_mem_pinned_cacheattr() GFN parameter Jan Beulich
@ 2016-03-03 17:39   ` Andrew Cooper
  0 siblings, 0 replies; 16+ messages in thread
From: Andrew Cooper @ 2016-03-03 17:39 UTC (permalink / raw)
  To: xen-devel

On 03/03/16 16:38, Jan Beulich wrote:
> Make it gfn_t and rename it accordingly.
>
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 6/6] x86/HVM: re-format cache attribute pinning code
  2016-03-03 16:38 ` [PATCH 6/6] x86/HVM: re-format cache attribute pinning code Jan Beulich
@ 2016-03-03 17:40   ` Andrew Cooper
  0 siblings, 0 replies; 16+ messages in thread
From: Andrew Cooper @ 2016-03-03 17:40 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Wei Liu, Keir Fraser, Tim Deegan

On 03/03/16 16:38, Jan Beulich wrote:
> No intended functional change, albeit it includes ditching a redundant
> is_hvm_domain().
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 0/6] x86/HVM: cache attribute pinning adjustments
  2016-03-03 16:28 [PATCH 0/6] x86/HVM: cache attribute pinning adjustments Jan Beulich
                   ` (5 preceding siblings ...)
  2016-03-03 16:38 ` [PATCH 6/6] x86/HVM: re-format cache attribute pinning code Jan Beulich
@ 2016-03-04 10:36 ` Tim Deegan
  6 siblings, 0 replies; 16+ messages in thread
From: Tim Deegan @ 2016-03-04 10:36 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Keir Fraser, Wei Liu, Andrew Cooper

At 09:28 -0700 on 03 Mar (1456997339), Jan Beulich wrote:
> 1: honor cache attribute pinning for RAM only
> 2: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()
> 3: adjust hvm_set_mem_pinned_cacheattr() error indications
> 4: limit flushing on cache attribute pinning adjustments
> 5: adjust hvm_get_mem_pinned_cacheattr() GFN parameter
> 6: re-format cache attribute pinning code
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

All the shadow-pagetable bits:
Acked-by: Tim Deegan <tim@xen.org>

