From: Tamas K Lengyel <tklengyel@sec.in.tum.de>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, julien.grall@linaro.org,
	ian.jackson@eu.citrix.com, stefano.stabellini@citrix.com,
	andres@lagarcavilla.org, jbeulich@suse.com,
	dgdegra@tycho.nsa.gov, Tamas K Lengyel <tklengyel@sec.in.tum.de>
Subject: [PATCH v3 08/15] xen/arm: p2m type definitions and changes
Date: Mon,  1 Sep 2014 16:22:02 +0200	[thread overview]
Message-ID: <1409581329-2607-9-git-send-email-tklengyel@sec.in.tum.de> (raw)
In-Reply-To: <1409581329-2607-1-git-send-email-tklengyel@sec.in.tum.de>

Define p2m_access_t on ARM and make the necessary changes to the
page table construction routines so that they pass the default access
information through. Also define the radix tree that will hold the
access permission settings, as the PTEs don't have enough
software-programmable bits available.
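
An illustrative sketch (not part of this patch) of how the radix tree
might be used to store and look up a per-GFN access setting, assuming
Xen's radix_tree_int_to_ptr()/radix_tree_ptr_to_int() helpers; the
p2m_mem_access_set/get function names are hypothetical:

    /* Hypothetical: keep one p2m_access_t per GFN in the radix tree,
     * since the ARM PTEs lack spare software-programmable bits. */
    static int p2m_mem_access_set(struct p2m_domain *p2m,
                                  unsigned long gfn, p2m_access_t a)
    {
        /* Encode the small integer directly in the slot pointer. */
        return radix_tree_insert(&p2m->mem_access_settings, gfn,
                                 radix_tree_int_to_ptr(a));
    }

    static p2m_access_t p2m_mem_access_get(struct p2m_domain *p2m,
                                           unsigned long gfn)
    {
        void *v = radix_tree_lookup(&p2m->mem_access_settings, gfn);

        /* No explicit entry: fall back to the domain default. */
        return v ? radix_tree_ptr_to_int(v) : p2m->default_access;
    }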

Signed-off-by: Tamas K Lengyel <tklengyel@sec.in.tum.de>
---
 xen/arch/arm/p2m.c        | 38 ++++++++++++++++-------
 xen/include/asm-arm/p2m.h | 78 +++++++++++++++++++++++++++++++++++------------
 2 files changed, 85 insertions(+), 31 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 143199b..a6dea5b 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -10,6 +10,9 @@
 #include <asm/event.h>
 #include <asm/hardirq.h>
 #include <asm/page.h>
+#include <xen/mem_event.h>
+#include <public/mem_event.h>
+#include <xen/mem_access.h>
 
 /* First level P2M is 2 consecutive pages */
 #define P2M_FIRST_ORDER 1
@@ -458,7 +461,8 @@ static int apply_one_level(struct domain *d,
                            paddr_t *maddr,
                            bool_t *flush,
                            int mattr,
-                           p2m_type_t t)
+                           p2m_type_t t,
+                           p2m_access_t a)
 {
     /* Helpers to lookup the properties of each level */
     const paddr_t level_sizes[] =
@@ -690,7 +694,8 @@ static int apply_p2m_changes(struct domain *d,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
                      int mattr,
-                     p2m_type_t t)
+                     p2m_type_t t,
+                     p2m_access_t a)
 {
     int rc, ret;
     struct p2m_domain *p2m = &d->arch.p2m;
@@ -755,7 +760,7 @@ static int apply_p2m_changes(struct domain *d,
                               1, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -776,7 +781,7 @@ static int apply_p2m_changes(struct domain *d,
                               2, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -795,7 +800,7 @@ static int apply_p2m_changes(struct domain *d,
                               3, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         /* L3 had better have done something! We cannot descend any further */
         BUG_ON(ret == P2M_ONE_DESCEND);
@@ -837,7 +842,8 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t end)
 {
     return apply_p2m_changes(d, ALLOCATE, start, end,
-                             0, MATTR_MEM, p2m_ram_rw);
+                             0, MATTR_MEM, p2m_ram_rw,
+                             d->arch.p2m.default_access);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -849,7 +855,8 @@ int map_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr_mfns),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_mmio_direct);
+                             MATTR_DEV, p2m_mmio_direct,
+                             d->arch.p2m.default_access);
 }
 
 int guest_physmap_add_entry(struct domain *d,
@@ -861,7 +868,8 @@ int guest_physmap_add_entry(struct domain *d,
     return apply_p2m_changes(d, INSERT,
                              pfn_to_paddr(gpfn),
                              pfn_to_paddr(gpfn + (1 << page_order)),
-                             pfn_to_paddr(mfn), MATTR_MEM, t);
+                             pfn_to_paddr(mfn), MATTR_MEM, t,
+                             d->arch.p2m.default_access);
 }
 
 void guest_physmap_remove_page(struct domain *d,
@@ -871,7 +879,8 @@ void guest_physmap_remove_page(struct domain *d,
     apply_p2m_changes(d, REMOVE,
                       pfn_to_paddr(gpfn),
                       pfn_to_paddr(gpfn + (1<<page_order)),
-                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid,
+                      d->arch.p2m.default_access);
 }
 
 int p2m_alloc_table(struct domain *d)
@@ -974,6 +983,8 @@ void p2m_teardown(struct domain *d)
 
     p2m_free_vmid(d);
 
+    radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
     spin_unlock(&p2m->lock);
 }
 
@@ -999,6 +1010,9 @@ int p2m_init(struct domain *d)
     p2m->max_mapped_gfn = 0;
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
+    p2m->default_access = p2m_access_rwx;
+    radix_tree_init(&p2m->mem_access_settings);
+
 err:
     spin_unlock(&p2m->lock);
 
@@ -1013,7 +1027,8 @@ int relinquish_p2m_mapping(struct domain *d)
                               pfn_to_paddr(p2m->lowest_mapped_gfn),
                               pfn_to_paddr(p2m->max_mapped_gfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, p2m_invalid,
+                              d->arch.p2m.default_access);
 }
 
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
@@ -1027,7 +1042,8 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
                              pfn_to_paddr(start_mfn),
                              pfn_to_paddr(end_mfn),
                              pfn_to_paddr(INVALID_MFN),
-                             MATTR_MEM, p2m_invalid);
+                             MATTR_MEM, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 06c93a0..afdbf84 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,9 +2,54 @@
 #define _XEN_P2M_H
 
 #include <xen/mm.h>
+#include <xen/radix-tree.h>
+#include <public/memory.h>
+#include <public/mem_event.h>
 
 struct domain;
 
+/* List of possible types for each page in the p2m entry.
+ * The number of available bits per page in the pte for this purpose is 4,
+ * so only 16 types are possible. If we run out of values in the future,
+ * higher values can be used for pseudo-types that are not stored in the
+ * p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
+    p2m_map_foreign,    /* Ram pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
+} p2m_type_t;
+
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type. Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient time, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+    p2m_access_n    = 0, /* No access permissions allowed */
+    p2m_access_r    = 1,
+    p2m_access_w    = 2,
+    p2m_access_rw   = 3,
+    p2m_access_x    = 4,
+    p2m_access_rx   = 5,
+    p2m_access_wx   = 6,
+    p2m_access_rwx  = 7
+} p2m_access_t;
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -38,27 +83,20 @@ struct p2m_domain {
          * at each p2m tree level. */
         unsigned long shattered[4];
     } stats;
-};
 
-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 bits.
- * So it's possible to only have 16 fields. If we run out of value in the
- * future, it's possible to use higher value for pseudo-type and don't store
- * them in the p2m entry.
- */
-typedef enum {
-    p2m_invalid = 0,    /* Nothing mapped here */
-    p2m_ram_rw,         /* Normal read/write guest RAM */
-    p2m_ram_ro,         /* Read-only; writes are silently dropped */
-    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
-    p2m_map_foreign,    /* Ram pages from foreign domain */
-    p2m_grant_map_rw,   /* Read/write grant mapping */
-    p2m_grant_map_ro,   /* Read-only grant mapping */
-    /* The types below are only used to decide the page attribute in the P2M */
-    p2m_iommu_map_rw,   /* Read/write iommu mapping */
-    p2m_iommu_map_ro,   /* Read-only iommu mapping */
-    p2m_max_real_type,  /* Types after this won't be store in the p2m */
-} p2m_type_t;
+    /* Default P2M access type for each page in the domain: new pages,
+     * swapped-in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type. See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* If true, and an access fault comes in with no mem_event listener,
+     * pause the domain; otherwise, remove the access restrictions. */
+    bool_t access_required;
+
+    /* Radix tree to store the p2m_access_t settings, as the PTEs don't
+     * have enough available bits to store this information. */
+    struct radix_tree_root mem_access_settings;
+};
 
 #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
 #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
-- 
2.1.0.rc1
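
An aside on the patch above, again illustrative only: the p2m_access_t
values are chosen so that r = 1, w = 2 and x = 4 compose as a bitmask
(for example, p2m_access_rx == p2m_access_r | p2m_access_x). A sketch
of how a fault handler could test a violation against a stored access
type under that assumption; the access_violation() helper name is
hypothetical:

    /* Hypothetical: return 1 if the faulting operation violates the
     * access type, relying on the r/w/x bitmask encoding above. */
    static bool_t access_violation(p2m_access_t a, bool_t read,
                                   bool_t write, bool_t exec)
    {
        if ( read && !(a & p2m_access_r) )
            return 1;
        if ( write && !(a & p2m_access_w) )
            return 1;
        if ( exec && !(a & p2m_access_x) )
            return 1;
        return 0;
    }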
